e75bebe35774a7b2c0679560eac200d60f425ca3
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2017 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum vector_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q,
87 NT_zero,
88 NT_merge
89 };
90
91 /* Bits for DEFINED field in vector_type_el. */
92 #define NTA_HASTYPE 1
93 #define NTA_HASINDEX 2
94 #define NTA_HASVARWIDTH 4
95
96 struct vector_type_el
97 {
98 enum vector_el_type type;
99 unsigned char defined;
100 unsigned width;
101 int64_t index;
102 };
103
104 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
105
106 struct reloc
107 {
108 bfd_reloc_code_real_type type;
109 expressionS exp;
110 int pc_rel;
111 enum aarch64_opnd opnd;
112 uint32_t flags;
113 unsigned need_libopcodes_p : 1;
114 };
115
116 struct aarch64_instruction
117 {
118 /* libopcodes structure for instruction intermediate representation. */
119 aarch64_inst base;
120 /* Record assembly errors found during the parsing. */
121 struct
122 {
123 enum aarch64_operand_error_kind kind;
124 const char *error;
125 } parsing_error;
126 /* The condition that appears in the assembly line. */
127 int cond;
128 /* Relocation information (including the GAS internal fixup). */
129 struct reloc reloc;
130 /* Need to generate an immediate in the literal pool. */
131 unsigned gen_lit_pool : 1;
132 };
133
134 typedef struct aarch64_instruction aarch64_instruction;
135
136 static aarch64_instruction inst;
137
138 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
139 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
140
/* Diagnostics inline function utilities.

   These are lightweight utilities which should only be called by parse_operands
   and other parsers.  GAS processes each assembly line by parsing it against
   instruction template(s), in the case of multiple templates (for the same
   mnemonic name), those templates are tried one by one until one succeeds or
   all fail.  An assembly line may fail a few templates before being
   successfully parsed; an error saved here in most cases is not a user error
   but an error indicating the current template is not the right template.
   Therefore it is very important that errors can be saved at a low cost during
   the parsing; we don't want to slow down the whole parsing by recording
   non-user errors in detail.

   Remember that the objective is to help GAS pick up the most appropriate
   error message in the case of multiple templates, e.g. FMOV which has 8
   templates.  */
157
158 static inline void
159 clear_error (void)
160 {
161 inst.parsing_error.kind = AARCH64_OPDE_NIL;
162 inst.parsing_error.error = NULL;
163 }
164
165 static inline bfd_boolean
166 error_p (void)
167 {
168 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
169 }
170
171 static inline const char *
172 get_error_message (void)
173 {
174 return inst.parsing_error.error;
175 }
176
177 static inline enum aarch64_operand_error_kind
178 get_error_kind (void)
179 {
180 return inst.parsing_error.kind;
181 }
182
183 static inline void
184 set_error (enum aarch64_operand_error_kind kind, const char *error)
185 {
186 inst.parsing_error.kind = kind;
187 inst.parsing_error.error = error;
188 }
189
190 static inline void
191 set_recoverable_error (const char *error)
192 {
193 set_error (AARCH64_OPDE_RECOVERABLE, error);
194 }
195
196 /* Use the DESC field of the corresponding aarch64_operand entry to compose
197 the error message. */
198 static inline void
199 set_default_error (void)
200 {
201 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
202 }
203
204 static inline void
205 set_syntax_error (const char *error)
206 {
207 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
208 }
209
210 static inline void
211 set_first_syntax_error (const char *error)
212 {
213 if (! error_p ())
214 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
215 }
216
217 static inline void
218 set_fatal_syntax_error (const char *error)
219 {
220 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
221 }
222 \f
223 /* Number of littlenums required to hold an extended precision number. */
224 #define MAX_LITTLENUMS 6
225
226 /* Return value for certain parsers when the parsing fails; those parsers
227 return the information of the parsed result, e.g. register number, on
228 success. */
229 #define PARSE_FAIL -1
230
231 /* This is an invalid condition code that means no conditional field is
232 present. */
233 #define COND_ALWAYS 0x10
234
235 typedef struct
236 {
237 const char *template;
238 unsigned long value;
239 } asm_barrier_opt;
240
241 typedef struct
242 {
243 const char *template;
244 uint32_t value;
245 } asm_nzcv;
246
247 struct reloc_entry
248 {
249 char *name;
250 bfd_reloc_code_real_type reloc;
251 };
252
253 /* Macros to define the register types and masks for the purpose
254 of parsing. */
255
256 #undef AARCH64_REG_TYPES
257 #define AARCH64_REG_TYPES \
258 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
259 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
260 BASIC_REG_TYPE(SP_32) /* wsp */ \
261 BASIC_REG_TYPE(SP_64) /* sp */ \
262 BASIC_REG_TYPE(Z_32) /* wzr */ \
263 BASIC_REG_TYPE(Z_64) /* xzr */ \
264 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
265 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
266 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
267 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
268 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
269 BASIC_REG_TYPE(VN) /* v[0-31] */ \
270 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
271 BASIC_REG_TYPE(PN) /* p[0-15] */ \
272 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
273 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
274 /* Typecheck: same, plus SVE registers. */ \
275 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
276 | REG_TYPE(ZN)) \
277 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
278 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
279 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
280 /* Typecheck: same, plus SVE registers. */ \
281 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
282 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
283 | REG_TYPE(ZN)) \
284 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
285 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
286 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
287 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
288 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
289 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
290 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
291 /* Typecheck: any [BHSDQ]P FP. */ \
292 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
293 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
294 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
295 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
296 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
297 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
298 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
299 /* Typecheck: as above, but also Zn and Pn. This should only be \
300 used for SVE instructions, since Zn and Pn are valid symbols \
301 in other contexts. */ \
302 MULTI_REG_TYPE(R_Z_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
303 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
304 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
305 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
306 | REG_TYPE(ZN) | REG_TYPE(PN)) \
307 /* Any integer register; used for error messages only. */ \
308 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
309 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
310 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
311 /* Pseudo type to mark the end of the enumerator sequence. */ \
312 BASIC_REG_TYPE(MAX)
313
314 #undef BASIC_REG_TYPE
315 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
316 #undef MULTI_REG_TYPE
317 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
318
319 /* Register type enumerators. */
320 typedef enum aarch64_reg_type_
321 {
322 /* A list of REG_TYPE_*. */
323 AARCH64_REG_TYPES
324 } aarch64_reg_type;
325
326 #undef BASIC_REG_TYPE
327 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
328 #undef REG_TYPE
329 #define REG_TYPE(T) (1 << REG_TYPE_##T)
330 #undef MULTI_REG_TYPE
331 #define MULTI_REG_TYPE(T,V) V,
332
333 /* Structure for a hash table entry for a register. */
334 typedef struct
335 {
336 const char *name;
337 unsigned char number;
338 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
339 unsigned char builtin;
340 } reg_entry;
341
342 /* Values indexed by aarch64_reg_type to assist the type checking. */
343 static const unsigned reg_type_masks[] =
344 {
345 AARCH64_REG_TYPES
346 };
347
348 #undef BASIC_REG_TYPE
349 #undef REG_TYPE
350 #undef MULTI_REG_TYPE
351 #undef AARCH64_REG_TYPES
352
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be kept synchronized with the aarch64_reg_type
   definitions above.  */
356 static const char *
357 get_reg_expected_msg (aarch64_reg_type reg_type)
358 {
359 const char *msg;
360
361 switch (reg_type)
362 {
363 case REG_TYPE_R_32:
364 msg = N_("integer 32-bit register expected");
365 break;
366 case REG_TYPE_R_64:
367 msg = N_("integer 64-bit register expected");
368 break;
369 case REG_TYPE_R_N:
370 msg = N_("integer register expected");
371 break;
372 case REG_TYPE_R64_SP:
373 msg = N_("64-bit integer or SP register expected");
374 break;
375 case REG_TYPE_SVE_BASE:
376 msg = N_("base register expected");
377 break;
378 case REG_TYPE_R_Z:
379 msg = N_("integer or zero register expected");
380 break;
381 case REG_TYPE_SVE_OFFSET:
382 msg = N_("offset register expected");
383 break;
384 case REG_TYPE_R_SP:
385 msg = N_("integer or SP register expected");
386 break;
387 case REG_TYPE_R_Z_SP:
388 msg = N_("integer, zero or SP register expected");
389 break;
390 case REG_TYPE_FP_B:
391 msg = N_("8-bit SIMD scalar register expected");
392 break;
393 case REG_TYPE_FP_H:
394 msg = N_("16-bit SIMD scalar or floating-point half precision "
395 "register expected");
396 break;
397 case REG_TYPE_FP_S:
398 msg = N_("32-bit SIMD scalar or floating-point single precision "
399 "register expected");
400 break;
401 case REG_TYPE_FP_D:
402 msg = N_("64-bit SIMD scalar or floating-point double precision "
403 "register expected");
404 break;
405 case REG_TYPE_FP_Q:
406 msg = N_("128-bit SIMD scalar or floating-point quad precision "
407 "register expected");
408 break;
409 case REG_TYPE_R_Z_BHSDQ_V:
410 case REG_TYPE_R_Z_BHSDQ_VZP:
411 msg = N_("register expected");
412 break;
413 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
414 msg = N_("SIMD scalar or floating-point register expected");
415 break;
416 case REG_TYPE_VN: /* any V reg */
417 msg = N_("vector register expected");
418 break;
419 case REG_TYPE_ZN:
420 msg = N_("SVE vector register expected");
421 break;
422 case REG_TYPE_PN:
423 msg = N_("SVE predicate register expected");
424 break;
425 default:
426 as_fatal (_("invalid register type %d"), reg_type);
427 }
428 return msg;
429 }
430
431 /* Some well known registers that we refer to directly elsewhere. */
432 #define REG_SP 31
433
434 /* Instructions take 4 bytes in the object file. */
435 #define INSN_SIZE 4
436
437 static struct hash_control *aarch64_ops_hsh;
438 static struct hash_control *aarch64_cond_hsh;
439 static struct hash_control *aarch64_shift_hsh;
440 static struct hash_control *aarch64_sys_regs_hsh;
441 static struct hash_control *aarch64_pstatefield_hsh;
442 static struct hash_control *aarch64_sys_regs_ic_hsh;
443 static struct hash_control *aarch64_sys_regs_dc_hsh;
444 static struct hash_control *aarch64_sys_regs_at_hsh;
445 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
446 static struct hash_control *aarch64_reg_hsh;
447 static struct hash_control *aarch64_barrier_opt_hsh;
448 static struct hash_control *aarch64_nzcv_hsh;
449 static struct hash_control *aarch64_pldop_hsh;
450 static struct hash_control *aarch64_hint_opt_hsh;
451
452 /* Stuff needed to resolve the label ambiguity
453 As:
454 ...
455 label: <insn>
456 may differ from:
457 ...
458 label:
459 <insn> */
460
461 static symbolS *last_label_seen;
462
463 /* Literal pool structure. Held on a per-section
464 and per-sub-section basis. */
465
466 #define MAX_LITERAL_POOL_SIZE 1024
467 typedef struct literal_expression
468 {
469 expressionS exp;
470 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
471 LITTLENUM_TYPE * bignum;
472 } literal_expression;
473
474 typedef struct literal_pool
475 {
476 literal_expression literals[MAX_LITERAL_POOL_SIZE];
477 unsigned int next_free_entry;
478 unsigned int id;
479 symbolS *symbol;
480 segT section;
481 subsegT sub_section;
482 int size;
483 struct literal_pool *next;
484 } literal_pool;
485
486 /* Pointer to a linked list of literal pools. */
487 static literal_pool *list_of_pools = NULL;
488 \f
489 /* Pure syntax. */
490
491 /* This array holds the chars that always start a comment. If the
492 pre-processor is disabled, these aren't very useful. */
493 const char comment_chars[] = "";
494
495 /* This array holds the chars that only start a comment at the beginning of
496 a line. If the line seems to have the form '# 123 filename'
497 .line and .file directives will appear in the pre-processed output. */
498 /* Note that input_file.c hand checks for '#' at the beginning of the
499 first line of the input file. This is because the compiler outputs
500 #NO_APP at the beginning of its output. */
501 /* Also note that comments like this one will always work. */
502 const char line_comment_chars[] = "#";
503
504 const char line_separator_chars[] = ";";
505
506 /* Chars that can be used to separate mant
507 from exp in floating point numbers. */
508 const char EXP_CHARS[] = "eE";
509
510 /* Chars that mean this number is a floating point constant. */
511 /* As in 0f12.456 */
512 /* or 0d1.2345e12 */
513
514 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
515
516 /* Prefix character that indicates the start of an immediate value. */
517 #define is_immediate_prefix(C) ((C) == '#')
518
519 /* Separator character handling. */
520
521 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
522
523 static inline bfd_boolean
524 skip_past_char (char **str, char c)
525 {
526 if (**str == c)
527 {
528 (*str)++;
529 return TRUE;
530 }
531 else
532 return FALSE;
533 }
534
535 #define skip_past_comma(str) skip_past_char (str, ',')
536
537 /* Arithmetic expressions (possibly involving symbols). */
538
539 static bfd_boolean in_my_get_expression_p = FALSE;
540
541 /* Third argument to my_get_expression. */
542 #define GE_NO_PREFIX 0
543 #define GE_OPT_PREFIX 1
544
/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.  */
548
549 static bfd_boolean
550 my_get_expression (expressionS * ep, char **str, int prefix_mode,
551 int reject_absent)
552 {
553 char *save_in;
554 segT seg;
555 int prefix_present_p = 0;
556
557 switch (prefix_mode)
558 {
559 case GE_NO_PREFIX:
560 break;
561 case GE_OPT_PREFIX:
562 if (is_immediate_prefix (**str))
563 {
564 (*str)++;
565 prefix_present_p = 1;
566 }
567 break;
568 default:
569 abort ();
570 }
571
572 memset (ep, 0, sizeof (expressionS));
573
574 save_in = input_line_pointer;
575 input_line_pointer = *str;
576 in_my_get_expression_p = TRUE;
577 seg = expression (ep);
578 in_my_get_expression_p = FALSE;
579
580 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
581 {
582 /* We found a bad expression in md_operand(). */
583 *str = input_line_pointer;
584 input_line_pointer = save_in;
585 if (prefix_present_p && ! error_p ())
586 set_fatal_syntax_error (_("bad expression"));
587 else
588 set_first_syntax_error (_("bad expression"));
589 return FALSE;
590 }
591
592 #ifdef OBJ_AOUT
593 if (seg != absolute_section
594 && seg != text_section
595 && seg != data_section
596 && seg != bss_section && seg != undefined_section)
597 {
598 set_syntax_error (_("bad segment"));
599 *str = input_line_pointer;
600 input_line_pointer = save_in;
601 return FALSE;
602 }
603 #else
604 (void) seg;
605 #endif
606
607 *str = input_line_pointer;
608 input_line_pointer = save_in;
609 return TRUE;
610 }
611
612 /* Turn a string in input_line_pointer into a floating point constant
613 of type TYPE, and store the appropriate bytes in *LITP. The number
614 of LITTLENUMS emitted is stored in *SIZEP. An error message is
615 returned, or NULL on OK. */
616
617 const char *
618 md_atof (int type, char *litP, int *sizeP)
619 {
620 return ieee_md_atof (type, litP, sizeP, target_big_endian);
621 }
622
623 /* We handle all bad expressions here, so that we can report the faulty
624 instruction in the error message. */
625 void
626 md_operand (expressionS * exp)
627 {
628 if (in_my_get_expression_p)
629 exp->X_op = O_illegal;
630 }
631
632 /* Immediate values. */
633
634 /* Errors may be set multiple times during parsing or bit encoding
635 (particularly in the Neon bits), but usually the earliest error which is set
636 will be the most meaningful. Avoid overwriting it with later (cascading)
637 errors by calling this function. */
638
639 static void
640 first_error (const char *error)
641 {
642 if (! error_p ())
643 set_syntax_error (error);
644 }
645
646 /* Similar to first_error, but this function accepts formatted error
647 message. */
648 static void
649 first_error_fmt (const char *format, ...)
650 {
651 va_list args;
652 enum
653 { size = 100 };
654 /* N.B. this single buffer will not cause error messages for different
655 instructions to pollute each other; this is because at the end of
656 processing of each assembly line, error message if any will be
657 collected by as_bad. */
658 static char buffer[size];
659
660 if (! error_p ())
661 {
662 int ret ATTRIBUTE_UNUSED;
663 va_start (args, format);
664 ret = vsnprintf (buffer, size, format, args);
665 know (ret <= size - 1 && ret >= 0);
666 va_end (args);
667 set_syntax_error (buffer);
668 }
669 }
670
671 /* Register parsing. */
672
673 /* Generic register parser which is called by other specialized
674 register parsers.
675 CCP points to what should be the beginning of a register name.
676 If it is indeed a valid register name, advance CCP over it and
677 return the reg_entry structure; otherwise return NULL.
678 It does not issue diagnostics. */
679
680 static reg_entry *
681 parse_reg (char **ccp)
682 {
683 char *start = *ccp;
684 char *p;
685 reg_entry *reg;
686
687 #ifdef REGISTER_PREFIX
688 if (*start != REGISTER_PREFIX)
689 return NULL;
690 start++;
691 #endif
692
693 p = start;
694 if (!ISALPHA (*p) || !is_name_beginner (*p))
695 return NULL;
696
697 do
698 p++;
699 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
700
701 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
702
703 if (!reg)
704 return NULL;
705
706 *ccp = p;
707 return reg;
708 }
709
/* Return TRUE if REG's type is accepted by the (possibly multi-type)
   register type TYPE; otherwise return FALSE.  */
712 static bfd_boolean
713 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
714 {
715 return (reg_type_masks[type] & (1 << reg->type)) != 0;
716 }
717
718 /* Try to parse a base or offset register. Allow SVE base and offset
719 registers if REG_TYPE includes SVE registers. Return the register
720 entry on success, setting *QUALIFIER to the register qualifier.
721 Return null otherwise.
722
723 Note that this function does not issue any diagnostics. */
724
725 static const reg_entry *
726 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
727 aarch64_opnd_qualifier_t *qualifier)
728 {
729 char *str = *ccp;
730 const reg_entry *reg = parse_reg (&str);
731
732 if (reg == NULL)
733 return NULL;
734
735 switch (reg->type)
736 {
737 case REG_TYPE_R_32:
738 case REG_TYPE_SP_32:
739 case REG_TYPE_Z_32:
740 *qualifier = AARCH64_OPND_QLF_W;
741 break;
742
743 case REG_TYPE_R_64:
744 case REG_TYPE_SP_64:
745 case REG_TYPE_Z_64:
746 *qualifier = AARCH64_OPND_QLF_X;
747 break;
748
749 case REG_TYPE_ZN:
750 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
751 || str[0] != '.')
752 return NULL;
753 switch (TOLOWER (str[1]))
754 {
755 case 's':
756 *qualifier = AARCH64_OPND_QLF_S_S;
757 break;
758 case 'd':
759 *qualifier = AARCH64_OPND_QLF_S_D;
760 break;
761 default:
762 return NULL;
763 }
764 str += 2;
765 break;
766
767 default:
768 return NULL;
769 }
770
771 *ccp = str;
772
773 return reg;
774 }
775
776 /* Try to parse a base or offset register. Return the register entry
777 on success, setting *QUALIFIER to the register qualifier. Return null
778 otherwise.
779
780 Note that this function does not issue any diagnostics. */
781
782 static const reg_entry *
783 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
784 {
785 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
786 }
787
788 /* Parse the qualifier of a vector register or vector element of type
789 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
790 succeeds; otherwise return FALSE.
791
792 Accept only one occurrence of:
793 8b 16b 2h 4h 8h 2s 4s 1d 2d
794 b h s d q */
795 static bfd_boolean
796 parse_vector_type_for_operand (aarch64_reg_type reg_type,
797 struct vector_type_el *parsed_type, char **str)
798 {
799 char *ptr = *str;
800 unsigned width;
801 unsigned element_size;
802 enum vector_el_type type;
803
804 /* skip '.' */
805 gas_assert (*ptr == '.');
806 ptr++;
807
808 if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
809 {
810 width = 0;
811 goto elt_size;
812 }
813 width = strtoul (ptr, &ptr, 10);
814 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
815 {
816 first_error_fmt (_("bad size %d in vector width specifier"), width);
817 return FALSE;
818 }
819
820 elt_size:
821 switch (TOLOWER (*ptr))
822 {
823 case 'b':
824 type = NT_b;
825 element_size = 8;
826 break;
827 case 'h':
828 type = NT_h;
829 element_size = 16;
830 break;
831 case 's':
832 type = NT_s;
833 element_size = 32;
834 break;
835 case 'd':
836 type = NT_d;
837 element_size = 64;
838 break;
839 case 'q':
840 if (width == 1)
841 {
842 type = NT_q;
843 element_size = 128;
844 break;
845 }
846 /* fall through. */
847 default:
848 if (*ptr != '\0')
849 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
850 else
851 first_error (_("missing element size"));
852 return FALSE;
853 }
854 if (width != 0 && width * element_size != 64 && width * element_size != 128
855 && !(width == 2 && element_size == 16))
856 {
857 first_error_fmt (_
858 ("invalid element size %d and vector size combination %c"),
859 width, *ptr);
860 return FALSE;
861 }
862 ptr++;
863
864 parsed_type->type = type;
865 parsed_type->width = width;
866
867 *str = ptr;
868
869 return TRUE;
870 }
871
872 /* *STR contains an SVE zero/merge predication suffix. Parse it into
873 *PARSED_TYPE and point *STR at the end of the suffix. */
874
875 static bfd_boolean
876 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
877 {
878 char *ptr = *str;
879
880 /* Skip '/'. */
881 gas_assert (*ptr == '/');
882 ptr++;
883 switch (TOLOWER (*ptr))
884 {
885 case 'z':
886 parsed_type->type = NT_zero;
887 break;
888 case 'm':
889 parsed_type->type = NT_merge;
890 break;
891 default:
892 if (*ptr != '\0' && *ptr != ',')
893 first_error_fmt (_("unexpected character `%c' in predication type"),
894 *ptr);
895 else
896 first_error (_("missing predication type"));
897 return FALSE;
898 }
899 parsed_type->width = 0;
900 *str = ptr + 1;
901 return TRUE;
902 }
903
904 /* Parse a register of the type TYPE.
905
906 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
907 name or the parsed register is not of TYPE.
908
909 Otherwise return the register number, and optionally fill in the actual
910 type of the register in *RTYPE when multiple alternatives were given, and
911 return the register shape and element index information in *TYPEINFO.
912
913 IN_REG_LIST should be set with TRUE if the caller is parsing a register
914 list. */
915
916 static int
917 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
918 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
919 {
920 char *str = *ccp;
921 const reg_entry *reg = parse_reg (&str);
922 struct vector_type_el atype;
923 struct vector_type_el parsetype;
924 bfd_boolean is_typed_vecreg = FALSE;
925
926 atype.defined = 0;
927 atype.type = NT_invtype;
928 atype.width = -1;
929 atype.index = 0;
930
931 if (reg == NULL)
932 {
933 if (typeinfo)
934 *typeinfo = atype;
935 set_default_error ();
936 return PARSE_FAIL;
937 }
938
939 if (! aarch64_check_reg_type (reg, type))
940 {
941 DEBUG_TRACE ("reg type check failed");
942 set_default_error ();
943 return PARSE_FAIL;
944 }
945 type = reg->type;
946
947 if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
948 && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
949 {
950 if (*str == '.')
951 {
952 if (!parse_vector_type_for_operand (type, &parsetype, &str))
953 return PARSE_FAIL;
954 }
955 else
956 {
957 if (!parse_predication_for_operand (&parsetype, &str))
958 return PARSE_FAIL;
959 }
960
961 /* Register if of the form Vn.[bhsdq]. */
962 is_typed_vecreg = TRUE;
963
964 if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
965 {
966 /* The width is always variable; we don't allow an integer width
967 to be specified. */
968 gas_assert (parsetype.width == 0);
969 atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
970 }
971 else if (parsetype.width == 0)
972 /* Expect index. In the new scheme we cannot have
973 Vn.[bhsdq] represent a scalar. Therefore any
974 Vn.[bhsdq] should have an index following it.
975 Except in reglists ofcourse. */
976 atype.defined |= NTA_HASINDEX;
977 else
978 atype.defined |= NTA_HASTYPE;
979
980 atype.type = parsetype.type;
981 atype.width = parsetype.width;
982 }
983
984 if (skip_past_char (&str, '['))
985 {
986 expressionS exp;
987
988 /* Reject Sn[index] syntax. */
989 if (!is_typed_vecreg)
990 {
991 first_error (_("this type of register can't be indexed"));
992 return PARSE_FAIL;
993 }
994
995 if (in_reg_list == TRUE)
996 {
997 first_error (_("index not allowed inside register list"));
998 return PARSE_FAIL;
999 }
1000
1001 atype.defined |= NTA_HASINDEX;
1002
1003 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1004
1005 if (exp.X_op != O_constant)
1006 {
1007 first_error (_("constant expression required"));
1008 return PARSE_FAIL;
1009 }
1010
1011 if (! skip_past_char (&str, ']'))
1012 return PARSE_FAIL;
1013
1014 atype.index = exp.X_add_number;
1015 }
1016 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
1017 {
1018 /* Indexed vector register expected. */
1019 first_error (_("indexed vector register expected"));
1020 return PARSE_FAIL;
1021 }
1022
1023 /* A vector reg Vn should be typed or indexed. */
1024 if (type == REG_TYPE_VN && atype.defined == 0)
1025 {
1026 first_error (_("invalid use of vector register"));
1027 }
1028
1029 if (typeinfo)
1030 *typeinfo = atype;
1031
1032 if (rtype)
1033 *rtype = type;
1034
1035 *ccp = str;
1036
1037 return reg->number;
1038 }
1039
1040 /* Parse register.
1041
1042 Return the register number on success; return PARSE_FAIL otherwise.
1043
1044 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1045 the register (e.g. NEON double or quad reg when either has been requested).
1046
1047 If this is a NEON vector register with additional type information, fill
1048 in the struct pointed to by VECTYPE (if non-NULL).
1049
1050 This parser does not handle register list. */
1051
1052 static int
1053 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1054 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1055 {
1056 struct vector_type_el atype;
1057 char *str = *ccp;
1058 int reg = parse_typed_reg (&str, type, rtype, &atype,
1059 /*in_reg_list= */ FALSE);
1060
1061 if (reg == PARSE_FAIL)
1062 return PARSE_FAIL;
1063
1064 if (vectype)
1065 *vectype = atype;
1066
1067 *ccp = str;
1068
1069 return reg;
1070 }
1071
1072 static inline bfd_boolean
1073 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1074 {
1075 return
1076 e1.type == e2.type
1077 && e1.defined == e2.defined
1078 && e1.width == e2.width && e1.index == e2.index;
1079 }
1080
1081 /* This function parses a list of vector registers of type TYPE.
1082 On success, it returns the parsed register list information in the
1083 following encoded format:
1084
1085 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1086 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1087
1088 The information of the register shape and/or index is returned in
1089 *VECTYPE.
1090
1091 It returns PARSE_FAIL if the register list is invalid.
1092
1093 The list contains one to four registers.
1094 Each register can be one of:
1095 <Vt>.<T>[<index>]
1096 <Vt>.<T>
1097 All <T> should be identical.
1098 All <index> should be identical.
1099 There are restrictions on <Vt> numbers which are checked later
1100 (by reg_list_valid_p). */
1101
/* Parse a brace-enclosed list of one to four vector registers of type TYPE
   (e.g. "{v0.4s - v3.4s}" or "{v1.d[1], v2.d[1]}").  On success return the
   register numbers and count packed into an int as described above, and the
   common shape/index in *VECTYPE; return PARSE_FAIL on any error.  *CCP is
   advanced past the consumed text.  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;			/* Number of registers collected so far.  */
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;		/* Current regno and start of a '-' range.  */
  int in_range;			/* Non-zero while handling "lo - hi".  */
  int ret_val;			/* Packed register numbers, 5 bits each.  */
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;	/* Set if any element had [index].  */

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;	/* Previous register starts the range.  */
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* Ranges must ascend; the start register itself was already
	     recorded, so only the registers after it are added below.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All list elements must share the first element's shape.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      if (! error)
	/* Pack each register number into the next 5-bit field; for a
	   range this appends every register from val_range to val.  */
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* If any element was written with an index, a shared trailing index
     ("{...}[n]") is also accepted and overrides typeinfo_first.index.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits: register count - 1; 5-bit regno fields from bit 2 up.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1241
1242 /* Directives: register aliases. */
1243
1244 static reg_entry *
1245 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1246 {
1247 reg_entry *new;
1248 const char *name;
1249
1250 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1251 {
1252 if (new->builtin)
1253 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1254 str);
1255
1256 /* Only warn about a redefinition if it's not defined as the
1257 same register. */
1258 else if (new->number != number || new->type != type)
1259 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1260
1261 return NULL;
1262 }
1263
1264 name = xstrdup (str);
1265 new = XNEW (reg_entry);
1266
1267 new->name = name;
1268 new->number = number;
1269 new->type = type;
1270 new->builtin = FALSE;
1271
1272 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1273 abort ();
1274
1275 return new;
1276 }
1277
1278 /* Look for the .req directive. This is of the form:
1279
1280 new_register_name .req existing_register_name
1281
1282 If we find one, or if it looks sufficiently like one that we want to
1283 handle any error here, return TRUE. Otherwise return FALSE. */
1284
static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must name an existing register or alias.  */
  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the uppercase variant if it differs from the name as
	 typed; otherwise it was already inserted above.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	     foo .req r0
	     Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise the lowercase variant, if distinct.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1357
1358 /* Should never be called, as .req goes between the alias and the
1359 register name, not at the beginning of the line. */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reaching here means ".req" appeared at the start of a statement,
     which is never valid; the usual path is via create_register_alias.  */
  as_bad (_("invalid syntax for .req directive"));
}
1365
1366 /* The .unreq directive deletes an alias which was previously defined
1367 by .req. For example:
1368
1369 my_alias .req r11
1370 .unreq my_alias */
1371
1372 static void
1373 s_unreq (int a ATTRIBUTE_UNUSED)
1374 {
1375 char *name;
1376 char saved_char;
1377
1378 name = input_line_pointer;
1379
1380 while (*input_line_pointer != 0
1381 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1382 ++input_line_pointer;
1383
1384 saved_char = *input_line_pointer;
1385 *input_line_pointer = 0;
1386
1387 if (!*name)
1388 as_bad (_("invalid syntax for .unreq directive"));
1389 else
1390 {
1391 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1392
1393 if (!reg)
1394 as_bad (_("unknown register alias '%s'"), name);
1395 else if (reg->builtin)
1396 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1397 name);
1398 else
1399 {
1400 char *p;
1401 char *nbuf;
1402
1403 hash_delete (aarch64_reg_hsh, name, FALSE);
1404 free ((char *) reg->name);
1405 free (reg);
1406
1407 /* Also locate the all upper case and all lower case versions.
1408 Do not complain if we cannot find one or the other as it
1409 was probably deleted above. */
1410
1411 nbuf = strdup (name);
1412 for (p = nbuf; *p; p++)
1413 *p = TOUPPER (*p);
1414 reg = hash_find (aarch64_reg_hsh, nbuf);
1415 if (reg)
1416 {
1417 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1418 free ((char *) reg->name);
1419 free (reg);
1420 }
1421
1422 for (p = nbuf; *p; p++)
1423 *p = TOLOWER (*p);
1424 reg = hash_find (aarch64_reg_hsh, nbuf);
1425 if (reg)
1426 {
1427 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1428 free ((char *) reg->name);
1429 free (reg);
1430 }
1431
1432 free (nbuf);
1433 }
1434 }
1435
1436 *input_line_pointer = saved_char;
1437 demand_empty_rest_of_line ();
1438 }
1439
1440 /* Directives: Instruction set selection. */
1441
1442 #ifdef OBJ_ELF
1443 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1444 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1445 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1446 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1447
1448 /* Create a new mapping symbol for the transition to STATE. */
1449
/* Create a new mapping symbol ("$d" for data, "$x" for code) at offset
   VALUE within FRAG, recording it in the frag's first_map/last_map fields
   so that duplicate symbols at the same offset can be replaced.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Remove the superseded symbol from the global symbol chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within one frag are appended in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1503
1504 /* We must sometimes convert a region marked as code to data during
1505 code alignment, if an odd number of bytes have to be padded. The
1506 code mapping symbol is pushed to an aligned address. */
1507
/* Mark BYTES bytes starting at offset VALUE in FRAG as data: emit a $d
   mapping symbol at VALUE and a STATE symbol at VALUE + BYTES, replacing
   any mapping symbol that already sits at VALUE.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed is also the frag's first symbol.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1531
1532 static void mapping_state_2 (enum mstate state, int max_chars);
1533
1534 /* Set the mapping state to STATE. Only call this when about to
1535 emit some STATE bytes to the file. */
1536
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* A section's first content is assumed to be data unless marked
	 otherwise; when the first real emission is code, retroactively
	 mark any bytes already present as data.

	 Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1575
1576 /* Same as mapping_state, but MAX_CHARS bytes have already been
1577 allocated. Put the mapping symbol that far back. */
1578
1579 static void
1580 mapping_state_2 (enum mstate state, int max_chars)
1581 {
1582 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1583
1584 if (!SEG_NORMAL (now_seg))
1585 return;
1586
1587 if (mapstate == state)
1588 /* The mapping symbol has already been emitted.
1589 There is nothing else to do. */
1590 return;
1591
1592 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1593 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1594 }
1595 #else
1596 #define mapping_state(x) /* nothing */
1597 #define mapping_state_2(x, y) /* nothing */
1598 #endif
1599
1600 /* Directives: sectioning and alignment. */
1601
/* Handle the .bss directive: switch to the BSS section and mark the
   transition as data for mapping-symbol purposes.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1611
/* Handle the .even directive: align the current location to a 2-byte
   boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1623
1624 /* Directives: Literal pools. */
1625
1626 static literal_pool *
1627 find_literal_pool (int size)
1628 {
1629 literal_pool *pool;
1630
1631 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1632 {
1633 if (pool->section == now_seg
1634 && pool->sub_section == now_subseg && pool->size == size)
1635 break;
1636 }
1637
1638 return pool;
1639 }
1640
1641 static literal_pool *
1642 find_or_make_literal_pool (int size)
1643 {
1644 /* Next literal pool ID number. */
1645 static unsigned int latest_pool_num = 1;
1646 literal_pool *pool;
1647
1648 pool = find_literal_pool (size);
1649
1650 if (pool == NULL)
1651 {
1652 /* Create a new pool. */
1653 pool = XNEW (literal_pool);
1654 if (!pool)
1655 return NULL;
1656
1657 /* Currently we always put the literal pool in the current text
1658 section. If we were generating "small" model code where we
1659 knew that all code and initialised data was within 1MB then
1660 we could output literals to mergeable, read-only data
1661 sections. */
1662
1663 pool->next_free_entry = 0;
1664 pool->section = now_seg;
1665 pool->sub_section = now_subseg;
1666 pool->size = size;
1667 pool->next = list_of_pools;
1668 pool->symbol = NULL;
1669
1670 /* Add it to the list. */
1671 list_of_pools = pool;
1672 }
1673
1674 /* New pools, and emptied pools, will have a NULL symbol. */
1675 if (pool->symbol == NULL)
1676 {
1677 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1678 (valueT) 0, &zero_address_frag);
1679 pool->id = latest_pool_num++;
1680 }
1681
1682 /* Done. */
1683 return pool;
1684 }
1685
1686 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1687 Return TRUE on success, otherwise return FALSE. */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool; identical
     constants and identical symbol references are shared.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite the caller's expression to reference the pool slot; the
     pool symbol is resolved when the pool is emitted by s_ltorg.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1745
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */
1748
/* Give the previously-created symbol SYMBOLP its NAME, SEGMENT, value
   VALU and owning FRAG, then append it to the global symbol chain and
   run the object-format and target hooks.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1796
1797
/* Handle the .ltorg/.pool directives: emit every non-empty literal pool
   (4-byte and 8-byte and 16-byte pools) at the current location, then
   mark the pools as empty so later literals start a new pool.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Walk pool sizes 4, 8 and 16 bytes (1 << align).  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* \002 keeps the generated label out of the user namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Bind the pool's placeholder symbol to this address; literal
	 references created by add_to_lit_pool now resolve here.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1856
1857 #ifdef OBJ_ELF
1858 /* Forward declarations for functions below, in the MD interface
1859 section. */
1860 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1861 static struct reloc_table_entry * find_reloc_table_entry (char **);
1862
1863 /* Directives: Data. */
1864 /* N.B. the support for relocation suffix in this directive needs to be
1865 implemented properly. */
1866
/* Handle the .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8)
   directives: emit a comma-separated list of NBYTES-wide expressions as
   data.  Relocation suffixes (":reloc:expr") are recognized but not yet
   supported and are diagnosed.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc_suffix:" before the symbol.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1918
1919 #endif /* OBJ_ELF */
1920
1921 /* Output a 32-bit word, but mark as an instruction. */
1922
/* Handle the .inst directive: emit each comma-separated constant as a
   32-bit word marked as an instruction (byte-swapped on big-endian,
   since AArch64 instructions are always little-endian).  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      if (target_big_endian)
	{
	  /* Instruction encodings are little-endian regardless of the
	     data endianness, so undo the generic byte swap.  */
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1973
1974 #ifdef OBJ_ELF
1975 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1976
static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands in the same frag as the following
     add instruction.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
1989
1990 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1991
static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2009
2010 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2011
static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands in the same frag as the following
     ldr instruction.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2024 #endif /* OBJ_ELF */
2025
2026 static void s_aarch64_arch (int);
2027 static void s_aarch64_cpu (int);
2028 static void s_aarch64_arch_extension (int);
2029
2030 /* This table describes all the machine specific pseudo-ops the assembler
2031 has to support. The fields are:
2032 pseudo-op name without dot
2033 function to call to execute this pseudo-op
2034 Integer arg to pass to the function. */
2035
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The argument is the element size in bytes: 4 for .word/.long,
     8 for .xword/.dword.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
2059 \f
2060
2061 /* Check whether STR points to a register name followed by a comma or the
2062 end of line; REG_TYPE indicates which register types are checked
2063 against. Return TRUE if STR is such a register name; otherwise return
2064 FALSE. The function does not intend to produce any diagnostics, but since
2065 the register parser aarch64_reg_parse, which is called by this function,
2066 does produce diagnostics, we call clear_error to clear any diagnostics
2067 that may be generated by aarch64_reg_parse.
2068 Also, the function returns FALSE directly if there is any user error
2069 present at the function entry. This prevents the existing diagnostics
2070 state from being spoiled.
2071 The function currently serves parse_constant_immediate and
2072 parse_big_immediate only. */
2073 static bfd_boolean
2074 reg_name_p (char *str, aarch64_reg_type reg_type)
2075 {
2076 int reg;
2077
2078 /* Prevent the diagnostics state from being spoiled. */
2079 if (error_p ())
2080 return FALSE;
2081
2082 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2083
2084 /* Clear the parsing error that may be set by the reg parser. */
2085 clear_error ();
2086
2087 if (reg == PARSE_FAIL)
2088 return FALSE;
2089
2090 skip_whitespace (str);
2091 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2092 return TRUE;
2093
2094 return FALSE;
2095 }
2096
2097 /* Parser functions used exclusively in instruction operands. */
2098
2099 /* Parse an immediate expression which may not be constant.
2100
2101 To prevent the expression parser from pushing a register name
2102 into the symbol table as an undefined symbol, firstly a check is
2103 done to find out whether STR is a register of type REG_TYPE followed
2104 by a comma or the end of line. Return FALSE if STR is such a string. */
2105
2106 static bfd_boolean
2107 parse_immediate_expression (char **str, expressionS *exp,
2108 aarch64_reg_type reg_type)
2109 {
2110 if (reg_name_p (*str, reg_type))
2111 {
2112 set_recoverable_error (_("immediate operand required"));
2113 return FALSE;
2114 }
2115
2116 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2117
2118 if (exp->X_op == O_absent)
2119 {
2120 set_fatal_syntax_error (_("missing immediate expression"));
2121 return FALSE;
2122 }
2123
2124 return TRUE;
2125 }
2126
2127 /* Constant immediate-value read function for use in insn parsing.
2128 STR points to the beginning of the immediate (with the optional
2129 leading #); *VAL receives the value. REG_TYPE says which register
2130 names should be treated as registers rather than as symbolic immediates.
2131
2132 Return TRUE on success; otherwise return FALSE. */
2133
2134 static bfd_boolean
2135 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2136 {
2137 expressionS exp;
2138
2139 if (! parse_immediate_expression (str, &exp, reg_type))
2140 return FALSE;
2141
2142 if (exp.X_op != O_constant)
2143 {
2144 set_syntax_error (_("constant expression required"));
2145 return FALSE;
2146 }
2147
2148 *val = exp.X_add_number;
2149 return TRUE;
2150 }
2151
/* Compress the IEEE single-precision word IMM into the 8-bit immediate
   encoding used by FMOV: the top fraction/exponent bits b[25:19] become
   b[6:0] and the sign bit b[31] becomes b[7].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t frac_exp = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;		/* b[31] -> b[7].  */

  return sign | frac_exp;
}
2158
2159 /* Return TRUE if the single-precision floating-point value encoded in IMM
2160 can be expressed in the AArch64 8-bit signed floating-point format with
2161 3-bit exponent and normalized 4 bits of precision; in other words, the
2162 floating-point value must be expressable as
2163 (+/-) n / 16 * power (2, r)
2164 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2165
2166 static bfd_boolean
2167 aarch64_imm_float_p (uint32_t imm)
2168 {
2169 /* If a single-precision floating-point value has the following bit
2170 pattern, it can be expressed in the AArch64 8-bit floating-point
2171 format:
2172
2173 3 32222222 2221111111111
2174 1 09876543 21098765432109876543210
2175 n Eeeeeexx xxxx0000000000000000000
2176
2177 where n, e and each x are either 0 or 1 independently, with
2178 E == ~ e. */
2179
2180 uint32_t pattern;
2181
2182 /* Prepare the pattern for 'Eeeeee'. */
2183 if (((imm >> 30) & 0x1) == 0)
2184 pattern = 0x3e000000;
2185 else
2186 pattern = 0x40000000;
2187
2188 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2189 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2190 }
2191
2192 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2193 as an IEEE float without any loss of precision. Store the value in
2194 *FPWORD if so. */
2195
static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s: the float has 23 fraction bits vs the
     double's 52, so the low 29 fraction bits must be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~: the exponent must be in the range a float's 8-bit
     exponent can express.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111: exclude exponents that would map to
     the float's all-ones (Inf/NaN) exponent.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Reassemble the surviving bits as an IEEE single.  */
  *fpword = ((high32 & 0xc0000000)	/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));	/* 3 S bits.  */
  return TRUE;
}
2239
2240 /* Return true if we should treat OPERAND as a double-precision
2241 floating-point operand rather than a single-precision one. */
/* Return TRUE if OPERAND should be parsed as a double-precision
   floating-point immediate rather than a single-precision one, i.e. when
   its qualifier is absent or denotes an 8-byte element size.  */
static bfd_boolean
double_precision_operand_p (const aarch64_opnd_info *operand)
{
  /* Check for unsuffixed SVE registers, which are allowed
     for LDR and STR but not in instructions that require an
     immediate.  We get better error messages if we arbitrarily
     pick one size, parse the immediate normally, and then
     report the match failure in the normal way.  */
  return (operand->qualifier == AARCH64_OPND_QLF_NIL
	  || aarch64_get_qualifier_esize (operand->qualifier) == 8);
}
2253
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  skip_past_char (&str, '#');

  /* FPNUM scans ahead for classification; STR remains the parse point.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* The hex constant encodes a double; it must be losslessly
	     convertible to single precision.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision encoding must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else
    {
      if (reg_name_p (str, reg_type))
	{
	  /* Recoverable error: the caller may re-try this operand as
	     a register rather than an immediate.  */
	  set_recoverable_error (_("immediate operand required"));
	  return FALSE;
	}

      /* We must not accidentally parse an integer as a floating-point number.
	 Make sure that the value we parse is not an integer by checking for
	 special characters '.' or 'e'.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Parse the decimal text directly into the single-precision
	 littlenum representation.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2346
2347 /* Less-generic immediate-value read function with the possibility of loading
2348 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2349 instructions.
2350
2351 To prevent the expression parser from pushing a register name into the
2352 symbol table as an undefined symbol, a check is firstly done to find
2353 out whether STR is a register of type REG_TYPE followed by a comma or
2354 the end of line. Return FALSE if STR is such a register. */
2355
2356 static bfd_boolean
2357 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2358 {
2359 char *ptr = *str;
2360
2361 if (reg_name_p (ptr, reg_type))
2362 {
2363 set_syntax_error (_("immediate operand required"));
2364 return FALSE;
2365 }
2366
2367 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2368
2369 if (inst.reloc.exp.X_op == O_constant)
2370 *imm = inst.reloc.exp.X_add_number;
2371
2372 *str = ptr;
2373
2374 return TRUE;
2375 }
2376
2377 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2378 if NEED_LIBOPCODES is non-zero, the fixup will need
2379 assistance from the libopcodes. */
2380
2381 static inline void
2382 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2383 const aarch64_opnd_info *operand,
2384 int need_libopcodes_p)
2385 {
2386 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2387 reloc->opnd = operand->type;
2388 if (need_libopcodes_p)
2389 reloc->need_libopcodes_p = 1;
2390 };
2391
2392 /* Return TRUE if the instruction needs to be fixed up later internally by
2393 the GAS; otherwise return FALSE. */
2394
2395 static inline bfd_boolean
2396 aarch64_gas_internal_fixup_p (void)
2397 {
2398 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2399 }
2400
2401 /* Assign the immediate value to the relavant field in *OPERAND if
2402 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2403 needs an internal fixup in a later stage.
2404 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2405 IMM.VALUE that may get assigned with the constant. */
2406 static inline void
2407 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2408 aarch64_opnd_info *operand,
2409 int addr_off_p,
2410 int need_libopcodes_p,
2411 int skip_p)
2412 {
2413 if (reloc->exp.X_op == O_constant)
2414 {
2415 if (addr_off_p)
2416 operand->addr.offset.imm = reloc->exp.X_add_number;
2417 else
2418 operand->imm.value = reloc->exp.X_add_number;
2419 reloc->type = BFD_RELOC_UNUSED;
2420 }
2421 else
2422 {
2423 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2424 /* Tell libopcodes to ignore this operand or not. This is helpful
2425 when one of the operands needs to be fixed up later but we need
2426 libopcodes to check the other operands. */
2427 operand->skip = skip_p;
2428 }
2429 }
2430
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;			/* Modifier name, without the colons.  */
  int pc_rel;				/* Non-zero if PC-relative.  */
  bfd_reloc_code_real_type adr_type;	/* Reloc used with ADR; 0 if invalid.  */
  bfd_reloc_code_real_type adrp_type;	/* Reloc used with ADRP; 0 if invalid.  */
  bfd_reloc_code_real_type movw_type;	/* Reloc used with MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	/* Reloc used with ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	/* Reloc used with load/store.  */
  bfd_reloc_code_real_type ld_literal_type; /* Reloc used with LDR (literal).  */
};
2450
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVK, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Bits 0-15 of the GOT offset of the IE tp-relative entry: MOVK,
     no check.  */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Bits 16-31 of the GOT offset of the IE tp-relative entry: MOVZ.  */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits of address/value: MOVK, no check.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing the TLS descriptor GOT entry for a
     symbol.  */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing the TLS descriptor GOT entry
     for a symbol.  */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   0,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
2915
2916 /* Given the address of a pointer pointing to the textual name of a
2917 relocation as may appear in assembler source, attempt to find its
2918 details in reloc_table. The pointer will be updated to the character
2919 after the trailing colon. On failure, NULL will be returned;
2920 otherwise return the reloc_table_entry. */
2921
2922 static struct reloc_table_entry *
2923 find_reloc_table_entry (char **str)
2924 {
2925 unsigned int i;
2926 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2927 {
2928 int length = strlen (reloc_table[i].name);
2929
2930 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2931 && (*str)[length] == ':')
2932 {
2933 *str += (length + 1);
2934 return &reloc_table[i];
2935 }
2936 }
2937
2938 return NULL;
2939 }
2940
/* Mode argument to parse_shift and parse_shifter_operand: selects
   which shifter kinds and shift-amount forms are acceptable.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
2955
2956 /* Parse a <shift> operator on an AArch64 data processing instruction.
2957 Return TRUE on success; otherwise return FALSE. */
2958 static bfd_boolean
2959 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2960 {
2961 const struct aarch64_name_value_pair *shift_op;
2962 enum aarch64_modifier_kind kind;
2963 expressionS exp;
2964 int exp_has_prefix;
2965 char *s = *str;
2966 char *p = s;
2967
2968 for (p = *str; ISALPHA (*p); p++)
2969 ;
2970
2971 if (p == *str)
2972 {
2973 set_syntax_error (_("shift expression expected"));
2974 return FALSE;
2975 }
2976
2977 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2978
2979 if (shift_op == NULL)
2980 {
2981 set_syntax_error (_("shift operator expected"));
2982 return FALSE;
2983 }
2984
2985 kind = aarch64_get_operand_modifier (shift_op);
2986
2987 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2988 {
2989 set_syntax_error (_("invalid use of 'MSL'"));
2990 return FALSE;
2991 }
2992
2993 if (kind == AARCH64_MOD_MUL
2994 && mode != SHIFTED_MUL
2995 && mode != SHIFTED_MUL_VL)
2996 {
2997 set_syntax_error (_("invalid use of 'MUL'"));
2998 return FALSE;
2999 }
3000
3001 switch (mode)
3002 {
3003 case SHIFTED_LOGIC_IMM:
3004 if (aarch64_extend_operator_p (kind) == TRUE)
3005 {
3006 set_syntax_error (_("extending shift is not permitted"));
3007 return FALSE;
3008 }
3009 break;
3010
3011 case SHIFTED_ARITH_IMM:
3012 if (kind == AARCH64_MOD_ROR)
3013 {
3014 set_syntax_error (_("'ROR' shift is not permitted"));
3015 return FALSE;
3016 }
3017 break;
3018
3019 case SHIFTED_LSL:
3020 if (kind != AARCH64_MOD_LSL)
3021 {
3022 set_syntax_error (_("only 'LSL' shift is permitted"));
3023 return FALSE;
3024 }
3025 break;
3026
3027 case SHIFTED_MUL:
3028 if (kind != AARCH64_MOD_MUL)
3029 {
3030 set_syntax_error (_("only 'MUL' is permitted"));
3031 return FALSE;
3032 }
3033 break;
3034
3035 case SHIFTED_MUL_VL:
3036 /* "MUL VL" consists of two separate tokens. Require the first
3037 token to be "MUL" and look for a following "VL". */
3038 if (kind == AARCH64_MOD_MUL)
3039 {
3040 skip_whitespace (p);
3041 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3042 {
3043 p += 2;
3044 kind = AARCH64_MOD_MUL_VL;
3045 break;
3046 }
3047 }
3048 set_syntax_error (_("only 'MUL VL' is permitted"));
3049 return FALSE;
3050
3051 case SHIFTED_REG_OFFSET:
3052 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3053 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3054 {
3055 set_fatal_syntax_error
3056 (_("invalid shift for the register offset addressing mode"));
3057 return FALSE;
3058 }
3059 break;
3060
3061 case SHIFTED_LSL_MSL:
3062 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3063 {
3064 set_syntax_error (_("invalid shift operator"));
3065 return FALSE;
3066 }
3067 break;
3068
3069 default:
3070 abort ();
3071 }
3072
3073 /* Whitespace can appear here if the next thing is a bare digit. */
3074 skip_whitespace (p);
3075
3076 /* Parse shift amount. */
3077 exp_has_prefix = 0;
3078 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3079 exp.X_op = O_absent;
3080 else
3081 {
3082 if (is_immediate_prefix (*p))
3083 {
3084 p++;
3085 exp_has_prefix = 1;
3086 }
3087 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3088 }
3089 if (kind == AARCH64_MOD_MUL_VL)
3090 /* For consistency, give MUL VL the same shift amount as an implicit
3091 MUL #1. */
3092 operand->shifter.amount = 1;
3093 else if (exp.X_op == O_absent)
3094 {
3095 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
3096 {
3097 set_syntax_error (_("missing shift amount"));
3098 return FALSE;
3099 }
3100 operand->shifter.amount = 0;
3101 }
3102 else if (exp.X_op != O_constant)
3103 {
3104 set_syntax_error (_("constant shift amount required"));
3105 return FALSE;
3106 }
3107 /* For parsing purposes, MUL #n has no inherent range. The range
3108 depends on the operand and will be checked by operand-specific
3109 routines. */
3110 else if (kind != AARCH64_MOD_MUL
3111 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3112 {
3113 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3114 return FALSE;
3115 }
3116 else
3117 {
3118 operand->shifter.amount = exp.X_add_number;
3119 operand->shifter.amount_present = 1;
3120 }
3121
3122 operand->shifter.operator_present = 1;
3123 operand->shifter.kind = kind;
3124
3125 *str = p;
3126 return TRUE;
3127 }
3128
3129 /* Parse a <shifter_operand> for a data processing instruction:
3130
3131 #<immediate>
3132 #<immediate>, LSL #imm
3133
3134 Validation of immediate operands is deferred to md_apply_fix.
3135
3136 Return TRUE on success; otherwise return FALSE. */
3137
3138 static bfd_boolean
3139 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3140 enum parse_shift_mode mode)
3141 {
3142 char *p;
3143
3144 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3145 return FALSE;
3146
3147 p = *str;
3148
3149 /* Accept an immediate expression. */
3150 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3151 return FALSE;
3152
3153 /* Accept optional LSL for arithmetic immediate values. */
3154 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3155 if (! parse_shift (&p, operand, SHIFTED_LSL))
3156 return FALSE;
3157
3158 /* Not accept any shifter for logical immediate values. */
3159 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3160 && parse_shift (&p, operand, mode))
3161 {
3162 set_syntax_error (_("unexpected shift operator"));
3163 return FALSE;
3164 }
3165
3166 *str = p;
3167 return TRUE;
3168 }
3169
3170 /* Parse a <shifter_operand> for a data processing instruction:
3171
3172 <Rm>
3173 <Rm>, <shift>
3174 #<immediate>
3175 #<immediate>, LSL #imm
3176
3177 where <shift> is handled by parse_shift above, and the last two
3178 cases are handled by the function above.
3179
3180 Validation of immediate operands is deferred to md_apply_fix.
3181
3182 Return TRUE on success; otherwise return FALSE. */
3183
3184 static bfd_boolean
3185 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3186 enum parse_shift_mode mode)
3187 {
3188 const reg_entry *reg;
3189 aarch64_opnd_qualifier_t qualifier;
3190 enum aarch64_operand_class opd_class
3191 = aarch64_get_operand_class (operand->type);
3192
3193 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3194 if (reg)
3195 {
3196 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3197 {
3198 set_syntax_error (_("unexpected register in the immediate operand"));
3199 return FALSE;
3200 }
3201
3202 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3203 {
3204 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3205 return FALSE;
3206 }
3207
3208 operand->reg.regno = reg->number;
3209 operand->qualifier = qualifier;
3210
3211 /* Accept optional shift operation on register. */
3212 if (! skip_past_comma (str))
3213 return TRUE;
3214
3215 if (! parse_shift (str, operand, mode))
3216 return FALSE;
3217
3218 return TRUE;
3219 }
3220 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3221 {
3222 set_syntax_error
3223 (_("integer register expected in the extended/shifted operand "
3224 "register"));
3225 return FALSE;
3226 }
3227
3228 /* We have a shifted immediate variable. */
3229 return parse_shifter_operand_imm (str, operand, mode);
3230 }
3231
3232 /* Return TRUE on success; return FALSE otherwise. */
3233
3234 static bfd_boolean
3235 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3236 enum parse_shift_mode mode)
3237 {
3238 char *p = *str;
3239
3240 /* Determine if we have the sequence of characters #: or just :
3241 coming next. If we do, then we check for a :rello: relocation
3242 modifier. If we don't, punt the whole lot to
3243 parse_shifter_operand. */
3244
3245 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3246 {
3247 struct reloc_table_entry *entry;
3248
3249 if (p[0] == '#')
3250 p += 2;
3251 else
3252 p++;
3253 *str = p;
3254
3255 /* Try to parse a relocation. Anything else is an error. */
3256 if (!(entry = find_reloc_table_entry (str)))
3257 {
3258 set_syntax_error (_("unknown relocation modifier"));
3259 return FALSE;
3260 }
3261
3262 if (entry->add_type == 0)
3263 {
3264 set_syntax_error
3265 (_("this relocation modifier is not allowed on this instruction"));
3266 return FALSE;
3267 }
3268
3269 /* Save str before we decompose it. */
3270 p = *str;
3271
3272 /* Next, we parse the expression. */
3273 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3274 return FALSE;
3275
3276 /* Record the relocation type (use the ADD variant here). */
3277 inst.reloc.type = entry->add_type;
3278 inst.reloc.pc_rel = entry->pc_rel;
3279
3280 /* If str is empty, we've reached the end, stop here. */
3281 if (**str == '\0')
3282 return TRUE;
3283
3284 /* Otherwise, we have a shifted reloc modifier, so rewind to
3285 recover the variable name and continue parsing for the shifter. */
3286 *str = p;
3287 return parse_shifter_operand_imm (str, operand, mode);
3288 }
3289
3290 return parse_shifter_operand (str, operand, mode);
3291 }
3292
3293 /* Parse all forms of an address expression. Information is written
3294 to *OPERAND and/or inst.reloc.
3295
3296 The A64 instruction set has the following addressing modes:
3297
3298 Offset
3299 [base] // in SIMD ld/st structure
3300 [base{,#0}] // in ld/st exclusive
3301 [base{,#imm}]
3302 [base,Xm{,LSL #imm}]
3303 [base,Xm,SXTX {#imm}]
3304 [base,Wm,(S|U)XTW {#imm}]
3305 Pre-indexed
3306 [base,#imm]!
3307 Post-indexed
3308 [base],#imm
3309 [base],Xm // in SIMD ld/st structure
3310 PC-relative (literal)
3311 label
3312 SVE:
3313 [base,#imm,MUL VL]
3314 [base,Zm.D{,LSL #imm}]
3315 [base,Zm.S,(S|U)XTW {#imm}]
3316 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3317 [Zn.S,#imm]
3318 [Zn.D,#imm]
3319 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3320 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3321 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3322
3323 (As a convenience, the notation "=immediate" is permitted in conjunction
3324 with the pc-relative literal load instructions to automatically place an
3325 immediate value or symbolic address in a nearby literal pool and generate
3326 a hidden label which references it.)
3327
3328 Upon a successful parsing, the address structure in *OPERAND will be
3329 filled in the following way:
3330
3331 .base_regno = <base>
3332 .offset.is_reg // 1 if the offset is a register
3333 .offset.imm = <imm>
3334 .offset.regno = <Rm>
3335
3336 For different addressing modes defined in the A64 ISA:
3337
3338 Offset
3339 .pcrel=0; .preind=1; .postind=0; .writeback=0
3340 Pre-indexed
3341 .pcrel=0; .preind=1; .postind=0; .writeback=1
3342 Post-indexed
3343 .pcrel=0; .preind=0; .postind=1; .writeback=1
3344 PC-relative (literal)
3345 .pcrel=1; .preind=1; .postind=0; .writeback=0
3346
3347 The shift/extension information, if any, will be stored in .shifter.
3348 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3349 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3350 corresponding register.
3351
3352 BASE_TYPE says which types of base register should be accepted and
3353 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3354 is the type of shifter that is allowed for immediate offsets,
3355 or SHIFTED_NONE if none.
3356
3357 In all other respects, it is the caller's responsibility to check
3358 for addressing modes not supported by the instruction, and to set
3359 inst.reloc.type. */
3360
3361 static bfd_boolean
3362 parse_address_main (char **str, aarch64_opnd_info *operand,
3363 aarch64_opnd_qualifier_t *base_qualifier,
3364 aarch64_opnd_qualifier_t *offset_qualifier,
3365 aarch64_reg_type base_type, aarch64_reg_type offset_type,
3366 enum parse_shift_mode imm_shift_mode)
3367 {
3368 char *p = *str;
3369 const reg_entry *reg;
3370 expressionS *exp = &inst.reloc.exp;
3371
3372 *base_qualifier = AARCH64_OPND_QLF_NIL;
3373 *offset_qualifier = AARCH64_OPND_QLF_NIL;
3374 if (! skip_past_char (&p, '['))
3375 {
3376 /* =immediate or label. */
3377 operand->addr.pcrel = 1;
3378 operand->addr.preind = 1;
3379
3380 /* #:<reloc_op>:<symbol> */
3381 skip_past_char (&p, '#');
3382 if (skip_past_char (&p, ':'))
3383 {
3384 bfd_reloc_code_real_type ty;
3385 struct reloc_table_entry *entry;
3386
3387 /* Try to parse a relocation modifier. Anything else is
3388 an error. */
3389 entry = find_reloc_table_entry (&p);
3390 if (! entry)
3391 {
3392 set_syntax_error (_("unknown relocation modifier"));
3393 return FALSE;
3394 }
3395
3396 switch (operand->type)
3397 {
3398 case AARCH64_OPND_ADDR_PCREL21:
3399 /* adr */
3400 ty = entry->adr_type;
3401 break;
3402
3403 default:
3404 ty = entry->ld_literal_type;
3405 break;
3406 }
3407
3408 if (ty == 0)
3409 {
3410 set_syntax_error
3411 (_("this relocation modifier is not allowed on this "
3412 "instruction"));
3413 return FALSE;
3414 }
3415
3416 /* #:<reloc_op>: */
3417 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3418 {
3419 set_syntax_error (_("invalid relocation expression"));
3420 return FALSE;
3421 }
3422
3423 /* #:<reloc_op>:<expr> */
3424 /* Record the relocation type. */
3425 inst.reloc.type = ty;
3426 inst.reloc.pc_rel = entry->pc_rel;
3427 }
3428 else
3429 {
3430
3431 if (skip_past_char (&p, '='))
3432 /* =immediate; need to generate the literal in the literal pool. */
3433 inst.gen_lit_pool = 1;
3434
3435 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3436 {
3437 set_syntax_error (_("invalid address"));
3438 return FALSE;
3439 }
3440 }
3441
3442 *str = p;
3443 return TRUE;
3444 }
3445
3446 /* [ */
3447
3448 reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3449 if (!reg || !aarch64_check_reg_type (reg, base_type))
3450 {
3451 set_syntax_error (_(get_reg_expected_msg (base_type)));
3452 return FALSE;
3453 }
3454 operand->addr.base_regno = reg->number;
3455
3456 /* [Xn */
3457 if (skip_past_comma (&p))
3458 {
3459 /* [Xn, */
3460 operand->addr.preind = 1;
3461
3462 reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3463 if (reg)
3464 {
3465 if (!aarch64_check_reg_type (reg, offset_type))
3466 {
3467 set_syntax_error (_(get_reg_expected_msg (offset_type)));
3468 return FALSE;
3469 }
3470
3471 /* [Xn,Rm */
3472 operand->addr.offset.regno = reg->number;
3473 operand->addr.offset.is_reg = 1;
3474 /* Shifted index. */
3475 if (skip_past_comma (&p))
3476 {
3477 /* [Xn,Rm, */
3478 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3479 /* Use the diagnostics set in parse_shift, so not set new
3480 error message here. */
3481 return FALSE;
3482 }
3483 /* We only accept:
3484 [base,Xm{,LSL #imm}]
3485 [base,Xm,SXTX {#imm}]
3486 [base,Wm,(S|U)XTW {#imm}] */
3487 if (operand->shifter.kind == AARCH64_MOD_NONE
3488 || operand->shifter.kind == AARCH64_MOD_LSL
3489 || operand->shifter.kind == AARCH64_MOD_SXTX)
3490 {
3491 if (*offset_qualifier == AARCH64_OPND_QLF_W)
3492 {
3493 set_syntax_error (_("invalid use of 32-bit register offset"));
3494 return FALSE;
3495 }
3496 if (aarch64_get_qualifier_esize (*base_qualifier)
3497 != aarch64_get_qualifier_esize (*offset_qualifier))
3498 {
3499 set_syntax_error (_("offset has different size from base"));
3500 return FALSE;
3501 }
3502 }
3503 else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3504 {
3505 set_syntax_error (_("invalid use of 64-bit register offset"));
3506 return FALSE;
3507 }
3508 }
3509 else
3510 {
3511 /* [Xn,#:<reloc_op>:<symbol> */
3512 skip_past_char (&p, '#');
3513 if (skip_past_char (&p, ':'))
3514 {
3515 struct reloc_table_entry *entry;
3516
3517 /* Try to parse a relocation modifier. Anything else is
3518 an error. */
3519 if (!(entry = find_reloc_table_entry (&p)))
3520 {
3521 set_syntax_error (_("unknown relocation modifier"));
3522 return FALSE;
3523 }
3524
3525 if (entry->ldst_type == 0)
3526 {
3527 set_syntax_error
3528 (_("this relocation modifier is not allowed on this "
3529 "instruction"));
3530 return FALSE;
3531 }
3532
3533 /* [Xn,#:<reloc_op>: */
3534 /* We now have the group relocation table entry corresponding to
3535 the name in the assembler source. Next, we parse the
3536 expression. */
3537 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3538 {
3539 set_syntax_error (_("invalid relocation expression"));
3540 return FALSE;
3541 }
3542
3543 /* [Xn,#:<reloc_op>:<expr> */
3544 /* Record the load/store relocation type. */
3545 inst.reloc.type = entry->ldst_type;
3546 inst.reloc.pc_rel = entry->pc_rel;
3547 }
3548 else
3549 {
3550 if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3551 {
3552 set_syntax_error (_("invalid expression in the address"));
3553 return FALSE;
3554 }
3555 /* [Xn,<expr> */
3556 if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3557 /* [Xn,<expr>,<shifter> */
3558 if (! parse_shift (&p, operand, imm_shift_mode))
3559 return FALSE;
3560 }
3561 }
3562 }
3563
3564 if (! skip_past_char (&p, ']'))
3565 {
3566 set_syntax_error (_("']' expected"));
3567 return FALSE;
3568 }
3569
3570 if (skip_past_char (&p, '!'))
3571 {
3572 if (operand->addr.preind && operand->addr.offset.is_reg)
3573 {
3574 set_syntax_error (_("register offset not allowed in pre-indexed "
3575 "addressing mode"));
3576 return FALSE;
3577 }
3578 /* [Xn]! */
3579 operand->addr.writeback = 1;
3580 }
3581 else if (skip_past_comma (&p))
3582 {
3583 /* [Xn], */
3584 operand->addr.postind = 1;
3585 operand->addr.writeback = 1;
3586
3587 if (operand->addr.preind)
3588 {
3589 set_syntax_error (_("cannot combine pre- and post-indexing"));
3590 return FALSE;
3591 }
3592
3593 reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3594 if (reg)
3595 {
3596 /* [Xn],Xm */
3597 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3598 {
3599 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3600 return FALSE;
3601 }
3602
3603 operand->addr.offset.regno = reg->number;
3604 operand->addr.offset.is_reg = 1;
3605 }
3606 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3607 {
3608 /* [Xn],#expr */
3609 set_syntax_error (_("invalid expression in the address"));
3610 return FALSE;
3611 }
3612 }
3613
3614 /* If at this point neither .preind nor .postind is set, we have a
3615 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3616 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3617 {
3618 if (operand->addr.writeback)
3619 {
3620 /* Reject [Rn]! */
3621 set_syntax_error (_("missing offset in the pre-indexed address"));
3622 return FALSE;
3623 }
3624 operand->addr.preind = 1;
3625 inst.reloc.exp.X_op = O_constant;
3626 inst.reloc.exp.X_add_number = 0;
3627 }
3628
3629 *str = p;
3630 return TRUE;
3631 }
3632
3633 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3634 on success. */
3635 static bfd_boolean
3636 parse_address (char **str, aarch64_opnd_info *operand)
3637 {
3638 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3639 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3640 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3641 }
3642
3643 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3644 The arguments have the same meaning as for parse_address_main.
3645 Return TRUE on success. */
3646 static bfd_boolean
3647 parse_sve_address (char **str, aarch64_opnd_info *operand,
3648 aarch64_opnd_qualifier_t *base_qualifier,
3649 aarch64_opnd_qualifier_t *offset_qualifier)
3650 {
3651 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3652 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3653 SHIFTED_MUL_VL);
3654 }
3655
3656 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3657 Return TRUE on success; otherwise return FALSE. */
3658 static bfd_boolean
3659 parse_half (char **str, int *internal_fixup_p)
3660 {
3661 char *p = *str;
3662
3663 skip_past_char (&p, '#');
3664
3665 gas_assert (internal_fixup_p);
3666 *internal_fixup_p = 0;
3667
3668 if (*p == ':')
3669 {
3670 struct reloc_table_entry *entry;
3671
3672 /* Try to parse a relocation. Anything else is an error. */
3673 ++p;
3674 if (!(entry = find_reloc_table_entry (&p)))
3675 {
3676 set_syntax_error (_("unknown relocation modifier"));
3677 return FALSE;
3678 }
3679
3680 if (entry->movw_type == 0)
3681 {
3682 set_syntax_error
3683 (_("this relocation modifier is not allowed on this instruction"));
3684 return FALSE;
3685 }
3686
3687 inst.reloc.type = entry->movw_type;
3688 }
3689 else
3690 *internal_fixup_p = 1;
3691
3692 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3693 return FALSE;
3694
3695 *str = p;
3696 return TRUE;
3697 }
3698
3699 /* Parse an operand for an ADRP instruction:
3700 ADRP <Xd>, <label>
3701 Return TRUE on success; otherwise return FALSE. */
3702
3703 static bfd_boolean
3704 parse_adrp (char **str)
3705 {
3706 char *p;
3707
3708 p = *str;
3709 if (*p == ':')
3710 {
3711 struct reloc_table_entry *entry;
3712
3713 /* Try to parse a relocation. Anything else is an error. */
3714 ++p;
3715 if (!(entry = find_reloc_table_entry (&p)))
3716 {
3717 set_syntax_error (_("unknown relocation modifier"));
3718 return FALSE;
3719 }
3720
3721 if (entry->adrp_type == 0)
3722 {
3723 set_syntax_error
3724 (_("this relocation modifier is not allowed on this instruction"));
3725 return FALSE;
3726 }
3727
3728 inst.reloc.type = entry->adrp_type;
3729 }
3730 else
3731 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3732
3733 inst.reloc.pc_rel = 1;
3734
3735 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3736 return FALSE;
3737
3738 *str = p;
3739 return TRUE;
3740 }
3741
3742 /* Miscellaneous. */
3743
3744 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3745 of SIZE tokens in which index I gives the token for field value I,
3746 or is null if field value I is invalid. REG_TYPE says which register
3747 names should be treated as registers rather than as symbolic immediates.
3748
3749 Return true on success, moving *STR past the operand and storing the
3750 field value in *VAL. */
3751
3752 static int
3753 parse_enum_string (char **str, int64_t *val, const char *const *array,
3754 size_t size, aarch64_reg_type reg_type)
3755 {
3756 expressionS exp;
3757 char *p, *q;
3758 size_t i;
3759
3760 /* Match C-like tokens. */
3761 p = q = *str;
3762 while (ISALNUM (*q))
3763 q++;
3764
3765 for (i = 0; i < size; ++i)
3766 if (array[i]
3767 && strncasecmp (array[i], p, q - p) == 0
3768 && array[i][q - p] == 0)
3769 {
3770 *val = i;
3771 *str = q;
3772 return TRUE;
3773 }
3774
3775 if (!parse_immediate_expression (&p, &exp, reg_type))
3776 return FALSE;
3777
3778 if (exp.X_op == O_constant
3779 && (uint64_t) exp.X_add_number < size)
3780 {
3781 *val = exp.X_add_number;
3782 *str = p;
3783 return TRUE;
3784 }
3785
3786 /* Use the default error for this operand. */
3787 return FALSE;
3788 }
3789
3790 /* Parse an option for a preload instruction. Returns the encoding for the
3791 option, or PARSE_FAIL. */
3792
3793 static int
3794 parse_pldop (char **str)
3795 {
3796 char *p, *q;
3797 const struct aarch64_name_value_pair *o;
3798
3799 p = q = *str;
3800 while (ISALNUM (*q))
3801 q++;
3802
3803 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3804 if (!o)
3805 return PARSE_FAIL;
3806
3807 *str = q;
3808 return o->value;
3809 }
3810
3811 /* Parse an option for a barrier instruction. Returns the encoding for the
3812 option, or PARSE_FAIL. */
3813
3814 static int
3815 parse_barrier (char **str)
3816 {
3817 char *p, *q;
3818 const asm_barrier_opt *o;
3819
3820 p = q = *str;
3821 while (ISALPHA (*q))
3822 q++;
3823
3824 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3825 if (!o)
3826 return PARSE_FAIL;
3827
3828 *str = q;
3829 return o->value;
3830 }
3831
3832 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3833 return 0 if successful. Otherwise return PARSE_FAIL. */
3834
3835 static int
3836 parse_barrier_psb (char **str,
3837 const struct aarch64_name_value_pair ** hint_opt)
3838 {
3839 char *p, *q;
3840 const struct aarch64_name_value_pair *o;
3841
3842 p = q = *str;
3843 while (ISALPHA (*q))
3844 q++;
3845
3846 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3847 if (!o)
3848 {
3849 set_fatal_syntax_error
3850 ( _("unknown or missing option to PSB"));
3851 return PARSE_FAIL;
3852 }
3853
3854 if (o->value != 0x11)
3855 {
3856 /* PSB only accepts option name 'CSYNC'. */
3857 set_syntax_error
3858 (_("the specified option is not accepted for PSB"));
3859 return PARSE_FAIL;
3860 }
3861
3862 *str = q;
3863 *hint_opt = o;
3864 return 0;
3865 }
3866
3867 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3868 Returns the encoding for the option, or PARSE_FAIL.
3869
3870 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3871 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3872
3873 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3874 field, otherwise as a system register.
3875 */
3876
3877 static int
3878 parse_sys_reg (char **str, struct hash_control *sys_regs,
3879 int imple_defined_p, int pstatefield_p)
3880 {
3881 char *p, *q;
3882 char buf[32];
3883 const aarch64_sys_reg *o;
3884 int value;
3885
3886 p = buf;
3887 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3888 if (p < buf + 31)
3889 *p++ = TOLOWER (*q);
3890 *p = '\0';
3891 /* Assert that BUF be large enough. */
3892 gas_assert (p - buf == q - *str);
3893
3894 o = hash_find (sys_regs, buf);
3895 if (!o)
3896 {
3897 if (!imple_defined_p)
3898 return PARSE_FAIL;
3899 else
3900 {
3901 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3902 unsigned int op0, op1, cn, cm, op2;
3903
3904 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3905 != 5)
3906 return PARSE_FAIL;
3907 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3908 return PARSE_FAIL;
3909 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3910 }
3911 }
3912 else
3913 {
3914 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3915 as_bad (_("selected processor does not support PSTATE field "
3916 "name '%s'"), buf);
3917 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3918 as_bad (_("selected processor does not support system register "
3919 "name '%s'"), buf);
3920 if (aarch64_sys_reg_deprecated_p (o))
3921 as_warn (_("system register name '%s' is deprecated and may be "
3922 "removed in a future release"), buf);
3923 value = o->value;
3924 }
3925
3926 *str = q;
3927 return value;
3928 }
3929
3930 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3931 for the option, or NULL. */
3932
3933 static const aarch64_sys_ins_reg *
3934 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3935 {
3936 char *p, *q;
3937 char buf[32];
3938 const aarch64_sys_ins_reg *o;
3939
3940 p = buf;
3941 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3942 if (p < buf + 31)
3943 *p++ = TOLOWER (*q);
3944 *p = '\0';
3945
3946 o = hash_find (sys_ins_regs, buf);
3947 if (!o)
3948 return NULL;
3949
3950 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
3951 as_bad (_("selected processor does not support system register "
3952 "name '%s'"), buf);
3953
3954 *str = q;
3955 return o;
3956 }
3957 \f
/* Operand-parsing helper macros.  Each expansion relies on local
   variables of the enclosing function (STR, VAL, REG, RTYPE, QUALIFIER,
   INFO, IMM_REG_TYPE) and on a FAILURE label being in scope.  */

/* Consume the character CHR from STR, or branch to FAILURE.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into VAL; on failure record the
   default error and branch to FAILURE.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse an integer register of type REG_TYPE, storing its number and
   qualifier into INFO; otherwise record the default error and branch
   to FAILURE.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check ("nc"), or
   branch to FAILURE.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX;
   otherwise record a fatal error and branch to FAILURE.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic-enum operand (see parse_enum_string) against ARRAY,
   or branch to FAILURE.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; branch to FAILURE when it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4009 \f
/* Encode the 12-bit immediate field (bits [21:10]) of an Add/sub
   immediate instruction.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  uint32_t encoded = imm << 10;
  return encoded;
}
4016
/* Encode the shift-amount field (bits [23:22]) of an Add/sub
   immediate instruction.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  uint32_t encoded = cnt << 22;
  return encoded;
}
4023
4024
/* Encode the 21-bit immediate of an Adr instruction: immlo (bits [1:0])
   goes to bits [30:29] and immhi (bits [20:2]) to bits [23:5].  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;		/* [1:0]  -> [30:29] */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;	/* [20:2] -> [23:5]  */
  return immlo | immhi;
}
4032
/* Encode the 16-bit immediate field (bits [20:5]) of a Move wide
   immediate instruction.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  uint32_t encoded = imm << 5;
  return encoded;
}
4039
/* Encode the 26-bit offset of an unconditional branch (bits [25:0]).  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  uint32_t mask = (1u << 26) - 1;
  return ofs & mask;
}
4046
/* Encode the 19-bit offset of a conditional branch or compare & branch
   (bits [23:5]).  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  uint32_t field = ofs & ((1u << 19) - 1);
  return field << 5;
}
4053
/* Encode the 19-bit offset of a load-literal instruction
   (bits [23:5]).  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  uint32_t field = ofs & ((1u << 19) - 1);
  return field << 5;
}
4060
/* Encode the 14-bit offset of a test & branch instruction
   (bits [18:5]).  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  uint32_t field = ofs & ((1u << 14) - 1);
  return field << 5;
}
4067
/* Encode the 16-bit immediate field (bits [20:5]) of svc/hvc/smc.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  uint32_t encoded = imm << 5;
  return encoded;
}
4074
/* Reencode add(s) to sub(s), or sub(s) to add(s), by flipping bit 30
   which selects between the two.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ (1u << 30);
}
4081
/* Reencode a MOVZ/MOVN opcode as MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1u << 30);
}
4087
/* Reencode a MOVZ/MOVN opcode as MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1u << 30);
}
4093
4094 /* Overall per-instruction processing. */
4095
4096 /* We need to be able to fix up arbitrary expressions in some statements.
4097 This is so that we can handle symbols that are an arbitrary distance from
4098 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4099 which returns part of an address in a form which will be valid for
4100 a data instruction. We do this by pushing the expression into a symbol
4101 in the expr_section, and creating a fix for that. */
4102
4103 static fixS *
4104 fix_new_aarch64 (fragS * frag,
4105 int where,
4106 short int size, expressionS * exp, int pc_rel, int reloc)
4107 {
4108 fixS *new_fix;
4109
4110 switch (exp->X_op)
4111 {
4112 case O_constant:
4113 case O_symbol:
4114 case O_add:
4115 case O_subtract:
4116 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4117 break;
4118
4119 default:
4120 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4121 pc_rel, reloc);
4122 break;
4123 }
4124 return new_fix;
4125 }
4126 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;
4132
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Indexed by
   enum aarch64_operand_error_kind, so the order here must match that
   enum.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4148
4149 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4150
4151 When multiple errors of different kinds are found in the same assembly
4152 line, only the error of the highest severity will be picked up for
4153 issuing the diagnostics. */
4154
4155 static inline bfd_boolean
4156 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4157 enum aarch64_operand_error_kind rhs)
4158 {
4159 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4160 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
4161 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4162 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4163 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4164 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4165 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4166 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4167 return lhs > rhs;
4168 }
4169
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Copy the first 31 bytes and assume that the full name is
     included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  ptr = mnemonic;
  while (is_part_of_name (*ptr))
    ++ptr;
  *ptr = '\0';

  /* Append '...' when a long name has been truncated.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4198
4199 static void
4200 reset_aarch64_instruction (aarch64_instruction *instruction)
4201 {
4202 memset (instruction, '\0', sizeof (aarch64_instruction));
4203 instruction->reloc.type = BFD_RELOC_UNUSED;
4204 }
4205
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;	/* Opcode template the error relates to.  */
  aarch64_operand_error detail;	/* The recorded error itself.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records; new records are inserted at the
   head (see add_operand_error_record).  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
4225
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled by init_operand_error_report and
   remove_operand_error_record to avoid repeated allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4237
4238 /* Initialize the data structure that stores the operand mismatch
4239 information on assembling one line of the assembly code. */
4240 static void
4241 init_operand_error_report (void)
4242 {
4243 if (operand_error_report.head != NULL)
4244 {
4245 gas_assert (operand_error_report.tail != NULL);
4246 operand_error_report.tail->next = free_opnd_error_record_nodes;
4247 free_opnd_error_record_nodes = operand_error_report.head;
4248 operand_error_report.head = NULL;
4249 operand_error_report.tail = NULL;
4250 return;
4251 }
4252 gas_assert (operand_error_report.tail == NULL);
4253 }
4254
4255 /* Return TRUE if some operand error has been recorded during the
4256 parsing of the current assembly line using the opcode *OPCODE;
4257 otherwise return FALSE. */
4258 static inline bfd_boolean
4259 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4260 {
4261 operand_error_record *record = operand_error_report.head;
4262 return record && record->opcode == opcode;
4263 }
4264
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  /* Nothing on the free list; allocate a fresh node.  */
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Recycle a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Overwrite (or fill in) the details of the record at the head.  */
  record->detail = new_record->detail;
}
4316
4317 static inline void
4318 record_operand_error_info (const aarch64_opcode *opcode,
4319 aarch64_operand_error *error_info)
4320 {
4321 operand_error_record record;
4322 record.opcode = opcode;
4323 record.detail = *error_info;
4324 add_operand_error_record (&record);
4325 }
4326
4327 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4328 error message *ERROR, for operand IDX (count from 0). */
4329
4330 static void
4331 record_operand_error (const aarch64_opcode *opcode, int idx,
4332 enum aarch64_operand_error_kind kind,
4333 const char* error)
4334 {
4335 aarch64_operand_error info;
4336 memset(&info, 0, sizeof (info));
4337 info.index = idx;
4338 info.kind = kind;
4339 info.error = error;
4340 record_operand_error_info (opcode, &info);
4341 }
4342
4343 static void
4344 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4345 enum aarch64_operand_error_kind kind,
4346 const char* error, const int *extra_data)
4347 {
4348 aarch64_operand_error info;
4349 info.index = idx;
4350 info.kind = kind;
4351 info.error = error;
4352 info.data[0] = extra_data[0];
4353 info.data[1] = extra_data[1];
4354 info.data[2] = extra_data[2];
4355 record_operand_error_info (opcode, &info);
4356 }
4357
4358 static void
4359 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4360 const char* error, int lower_bound,
4361 int upper_bound)
4362 {
4363 int data[3] = {lower_bound, upper_bound, 0};
4364 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4365 error, data);
4366 }
4367
4368 /* Remove the operand error record for *OPCODE. */
4369 static void ATTRIBUTE_UNUSED
4370 remove_operand_error_record (const aarch64_opcode *opcode)
4371 {
4372 if (opcode_has_operand_error_p (opcode))
4373 {
4374 operand_error_record* record = operand_error_report.head;
4375 gas_assert (record != NULL && operand_error_report.tail != NULL);
4376 operand_error_report.head = record->next;
4377 record->next = free_opnd_error_record_nodes;
4378 free_opnd_error_record_nodes = record;
4379 if (operand_error_report.head == NULL)
4380 {
4381 gas_assert (operand_error_report.tail == record);
4382 operand_error_report.tail = NULL;
4383 }
4384 }
4385 }
4386
4387 /* Given the instruction in *INSTR, return the index of the best matched
4388 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4389
4390 Return -1 if there is no qualifier sequence; return the first match
4391 if there is multiple matches found. */
4392
4393 static int
4394 find_best_match (const aarch64_inst *instr,
4395 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4396 {
4397 int i, num_opnds, max_num_matched, idx;
4398
4399 num_opnds = aarch64_num_of_operands (instr->opcode);
4400 if (num_opnds == 0)
4401 {
4402 DEBUG_TRACE ("no operand");
4403 return -1;
4404 }
4405
4406 max_num_matched = 0;
4407 idx = 0;
4408
4409 /* For each pattern. */
4410 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4411 {
4412 int j, num_matched;
4413 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4414
4415 /* Most opcodes has much fewer patterns in the list. */
4416 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
4417 {
4418 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4419 break;
4420 }
4421
4422 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4423 if (*qualifiers == instr->operands[j].qualifier)
4424 ++num_matched;
4425
4426 if (num_matched > max_num_matched)
4427 {
4428 max_num_matched = num_matched;
4429 idx = i;
4430 }
4431 }
4432
4433 DEBUG_TRACE ("return with %d", idx);
4434 return idx;
4435 }
4436
4437 /* Assign qualifiers in the qualifier seqence (headed by QUALIFIERS) to the
4438 corresponding operands in *INSTR. */
4439
4440 static inline void
4441 assign_qualifier_sequence (aarch64_inst *instr,
4442 const aarch64_opnd_qualifier_t *qualifiers)
4443 {
4444 int i = 0;
4445 int num_opnds = aarch64_num_of_operands (instr->opcode);
4446 gas_assert (num_opnds);
4447 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4448 instr->operands[i].qualifier = *qualifiers;
4449 }
4450
4451 /* Print operands for the diagnosis purpose. */
4452
4453 static void
4454 print_operands (char *buf, const aarch64_opcode *opcode,
4455 const aarch64_opnd_info *opnds)
4456 {
4457 int i;
4458
4459 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4460 {
4461 char str[128];
4462
4463 /* We regard the opcode operand info more, however we also look into
4464 the inst->operands to support the disassembling of the optional
4465 operand.
4466 The two operand code should be the same in all cases, apart from
4467 when the operand can be optional. */
4468 if (opcode->operands[i] == AARCH64_OPND_NIL
4469 || opnds[i].type == AARCH64_OPND_NIL)
4470 break;
4471
4472 /* Generate the operand string in STR. */
4473 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL);
4474
4475 /* Delimiter. */
4476 if (str[0] != '\0')
4477 strcat (buf, i == 0 ? " " : ", ");
4478
4479 /* Append the operand string. */
4480 strcat (buf, str);
4481 }
4482 }
4483
/* Send to stderr a string as information, prefixed with the current
   file/line location when available.  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list args;

  if (file != NULL)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));

  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);

  (void) fputc ('\n', stderr);
}
4507
/* Output one operand error record.  RECORD describes the mismatch against
   one opcode template; STR is the user's original assembly line, quoted in
   every diagnostic.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* Operand code of the offending operand; NIL when the error is not tied
     to a particular operand (idx < 0).  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      /* A record should never be queued without an error kind.  */
      gas_assert (0);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    as_bad (_("%s -- `%s'"), detail->error, str);
	  else
	    as_bad (_("%s at operand %d -- `%s'"),
		    detail->error, idx + 1, str);
	}
      else
	{
	  /* No prepared message, so the error must name an operand.  */
	  gas_assert (idx >= 0);
	  as_bad (_("operand %d must be %s -- `%s'"), idx + 1,
		  aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      as_bad (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when other errors co-exist with this error, the 'corrected'
	     instruction may be still incorrect, e.g. given
	     'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	     'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  The re-parse is expected to succeed: this error kind
	     means only the operand qualifiers mismatched.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* The encode is expected to fail, confirming the variant is
	     indeed invalid.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_(" did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_(" %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_(" other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_(" %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      as_bad (_("operand %d must be the same register as operand 1 -- `%s'"),
	      detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the permitted minimum/maximum; when they are
	 equal only one value is legal.  */
      if (detail->data[0] != detail->data[1])
	as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], detail->data[1], idx + 1, str);
      else
	as_bad (_("%s must be %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	as_bad (_("invalid number of registers in the list; "
		  "only 1 register is expected at operand %d -- `%s'"),
		idx + 1, str);
      else
	as_bad (_("invalid number of registers in the list; "
		  "%d registers are expected at operand %d -- `%s'"),
		detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      as_bad (_("immediate value must be a multiple of "
		"%d at operand %d -- `%s'"),
	      detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4682
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information has
   been collected for an assembly line, and there may be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.  */
4689
static void
output_operand_error_report (char *str)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  /* Redundant with the early return above; kept as a sanity check that
     head and tail stay consistent.  */
  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      DEBUG_TRACE ("single opcode entry with error kind: %s",
		   operand_mismatch_kind_names[head->detail.kind]);
      output_operand_error_record (head, str);
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entres with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind))
	kind = curr->detail.kind;
    }
  gas_assert (kind != AARCH64_OPDE_NIL);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      if (curr->detail.kind != kind)
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The loops above always select at least one record of KIND.  */
  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
4754 \f
/* Write the AArch64 instruction INSN to BUF - always little-endian,
   regardless of the host byte order.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  unsigned int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (i * 8)) & 0xff;
}
4765
/* Read a little-endian AArch64 instruction from BUF.

   Each byte is widened to uint32_t before shifting: the original code
   shifted the int-promoted byte, so `where[3] << 24' with a byte >= 0x80
   left-shifted into the sign bit of int, which is undefined behaviour
   (C11 6.5.7).  */

static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *where = (unsigned char *) buf;
  uint32_t result;
  result = ((uint32_t) where[0]
	    | ((uint32_t) where[1] << 8)
	    | ((uint32_t) where[2] << 16)
	    | ((uint32_t) where[3] << 24));
  return result;
}
4774
/* Emit the current instruction (inst.base.value, INSN_SIZE bytes) into the
   current frag, creating a fixup if a relocation is pending.  NEW_INST, if
   non-NULL, is attached to the fixup so the relocation code can re-encode
   the instruction later.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room in the current frag for one instruction.  */
  to = frag_more (INSN_SIZE);

  /* Mark this frag as containing emitted code.  NOTE(review): the exact
     consumer of `recorded' is elsewhere in this file -- confirm before
     relying on its semantics.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups also carry the operand code and flags so the
	     fixup machinery can encode the value itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Record the instruction for DWARF line-number information.  */
  dwarf2_emit_insn (INSN_SIZE);
}
4808
/* Link together opcodes of the same name.  */

struct templates
{
  /* One opcode entry sharing the mnemonic.  */
  aarch64_opcode *opcode;
  /* Next entry with the same mnemonic, or NULL at the end of the chain.  */
  struct templates *next;
};

typedef struct templates templates;
4818
4819 static templates *
4820 lookup_mnemonic (const char *start, int len)
4821 {
4822 templates *templ = NULL;
4823
4824 templ = hash_find_n (aarch64_ops_hsh, start, len);
4825 return templ;
4826 }
4827
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.  On success *STR is advanced past the
   mnemonic (and condition suffix, if any); on a failed condition lookup
   *STR is left at the '.' so the caller can diagnose the suffix.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  Only the first '.' is remembered.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Reject an empty mnemonic or one that begins with '.'.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      /* Look up the text after the '.' as a condition name.  */
      cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown suffix; leave *STR pointing at the '.'.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* 13 is the longest base that still fits ".c" in condname[16];
	 lookup_mnemonic takes an explicit length, so no terminating NUL
	 is required.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
4891
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL and
   records a syntax error on failure.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Smallest-width vector qualifier for each element type; the final
     qualifier is computed as an offset from these bases.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_8B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
  };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predicate register suffixes: NT_zero maps to /z, NT_merge to /m.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    /* Vector element register.  */
    return AARCH64_OPND_QLF_S_B + vectype->type;
  else
    {
      /* Vector register.  Only 4-, 8- and 16-byte total sizes are valid.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 4;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_8B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
4956
4957 /* Process an optional operand that is found omitted from the assembly line.
4958 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4959 instruction's opcode entry while IDX is the index of this omitted operand.
4960 */
4961
4962 static void
4963 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4964 int idx, aarch64_opnd_info *operand)
4965 {
4966 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4967 gas_assert (optional_operand_p (opcode, idx));
4968 gas_assert (!operand->present);
4969
4970 switch (type)
4971 {
4972 case AARCH64_OPND_Rd:
4973 case AARCH64_OPND_Rn:
4974 case AARCH64_OPND_Rm:
4975 case AARCH64_OPND_Rt:
4976 case AARCH64_OPND_Rt2:
4977 case AARCH64_OPND_Rs:
4978 case AARCH64_OPND_Ra:
4979 case AARCH64_OPND_Rt_SYS:
4980 case AARCH64_OPND_Rd_SP:
4981 case AARCH64_OPND_Rn_SP:
4982 case AARCH64_OPND_Rm_SP:
4983 case AARCH64_OPND_Fd:
4984 case AARCH64_OPND_Fn:
4985 case AARCH64_OPND_Fm:
4986 case AARCH64_OPND_Fa:
4987 case AARCH64_OPND_Ft:
4988 case AARCH64_OPND_Ft2:
4989 case AARCH64_OPND_Sd:
4990 case AARCH64_OPND_Sn:
4991 case AARCH64_OPND_Sm:
4992 case AARCH64_OPND_Vd:
4993 case AARCH64_OPND_Vn:
4994 case AARCH64_OPND_Vm:
4995 case AARCH64_OPND_VdD1:
4996 case AARCH64_OPND_VnD1:
4997 operand->reg.regno = default_value;
4998 break;
4999
5000 case AARCH64_OPND_Ed:
5001 case AARCH64_OPND_En:
5002 case AARCH64_OPND_Em:
5003 operand->reglane.regno = default_value;
5004 break;
5005
5006 case AARCH64_OPND_IDX:
5007 case AARCH64_OPND_BIT_NUM:
5008 case AARCH64_OPND_IMMR:
5009 case AARCH64_OPND_IMMS:
5010 case AARCH64_OPND_SHLL_IMM:
5011 case AARCH64_OPND_IMM_VLSL:
5012 case AARCH64_OPND_IMM_VLSR:
5013 case AARCH64_OPND_CCMP_IMM:
5014 case AARCH64_OPND_FBITS:
5015 case AARCH64_OPND_UIMM4:
5016 case AARCH64_OPND_UIMM3_OP1:
5017 case AARCH64_OPND_UIMM3_OP2:
5018 case AARCH64_OPND_IMM:
5019 case AARCH64_OPND_WIDTH:
5020 case AARCH64_OPND_UIMM7:
5021 case AARCH64_OPND_NZCV:
5022 case AARCH64_OPND_SVE_PATTERN:
5023 case AARCH64_OPND_SVE_PRFOP:
5024 operand->imm.value = default_value;
5025 break;
5026
5027 case AARCH64_OPND_SVE_PATTERN_SCALED:
5028 operand->imm.value = default_value;
5029 operand->shifter.kind = AARCH64_MOD_MUL;
5030 operand->shifter.amount = 1;
5031 break;
5032
5033 case AARCH64_OPND_EXCEPTION:
5034 inst.reloc.type = BFD_RELOC_UNUSED;
5035 break;
5036
5037 case AARCH64_OPND_BARRIER_ISB:
5038 operand->barrier = aarch64_barrier_options + default_value;
5039
5040 default:
5041 break;
5042 }
5043 }
5044
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.  Sets the implicit LSL
   shift amount on operand 1 according to the relocation's "Gn" group.  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* 32-bit (Wn) vs 64-bit (Xn) destination register.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* The signed/checking MOVW relocations are meaningless for MOVK, which
     only inserts 16 bits; reject them up front.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Map the relocation type to the implicit shift amount: group G0 selects
     bits 0-15 (shift 0), G1 bits 16-31 (shift 16), and so on.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Groups G2 and G3 address bits above 31, impossible for a 32-bit
	 destination.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5135
/* A primitive log calculator.  Return log2 of SIZE for SIZE in
   {1, 2, 4, 8, 16}; assert (and return -1) for any other value.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Also reject SIZE == 0: SIZE is unsigned, so SIZE - 1 would wrap to
     UINT_MAX and the original `size > 16' guard alone let ls[size - 1]
     read far out of bounds.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5151
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12, selecting
   the variant that matches the transfer size implied by the register
   qualifier of operand 1.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: plain LO12, TLS DTPREL LO12 (checking), TLS DTPREL LO12_NC.
     Columns: log2 of the access size (1 to 16 bytes).  The TLS rows have
     no 128-bit variant.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1 carries no qualifier yet, infer it from operand 0's.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* The TLS DTPREL rows have no 128-bit column.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index
     calculation below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5210
5211 /* Check whether a register list REGINFO is valid. The registers must be
5212 numbered in increasing order (modulo 32), in increments of one or two.
5213
5214 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5215 increments of two.
5216
5217 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5218
5219 static bfd_boolean
5220 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5221 {
5222 uint32_t i, nb_regs, prev_regno, incr;
5223
5224 nb_regs = 1 + (reginfo & 0x3);
5225 reginfo >>= 2;
5226 prev_regno = reginfo & 0x1f;
5227 incr = accept_alternate ? 2 : 1;
5228
5229 for (i = 1; i < nb_regs; ++i)
5230 {
5231 uint32_t curr_regno;
5232 reginfo >>= 5;
5233 curr_regno = reginfo & 0x1f;
5234 if (curr_regno != ((prev_regno + incr) & 0x1f))
5235 return FALSE;
5236 prev_regno = curr_regno;
5237 }
5238
5239 return TRUE;
5240 }
5241
5242 /* Generic instruction operand parser. This does no encoding and no
5243 semantic validation; it merely squirrels values away in the inst
5244 structure. Returns TRUE or FALSE depending on whether the
5245 specified grammar matched. */
5246
5247 static bfd_boolean
5248 parse_operands (char *str, const aarch64_opcode *opcode)
5249 {
5250 int i;
5251 char *backtrack_pos = 0;
5252 const enum aarch64_opnd *operands = opcode->operands;
5253 aarch64_reg_type imm_reg_type;
5254
5255 clear_error ();
5256 skip_whitespace (str);
5257
5258 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5259 imm_reg_type = REG_TYPE_R_Z_BHSDQ_VZP;
5260 else
5261 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5262
5263 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5264 {
5265 int64_t val;
5266 const reg_entry *reg;
5267 int comma_skipped_p = 0;
5268 aarch64_reg_type rtype;
5269 struct vector_type_el vectype;
5270 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5271 aarch64_opnd_info *info = &inst.base.operands[i];
5272 aarch64_reg_type reg_type;
5273
5274 DEBUG_TRACE ("parse operand %d", i);
5275
5276 /* Assign the operand code. */
5277 info->type = operands[i];
5278
5279 if (optional_operand_p (opcode, i))
5280 {
5281 /* Remember where we are in case we need to backtrack. */
5282 gas_assert (!backtrack_pos);
5283 backtrack_pos = str;
5284 }
5285
5286 /* Expect comma between operands; the backtrack mechanizm will take
5287 care of cases of omitted optional operand. */
5288 if (i > 0 && ! skip_past_char (&str, ','))
5289 {
5290 set_syntax_error (_("comma expected between operands"));
5291 goto failure;
5292 }
5293 else
5294 comma_skipped_p = 1;
5295
5296 switch (operands[i])
5297 {
5298 case AARCH64_OPND_Rd:
5299 case AARCH64_OPND_Rn:
5300 case AARCH64_OPND_Rm:
5301 case AARCH64_OPND_Rt:
5302 case AARCH64_OPND_Rt2:
5303 case AARCH64_OPND_Rs:
5304 case AARCH64_OPND_Ra:
5305 case AARCH64_OPND_Rt_SYS:
5306 case AARCH64_OPND_PAIRREG:
5307 case AARCH64_OPND_SVE_Rm:
5308 po_int_reg_or_fail (REG_TYPE_R_Z);
5309 break;
5310
5311 case AARCH64_OPND_Rd_SP:
5312 case AARCH64_OPND_Rn_SP:
5313 case AARCH64_OPND_SVE_Rn_SP:
5314 case AARCH64_OPND_Rm_SP:
5315 po_int_reg_or_fail (REG_TYPE_R_SP);
5316 break;
5317
5318 case AARCH64_OPND_Rm_EXT:
5319 case AARCH64_OPND_Rm_SFT:
5320 po_misc_or_fail (parse_shifter_operand
5321 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5322 ? SHIFTED_ARITH_IMM
5323 : SHIFTED_LOGIC_IMM)));
5324 if (!info->shifter.operator_present)
5325 {
5326 /* Default to LSL if not present. Libopcodes prefers shifter
5327 kind to be explicit. */
5328 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5329 info->shifter.kind = AARCH64_MOD_LSL;
5330 /* For Rm_EXT, libopcodes will carry out further check on whether
5331 or not stack pointer is used in the instruction (Recall that
5332 "the extend operator is not optional unless at least one of
5333 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5334 }
5335 break;
5336
5337 case AARCH64_OPND_Fd:
5338 case AARCH64_OPND_Fn:
5339 case AARCH64_OPND_Fm:
5340 case AARCH64_OPND_Fa:
5341 case AARCH64_OPND_Ft:
5342 case AARCH64_OPND_Ft2:
5343 case AARCH64_OPND_Sd:
5344 case AARCH64_OPND_Sn:
5345 case AARCH64_OPND_Sm:
5346 case AARCH64_OPND_SVE_VZn:
5347 case AARCH64_OPND_SVE_Vd:
5348 case AARCH64_OPND_SVE_Vm:
5349 case AARCH64_OPND_SVE_Vn:
5350 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5351 if (val == PARSE_FAIL)
5352 {
5353 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5354 goto failure;
5355 }
5356 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5357
5358 info->reg.regno = val;
5359 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5360 break;
5361
5362 case AARCH64_OPND_SVE_Pd:
5363 case AARCH64_OPND_SVE_Pg3:
5364 case AARCH64_OPND_SVE_Pg4_5:
5365 case AARCH64_OPND_SVE_Pg4_10:
5366 case AARCH64_OPND_SVE_Pg4_16:
5367 case AARCH64_OPND_SVE_Pm:
5368 case AARCH64_OPND_SVE_Pn:
5369 case AARCH64_OPND_SVE_Pt:
5370 reg_type = REG_TYPE_PN;
5371 goto vector_reg;
5372
5373 case AARCH64_OPND_SVE_Za_5:
5374 case AARCH64_OPND_SVE_Za_16:
5375 case AARCH64_OPND_SVE_Zd:
5376 case AARCH64_OPND_SVE_Zm_5:
5377 case AARCH64_OPND_SVE_Zm_16:
5378 case AARCH64_OPND_SVE_Zn:
5379 case AARCH64_OPND_SVE_Zt:
5380 reg_type = REG_TYPE_ZN;
5381 goto vector_reg;
5382
5383 case AARCH64_OPND_Vd:
5384 case AARCH64_OPND_Vn:
5385 case AARCH64_OPND_Vm:
5386 reg_type = REG_TYPE_VN;
5387 vector_reg:
5388 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5389 if (val == PARSE_FAIL)
5390 {
5391 first_error (_(get_reg_expected_msg (reg_type)));
5392 goto failure;
5393 }
5394 if (vectype.defined & NTA_HASINDEX)
5395 goto failure;
5396
5397 info->reg.regno = val;
5398 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5399 && vectype.type == NT_invtype)
5400 /* Unqualified Pn and Zn registers are allowed in certain
5401 contexts. Rely on F_STRICT qualifier checking to catch
5402 invalid uses. */
5403 info->qualifier = AARCH64_OPND_QLF_NIL;
5404 else
5405 {
5406 info->qualifier = vectype_to_qualifier (&vectype);
5407 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5408 goto failure;
5409 }
5410 break;
5411
5412 case AARCH64_OPND_VdD1:
5413 case AARCH64_OPND_VnD1:
5414 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5415 if (val == PARSE_FAIL)
5416 {
5417 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5418 goto failure;
5419 }
5420 if (vectype.type != NT_d || vectype.index != 1)
5421 {
5422 set_fatal_syntax_error
5423 (_("the top half of a 128-bit FP/SIMD register is expected"));
5424 goto failure;
5425 }
5426 info->reg.regno = val;
5427 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5428 here; it is correct for the purpose of encoding/decoding since
5429 only the register number is explicitly encoded in the related
5430 instructions, although this appears a bit hacky. */
5431 info->qualifier = AARCH64_OPND_QLF_S_D;
5432 break;
5433
5434 case AARCH64_OPND_SVE_Zn_INDEX:
5435 reg_type = REG_TYPE_ZN;
5436 goto vector_reg_index;
5437
5438 case AARCH64_OPND_Ed:
5439 case AARCH64_OPND_En:
5440 case AARCH64_OPND_Em:
5441 reg_type = REG_TYPE_VN;
5442 vector_reg_index:
5443 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5444 if (val == PARSE_FAIL)
5445 {
5446 first_error (_(get_reg_expected_msg (reg_type)));
5447 goto failure;
5448 }
5449 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5450 goto failure;
5451
5452 info->reglane.regno = val;
5453 info->reglane.index = vectype.index;
5454 info->qualifier = vectype_to_qualifier (&vectype);
5455 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5456 goto failure;
5457 break;
5458
5459 case AARCH64_OPND_SVE_ZnxN:
5460 case AARCH64_OPND_SVE_ZtxN:
5461 reg_type = REG_TYPE_ZN;
5462 goto vector_reg_list;
5463
5464 case AARCH64_OPND_LVn:
5465 case AARCH64_OPND_LVt:
5466 case AARCH64_OPND_LVt_AL:
5467 case AARCH64_OPND_LEt:
5468 reg_type = REG_TYPE_VN;
5469 vector_reg_list:
5470 if (reg_type == REG_TYPE_ZN
5471 && get_opcode_dependent_value (opcode) == 1
5472 && *str != '{')
5473 {
5474 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5475 if (val == PARSE_FAIL)
5476 {
5477 first_error (_(get_reg_expected_msg (reg_type)));
5478 goto failure;
5479 }
5480 info->reglist.first_regno = val;
5481 info->reglist.num_regs = 1;
5482 }
5483 else
5484 {
5485 val = parse_vector_reg_list (&str, reg_type, &vectype);
5486 if (val == PARSE_FAIL)
5487 goto failure;
5488 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5489 {
5490 set_fatal_syntax_error (_("invalid register list"));
5491 goto failure;
5492 }
5493 info->reglist.first_regno = (val >> 2) & 0x1f;
5494 info->reglist.num_regs = (val & 0x3) + 1;
5495 }
5496 if (operands[i] == AARCH64_OPND_LEt)
5497 {
5498 if (!(vectype.defined & NTA_HASINDEX))
5499 goto failure;
5500 info->reglist.has_index = 1;
5501 info->reglist.index = vectype.index;
5502 }
5503 else
5504 {
5505 if (vectype.defined & NTA_HASINDEX)
5506 goto failure;
5507 if (!(vectype.defined & NTA_HASTYPE))
5508 {
5509 if (reg_type == REG_TYPE_ZN)
5510 set_fatal_syntax_error (_("missing type suffix"));
5511 goto failure;
5512 }
5513 }
5514 info->qualifier = vectype_to_qualifier (&vectype);
5515 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5516 goto failure;
5517 break;
5518
5519 case AARCH64_OPND_CRn:
5520 case AARCH64_OPND_CRm:
5521 {
5522 char prefix = *(str++);
5523 if (prefix != 'c' && prefix != 'C')
5524 goto failure;
5525
5526 po_imm_nc_or_fail ();
5527 if (val > 15)
5528 {
5529 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5530 goto failure;
5531 }
5532 info->qualifier = AARCH64_OPND_QLF_CR;
5533 info->imm.value = val;
5534 break;
5535 }
5536
5537 case AARCH64_OPND_SHLL_IMM:
5538 case AARCH64_OPND_IMM_VLSR:
5539 po_imm_or_fail (1, 64);
5540 info->imm.value = val;
5541 break;
5542
5543 case AARCH64_OPND_CCMP_IMM:
5544 case AARCH64_OPND_SIMM5:
5545 case AARCH64_OPND_FBITS:
5546 case AARCH64_OPND_UIMM4:
5547 case AARCH64_OPND_UIMM3_OP1:
5548 case AARCH64_OPND_UIMM3_OP2:
5549 case AARCH64_OPND_IMM_VLSL:
5550 case AARCH64_OPND_IMM:
5551 case AARCH64_OPND_WIDTH:
5552 case AARCH64_OPND_SVE_INV_LIMM:
5553 case AARCH64_OPND_SVE_LIMM:
5554 case AARCH64_OPND_SVE_LIMM_MOV:
5555 case AARCH64_OPND_SVE_SHLIMM_PRED:
5556 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5557 case AARCH64_OPND_SVE_SHRIMM_PRED:
5558 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5559 case AARCH64_OPND_SVE_SIMM5:
5560 case AARCH64_OPND_SVE_SIMM5B:
5561 case AARCH64_OPND_SVE_SIMM6:
5562 case AARCH64_OPND_SVE_SIMM8:
5563 case AARCH64_OPND_SVE_UIMM3:
5564 case AARCH64_OPND_SVE_UIMM7:
5565 case AARCH64_OPND_SVE_UIMM8:
5566 case AARCH64_OPND_SVE_UIMM8_53:
5567 case AARCH64_OPND_IMM_ROT1:
5568 case AARCH64_OPND_IMM_ROT2:
5569 case AARCH64_OPND_IMM_ROT3:
5570 po_imm_nc_or_fail ();
5571 info->imm.value = val;
5572 break;
5573
5574 case AARCH64_OPND_SVE_AIMM:
5575 case AARCH64_OPND_SVE_ASIMM:
5576 po_imm_nc_or_fail ();
5577 info->imm.value = val;
5578 skip_whitespace (str);
5579 if (skip_past_comma (&str))
5580 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5581 else
5582 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5583 break;
5584
5585 case AARCH64_OPND_SVE_PATTERN:
5586 po_enum_or_fail (aarch64_sve_pattern_array);
5587 info->imm.value = val;
5588 break;
5589
5590 case AARCH64_OPND_SVE_PATTERN_SCALED:
5591 po_enum_or_fail (aarch64_sve_pattern_array);
5592 info->imm.value = val;
5593 if (skip_past_comma (&str)
5594 && !parse_shift (&str, info, SHIFTED_MUL))
5595 goto failure;
5596 if (!info->shifter.operator_present)
5597 {
5598 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5599 info->shifter.kind = AARCH64_MOD_MUL;
5600 info->shifter.amount = 1;
5601 }
5602 break;
5603
5604 case AARCH64_OPND_SVE_PRFOP:
5605 po_enum_or_fail (aarch64_sve_prfop_array);
5606 info->imm.value = val;
5607 break;
5608
5609 case AARCH64_OPND_UIMM7:
5610 po_imm_or_fail (0, 127);
5611 info->imm.value = val;
5612 break;
5613
5614 case AARCH64_OPND_IDX:
5615 case AARCH64_OPND_BIT_NUM:
5616 case AARCH64_OPND_IMMR:
5617 case AARCH64_OPND_IMMS:
5618 po_imm_or_fail (0, 63);
5619 info->imm.value = val;
5620 break;
5621
5622 case AARCH64_OPND_IMM0:
5623 po_imm_nc_or_fail ();
5624 if (val != 0)
5625 {
5626 set_fatal_syntax_error (_("immediate zero expected"));
5627 goto failure;
5628 }
5629 info->imm.value = 0;
5630 break;
5631
5632 case AARCH64_OPND_FPIMM0:
5633 {
5634 int qfloat;
5635 bfd_boolean res1 = FALSE, res2 = FALSE;
5636 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5637 it is probably not worth the effort to support it. */
5638 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5639 imm_reg_type))
5640 && (error_p ()
5641 || !(res2 = parse_constant_immediate (&str, &val,
5642 imm_reg_type))))
5643 goto failure;
5644 if ((res1 && qfloat == 0) || (res2 && val == 0))
5645 {
5646 info->imm.value = 0;
5647 info->imm.is_fp = 1;
5648 break;
5649 }
5650 set_fatal_syntax_error (_("immediate zero expected"));
5651 goto failure;
5652 }
5653
5654 case AARCH64_OPND_IMM_MOV:
5655 {
5656 char *saved = str;
5657 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5658 reg_name_p (str, REG_TYPE_VN))
5659 goto failure;
5660 str = saved;
5661 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5662 GE_OPT_PREFIX, 1));
5663 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5664 later. fix_mov_imm_insn will try to determine a machine
5665 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5666 message if the immediate cannot be moved by a single
5667 instruction. */
5668 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5669 inst.base.operands[i].skip = 1;
5670 }
5671 break;
5672
5673 case AARCH64_OPND_SIMD_IMM:
5674 case AARCH64_OPND_SIMD_IMM_SFT:
5675 if (! parse_big_immediate (&str, &val, imm_reg_type))
5676 goto failure;
5677 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5678 /* addr_off_p */ 0,
5679 /* need_libopcodes_p */ 1,
5680 /* skip_p */ 1);
5681 /* Parse shift.
5682 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5683 shift, we don't check it here; we leave the checking to
5684 the libopcodes (operand_general_constraint_met_p). By
5685 doing this, we achieve better diagnostics. */
5686 if (skip_past_comma (&str)
5687 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5688 goto failure;
5689 if (!info->shifter.operator_present
5690 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5691 {
5692 /* Default to LSL if not present. Libopcodes prefers shifter
5693 kind to be explicit. */
5694 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5695 info->shifter.kind = AARCH64_MOD_LSL;
5696 }
5697 break;
5698
5699 case AARCH64_OPND_FPIMM:
5700 case AARCH64_OPND_SIMD_FPIMM:
5701 case AARCH64_OPND_SVE_FPIMM8:
5702 {
5703 int qfloat;
5704 bfd_boolean dp_p;
5705
5706 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5707 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5708 || !aarch64_imm_float_p (qfloat))
5709 {
5710 if (!error_p ())
5711 set_fatal_syntax_error (_("invalid floating-point"
5712 " constant"));
5713 goto failure;
5714 }
5715 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5716 inst.base.operands[i].imm.is_fp = 1;
5717 }
5718 break;
5719
5720 case AARCH64_OPND_SVE_I1_HALF_ONE:
5721 case AARCH64_OPND_SVE_I1_HALF_TWO:
5722 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5723 {
5724 int qfloat;
5725 bfd_boolean dp_p;
5726
5727 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5728 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5729 {
5730 if (!error_p ())
5731 set_fatal_syntax_error (_("invalid floating-point"
5732 " constant"));
5733 goto failure;
5734 }
5735 inst.base.operands[i].imm.value = qfloat;
5736 inst.base.operands[i].imm.is_fp = 1;
5737 }
5738 break;
5739
5740 case AARCH64_OPND_LIMM:
5741 po_misc_or_fail (parse_shifter_operand (&str, info,
5742 SHIFTED_LOGIC_IMM));
5743 if (info->shifter.operator_present)
5744 {
5745 set_fatal_syntax_error
5746 (_("shift not allowed for bitmask immediate"));
5747 goto failure;
5748 }
5749 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5750 /* addr_off_p */ 0,
5751 /* need_libopcodes_p */ 1,
5752 /* skip_p */ 1);
5753 break;
5754
5755 case AARCH64_OPND_AIMM:
5756 if (opcode->op == OP_ADD)
5757 /* ADD may have relocation types. */
5758 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5759 SHIFTED_ARITH_IMM));
5760 else
5761 po_misc_or_fail (parse_shifter_operand (&str, info,
5762 SHIFTED_ARITH_IMM));
5763 switch (inst.reloc.type)
5764 {
5765 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5766 info->shifter.amount = 12;
5767 break;
5768 case BFD_RELOC_UNUSED:
5769 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5770 if (info->shifter.kind != AARCH64_MOD_NONE)
5771 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5772 inst.reloc.pc_rel = 0;
5773 break;
5774 default:
5775 break;
5776 }
5777 info->imm.value = 0;
5778 if (!info->shifter.operator_present)
5779 {
5780 /* Default to LSL if not present. Libopcodes prefers shifter
5781 kind to be explicit. */
5782 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5783 info->shifter.kind = AARCH64_MOD_LSL;
5784 }
5785 break;
5786
5787 case AARCH64_OPND_HALF:
5788 {
5789 /* #<imm16> or relocation. */
5790 int internal_fixup_p;
5791 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5792 if (internal_fixup_p)
5793 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5794 skip_whitespace (str);
5795 if (skip_past_comma (&str))
5796 {
5797 /* {, LSL #<shift>} */
5798 if (! aarch64_gas_internal_fixup_p ())
5799 {
5800 set_fatal_syntax_error (_("can't mix relocation modifier "
5801 "with explicit shift"));
5802 goto failure;
5803 }
5804 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5805 }
5806 else
5807 inst.base.operands[i].shifter.amount = 0;
5808 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5809 inst.base.operands[i].imm.value = 0;
5810 if (! process_movw_reloc_info ())
5811 goto failure;
5812 }
5813 break;
5814
5815 case AARCH64_OPND_EXCEPTION:
5816 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5817 imm_reg_type));
5818 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5819 /* addr_off_p */ 0,
5820 /* need_libopcodes_p */ 0,
5821 /* skip_p */ 1);
5822 break;
5823
5824 case AARCH64_OPND_NZCV:
5825 {
5826 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5827 if (nzcv != NULL)
5828 {
5829 str += 4;
5830 info->imm.value = nzcv->value;
5831 break;
5832 }
5833 po_imm_or_fail (0, 15);
5834 info->imm.value = val;
5835 }
5836 break;
5837
5838 case AARCH64_OPND_COND:
5839 case AARCH64_OPND_COND1:
5840 {
5841 char *start = str;
5842 do
5843 str++;
5844 while (ISALPHA (*str));
5845 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
5846 if (info->cond == NULL)
5847 {
5848 set_syntax_error (_("invalid condition"));
5849 goto failure;
5850 }
5851 else if (operands[i] == AARCH64_OPND_COND1
5852 && (info->cond->value & 0xe) == 0xe)
5853 {
5854 /* Do not allow AL or NV. */
5855 set_default_error ();
5856 goto failure;
5857 }
5858 }
5859 break;
5860
5861 case AARCH64_OPND_ADDR_ADRP:
5862 po_misc_or_fail (parse_adrp (&str));
5863 /* Clear the value as operand needs to be relocated. */
5864 info->imm.value = 0;
5865 break;
5866
5867 case AARCH64_OPND_ADDR_PCREL14:
5868 case AARCH64_OPND_ADDR_PCREL19:
5869 case AARCH64_OPND_ADDR_PCREL21:
5870 case AARCH64_OPND_ADDR_PCREL26:
5871 po_misc_or_fail (parse_address (&str, info));
5872 if (!info->addr.pcrel)
5873 {
5874 set_syntax_error (_("invalid pc-relative address"));
5875 goto failure;
5876 }
5877 if (inst.gen_lit_pool
5878 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5879 {
5880 /* Only permit "=value" in the literal load instructions.
5881 The literal will be generated by programmer_friendly_fixup. */
5882 set_syntax_error (_("invalid use of \"=immediate\""));
5883 goto failure;
5884 }
5885 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5886 {
5887 set_syntax_error (_("unrecognized relocation suffix"));
5888 goto failure;
5889 }
5890 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5891 {
5892 info->imm.value = inst.reloc.exp.X_add_number;
5893 inst.reloc.type = BFD_RELOC_UNUSED;
5894 }
5895 else
5896 {
5897 info->imm.value = 0;
5898 if (inst.reloc.type == BFD_RELOC_UNUSED)
5899 switch (opcode->iclass)
5900 {
5901 case compbranch:
5902 case condbranch:
5903 /* e.g. CBZ or B.COND */
5904 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5905 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5906 break;
5907 case testbranch:
5908 /* e.g. TBZ */
5909 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5910 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5911 break;
5912 case branch_imm:
5913 /* e.g. B or BL */
5914 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5915 inst.reloc.type =
5916 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5917 : BFD_RELOC_AARCH64_JUMP26;
5918 break;
5919 case loadlit:
5920 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5921 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5922 break;
5923 case pcreladdr:
5924 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5925 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5926 break;
5927 default:
5928 gas_assert (0);
5929 abort ();
5930 }
5931 inst.reloc.pc_rel = 1;
5932 }
5933 break;
5934
5935 case AARCH64_OPND_ADDR_SIMPLE:
5936 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5937 {
5938 /* [<Xn|SP>{, #<simm>}] */
5939 char *start = str;
5940 /* First use the normal address-parsing routines, to get
5941 the usual syntax errors. */
5942 po_misc_or_fail (parse_address (&str, info));
5943 if (info->addr.pcrel || info->addr.offset.is_reg
5944 || !info->addr.preind || info->addr.postind
5945 || info->addr.writeback)
5946 {
5947 set_syntax_error (_("invalid addressing mode"));
5948 goto failure;
5949 }
5950
5951 /* Then retry, matching the specific syntax of these addresses. */
5952 str = start;
5953 po_char_or_fail ('[');
5954 po_reg_or_fail (REG_TYPE_R64_SP);
5955 /* Accept optional ", #0". */
5956 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5957 && skip_past_char (&str, ','))
5958 {
5959 skip_past_char (&str, '#');
5960 if (! skip_past_char (&str, '0'))
5961 {
5962 set_fatal_syntax_error
5963 (_("the optional immediate offset can only be 0"));
5964 goto failure;
5965 }
5966 }
5967 po_char_or_fail (']');
5968 break;
5969 }
5970
5971 case AARCH64_OPND_ADDR_REGOFF:
5972 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5973 po_misc_or_fail (parse_address (&str, info));
5974 regoff_addr:
5975 if (info->addr.pcrel || !info->addr.offset.is_reg
5976 || !info->addr.preind || info->addr.postind
5977 || info->addr.writeback)
5978 {
5979 set_syntax_error (_("invalid addressing mode"));
5980 goto failure;
5981 }
5982 if (!info->shifter.operator_present)
5983 {
5984 /* Default to LSL if not present. Libopcodes prefers shifter
5985 kind to be explicit. */
5986 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5987 info->shifter.kind = AARCH64_MOD_LSL;
5988 }
5989 /* Qualifier to be deduced by libopcodes. */
5990 break;
5991
5992 case AARCH64_OPND_ADDR_SIMM7:
5993 po_misc_or_fail (parse_address (&str, info));
5994 if (info->addr.pcrel || info->addr.offset.is_reg
5995 || (!info->addr.preind && !info->addr.postind))
5996 {
5997 set_syntax_error (_("invalid addressing mode"));
5998 goto failure;
5999 }
6000 if (inst.reloc.type != BFD_RELOC_UNUSED)
6001 {
6002 set_syntax_error (_("relocation not allowed"));
6003 goto failure;
6004 }
6005 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6006 /* addr_off_p */ 1,
6007 /* need_libopcodes_p */ 1,
6008 /* skip_p */ 0);
6009 break;
6010
6011 case AARCH64_OPND_ADDR_SIMM9:
6012 case AARCH64_OPND_ADDR_SIMM9_2:
6013 po_misc_or_fail (parse_address (&str, info));
6014 if (info->addr.pcrel || info->addr.offset.is_reg
6015 || (!info->addr.preind && !info->addr.postind)
6016 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6017 && info->addr.writeback))
6018 {
6019 set_syntax_error (_("invalid addressing mode"));
6020 goto failure;
6021 }
6022 if (inst.reloc.type != BFD_RELOC_UNUSED)
6023 {
6024 set_syntax_error (_("relocation not allowed"));
6025 goto failure;
6026 }
6027 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6028 /* addr_off_p */ 1,
6029 /* need_libopcodes_p */ 1,
6030 /* skip_p */ 0);
6031 break;
6032
6033 case AARCH64_OPND_ADDR_SIMM10:
6034 po_misc_or_fail (parse_address (&str, info));
6035 if (info->addr.pcrel || info->addr.offset.is_reg
6036 || !info->addr.preind || info->addr.postind)
6037 {
6038 set_syntax_error (_("invalid addressing mode"));
6039 goto failure;
6040 }
6041 if (inst.reloc.type != BFD_RELOC_UNUSED)
6042 {
6043 set_syntax_error (_("relocation not allowed"));
6044 goto failure;
6045 }
6046 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6047 /* addr_off_p */ 1,
6048 /* need_libopcodes_p */ 1,
6049 /* skip_p */ 0);
6050 break;
6051
6052 case AARCH64_OPND_ADDR_UIMM12:
6053 po_misc_or_fail (parse_address (&str, info));
6054 if (info->addr.pcrel || info->addr.offset.is_reg
6055 || !info->addr.preind || info->addr.writeback)
6056 {
6057 set_syntax_error (_("invalid addressing mode"));
6058 goto failure;
6059 }
6060 if (inst.reloc.type == BFD_RELOC_UNUSED)
6061 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6062 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6063 || (inst.reloc.type
6064 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6065 || (inst.reloc.type
6066 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
6067 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6068 /* Leave qualifier to be determined by libopcodes. */
6069 break;
6070
6071 case AARCH64_OPND_SIMD_ADDR_POST:
6072 /* [<Xn|SP>], <Xm|#<amount>> */
6073 po_misc_or_fail (parse_address (&str, info));
6074 if (!info->addr.postind || !info->addr.writeback)
6075 {
6076 set_syntax_error (_("invalid addressing mode"));
6077 goto failure;
6078 }
6079 if (!info->addr.offset.is_reg)
6080 {
6081 if (inst.reloc.exp.X_op == O_constant)
6082 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6083 else
6084 {
6085 set_fatal_syntax_error
6086 (_("writeback value must be an immediate constant"));
6087 goto failure;
6088 }
6089 }
6090 /* No qualifier. */
6091 break;
6092
6093 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6094 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6095 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6096 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6097 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6098 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6099 case AARCH64_OPND_SVE_ADDR_RI_U6:
6100 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6101 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6102 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6103 /* [X<n>{, #imm, MUL VL}]
6104 [X<n>{, #imm}]
6105 but recognizing SVE registers. */
6106 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6107 &offset_qualifier));
6108 if (base_qualifier != AARCH64_OPND_QLF_X)
6109 {
6110 set_syntax_error (_("invalid addressing mode"));
6111 goto failure;
6112 }
6113 sve_regimm:
6114 if (info->addr.pcrel || info->addr.offset.is_reg
6115 || !info->addr.preind || info->addr.writeback)
6116 {
6117 set_syntax_error (_("invalid addressing mode"));
6118 goto failure;
6119 }
6120 if (inst.reloc.type != BFD_RELOC_UNUSED
6121 || inst.reloc.exp.X_op != O_constant)
6122 {
6123 /* Make sure this has priority over
6124 "invalid addressing mode". */
6125 set_fatal_syntax_error (_("constant offset required"));
6126 goto failure;
6127 }
6128 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6129 break;
6130
6131 case AARCH64_OPND_SVE_ADDR_RR:
6132 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6133 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6134 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6135 case AARCH64_OPND_SVE_ADDR_RX:
6136 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6137 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6138 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6139 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6140 but recognizing SVE registers. */
6141 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6142 &offset_qualifier));
6143 if (base_qualifier != AARCH64_OPND_QLF_X
6144 || offset_qualifier != AARCH64_OPND_QLF_X)
6145 {
6146 set_syntax_error (_("invalid addressing mode"));
6147 goto failure;
6148 }
6149 goto regoff_addr;
6150
6151 case AARCH64_OPND_SVE_ADDR_RZ:
6152 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6153 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6154 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6155 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6156 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6157 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6158 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6159 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6160 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6161 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6162 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6163 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6164 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6165 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6166 &offset_qualifier));
6167 if (base_qualifier != AARCH64_OPND_QLF_X
6168 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6169 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6170 {
6171 set_syntax_error (_("invalid addressing mode"));
6172 goto failure;
6173 }
6174 info->qualifier = offset_qualifier;
6175 goto regoff_addr;
6176
6177 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6178 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6179 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6180 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6181 /* [Z<n>.<T>{, #imm}] */
6182 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6183 &offset_qualifier));
6184 if (base_qualifier != AARCH64_OPND_QLF_S_S
6185 && base_qualifier != AARCH64_OPND_QLF_S_D)
6186 {
6187 set_syntax_error (_("invalid addressing mode"));
6188 goto failure;
6189 }
6190 info->qualifier = base_qualifier;
6191 goto sve_regimm;
6192
6193 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6194 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6195 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6196 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6197 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6198
6199 We don't reject:
6200
6201 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6202
6203 here since we get better error messages by leaving it to
6204 the qualifier checking routines. */
6205 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6206 &offset_qualifier));
6207 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6208 && base_qualifier != AARCH64_OPND_QLF_S_D)
6209 || offset_qualifier != base_qualifier)
6210 {
6211 set_syntax_error (_("invalid addressing mode"));
6212 goto failure;
6213 }
6214 info->qualifier = base_qualifier;
6215 goto regoff_addr;
6216
6217 case AARCH64_OPND_SYSREG:
6218 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
6219 == PARSE_FAIL)
6220 {
6221 set_syntax_error (_("unknown or missing system register name"));
6222 goto failure;
6223 }
6224 inst.base.operands[i].sysreg = val;
6225 break;
6226
6227 case AARCH64_OPND_PSTATEFIELD:
6228 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
6229 == PARSE_FAIL)
6230 {
6231 set_syntax_error (_("unknown or missing PSTATE field name"));
6232 goto failure;
6233 }
6234 inst.base.operands[i].pstatefield = val;
6235 break;
6236
6237 case AARCH64_OPND_SYSREG_IC:
6238 inst.base.operands[i].sysins_op =
6239 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6240 goto sys_reg_ins;
6241 case AARCH64_OPND_SYSREG_DC:
6242 inst.base.operands[i].sysins_op =
6243 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6244 goto sys_reg_ins;
6245 case AARCH64_OPND_SYSREG_AT:
6246 inst.base.operands[i].sysins_op =
6247 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6248 goto sys_reg_ins;
6249 case AARCH64_OPND_SYSREG_TLBI:
6250 inst.base.operands[i].sysins_op =
6251 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6252 sys_reg_ins:
6253 if (inst.base.operands[i].sysins_op == NULL)
6254 {
6255 set_fatal_syntax_error ( _("unknown or missing operation name"));
6256 goto failure;
6257 }
6258 break;
6259
6260 case AARCH64_OPND_BARRIER:
6261 case AARCH64_OPND_BARRIER_ISB:
6262 val = parse_barrier (&str);
6263 if (val != PARSE_FAIL
6264 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6265 {
6266 /* ISB only accepts options name 'sy'. */
6267 set_syntax_error
6268 (_("the specified option is not accepted in ISB"));
6269 /* Turn off backtrack as this optional operand is present. */
6270 backtrack_pos = 0;
6271 goto failure;
6272 }
6273 /* This is an extension to accept a 0..15 immediate. */
6274 if (val == PARSE_FAIL)
6275 po_imm_or_fail (0, 15);
6276 info->barrier = aarch64_barrier_options + val;
6277 break;
6278
6279 case AARCH64_OPND_PRFOP:
6280 val = parse_pldop (&str);
6281 /* This is an extension to accept a 0..31 immediate. */
6282 if (val == PARSE_FAIL)
6283 po_imm_or_fail (0, 31);
6284 inst.base.operands[i].prfop = aarch64_prfops + val;
6285 break;
6286
6287 case AARCH64_OPND_BARRIER_PSB:
6288 val = parse_barrier_psb (&str, &(info->hint_option));
6289 if (val == PARSE_FAIL)
6290 goto failure;
6291 break;
6292
6293 default:
6294 as_fatal (_("unhandled operand code %d"), operands[i]);
6295 }
6296
6297 /* If we get here, this operand was successfully parsed. */
6298 inst.base.operands[i].present = 1;
6299 continue;
6300
6301 failure:
6302 /* The parse routine should already have set the error, but in case
6303 not, set a default one here. */
6304 if (! error_p ())
6305 set_default_error ();
6306
6307 if (! backtrack_pos)
6308 goto parse_operands_return;
6309
6310 {
6311 /* We reach here because this operand is marked as optional, and
6312 either no operand was supplied or the operand was supplied but it
6313 was syntactically incorrect. In the latter case we report an
6314 error. In the former case we perform a few more checks before
6315 dropping through to the code to insert the default operand. */
6316
6317 char *tmp = backtrack_pos;
6318 char endchar = END_OF_INSN;
6319
6320 if (i != (aarch64_num_of_operands (opcode) - 1))
6321 endchar = ',';
6322 skip_past_char (&tmp, ',');
6323
6324 if (*tmp != endchar)
6325 /* The user has supplied an operand in the wrong format. */
6326 goto parse_operands_return;
6327
6328 /* Make sure there is not a comma before the optional operand.
6329 For example the fifth operand of 'sys' is optional:
6330
6331 sys #0,c0,c0,#0, <--- wrong
6332 sys #0,c0,c0,#0 <--- correct. */
6333 if (comma_skipped_p && i && endchar == END_OF_INSN)
6334 {
6335 set_fatal_syntax_error
6336 (_("unexpected comma before the omitted optional operand"));
6337 goto parse_operands_return;
6338 }
6339 }
6340
6341 /* Reaching here means we are dealing with an optional operand that is
6342 omitted from the assembly line. */
6343 gas_assert (optional_operand_p (opcode, i));
6344 info->present = 0;
6345 process_omitted_operand (operands[i], opcode, i, info);
6346
6347 /* Try again, skipping the optional operand at backtrack_pos. */
6348 str = backtrack_pos;
6349 backtrack_pos = 0;
6350
6351 /* Clear any error record after the omitted optional operand has been
6352 successfully handled. */
6353 clear_error ();
6354 }
6355
6356 /* Check if we have parsed all the operands. */
6357 if (*str != '\0' && ! error_p ())
6358 {
6359 /* Set I to the index of the last present operand; this is
6360 for the purpose of diagnostics. */
6361 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6362 ;
6363 set_fatal_syntax_error
6364 (_("unexpected characters following instruction"));
6365 }
6366
6367 parse_operands_return:
6368
6369 if (error_p ())
6370 {
6371 DEBUG_TRACE ("parsing FAIL: %s - %s",
6372 operand_mismatch_kind_names[get_error_kind ()],
6373 get_error_message ());
6374 /* Record the operand error properly; this is useful when there
6375 are multiple instruction templates for a mnemonic name, so that
6376 later on, we can select the error that most closely describes
6377 the problem. */
6378 record_operand_error (opcode, i, get_error_kind (),
6379 get_error_message ());
6380 return FALSE;
6381 }
6382 else
6383 {
6384 DEBUG_TRACE ("parsing SUCCESS");
6385 return TRUE;
6386 }
6387 }
6388
/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* The fix-ups applied depend on the instruction class of the opcode
     that matched during parsing.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register limits the testable bit number to 0..31.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* Rewrite the qualifier to the X form accepted by libopcodes;
	     the W spelling is only a programmer convenience here.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  /* Literal pool entry size follows the destination register size,
	     except LDRSW which always loads a 32-bit word.  */
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  /* Only constant, big-number or symbolic expressions can be placed
	     in the literal pool.  */
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	/* Silently narrow the accepted Xd back to the preferred Wd form.  */
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	/* The extended register operand is the second or third operand of
	   every addsub_ext opcode.  */
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6496
6497 /* Check for loads and stores that will cause unpredictable behavior. */
6498
6499 static void
6500 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6501 {
6502 aarch64_inst *base = &instr->base;
6503 const aarch64_opcode *opcode = base->opcode;
6504 const aarch64_opnd_info *opnds = base->operands;
6505 switch (opcode->iclass)
6506 {
6507 case ldst_pos:
6508 case ldst_imm9:
6509 case ldst_imm10:
6510 case ldst_unscaled:
6511 case ldst_unpriv:
6512 /* Loading/storing the base register is unpredictable if writeback. */
6513 if ((aarch64_get_operand_class (opnds[0].type)
6514 == AARCH64_OPND_CLASS_INT_REG)
6515 && opnds[0].reg.regno == opnds[1].addr.base_regno
6516 && opnds[1].addr.base_regno != REG_SP
6517 && opnds[1].addr.writeback)
6518 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6519 break;
6520 case ldstpair_off:
6521 case ldstnapair_offs:
6522 case ldstpair_indexed:
6523 /* Loading/storing the base register is unpredictable if writeback. */
6524 if ((aarch64_get_operand_class (opnds[0].type)
6525 == AARCH64_OPND_CLASS_INT_REG)
6526 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6527 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6528 && opnds[2].addr.base_regno != REG_SP
6529 && opnds[2].addr.writeback)
6530 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6531 /* Load operations must load different registers. */
6532 if ((opcode->opcode & (1 << 22))
6533 && opnds[0].reg.regno == opnds[1].reg.regno)
6534 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6535 break;
6536 default:
6537 break;
6538 }
6539 }
6540
6541 /* A wrapper function to interface with libopcodes on encoding and
6542 record the error message if there is any.
6543
6544 Return TRUE on success; otherwise return FALSE. */
6545
6546 static bfd_boolean
6547 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6548 aarch64_insn *code)
6549 {
6550 aarch64_operand_error error_info;
6551 error_info.kind = AARCH64_OPDE_NIL;
6552 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
6553 return TRUE;
6554 else
6555 {
6556 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6557 record_operand_error_info (opcode, &error_info);
6558 return FALSE;
6559 }
6560 }
6561
#ifdef DEBUG_AARCH64
/* Emit one verbose-debug line per operand of OPCODE, preferring the
   operand's name and falling back to its description when the name
   is empty.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int idx;

  for (idx = 0; opcode->operands[idx] != AARCH64_OPND_NIL; idx++)
    {
      const char *text = aarch64_get_operand_name (opcode->operands[idx]);

      if (text[0] == '\0')
	text = aarch64_get_operand_desc (opcode->operands[idx]);
      aarch64_verbose ("\t\t opnd%d: %s", idx, text);
    }
}
#endif /* DEBUG_AARCH64 */
6577
6578 /* This is the guts of the machine-dependent assembler. STR points to a
6579 machine dependent instruction. This function is supposed to emit
6580 the frags/bytes it assembles to. */
6581
void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed. */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* No relocation requested yet; operand parsing may set one up.  */
  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Advance P past the mnemonic; TEMPLATE heads the chain of opcode
     entries sharing that mnemonic.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive. */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed. */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* reset_aarch64_instruction wipes INST wholesale; preserve the
     condition code already parsed from the mnemonic suffix.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name. */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond. */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional. */
	  gas_assert (0);
	  continue;
	}

      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU. */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up. */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }
	  return;
	}

      /* This template failed to parse or encode; any diagnostics were
	 recorded.  Retry with the next entry on fresh state.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any. */
  output_operand_error_report (str);
}
6711
6712 /* Various frobbings of labels and their addresses. */
6713
void
aarch64_start_line_hook (void)
{
  /* A new input line begins: any label seen on the previous line must
     no longer be re-aligned by md_assemble.  */
  last_label_seen = NULL;
}
6719
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the label so md_assemble can align it to the following
     instruction if necessary.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
6727
6728 int
6729 aarch64_data_in_code (void)
6730 {
6731 if (!strncmp (input_line_pointer + 1, "data:", 5))
6732 {
6733 *input_line_pointer = '/';
6734 input_line_pointer += 5;
6735 *input_line_pointer = 0;
6736 return 1;
6737 }
6738
6739 return 0;
6740 }
6741
/* Strip a trailing "/data" suffix (left behind by data-in-code
   marking) from NAME, in place.  Returns NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  int len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
6752 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* Entry fields: spelling (stringized), register number, REG_TYPE_<t>,
   and TRUE marking the name as built-in (not user-defined).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Registers 0-15 with prefix P; used alone for the 16 SVE predicates.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Registers 0-30; number 31 is excluded because for the integer file
   it is spelled sp/wsp or xzr/wzr (defined explicitly below).  */
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
/* Full 0-31 set, for register files where 31 is an ordinary number.  */
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

/* The helper macros are local to the table above.  */
#undef REGDEF
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
6816
/* Names for 4-bit condition-flag immediates.  An uppercase letter in a
   name means the corresponding flag bit is 1; B() packs the four flags
   as bit 3 = N, bit 2 = Z, bit 1 = C, bit 0 = V.  All 16 case
   combinations are listed.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

/* The single-letter flag macros are scoped to the table above.  */
#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
6854 \f
6855 /* MD interface: bits in the object file. */
6856
6857 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6858 for use in the a.out file, and stores them in the array pointed to by buf.
6859 This knows about the endian-ness of the target machine and does
6860 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
6861 2 (short) and 4 (long) Floating numbers are put out as a series of
6862 LITTLENUMS (shorts, here at least). */
6863
6864 void
6865 md_number_to_chars (char *buf, valueT val, int n)
6866 {
6867 if (target_big_endian)
6868 number_to_chars_bigendian (buf, val, n);
6869 else
6870 number_to_chars_littleendian (buf, val, n);
6871 }
6872
6873 /* MD interface: Sections. */
6874
6875 /* Estimate the size of a frag before relaxing. Assume everything fits in
6876 4 bytes. */
6877
6878 int
6879 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6880 {
6881 fragp->fr_var = 4;
6882 return 4;
6883 }
6884
6885 /* Round up a section size to the appropriate boundary. */
6886
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra rounding is applied here; the section's recorded
     alignment is sufficient.  */
  return size;
}
6892
6893 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6894 of an rs_align_code fragment.
6895
6896 Here we fill the frag with the appropriate info for padding the
6897 output stream. The resulting frag will consist of a fixed (fr_fix)
6898 and of a repeating (fr_var) part.
6899
6900 The fixed content is always emitted before the repeating content and
6901 these two parts are used as follows in constructing the output:
6902 - the fixed part will be used to align to a valid instruction word
6903 boundary, in case that we start at a misaligned address; as no
6904 executable instruction can live at the misaligned location, we
6905 simply fill with zeros;
6906 - the variable part will be used to cover the remaining padding and
6907 we fill using the AArch64 NOP instruction.
6908
6909 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6910 enough storage space for up to 3 bytes for padding the back to a valid
6911 instruction alignment and exactly 4 bytes to store the NOP pattern. */
6912
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian. */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding between the end of this frag's fixed part and the
     start of the next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Number of zero bytes needed to reach a NOP-sized (4-byte)
     boundary; these form the fixed (fr_fix) part of the frag.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating (fr_var) part is one NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
6950
6951 /* Perform target specific initialisation of a frag.
6952 Note - despite the name this initialisation is not done when the frag
6953 is created, but only when its type is assigned. A frag can be created
6954 and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */
6956
#ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping-symbol state, so there is
   nothing to initialise.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}
6963
6964 #else /* OBJ_ELF is defined. */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags. We will delete this
     later if the alignment ends up empty. */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      /* Fill contents are treated as data for mapping purposes.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state. */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      /* Code alignment is padded with instructions (NOPs).  */
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
6991 \f
6992 /* Initialize the DWARF-2 unwind information for this procedure. */
6993
void
tc_aarch64_frame_initial_instructions (void)
{
  /* On function entry the CFA is SP with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
6999 #endif /* OBJ_ELF */
7000
7001 /* Convert REGNAME to a DWARF-2 register number. */
7002
7003 int
7004 tc_aarch64_regname_to_dw2regnum (char *regname)
7005 {
7006 const reg_entry *reg = parse_reg (&regname);
7007 if (reg == NULL)
7008 return -1;
7009
7010 switch (reg->type)
7011 {
7012 case REG_TYPE_SP_32:
7013 case REG_TYPE_SP_64:
7014 case REG_TYPE_R_32:
7015 case REG_TYPE_R_64:
7016 return reg->number;
7017
7018 case REG_TYPE_FP_B:
7019 case REG_TYPE_FP_H:
7020 case REG_TYPE_FP_S:
7021 case REG_TYPE_FP_D:
7022 case REG_TYPE_FP_Q:
7023 return reg->number + 64;
7024
7025 default:
7026 break;
7027 }
7028 return -1;
7029 }
7030
7031 /* Implement DWARF2_ADDR_SIZE. */
7032
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses even though the ISA is 64-bit.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7042
7043 /* MD interface: Symbol and relocation handling. */
7044
7045 /* Return the address within the segment that a PC-relative fixup is
7046 relative to. For AArch64 PC-relative fixups applied to instructions
7047 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7048
7049 long
7050 md_pcrel_from_section (fixS * fixP, segT seg)
7051 {
7052 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7053
7054 /* If this is pc-relative and we are going to emit a relocation
7055 then we just want to put out any pipeline compensation that the linker
7056 will need. Otherwise we want to use the calculated base. */
7057 if (fixP->fx_pcrel
7058 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7059 || aarch64_force_relocation (fixP)))
7060 base = 0;
7061
7062 /* AArch64 should be consistent for all pc-relative relocations. */
7063 return base + AARCH64_PCREL_OFFSET;
7064 }
7065
7066 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7067 Otherwise we have no need to default values of symbols. */
7068
symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character prefix test before the full compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  /* The GOT symbol is created lazily, exactly once.  */
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7091
7092 /* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by an unsigned number with the indicated number of
7094 BITS. */
7095
7096 static bfd_boolean
7097 unsigned_overflow (valueT value, unsigned bits)
7098 {
7099 valueT lim;
7100 if (bits >= sizeof (valueT) * 8)
7101 return FALSE;
7102 lim = (valueT) 1 << bits;
7103 return (value >= lim);
7104 }
7105
7106
7107 /* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by a signed number with the indicated number of
7109 BITS. */
7110
7111 static bfd_boolean
7112 signed_overflow (offsetT value, unsigned bits)
7113 {
7114 offsetT lim;
7115 if (bits >= sizeof (offsetT) * 8)
7116 return FALSE;
7117 lim = (offsetT) 1 << (bits - 1);
7118 return (value < -lim || value >= lim);
7119 }
7120
7121 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7122 unsigned immediate offset load/store instruction, try to encode it as
7123 an unscaled, 9-bit, signed immediate offset load/store instruction.
7124 Return TRUE if it is successful; otherwise return FALSE.
7125
7126 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7127 in response to the standard LDR/STR mnemonics when the immediate offset is
7128 unambiguous, i.e. when it is negative or unaligned. */
7129
static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; OP_NIL
     marks opcodes with no such counterpart.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier. */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode and refreshed qualifier.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
    return FALSE;

  return TRUE;
}
7182
7183 /* Called by fix_insn to fix a MOV immediate alias instruction.
7184
7185 Operand for a generic move immediate instruction, which is an alias
7186 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
7187 a 32-bit/64-bit immediate value into general register. An assembler error
7188 shall result if the immediate cannot be created by a single one of these
7189 instructions. If there is a choice, then to ensure reversability an
7190 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
7191
7192 static void
7193 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7194 {
7195 const aarch64_opcode *opcode;
7196
7197 /* Need to check if the destination is SP/ZR. The check has to be done
7198 before any aarch64_replace_opcode. */
7199 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7200 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7201
7202 instr->operands[1].imm.value = value;
7203 instr->operands[1].skip = 0;
7204
7205 if (try_mov_wide_p)
7206 {
7207 /* Try the MOVZ alias. */
7208 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7209 aarch64_replace_opcode (instr, opcode);
7210 if (aarch64_opcode_encode (instr->opcode, instr,
7211 &instr->value, NULL, NULL))
7212 {
7213 put_aarch64_insn (buf, instr->value);
7214 return;
7215 }
7216 /* Try the MOVK alias. */
7217 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7218 aarch64_replace_opcode (instr, opcode);
7219 if (aarch64_opcode_encode (instr->opcode, instr,
7220 &instr->value, NULL, NULL))
7221 {
7222 put_aarch64_insn (buf, instr->value);
7223 return;
7224 }
7225 }
7226
7227 if (try_mov_bitmask_p)
7228 {
7229 /* Try the ORR alias. */
7230 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7231 aarch64_replace_opcode (instr, opcode);
7232 if (aarch64_opcode_encode (instr->opcode, instr,
7233 &instr->value, NULL, NULL))
7234 {
7235 put_aarch64_insn (buf, instr->value);
7236 return;
7237 }
7238 }
7239
7240 as_bad_where (fixP->fx_file, fixP->fx_line,
7241 _("immediate cannot be moved by a single instruction"));
7242 }
7243
7244 /* An instruction operand which is immediate related may have symbol used
7245 in the assembly, e.g.
7246
7247 mov w0, u32
7248 .set u32, 0x00ffff00
7249
7250 At the time when the assembly instruction is parsed, a referenced symbol,
7251 like 'u32' in the above example may not have been seen; a fixS is created
7252 in such a case and is handled here after symbols have been resolved.
7253 Instruction is fixed up with VALUE using the information in *FIXP plus
7254 extra information in FLAGS.
7255
7256 This function is called by md_apply_fix to fix up instructions that need
7257 a fix-up described above but does not involve any linker-time relocation. */
7258
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly. */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  /* Dispatch on the kind of operand being fixed; each case patches the
     already-emitted instruction word at BUF.  */
  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit exception-generation immediate, OR-ed into the word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2  2  2 21111 111111
		  1  098|7654|3  2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2   221111111111
		  1  098|7654|3 2   109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set. */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub. */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit. */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction through
	 libopcodes since the bitmask encoding is not a simple insert.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate. */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register. An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions. If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR. */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
      /* Immediate offset in an address. */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand. */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
	idx = 2;

      /* Update the resolved offset value. */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up. */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Programmer-friendly fallback: the scaled form did not
	     encode, but the unscaled (LDUR/STUR-style) form did.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7419
7420 /* Apply a fixup (fixP) to segment data, once it has been determined
7421 by our caller that we have all the info we need to fix it up.
7422
7423 Parameter valP is the pointer to the value of the bits. */
7424
7425 void
7426 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7427 {
7428 offsetT value = *valP;
7429 uint32_t insn;
7430 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7431 int scale;
7432 unsigned flags = fixP->fx_addnumber;
7433
7434 DEBUG_TRACE ("\n\n");
7435 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7436 DEBUG_TRACE ("Enter md_apply_fix");
7437
7438 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7439
7440 /* Note whether this will delete the relocation. */
7441
7442 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7443 fixP->fx_done = 1;
7444
7445 /* Process the relocations. */
7446 switch (fixP->fx_r_type)
7447 {
7448 case BFD_RELOC_NONE:
7449 /* This will need to go in the object file. */
7450 fixP->fx_done = 0;
7451 break;
7452
7453 case BFD_RELOC_8:
7454 case BFD_RELOC_8_PCREL:
7455 if (fixP->fx_done || !seg->use_rela_p)
7456 md_number_to_chars (buf, value, 1);
7457 break;
7458
7459 case BFD_RELOC_16:
7460 case BFD_RELOC_16_PCREL:
7461 if (fixP->fx_done || !seg->use_rela_p)
7462 md_number_to_chars (buf, value, 2);
7463 break;
7464
7465 case BFD_RELOC_32:
7466 case BFD_RELOC_32_PCREL:
7467 if (fixP->fx_done || !seg->use_rela_p)
7468 md_number_to_chars (buf, value, 4);
7469 break;
7470
7471 case BFD_RELOC_64:
7472 case BFD_RELOC_64_PCREL:
7473 if (fixP->fx_done || !seg->use_rela_p)
7474 md_number_to_chars (buf, value, 8);
7475 break;
7476
7477 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7478 /* We claim that these fixups have been processed here, even if
7479 in fact we generate an error because we do not have a reloc
7480 for them, so tc_gen_reloc() will reject them. */
7481 fixP->fx_done = 1;
7482 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7483 {
7484 as_bad_where (fixP->fx_file, fixP->fx_line,
7485 _("undefined symbol %s used as an immediate value"),
7486 S_GET_NAME (fixP->fx_addsy));
7487 goto apply_fix_return;
7488 }
7489 fix_insn (fixP, flags, value);
7490 break;
7491
7492 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7493 if (fixP->fx_done || !seg->use_rela_p)
7494 {
7495 if (value & 3)
7496 as_bad_where (fixP->fx_file, fixP->fx_line,
7497 _("pc-relative load offset not word aligned"));
7498 if (signed_overflow (value, 21))
7499 as_bad_where (fixP->fx_file, fixP->fx_line,
7500 _("pc-relative load offset out of range"));
7501 insn = get_aarch64_insn (buf);
7502 insn |= encode_ld_lit_ofs_19 (value >> 2);
7503 put_aarch64_insn (buf, insn);
7504 }
7505 break;
7506
7507 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7508 if (fixP->fx_done || !seg->use_rela_p)
7509 {
7510 if (signed_overflow (value, 21))
7511 as_bad_where (fixP->fx_file, fixP->fx_line,
7512 _("pc-relative address offset out of range"));
7513 insn = get_aarch64_insn (buf);
7514 insn |= encode_adr_imm (value);
7515 put_aarch64_insn (buf, insn);
7516 }
7517 break;
7518
7519 case BFD_RELOC_AARCH64_BRANCH19:
7520 if (fixP->fx_done || !seg->use_rela_p)
7521 {
7522 if (value & 3)
7523 as_bad_where (fixP->fx_file, fixP->fx_line,
7524 _("conditional branch target not word aligned"));
7525 if (signed_overflow (value, 21))
7526 as_bad_where (fixP->fx_file, fixP->fx_line,
7527 _("conditional branch out of range"));
7528 insn = get_aarch64_insn (buf);
7529 insn |= encode_cond_branch_ofs_19 (value >> 2);
7530 put_aarch64_insn (buf, insn);
7531 }
7532 break;
7533
7534 case BFD_RELOC_AARCH64_TSTBR14:
7535 if (fixP->fx_done || !seg->use_rela_p)
7536 {
7537 if (value & 3)
7538 as_bad_where (fixP->fx_file, fixP->fx_line,
7539 _("conditional branch target not word aligned"));
7540 if (signed_overflow (value, 16))
7541 as_bad_where (fixP->fx_file, fixP->fx_line,
7542 _("conditional branch out of range"));
7543 insn = get_aarch64_insn (buf);
7544 insn |= encode_tst_branch_ofs_14 (value >> 2);
7545 put_aarch64_insn (buf, insn);
7546 }
7547 break;
7548
7549 case BFD_RELOC_AARCH64_CALL26:
7550 case BFD_RELOC_AARCH64_JUMP26:
7551 if (fixP->fx_done || !seg->use_rela_p)
7552 {
7553 if (value & 3)
7554 as_bad_where (fixP->fx_file, fixP->fx_line,
7555 _("branch target not word aligned"));
7556 if (signed_overflow (value, 28))
7557 as_bad_where (fixP->fx_file, fixP->fx_line,
7558 _("branch out of range"));
7559 insn = get_aarch64_insn (buf);
7560 insn |= encode_branch_ofs_26 (value >> 2);
7561 put_aarch64_insn (buf, insn);
7562 }
7563 break;
7564
7565 case BFD_RELOC_AARCH64_MOVW_G0:
7566 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7567 case BFD_RELOC_AARCH64_MOVW_G0_S:
7568 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7569 scale = 0;
7570 goto movw_common;
7571 case BFD_RELOC_AARCH64_MOVW_G1:
7572 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7573 case BFD_RELOC_AARCH64_MOVW_G1_S:
7574 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7575 scale = 16;
7576 goto movw_common;
7577 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7578 scale = 0;
7579 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7580 /* Should always be exported to object file, see
7581 aarch64_force_relocation(). */
7582 gas_assert (!fixP->fx_done);
7583 gas_assert (seg->use_rela_p);
7584 goto movw_common;
7585 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7586 scale = 16;
7587 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7588 /* Should always be exported to object file, see
7589 aarch64_force_relocation(). */
7590 gas_assert (!fixP->fx_done);
7591 gas_assert (seg->use_rela_p);
7592 goto movw_common;
7593 case BFD_RELOC_AARCH64_MOVW_G2:
7594 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7595 case BFD_RELOC_AARCH64_MOVW_G2_S:
7596 scale = 32;
7597 goto movw_common;
7598 case BFD_RELOC_AARCH64_MOVW_G3:
7599 scale = 48;
7600 movw_common:
7601 if (fixP->fx_done || !seg->use_rela_p)
7602 {
7603 insn = get_aarch64_insn (buf);
7604
7605 if (!fixP->fx_done)
7606 {
7607 /* REL signed addend must fit in 16 bits */
7608 if (signed_overflow (value, 16))
7609 as_bad_where (fixP->fx_file, fixP->fx_line,
7610 _("offset out of range"));
7611 }
7612 else
7613 {
7614 /* Check for overflow and scale. */
7615 switch (fixP->fx_r_type)
7616 {
7617 case BFD_RELOC_AARCH64_MOVW_G0:
7618 case BFD_RELOC_AARCH64_MOVW_G1:
7619 case BFD_RELOC_AARCH64_MOVW_G2:
7620 case BFD_RELOC_AARCH64_MOVW_G3:
7621 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7622 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7623 if (unsigned_overflow (value, scale + 16))
7624 as_bad_where (fixP->fx_file, fixP->fx_line,
7625 _("unsigned value out of range"));
7626 break;
7627 case BFD_RELOC_AARCH64_MOVW_G0_S:
7628 case BFD_RELOC_AARCH64_MOVW_G1_S:
7629 case BFD_RELOC_AARCH64_MOVW_G2_S:
7630 /* NOTE: We can only come here with movz or movn. */
7631 if (signed_overflow (value, scale + 16))
7632 as_bad_where (fixP->fx_file, fixP->fx_line,
7633 _("signed value out of range"));
7634 if (value < 0)
7635 {
7636 /* Force use of MOVN. */
7637 value = ~value;
7638 insn = reencode_movzn_to_movn (insn);
7639 }
7640 else
7641 {
7642 /* Force use of MOVZ. */
7643 insn = reencode_movzn_to_movz (insn);
7644 }
7645 break;
7646 default:
7647 /* Unchecked relocations. */
7648 break;
7649 }
7650 value >>= scale;
7651 }
7652
7653 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7654 insn |= encode_movw_imm (value & 0xffff);
7655
7656 put_aarch64_insn (buf, insn);
7657 }
7658 break;
7659
7660 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7661 fixP->fx_r_type = (ilp32_p
7662 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7663 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7664 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7665 /* Should always be exported to object file, see
7666 aarch64_force_relocation(). */
7667 gas_assert (!fixP->fx_done);
7668 gas_assert (seg->use_rela_p);
7669 break;
7670
7671 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7672 fixP->fx_r_type = (ilp32_p
7673 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7674 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
7675 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7676 /* Should always be exported to object file, see
7677 aarch64_force_relocation(). */
7678 gas_assert (!fixP->fx_done);
7679 gas_assert (seg->use_rela_p);
7680 break;
7681
7682 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7683 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7684 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7685 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7686 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7687 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7688 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7689 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7690 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7691 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7692 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7693 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7694 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7695 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7696 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7697 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7698 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7699 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7700 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7701 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7702 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7703 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7704 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7705 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7706 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7707 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7708 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7709 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7710 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7711 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7712 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7713 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7714 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7715 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7716 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7717 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7718 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7719 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7720 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7721 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7722 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7723 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7724 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7725 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7726 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7727 /* Should always be exported to object file, see
7728 aarch64_force_relocation(). */
7729 gas_assert (!fixP->fx_done);
7730 gas_assert (seg->use_rela_p);
7731 break;
7732
7733 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7734 /* Should always be exported to object file, see
7735 aarch64_force_relocation(). */
7736 fixP->fx_r_type = (ilp32_p
7737 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7738 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7739 gas_assert (!fixP->fx_done);
7740 gas_assert (seg->use_rela_p);
7741 break;
7742
7743 case BFD_RELOC_AARCH64_ADD_LO12:
7744 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7745 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7746 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7747 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7748 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7749 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7750 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7751 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7752 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7753 case BFD_RELOC_AARCH64_LDST128_LO12:
7754 case BFD_RELOC_AARCH64_LDST16_LO12:
7755 case BFD_RELOC_AARCH64_LDST32_LO12:
7756 case BFD_RELOC_AARCH64_LDST64_LO12:
7757 case BFD_RELOC_AARCH64_LDST8_LO12:
7758 /* Should always be exported to object file, see
7759 aarch64_force_relocation(). */
7760 gas_assert (!fixP->fx_done);
7761 gas_assert (seg->use_rela_p);
7762 break;
7763
7764 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7765 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7766 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7767 break;
7768
7769 case BFD_RELOC_UNUSED:
7770 /* An error will already have been reported. */
7771 break;
7772
7773 default:
7774 as_bad_where (fixP->fx_file, fixP->fx_line,
7775 _("unexpected %s fixup"),
7776 bfd_get_reloc_code_name (fixP->fx_r_type));
7777 break;
7778 }
7779
7780 apply_fix_return:
7781 /* Free the allocated the struct aarch64_inst.
7782 N.B. currently there are very limited number of fix-up types actually use
7783 this field, so the impact on the performance should be minimal . */
7784 if (fixP->tc_fix_data.inst != NULL)
7785 free (fixP->tc_fix_data.inst);
7786
7787 return;
7788 }
7789
7790 /* Translate internal representation of relocation info to BFD target
7791 format. */
7792
7793 arelent *
7794 tc_gen_reloc (asection * section, fixS * fixp)
7795 {
7796 arelent *reloc;
7797 bfd_reloc_code_real_type code;
7798
7799 reloc = XNEW (arelent);
7800
7801 reloc->sym_ptr_ptr = XNEW (asymbol *);
7802 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7803 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7804
7805 if (fixp->fx_pcrel)
7806 {
7807 if (section->use_rela_p)
7808 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7809 else
7810 fixp->fx_offset = reloc->address;
7811 }
7812 reloc->addend = fixp->fx_offset;
7813
7814 code = fixp->fx_r_type;
7815 switch (code)
7816 {
7817 case BFD_RELOC_16:
7818 if (fixp->fx_pcrel)
7819 code = BFD_RELOC_16_PCREL;
7820 break;
7821
7822 case BFD_RELOC_32:
7823 if (fixp->fx_pcrel)
7824 code = BFD_RELOC_32_PCREL;
7825 break;
7826
7827 case BFD_RELOC_64:
7828 if (fixp->fx_pcrel)
7829 code = BFD_RELOC_64_PCREL;
7830 break;
7831
7832 default:
7833 break;
7834 }
7835
7836 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7837 if (reloc->howto == NULL)
7838 {
7839 as_bad_where (fixp->fx_file, fixp->fx_line,
7840 _
7841 ("cannot represent %s relocation in this object file format"),
7842 bfd_get_reloc_code_name (code));
7843 return NULL;
7844 }
7845
7846 return reloc;
7847 }
7848
7849 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7850
7851 void
7852 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7853 {
7854 bfd_reloc_code_real_type type;
7855 int pcrel = 0;
7856
7857 /* Pick a reloc.
7858 FIXME: @@ Should look at CPU word size. */
7859 switch (size)
7860 {
7861 case 1:
7862 type = BFD_RELOC_8;
7863 break;
7864 case 2:
7865 type = BFD_RELOC_16;
7866 break;
7867 case 4:
7868 type = BFD_RELOC_32;
7869 break;
7870 case 8:
7871 type = BFD_RELOC_64;
7872 break;
7873 default:
7874 as_bad (_("cannot do %u-byte relocation"), size);
7875 type = BFD_RELOC_UNUSED;
7876 break;
7877 }
7878
7879 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7880 }
7881
/* Decide whether the fixup FIXP must be emitted as a relocation for
   the linker (return non-zero) rather than resolved locally by the
   assembler.  Hooked in via TC_FORCE_RELOCATION.  */

int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    /* GOT-, page- and TLS-related relocations: always left for the
       linker, which has the final say on addresses and TLS layout.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  /* Fall back to the target-independent heuristics.  */
  return generic_force_reloc (fixp);
}
7969
7970 #ifdef OBJ_ELF
7971
7972 const char *
7973 elf64_aarch64_target_format (void)
7974 {
7975 if (strcmp (TARGET_OS, "cloudabi") == 0)
7976 {
7977 /* FIXME: What to do for ilp32_p ? */
7978 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
7979 }
7980 if (target_big_endian)
7981 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7982 else
7983 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7984 }
7985
/* Symbol-frobbing hook: all work is delegated to the generic ELF
   elf_frob_symbol, which receives SYMP and PUNTP unchanged.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
7991 #endif
7992
7993 /* MD interface: Finalization. */
7994
7995 /* A good place to do this, although this was probably not intended
7996 for this kind of use. We need to dump the literal pool before
7997 references are made to a null symbol pointer. */
7998
7999 void
8000 aarch64_cleanup (void)
8001 {
8002 literal_pool *pool;
8003
8004 for (pool = list_of_pools; pool; pool = pool->next)
8005 {
8006 /* Put it at the end of the relevant section. */
8007 subseg_set (pool->section, pool->sub_section);
8008 s_ltorg (0);
8009 }
8010 }
8011
8012 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Sections without a frag chain have nothing to clean up.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* SYM is the mapping symbol recorded last in this frag
	 (tc_frag_data.last_map), if there is one.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary to NEXT; scan forward to see
	 whether a later mapping symbol makes it redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8076 #endif
8077
/* Adjust the symbol table.  On non-ELF targets this is a no-op.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8090
/* Insert VALUE into TABLE under KEY.  The tables populated through
   this helper are built from fixed opcode/operand arrays, so a
   failure (duplicate key or out of memory) indicates an internal
   inconsistency; report it fatally instead of printing to stdout and
   silently continuing with an incomplete table.  */

static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err = hash_insert (table, key, value);

  if (hash_err)
    as_fatal (_("internal error: can't hash `%s': %s"), key, hash_err);
}
8100
8101 static void
8102 fill_instruction_hash_table (void)
8103 {
8104 aarch64_opcode *opcode = aarch64_opcode_table;
8105
8106 while (opcode->name != NULL)
8107 {
8108 templates *templ, *new_templ;
8109 templ = hash_find (aarch64_ops_hsh, opcode->name);
8110
8111 new_templ = XNEW (templates);
8112 new_templ->opcode = opcode;
8113 new_templ->next = NULL;
8114
8115 if (!templ)
8116 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8117 else
8118 {
8119 new_templ->next = templ->next;
8120 templ->next = new_templ;
8121 }
8122 ++opcode;
8123 }
8124 }
8125
/* Copy at most NUM characters of the NUL-terminated string SRC into
   DST, upper-casing each one, and NUL-terminate DST (which must have
   room for NUM + 1 bytes).  Stops early at the end of SRC.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  /* Use a size_t counter: NUM is a size_t, and an unsigned int
     counter could wrap before reaching it on LP64 hosts.  */
  size_t i;
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
8134
8135 /* Assume STR point to a lower-case string, allocate, convert and return
8136 the corresponding upper-case string. */
8137 static inline const char*
8138 get_upper_str (const char *str)
8139 {
8140 char *ret;
8141 size_t len = strlen (str);
8142 ret = XNEWVEC (char, len + 1);
8143 convert_to_upper (ret, str, len);
8144 return ret;
8145 }
8146
8147 /* MD interface: Initialization. */
8148
8149 void
8150 md_begin (void)
8151 {
8152 unsigned mach;
8153 unsigned int i;
8154
8155 if ((aarch64_ops_hsh = hash_new ()) == NULL
8156 || (aarch64_cond_hsh = hash_new ()) == NULL
8157 || (aarch64_shift_hsh = hash_new ()) == NULL
8158 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8159 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8160 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8161 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8162 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8163 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8164 || (aarch64_reg_hsh = hash_new ()) == NULL
8165 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8166 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8167 || (aarch64_pldop_hsh = hash_new ()) == NULL
8168 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8169 as_fatal (_("virtual memory exhausted"));
8170
8171 fill_instruction_hash_table ();
8172
8173 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8174 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8175 (void *) (aarch64_sys_regs + i));
8176
8177 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8178 checked_hash_insert (aarch64_pstatefield_hsh,
8179 aarch64_pstatefields[i].name,
8180 (void *) (aarch64_pstatefields + i));
8181
8182 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8183 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8184 aarch64_sys_regs_ic[i].name,
8185 (void *) (aarch64_sys_regs_ic + i));
8186
8187 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8188 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8189 aarch64_sys_regs_dc[i].name,
8190 (void *) (aarch64_sys_regs_dc + i));
8191
8192 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8193 checked_hash_insert (aarch64_sys_regs_at_hsh,
8194 aarch64_sys_regs_at[i].name,
8195 (void *) (aarch64_sys_regs_at + i));
8196
8197 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8198 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8199 aarch64_sys_regs_tlbi[i].name,
8200 (void *) (aarch64_sys_regs_tlbi + i));
8201
8202 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8203 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8204 (void *) (reg_names + i));
8205
8206 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8207 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8208 (void *) (nzcv_names + i));
8209
8210 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8211 {
8212 const char *name = aarch64_operand_modifiers[i].name;
8213 checked_hash_insert (aarch64_shift_hsh, name,
8214 (void *) (aarch64_operand_modifiers + i));
8215 /* Also hash the name in the upper case. */
8216 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8217 (void *) (aarch64_operand_modifiers + i));
8218 }
8219
8220 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8221 {
8222 unsigned int j;
8223 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8224 the same condition code. */
8225 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8226 {
8227 const char *name = aarch64_conds[i].names[j];
8228 if (name == NULL)
8229 break;
8230 checked_hash_insert (aarch64_cond_hsh, name,
8231 (void *) (aarch64_conds + i));
8232 /* Also hash the name in the upper case. */
8233 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8234 (void *) (aarch64_conds + i));
8235 }
8236 }
8237
8238 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8239 {
8240 const char *name = aarch64_barrier_options[i].name;
8241 /* Skip xx00 - the unallocated values of option. */
8242 if ((i & 0x3) == 0)
8243 continue;
8244 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8245 (void *) (aarch64_barrier_options + i));
8246 /* Also hash the name in the upper case. */
8247 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8248 (void *) (aarch64_barrier_options + i));
8249 }
8250
8251 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8252 {
8253 const char* name = aarch64_prfops[i].name;
8254 /* Skip the unallocated hint encodings. */
8255 if (name == NULL)
8256 continue;
8257 checked_hash_insert (aarch64_pldop_hsh, name,
8258 (void *) (aarch64_prfops + i));
8259 /* Also hash the name in the upper case. */
8260 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8261 (void *) (aarch64_prfops + i));
8262 }
8263
8264 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8265 {
8266 const char* name = aarch64_hint_options[i].name;
8267
8268 checked_hash_insert (aarch64_hint_opt_hsh, name,
8269 (void *) (aarch64_hint_options + i));
8270 /* Also hash the name in the upper case. */
8271 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8272 (void *) (aarch64_hint_options + i));
8273 }
8274
8275 /* Set the cpu variant based on the command-line options. */
8276 if (!mcpu_cpu_opt)
8277 mcpu_cpu_opt = march_cpu_opt;
8278
8279 if (!mcpu_cpu_opt)
8280 mcpu_cpu_opt = &cpu_default;
8281
8282 cpu_variant = *mcpu_cpu_opt;
8283
8284 /* Record the CPU type. */
8285 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8286
8287 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8288 }
8289
/* Command line processing.  */

/* Short options: only -m<arg> is handled at this level.  */
const char *md_shortopts = "m:";

/* Define only the endianness switches this configuration supports:
   both -EB and -EL for a bi-endian build, otherwise just the one
   matching the configured default byte order.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8316
/* Simple boolean command-line options: matching OPTION stores VALUE
   into *VAR.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Table of the simple -m options; terminated by a NULL entry.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8339
/* Entry mapping a -mcpu= name to its architecture feature set.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Terminated by a NULL name.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Qualcomm Falkor"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Qualcomm QDF24XX"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
8389
/* Entry mapping a -march= name to its feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Terminated by a NULL name.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {NULL, AARCH64_ARCH_NONE}
};
8406
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* Table of +EXT/-noEXT extension names.  VALUE is the feature bit the
   name controls; REQUIRE lists the features it depends on, which are
   closed over transitively when enabling or disabling (see
   aarch64_feature_enable_set / aarch64_feature_disable_set).
   Terminated by a NULL name.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_SIMD, 0)},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
8443
/* Long options that carry a value, dispatched to a sub-option
   decoding function (e.g. -mcpu=..., -march=...).  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
8451
8452 /* Transitive closure of features depending on set. */
8453 static aarch64_feature_set
8454 aarch64_feature_disable_set (aarch64_feature_set set)
8455 {
8456 const struct aarch64_option_cpu_value_table *opt;
8457 aarch64_feature_set prev = 0;
8458
8459 while (prev != set) {
8460 prev = set;
8461 for (opt = aarch64_features; opt->name != NULL; opt++)
8462 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8463 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8464 }
8465 return set;
8466 }
8467
8468 /* Transitive closure of dependencies of set. */
8469 static aarch64_feature_set
8470 aarch64_feature_enable_set (aarch64_feature_set set)
8471 {
8472 const struct aarch64_option_cpu_value_table *opt;
8473 aarch64_feature_set prev = 0;
8474
8475 while (prev != set) {
8476 prev = set;
8477 for (opt = aarch64_features; opt->name != NULL; opt++)
8478 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8479 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8480 }
8481 return set;
8482 }
8483
8484 static int
8485 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8486 bfd_boolean ext_only)
8487 {
8488 /* We insist on extensions being added before being removed. We achieve
8489 this by using the ADDING_VALUE variable to indicate whether we are
8490 adding an extension (1) or removing it (0) and only allowing it to
8491 change in the order -1 -> 1 -> 0. */
8492 int adding_value = -1;
8493 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8494
8495 /* Copy the feature set, so that we can modify it. */
8496 *ext_set = **opt_p;
8497 *opt_p = ext_set;
8498
8499 while (str != NULL && *str != 0)
8500 {
8501 const struct aarch64_option_cpu_value_table *opt;
8502 const char *ext = NULL;
8503 int optlen;
8504
8505 if (!ext_only)
8506 {
8507 if (*str != '+')
8508 {
8509 as_bad (_("invalid architectural extension"));
8510 return 0;
8511 }
8512
8513 ext = strchr (++str, '+');
8514 }
8515
8516 if (ext != NULL)
8517 optlen = ext - str;
8518 else
8519 optlen = strlen (str);
8520
8521 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8522 {
8523 if (adding_value != 0)
8524 adding_value = 0;
8525 optlen -= 2;
8526 str += 2;
8527 }
8528 else if (optlen > 0)
8529 {
8530 if (adding_value == -1)
8531 adding_value = 1;
8532 else if (adding_value != 1)
8533 {
8534 as_bad (_("must specify extensions to add before specifying "
8535 "those to remove"));
8536 return FALSE;
8537 }
8538 }
8539
8540 if (optlen == 0)
8541 {
8542 as_bad (_("missing architectural extension"));
8543 return 0;
8544 }
8545
8546 gas_assert (adding_value != -1);
8547
8548 for (opt = aarch64_features; opt->name != NULL; opt++)
8549 if (strncmp (opt->name, str, optlen) == 0)
8550 {
8551 aarch64_feature_set set;
8552
8553 /* Add or remove the extension. */
8554 if (adding_value)
8555 {
8556 set = aarch64_feature_enable_set (opt->value);
8557 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8558 }
8559 else
8560 {
8561 set = aarch64_feature_disable_set (opt->value);
8562 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8563 }
8564 break;
8565 }
8566
8567 if (opt->name == NULL)
8568 {
8569 as_bad (_("unknown architectural extension `%s'"), str);
8570 return 0;
8571 }
8572
8573 str = ext;
8574 };
8575
8576 return 1;
8577 }
8578
8579 static int
8580 aarch64_parse_cpu (const char *str)
8581 {
8582 const struct aarch64_cpu_option_table *opt;
8583 const char *ext = strchr (str, '+');
8584 size_t optlen;
8585
8586 if (ext != NULL)
8587 optlen = ext - str;
8588 else
8589 optlen = strlen (str);
8590
8591 if (optlen == 0)
8592 {
8593 as_bad (_("missing cpu name `%s'"), str);
8594 return 0;
8595 }
8596
8597 for (opt = aarch64_cpus; opt->name != NULL; opt++)
8598 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8599 {
8600 mcpu_cpu_opt = &opt->value;
8601 if (ext != NULL)
8602 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
8603
8604 return 1;
8605 }
8606
8607 as_bad (_("unknown cpu `%s'"), str);
8608 return 0;
8609 }
8610
8611 static int
8612 aarch64_parse_arch (const char *str)
8613 {
8614 const struct aarch64_arch_option_table *opt;
8615 const char *ext = strchr (str, '+');
8616 size_t optlen;
8617
8618 if (ext != NULL)
8619 optlen = ext - str;
8620 else
8621 optlen = strlen (str);
8622
8623 if (optlen == 0)
8624 {
8625 as_bad (_("missing architecture name `%s'"), str);
8626 return 0;
8627 }
8628
8629 for (opt = aarch64_archs; opt->name != NULL; opt++)
8630 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8631 {
8632 march_cpu_opt = &opt->value;
8633 if (ext != NULL)
8634 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8635
8636 return 1;
8637 }
8638
8639 as_bad (_("unknown architecture `%s'\n"), str);
8640 return 0;
8641 }
8642
/* ABIs.  */

/* Entry in the table mapping an -mabi= option name to its ABI value.  */
struct aarch64_option_abi_value_table
{
  /* Name accepted on the command line (e.g. "lp64").  */
  const char *name;
  /* ABI variant selected by that name.  */
  enum aarch64_abi_type value;
};
8649
/* The recognized -mabi= names and the ABI each selects.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
8654
8655 static int
8656 aarch64_parse_abi (const char *str)
8657 {
8658 unsigned int i;
8659
8660 if (str[0] == '\0')
8661 {
8662 as_bad (_("missing abi name `%s'"), str);
8663 return 0;
8664 }
8665
8666 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8667 if (strcmp (str, aarch64_abis[i].name) == 0)
8668 {
8669 aarch64_abi = aarch64_abis[i].value;
8670 return 1;
8671 }
8672
8673 as_bad (_("unknown abi `%s'\n"), str);
8674 return 0;
8675 }
8676
/* Long-form command-line options.  Each option name ends in '=' and
   takes an argument, which is handed to the associated parser
   function.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
8688
8689 int
8690 md_parse_option (int c, const char *arg)
8691 {
8692 struct aarch64_option_table *opt;
8693 struct aarch64_long_option_table *lopt;
8694
8695 switch (c)
8696 {
8697 #ifdef OPTION_EB
8698 case OPTION_EB:
8699 target_big_endian = 1;
8700 break;
8701 #endif
8702
8703 #ifdef OPTION_EL
8704 case OPTION_EL:
8705 target_big_endian = 0;
8706 break;
8707 #endif
8708
8709 case 'a':
8710 /* Listing option. Just ignore these, we don't support additional
8711 ones. */
8712 return 0;
8713
8714 default:
8715 for (opt = aarch64_opts; opt->option != NULL; opt++)
8716 {
8717 if (c == opt->option[0]
8718 && ((arg == NULL && opt->option[1] == 0)
8719 || streq (arg, opt->option + 1)))
8720 {
8721 /* If the option is deprecated, tell the user. */
8722 if (opt->deprecated != NULL)
8723 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
8724 arg ? arg : "", _(opt->deprecated));
8725
8726 if (opt->var != NULL)
8727 *opt->var = opt->value;
8728
8729 return 1;
8730 }
8731 }
8732
8733 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8734 {
8735 /* These options are expected to have an argument. */
8736 if (c == lopt->option[0]
8737 && arg != NULL
8738 && strncmp (arg, lopt->option + 1,
8739 strlen (lopt->option + 1)) == 0)
8740 {
8741 /* If the option is deprecated, tell the user. */
8742 if (lopt->deprecated != NULL)
8743 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
8744 _(lopt->deprecated));
8745
8746 /* Call the sup-option parser. */
8747 return lopt->func (arg + strlen (lopt->option) - 1);
8748 }
8749 }
8750
8751 return 0;
8752 }
8753
8754 return 1;
8755 }
8756
/* Implement md_show_usage: print the AArch64-specific command-line
   options to FP for "as --help".  */
void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  /* Short options that carry help text.  */
  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  /* Long options (-mabi=, -mcpu=, -march=).  */
  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif
}
8783
8784 /* Parse a .cpu directive. */
8785
8786 static void
8787 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
8788 {
8789 const struct aarch64_cpu_option_table *opt;
8790 char saved_char;
8791 char *name;
8792 char *ext;
8793 size_t optlen;
8794
8795 name = input_line_pointer;
8796 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8797 input_line_pointer++;
8798 saved_char = *input_line_pointer;
8799 *input_line_pointer = 0;
8800
8801 ext = strchr (name, '+');
8802
8803 if (ext != NULL)
8804 optlen = ext - name;
8805 else
8806 optlen = strlen (name);
8807
8808 /* Skip the first "all" entry. */
8809 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
8810 if (strlen (opt->name) == optlen
8811 && strncmp (name, opt->name, optlen) == 0)
8812 {
8813 mcpu_cpu_opt = &opt->value;
8814 if (ext != NULL)
8815 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8816 return;
8817
8818 cpu_variant = *mcpu_cpu_opt;
8819
8820 *input_line_pointer = saved_char;
8821 demand_empty_rest_of_line ();
8822 return;
8823 }
8824 as_bad (_("unknown cpu `%s'"), name);
8825 *input_line_pointer = saved_char;
8826 ignore_rest_of_line ();
8827 }
8828
8829
8830 /* Parse a .arch directive. */
8831
8832 static void
8833 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8834 {
8835 const struct aarch64_arch_option_table *opt;
8836 char saved_char;
8837 char *name;
8838 char *ext;
8839 size_t optlen;
8840
8841 name = input_line_pointer;
8842 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8843 input_line_pointer++;
8844 saved_char = *input_line_pointer;
8845 *input_line_pointer = 0;
8846
8847 ext = strchr (name, '+');
8848
8849 if (ext != NULL)
8850 optlen = ext - name;
8851 else
8852 optlen = strlen (name);
8853
8854 /* Skip the first "all" entry. */
8855 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8856 if (strlen (opt->name) == optlen
8857 && strncmp (name, opt->name, optlen) == 0)
8858 {
8859 mcpu_cpu_opt = &opt->value;
8860 if (ext != NULL)
8861 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8862 return;
8863
8864 cpu_variant = *mcpu_cpu_opt;
8865
8866 *input_line_pointer = saved_char;
8867 demand_empty_rest_of_line ();
8868 return;
8869 }
8870
8871 as_bad (_("unknown architecture `%s'\n"), name);
8872 *input_line_pointer = saved_char;
8873 ignore_rest_of_line ();
8874 }
8875
8876 /* Parse a .arch_extension directive. */
8877
8878 static void
8879 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8880 {
8881 char saved_char;
8882 char *ext = input_line_pointer;;
8883
8884 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8885 input_line_pointer++;
8886 saved_char = *input_line_pointer;
8887 *input_line_pointer = 0;
8888
8889 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8890 return;
8891
8892 cpu_variant = *mcpu_cpu_opt;
8893
8894 *input_line_pointer = saved_char;
8895 demand_empty_rest_of_line ();
8896 }
8897
/* Copy symbol information.  Called via TC_COPY_SYMBOL_ATTRIBUTES so the
   target-specific flag word travels with a symbol when one symbol is set
   equal to another.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  /* Propagate the AArch64-specific flag bits from SRC to DEST.  */
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
This page took 0.252501 seconds and 3 git commands to generate.