01c8000d0a7e779092dcc3e02c5ec1cfef8bc0e9
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum vector_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q,
87 NT_zero,
88 NT_merge
89 };
90
91 /* Bits for DEFINED field in vector_type_el. */
92 #define NTA_HASTYPE 1
93 #define NTA_HASINDEX 2
94 #define NTA_HASVARWIDTH 4
95
/* Shape/index information parsed from a vector register operand such
   as "v0.8b" or "z3.s[2]".  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type (NT_b/h/s/d/q, or the SVE
				   NT_zero/NT_merge predication kinds).  */
  unsigned char defined;	/* Bitmask of NTA_HAS* flags saying which of
				   the fields below carry information.  */
  unsigned width;		/* Number of elements; 0 for variable-width
				   (SVE) registers.  */
  int64_t index;		/* Element index when NTA_HASINDEX is set.  */
};
103
104 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
105
/* Relocation information attached to the instruction currently being
   assembled; feeds the GAS fixup machinery.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression the reloc applies to.  */
  int pc_rel;				/* Non-zero if PC-relative.  */
  enum aarch64_opnd opnd;		/* Operand the reloc belongs to.  */
  uint32_t flags;			/* FIXUP_F_* flags, e.g. explicit
					   shift present.  */
  unsigned need_libopcodes_p : 1;	/* Non-zero if libopcodes must
					   re-encode the fixed-up insn.  */
};
115
116 struct aarch64_instruction
117 {
118 /* libopcodes structure for instruction intermediate representation. */
119 aarch64_inst base;
120 /* Record assembly errors found during the parsing. */
121 struct
122 {
123 enum aarch64_operand_error_kind kind;
124 const char *error;
125 } parsing_error;
126 /* The condition that appears in the assembly line. */
127 int cond;
128 /* Relocation information (including the GAS internal fixup). */
129 struct reloc reloc;
130 /* Need to generate an immediate in the literal pool. */
131 unsigned gen_lit_pool : 1;
132 };
133
134 typedef struct aarch64_instruction aarch64_instruction;
135
136 static aarch64_instruction inst;
137
138 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
139 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
140
141 /* Diagnostics inline function utilites.
142
143 These are lightweight utlities which should only be called by parse_operands
144 and other parsers. GAS processes each assembly line by parsing it against
145 instruction template(s), in the case of multiple templates (for the same
146 mnemonic name), those templates are tried one by one until one succeeds or
147 all fail. An assembly line may fail a few templates before being
148 successfully parsed; an error saved here in most cases is not a user error
149 but an error indicating the current template is not the right template.
150 Therefore it is very important that errors can be saved at a low cost during
151 the parsing; we don't want to slow down the whole parsing by recording
152 non-user errors in detail.
153
154 Remember that the objective is to help GAS pick up the most approapriate
155 error message in the case of multiple templates, e.g. FMOV which has 8
156 templates. */
157
158 static inline void
159 clear_error (void)
160 {
161 inst.parsing_error.kind = AARCH64_OPDE_NIL;
162 inst.parsing_error.error = NULL;
163 }
164
165 static inline bfd_boolean
166 error_p (void)
167 {
168 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
169 }
170
171 static inline const char *
172 get_error_message (void)
173 {
174 return inst.parsing_error.error;
175 }
176
177 static inline enum aarch64_operand_error_kind
178 get_error_kind (void)
179 {
180 return inst.parsing_error.kind;
181 }
182
183 static inline void
184 set_error (enum aarch64_operand_error_kind kind, const char *error)
185 {
186 inst.parsing_error.kind = kind;
187 inst.parsing_error.error = error;
188 }
189
190 static inline void
191 set_recoverable_error (const char *error)
192 {
193 set_error (AARCH64_OPDE_RECOVERABLE, error);
194 }
195
196 /* Use the DESC field of the corresponding aarch64_operand entry to compose
197 the error message. */
198 static inline void
199 set_default_error (void)
200 {
201 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
202 }
203
204 static inline void
205 set_syntax_error (const char *error)
206 {
207 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
208 }
209
210 static inline void
211 set_first_syntax_error (const char *error)
212 {
213 if (! error_p ())
214 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
215 }
216
217 static inline void
218 set_fatal_syntax_error (const char *error)
219 {
220 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
221 }
222 \f
223 /* Number of littlenums required to hold an extended precision number. */
224 #define MAX_LITTLENUMS 6
225
226 /* Return value for certain parsers when the parsing fails; those parsers
227 return the information of the parsed result, e.g. register number, on
228 success. */
229 #define PARSE_FAIL -1
230
231 /* This is an invalid condition code that means no conditional field is
232 present. */
233 #define COND_ALWAYS 0x10
234
235 typedef struct
236 {
237 const char *template;
238 unsigned long value;
239 } asm_barrier_opt;
240
241 typedef struct
242 {
243 const char *template;
244 uint32_t value;
245 } asm_nzcv;
246
247 struct reloc_entry
248 {
249 char *name;
250 bfd_reloc_code_real_type reloc;
251 };
252
253 /* Macros to define the register types and masks for the purpose
254 of parsing. */
255
256 #undef AARCH64_REG_TYPES
257 #define AARCH64_REG_TYPES \
258 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
259 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
260 BASIC_REG_TYPE(SP_32) /* wsp */ \
261 BASIC_REG_TYPE(SP_64) /* sp */ \
262 BASIC_REG_TYPE(Z_32) /* wzr */ \
263 BASIC_REG_TYPE(Z_64) /* xzr */ \
264 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
265 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
266 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
267 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
268 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
269 BASIC_REG_TYPE(CN) /* c[0-7] */ \
270 BASIC_REG_TYPE(VN) /* v[0-31] */ \
271 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
272 BASIC_REG_TYPE(PN) /* p[0-15] */ \
273 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
274 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
275 /* Typecheck: same, plus SVE registers. */ \
276 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
277 | REG_TYPE(ZN)) \
278 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
279 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
280 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
281 /* Typecheck: same, plus SVE registers. */ \
282 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
283 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
284 | REG_TYPE(ZN)) \
285 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
286 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
287 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
288 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
289 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
290 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
291 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
292 /* Typecheck: any [BHSDQ]P FP. */ \
293 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
294 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
295 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
296 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
298 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
299 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
300 /* Any integer register; used for error messages only. */ \
301 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
303 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
304 /* Pseudo type to mark the end of the enumerator sequence. */ \
305 BASIC_REG_TYPE(MAX)
306
307 #undef BASIC_REG_TYPE
308 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
309 #undef MULTI_REG_TYPE
310 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
311
312 /* Register type enumerators. */
313 typedef enum aarch64_reg_type_
314 {
315 /* A list of REG_TYPE_*. */
316 AARCH64_REG_TYPES
317 } aarch64_reg_type;
318
319 #undef BASIC_REG_TYPE
320 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
321 #undef REG_TYPE
322 #define REG_TYPE(T) (1 << REG_TYPE_##T)
323 #undef MULTI_REG_TYPE
324 #define MULTI_REG_TYPE(T,V) V,
325
326 /* Structure for a hash table entry for a register. */
327 typedef struct
328 {
329 const char *name;
330 unsigned char number;
331 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
332 unsigned char builtin;
333 } reg_entry;
334
335 /* Values indexed by aarch64_reg_type to assist the type checking. */
336 static const unsigned reg_type_masks[] =
337 {
338 AARCH64_REG_TYPES
339 };
340
341 #undef BASIC_REG_TYPE
342 #undef REG_TYPE
343 #undef MULTI_REG_TYPE
344 #undef AARCH64_REG_TYPES
345
346 /* Diagnostics used when we don't get a register of the expected type.
347 Note: this has to synchronized with aarch64_reg_type definitions
348 above. */
349 static const char *
350 get_reg_expected_msg (aarch64_reg_type reg_type)
351 {
352 const char *msg;
353
354 switch (reg_type)
355 {
356 case REG_TYPE_R_32:
357 msg = N_("integer 32-bit register expected");
358 break;
359 case REG_TYPE_R_64:
360 msg = N_("integer 64-bit register expected");
361 break;
362 case REG_TYPE_R_N:
363 msg = N_("integer register expected");
364 break;
365 case REG_TYPE_R64_SP:
366 msg = N_("64-bit integer or SP register expected");
367 break;
368 case REG_TYPE_SVE_BASE:
369 msg = N_("base register expected");
370 break;
371 case REG_TYPE_R_Z:
372 msg = N_("integer or zero register expected");
373 break;
374 case REG_TYPE_SVE_OFFSET:
375 msg = N_("offset register expected");
376 break;
377 case REG_TYPE_R_SP:
378 msg = N_("integer or SP register expected");
379 break;
380 case REG_TYPE_R_Z_SP:
381 msg = N_("integer, zero or SP register expected");
382 break;
383 case REG_TYPE_FP_B:
384 msg = N_("8-bit SIMD scalar register expected");
385 break;
386 case REG_TYPE_FP_H:
387 msg = N_("16-bit SIMD scalar or floating-point half precision "
388 "register expected");
389 break;
390 case REG_TYPE_FP_S:
391 msg = N_("32-bit SIMD scalar or floating-point single precision "
392 "register expected");
393 break;
394 case REG_TYPE_FP_D:
395 msg = N_("64-bit SIMD scalar or floating-point double precision "
396 "register expected");
397 break;
398 case REG_TYPE_FP_Q:
399 msg = N_("128-bit SIMD scalar or floating-point quad precision "
400 "register expected");
401 break;
402 case REG_TYPE_CN:
403 msg = N_("C0 - C15 expected");
404 break;
405 case REG_TYPE_R_Z_BHSDQ_V:
406 msg = N_("register expected");
407 break;
408 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
409 msg = N_("SIMD scalar or floating-point register expected");
410 break;
411 case REG_TYPE_VN: /* any V reg */
412 msg = N_("vector register expected");
413 break;
414 case REG_TYPE_ZN:
415 msg = N_("SVE vector register expected");
416 break;
417 case REG_TYPE_PN:
418 msg = N_("SVE predicate register expected");
419 break;
420 default:
421 as_fatal (_("invalid register type %d"), reg_type);
422 }
423 return msg;
424 }
425
426 /* Some well known registers that we refer to directly elsewhere. */
427 #define REG_SP 31
428
429 /* Instructions take 4 bytes in the object file. */
430 #define INSN_SIZE 4
431
432 static struct hash_control *aarch64_ops_hsh;
433 static struct hash_control *aarch64_cond_hsh;
434 static struct hash_control *aarch64_shift_hsh;
435 static struct hash_control *aarch64_sys_regs_hsh;
436 static struct hash_control *aarch64_pstatefield_hsh;
437 static struct hash_control *aarch64_sys_regs_ic_hsh;
438 static struct hash_control *aarch64_sys_regs_dc_hsh;
439 static struct hash_control *aarch64_sys_regs_at_hsh;
440 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
441 static struct hash_control *aarch64_reg_hsh;
442 static struct hash_control *aarch64_barrier_opt_hsh;
443 static struct hash_control *aarch64_nzcv_hsh;
444 static struct hash_control *aarch64_pldop_hsh;
445 static struct hash_control *aarch64_hint_opt_hsh;
446
447 /* Stuff needed to resolve the label ambiguity
448 As:
449 ...
450 label: <insn>
451 may differ from:
452 ...
453 label:
454 <insn> */
455
456 static symbolS *last_label_seen;
457
458 /* Literal pool structure. Held on a per-section
459 and per-sub-section basis. */
460
461 #define MAX_LITERAL_POOL_SIZE 1024
462 typedef struct literal_expression
463 {
464 expressionS exp;
465 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
466 LITTLENUM_TYPE * bignum;
467 } literal_expression;
468
469 typedef struct literal_pool
470 {
471 literal_expression literals[MAX_LITERAL_POOL_SIZE];
472 unsigned int next_free_entry;
473 unsigned int id;
474 symbolS *symbol;
475 segT section;
476 subsegT sub_section;
477 int size;
478 struct literal_pool *next;
479 } literal_pool;
480
481 /* Pointer to a linked list of literal pools. */
482 static literal_pool *list_of_pools = NULL;
483 \f
484 /* Pure syntax. */
485
486 /* This array holds the chars that always start a comment. If the
487 pre-processor is disabled, these aren't very useful. */
488 const char comment_chars[] = "";
489
490 /* This array holds the chars that only start a comment at the beginning of
491 a line. If the line seems to have the form '# 123 filename'
492 .line and .file directives will appear in the pre-processed output. */
493 /* Note that input_file.c hand checks for '#' at the beginning of the
494 first line of the input file. This is because the compiler outputs
495 #NO_APP at the beginning of its output. */
496 /* Also note that comments like this one will always work. */
497 const char line_comment_chars[] = "#";
498
499 const char line_separator_chars[] = ";";
500
501 /* Chars that can be used to separate mant
502 from exp in floating point numbers. */
503 const char EXP_CHARS[] = "eE";
504
505 /* Chars that mean this number is a floating point constant. */
506 /* As in 0f12.456 */
507 /* or 0d1.2345e12 */
508
509 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
510
511 /* Prefix character that indicates the start of an immediate value. */
512 #define is_immediate_prefix(C) ((C) == '#')
513
514 /* Separator character handling. */
515
516 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
517
518 static inline bfd_boolean
519 skip_past_char (char **str, char c)
520 {
521 if (**str == c)
522 {
523 (*str)++;
524 return TRUE;
525 }
526 else
527 return FALSE;
528 }
529
530 #define skip_past_comma(str) skip_past_char (str, ',')
531
532 /* Arithmetic expressions (possibly involving symbols). */
533
534 static bfd_boolean in_my_get_expression_p = FALSE;
535
536 /* Third argument to my_get_expression. */
537 #define GE_NO_PREFIX 0
538 #define GE_OPT_PREFIX 1
539
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   PREFIX_MODE is GE_NO_PREFIX or GE_OPT_PREFIX; in the latter case an
   optional leading '#' immediate prefix is consumed.  If REJECT_ABSENT
   is non-zero, an O_absent result is treated as a failure.  On success
   *STR is advanced past the expression; on failure it points at where
   parsing stopped.  */

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      /* Consume the optional '#' immediate prefix if present.  */
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression () parser works on input_line_pointer, so
     temporarily redirect it to *STR and restore it afterwards.  The
     in_my_get_expression_p flag lets md_operand () flag bad operands
     back to us via O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#'-prefixed operand cannot be anything but an expression, so
	 the error is fatal (no other template can match); otherwise only
	 record it if no earlier error exists.  */
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
606
607 /* Turn a string in input_line_pointer into a floating point constant
608 of type TYPE, and store the appropriate bytes in *LITP. The number
609 of LITTLENUMS emitted is stored in *SIZEP. An error message is
610 returned, or NULL on OK. */
611
612 const char *
613 md_atof (int type, char *litP, int *sizeP)
614 {
615 return ieee_md_atof (type, litP, sizeP, target_big_endian);
616 }
617
618 /* We handle all bad expressions here, so that we can report the faulty
619 instruction in the error message. */
620 void
621 md_operand (expressionS * exp)
622 {
623 if (in_my_get_expression_p)
624 exp->X_op = O_illegal;
625 }
626
627 /* Immediate values. */
628
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which
   is set will be the most meaningful.  This helper records ERROR only
   when no diagnostic exists yet, so later (cascading) errors do not
   overwrite it.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
640
641 /* Similiar to first_error, but this function accepts formatted error
642 message. */
643 static void
644 first_error_fmt (const char *format, ...)
645 {
646 va_list args;
647 enum
648 { size = 100 };
649 /* N.B. this single buffer will not cause error messages for different
650 instructions to pollute each other; this is because at the end of
651 processing of each assembly line, error message if any will be
652 collected by as_bad. */
653 static char buffer[size];
654
655 if (! error_p ())
656 {
657 int ret ATTRIBUTE_UNUSED;
658 va_start (args, format);
659 ret = vsnprintf (buffer, size, format, args);
660 know (ret <= size - 1 && ret >= 0);
661 va_end (args);
662 set_syntax_error (buffer);
663 }
664 }
665
666 /* Register parsing. */
667
668 /* Generic register parser which is called by other specialized
669 register parsers.
670 CCP points to what should be the beginning of a register name.
671 If it is indeed a valid register name, advance CCP over it and
672 return the reg_entry structure; otherwise return NULL.
673 It does not issue diagnostics. */
674
675 static reg_entry *
676 parse_reg (char **ccp)
677 {
678 char *start = *ccp;
679 char *p;
680 reg_entry *reg;
681
682 #ifdef REGISTER_PREFIX
683 if (*start != REGISTER_PREFIX)
684 return NULL;
685 start++;
686 #endif
687
688 p = start;
689 if (!ISALPHA (*p) || !is_name_beginner (*p))
690 return NULL;
691
692 do
693 p++;
694 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
695
696 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
697
698 if (!reg)
699 return NULL;
700
701 *ccp = p;
702 return reg;
703 }
704
705 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
706 return FALSE. */
707 static bfd_boolean
708 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
709 {
710 return (reg_type_masks[type] & (1 << reg->type)) != 0;
711 }
712
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit forms: Wn, WSP and WZR.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit forms: Xn, SP and XZR.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* SVE Zn registers are only accepted when REG_TYPE allows them,
	 and must carry an explicit ".s" or ".d" size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
770
771 /* Try to parse a base or offset register. Return the register entry
772 on success, setting *QUALIFIER to the register qualifier. Return null
773 otherwise.
774
775 Note that this function does not issue any diagnostics. */
776
777 static const reg_entry *
778 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
779 {
780 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
781 }
782
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q

   On entry *STR points at the '.' that introduces the qualifier; on
   success it is advanced past the qualifier.  */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers never take an element count, and an
     Advanced SIMD qualifier without a leading digit (e.g. ".b") denotes
     a scalar element; both are represented by width == 0.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid as the scalar form "1q".  */
      if (width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* The total vector size must be 64 or 128 bits, except for the
     special 2h (32-bit) form.  width == 0 (scalar/SVE) is exempt.  */
  if (width != 0 && width * element_size != 64 && width * element_size != 128
      && !(width == 2 && element_size == 16))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
866
867 /* *STR contains an SVE zero/merge predication suffix. Parse it into
868 *PARSED_TYPE and point *STR at the end of the suffix. */
869
870 static bfd_boolean
871 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
872 {
873 char *ptr = *str;
874
875 /* Skip '/'. */
876 gas_assert (*ptr == '/');
877 ptr++;
878 switch (TOLOWER (*ptr))
879 {
880 case 'z':
881 parsed_type->type = NT_zero;
882 break;
883 case 'm':
884 parsed_type->type = NT_merge;
885 break;
886 default:
887 if (*ptr != '\0' && *ptr != ',')
888 first_error_fmt (_("unexpected character `%c' in predication type"),
889 *ptr);
890 else
891 first_error (_("missing predication type"));
892 return FALSE;
893 }
894 parsed_type->width = 0;
895 *str = ptr + 1;
896 return TRUE;
897 }
898
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start with "no shape information".  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the concrete type actually parsed.  */
  type = reg->type;

  /* Vector (Vn), SVE vector (Zn) and SVE predicate (Pn) registers may
     carry a ".T" shape suffix; Pn may alternatively carry a "/z" or
     "/m" predication suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register if of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists ofcourse.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Optional "[index]" element selector.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list == TRUE)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1034
1035 /* Parse register.
1036
1037 Return the register number on success; return PARSE_FAIL otherwise.
1038
1039 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1040 the register (e.g. NEON double or quad reg when either has been requested).
1041
1042 If this is a NEON vector register with additional type information, fill
1043 in the struct pointed to by VECTYPE (if non-NULL).
1044
1045 This parser does not handle register list. */
1046
1047 static int
1048 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1049 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1050 {
1051 struct vector_type_el atype;
1052 char *str = *ccp;
1053 int reg = parse_typed_reg (&str, type, rtype, &atype,
1054 /*in_reg_list= */ FALSE);
1055
1056 if (reg == PARSE_FAIL)
1057 return PARSE_FAIL;
1058
1059 if (vectype)
1060 *vectype = atype;
1061
1062 *ccp = str;
1063
1064 return reg;
1065 }
1066
1067 static inline bfd_boolean
1068 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1069 {
1070 return
1071 e1.type == e2.type
1072 && e1.defined == e2.defined
1073 && e1.width == e2.width && e1.index == e2.index;
1074 }
1075
1076 /* This function parses a list of vector registers of type TYPE.
1077 On success, it returns the parsed register list information in the
1078 following encoded format:
1079
1080 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1081 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1082
1083 The information of the register shape and/or index is returned in
1084 *VECTYPE.
1085
1086 It returns PARSE_FAIL if the register list is invalid.
1087
1088 The list contains one to four registers.
1089 Each register can be one of:
1090 <Vt>.<T>[<index>]
1091 <Vt>.<T>
1092 All <T> should be identical.
1093 All <index> should be identical.
1094 There are restrictions on <Vt> numbers which are checked later
1095 (by reg_list_valid_p). */
1096
static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  /* The list must be enclosed in braces.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  /* The previously parsed register opens the range.  */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      /* If any element carries an index, the whole list must end with
	 a shared "[index]" (parsed below).  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* Ranges must be ascending: "Vm - Vn" with m <= n.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* Remember the shape of the first register; every later
	     register must match it exactly.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Pack each register number of the (possibly one-element) range
	 into successive 5-bit fields of RET_VAL; see the encoding in
	 the function comment above.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* Parse the shared element index, e.g. the "[2]" in "{v0.s, v1.s}[2]".  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1236
1237 /* Directives: register aliases. */
1238
1239 static reg_entry *
1240 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1241 {
1242 reg_entry *new;
1243 const char *name;
1244
1245 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1246 {
1247 if (new->builtin)
1248 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1249 str);
1250
1251 /* Only warn about a redefinition if it's not defined as the
1252 same register. */
1253 else if (new->number != number || new->type != type)
1254 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1255
1256 return NULL;
1257 }
1258
1259 name = xstrdup (str);
1260 new = XNEW (reg_entry);
1261
1262 new->name = name;
1263 new->number = number;
1264 new->type = type;
1265 new->builtin = FALSE;
1266
1267 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1268 abort ();
1269
1270 return new;
1271 }
1272
1273 /* Look for the .req directive. This is of the form:
1274
1275 new_register_name .req existing_register_name
1276
1277 If we find one, or if it looks sufficiently like one that we want to
1278 handle any error here, return TRUE. Otherwise return FALSE. */
1279
static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must name a known register or alias.  */
  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only create a case variant when it differs from the alias as
	 written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	     foo .req r0
	     Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1352
1353 /* Should never be called, as .req goes between the alias and the
1354 register name, not at the beginning of the line. */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* .req is infix ("ALIAS .req REG"); if this handler runs, .req
     appeared at the start of a statement, which is always invalid.  */
  as_bad (_("invalid syntax for .req directive"));
}
1360
1361 /* The .unreq directive deletes an alias which was previously defined
1362 by .req. For example:
1363
1364 my_alias .req r11
1365 .unreq my_alias */
1366
1367 static void
1368 s_unreq (int a ATTRIBUTE_UNUSED)
1369 {
1370 char *name;
1371 char saved_char;
1372
1373 name = input_line_pointer;
1374
1375 while (*input_line_pointer != 0
1376 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1377 ++input_line_pointer;
1378
1379 saved_char = *input_line_pointer;
1380 *input_line_pointer = 0;
1381
1382 if (!*name)
1383 as_bad (_("invalid syntax for .unreq directive"));
1384 else
1385 {
1386 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1387
1388 if (!reg)
1389 as_bad (_("unknown register alias '%s'"), name);
1390 else if (reg->builtin)
1391 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1392 name);
1393 else
1394 {
1395 char *p;
1396 char *nbuf;
1397
1398 hash_delete (aarch64_reg_hsh, name, FALSE);
1399 free ((char *) reg->name);
1400 free (reg);
1401
1402 /* Also locate the all upper case and all lower case versions.
1403 Do not complain if we cannot find one or the other as it
1404 was probably deleted above. */
1405
1406 nbuf = strdup (name);
1407 for (p = nbuf; *p; p++)
1408 *p = TOUPPER (*p);
1409 reg = hash_find (aarch64_reg_hsh, nbuf);
1410 if (reg)
1411 {
1412 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1413 free ((char *) reg->name);
1414 free (reg);
1415 }
1416
1417 for (p = nbuf; *p; p++)
1418 *p = TOLOWER (*p);
1419 reg = hash_find (aarch64_reg_hsh, nbuf);
1420 if (reg)
1421 {
1422 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1423 free ((char *) reg->name);
1424 free (reg);
1425 }
1426
1427 free (nbuf);
1428 }
1429 }
1430
1431 *input_line_pointer = saved_char;
1432 demand_empty_rest_of_line ();
1433 }
1434
1435 /* Directives: Instruction set selection. */
1436
1437 #ifdef OBJ_ELF
1438 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1439 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1440 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1441 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1442
1443 /* Create a new mapping symbol for the transition to STATE. */
1444
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Choose the AAELF64 mapping symbol name for STATE: "$x" marks the
     start of code, "$d" the start of data.  Both are untyped.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Drop the previous last symbol if the new one lands at exactly
	 the same offset.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1498
1499 /* We must sometimes convert a region marked as code to data during
1500 code alignment, if an odd number of bytes have to be padded. The
1501 code mapping symbol is pushed to an aligned address. */
1502
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* If that symbol was also the first in the frag, clear that
	 record too.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then restore STATE after the BYTES of
     padding.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1526
1527 static void mapping_state_2 (enum mstate state, int max_chars);
1528
1529 /* Set the mapping state to STATE. Only call this when about to
1530 emit some STATE bytes to the file. */
1531
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
         if we're at the first frag, check it's size > 0;
         if we're not at the first frag, then for sure
         the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* Mark everything before this first instruction as data.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1570
1571 /* Same as mapping_state, but MAX_CHARS bytes have already been
1572 allocated. Put the mapping symbol that far back. */
1573
1574 static void
1575 mapping_state_2 (enum mstate state, int max_chars)
1576 {
1577 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1578
1579 if (!SEG_NORMAL (now_seg))
1580 return;
1581
1582 if (mapstate == state)
1583 /* The mapping symbol has already been emitted.
1584 There is nothing else to do. */
1585 return;
1586
1587 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1588 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1589 }
1590 #else
1591 #define mapping_state(x) /* nothing */
1592 #define mapping_state_2(x, y) /* nothing */
1593 #endif
1594
1595 /* Directives: sectioning and alignment. */
1596
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  /* BSS contents are data as far as mapping symbols are concerned.  */
  mapping_state (MAP_DATA);
}
1606
/* Handle the .even directive: align the output to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1618
1619 /* Directives: Literal pools. */
1620
1621 static literal_pool *
1622 find_literal_pool (int size)
1623 {
1624 literal_pool *pool;
1625
1626 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1627 {
1628 if (pool->section == now_seg
1629 && pool->sub_section == now_subseg && pool->size == size)
1630 break;
1631 }
1632
1633 return pool;
1634 }
1635
/* Return the literal pool for SIZE-byte entries in the current
   section/subsection, creating it (and its label symbol) if needed.  */

static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      /* NOTE(review): XNEW-based allocation elsewhere in gas aborts on
	 failure rather than returning NULL, which would make this check
	 dead code -- confirm before relying on the NULL return.  */
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
         section.  If we were generating "small" model code where we
         knew that all code and initialised data was within 1MB then
         we could output literals to mergeable, read-only data
         sections. */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1680
1681 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1682 Return TRUE on success, otherwise return FALSE. */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on symbol(s) plus addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP in place so the caller emits a reference to the pool
     label at the byte offset of this entry, instead of the literal.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1740
/* Can't use symbol_new here, so we have to create a symbol and then
   assign it a value at a later date.  That's what these functions do.  */
1743
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its own
     storage.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1791
1792
/* Handle the .ltorg/.pool directives: dump all pending literal pools
   (4-, 8- and 16-byte entry sizes) at the current location.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Iterate over the supported entry sizes: 1 << 2 .. 1 << 4 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 control character keeps the generated name from
	 colliding with anything a user could write.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Attach the pool's placeholder symbol to this spot and emit
	 every recorded literal.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1851
1852 #ifdef OBJ_ELF
1853 /* Forward declarations for functions below, in the MD interface
1854 section. */
1855 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1856 static struct reloc_table_entry * find_reloc_table_entry (char **);
1857
1858 /* Directives: Data. */
1859 /* N.B. the support for relocation suffix in this directive needs to be
1860 implemented properly. */
1861
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* A bare directive with no operands emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbol operand may be followed by "#:suffix:"; such
	     relocation suffixes are recognized here but not yet
	     supported (see the note above this function).  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1913
1914 #endif /* OBJ_ELF */
1915
1916 /* Output a 32-bit word, but mark as an instruction. */
1917
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* A bare .inst with no operands emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Byte-swap on big-endian targets so the emitted word has the
	 byte order of an instruction rather than of data.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1968
1969 #ifdef OBJ_ELF
1970 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1971
1972 static void
1973 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
1974 {
1975 expressionS exp;
1976
1977 expression (&exp);
1978 frag_grow (4);
1979 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1980 BFD_RELOC_AARCH64_TLSDESC_ADD);
1981
1982 demand_empty_rest_of_line ();
1983 }
1984
1985 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1986
1987 static void
1988 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1989 {
1990 expressionS exp;
1991
1992 /* Since we're just labelling the code, there's no need to define a
1993 mapping symbol. */
1994 expression (&exp);
1995 /* Make sure there is enough room in this frag for the following
1996 blr. This trick only works if the blr follows immediately after
1997 the .tlsdesc directive. */
1998 frag_grow (4);
1999 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2000 BFD_RELOC_AARCH64_TLSDESC_CALL);
2001
2002 demand_empty_rest_of_line ();
2003 }
2004
2005 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2006
2007 static void
2008 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2009 {
2010 expressionS exp;
2011
2012 expression (&exp);
2013 frag_grow (4);
2014 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2015 BFD_RELOC_AARCH64_TLSDESC_LDR);
2016
2017 demand_empty_rest_of_line ();
2018 }
2019 #endif /* OBJ_ELF */
2020
2021 static void s_aarch64_arch (int);
2022 static void s_aarch64_cpu (int);
2023 static void s_aarch64_arch_extension (int);
2024
2025 /* This table describes all the machine specific pseudo-ops the assembler
2026 has to support. The fields are:
2027 pseudo-op name without dot
2028 function to call to execute this pseudo-op
2029 Integer arg to pass to the function. */
2030
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* .pool is an alias for .ltorg.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the data size in bytes: .word/.long emit
     4 bytes, .xword/.dword emit 8.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
2054 \f
2055
2056 /* Check whether STR points to a register name followed by a comma or the
2057 end of line; REG_TYPE indicates which register types are checked
2058 against. Return TRUE if STR is such a register name; otherwise return
2059 FALSE. The function does not intend to produce any diagnostics, but since
2060 the register parser aarch64_reg_parse, which is called by this function,
2061 does produce diagnostics, we call clear_error to clear any diagnostics
2062 that may be generated by aarch64_reg_parse.
2063 Also, the function returns FALSE directly if there is any user error
2064 present at the function entry. This prevents the existing diagnostics
2065 state from being spoiled.
2066 The function currently serves parse_constant_immediate and
2067 parse_big_immediate only. */
2068 static bfd_boolean
2069 reg_name_p (char *str, aarch64_reg_type reg_type)
2070 {
2071 int reg;
2072
2073 /* Prevent the diagnostics state from being spoiled. */
2074 if (error_p ())
2075 return FALSE;
2076
2077 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2078
2079 /* Clear the parsing error that may be set by the reg parser. */
2080 clear_error ();
2081
2082 if (reg == PARSE_FAIL)
2083 return FALSE;
2084
2085 skip_whitespace (str);
2086 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2087 return TRUE;
2088
2089 return FALSE;
2090 }
2091
2092 /* Parser functions used exclusively in instruction operands. */
2093
2094 /* Parse an immediate expression which may not be constant.
2095
2096 To prevent the expression parser from pushing a register name
2097 into the symbol table as an undefined symbol, firstly a check is
2098 done to find out whether STR is a register of type REG_TYPE followed
2099 by a comma or the end of line. Return FALSE if STR is such a string. */
2100
2101 static bfd_boolean
2102 parse_immediate_expression (char **str, expressionS *exp,
2103 aarch64_reg_type reg_type)
2104 {
2105 if (reg_name_p (*str, reg_type))
2106 {
2107 set_recoverable_error (_("immediate operand required"));
2108 return FALSE;
2109 }
2110
2111 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2112
2113 if (exp->X_op == O_absent)
2114 {
2115 set_fatal_syntax_error (_("missing immediate expression"));
2116 return FALSE;
2117 }
2118
2119 return TRUE;
2120 }
2121
2122 /* Constant immediate-value read function for use in insn parsing.
2123 STR points to the beginning of the immediate (with the optional
2124 leading #); *VAL receives the value. REG_TYPE says which register
2125 names should be treated as registers rather than as symbolic immediates.
2126
2127 Return TRUE on success; otherwise return FALSE. */
2128
2129 static bfd_boolean
2130 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2131 {
2132 expressionS exp;
2133
2134 if (! parse_immediate_expression (str, &exp, reg_type))
2135 return FALSE;
2136
2137 if (exp.X_op != O_constant)
2138 {
2139 set_syntax_error (_("constant expression required"));
2140 return FALSE;
2141 }
2142
2143 *val = exp.X_add_number;
2144 return TRUE;
2145 }
2146
/* Compress the IEEE single-precision bit pattern IMM into the 8-bit
   AArch64 floating-point immediate encoding: the sign bit plus the low
   exponent and high fraction bits.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t exp_frac = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;		/* b[31]    -> b[7].  */

  return exp_frac | sign;
}
2153
2154 /* Return TRUE if the single-precision floating-point value encoded in IMM
2155 can be expressed in the AArch64 8-bit signed floating-point format with
2156 3-bit exponent and normalized 4 bits of precision; in other words, the
2157 floating-point value must be expressable as
2158 (+/-) n / 16 * power (2, r)
2159 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2160
2161 static bfd_boolean
2162 aarch64_imm_float_p (uint32_t imm)
2163 {
2164 /* If a single-precision floating-point value has the following bit
2165 pattern, it can be expressed in the AArch64 8-bit floating-point
2166 format:
2167
2168 3 32222222 2221111111111
2169 1 09876543 21098765432109876543210
2170 n Eeeeeexx xxxx0000000000000000000
2171
2172 where n, e and each x are either 0 or 1 independently, with
2173 E == ~ e. */
2174
2175 uint32_t pattern;
2176
2177 /* Prepare the pattern for 'Eeeeee'. */
2178 if (((imm >> 30) & 0x1) == 0)
2179 pattern = 0x3e000000;
2180 else
2181 pattern = 0x40000000;
2182
2183 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2184 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2185 }
2186
2187 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2188 as an IEEE float without any loss of precision. Store the value in
2189 *FPWORD if so. */
2190
static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       ----------------------------->     nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~: the three bits after E must be the complement of E.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111: the exponent must not overflow the
     narrower float exponent field.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Reassemble the sign, rebiased exponent and truncated significand
     into a single-precision word.  */
  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));			/* 3 S bits.  */
  return TRUE;
}
2234
2235 /* Return true if we should treat OPERAND as a double-precision
2236 floating-point operand rather than a single-precision one. */
2237 static bfd_boolean
2238 double_precision_operand_p (const aarch64_opnd_info *operand)
2239 {
2240 /* Check for unsuffixed SVE registers, which are allowed
2241 for LDR and STR but not in instructions that require an
2242 immediate. We get better error messages if we arbitrarily
2243 pick one size, parse the immediate normally, and then
2244 report the match failure in the normal way. */
2245 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2246 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2247 }
2248
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit encoding down to 32 bits; fail if any
	     precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision encoding must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else
    {
      if (reg_name_p (str, reg_type))
	{
	  /* A bare register name where an immediate is required.  */
	  set_recoverable_error (_("immediate operand required"));
	  return FALSE;
	}

      /* We must not accidentally parse an integer as a floating-point number.
	 Make sure that the value we parse is not an integer by checking for
	 special characters '.' or 'e'.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Convert the decimal text into the single-precision IEEE bit
	 pattern, delivered by atof_ieee as an array of LITTLENUMs.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2341
2342 /* Less-generic immediate-value read function with the possibility of loading
2343 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2344 instructions.
2345
2346 To prevent the expression parser from pushing a register name into the
2347 symbol table as an undefined symbol, a check is firstly done to find
2348 out whether STR is a register of type REG_TYPE followed by a comma or
2349 the end of line. Return FALSE if STR is such a register. */
2350
2351 static bfd_boolean
2352 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2353 {
2354 char *ptr = *str;
2355
2356 if (reg_name_p (ptr, reg_type))
2357 {
2358 set_syntax_error (_("immediate operand required"));
2359 return FALSE;
2360 }
2361
2362 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2363
2364 if (inst.reloc.exp.X_op == O_constant)
2365 *imm = inst.reloc.exp.X_add_number;
2366
2367 *str = ptr;
2368
2369 return TRUE;
2370 }
2371
2372 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2373 if NEED_LIBOPCODES is non-zero, the fixup will need
2374 assistance from the libopcodes. */
2375
2376 static inline void
2377 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2378 const aarch64_opnd_info *operand,
2379 int need_libopcodes_p)
2380 {
2381 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2382 reloc->opnd = operand->type;
2383 if (need_libopcodes_p)
2384 reloc->need_libopcodes_p = 1;
2385 };
2386
2387 /* Return TRUE if the instruction needs to be fixed up later internally by
2388 the GAS; otherwise return FALSE. */
2389
2390 static inline bfd_boolean
2391 aarch64_gas_internal_fixup_p (void)
2392 {
2393 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2394 }
2395
/* Assign the immediate value to the relevant field in *OPERAND if
   RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
   needs an internal fixup in a later stage.
   ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
   IMM.VALUE that may get assigned with the constant.  */
static inline void
assign_imm_if_const_or_fixup_later (struct reloc *reloc,
				    aarch64_opnd_info *operand,
				    int addr_off_p,
				    int need_libopcodes_p,
				    int skip_p)
{
  if (reloc->exp.X_op == O_constant)
    {
      if (addr_off_p)
	operand->addr.offset.imm = reloc->exp.X_add_number;
      else
	operand->imm.value = reloc->exp.X_add_number;
      /* The constant has been consumed; no relocation is needed.  */
      reloc->type = BFD_RELOC_UNUSED;
    }
  else
    {
      aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
      /* Tell libopcodes to ignore this operand or not.  This is helpful
	 when one of the operands needs to be fixed up later but we need
	 libopcodes to check the other operands.  */
      operand->skip = skip_p;
    }
}
2425
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero if PC-relative.  */
  bfd_reloc_code_real_type adr_type;	/* Reloc for ADR, or 0.  */
  bfd_reloc_code_real_type adrp_type;	/* Reloc for ADRP, or 0.  */
  bfd_reloc_code_real_type movw_type;	/* Reloc for MOVZ/MOVN/MOVK, or 0.  */
  bfd_reloc_code_real_type add_type;	/* Reloc for ADD immediate, or 0.  */
  bfd_reloc_code_real_type ldst_type;	/* Reloc for LDR/STR offset, or 0.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Reloc for LDR (literal), or 0.  */
};
2445
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVK, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Lower 16 bits of the GOT offset of the TLS IE entry: MOVK, no check.  */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bits of the GOT offset of the TLS IE entry: MOVZ.  */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVK.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   0,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15-bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14-bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
2910
2911 /* Given the address of a pointer pointing to the textual name of a
2912 relocation as may appear in assembler source, attempt to find its
2913 details in reloc_table. The pointer will be updated to the character
2914 after the trailing colon. On failure, NULL will be returned;
2915 otherwise return the reloc_table_entry. */
2916
2917 static struct reloc_table_entry *
2918 find_reloc_table_entry (char **str)
2919 {
2920 unsigned int i;
2921 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2922 {
2923 int length = strlen (reloc_table[i].name);
2924
2925 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2926 && (*str)[length] == ':')
2927 {
2928 *str += (length + 1);
2929 return &reloc_table[i];
2930 }
2931 }
2932
2933 return NULL;
2934 }
2935
/* Mode argument to parse_shift and parse_shifter_operand.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
2950
2951 /* Parse a <shift> operator on an AArch64 data processing instruction.
2952 Return TRUE on success; otherwise return FALSE. */
2953 static bfd_boolean
2954 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2955 {
2956 const struct aarch64_name_value_pair *shift_op;
2957 enum aarch64_modifier_kind kind;
2958 expressionS exp;
2959 int exp_has_prefix;
2960 char *s = *str;
2961 char *p = s;
2962
2963 for (p = *str; ISALPHA (*p); p++)
2964 ;
2965
2966 if (p == *str)
2967 {
2968 set_syntax_error (_("shift expression expected"));
2969 return FALSE;
2970 }
2971
2972 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2973
2974 if (shift_op == NULL)
2975 {
2976 set_syntax_error (_("shift operator expected"));
2977 return FALSE;
2978 }
2979
2980 kind = aarch64_get_operand_modifier (shift_op);
2981
2982 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2983 {
2984 set_syntax_error (_("invalid use of 'MSL'"));
2985 return FALSE;
2986 }
2987
2988 if (kind == AARCH64_MOD_MUL
2989 && mode != SHIFTED_MUL
2990 && mode != SHIFTED_MUL_VL)
2991 {
2992 set_syntax_error (_("invalid use of 'MUL'"));
2993 return FALSE;
2994 }
2995
2996 switch (mode)
2997 {
2998 case SHIFTED_LOGIC_IMM:
2999 if (aarch64_extend_operator_p (kind) == TRUE)
3000 {
3001 set_syntax_error (_("extending shift is not permitted"));
3002 return FALSE;
3003 }
3004 break;
3005
3006 case SHIFTED_ARITH_IMM:
3007 if (kind == AARCH64_MOD_ROR)
3008 {
3009 set_syntax_error (_("'ROR' shift is not permitted"));
3010 return FALSE;
3011 }
3012 break;
3013
3014 case SHIFTED_LSL:
3015 if (kind != AARCH64_MOD_LSL)
3016 {
3017 set_syntax_error (_("only 'LSL' shift is permitted"));
3018 return FALSE;
3019 }
3020 break;
3021
3022 case SHIFTED_MUL:
3023 if (kind != AARCH64_MOD_MUL)
3024 {
3025 set_syntax_error (_("only 'MUL' is permitted"));
3026 return FALSE;
3027 }
3028 break;
3029
3030 case SHIFTED_MUL_VL:
3031 /* "MUL VL" consists of two separate tokens. Require the first
3032 token to be "MUL" and look for a following "VL". */
3033 if (kind == AARCH64_MOD_MUL)
3034 {
3035 skip_whitespace (p);
3036 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3037 {
3038 p += 2;
3039 kind = AARCH64_MOD_MUL_VL;
3040 break;
3041 }
3042 }
3043 set_syntax_error (_("only 'MUL VL' is permitted"));
3044 return FALSE;
3045
3046 case SHIFTED_REG_OFFSET:
3047 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3048 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3049 {
3050 set_fatal_syntax_error
3051 (_("invalid shift for the register offset addressing mode"));
3052 return FALSE;
3053 }
3054 break;
3055
3056 case SHIFTED_LSL_MSL:
3057 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3058 {
3059 set_syntax_error (_("invalid shift operator"));
3060 return FALSE;
3061 }
3062 break;
3063
3064 default:
3065 abort ();
3066 }
3067
3068 /* Whitespace can appear here if the next thing is a bare digit. */
3069 skip_whitespace (p);
3070
3071 /* Parse shift amount. */
3072 exp_has_prefix = 0;
3073 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3074 exp.X_op = O_absent;
3075 else
3076 {
3077 if (is_immediate_prefix (*p))
3078 {
3079 p++;
3080 exp_has_prefix = 1;
3081 }
3082 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3083 }
3084 if (kind == AARCH64_MOD_MUL_VL)
3085 /* For consistency, give MUL VL the same shift amount as an implicit
3086 MUL #1. */
3087 operand->shifter.amount = 1;
3088 else if (exp.X_op == O_absent)
3089 {
3090 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
3091 {
3092 set_syntax_error (_("missing shift amount"));
3093 return FALSE;
3094 }
3095 operand->shifter.amount = 0;
3096 }
3097 else if (exp.X_op != O_constant)
3098 {
3099 set_syntax_error (_("constant shift amount required"));
3100 return FALSE;
3101 }
3102 /* For parsing purposes, MUL #n has no inherent range. The range
3103 depends on the operand and will be checked by operand-specific
3104 routines. */
3105 else if (kind != AARCH64_MOD_MUL
3106 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3107 {
3108 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3109 return FALSE;
3110 }
3111 else
3112 {
3113 operand->shifter.amount = exp.X_add_number;
3114 operand->shifter.amount_present = 1;
3115 }
3116
3117 operand->shifter.operator_present = 1;
3118 operand->shifter.kind = kind;
3119
3120 *str = p;
3121 return TRUE;
3122 }
3123
3124 /* Parse a <shifter_operand> for a data processing instruction:
3125
3126 #<immediate>
3127 #<immediate>, LSL #imm
3128
3129 Validation of immediate operands is deferred to md_apply_fix.
3130
3131 Return TRUE on success; otherwise return FALSE. */
3132
3133 static bfd_boolean
3134 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3135 enum parse_shift_mode mode)
3136 {
3137 char *p;
3138
3139 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3140 return FALSE;
3141
3142 p = *str;
3143
3144 /* Accept an immediate expression. */
3145 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3146 return FALSE;
3147
3148 /* Accept optional LSL for arithmetic immediate values. */
3149 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3150 if (! parse_shift (&p, operand, SHIFTED_LSL))
3151 return FALSE;
3152
3153 /* Not accept any shifter for logical immediate values. */
3154 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3155 && parse_shift (&p, operand, mode))
3156 {
3157 set_syntax_error (_("unexpected shift operator"));
3158 return FALSE;
3159 }
3160
3161 *str = p;
3162 return TRUE;
3163 }
3164
3165 /* Parse a <shifter_operand> for a data processing instruction:
3166
3167 <Rm>
3168 <Rm>, <shift>
3169 #<immediate>
3170 #<immediate>, LSL #imm
3171
3172 where <shift> is handled by parse_shift above, and the last two
3173 cases are handled by the function above.
3174
3175 Validation of immediate operands is deferred to md_apply_fix.
3176
3177 Return TRUE on success; otherwise return FALSE. */
3178
3179 static bfd_boolean
3180 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3181 enum parse_shift_mode mode)
3182 {
3183 const reg_entry *reg;
3184 aarch64_opnd_qualifier_t qualifier;
3185 enum aarch64_operand_class opd_class
3186 = aarch64_get_operand_class (operand->type);
3187
3188 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3189 if (reg)
3190 {
3191 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3192 {
3193 set_syntax_error (_("unexpected register in the immediate operand"));
3194 return FALSE;
3195 }
3196
3197 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3198 {
3199 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3200 return FALSE;
3201 }
3202
3203 operand->reg.regno = reg->number;
3204 operand->qualifier = qualifier;
3205
3206 /* Accept optional shift operation on register. */
3207 if (! skip_past_comma (str))
3208 return TRUE;
3209
3210 if (! parse_shift (str, operand, mode))
3211 return FALSE;
3212
3213 return TRUE;
3214 }
3215 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3216 {
3217 set_syntax_error
3218 (_("integer register expected in the extended/shifted operand "
3219 "register"));
3220 return FALSE;
3221 }
3222
3223 /* We have a shifted immediate variable. */
3224 return parse_shifter_operand_imm (str, operand, mode);
3225 }
3226
/* Parse a shifter operand that may be prefixed by a relocation modifier
   such as "#:lo12:sym".  Return TRUE on success; return FALSE otherwise.  */

static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over "#:" or ":" to reach the modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* This context emits the ADD-immediate variant; the modifier
	 must provide one.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3287
3288 /* Parse all forms of an address expression. Information is written
3289 to *OPERAND and/or inst.reloc.
3290
3291 The A64 instruction set has the following addressing modes:
3292
3293 Offset
3294 [base] // in SIMD ld/st structure
3295 [base{,#0}] // in ld/st exclusive
3296 [base{,#imm}]
3297 [base,Xm{,LSL #imm}]
3298 [base,Xm,SXTX {#imm}]
3299 [base,Wm,(S|U)XTW {#imm}]
3300 Pre-indexed
3301 [base,#imm]!
3302 Post-indexed
3303 [base],#imm
3304 [base],Xm // in SIMD ld/st structure
3305 PC-relative (literal)
3306 label
3307 SVE:
3308 [base,#imm,MUL VL]
3309 [base,Zm.D{,LSL #imm}]
3310 [base,Zm.S,(S|U)XTW {#imm}]
3311 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3312 [Zn.S,#imm]
3313 [Zn.D,#imm]
3314 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3315 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3316 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3317
3318 (As a convenience, the notation "=immediate" is permitted in conjunction
3319 with the pc-relative literal load instructions to automatically place an
3320 immediate value or symbolic address in a nearby literal pool and generate
3321 a hidden label which references it.)
3322
3323 Upon a successful parsing, the address structure in *OPERAND will be
3324 filled in the following way:
3325
3326 .base_regno = <base>
3327 .offset.is_reg // 1 if the offset is a register
3328 .offset.imm = <imm>
3329 .offset.regno = <Rm>
3330
3331 For different addressing modes defined in the A64 ISA:
3332
3333 Offset
3334 .pcrel=0; .preind=1; .postind=0; .writeback=0
3335 Pre-indexed
3336 .pcrel=0; .preind=1; .postind=0; .writeback=1
3337 Post-indexed
3338 .pcrel=0; .preind=0; .postind=1; .writeback=1
3339 PC-relative (literal)
3340 .pcrel=1; .preind=1; .postind=0; .writeback=0
3341
3342 The shift/extension information, if any, will be stored in .shifter.
3343 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3344 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3345 corresponding register.
3346
3347 BASE_TYPE says which types of base register should be accepted and
3348 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3349 is the type of shifter that is allowed for immediate offsets,
3350 or SHIFTED_NONE if none.
3351
3352 In all other respects, it is the caller's responsibility to check
3353 for addressing modes not supported by the instruction, and to set
3354 inst.reloc.type. */
3355
3356 static bfd_boolean
3357 parse_address_main (char **str, aarch64_opnd_info *operand,
3358 aarch64_opnd_qualifier_t *base_qualifier,
3359 aarch64_opnd_qualifier_t *offset_qualifier,
3360 aarch64_reg_type base_type, aarch64_reg_type offset_type,
3361 enum parse_shift_mode imm_shift_mode)
3362 {
3363 char *p = *str;
3364 const reg_entry *reg;
3365 expressionS *exp = &inst.reloc.exp;
3366
3367 *base_qualifier = AARCH64_OPND_QLF_NIL;
3368 *offset_qualifier = AARCH64_OPND_QLF_NIL;
3369 if (! skip_past_char (&p, '['))
3370 {
3371 /* =immediate or label. */
3372 operand->addr.pcrel = 1;
3373 operand->addr.preind = 1;
3374
3375 /* #:<reloc_op>:<symbol> */
3376 skip_past_char (&p, '#');
3377 if (skip_past_char (&p, ':'))
3378 {
3379 bfd_reloc_code_real_type ty;
3380 struct reloc_table_entry *entry;
3381
3382 /* Try to parse a relocation modifier. Anything else is
3383 an error. */
3384 entry = find_reloc_table_entry (&p);
3385 if (! entry)
3386 {
3387 set_syntax_error (_("unknown relocation modifier"));
3388 return FALSE;
3389 }
3390
3391 switch (operand->type)
3392 {
3393 case AARCH64_OPND_ADDR_PCREL21:
3394 /* adr */
3395 ty = entry->adr_type;
3396 break;
3397
3398 default:
3399 ty = entry->ld_literal_type;
3400 break;
3401 }
3402
3403 if (ty == 0)
3404 {
3405 set_syntax_error
3406 (_("this relocation modifier is not allowed on this "
3407 "instruction"));
3408 return FALSE;
3409 }
3410
3411 /* #:<reloc_op>: */
3412 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3413 {
3414 set_syntax_error (_("invalid relocation expression"));
3415 return FALSE;
3416 }
3417
3418 /* #:<reloc_op>:<expr> */
3419 /* Record the relocation type. */
3420 inst.reloc.type = ty;
3421 inst.reloc.pc_rel = entry->pc_rel;
3422 }
3423 else
3424 {
3425
3426 if (skip_past_char (&p, '='))
3427 /* =immediate; need to generate the literal in the literal pool. */
3428 inst.gen_lit_pool = 1;
3429
3430 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3431 {
3432 set_syntax_error (_("invalid address"));
3433 return FALSE;
3434 }
3435 }
3436
3437 *str = p;
3438 return TRUE;
3439 }
3440
3441 /* [ */
3442
3443 reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3444 if (!reg || !aarch64_check_reg_type (reg, base_type))
3445 {
3446 set_syntax_error (_(get_reg_expected_msg (base_type)));
3447 return FALSE;
3448 }
3449 operand->addr.base_regno = reg->number;
3450
3451 /* [Xn */
3452 if (skip_past_comma (&p))
3453 {
3454 /* [Xn, */
3455 operand->addr.preind = 1;
3456
3457 reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3458 if (reg)
3459 {
3460 if (!aarch64_check_reg_type (reg, offset_type))
3461 {
3462 set_syntax_error (_(get_reg_expected_msg (offset_type)));
3463 return FALSE;
3464 }
3465
3466 /* [Xn,Rm */
3467 operand->addr.offset.regno = reg->number;
3468 operand->addr.offset.is_reg = 1;
3469 /* Shifted index. */
3470 if (skip_past_comma (&p))
3471 {
3472 /* [Xn,Rm, */
3473 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3474 /* Use the diagnostics set in parse_shift, so not set new
3475 error message here. */
3476 return FALSE;
3477 }
3478 /* We only accept:
3479 [base,Xm{,LSL #imm}]
3480 [base,Xm,SXTX {#imm}]
3481 [base,Wm,(S|U)XTW {#imm}] */
3482 if (operand->shifter.kind == AARCH64_MOD_NONE
3483 || operand->shifter.kind == AARCH64_MOD_LSL
3484 || operand->shifter.kind == AARCH64_MOD_SXTX)
3485 {
3486 if (*offset_qualifier == AARCH64_OPND_QLF_W)
3487 {
3488 set_syntax_error (_("invalid use of 32-bit register offset"));
3489 return FALSE;
3490 }
3491 if (aarch64_get_qualifier_esize (*base_qualifier)
3492 != aarch64_get_qualifier_esize (*offset_qualifier))
3493 {
3494 set_syntax_error (_("offset has different size from base"));
3495 return FALSE;
3496 }
3497 }
3498 else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3499 {
3500 set_syntax_error (_("invalid use of 64-bit register offset"));
3501 return FALSE;
3502 }
3503 }
3504 else
3505 {
3506 /* [Xn,#:<reloc_op>:<symbol> */
3507 skip_past_char (&p, '#');
3508 if (skip_past_char (&p, ':'))
3509 {
3510 struct reloc_table_entry *entry;
3511
3512 /* Try to parse a relocation modifier. Anything else is
3513 an error. */
3514 if (!(entry = find_reloc_table_entry (&p)))
3515 {
3516 set_syntax_error (_("unknown relocation modifier"));
3517 return FALSE;
3518 }
3519
3520 if (entry->ldst_type == 0)
3521 {
3522 set_syntax_error
3523 (_("this relocation modifier is not allowed on this "
3524 "instruction"));
3525 return FALSE;
3526 }
3527
3528 /* [Xn,#:<reloc_op>: */
3529 /* We now have the group relocation table entry corresponding to
3530 the name in the assembler source. Next, we parse the
3531 expression. */
3532 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3533 {
3534 set_syntax_error (_("invalid relocation expression"));
3535 return FALSE;
3536 }
3537
3538 /* [Xn,#:<reloc_op>:<expr> */
3539 /* Record the load/store relocation type. */
3540 inst.reloc.type = entry->ldst_type;
3541 inst.reloc.pc_rel = entry->pc_rel;
3542 }
3543 else
3544 {
3545 if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3546 {
3547 set_syntax_error (_("invalid expression in the address"));
3548 return FALSE;
3549 }
3550 /* [Xn,<expr> */
3551 if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3552 /* [Xn,<expr>,<shifter> */
3553 if (! parse_shift (&p, operand, imm_shift_mode))
3554 return FALSE;
3555 }
3556 }
3557 }
3558
3559 if (! skip_past_char (&p, ']'))
3560 {
3561 set_syntax_error (_("']' expected"));
3562 return FALSE;
3563 }
3564
3565 if (skip_past_char (&p, '!'))
3566 {
3567 if (operand->addr.preind && operand->addr.offset.is_reg)
3568 {
3569 set_syntax_error (_("register offset not allowed in pre-indexed "
3570 "addressing mode"));
3571 return FALSE;
3572 }
3573 /* [Xn]! */
3574 operand->addr.writeback = 1;
3575 }
3576 else if (skip_past_comma (&p))
3577 {
3578 /* [Xn], */
3579 operand->addr.postind = 1;
3580 operand->addr.writeback = 1;
3581
3582 if (operand->addr.preind)
3583 {
3584 set_syntax_error (_("cannot combine pre- and post-indexing"));
3585 return FALSE;
3586 }
3587
3588 reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3589 if (reg)
3590 {
3591 /* [Xn],Xm */
3592 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3593 {
3594 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3595 return FALSE;
3596 }
3597
3598 operand->addr.offset.regno = reg->number;
3599 operand->addr.offset.is_reg = 1;
3600 }
3601 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3602 {
3603 /* [Xn],#expr */
3604 set_syntax_error (_("invalid expression in the address"));
3605 return FALSE;
3606 }
3607 }
3608
3609 /* If at this point neither .preind nor .postind is set, we have a
3610 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3611 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3612 {
3613 if (operand->addr.writeback)
3614 {
3615 /* Reject [Rn]! */
3616 set_syntax_error (_("missing offset in the pre-indexed address"));
3617 return FALSE;
3618 }
3619 operand->addr.preind = 1;
3620 inst.reloc.exp.X_op = O_constant;
3621 inst.reloc.exp.X_add_number = 0;
3622 }
3623
3624 *str = p;
3625 return TRUE;
3626 }
3627
3628 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3629 on success. */
3630 static bfd_boolean
3631 parse_address (char **str, aarch64_opnd_info *operand)
3632 {
3633 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3634 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3635 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3636 }
3637
3638 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3639 The arguments have the same meaning as for parse_address_main.
3640 Return TRUE on success. */
3641 static bfd_boolean
3642 parse_sve_address (char **str, aarch64_opnd_info *operand,
3643 aarch64_opnd_qualifier_t *base_qualifier,
3644 aarch64_opnd_qualifier_t *offset_qualifier)
3645 {
3646 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3647 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3648 SHIFTED_MUL_VL);
3649 }
3650
3651 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3652 Return TRUE on success; otherwise return FALSE. */
3653 static bfd_boolean
3654 parse_half (char **str, int *internal_fixup_p)
3655 {
3656 char *p = *str;
3657
3658 skip_past_char (&p, '#');
3659
3660 gas_assert (internal_fixup_p);
3661 *internal_fixup_p = 0;
3662
3663 if (*p == ':')
3664 {
3665 struct reloc_table_entry *entry;
3666
3667 /* Try to parse a relocation. Anything else is an error. */
3668 ++p;
3669 if (!(entry = find_reloc_table_entry (&p)))
3670 {
3671 set_syntax_error (_("unknown relocation modifier"));
3672 return FALSE;
3673 }
3674
3675 if (entry->movw_type == 0)
3676 {
3677 set_syntax_error
3678 (_("this relocation modifier is not allowed on this instruction"));
3679 return FALSE;
3680 }
3681
3682 inst.reloc.type = entry->movw_type;
3683 }
3684 else
3685 *internal_fixup_p = 1;
3686
3687 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3688 return FALSE;
3689
3690 *str = p;
3691 return TRUE;
3692 }
3693
3694 /* Parse an operand for an ADRP instruction:
3695 ADRP <Xd>, <label>
3696 Return TRUE on success; otherwise return FALSE. */
3697
3698 static bfd_boolean
3699 parse_adrp (char **str)
3700 {
3701 char *p;
3702
3703 p = *str;
3704 if (*p == ':')
3705 {
3706 struct reloc_table_entry *entry;
3707
3708 /* Try to parse a relocation. Anything else is an error. */
3709 ++p;
3710 if (!(entry = find_reloc_table_entry (&p)))
3711 {
3712 set_syntax_error (_("unknown relocation modifier"));
3713 return FALSE;
3714 }
3715
3716 if (entry->adrp_type == 0)
3717 {
3718 set_syntax_error
3719 (_("this relocation modifier is not allowed on this instruction"));
3720 return FALSE;
3721 }
3722
3723 inst.reloc.type = entry->adrp_type;
3724 }
3725 else
3726 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3727
3728 inst.reloc.pc_rel = 1;
3729
3730 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3731 return FALSE;
3732
3733 *str = p;
3734 return TRUE;
3735 }
3736
3737 /* Miscellaneous. */
3738
3739 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3740 of SIZE tokens in which index I gives the token for field value I,
3741 or is null if field value I is invalid. REG_TYPE says which register
3742 names should be treated as registers rather than as symbolic immediates.
3743
3744 Return true on success, moving *STR past the operand and storing the
3745 field value in *VAL. */
3746
3747 static int
3748 parse_enum_string (char **str, int64_t *val, const char *const *array,
3749 size_t size, aarch64_reg_type reg_type)
3750 {
3751 expressionS exp;
3752 char *p, *q;
3753 size_t i;
3754
3755 /* Match C-like tokens. */
3756 p = q = *str;
3757 while (ISALNUM (*q))
3758 q++;
3759
3760 for (i = 0; i < size; ++i)
3761 if (array[i]
3762 && strncasecmp (array[i], p, q - p) == 0
3763 && array[i][q - p] == 0)
3764 {
3765 *val = i;
3766 *str = q;
3767 return TRUE;
3768 }
3769
3770 if (!parse_immediate_expression (&p, &exp, reg_type))
3771 return FALSE;
3772
3773 if (exp.X_op == O_constant
3774 && (uint64_t) exp.X_add_number < size)
3775 {
3776 *val = exp.X_add_number;
3777 *str = p;
3778 return TRUE;
3779 }
3780
3781 /* Use the default error for this operand. */
3782 return FALSE;
3783 }
3784
3785 /* Parse an option for a preload instruction. Returns the encoding for the
3786 option, or PARSE_FAIL. */
3787
3788 static int
3789 parse_pldop (char **str)
3790 {
3791 char *p, *q;
3792 const struct aarch64_name_value_pair *o;
3793
3794 p = q = *str;
3795 while (ISALNUM (*q))
3796 q++;
3797
3798 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3799 if (!o)
3800 return PARSE_FAIL;
3801
3802 *str = q;
3803 return o->value;
3804 }
3805
3806 /* Parse an option for a barrier instruction. Returns the encoding for the
3807 option, or PARSE_FAIL. */
3808
3809 static int
3810 parse_barrier (char **str)
3811 {
3812 char *p, *q;
3813 const asm_barrier_opt *o;
3814
3815 p = q = *str;
3816 while (ISALPHA (*q))
3817 q++;
3818
3819 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3820 if (!o)
3821 return PARSE_FAIL;
3822
3823 *str = q;
3824 return o->value;
3825 }
3826
3827 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3828 return 0 if successful. Otherwise return PARSE_FAIL. */
3829
3830 static int
3831 parse_barrier_psb (char **str,
3832 const struct aarch64_name_value_pair ** hint_opt)
3833 {
3834 char *p, *q;
3835 const struct aarch64_name_value_pair *o;
3836
3837 p = q = *str;
3838 while (ISALPHA (*q))
3839 q++;
3840
3841 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3842 if (!o)
3843 {
3844 set_fatal_syntax_error
3845 ( _("unknown or missing option to PSB"));
3846 return PARSE_FAIL;
3847 }
3848
3849 if (o->value != 0x11)
3850 {
3851 /* PSB only accepts option name 'CSYNC'. */
3852 set_syntax_error
3853 (_("the specified option is not accepted for PSB"));
3854 return PARSE_FAIL;
3855 }
3856
3857 *str = q;
3858 *hint_opt = o;
3859 return 0;
3860 }
3861
3862 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3863 Returns the encoding for the option, or PARSE_FAIL.
3864
3865 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3866 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3867
3868 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3869 field, otherwise as a system register.
3870 */
3871
static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
	       int imple_defined_p, int pstatefield_p)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF; the copy loop stops
     writing at 31 characters, and the assertion below then fires if the
     name was too long for the buffer.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF be large enough.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Reject values outside the encodable field widths
	     (op0:2 bits, op1/op2:3 bits, Cn/Cm:4 bits).  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the five fields into a single encoding value.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	}
    }
  else
    {
      /* Known name: diagnose — but still return the encoding — if the
	 selected processor does not support it or it is deprecated.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
    }

  *str = q;
  return value;
}
3924
3925 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3926 for the option, or NULL. */
3927
3928 static const aarch64_sys_ins_reg *
3929 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3930 {
3931 char *p, *q;
3932 char buf[32];
3933 const aarch64_sys_ins_reg *o;
3934
3935 p = buf;
3936 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3937 if (p < buf + 31)
3938 *p++ = TOLOWER (*q);
3939 *p = '\0';
3940
3941 o = hash_find (sys_ins_regs, buf);
3942 if (!o)
3943 return NULL;
3944
3945 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
3946 as_bad (_("selected processor does not support system register "
3947 "name '%s'"), buf);
3948
3949 *str = q;
3950 return o;
3951 }
3952 \f
/* The po_* macros below expand inside a parsing routine that provides the
   locals they reference (`str', `val', `rtype', `reg', `qualifier',
   `info', `imm_reg_type') and a `failure' label to branch to on error.  */

/* Consume the character CHR from the input, or branch to `failure'.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of REGTYPE into VAL/RTYPE, or fail with the
   default error.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of REG_TYPE, storing its number
   and qualifier into INFO, or fail with the default error.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
#min " to "#max));						\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic value from ARRAY (see parse_enum_string) into VAL.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; branch to `failure' if it yields zero.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4004 \f
4005 /* encode the 12-bit imm field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  /* The imm12 field of an add/sub immediate occupies bits [21:10].  */
  uint32_t field = imm;

  field <<= 10;
  return field;
}
4011
4012 /* encode the shift amount field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  /* The shift-amount field of an add/sub immediate starts at bit 22.  */
  uint32_t field = cnt;

  field <<= 22;
  return field;
}
4018
4019
4020 /* encode the imm field of Adr instruction */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  /* Split the 21-bit ADR immediate: the low two bits go to [30:29]
     (immlo) and the remaining 19 bits go to [23:5] (immhi).  */
  uint32_t immlo = imm & 0x3;
  uint32_t immhi = (imm >> 2) & 0x7ffff;

  return (immlo << 29) | (immhi << 5);
}
4027
4028 /* encode the immediate field of Move wide immediate */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  /* The imm16 field of a move-wide instruction starts at bit 5.  */
  uint32_t field = imm;

  field <<= 5;
  return field;
}
4034
4035 /* encode the 26-bit offset of unconditional branch */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  /* Keep only the low 26 bits; the field sits at the bottom of the
     instruction word.  */
  return ofs & 0x3ffffff;
}
4041
4042 /* encode the 19-bit offset of conditional branch and compare & branch */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  /* A 19-bit field placed at bit 5.  */
  return (ofs & 0x7ffff) << 5;
}
4048
4049 /* encode the 19-bit offset of ld literal */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  /* A 19-bit field placed at bit 5, same layout as the conditional
     branch offset.  */
  return (ofs & 0x7ffff) << 5;
}
4055
4056 /* Encode the 14-bit offset of test & branch. */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  /* A 14-bit field placed at bit 5.  */
  return (ofs & 0x3fff) << 5;
}
4062
4063 /* Encode the 16-bit imm field of svc/hvc/smc. */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  /* The imm16 field of SVC/HVC/SMC starts at bit 5.  */
  uint32_t field = imm;

  field <<= 5;
  return field;
}
4069
4070 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  /* Bit 30 selects between add(s) and sub(s); flip it.  */
  return opcode ^ ((uint32_t) 1 << 30);
}
4076
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  /* Setting bit 30 turns MOVN into MOVZ.  */
  return opcode | ((uint32_t) 1 << 30);
}
4082
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  /* Clearing bit 30 turns MOVZ into MOVN.  */
  return opcode & ~((uint32_t) 1 << 30);
}
4088
4089 /* Overall per-instruction processing. */
4090
4091 /* We need to be able to fix up arbitrary expressions in some statements.
4092 This is so that we can handle symbols that are an arbitrary distance from
4093 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4094 which returns part of an address in a form which will be valid for
4095 a data instruction. We do this by pushing the expression into a symbol
4096 in the expr_section, and creating a fix for that. */
4097
4098 static fixS *
4099 fix_new_aarch64 (fragS * frag,
4100 int where,
4101 short int size, expressionS * exp, int pc_rel, int reloc)
4102 {
4103 fixS *new_fix;
4104
4105 switch (exp->X_op)
4106 {
4107 case O_constant:
4108 case O_symbol:
4109 case O_add:
4110 case O_subtract:
4111 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4112 break;
4113
4114 default:
4115 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4116 pc_rel, reloc);
4117 break;
4118 }
4119 return new_fix;
4120 }
4121 \f
4122 /* Diagnostics on operands errors. */
4123
/* Non-zero (the default) to output a verbose error message for operand
   errors; cleared by the -mno-verbose-error command-line option.  */
static int verbose_error_p = 1;
4127
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Printable names for
   the operand error kinds; indexed by enum aarch64_operand_error_kind,
   so the entries must stay in the same order as that enum.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4143
4144 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4145
4146 When multiple errors of different kinds are found in the same assembly
4147 line, only the error of the highest severity will be picked up for
4148 issuing the diagnostics. */
4149
4150 static inline bfd_boolean
4151 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4152 enum aarch64_operand_error_kind rhs)
4153 {
4154 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4155 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
4156 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4157 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4158 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4159 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4160 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4161 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4162 return lhs > rhs;
4163 }
4164
4165 /* Helper routine to get the mnemonic name from the assembly instruction
4166 line; should only be called for the diagnosis purpose, as there is
4167 string copy operation involved, which may affect the runtime
4168 performance if used in elsewhere. */
4169
static const char*
get_mnemonic_name (const char *str)
{
  /* N.B. the result points into this static buffer, so it is overwritten
     by the next call and must not be freed.  */
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4193
4194 static void
4195 reset_aarch64_instruction (aarch64_instruction *instruction)
4196 {
4197 memset (instruction, '\0', sizeof (aarch64_instruction));
4198 instruction->reloc.type = BFD_RELOC_UNUSED;
4199 }
4200
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode the error was found for.  */
  aarch64_operand_error detail;		/* The error details.  */
  struct operand_error_record *next;	/* Singly-linked list link.  */
};

typedef struct operand_error_record operand_error_record;

/* Head and tail of a singly-linked list of operand_error_record.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled by init_operand_error_report to avoid
   repeated allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4232
4233 /* Initialize the data structure that stores the operand mismatch
4234 information on assembling one line of the assembly code. */
4235 static void
4236 init_operand_error_report (void)
4237 {
4238 if (operand_error_report.head != NULL)
4239 {
4240 gas_assert (operand_error_report.tail != NULL);
4241 operand_error_report.tail->next = free_opnd_error_record_nodes;
4242 free_opnd_error_record_nodes = operand_error_report.head;
4243 operand_error_report.head = NULL;
4244 operand_error_report.tail = NULL;
4245 return;
4246 }
4247 gas_assert (operand_error_report.tail == NULL);
4248 }
4249
4250 /* Return TRUE if some operand error has been recorded during the
4251 parsing of the current assembly line using the opcode *OPCODE;
4252 otherwise return FALSE. */
4253 static inline bfd_boolean
4254 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4255 {
4256 operand_error_record *record = operand_error_report.head;
4257 return record && record->opcode == opcode;
4258 }
4259
4260 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4261 OPCODE field is initialized with OPCODE.
4262 N.B. only one record for each opcode, i.e. the maximum of one error is
4263 recorded for each instruction template. */
4264
static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record: reuse a node from the free list when
	 possible, otherwise allocate a fresh one.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Overwrite the head record's details with the new error.  */
  record->detail = new_record->detail;
}
4311
4312 static inline void
4313 record_operand_error_info (const aarch64_opcode *opcode,
4314 aarch64_operand_error *error_info)
4315 {
4316 operand_error_record record;
4317 record.opcode = opcode;
4318 record.detail = *error_info;
4319 add_operand_error_record (&record);
4320 }
4321
4322 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4323 error message *ERROR, for operand IDX (count from 0). */
4324
4325 static void
4326 record_operand_error (const aarch64_opcode *opcode, int idx,
4327 enum aarch64_operand_error_kind kind,
4328 const char* error)
4329 {
4330 aarch64_operand_error info;
4331 memset(&info, 0, sizeof (info));
4332 info.index = idx;
4333 info.kind = kind;
4334 info.error = error;
4335 record_operand_error_info (opcode, &info);
4336 }
4337
4338 static void
4339 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4340 enum aarch64_operand_error_kind kind,
4341 const char* error, const int *extra_data)
4342 {
4343 aarch64_operand_error info;
4344 info.index = idx;
4345 info.kind = kind;
4346 info.error = error;
4347 info.data[0] = extra_data[0];
4348 info.data[1] = extra_data[1];
4349 info.data[2] = extra_data[2];
4350 record_operand_error_info (opcode, &info);
4351 }
4352
4353 static void
4354 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4355 const char* error, int lower_bound,
4356 int upper_bound)
4357 {
4358 int data[3] = {lower_bound, upper_bound, 0};
4359 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4360 error, data);
4361 }
4362
4363 /* Remove the operand error record for *OPCODE. */
4364 static void ATTRIBUTE_UNUSED
4365 remove_operand_error_record (const aarch64_opcode *opcode)
4366 {
4367 if (opcode_has_operand_error_p (opcode))
4368 {
4369 operand_error_record* record = operand_error_report.head;
4370 gas_assert (record != NULL && operand_error_report.tail != NULL);
4371 operand_error_report.head = record->next;
4372 record->next = free_opnd_error_record_nodes;
4373 free_opnd_error_record_nodes = record;
4374 if (operand_error_report.head == NULL)
4375 {
4376 gas_assert (operand_error_report.tail == record);
4377 operand_error_report.tail = NULL;
4378 }
4379 }
4380 }
4381
4382 /* Given the instruction in *INSTR, return the index of the best matched
4383 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4384
4385 Return -1 if there is no qualifier sequence; return the first match
4386 if there is multiple matches found. */
4387
4388 static int
4389 find_best_match (const aarch64_inst *instr,
4390 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4391 {
4392 int i, num_opnds, max_num_matched, idx;
4393
4394 num_opnds = aarch64_num_of_operands (instr->opcode);
4395 if (num_opnds == 0)
4396 {
4397 DEBUG_TRACE ("no operand");
4398 return -1;
4399 }
4400
4401 max_num_matched = 0;
4402 idx = 0;
4403
4404 /* For each pattern. */
4405 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4406 {
4407 int j, num_matched;
4408 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4409
4410 /* Most opcodes has much fewer patterns in the list. */
4411 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
4412 {
4413 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4414 break;
4415 }
4416
4417 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4418 if (*qualifiers == instr->operands[j].qualifier)
4419 ++num_matched;
4420
4421 if (num_matched > max_num_matched)
4422 {
4423 max_num_matched = num_matched;
4424 idx = i;
4425 }
4426 }
4427
4428 DEBUG_TRACE ("return with %d", idx);
4429 return idx;
4430 }
4431
4432 /* Assign qualifiers in the qualifier seqence (headed by QUALIFIERS) to the
4433 corresponding operands in *INSTR. */
4434
4435 static inline void
4436 assign_qualifier_sequence (aarch64_inst *instr,
4437 const aarch64_opnd_qualifier_t *qualifiers)
4438 {
4439 int i = 0;
4440 int num_opnds = aarch64_num_of_operands (instr->opcode);
4441 gas_assert (num_opnds);
4442 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4443 instr->operands[i].qualifier = *qualifiers;
4444 }
4445
4446 /* Print operands for the diagnosis purpose. */
4447
4448 static void
4449 print_operands (char *buf, const aarch64_opcode *opcode,
4450 const aarch64_opnd_info *opnds)
4451 {
4452 int i;
4453
4454 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4455 {
4456 char str[128];
4457
4458 /* We regard the opcode operand info more, however we also look into
4459 the inst->operands to support the disassembling of the optional
4460 operand.
4461 The two operand code should be the same in all cases, apart from
4462 when the operand can be optional. */
4463 if (opcode->operands[i] == AARCH64_OPND_NIL
4464 || opnds[i].type == AARCH64_OPND_NIL)
4465 break;
4466
4467 /* Generate the operand string in STR. */
4468 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL);
4469
4470 /* Delimiter. */
4471 if (str[0] != '\0')
4472 strcat (buf, i == 0 ? " " : ",");
4473
4474 /* Append the operand string. */
4475 strcat (buf, str);
4476 }
4477 }
4478
4479 /* Send to stderr a string as information. */
4480
static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  /* Prefix with the current file/line when known, mirroring as_bad.  */
  file = as_where (&line);
  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4502
/* Output one operand error record.  RECORD holds the collected error
   detail; STR is the full assembly line being diagnosed, quoted in the
   emitted message.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* Index of the offending operand; -1 means the index is unknown.  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    as_bad (_("%s -- `%s'"), detail->error, str);
	  else
	    as_bad (_("%s at operand %d -- `%s'"),
		    detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
		  aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      as_bad (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst by re-parsing the operand text after the mnemonic.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail: this line has already been
	     diagnosed as an invalid variant.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_(" did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_(" %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_(" other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_(" %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      as_bad (_("operand %d must be the same register as operand 1 -- `%s'"),
	      detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] carry the permitted range; equal bounds mean a
	 single expected value.  */
      if (detail->data[0] != detail->data[1])
	as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], detail->data[1], idx + 1, str);
      else
	as_bad (_("%s expected to be %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] is the expected number of registers.  */
      if (detail->data[0] == 1)
	as_bad (_("invalid number of registers in the list; "
		  "only 1 register is expected at operand %d -- `%s'"),
		idx + 1, str);
      else
	as_bad (_("invalid number of registers in the list; "
		  "%d registers are expected at operand %d -- `%s'"),
		detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] is the required alignment of the immediate.  */
      as_bad (_("immediate value should be a multiple of "
		"%d at operand %d -- `%s'"),
	      detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4677
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information has
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.  */
4684
4685 static void
4686 output_operand_error_report (char *str)
4687 {
4688 int largest_error_pos;
4689 const char *msg = NULL;
4690 enum aarch64_operand_error_kind kind;
4691 operand_error_record *curr;
4692 operand_error_record *head = operand_error_report.head;
4693 operand_error_record *record = NULL;
4694
4695 /* No error to report. */
4696 if (head == NULL)
4697 return;
4698
4699 gas_assert (head != NULL && operand_error_report.tail != NULL);
4700
4701 /* Only one error. */
4702 if (head == operand_error_report.tail)
4703 {
4704 DEBUG_TRACE ("single opcode entry with error kind: %s",
4705 operand_mismatch_kind_names[head->detail.kind]);
4706 output_operand_error_record (head, str);
4707 return;
4708 }
4709
4710 /* Find the error kind of the highest severity. */
4711 DEBUG_TRACE ("multiple opcode entres with error kind");
4712 kind = AARCH64_OPDE_NIL;
4713 for (curr = head; curr != NULL; curr = curr->next)
4714 {
4715 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4716 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4717 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4718 kind = curr->detail.kind;
4719 }
4720 gas_assert (kind != AARCH64_OPDE_NIL);
4721
4722 /* Pick up one of errors of KIND to report. */
4723 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4724 for (curr = head; curr != NULL; curr = curr->next)
4725 {
4726 if (curr->detail.kind != kind)
4727 continue;
4728 /* If there are multiple errors, pick up the one with the highest
4729 mismatching operand index. In the case of multiple errors with
4730 the equally highest operand index, pick up the first one or the
4731 first one with non-NULL error message. */
4732 if (curr->detail.index > largest_error_pos
4733 || (curr->detail.index == largest_error_pos && msg == NULL
4734 && curr->detail.error != NULL))
4735 {
4736 largest_error_pos = curr->detail.index;
4737 record = curr;
4738 msg = record->detail.error;
4739 }
4740 }
4741
4742 gas_assert (largest_error_pos != -2 && record != NULL);
4743 DEBUG_TRACE ("Pick up error kind %s to report",
4744 operand_mismatch_kind_names[record->detail.kind]);
4745
4746 /* Output. */
4747 output_operand_error_record (record, str);
4748 }
4749 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *out = (unsigned char *) buf;
  int byte;

  /* Emit the four bytes least-significant first.  */
  for (byte = 0; byte < 4; byte++)
    out[byte] = (insn >> (byte * 8)) & 0xff;
}
4760
/* Read an AARCH64 instruction from BUF - always little-endian.  */

static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *where = (unsigned char *) buf;
  uint32_t result;
  /* Widen each byte to uint32_t before shifting: the unsigned char is
     otherwise promoted to (signed) int, and left-shifting a byte value
     >= 0x80 by 24 would shift into the sign bit, which is undefined
     behaviour in C.  */
  result = ((uint32_t) where[0]
	    | ((uint32_t) where[1] << 8)
	    | ((uint32_t) where[2] << 16)
	    | ((uint32_t) where[3] << 24));
  return result;
}
4769
/* Emit the instruction currently held in the global INST into the
   output frag, recording a fixup and DWARF line information as needed.
   NEW_INST, when non-NULL, is attached to the fixup so the relocation
   code can re-encode the instruction later.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  /* Queue a fixup if the instruction needs relocation processing.  */
  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand code and flags so the
	     back end can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
4803
/* Link together opcodes of the same name.  Each mnemonic hashes to a
   chain of these nodes, one per opcode template sharing that name.  */

struct templates
{
  aarch64_opcode *opcode;	/* One opcode entry for this mnemonic.  */
  struct templates *next;	/* Next template with the same name.  */
};

typedef struct templates templates;
4813
4814 static templates *
4815 lookup_mnemonic (const char *start, int len)
4816 {
4817 templates *templ = NULL;
4818
4819 templ = hash_find_n (aarch64_ops_hsh, start, len);
4820 return templ;
4821 }
4822
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.  On return *STR is advanced past the
   mnemonic (and any ".cond" suffix).  Returns NULL/0 on failure.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.')
      break;

  /* Empty mnemonic.  */
  if (end == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (end[0] == '.')
    {
      /* Condition names are looked up as exactly two characters.  */
      cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
      if (cond)
	{
	  inst.cond = cond->value;
	  /* Skip the '.' and the two-character condition name.  */
	  *str = end + 3;
	}
      else
	{
	  /* Unknown condition name.  */
	  *str = end;
	  return 0;
	}
    }
  else
    *str = end;

  len = end - base;

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* len <= 13 keeps len + 2 within condname[16]; lookup_mnemonic
	 takes an explicit length, so no NUL terminator is needed.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
4883
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (after recording an error) when the arrangement is invalid.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier (narrowest arrangement) for each element type; the
     final qualifier is computed as an offset from this base.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_8B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predicate qualifiers: zeroing (/z) and merging (/m).  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    /* Vector element register.  */
    return AARCH64_OPND_QLF_S_B + vectype->type;
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 128-, 64- and 32-bit total vector sizes are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 4;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_8B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
4948
4949 /* Process an optional operand that is found omitted from the assembly line.
4950 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4951 instruction's opcode entry while IDX is the index of this omitted operand.
4952 */
4953
4954 static void
4955 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4956 int idx, aarch64_opnd_info *operand)
4957 {
4958 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4959 gas_assert (optional_operand_p (opcode, idx));
4960 gas_assert (!operand->present);
4961
4962 switch (type)
4963 {
4964 case AARCH64_OPND_Rd:
4965 case AARCH64_OPND_Rn:
4966 case AARCH64_OPND_Rm:
4967 case AARCH64_OPND_Rt:
4968 case AARCH64_OPND_Rt2:
4969 case AARCH64_OPND_Rs:
4970 case AARCH64_OPND_Ra:
4971 case AARCH64_OPND_Rt_SYS:
4972 case AARCH64_OPND_Rd_SP:
4973 case AARCH64_OPND_Rn_SP:
4974 case AARCH64_OPND_Fd:
4975 case AARCH64_OPND_Fn:
4976 case AARCH64_OPND_Fm:
4977 case AARCH64_OPND_Fa:
4978 case AARCH64_OPND_Ft:
4979 case AARCH64_OPND_Ft2:
4980 case AARCH64_OPND_Sd:
4981 case AARCH64_OPND_Sn:
4982 case AARCH64_OPND_Sm:
4983 case AARCH64_OPND_Vd:
4984 case AARCH64_OPND_Vn:
4985 case AARCH64_OPND_Vm:
4986 case AARCH64_OPND_VdD1:
4987 case AARCH64_OPND_VnD1:
4988 operand->reg.regno = default_value;
4989 break;
4990
4991 case AARCH64_OPND_Ed:
4992 case AARCH64_OPND_En:
4993 case AARCH64_OPND_Em:
4994 operand->reglane.regno = default_value;
4995 break;
4996
4997 case AARCH64_OPND_IDX:
4998 case AARCH64_OPND_BIT_NUM:
4999 case AARCH64_OPND_IMMR:
5000 case AARCH64_OPND_IMMS:
5001 case AARCH64_OPND_SHLL_IMM:
5002 case AARCH64_OPND_IMM_VLSL:
5003 case AARCH64_OPND_IMM_VLSR:
5004 case AARCH64_OPND_CCMP_IMM:
5005 case AARCH64_OPND_FBITS:
5006 case AARCH64_OPND_UIMM4:
5007 case AARCH64_OPND_UIMM3_OP1:
5008 case AARCH64_OPND_UIMM3_OP2:
5009 case AARCH64_OPND_IMM:
5010 case AARCH64_OPND_WIDTH:
5011 case AARCH64_OPND_UIMM7:
5012 case AARCH64_OPND_NZCV:
5013 case AARCH64_OPND_SVE_PATTERN:
5014 case AARCH64_OPND_SVE_PRFOP:
5015 operand->imm.value = default_value;
5016 break;
5017
5018 case AARCH64_OPND_SVE_PATTERN_SCALED:
5019 operand->imm.value = default_value;
5020 operand->shifter.kind = AARCH64_MOD_MUL;
5021 operand->shifter.amount = 1;
5022 break;
5023
5024 case AARCH64_OPND_EXCEPTION:
5025 inst.reloc.type = BFD_RELOC_UNUSED;
5026 break;
5027
5028 case AARCH64_OPND_BARRIER_ISB:
5029 operand->barrier = aarch64_barrier_options + default_value;
5030
5031 default:
5032 break;
5033 }
5034 }
5035
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* Non-zero when the destination is a 32-bit (W) register.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* Certain relocation types are not allowed on MOVK; diagnose them.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Map the relocation type onto the implicit shift amount, i.e. which
     16-bit group of the value the instruction receives.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Groups 2 and 3 address bits beyond 32, so they are only valid
	 with a 64-bit (X) destination register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5126
/* A primitive log calculator.  Return log2 of SIZE for SIZE a power of
   two in [1, 16]; assert (and return -1) otherwise.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* log2 of each power of two in [1, 16]; (unsigned char) -1 marks
     sizes that are not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Also guard SIZE == 0: the original check only rejected SIZE > 16,
     so a zero size would have read ls[-1] (undefined behaviour).  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5142
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Real relocation types indexed by [pseudo reloc type][log2 of the
     transfer size in bytes].  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* When operand 1 has no explicit qualifier, infer it from operand 0.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* The DTPREL rows have no 128-bit entry (BFD_RELOC_AARCH64_NONE
     above), hence the tighter bound.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index
     calculation below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5201
5202 /* Check whether a register list REGINFO is valid. The registers must be
5203 numbered in increasing order (modulo 32), in increments of one or two.
5204
5205 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5206 increments of two.
5207
5208 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5209
5210 static bfd_boolean
5211 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5212 {
5213 uint32_t i, nb_regs, prev_regno, incr;
5214
5215 nb_regs = 1 + (reginfo & 0x3);
5216 reginfo >>= 2;
5217 prev_regno = reginfo & 0x1f;
5218 incr = accept_alternate ? 2 : 1;
5219
5220 for (i = 1; i < nb_regs; ++i)
5221 {
5222 uint32_t curr_regno;
5223 reginfo >>= 5;
5224 curr_regno = reginfo & 0x1f;
5225 if (curr_regno != ((prev_regno + incr) & 0x1f))
5226 return FALSE;
5227 prev_regno = curr_regno;
5228 }
5229
5230 return TRUE;
5231 }
5232
5233 /* Generic instruction operand parser. This does no encoding and no
5234 semantic validation; it merely squirrels values away in the inst
5235 structure. Returns TRUE or FALSE depending on whether the
5236 specified grammar matched. */
5237
5238 static bfd_boolean
5239 parse_operands (char *str, const aarch64_opcode *opcode)
5240 {
5241 int i;
5242 char *backtrack_pos = 0;
5243 const enum aarch64_opnd *operands = opcode->operands;
5244 aarch64_reg_type imm_reg_type;
5245
5246 clear_error ();
5247 skip_whitespace (str);
5248
5249 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5250
5251 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5252 {
5253 int64_t val;
5254 const reg_entry *reg;
5255 int comma_skipped_p = 0;
5256 aarch64_reg_type rtype;
5257 struct vector_type_el vectype;
5258 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5259 aarch64_opnd_info *info = &inst.base.operands[i];
5260 aarch64_reg_type reg_type;
5261
5262 DEBUG_TRACE ("parse operand %d", i);
5263
5264 /* Assign the operand code. */
5265 info->type = operands[i];
5266
5267 if (optional_operand_p (opcode, i))
5268 {
5269 /* Remember where we are in case we need to backtrack. */
5270 gas_assert (!backtrack_pos);
5271 backtrack_pos = str;
5272 }
5273
5274 /* Expect comma between operands; the backtrack mechanizm will take
5275 care of cases of omitted optional operand. */
5276 if (i > 0 && ! skip_past_char (&str, ','))
5277 {
5278 set_syntax_error (_("comma expected between operands"));
5279 goto failure;
5280 }
5281 else
5282 comma_skipped_p = 1;
5283
5284 switch (operands[i])
5285 {
5286 case AARCH64_OPND_Rd:
5287 case AARCH64_OPND_Rn:
5288 case AARCH64_OPND_Rm:
5289 case AARCH64_OPND_Rt:
5290 case AARCH64_OPND_Rt2:
5291 case AARCH64_OPND_Rs:
5292 case AARCH64_OPND_Ra:
5293 case AARCH64_OPND_Rt_SYS:
5294 case AARCH64_OPND_PAIRREG:
5295 po_int_reg_or_fail (REG_TYPE_R_Z);
5296 break;
5297
5298 case AARCH64_OPND_Rd_SP:
5299 case AARCH64_OPND_Rn_SP:
5300 po_int_reg_or_fail (REG_TYPE_R_SP);
5301 break;
5302
5303 case AARCH64_OPND_Rm_EXT:
5304 case AARCH64_OPND_Rm_SFT:
5305 po_misc_or_fail (parse_shifter_operand
5306 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5307 ? SHIFTED_ARITH_IMM
5308 : SHIFTED_LOGIC_IMM)));
5309 if (!info->shifter.operator_present)
5310 {
5311 /* Default to LSL if not present. Libopcodes prefers shifter
5312 kind to be explicit. */
5313 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5314 info->shifter.kind = AARCH64_MOD_LSL;
5315 /* For Rm_EXT, libopcodes will carry out further check on whether
5316 or not stack pointer is used in the instruction (Recall that
5317 "the extend operator is not optional unless at least one of
5318 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5319 }
5320 break;
5321
5322 case AARCH64_OPND_Fd:
5323 case AARCH64_OPND_Fn:
5324 case AARCH64_OPND_Fm:
5325 case AARCH64_OPND_Fa:
5326 case AARCH64_OPND_Ft:
5327 case AARCH64_OPND_Ft2:
5328 case AARCH64_OPND_Sd:
5329 case AARCH64_OPND_Sn:
5330 case AARCH64_OPND_Sm:
5331 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5332 if (val == PARSE_FAIL)
5333 {
5334 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5335 goto failure;
5336 }
5337 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5338
5339 info->reg.regno = val;
5340 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5341 break;
5342
5343 case AARCH64_OPND_SVE_Pd:
5344 case AARCH64_OPND_SVE_Pg3:
5345 case AARCH64_OPND_SVE_Pg4_5:
5346 case AARCH64_OPND_SVE_Pg4_10:
5347 case AARCH64_OPND_SVE_Pg4_16:
5348 case AARCH64_OPND_SVE_Pm:
5349 case AARCH64_OPND_SVE_Pn:
5350 case AARCH64_OPND_SVE_Pt:
5351 reg_type = REG_TYPE_PN;
5352 goto vector_reg;
5353
5354 case AARCH64_OPND_SVE_Za_5:
5355 case AARCH64_OPND_SVE_Za_16:
5356 case AARCH64_OPND_SVE_Zd:
5357 case AARCH64_OPND_SVE_Zm_5:
5358 case AARCH64_OPND_SVE_Zm_16:
5359 case AARCH64_OPND_SVE_Zn:
5360 case AARCH64_OPND_SVE_Zt:
5361 reg_type = REG_TYPE_ZN;
5362 goto vector_reg;
5363
5364 case AARCH64_OPND_Vd:
5365 case AARCH64_OPND_Vn:
5366 case AARCH64_OPND_Vm:
5367 reg_type = REG_TYPE_VN;
5368 vector_reg:
5369 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5370 if (val == PARSE_FAIL)
5371 {
5372 first_error (_(get_reg_expected_msg (reg_type)));
5373 goto failure;
5374 }
5375 if (vectype.defined & NTA_HASINDEX)
5376 goto failure;
5377
5378 info->reg.regno = val;
5379 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5380 && vectype.type == NT_invtype)
5381 /* Unqualified Pn and Zn registers are allowed in certain
5382 contexts. Rely on F_STRICT qualifier checking to catch
5383 invalid uses. */
5384 info->qualifier = AARCH64_OPND_QLF_NIL;
5385 else
5386 {
5387 info->qualifier = vectype_to_qualifier (&vectype);
5388 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5389 goto failure;
5390 }
5391 break;
5392
5393 case AARCH64_OPND_VdD1:
5394 case AARCH64_OPND_VnD1:
5395 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5396 if (val == PARSE_FAIL)
5397 {
5398 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5399 goto failure;
5400 }
5401 if (vectype.type != NT_d || vectype.index != 1)
5402 {
5403 set_fatal_syntax_error
5404 (_("the top half of a 128-bit FP/SIMD register is expected"));
5405 goto failure;
5406 }
5407 info->reg.regno = val;
5408 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5409 here; it is correct for the purpose of encoding/decoding since
5410 only the register number is explicitly encoded in the related
5411 instructions, although this appears a bit hacky. */
5412 info->qualifier = AARCH64_OPND_QLF_S_D;
5413 break;
5414
5415 case AARCH64_OPND_SVE_Zn_INDEX:
5416 reg_type = REG_TYPE_ZN;
5417 goto vector_reg_index;
5418
5419 case AARCH64_OPND_Ed:
5420 case AARCH64_OPND_En:
5421 case AARCH64_OPND_Em:
5422 reg_type = REG_TYPE_VN;
5423 vector_reg_index:
5424 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5425 if (val == PARSE_FAIL)
5426 {
5427 first_error (_(get_reg_expected_msg (reg_type)));
5428 goto failure;
5429 }
5430 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5431 goto failure;
5432
5433 info->reglane.regno = val;
5434 info->reglane.index = vectype.index;
5435 info->qualifier = vectype_to_qualifier (&vectype);
5436 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5437 goto failure;
5438 break;
5439
5440 case AARCH64_OPND_SVE_ZnxN:
5441 case AARCH64_OPND_SVE_ZtxN:
5442 reg_type = REG_TYPE_ZN;
5443 goto vector_reg_list;
5444
5445 case AARCH64_OPND_LVn:
5446 case AARCH64_OPND_LVt:
5447 case AARCH64_OPND_LVt_AL:
5448 case AARCH64_OPND_LEt:
5449 reg_type = REG_TYPE_VN;
5450 vector_reg_list:
5451 if (reg_type == REG_TYPE_ZN
5452 && get_opcode_dependent_value (opcode) == 1
5453 && *str != '{')
5454 {
5455 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5456 if (val == PARSE_FAIL)
5457 {
5458 first_error (_(get_reg_expected_msg (reg_type)));
5459 goto failure;
5460 }
5461 info->reglist.first_regno = val;
5462 info->reglist.num_regs = 1;
5463 }
5464 else
5465 {
5466 val = parse_vector_reg_list (&str, reg_type, &vectype);
5467 if (val == PARSE_FAIL)
5468 goto failure;
5469 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5470 {
5471 set_fatal_syntax_error (_("invalid register list"));
5472 goto failure;
5473 }
5474 info->reglist.first_regno = (val >> 2) & 0x1f;
5475 info->reglist.num_regs = (val & 0x3) + 1;
5476 }
5477 if (operands[i] == AARCH64_OPND_LEt)
5478 {
5479 if (!(vectype.defined & NTA_HASINDEX))
5480 goto failure;
5481 info->reglist.has_index = 1;
5482 info->reglist.index = vectype.index;
5483 }
5484 else
5485 {
5486 if (vectype.defined & NTA_HASINDEX)
5487 goto failure;
5488 if (!(vectype.defined & NTA_HASTYPE))
5489 {
5490 if (reg_type == REG_TYPE_ZN)
5491 set_fatal_syntax_error (_("missing type suffix"));
5492 goto failure;
5493 }
5494 }
5495 info->qualifier = vectype_to_qualifier (&vectype);
5496 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5497 goto failure;
5498 break;
5499
5500 case AARCH64_OPND_Cn:
5501 case AARCH64_OPND_Cm:
5502 po_reg_or_fail (REG_TYPE_CN);
5503 if (val > 15)
5504 {
5505 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
5506 goto failure;
5507 }
5508 inst.base.operands[i].reg.regno = val;
5509 break;
5510
5511 case AARCH64_OPND_SHLL_IMM:
5512 case AARCH64_OPND_IMM_VLSR:
5513 po_imm_or_fail (1, 64);
5514 info->imm.value = val;
5515 break;
5516
5517 case AARCH64_OPND_CCMP_IMM:
5518 case AARCH64_OPND_SIMM5:
5519 case AARCH64_OPND_FBITS:
5520 case AARCH64_OPND_UIMM4:
5521 case AARCH64_OPND_UIMM3_OP1:
5522 case AARCH64_OPND_UIMM3_OP2:
5523 case AARCH64_OPND_IMM_VLSL:
5524 case AARCH64_OPND_IMM:
5525 case AARCH64_OPND_WIDTH:
5526 case AARCH64_OPND_SVE_INV_LIMM:
5527 case AARCH64_OPND_SVE_LIMM:
5528 case AARCH64_OPND_SVE_LIMM_MOV:
5529 case AARCH64_OPND_SVE_SHLIMM_PRED:
5530 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5531 case AARCH64_OPND_SVE_SHRIMM_PRED:
5532 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5533 case AARCH64_OPND_SVE_SIMM5:
5534 case AARCH64_OPND_SVE_SIMM5B:
5535 case AARCH64_OPND_SVE_SIMM6:
5536 case AARCH64_OPND_SVE_SIMM8:
5537 case AARCH64_OPND_SVE_UIMM3:
5538 case AARCH64_OPND_SVE_UIMM7:
5539 case AARCH64_OPND_SVE_UIMM8:
5540 case AARCH64_OPND_SVE_UIMM8_53:
5541 po_imm_nc_or_fail ();
5542 info->imm.value = val;
5543 break;
5544
5545 case AARCH64_OPND_SVE_AIMM:
5546 case AARCH64_OPND_SVE_ASIMM:
5547 po_imm_nc_or_fail ();
5548 info->imm.value = val;
5549 skip_whitespace (str);
5550 if (skip_past_comma (&str))
5551 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5552 else
5553 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5554 break;
5555
5556 case AARCH64_OPND_SVE_PATTERN:
5557 po_enum_or_fail (aarch64_sve_pattern_array);
5558 info->imm.value = val;
5559 break;
5560
5561 case AARCH64_OPND_SVE_PATTERN_SCALED:
5562 po_enum_or_fail (aarch64_sve_pattern_array);
5563 info->imm.value = val;
5564 if (skip_past_comma (&str)
5565 && !parse_shift (&str, info, SHIFTED_MUL))
5566 goto failure;
5567 if (!info->shifter.operator_present)
5568 {
5569 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5570 info->shifter.kind = AARCH64_MOD_MUL;
5571 info->shifter.amount = 1;
5572 }
5573 break;
5574
5575 case AARCH64_OPND_SVE_PRFOP:
5576 po_enum_or_fail (aarch64_sve_prfop_array);
5577 info->imm.value = val;
5578 break;
5579
5580 case AARCH64_OPND_UIMM7:
5581 po_imm_or_fail (0, 127);
5582 info->imm.value = val;
5583 break;
5584
5585 case AARCH64_OPND_IDX:
5586 case AARCH64_OPND_BIT_NUM:
5587 case AARCH64_OPND_IMMR:
5588 case AARCH64_OPND_IMMS:
5589 po_imm_or_fail (0, 63);
5590 info->imm.value = val;
5591 break;
5592
5593 case AARCH64_OPND_IMM0:
5594 po_imm_nc_or_fail ();
5595 if (val != 0)
5596 {
5597 set_fatal_syntax_error (_("immediate zero expected"));
5598 goto failure;
5599 }
5600 info->imm.value = 0;
5601 break;
5602
5603 case AARCH64_OPND_FPIMM0:
5604 {
5605 int qfloat;
5606 bfd_boolean res1 = FALSE, res2 = FALSE;
5607 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5608 it is probably not worth the effort to support it. */
5609 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5610 imm_reg_type))
5611 && (error_p ()
5612 || !(res2 = parse_constant_immediate (&str, &val,
5613 imm_reg_type))))
5614 goto failure;
5615 if ((res1 && qfloat == 0) || (res2 && val == 0))
5616 {
5617 info->imm.value = 0;
5618 info->imm.is_fp = 1;
5619 break;
5620 }
5621 set_fatal_syntax_error (_("immediate zero expected"));
5622 goto failure;
5623 }
5624
5625 case AARCH64_OPND_IMM_MOV:
5626 {
5627 char *saved = str;
5628 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5629 reg_name_p (str, REG_TYPE_VN))
5630 goto failure;
5631 str = saved;
5632 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5633 GE_OPT_PREFIX, 1));
5634 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5635 later. fix_mov_imm_insn will try to determine a machine
5636 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5637 message if the immediate cannot be moved by a single
5638 instruction. */
5639 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5640 inst.base.operands[i].skip = 1;
5641 }
5642 break;
5643
5644 case AARCH64_OPND_SIMD_IMM:
5645 case AARCH64_OPND_SIMD_IMM_SFT:
5646 if (! parse_big_immediate (&str, &val, imm_reg_type))
5647 goto failure;
5648 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5649 /* addr_off_p */ 0,
5650 /* need_libopcodes_p */ 1,
5651 /* skip_p */ 1);
5652 /* Parse shift.
5653 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5654 shift, we don't check it here; we leave the checking to
5655 the libopcodes (operand_general_constraint_met_p). By
5656 doing this, we achieve better diagnostics. */
5657 if (skip_past_comma (&str)
5658 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5659 goto failure;
5660 if (!info->shifter.operator_present
5661 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5662 {
5663 /* Default to LSL if not present. Libopcodes prefers shifter
5664 kind to be explicit. */
5665 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5666 info->shifter.kind = AARCH64_MOD_LSL;
5667 }
5668 break;
5669
5670 case AARCH64_OPND_FPIMM:
5671 case AARCH64_OPND_SIMD_FPIMM:
5672 case AARCH64_OPND_SVE_FPIMM8:
5673 {
5674 int qfloat;
5675 bfd_boolean dp_p;
5676
5677 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5678 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5679 || !aarch64_imm_float_p (qfloat))
5680 {
5681 if (!error_p ())
5682 set_fatal_syntax_error (_("invalid floating-point"
5683 " constant"));
5684 goto failure;
5685 }
5686 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5687 inst.base.operands[i].imm.is_fp = 1;
5688 }
5689 break;
5690
5691 case AARCH64_OPND_SVE_I1_HALF_ONE:
5692 case AARCH64_OPND_SVE_I1_HALF_TWO:
5693 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5694 {
5695 int qfloat;
5696 bfd_boolean dp_p;
5697
5698 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5699 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5700 {
5701 if (!error_p ())
5702 set_fatal_syntax_error (_("invalid floating-point"
5703 " constant"));
5704 goto failure;
5705 }
5706 inst.base.operands[i].imm.value = qfloat;
5707 inst.base.operands[i].imm.is_fp = 1;
5708 }
5709 break;
5710
5711 case AARCH64_OPND_LIMM:
5712 po_misc_or_fail (parse_shifter_operand (&str, info,
5713 SHIFTED_LOGIC_IMM));
5714 if (info->shifter.operator_present)
5715 {
5716 set_fatal_syntax_error
5717 (_("shift not allowed for bitmask immediate"));
5718 goto failure;
5719 }
5720 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5721 /* addr_off_p */ 0,
5722 /* need_libopcodes_p */ 1,
5723 /* skip_p */ 1);
5724 break;
5725
5726 case AARCH64_OPND_AIMM:
5727 if (opcode->op == OP_ADD)
5728 /* ADD may have relocation types. */
5729 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5730 SHIFTED_ARITH_IMM));
5731 else
5732 po_misc_or_fail (parse_shifter_operand (&str, info,
5733 SHIFTED_ARITH_IMM));
5734 switch (inst.reloc.type)
5735 {
5736 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5737 info->shifter.amount = 12;
5738 break;
5739 case BFD_RELOC_UNUSED:
5740 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5741 if (info->shifter.kind != AARCH64_MOD_NONE)
5742 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5743 inst.reloc.pc_rel = 0;
5744 break;
5745 default:
5746 break;
5747 }
5748 info->imm.value = 0;
5749 if (!info->shifter.operator_present)
5750 {
5751 /* Default to LSL if not present. Libopcodes prefers shifter
5752 kind to be explicit. */
5753 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5754 info->shifter.kind = AARCH64_MOD_LSL;
5755 }
5756 break;
5757
5758 case AARCH64_OPND_HALF:
5759 {
5760 /* #<imm16> or relocation. */
5761 int internal_fixup_p;
5762 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5763 if (internal_fixup_p)
5764 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5765 skip_whitespace (str);
5766 if (skip_past_comma (&str))
5767 {
5768 /* {, LSL #<shift>} */
5769 if (! aarch64_gas_internal_fixup_p ())
5770 {
5771 set_fatal_syntax_error (_("can't mix relocation modifier "
5772 "with explicit shift"));
5773 goto failure;
5774 }
5775 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5776 }
5777 else
5778 inst.base.operands[i].shifter.amount = 0;
5779 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5780 inst.base.operands[i].imm.value = 0;
5781 if (! process_movw_reloc_info ())
5782 goto failure;
5783 }
5784 break;
5785
5786 case AARCH64_OPND_EXCEPTION:
5787 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5788 imm_reg_type));
5789 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5790 /* addr_off_p */ 0,
5791 /* need_libopcodes_p */ 0,
5792 /* skip_p */ 1);
5793 break;
5794
5795 case AARCH64_OPND_NZCV:
5796 {
5797 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5798 if (nzcv != NULL)
5799 {
5800 str += 4;
5801 info->imm.value = nzcv->value;
5802 break;
5803 }
5804 po_imm_or_fail (0, 15);
5805 info->imm.value = val;
5806 }
5807 break;
5808
5809 case AARCH64_OPND_COND:
5810 case AARCH64_OPND_COND1:
5811 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5812 str += 2;
5813 if (info->cond == NULL)
5814 {
5815 set_syntax_error (_("invalid condition"));
5816 goto failure;
5817 }
5818 else if (operands[i] == AARCH64_OPND_COND1
5819 && (info->cond->value & 0xe) == 0xe)
5820 {
5821 /* Not allow AL or NV. */
5822 set_default_error ();
5823 goto failure;
5824 }
5825 break;
5826
5827 case AARCH64_OPND_ADDR_ADRP:
5828 po_misc_or_fail (parse_adrp (&str));
5829 /* Clear the value as operand needs to be relocated. */
5830 info->imm.value = 0;
5831 break;
5832
5833 case AARCH64_OPND_ADDR_PCREL14:
5834 case AARCH64_OPND_ADDR_PCREL19:
5835 case AARCH64_OPND_ADDR_PCREL21:
5836 case AARCH64_OPND_ADDR_PCREL26:
5837 po_misc_or_fail (parse_address (&str, info));
5838 if (!info->addr.pcrel)
5839 {
5840 set_syntax_error (_("invalid pc-relative address"));
5841 goto failure;
5842 }
5843 if (inst.gen_lit_pool
5844 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5845 {
5846 /* Only permit "=value" in the literal load instructions.
5847 The literal will be generated by programmer_friendly_fixup. */
5848 set_syntax_error (_("invalid use of \"=immediate\""));
5849 goto failure;
5850 }
5851 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5852 {
5853 set_syntax_error (_("unrecognized relocation suffix"));
5854 goto failure;
5855 }
5856 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5857 {
5858 info->imm.value = inst.reloc.exp.X_add_number;
5859 inst.reloc.type = BFD_RELOC_UNUSED;
5860 }
5861 else
5862 {
5863 info->imm.value = 0;
5864 if (inst.reloc.type == BFD_RELOC_UNUSED)
5865 switch (opcode->iclass)
5866 {
5867 case compbranch:
5868 case condbranch:
5869 /* e.g. CBZ or B.COND */
5870 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5871 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5872 break;
5873 case testbranch:
5874 /* e.g. TBZ */
5875 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5876 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5877 break;
5878 case branch_imm:
5879 /* e.g. B or BL */
5880 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5881 inst.reloc.type =
5882 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5883 : BFD_RELOC_AARCH64_JUMP26;
5884 break;
5885 case loadlit:
5886 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5887 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5888 break;
5889 case pcreladdr:
5890 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5891 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5892 break;
5893 default:
5894 gas_assert (0);
5895 abort ();
5896 }
5897 inst.reloc.pc_rel = 1;
5898 }
5899 break;
5900
5901 case AARCH64_OPND_ADDR_SIMPLE:
5902 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5903 {
5904 /* [<Xn|SP>{, #<simm>}] */
5905 char *start = str;
5906 /* First use the normal address-parsing routines, to get
5907 the usual syntax errors. */
5908 po_misc_or_fail (parse_address (&str, info));
5909 if (info->addr.pcrel || info->addr.offset.is_reg
5910 || !info->addr.preind || info->addr.postind
5911 || info->addr.writeback)
5912 {
5913 set_syntax_error (_("invalid addressing mode"));
5914 goto failure;
5915 }
5916
5917 /* Then retry, matching the specific syntax of these addresses. */
5918 str = start;
5919 po_char_or_fail ('[');
5920 po_reg_or_fail (REG_TYPE_R64_SP);
5921 /* Accept optional ", #0". */
5922 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5923 && skip_past_char (&str, ','))
5924 {
5925 skip_past_char (&str, '#');
5926 if (! skip_past_char (&str, '0'))
5927 {
5928 set_fatal_syntax_error
5929 (_("the optional immediate offset can only be 0"));
5930 goto failure;
5931 }
5932 }
5933 po_char_or_fail (']');
5934 break;
5935 }
5936
5937 case AARCH64_OPND_ADDR_REGOFF:
5938 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5939 po_misc_or_fail (parse_address (&str, info));
5940 regoff_addr:
5941 if (info->addr.pcrel || !info->addr.offset.is_reg
5942 || !info->addr.preind || info->addr.postind
5943 || info->addr.writeback)
5944 {
5945 set_syntax_error (_("invalid addressing mode"));
5946 goto failure;
5947 }
5948 if (!info->shifter.operator_present)
5949 {
5950 /* Default to LSL if not present. Libopcodes prefers shifter
5951 kind to be explicit. */
5952 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5953 info->shifter.kind = AARCH64_MOD_LSL;
5954 }
5955 /* Qualifier to be deduced by libopcodes. */
5956 break;
5957
5958 case AARCH64_OPND_ADDR_SIMM7:
5959 po_misc_or_fail (parse_address (&str, info));
5960 if (info->addr.pcrel || info->addr.offset.is_reg
5961 || (!info->addr.preind && !info->addr.postind))
5962 {
5963 set_syntax_error (_("invalid addressing mode"));
5964 goto failure;
5965 }
5966 if (inst.reloc.type != BFD_RELOC_UNUSED)
5967 {
5968 set_syntax_error (_("relocation not allowed"));
5969 goto failure;
5970 }
5971 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5972 /* addr_off_p */ 1,
5973 /* need_libopcodes_p */ 1,
5974 /* skip_p */ 0);
5975 break;
5976
5977 case AARCH64_OPND_ADDR_SIMM9:
5978 case AARCH64_OPND_ADDR_SIMM9_2:
5979 po_misc_or_fail (parse_address (&str, info));
5980 if (info->addr.pcrel || info->addr.offset.is_reg
5981 || (!info->addr.preind && !info->addr.postind)
5982 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5983 && info->addr.writeback))
5984 {
5985 set_syntax_error (_("invalid addressing mode"));
5986 goto failure;
5987 }
5988 if (inst.reloc.type != BFD_RELOC_UNUSED)
5989 {
5990 set_syntax_error (_("relocation not allowed"));
5991 goto failure;
5992 }
5993 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5994 /* addr_off_p */ 1,
5995 /* need_libopcodes_p */ 1,
5996 /* skip_p */ 0);
5997 break;
5998
5999 case AARCH64_OPND_ADDR_UIMM12:
6000 po_misc_or_fail (parse_address (&str, info));
6001 if (info->addr.pcrel || info->addr.offset.is_reg
6002 || !info->addr.preind || info->addr.writeback)
6003 {
6004 set_syntax_error (_("invalid addressing mode"));
6005 goto failure;
6006 }
6007 if (inst.reloc.type == BFD_RELOC_UNUSED)
6008 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6009 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6010 || (inst.reloc.type
6011 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6012 || (inst.reloc.type
6013 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
6014 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6015 /* Leave qualifier to be determined by libopcodes. */
6016 break;
6017
6018 case AARCH64_OPND_SIMD_ADDR_POST:
6019 /* [<Xn|SP>], <Xm|#<amount>> */
6020 po_misc_or_fail (parse_address (&str, info));
6021 if (!info->addr.postind || !info->addr.writeback)
6022 {
6023 set_syntax_error (_("invalid addressing mode"));
6024 goto failure;
6025 }
6026 if (!info->addr.offset.is_reg)
6027 {
6028 if (inst.reloc.exp.X_op == O_constant)
6029 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6030 else
6031 {
6032 set_fatal_syntax_error
6033 (_("writeback value should be an immediate constant"));
6034 goto failure;
6035 }
6036 }
6037 /* No qualifier. */
6038 break;
6039
6040 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6041 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6042 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6043 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6044 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6045 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6046 case AARCH64_OPND_SVE_ADDR_RI_U6:
6047 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6048 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6049 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6050 /* [X<n>{, #imm, MUL VL}]
6051 [X<n>{, #imm}]
6052 but recognizing SVE registers. */
6053 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6054 &offset_qualifier));
6055 if (base_qualifier != AARCH64_OPND_QLF_X)
6056 {
6057 set_syntax_error (_("invalid addressing mode"));
6058 goto failure;
6059 }
6060 sve_regimm:
6061 if (info->addr.pcrel || info->addr.offset.is_reg
6062 || !info->addr.preind || info->addr.writeback)
6063 {
6064 set_syntax_error (_("invalid addressing mode"));
6065 goto failure;
6066 }
6067 if (inst.reloc.type != BFD_RELOC_UNUSED
6068 || inst.reloc.exp.X_op != O_constant)
6069 {
6070 /* Make sure this has priority over
6071 "invalid addressing mode". */
6072 set_fatal_syntax_error (_("constant offset required"));
6073 goto failure;
6074 }
6075 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6076 break;
6077
6078 case AARCH64_OPND_SVE_ADDR_RR:
6079 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6080 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6081 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6082 case AARCH64_OPND_SVE_ADDR_RX:
6083 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6084 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6085 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6086 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6087 but recognizing SVE registers. */
6088 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6089 &offset_qualifier));
6090 if (base_qualifier != AARCH64_OPND_QLF_X
6091 || offset_qualifier != AARCH64_OPND_QLF_X)
6092 {
6093 set_syntax_error (_("invalid addressing mode"));
6094 goto failure;
6095 }
6096 goto regoff_addr;
6097
6098 case AARCH64_OPND_SVE_ADDR_RZ:
6099 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6100 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6101 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6102 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6103 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6104 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6105 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6106 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6107 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6108 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6109 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6110 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6111 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6112 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6113 &offset_qualifier));
6114 if (base_qualifier != AARCH64_OPND_QLF_X
6115 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6116 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6117 {
6118 set_syntax_error (_("invalid addressing mode"));
6119 goto failure;
6120 }
6121 info->qualifier = offset_qualifier;
6122 goto regoff_addr;
6123
6124 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6125 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6126 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6127 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6128 /* [Z<n>.<T>{, #imm}] */
6129 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6130 &offset_qualifier));
6131 if (base_qualifier != AARCH64_OPND_QLF_S_S
6132 && base_qualifier != AARCH64_OPND_QLF_S_D)
6133 {
6134 set_syntax_error (_("invalid addressing mode"));
6135 goto failure;
6136 }
6137 info->qualifier = base_qualifier;
6138 goto sve_regimm;
6139
6140 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6141 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6142 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6143 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6144 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6145
6146 We don't reject:
6147
6148 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6149
6150 here since we get better error messages by leaving it to
6151 the qualifier checking routines. */
6152 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6153 &offset_qualifier));
6154 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6155 && base_qualifier != AARCH64_OPND_QLF_S_D)
6156 || offset_qualifier != base_qualifier)
6157 {
6158 set_syntax_error (_("invalid addressing mode"));
6159 goto failure;
6160 }
6161 info->qualifier = base_qualifier;
6162 goto regoff_addr;
6163
6164 case AARCH64_OPND_SYSREG:
6165 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
6166 == PARSE_FAIL)
6167 {
6168 set_syntax_error (_("unknown or missing system register name"));
6169 goto failure;
6170 }
6171 inst.base.operands[i].sysreg = val;
6172 break;
6173
6174 case AARCH64_OPND_PSTATEFIELD:
6175 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
6176 == PARSE_FAIL)
6177 {
6178 set_syntax_error (_("unknown or missing PSTATE field name"));
6179 goto failure;
6180 }
6181 inst.base.operands[i].pstatefield = val;
6182 break;
6183
6184 case AARCH64_OPND_SYSREG_IC:
6185 inst.base.operands[i].sysins_op =
6186 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6187 goto sys_reg_ins;
6188 case AARCH64_OPND_SYSREG_DC:
6189 inst.base.operands[i].sysins_op =
6190 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6191 goto sys_reg_ins;
6192 case AARCH64_OPND_SYSREG_AT:
6193 inst.base.operands[i].sysins_op =
6194 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6195 goto sys_reg_ins;
6196 case AARCH64_OPND_SYSREG_TLBI:
6197 inst.base.operands[i].sysins_op =
6198 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6199 sys_reg_ins:
6200 if (inst.base.operands[i].sysins_op == NULL)
6201 {
6202 set_fatal_syntax_error ( _("unknown or missing operation name"));
6203 goto failure;
6204 }
6205 break;
6206
6207 case AARCH64_OPND_BARRIER:
6208 case AARCH64_OPND_BARRIER_ISB:
6209 val = parse_barrier (&str);
6210 if (val != PARSE_FAIL
6211 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6212 {
6213 /* ISB only accepts options name 'sy'. */
6214 set_syntax_error
6215 (_("the specified option is not accepted in ISB"));
6216 /* Turn off backtrack as this optional operand is present. */
6217 backtrack_pos = 0;
6218 goto failure;
6219 }
6220 /* This is an extension to accept a 0..15 immediate. */
6221 if (val == PARSE_FAIL)
6222 po_imm_or_fail (0, 15);
6223 info->barrier = aarch64_barrier_options + val;
6224 break;
6225
6226 case AARCH64_OPND_PRFOP:
6227 val = parse_pldop (&str);
6228 /* This is an extension to accept a 0..31 immediate. */
6229 if (val == PARSE_FAIL)
6230 po_imm_or_fail (0, 31);
6231 inst.base.operands[i].prfop = aarch64_prfops + val;
6232 break;
6233
6234 case AARCH64_OPND_BARRIER_PSB:
6235 val = parse_barrier_psb (&str, &(info->hint_option));
6236 if (val == PARSE_FAIL)
6237 goto failure;
6238 break;
6239
6240 default:
6241 as_fatal (_("unhandled operand code %d"), operands[i]);
6242 }
6243
6244 /* If we get here, this operand was successfully parsed. */
6245 inst.base.operands[i].present = 1;
6246 continue;
6247
6248 failure:
6249 /* The parse routine should already have set the error, but in case
6250 not, set a default one here. */
6251 if (! error_p ())
6252 set_default_error ();
6253
6254 if (! backtrack_pos)
6255 goto parse_operands_return;
6256
6257 {
6258 /* We reach here because this operand is marked as optional, and
6259 either no operand was supplied or the operand was supplied but it
6260 was syntactically incorrect. In the latter case we report an
6261 error. In the former case we perform a few more checks before
6262 dropping through to the code to insert the default operand. */
6263
6264 char *tmp = backtrack_pos;
6265 char endchar = END_OF_INSN;
6266
6267 if (i != (aarch64_num_of_operands (opcode) - 1))
6268 endchar = ',';
6269 skip_past_char (&tmp, ',');
6270
6271 if (*tmp != endchar)
6272 /* The user has supplied an operand in the wrong format. */
6273 goto parse_operands_return;
6274
6275 /* Make sure there is not a comma before the optional operand.
6276 For example the fifth operand of 'sys' is optional:
6277
6278 sys #0,c0,c0,#0, <--- wrong
6279 sys #0,c0,c0,#0 <--- correct. */
6280 if (comma_skipped_p && i && endchar == END_OF_INSN)
6281 {
6282 set_fatal_syntax_error
6283 (_("unexpected comma before the omitted optional operand"));
6284 goto parse_operands_return;
6285 }
6286 }
6287
6288 /* Reaching here means we are dealing with an optional operand that is
6289 omitted from the assembly line. */
6290 gas_assert (optional_operand_p (opcode, i));
6291 info->present = 0;
6292 process_omitted_operand (operands[i], opcode, i, info);
6293
6294 /* Try again, skipping the optional operand at backtrack_pos. */
6295 str = backtrack_pos;
6296 backtrack_pos = 0;
6297
6298 /* Clear any error record after the omitted optional operand has been
6299 successfully handled. */
6300 clear_error ();
6301 }
6302
6303 /* Check if we have parsed all the operands. */
6304 if (*str != '\0' && ! error_p ())
6305 {
6306 /* Set I to the index of the last present operand; this is
6307 for the purpose of diagnostics. */
6308 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6309 ;
6310 set_fatal_syntax_error
6311 (_("unexpected characters following instruction"));
6312 }
6313
6314 parse_operands_return:
6315
6316 if (error_p ())
6317 {
6318 DEBUG_TRACE ("parsing FAIL: %s - %s",
6319 operand_mismatch_kind_names[get_error_kind ()],
6320 get_error_message ());
6321 /* Record the operand error properly; this is useful when there
6322 are multiple instruction templates for a mnemonic name, so that
6323 later on, we can select the error that most closely describes
6324 the problem. */
6325 record_operand_error (opcode, i, get_error_kind (),
6326 get_error_message ());
6327 return FALSE;
6328 }
6329 else
6330 {
6331 DEBUG_TRACE ("parsing SUCCESS");
6332 return TRUE;
6333 }
6334 }
6335
/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* For a W form the bit number must be in [0, 31]; once checked,
	     rewrite the qualifier to X, the form libopcodes encodes.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 4-byte literal regardless of the (X) destination
	     qualifier, so force a 4-byte pool entry.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  /* Only constant-like expressions can be placed in the pool.  */
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6443
6444 /* Check for loads and stores that will cause unpredictable behavior. */
6445
6446 static void
6447 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6448 {
6449 aarch64_inst *base = &instr->base;
6450 const aarch64_opcode *opcode = base->opcode;
6451 const aarch64_opnd_info *opnds = base->operands;
6452 switch (opcode->iclass)
6453 {
6454 case ldst_pos:
6455 case ldst_imm9:
6456 case ldst_unscaled:
6457 case ldst_unpriv:
6458 /* Loading/storing the base register is unpredictable if writeback. */
6459 if ((aarch64_get_operand_class (opnds[0].type)
6460 == AARCH64_OPND_CLASS_INT_REG)
6461 && opnds[0].reg.regno == opnds[1].addr.base_regno
6462 && opnds[1].addr.base_regno != REG_SP
6463 && opnds[1].addr.writeback)
6464 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6465 break;
6466 case ldstpair_off:
6467 case ldstnapair_offs:
6468 case ldstpair_indexed:
6469 /* Loading/storing the base register is unpredictable if writeback. */
6470 if ((aarch64_get_operand_class (opnds[0].type)
6471 == AARCH64_OPND_CLASS_INT_REG)
6472 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6473 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6474 && opnds[2].addr.base_regno != REG_SP
6475 && opnds[2].addr.writeback)
6476 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6477 /* Load operations must load different registers. */
6478 if ((opcode->opcode & (1 << 22))
6479 && opnds[0].reg.regno == opnds[1].reg.regno)
6480 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6481 break;
6482 default:
6483 break;
6484 }
6485 }
6486
6487 /* A wrapper function to interface with libopcodes on encoding and
6488 record the error message if there is any.
6489
6490 Return TRUE on success; otherwise return FALSE. */
6491
6492 static bfd_boolean
6493 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6494 aarch64_insn *code)
6495 {
6496 aarch64_operand_error error_info;
6497 error_info.kind = AARCH64_OPDE_NIL;
6498 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
6499 return TRUE;
6500 else
6501 {
6502 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6503 record_operand_error_info (opcode, &error_info);
6504 return FALSE;
6505 }
6506 }
6507
6508 #ifdef DEBUG_AARCH64
6509 static inline void
6510 dump_opcode_operands (const aarch64_opcode *opcode)
6511 {
6512 int i = 0;
6513 while (opcode->operands[i] != AARCH64_OPND_NIL)
6514 {
6515 aarch64_verbose ("\t\t opnd%d: %s", i,
6516 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6517 ? aarch64_get_operand_name (opcode->operands[i])
6518 : aarch64_get_operand_desc (opcode->operands[i]));
6519 ++i;
6520 }
6521 }
6522 #endif /* DEBUG_AARCH64 */
6523
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Start with no relocation; operand parsing may install one.  */
  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Look up the mnemonic; on return P points past it, at the operands.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
         the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* opcode_lookup recorded any condition suffix in inst.cond; keep it
     across the reset of the scratch instruction.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, apply the programmer-friendly transforms, then let
	 libopcodes encode; any stage failing moves on to the next
	 template below.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }
	  /* Success: the instruction has been emitted.  */
	  return;
	}

      /* This template failed; retry the next one with a freshly reset
	 scratch instruction (restoring the saved condition).  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str);
}
6657
6658 /* Various frobbings of labels and their addresses. */
6659
/* Hook called by GAS at the start of each new logical input line:
   forget the label tracked from the previous line so it is not
   mistakenly associated with the next statement.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
6665
/* Hook called by GAS whenever a label SYM is defined: remember it as
   the most recently seen label and emit DWARF line-debug information
   for it.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
6673
6674 int
6675 aarch64_data_in_code (void)
6676 {
6677 if (!strncmp (input_line_pointer + 1, "data:", 5))
6678 {
6679 *input_line_pointer = '/';
6680 input_line_pointer += 5;
6681 *input_line_pointer = 0;
6682 return 1;
6683 }
6684
6685 return 0;
6686 }
6687
/* Canonicalize symbol NAME in place: strip a trailing "/data" suffix
   (added by the data-in-code machinery) when one is present.  The
   (possibly shortened) NAME is returned.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + (len - 5), "/data") == 0)
    name[len - 5] = 0;

  return name;
}
6698 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* Build one reg_entry: spelling S (stringized), register number N and
   register type REG_TYPE_T; the trailing TRUE marks the entry as a
   built-in (non-.req) definition.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
/* Paste prefix P and number N together to form the register name.  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Registers 0-15 with prefix P (e.g. the sixteen SVE predicates).  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Registers 0-30 with prefix P; number 31 is handled separately for
   the integer registers (SP/ZR).  */
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
/* Registers 0-31 with prefix P.  */
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Coprocessor register numbers.  */
  REGSET (c, CN), REGSET (C, CN),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
6765
/* Single-bit values for each condition flag.  In the spellings below,
   an upper-case letter means the corresponding flag bit is set and a
   lower-case letter means it is clear.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four flag bits into the 4-bit NZCV immediate (N is the most
   significant bit).  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All sixteen case-pattern spellings of the NZCV flags operand and the
   immediate value each one denotes.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
6803 \f
6804 /* MD interface: bits in the object file. */
6805
6806 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6807 for use in the a.out file, and stores them in the array pointed to by buf.
6808 This knows about the endian-ness of the target machine and does
6809 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
6810 2 (short) and 4 (long) Floating numbers are put out as a series of
6811 LITTLENUMS (shorts, here at least). */
6812
6813 void
6814 md_number_to_chars (char *buf, valueT val, int n)
6815 {
6816 if (target_big_endian)
6817 number_to_chars_bigendian (buf, val, n);
6818 else
6819 number_to_chars_littleendian (buf, val, n);
6820 }
6821
6822 /* MD interface: Sections. */
6823
6824 /* Estimate the size of a frag before relaxing. Assume everything fits in
6825 4 bytes. */
6826
6827 int
6828 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6829 {
6830 fragp->fr_var = 4;
6831 return 4;
6832 }
6833
/* Round up a section size to the appropriate boundary.  AArch64 imposes
   no additional alignment on section sizes, so SIZE is returned
   unchanged.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  return size;
}
6841
6842 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6843 of an rs_align_code fragment.
6844
6845 Here we fill the frag with the appropriate info for padding the
6846 output stream. The resulting frag will consist of a fixed (fr_fix)
6847 and of a repeating (fr_var) part.
6848
6849 The fixed content is always emitted before the repeating content and
6850 these two parts are used as follows in constructing the output:
6851 - the fixed part will be used to align to a valid instruction word
6852 boundary, in case that we start at a misaligned address; as no
6853 executable instruction can live at the misaligned location, we
6854 simply fill with zeros;
6855 - the variable part will be used to cover the remaining padding and
6856 we fill using the AArch64 NOP instruction.
6857
6858 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6859 enough storage space for up to 3 bytes for padding the back to a valid
6860 instruction alignment and exactly 4 bytes to store the NOP pattern. */
6861
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total number of padding bytes this frag must supply, and where the
     padding starts.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary.  These become
     part of the fixed portion and are zero-filled, since no instruction
     can live at a misaligned address.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      /* Mark the zero padding as data for the mapping symbols.  */
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating (fr_var) part is the 4-byte NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
6899
6900 /* Perform target specific initialisation of a frag.
6901 Note - despite the name this initialisation is not done when the frag
6902 is created, but only when its type is assigned. A frag can be created
6903 and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first. */
6905
6906 #ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping-symbol state, so there is
   nothing to initialise.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}
6912
6913 #else /* OBJ_ELF is defined. */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      /* Fill-style frags hold data, not instructions.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      /* Code alignment padding is filled with instructions (NOPs).  */
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
6940 \f
/* Initialize the DWARF-2 unwind information for this procedure: on
   function entry the CFA is the stack pointer with offset 0.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
6948 #endif /* OBJ_ELF */
6949
6950 /* Convert REGNAME to a DWARF-2 register number. */
6951
6952 int
6953 tc_aarch64_regname_to_dw2regnum (char *regname)
6954 {
6955 const reg_entry *reg = parse_reg (&regname);
6956 if (reg == NULL)
6957 return -1;
6958
6959 switch (reg->type)
6960 {
6961 case REG_TYPE_SP_32:
6962 case REG_TYPE_SP_64:
6963 case REG_TYPE_R_32:
6964 case REG_TYPE_R_64:
6965 return reg->number;
6966
6967 case REG_TYPE_FP_B:
6968 case REG_TYPE_FP_H:
6969 case REG_TYPE_FP_S:
6970 case REG_TYPE_FP_D:
6971 case REG_TYPE_FP_Q:
6972 return reg->number + 64;
6973
6974 default:
6975 break;
6976 }
6977 return -1;
6978 }
6979
/* Implement DWARF2_ADDR_SIZE: the size in bytes of an address as
   recorded in the DWARF debug information -- 4 for ILP32 ELF output,
   otherwise derived from the output BFD's address width.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
6991
6992 /* MD interface: Symbol and relocation handling. */
6993
6994 /* Return the address within the segment that a PC-relative fixup is
6995 relative to. For AArch64 PC-relative fixups applied to instructions
6996 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6997
6998 long
6999 md_pcrel_from_section (fixS * fixP, segT seg)
7000 {
7001 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7002
7003 /* If this is pc-relative and we are going to emit a relocation
7004 then we just want to put out any pipeline compensation that the linker
7005 will need. Otherwise we want to use the calculated base. */
7006 if (fixP->fx_pcrel
7007 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7008 || aarch64_force_relocation (fixP)))
7009 base = 0;
7010
7011 /* AArch64 should be consistent for all pc-relative relocations. */
7012 return base + AARCH64_PCREL_OFFSET;
7013 }
7014
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character prefix test before the full comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  /* A user-level definition of the GOT symbol is an error;
	     otherwise create the single canonical undefined instance.  */
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7040
7041 /* Return non-zero if the indicated VALUE has overflowed the maximum
7042 range expressible by a unsigned number with the indicated number of
7043 BITS. */
7044
7045 static bfd_boolean
7046 unsigned_overflow (valueT value, unsigned bits)
7047 {
7048 valueT lim;
7049 if (bits >= sizeof (valueT) * 8)
7050 return FALSE;
7051 lim = (valueT) 1 << bits;
7052 return (value >= lim);
7053 }
7054
7055
7056 /* Return non-zero if the indicated VALUE has overflowed the maximum
7057 range expressible by an signed number with the indicated number of
7058 BITS. */
7059
7060 static bfd_boolean
7061 signed_overflow (offsetT value, unsigned bits)
7062 {
7063 offsetT lim;
7064 if (bits >= sizeof (offsetT) * 8)
7065 return FALSE;
7066 lim = (offsetT) 1 << (bits - 1);
7067 return (value < -lim || value >= lim);
7068 }
7069
7070 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7071 unsigned immediate offset load/store instruction, try to encode it as
7072 an unscaled, 9-bit, signed immediate offset load/store instruction.
7073 Return TRUE if it is successful; otherwise return FALSE.
7074
7075 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7076 in response to the standard LDR/STR mnemonics when the immediate offset is
7077 unambiguous, i.e. when it is negative or unaligned. */
7078
7079 static bfd_boolean
7080 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
7081 {
7082 int idx;
7083 enum aarch64_op new_op;
7084 const aarch64_opcode *new_opcode;
7085
7086 gas_assert (instr->opcode->iclass == ldst_pos);
7087
7088 switch (instr->opcode->op)
7089 {
7090 case OP_LDRB_POS:new_op = OP_LDURB; break;
7091 case OP_STRB_POS: new_op = OP_STURB; break;
7092 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
7093 case OP_LDRH_POS: new_op = OP_LDURH; break;
7094 case OP_STRH_POS: new_op = OP_STURH; break;
7095 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
7096 case OP_LDR_POS: new_op = OP_LDUR; break;
7097 case OP_STR_POS: new_op = OP_STUR; break;
7098 case OP_LDRF_POS: new_op = OP_LDURV; break;
7099 case OP_STRF_POS: new_op = OP_STURV; break;
7100 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
7101 case OP_PRFM_POS: new_op = OP_PRFUM; break;
7102 default: new_op = OP_NIL; break;
7103 }
7104
7105 if (new_op == OP_NIL)
7106 return FALSE;
7107
7108 new_opcode = aarch64_get_opcode (new_op);
7109 gas_assert (new_opcode != NULL);
7110
7111 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
7112 instr->opcode->op, new_opcode->op);
7113
7114 aarch64_replace_opcode (instr, new_opcode);
7115
7116 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
7117 qualifier matching may fail because the out-of-date qualifier will
7118 prevent the operand being updated with a new and correct qualifier. */
7119 idx = aarch64_operand_index (instr->opcode->operands,
7120 AARCH64_OPND_ADDR_SIMM9);
7121 gas_assert (idx == 1);
7122 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
7123
7124 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
7125
7126 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
7127 return FALSE;
7128
7129 return TRUE;
7130 }
7131
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias first -- it is preferred whenever it can
	 represent the value.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias (bitmask immediate).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* None of the single-instruction aliases can represent VALUE.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
7192
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit immediate patched directly into the instruction word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm  imm6 Rn Rd ADD
	 2b000000 sf 010|1011|shift 0 Rm  imm6 Rn Rd ADDS
	 4b000000 sf 100|1011|shift 0 Rm  imm6 Rn Rd SUB
	 6b000000 sf 110|1011|shift 0 Rm  imm6 Rn Rd SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode via libopcodes since the set of
	 representable values is irregular.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_UIMM12:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* The scaled form could not encode the offset (e.g. negative
	     or unaligned); the unscaled LDUR/STUR form succeeded.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7367
7368 /* Apply a fixup (fixP) to segment data, once it has been determined
7369 by our caller that we have all the info we need to fix it up.
7370
7371 Parameter valP is the pointer to the value of the bits. */
7372
7373 void
7374 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7375 {
7376 offsetT value = *valP;
7377 uint32_t insn;
7378 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7379 int scale;
7380 unsigned flags = fixP->fx_addnumber;
7381
7382 DEBUG_TRACE ("\n\n");
7383 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7384 DEBUG_TRACE ("Enter md_apply_fix");
7385
7386 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7387
7388 /* Note whether this will delete the relocation. */
7389
7390 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7391 fixP->fx_done = 1;
7392
7393 /* Process the relocations. */
7394 switch (fixP->fx_r_type)
7395 {
7396 case BFD_RELOC_NONE:
7397 /* This will need to go in the object file. */
7398 fixP->fx_done = 0;
7399 break;
7400
7401 case BFD_RELOC_8:
7402 case BFD_RELOC_8_PCREL:
7403 if (fixP->fx_done || !seg->use_rela_p)
7404 md_number_to_chars (buf, value, 1);
7405 break;
7406
7407 case BFD_RELOC_16:
7408 case BFD_RELOC_16_PCREL:
7409 if (fixP->fx_done || !seg->use_rela_p)
7410 md_number_to_chars (buf, value, 2);
7411 break;
7412
7413 case BFD_RELOC_32:
7414 case BFD_RELOC_32_PCREL:
7415 if (fixP->fx_done || !seg->use_rela_p)
7416 md_number_to_chars (buf, value, 4);
7417 break;
7418
7419 case BFD_RELOC_64:
7420 case BFD_RELOC_64_PCREL:
7421 if (fixP->fx_done || !seg->use_rela_p)
7422 md_number_to_chars (buf, value, 8);
7423 break;
7424
7425 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7426 /* We claim that these fixups have been processed here, even if
7427 in fact we generate an error because we do not have a reloc
7428 for them, so tc_gen_reloc() will reject them. */
7429 fixP->fx_done = 1;
7430 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7431 {
7432 as_bad_where (fixP->fx_file, fixP->fx_line,
7433 _("undefined symbol %s used as an immediate value"),
7434 S_GET_NAME (fixP->fx_addsy));
7435 goto apply_fix_return;
7436 }
7437 fix_insn (fixP, flags, value);
7438 break;
7439
7440 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7441 if (fixP->fx_done || !seg->use_rela_p)
7442 {
7443 if (value & 3)
7444 as_bad_where (fixP->fx_file, fixP->fx_line,
7445 _("pc-relative load offset not word aligned"));
7446 if (signed_overflow (value, 21))
7447 as_bad_where (fixP->fx_file, fixP->fx_line,
7448 _("pc-relative load offset out of range"));
7449 insn = get_aarch64_insn (buf);
7450 insn |= encode_ld_lit_ofs_19 (value >> 2);
7451 put_aarch64_insn (buf, insn);
7452 }
7453 break;
7454
7455 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7456 if (fixP->fx_done || !seg->use_rela_p)
7457 {
7458 if (signed_overflow (value, 21))
7459 as_bad_where (fixP->fx_file, fixP->fx_line,
7460 _("pc-relative address offset out of range"));
7461 insn = get_aarch64_insn (buf);
7462 insn |= encode_adr_imm (value);
7463 put_aarch64_insn (buf, insn);
7464 }
7465 break;
7466
7467 case BFD_RELOC_AARCH64_BRANCH19:
7468 if (fixP->fx_done || !seg->use_rela_p)
7469 {
7470 if (value & 3)
7471 as_bad_where (fixP->fx_file, fixP->fx_line,
7472 _("conditional branch target not word aligned"));
7473 if (signed_overflow (value, 21))
7474 as_bad_where (fixP->fx_file, fixP->fx_line,
7475 _("conditional branch out of range"));
7476 insn = get_aarch64_insn (buf);
7477 insn |= encode_cond_branch_ofs_19 (value >> 2);
7478 put_aarch64_insn (buf, insn);
7479 }
7480 break;
7481
7482 case BFD_RELOC_AARCH64_TSTBR14:
7483 if (fixP->fx_done || !seg->use_rela_p)
7484 {
7485 if (value & 3)
7486 as_bad_where (fixP->fx_file, fixP->fx_line,
7487 _("conditional branch target not word aligned"));
7488 if (signed_overflow (value, 16))
7489 as_bad_where (fixP->fx_file, fixP->fx_line,
7490 _("conditional branch out of range"));
7491 insn = get_aarch64_insn (buf);
7492 insn |= encode_tst_branch_ofs_14 (value >> 2);
7493 put_aarch64_insn (buf, insn);
7494 }
7495 break;
7496
7497 case BFD_RELOC_AARCH64_CALL26:
7498 case BFD_RELOC_AARCH64_JUMP26:
7499 if (fixP->fx_done || !seg->use_rela_p)
7500 {
7501 if (value & 3)
7502 as_bad_where (fixP->fx_file, fixP->fx_line,
7503 _("branch target not word aligned"));
7504 if (signed_overflow (value, 28))
7505 as_bad_where (fixP->fx_file, fixP->fx_line,
7506 _("branch out of range"));
7507 insn = get_aarch64_insn (buf);
7508 insn |= encode_branch_ofs_26 (value >> 2);
7509 put_aarch64_insn (buf, insn);
7510 }
7511 break;
7512
7513 case BFD_RELOC_AARCH64_MOVW_G0:
7514 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7515 case BFD_RELOC_AARCH64_MOVW_G0_S:
7516 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7517 scale = 0;
7518 goto movw_common;
7519 case BFD_RELOC_AARCH64_MOVW_G1:
7520 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7521 case BFD_RELOC_AARCH64_MOVW_G1_S:
7522 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7523 scale = 16;
7524 goto movw_common;
7525 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7526 scale = 0;
7527 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7528 /* Should always be exported to object file, see
7529 aarch64_force_relocation(). */
7530 gas_assert (!fixP->fx_done);
7531 gas_assert (seg->use_rela_p);
7532 goto movw_common;
7533 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7534 scale = 16;
7535 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7536 /* Should always be exported to object file, see
7537 aarch64_force_relocation(). */
7538 gas_assert (!fixP->fx_done);
7539 gas_assert (seg->use_rela_p);
7540 goto movw_common;
7541 case BFD_RELOC_AARCH64_MOVW_G2:
7542 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7543 case BFD_RELOC_AARCH64_MOVW_G2_S:
7544 scale = 32;
7545 goto movw_common;
7546 case BFD_RELOC_AARCH64_MOVW_G3:
7547 scale = 48;
7548 movw_common:
7549 if (fixP->fx_done || !seg->use_rela_p)
7550 {
7551 insn = get_aarch64_insn (buf);
7552
7553 if (!fixP->fx_done)
7554 {
7555 /* REL signed addend must fit in 16 bits */
7556 if (signed_overflow (value, 16))
7557 as_bad_where (fixP->fx_file, fixP->fx_line,
7558 _("offset out of range"));
7559 }
7560 else
7561 {
7562 /* Check for overflow and scale. */
7563 switch (fixP->fx_r_type)
7564 {
7565 case BFD_RELOC_AARCH64_MOVW_G0:
7566 case BFD_RELOC_AARCH64_MOVW_G1:
7567 case BFD_RELOC_AARCH64_MOVW_G2:
7568 case BFD_RELOC_AARCH64_MOVW_G3:
7569 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7570 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7571 if (unsigned_overflow (value, scale + 16))
7572 as_bad_where (fixP->fx_file, fixP->fx_line,
7573 _("unsigned value out of range"));
7574 break;
7575 case BFD_RELOC_AARCH64_MOVW_G0_S:
7576 case BFD_RELOC_AARCH64_MOVW_G1_S:
7577 case BFD_RELOC_AARCH64_MOVW_G2_S:
7578 /* NOTE: We can only come here with movz or movn. */
7579 if (signed_overflow (value, scale + 16))
7580 as_bad_where (fixP->fx_file, fixP->fx_line,
7581 _("signed value out of range"));
7582 if (value < 0)
7583 {
7584 /* Force use of MOVN. */
7585 value = ~value;
7586 insn = reencode_movzn_to_movn (insn);
7587 }
7588 else
7589 {
7590 /* Force use of MOVZ. */
7591 insn = reencode_movzn_to_movz (insn);
7592 }
7593 break;
7594 default:
7595 /* Unchecked relocations. */
7596 break;
7597 }
7598 value >>= scale;
7599 }
7600
7601 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7602 insn |= encode_movw_imm (value & 0xffff);
7603
7604 put_aarch64_insn (buf, insn);
7605 }
7606 break;
7607
7608 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7609 fixP->fx_r_type = (ilp32_p
7610 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7611 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7612 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7613 /* Should always be exported to object file, see
7614 aarch64_force_relocation(). */
7615 gas_assert (!fixP->fx_done);
7616 gas_assert (seg->use_rela_p);
7617 break;
7618
7619 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7620 fixP->fx_r_type = (ilp32_p
7621 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7622 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
7623 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7624 /* Should always be exported to object file, see
7625 aarch64_force_relocation(). */
7626 gas_assert (!fixP->fx_done);
7627 gas_assert (seg->use_rela_p);
7628 break;
7629
7630 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7631 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7632 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7633 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7634 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7635 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7636 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7637 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7638 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7639 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7640 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7641 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7642 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7643 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7644 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7645 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7646 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7647 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7648 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7649 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7650 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7651 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7652 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7653 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7654 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7655 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7656 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7657 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7658 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7659 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7660 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7661 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7662 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7663 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7664 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7665 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7666 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7667 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7668 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7669 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7670 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7671 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7672 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7673 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7674 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7675 /* Should always be exported to object file, see
7676 aarch64_force_relocation(). */
7677 gas_assert (!fixP->fx_done);
7678 gas_assert (seg->use_rela_p);
7679 break;
7680
7681 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7682 /* Should always be exported to object file, see
7683 aarch64_force_relocation(). */
7684 fixP->fx_r_type = (ilp32_p
7685 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7686 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7687 gas_assert (!fixP->fx_done);
7688 gas_assert (seg->use_rela_p);
7689 break;
7690
7691 case BFD_RELOC_AARCH64_ADD_LO12:
7692 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7693 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7694 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7695 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7696 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7697 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7698 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7699 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7700 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7701 case BFD_RELOC_AARCH64_LDST128_LO12:
7702 case BFD_RELOC_AARCH64_LDST16_LO12:
7703 case BFD_RELOC_AARCH64_LDST32_LO12:
7704 case BFD_RELOC_AARCH64_LDST64_LO12:
7705 case BFD_RELOC_AARCH64_LDST8_LO12:
7706 /* Should always be exported to object file, see
7707 aarch64_force_relocation(). */
7708 gas_assert (!fixP->fx_done);
7709 gas_assert (seg->use_rela_p);
7710 break;
7711
7712 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7713 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7714 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7715 break;
7716
7717 case BFD_RELOC_UNUSED:
7718 /* An error will already have been reported. */
7719 break;
7720
7721 default:
7722 as_bad_where (fixP->fx_file, fixP->fx_line,
7723 _("unexpected %s fixup"),
7724 bfd_get_reloc_code_name (fixP->fx_r_type));
7725 break;
7726 }
7727
7728 apply_fix_return:
7729 /* Free the allocated the struct aarch64_inst.
7730 N.B. currently there are very limited number of fix-up types actually use
7731 this field, so the impact on the performance should be minimal . */
7732 if (fixP->tc_fix_data.inst != NULL)
7733 free (fixP->tc_fix_data.inst);
7734
7735 return;
7736 }
7737
7738 /* Translate internal representation of relocation info to BFD target
7739 format. */
7740
7741 arelent *
7742 tc_gen_reloc (asection * section, fixS * fixp)
7743 {
7744 arelent *reloc;
7745 bfd_reloc_code_real_type code;
7746
7747 reloc = XNEW (arelent);
7748
7749 reloc->sym_ptr_ptr = XNEW (asymbol *);
7750 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7751 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7752
7753 if (fixp->fx_pcrel)
7754 {
7755 if (section->use_rela_p)
7756 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7757 else
7758 fixp->fx_offset = reloc->address;
7759 }
7760 reloc->addend = fixp->fx_offset;
7761
7762 code = fixp->fx_r_type;
7763 switch (code)
7764 {
7765 case BFD_RELOC_16:
7766 if (fixp->fx_pcrel)
7767 code = BFD_RELOC_16_PCREL;
7768 break;
7769
7770 case BFD_RELOC_32:
7771 if (fixp->fx_pcrel)
7772 code = BFD_RELOC_32_PCREL;
7773 break;
7774
7775 case BFD_RELOC_64:
7776 if (fixp->fx_pcrel)
7777 code = BFD_RELOC_64_PCREL;
7778 break;
7779
7780 default:
7781 break;
7782 }
7783
7784 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7785 if (reloc->howto == NULL)
7786 {
7787 as_bad_where (fixp->fx_file, fixp->fx_line,
7788 _
7789 ("cannot represent %s relocation in this object file format"),
7790 bfd_get_reloc_code_name (code));
7791 return NULL;
7792 }
7793
7794 return reloc;
7795 }
7796
7797 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7798
7799 void
7800 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7801 {
7802 bfd_reloc_code_real_type type;
7803 int pcrel = 0;
7804
7805 /* Pick a reloc.
7806 FIXME: @@ Should look at CPU word size. */
7807 switch (size)
7808 {
7809 case 1:
7810 type = BFD_RELOC_8;
7811 break;
7812 case 2:
7813 type = BFD_RELOC_16;
7814 break;
7815 case 4:
7816 type = BFD_RELOC_32;
7817 break;
7818 case 8:
7819 type = BFD_RELOC_64;
7820 break;
7821 default:
7822 as_bad (_("cannot do %u-byte relocation"), size);
7823 type = BFD_RELOC_UNUSED;
7824 break;
7825 }
7826
7827 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7828 }
7829
/* Decide whether the fixup FIXP must be kept as a relocation in the
   output object rather than resolved at assembly time.  Returns
   non-zero to force the relocation, zero to allow local resolution.  */
int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    /* GOT-, TLS- and lo12-page-offset relocations: the final value is
       only known at link time, so they must reach the linker.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  /* Fall back to the generic policy for anything else.  */
  return generic_force_reloc (fixp);
}
7917
7918 #ifdef OBJ_ELF
7919
7920 const char *
7921 elf64_aarch64_target_format (void)
7922 {
7923 if (strcmp (TARGET_OS, "cloudabi") == 0)
7924 {
7925 /* FIXME: What to do for ilp32_p ? */
7926 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
7927 }
7928 if (target_big_endian)
7929 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7930 else
7931 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7932 }
7933
/* Per-symbol hook run during ELF output; defer entirely to the
   generic ELF symbol frobbing code.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
7939 #endif
7940
7941 /* MD interface: Finalization. */
7942
7943 /* A good place to do this, although this was probably not intended
7944 for this kind of use. We need to dump the literal pool before
7945 references are made to a null symbol pointer. */
7946
7947 void
7948 aarch64_cleanup (void)
7949 {
7950 literal_pool *pool;
7951
7952 for (pool = list_of_pools; pool; pool = pool->next)
7953 {
7954 /* Put it at the end of the relevant section. */
7955 subseg_set (pool->section, pool->sub_section);
7956 s_ltorg (0);
7957 }
7958 }
7959
7960 #ifdef OBJ_ELF
7961 /* Remove any excess mapping symbols generated for alignment frags in
7962 SEC. We may have created a mapping symbol before a zero byte
7963 alignment; remove it if there's a mapping symbol after the
7964 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Only a mapping symbol sitting exactly at the end of its frag
	 can be redundant.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward over empty frags to decide whether SYM is made
	 redundant by a following mapping symbol at the same address.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8024 #endif
8025
8026 /* Adjust the symbol table. */
8027
/* MD hook run after the symbol table is built but before it is
   written out.  */
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8038
/* Insert VALUE into hash TABLE under KEY.  A failure here indicates an
   internal inconsistency (e.g. a duplicate key), and silently carrying
   on would leave the assembler running with an incomplete table, so
   treat it as fatal.  Previously the error was printed to stdout with
   printf and then ignored, and the reason string from hash_insert was
   discarded.  */
static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err = hash_insert (table, key, value);

  if (hash_err)
    as_fatal (_("internal error: can't hash `%s': %s"), key, hash_err);
}
8048
8049 static void
8050 fill_instruction_hash_table (void)
8051 {
8052 aarch64_opcode *opcode = aarch64_opcode_table;
8053
8054 while (opcode->name != NULL)
8055 {
8056 templates *templ, *new_templ;
8057 templ = hash_find (aarch64_ops_hsh, opcode->name);
8058
8059 new_templ = XNEW (templates);
8060 new_templ->opcode = opcode;
8061 new_templ->next = NULL;
8062
8063 if (!templ)
8064 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8065 else
8066 {
8067 new_templ->next = templ->next;
8068 templ->next = new_templ;
8069 }
8070 ++opcode;
8071 }
8072 }
8073
/* Copy at most NUM characters of the NUL-terminated string SRC to DST,
   converting each to upper case, and NUL-terminate DST.  DST must have
   room for NUM + 1 characters.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t i;

  /* Use size_t for the index so the counter cannot wrap/truncate when
     NUM exceeds UINT_MAX (the previous unsigned int index could).  */
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
8082
8083 /* Assume STR point to a lower-case string, allocate, convert and return
8084 the corresponding upper-case string. */
8085 static inline const char*
8086 get_upper_str (const char *str)
8087 {
8088 char *ret;
8089 size_t len = strlen (str);
8090 ret = XNEWVEC (char, len + 1);
8091 convert_to_upper (ret, str, len);
8092 return ret;
8093 }
8094
8095 /* MD interface: Initialization. */
8096
8097 void
8098 md_begin (void)
8099 {
8100 unsigned mach;
8101 unsigned int i;
8102
8103 if ((aarch64_ops_hsh = hash_new ()) == NULL
8104 || (aarch64_cond_hsh = hash_new ()) == NULL
8105 || (aarch64_shift_hsh = hash_new ()) == NULL
8106 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8107 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8108 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8109 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8110 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8111 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8112 || (aarch64_reg_hsh = hash_new ()) == NULL
8113 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8114 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8115 || (aarch64_pldop_hsh = hash_new ()) == NULL
8116 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8117 as_fatal (_("virtual memory exhausted"));
8118
8119 fill_instruction_hash_table ();
8120
8121 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8122 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8123 (void *) (aarch64_sys_regs + i));
8124
8125 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8126 checked_hash_insert (aarch64_pstatefield_hsh,
8127 aarch64_pstatefields[i].name,
8128 (void *) (aarch64_pstatefields + i));
8129
8130 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8131 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8132 aarch64_sys_regs_ic[i].name,
8133 (void *) (aarch64_sys_regs_ic + i));
8134
8135 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8136 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8137 aarch64_sys_regs_dc[i].name,
8138 (void *) (aarch64_sys_regs_dc + i));
8139
8140 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8141 checked_hash_insert (aarch64_sys_regs_at_hsh,
8142 aarch64_sys_regs_at[i].name,
8143 (void *) (aarch64_sys_regs_at + i));
8144
8145 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8146 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8147 aarch64_sys_regs_tlbi[i].name,
8148 (void *) (aarch64_sys_regs_tlbi + i));
8149
8150 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8151 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8152 (void *) (reg_names + i));
8153
8154 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8155 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8156 (void *) (nzcv_names + i));
8157
8158 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8159 {
8160 const char *name = aarch64_operand_modifiers[i].name;
8161 checked_hash_insert (aarch64_shift_hsh, name,
8162 (void *) (aarch64_operand_modifiers + i));
8163 /* Also hash the name in the upper case. */
8164 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8165 (void *) (aarch64_operand_modifiers + i));
8166 }
8167
8168 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8169 {
8170 unsigned int j;
8171 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8172 the same condition code. */
8173 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8174 {
8175 const char *name = aarch64_conds[i].names[j];
8176 if (name == NULL)
8177 break;
8178 checked_hash_insert (aarch64_cond_hsh, name,
8179 (void *) (aarch64_conds + i));
8180 /* Also hash the name in the upper case. */
8181 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8182 (void *) (aarch64_conds + i));
8183 }
8184 }
8185
8186 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8187 {
8188 const char *name = aarch64_barrier_options[i].name;
8189 /* Skip xx00 - the unallocated values of option. */
8190 if ((i & 0x3) == 0)
8191 continue;
8192 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8193 (void *) (aarch64_barrier_options + i));
8194 /* Also hash the name in the upper case. */
8195 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8196 (void *) (aarch64_barrier_options + i));
8197 }
8198
8199 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8200 {
8201 const char* name = aarch64_prfops[i].name;
8202 /* Skip the unallocated hint encodings. */
8203 if (name == NULL)
8204 continue;
8205 checked_hash_insert (aarch64_pldop_hsh, name,
8206 (void *) (aarch64_prfops + i));
8207 /* Also hash the name in the upper case. */
8208 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8209 (void *) (aarch64_prfops + i));
8210 }
8211
8212 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8213 {
8214 const char* name = aarch64_hint_options[i].name;
8215
8216 checked_hash_insert (aarch64_hint_opt_hsh, name,
8217 (void *) (aarch64_hint_options + i));
8218 /* Also hash the name in the upper case. */
8219 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8220 (void *) (aarch64_hint_options + i));
8221 }
8222
8223 /* Set the cpu variant based on the command-line options. */
8224 if (!mcpu_cpu_opt)
8225 mcpu_cpu_opt = march_cpu_opt;
8226
8227 if (!mcpu_cpu_opt)
8228 mcpu_cpu_opt = &cpu_default;
8229
8230 cpu_variant = *mcpu_cpu_opt;
8231
8232 /* Record the CPU type. */
8233 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8234
8235 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8236 }
8237
8238 /* Command line processing. */
8239
const char *md_shortopts = "m:";

/* On bi-endian targets both -EB and -EL are accepted; otherwise only
   the option matching the target's fixed byte order is defined.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8264
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Simple flag options: matching one of these sets *VAR = VALUE.
   Scanned by md_parse_option and listed by md_show_usage.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8287
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Consumed by -mcpu= parsing.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Qualcomm QDF24XX"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
8334
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Consumed by -march= parsing.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {NULL, AARCH64_ARCH_NONE}
};
8350
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* Extensions accepted after "+"/"+no" in -mcpu=/-march=.  REQUIRE
   lists the features each entry depends on (e.g. rdma needs simd,
   fp16 needs fp); the enable/disable closure helpers below honour
   these dependencies in both directions.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
   AARCH64_ARCH_NONE},
  {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
   AARCH64_ARCH_NONE},
  {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
   AARCH64_ARCH_NONE},
  {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
   AARCH64_ARCH_NONE},
  {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
   AARCH64_ARCH_NONE},
  {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
   AARCH64_ARCH_NONE},
  {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
   AARCH64_ARCH_NONE},
  {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
   AARCH64_ARCH_NONE},
  {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
   AARCH64_ARCH_NONE},
  {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
8384
/* Describes an option that carries a value appended to its name
   (e.g. -mcpu=cortex-a53); the value part is handed to FUNC.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
8392
8393 /* Transitive closure of features depending on set. */
8394 static aarch64_feature_set
8395 aarch64_feature_disable_set (aarch64_feature_set set)
8396 {
8397 const struct aarch64_option_cpu_value_table *opt;
8398 aarch64_feature_set prev = 0;
8399
8400 while (prev != set) {
8401 prev = set;
8402 for (opt = aarch64_features; opt->name != NULL; opt++)
8403 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8404 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8405 }
8406 return set;
8407 }
8408
8409 /* Transitive closure of dependencies of set. */
8410 static aarch64_feature_set
8411 aarch64_feature_enable_set (aarch64_feature_set set)
8412 {
8413 const struct aarch64_option_cpu_value_table *opt;
8414 aarch64_feature_set prev = 0;
8415
8416 while (prev != set) {
8417 prev = set;
8418 for (opt = aarch64_features; opt->name != NULL; opt++)
8419 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8420 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8421 }
8422 return set;
8423 }
8424
8425 static int
8426 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8427 bfd_boolean ext_only)
8428 {
8429 /* We insist on extensions being added before being removed. We achieve
8430 this by using the ADDING_VALUE variable to indicate whether we are
8431 adding an extension (1) or removing it (0) and only allowing it to
8432 change in the order -1 -> 1 -> 0. */
8433 int adding_value = -1;
8434 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8435
8436 /* Copy the feature set, so that we can modify it. */
8437 *ext_set = **opt_p;
8438 *opt_p = ext_set;
8439
8440 while (str != NULL && *str != 0)
8441 {
8442 const struct aarch64_option_cpu_value_table *opt;
8443 const char *ext = NULL;
8444 int optlen;
8445
8446 if (!ext_only)
8447 {
8448 if (*str != '+')
8449 {
8450 as_bad (_("invalid architectural extension"));
8451 return 0;
8452 }
8453
8454 ext = strchr (++str, '+');
8455 }
8456
8457 if (ext != NULL)
8458 optlen = ext - str;
8459 else
8460 optlen = strlen (str);
8461
8462 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8463 {
8464 if (adding_value != 0)
8465 adding_value = 0;
8466 optlen -= 2;
8467 str += 2;
8468 }
8469 else if (optlen > 0)
8470 {
8471 if (adding_value == -1)
8472 adding_value = 1;
8473 else if (adding_value != 1)
8474 {
8475 as_bad (_("must specify extensions to add before specifying "
8476 "those to remove"));
8477 return FALSE;
8478 }
8479 }
8480
8481 if (optlen == 0)
8482 {
8483 as_bad (_("missing architectural extension"));
8484 return 0;
8485 }
8486
8487 gas_assert (adding_value != -1);
8488
8489 for (opt = aarch64_features; opt->name != NULL; opt++)
8490 if (strncmp (opt->name, str, optlen) == 0)
8491 {
8492 aarch64_feature_set set;
8493
8494 /* Add or remove the extension. */
8495 if (adding_value)
8496 {
8497 set = aarch64_feature_enable_set (opt->value);
8498 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8499 }
8500 else
8501 {
8502 set = aarch64_feature_disable_set (opt->value);
8503 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8504 }
8505 break;
8506 }
8507
8508 if (opt->name == NULL)
8509 {
8510 as_bad (_("unknown architectural extension `%s'"), str);
8511 return 0;
8512 }
8513
8514 str = ext;
8515 };
8516
8517 return 1;
8518 }
8519
8520 static int
8521 aarch64_parse_cpu (const char *str)
8522 {
8523 const struct aarch64_cpu_option_table *opt;
8524 const char *ext = strchr (str, '+');
8525 size_t optlen;
8526
8527 if (ext != NULL)
8528 optlen = ext - str;
8529 else
8530 optlen = strlen (str);
8531
8532 if (optlen == 0)
8533 {
8534 as_bad (_("missing cpu name `%s'"), str);
8535 return 0;
8536 }
8537
8538 for (opt = aarch64_cpus; opt->name != NULL; opt++)
8539 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8540 {
8541 mcpu_cpu_opt = &opt->value;
8542 if (ext != NULL)
8543 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
8544
8545 return 1;
8546 }
8547
8548 as_bad (_("unknown cpu `%s'"), str);
8549 return 0;
8550 }
8551
8552 static int
8553 aarch64_parse_arch (const char *str)
8554 {
8555 const struct aarch64_arch_option_table *opt;
8556 const char *ext = strchr (str, '+');
8557 size_t optlen;
8558
8559 if (ext != NULL)
8560 optlen = ext - str;
8561 else
8562 optlen = strlen (str);
8563
8564 if (optlen == 0)
8565 {
8566 as_bad (_("missing architecture name `%s'"), str);
8567 return 0;
8568 }
8569
8570 for (opt = aarch64_archs; opt->name != NULL; opt++)
8571 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8572 {
8573 march_cpu_opt = &opt->value;
8574 if (ext != NULL)
8575 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8576
8577 return 1;
8578 }
8579
8580 as_bad (_("unknown architecture `%s'\n"), str);
8581 return 0;
8582 }
8583
8584 /* ABIs. */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* Values recognized for -mabi= (ELF only).  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
8595
8596 static int
8597 aarch64_parse_abi (const char *str)
8598 {
8599 unsigned int i;
8600
8601 if (str[0] == '\0')
8602 {
8603 as_bad (_("missing abi name `%s'"), str);
8604 return 0;
8605 }
8606
8607 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8608 if (strcmp (str, aarch64_abis[i].name) == 0)
8609 {
8610 aarch64_abi = aarch64_abis[i].value;
8611 return 1;
8612 }
8613
8614 as_bad (_("unknown abi `%s'\n"), str);
8615 return 0;
8616 }
8617
/* Options that take a value appended to the option name; the value is
   dispatched to the sub-option parser recorded in each entry.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
8629
/* Handle command-line option C with argument ARG (which may be NULL).
   First check the fixed endianness/listing cases, then the short-option
   table aarch64_opts, then the long-option table aarch64_long_opts.
   Return 1 if the option was recognised and consumed, 0 otherwise.  */

int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
         ones.  */
      return 0;

    default:
      /* Short options: match the option character and (if present) the
	 rest of the option string against ARG.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Long options, e.g. -mcpu=<value>: C is the first character of
	 the option name and ARG holds the remainder including the
	 argument, so compare ARG against the option name minus its
	 first character.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with just the argument text;
		 "- 1" compensates for the option character already
		 stripped from ARG.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
8697
8698 void
8699 md_show_usage (FILE * fp)
8700 {
8701 struct aarch64_option_table *opt;
8702 struct aarch64_long_option_table *lopt;
8703
8704 fprintf (fp, _(" AArch64-specific assembler options:\n"));
8705
8706 for (opt = aarch64_opts; opt->option != NULL; opt++)
8707 if (opt->help != NULL)
8708 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
8709
8710 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8711 if (lopt->help != NULL)
8712 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
8713
8714 #ifdef OPTION_EB
8715 fprintf (fp, _("\
8716 -EB assemble code for a big-endian cpu\n"));
8717 #endif
8718
8719 #ifdef OPTION_EL
8720 fprintf (fp, _("\
8721 -EL assemble code for a little-endian cpu\n"));
8722 #endif
8723 }
8724
8725 /* Parse a .cpu directive. */
8726
8727 static void
8728 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
8729 {
8730 const struct aarch64_cpu_option_table *opt;
8731 char saved_char;
8732 char *name;
8733 char *ext;
8734 size_t optlen;
8735
8736 name = input_line_pointer;
8737 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8738 input_line_pointer++;
8739 saved_char = *input_line_pointer;
8740 *input_line_pointer = 0;
8741
8742 ext = strchr (name, '+');
8743
8744 if (ext != NULL)
8745 optlen = ext - name;
8746 else
8747 optlen = strlen (name);
8748
8749 /* Skip the first "all" entry. */
8750 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
8751 if (strlen (opt->name) == optlen
8752 && strncmp (name, opt->name, optlen) == 0)
8753 {
8754 mcpu_cpu_opt = &opt->value;
8755 if (ext != NULL)
8756 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8757 return;
8758
8759 cpu_variant = *mcpu_cpu_opt;
8760
8761 *input_line_pointer = saved_char;
8762 demand_empty_rest_of_line ();
8763 return;
8764 }
8765 as_bad (_("unknown cpu `%s'"), name);
8766 *input_line_pointer = saved_char;
8767 ignore_rest_of_line ();
8768 }
8769
8770
8771 /* Parse a .arch directive. */
8772
8773 static void
8774 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8775 {
8776 const struct aarch64_arch_option_table *opt;
8777 char saved_char;
8778 char *name;
8779 char *ext;
8780 size_t optlen;
8781
8782 name = input_line_pointer;
8783 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8784 input_line_pointer++;
8785 saved_char = *input_line_pointer;
8786 *input_line_pointer = 0;
8787
8788 ext = strchr (name, '+');
8789
8790 if (ext != NULL)
8791 optlen = ext - name;
8792 else
8793 optlen = strlen (name);
8794
8795 /* Skip the first "all" entry. */
8796 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8797 if (strlen (opt->name) == optlen
8798 && strncmp (name, opt->name, optlen) == 0)
8799 {
8800 mcpu_cpu_opt = &opt->value;
8801 if (ext != NULL)
8802 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8803 return;
8804
8805 cpu_variant = *mcpu_cpu_opt;
8806
8807 *input_line_pointer = saved_char;
8808 demand_empty_rest_of_line ();
8809 return;
8810 }
8811
8812 as_bad (_("unknown architecture `%s'\n"), name);
8813 *input_line_pointer = saved_char;
8814 ignore_rest_of_line ();
8815 }
8816
8817 /* Parse a .arch_extension directive. */
8818
8819 static void
8820 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8821 {
8822 char saved_char;
8823 char *ext = input_line_pointer;;
8824
8825 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8826 input_line_pointer++;
8827 saved_char = *input_line_pointer;
8828 *input_line_pointer = 0;
8829
8830 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8831 return;
8832
8833 cpu_variant = *mcpu_cpu_opt;
8834
8835 *input_line_pointer = saved_char;
8836 demand_empty_rest_of_line ();
8837 }
8838
/* Copy symbol information.  */

/* Copy the AArch64-specific symbol flags from SRC to DEST (used when
   the assembler clones or aliases a symbol).  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
This page took 0.362543 seconds and 4 git commands to generate.