AArch64: Add SVE constraints verifier.
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2018 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
107 struct vector_type_el
108 {
109 enum vector_el_type type;
110 unsigned char defined;
111 unsigned width;
112 int64_t index;
113 };
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
117 struct reloc
118 {
119 bfd_reloc_code_real_type type;
120 expressionS exp;
121 int pc_rel;
122 enum aarch64_opnd opnd;
123 uint32_t flags;
124 unsigned need_libopcodes_p : 1;
125 };
126
127 struct aarch64_instruction
128 {
129 /* libopcodes structure for instruction intermediate representation. */
130 aarch64_inst base;
131 /* Record assembly errors found during the parsing. */
132 struct
133 {
134 enum aarch64_operand_error_kind kind;
135 const char *error;
136 } parsing_error;
137 /* The condition that appears in the assembly line. */
138 int cond;
139 /* Relocation information (including the GAS internal fixup). */
140 struct reloc reloc;
141 /* Need to generate an immediate in the literal pool. */
142 unsigned gen_lit_pool : 1;
143 };
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
/* Return TRUE if a parsing error has been recorded for the current
   instruction, i.e. the recorded kind is anything other than
   AARCH64_OPDE_NIL.  */
static inline bfd_boolean
error_p (void)
{
  return inst.parsing_error.kind != AARCH64_OPDE_NIL;
}
188
/* Return the message text of the recorded parsing error, or NULL if
   none was set (a NULL message means "use the default diagnostic").  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}
194
/* Return the kind of the recorded parsing error for the current
   instruction (AARCH64_OPDE_NIL if no error has been recorded).  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
200
/* Record a parsing error of kind KIND with message ERROR for the
   current instruction.  ERROR may be NULL, in which case a default
   message is composed later from the operand description.  Note this
   unconditionally overwrites any previously recorded error.  */
static inline void
set_error (enum aarch64_operand_error_kind kind, const char *error)
{
  inst.parsing_error.kind = kind;
  inst.parsing_error.error = error;
}
207
/* Record a recoverable error, i.e. one that does not prevent the
   remaining templates for the same mnemonic from being tried.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
213
/* Record a syntax error with a NULL message; the DESC field of the
   corresponding aarch64_operand entry is used later to compose the
   actual error message.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
221
/* Record a syntax error with the given message, overwriting any
   previously recorded error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
/* Record a fatal syntax error, i.e. one that stops any further
   template from being tried for the current assembly line.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
240 \f
241 /* Number of littlenums required to hold an extended precision number. */
242 #define MAX_LITTLENUMS 6
243
244 /* Return value for certain parsers when the parsing fails; those parsers
245 return the information of the parsed result, e.g. register number, on
246 success. */
247 #define PARSE_FAIL -1
248
249 /* This is an invalid condition code that means no conditional field is
250 present. */
251 #define COND_ALWAYS 0x10
252
253 typedef struct
254 {
255 const char *template;
256 unsigned long value;
257 } asm_barrier_opt;
258
259 typedef struct
260 {
261 const char *template;
262 uint32_t value;
263 } asm_nzcv;
264
265 struct reloc_entry
266 {
267 char *name;
268 bfd_reloc_code_real_type reloc;
269 };
270
271 /* Macros to define the register types and masks for the purpose
272 of parsing. */
273
274 #undef AARCH64_REG_TYPES
275 #define AARCH64_REG_TYPES \
276 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
277 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
278 BASIC_REG_TYPE(SP_32) /* wsp */ \
279 BASIC_REG_TYPE(SP_64) /* sp */ \
280 BASIC_REG_TYPE(Z_32) /* wzr */ \
281 BASIC_REG_TYPE(Z_64) /* xzr */ \
282 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
283 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
284 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
285 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
286 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
287 BASIC_REG_TYPE(VN) /* v[0-31] */ \
288 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
289 BASIC_REG_TYPE(PN) /* p[0-15] */ \
290 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
291 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
292 /* Typecheck: same, plus SVE registers. */ \
293 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
294 | REG_TYPE(ZN)) \
295 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
296 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
298 /* Typecheck: same, plus SVE registers. */ \
299 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
300 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
301 | REG_TYPE(ZN)) \
302 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
303 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
305 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
306 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
307 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
309 /* Typecheck: any [BHSDQ]P FP. */ \
310 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
311 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
312 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
313 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
314 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
315 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
316 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
317 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
318 be used for SVE instructions, since Zn and Pn are valid symbols \
319 in other contexts. */ \
320 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
322 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
323 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
324 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
325 | REG_TYPE(ZN) | REG_TYPE(PN)) \
326 /* Any integer register; used for error messages only. */ \
327 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
329 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
330 /* Pseudo type to mark the end of the enumerator sequence. */ \
331 BASIC_REG_TYPE(MAX)
332
333 #undef BASIC_REG_TYPE
334 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
335 #undef MULTI_REG_TYPE
336 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
337
338 /* Register type enumerators. */
339 typedef enum aarch64_reg_type_
340 {
341 /* A list of REG_TYPE_*. */
342 AARCH64_REG_TYPES
343 } aarch64_reg_type;
344
345 #undef BASIC_REG_TYPE
346 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
347 #undef REG_TYPE
348 #define REG_TYPE(T) (1 << REG_TYPE_##T)
349 #undef MULTI_REG_TYPE
350 #define MULTI_REG_TYPE(T,V) V,
351
352 /* Structure for a hash table entry for a register. */
353 typedef struct
354 {
355 const char *name;
356 unsigned char number;
357 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
358 unsigned char builtin;
359 } reg_entry;
360
361 /* Values indexed by aarch64_reg_type to assist the type checking. */
362 static const unsigned reg_type_masks[] =
363 {
364 AARCH64_REG_TYPES
365 };
366
367 #undef BASIC_REG_TYPE
368 #undef REG_TYPE
369 #undef MULTI_REG_TYPE
370 #undef AARCH64_REG_TYPES
371
372 /* Diagnostics used when we don't get a register of the expected type.
373 Note: this has to synchronized with aarch64_reg_type definitions
374 above. */
375 static const char *
376 get_reg_expected_msg (aarch64_reg_type reg_type)
377 {
378 const char *msg;
379
380 switch (reg_type)
381 {
382 case REG_TYPE_R_32:
383 msg = N_("integer 32-bit register expected");
384 break;
385 case REG_TYPE_R_64:
386 msg = N_("integer 64-bit register expected");
387 break;
388 case REG_TYPE_R_N:
389 msg = N_("integer register expected");
390 break;
391 case REG_TYPE_R64_SP:
392 msg = N_("64-bit integer or SP register expected");
393 break;
394 case REG_TYPE_SVE_BASE:
395 msg = N_("base register expected");
396 break;
397 case REG_TYPE_R_Z:
398 msg = N_("integer or zero register expected");
399 break;
400 case REG_TYPE_SVE_OFFSET:
401 msg = N_("offset register expected");
402 break;
403 case REG_TYPE_R_SP:
404 msg = N_("integer or SP register expected");
405 break;
406 case REG_TYPE_R_Z_SP:
407 msg = N_("integer, zero or SP register expected");
408 break;
409 case REG_TYPE_FP_B:
410 msg = N_("8-bit SIMD scalar register expected");
411 break;
412 case REG_TYPE_FP_H:
413 msg = N_("16-bit SIMD scalar or floating-point half precision "
414 "register expected");
415 break;
416 case REG_TYPE_FP_S:
417 msg = N_("32-bit SIMD scalar or floating-point single precision "
418 "register expected");
419 break;
420 case REG_TYPE_FP_D:
421 msg = N_("64-bit SIMD scalar or floating-point double precision "
422 "register expected");
423 break;
424 case REG_TYPE_FP_Q:
425 msg = N_("128-bit SIMD scalar or floating-point quad precision "
426 "register expected");
427 break;
428 case REG_TYPE_R_Z_BHSDQ_V:
429 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
430 msg = N_("register expected");
431 break;
432 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
433 msg = N_("SIMD scalar or floating-point register expected");
434 break;
435 case REG_TYPE_VN: /* any V reg */
436 msg = N_("vector register expected");
437 break;
438 case REG_TYPE_ZN:
439 msg = N_("SVE vector register expected");
440 break;
441 case REG_TYPE_PN:
442 msg = N_("SVE predicate register expected");
443 break;
444 default:
445 as_fatal (_("invalid register type %d"), reg_type);
446 }
447 return msg;
448 }
449
450 /* Some well known registers that we refer to directly elsewhere. */
451 #define REG_SP 31
452
453 /* Instructions take 4 bytes in the object file. */
454 #define INSN_SIZE 4
455
456 static struct hash_control *aarch64_ops_hsh;
457 static struct hash_control *aarch64_cond_hsh;
458 static struct hash_control *aarch64_shift_hsh;
459 static struct hash_control *aarch64_sys_regs_hsh;
460 static struct hash_control *aarch64_pstatefield_hsh;
461 static struct hash_control *aarch64_sys_regs_ic_hsh;
462 static struct hash_control *aarch64_sys_regs_dc_hsh;
463 static struct hash_control *aarch64_sys_regs_at_hsh;
464 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
465 static struct hash_control *aarch64_reg_hsh;
466 static struct hash_control *aarch64_barrier_opt_hsh;
467 static struct hash_control *aarch64_nzcv_hsh;
468 static struct hash_control *aarch64_pldop_hsh;
469 static struct hash_control *aarch64_hint_opt_hsh;
470
471 /* Stuff needed to resolve the label ambiguity
472 As:
473 ...
474 label: <insn>
475 may differ from:
476 ...
477 label:
478 <insn> */
479
480 static symbolS *last_label_seen;
481
482 /* Literal pool structure. Held on a per-section
483 and per-sub-section basis. */
484
485 #define MAX_LITERAL_POOL_SIZE 1024
486 typedef struct literal_expression
487 {
488 expressionS exp;
489 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
490 LITTLENUM_TYPE * bignum;
491 } literal_expression;
492
493 typedef struct literal_pool
494 {
495 literal_expression literals[MAX_LITERAL_POOL_SIZE];
496 unsigned int next_free_entry;
497 unsigned int id;
498 symbolS *symbol;
499 segT section;
500 subsegT sub_section;
501 int size;
502 struct literal_pool *next;
503 } literal_pool;
504
505 /* Pointer to a linked list of literal pools. */
506 static literal_pool *list_of_pools = NULL;
507 \f
508 /* Pure syntax. */
509
510 /* This array holds the chars that always start a comment. If the
511 pre-processor is disabled, these aren't very useful. */
512 const char comment_chars[] = "";
513
514 /* This array holds the chars that only start a comment at the beginning of
515 a line. If the line seems to have the form '# 123 filename'
516 .line and .file directives will appear in the pre-processed output. */
517 /* Note that input_file.c hand checks for '#' at the beginning of the
518 first line of the input file. This is because the compiler outputs
519 #NO_APP at the beginning of its output. */
520 /* Also note that comments like this one will always work. */
521 const char line_comment_chars[] = "#";
522
523 const char line_separator_chars[] = ";";
524
525 /* Chars that can be used to separate mant
526 from exp in floating point numbers. */
527 const char EXP_CHARS[] = "eE";
528
529 /* Chars that mean this number is a floating point constant. */
530 /* As in 0f12.456 */
531 /* or 0d1.2345e12 */
532
533 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
534
535 /* Prefix character that indicates the start of an immediate value. */
536 #define is_immediate_prefix(C) ((C) == '#')
537
538 /* Separator character handling. */
539
540 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
541
542 static inline bfd_boolean
543 skip_past_char (char **str, char c)
544 {
545 if (**str == c)
546 {
547 (*str)++;
548 return TRUE;
549 }
550 else
551 return FALSE;
552 }
553
554 #define skip_past_comma(str) skip_past_char (str, ',')
555
556 /* Arithmetic expressions (possibly involving symbols). */
557
558 static bfd_boolean in_my_get_expression_p = FALSE;
559
560 /* Third argument to my_get_expression. */
561 #define GE_NO_PREFIX 0
562 #define GE_OPT_PREFIX 1
563
564 /* Return TRUE if the string pointed by *STR is successfully parsed
565 as an valid expression; *EP will be filled with the information of
566 such an expression. Otherwise return FALSE. */
567
static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  /* Optionally consume a leading '#' immediate prefix.  */
  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Temporarily point the global input_line_pointer at *STR so that the
     generic expression () parser works on our string; it is restored on
     every exit path below.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Flag checked by md_operand so it can mark bad expressions found
     while this function is in control of the parse.  */
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* An explicit '#' prefix means the user clearly intended an
	 immediate, so report a fatal error in that case.  */
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
630
631 /* Turn a string in input_line_pointer into a floating point constant
632 of type TYPE, and store the appropriate bytes in *LITP. The number
633 of LITTLENUMS emitted is stored in *SIZEP. An error message is
634 returned, or NULL on OK. */
635
/* GAS interface: convert the text at input_line_pointer into a
   floating-point constant of type TYPE; defer entirely to the generic
   IEEE helper, honouring the target's endianness.  */
const char *
md_atof (int type, char *litP, int *sizeP)
{
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
641
642 /* We handle all bad expressions here, so that we can report the faulty
643 instruction in the error message. */
/* GAS callback for operands the generic expression parser cannot
   handle.  Only mark the expression illegal when the parse was started
   by my_get_expression, so that the faulty instruction can be reported
   by our own diagnostics instead of a generic error.  */
void
md_operand (expressionS * exp)
{
  if (in_my_get_expression_p)
    exp->X_op = O_illegal;
}
650
651 /* Immediate values. */
652
653 /* Errors may be set multiple times during parsing or bit encoding
654 (particularly in the Neon bits), but usually the earliest error which is set
655 will be the most meaningful. Avoid overwriting it with later (cascading)
656 errors by calling this function. */
657
/* Record ERROR as a syntax error, but only if no earlier error has been
   recorded; the earliest error is usually the most meaningful one.  */
static void
first_error (const char *error)
{
  if (! error_p ())
    set_syntax_error (error);
}
664
665 /* Similar to first_error, but this function accepts formatted error
666 message. */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  /* Only format and record the message if no earlier error exists;
     this also avoids clobbering the buffer a previous error points at.  */
  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message must fit in the buffer.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
689
690 /* Register parsing. */
691
692 /* Generic register parser which is called by other specialized
693 register parsers.
694 CCP points to what should be the beginning of a register name.
695 If it is indeed a valid register name, advance CCP over it and
696 return the reg_entry structure; otherwise return NULL.
697 It does not issue diagnostics. */
698
699 static reg_entry *
700 parse_reg (char **ccp)
701 {
702 char *start = *ccp;
703 char *p;
704 reg_entry *reg;
705
706 #ifdef REGISTER_PREFIX
707 if (*start != REGISTER_PREFIX)
708 return NULL;
709 start++;
710 #endif
711
712 p = start;
713 if (!ISALPHA (*p) || !is_name_beginner (*p))
714 return NULL;
715
716 do
717 p++;
718 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
719
720 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
721
722 if (!reg)
723 return NULL;
724
725 *ccp = p;
726 return reg;
727 }
728
/* Return TRUE if REG's concrete type is acceptable for the (possibly
   multi-type) category TYPE, using the precomputed reg_type_masks
   bitmask table; otherwise return FALSE.  */
static bfd_boolean
aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
{
  return (reg_type_masks[type] & (1 << reg->type)) != 0;
}
736
737 /* Try to parse a base or offset register. Allow SVE base and offset
738 registers if REG_TYPE includes SVE registers. Return the register
739 entry on success, setting *QUALIFIER to the register qualifier.
740 Return null otherwise.
741
742 Note that this function does not issue any diagnostics. */
743
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit GPR, WSP or WZR -> W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit GPR, SP or XZR -> X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE Z register is acceptable only if REG_TYPE allows it,
	 and it must carry an explicit ".s" or ".d" element suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Skip the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
794
795 /* Try to parse a base or offset register. Return the register entry
796 on success, setting *QUALIFIER to the register qualifier. Return null
797 otherwise.
798
799 Note that this function does not issue any diagnostics. */
800
/* Convenience wrapper: parse a 32-bit or 64-bit base/offset register
   (GPR, SP or ZR, but no SVE registers), filling in *QUALIFIER.
   Returns NULL on failure; issues no diagnostics.  */
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
806
807 /* Parse the qualifier of a vector register or vector element of type
808 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
809 succeeds; otherwise return FALSE.
810
811 Accept only one occurrence of:
812 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
813 b h s d q */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE Z/P registers take a bare element size with no leading element
     count; a plain ".b" etc. (width 0) is also accepted here and the
     caller decides whether an index must follow.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* "q" is only valid for SVE registers or as a single 128-bit
	 element ("1q").  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* When a width was given, the total must be 64 or 128 bits, or one of
     the special short forms 2h / 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
892
893 /* *STR contains an SVE zero/merge predication suffix. Parse it into
894 *PARSED_TYPE and point *STR at the end of the suffix. */
895
896 static bfd_boolean
897 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
898 {
899 char *ptr = *str;
900
901 /* Skip '/'. */
902 gas_assert (*ptr == '/');
903 ptr++;
904 switch (TOLOWER (*ptr))
905 {
906 case 'z':
907 parsed_type->type = NT_zero;
908 break;
909 case 'm':
910 parsed_type->type = NT_merge;
911 break;
912 default:
913 if (*ptr != '\0' && *ptr != ',')
914 first_error_fmt (_("unexpected character `%c' in predication type"),
915 *ptr);
916 else
917 first_error (_("missing predication type"));
918 return FALSE;
919 }
920 parsed_type->width = 0;
921 *str = ptr + 1;
922 return TRUE;
923 }
924
925 /* Parse a register of the type TYPE.
926
927 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
928 name or the parsed register is not of TYPE.
929
930 Otherwise return the register number, and optionally fill in the actual
931 type of the register in *RTYPE when multiple alternatives were given, and
932 return the register shape and element index information in *TYPEINFO.
933
934 IN_REG_LIST should be set with TRUE if the caller is parsing a register
935 list. */
936
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start with an "untyped, unindexed" description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the concrete type of the register actually found.  */
  type = reg->type;

  /* Parse an optional ".<T>" shape suffix (V/Z/P registers) or a "/z",
     "/m" predication suffix (P registers only).  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Optional "[<index>]" element index.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  Note: the error is
     recorded but parsing continues, returning the register number.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1060
1061 /* Parse register.
1062
1063 Return the register number on success; return PARSE_FAIL otherwise.
1064
1065 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1066 the register (e.g. NEON double or quad reg when either has been requested).
1067
1068 If this is a NEON vector register with additional type information, fill
1069 in the struct pointed to by VECTYPE (if non-NULL).
1070
1071 This parser does not handle register list. */
1072
static int
aarch64_reg_parse (char **ccp, aarch64_reg_type type,
		   aarch64_reg_type *rtype, struct vector_type_el *vectype)
{
  struct vector_type_el atype;
  char *str = *ccp;
  int reg = parse_typed_reg (&str, type, rtype, &atype,
			     /*in_reg_list= */ FALSE);

  if (reg == PARSE_FAIL)
    return PARSE_FAIL;

  if (vectype)
    *vectype = atype;

  /* Only advance the caller's pointer on success.  */
  *ccp = str;

  return reg;
}
1092
1093 static inline bfd_boolean
1094 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1095 {
1096 return
1097 e1.type == e2.type
1098 && e1.defined == e2.defined
1099 && e1.width == e2.width && e1.index == e2.index;
1100 }
1101
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  /* The register list must be enclosed in braces.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;			/* Most recently parsed register number.  */
  val_range = -1;		/* First register of a "-" range.  */
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  /* The range starts after the previously parsed register.  */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      /* An element index on any entry means the whole list is indexed,
	 so a trailing "[<index>]" must follow the closing brace.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* Register ranges must be ascending.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* Every element must have the same shape as the first one.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      if (! error)
	/* Pack each register of the (possibly single-element) range
	   into the result, 5 bits per register.  */
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      /* Parse the element index shared by the whole list.  */
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits hold the register count minus one; register numbers
     are packed above them.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1262
1263 /* Directives: register aliases. */
1264
1265 static reg_entry *
1266 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1267 {
1268 reg_entry *new;
1269 const char *name;
1270
1271 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1272 {
1273 if (new->builtin)
1274 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1275 str);
1276
1277 /* Only warn about a redefinition if it's not defined as the
1278 same register. */
1279 else if (new->number != number || new->type != type)
1280 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1281
1282 return NULL;
1283 }
1284
1285 name = xstrdup (str);
1286 new = XNEW (reg_entry);
1287
1288 new->name = name;
1289 new->number = number;
1290 new->type = type;
1291 new->builtin = FALSE;
1292
1293 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1294 abort ();
1295
1296 return new;
1297 }
1298
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must name an existing register or alias.  */
  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* Upper-case the alias in place and add it too, unless it is
	 spelled the same as the name as written.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      /* Likewise for the all-lowercase spelling.  */
      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1378
1379 /* Should never be called, as .req goes between the alias and the
1380 register name, not at the beginning of the line. */
1381 static void
1382 s_req (int a ATTRIBUTE_UNUSED)
1383 {
1384 as_bad (_("invalid syntax for .req directive"));
1385 }
1386
1387 /* The .unreq directive deletes an alias which was previously defined
1388 by .req. For example:
1389
1390 my_alias .req r11
1391 .unreq my_alias */
1392
1393 static void
1394 s_unreq (int a ATTRIBUTE_UNUSED)
1395 {
1396 char *name;
1397 char saved_char;
1398
1399 name = input_line_pointer;
1400
1401 while (*input_line_pointer != 0
1402 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1403 ++input_line_pointer;
1404
1405 saved_char = *input_line_pointer;
1406 *input_line_pointer = 0;
1407
1408 if (!*name)
1409 as_bad (_("invalid syntax for .unreq directive"));
1410 else
1411 {
1412 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1413
1414 if (!reg)
1415 as_bad (_("unknown register alias '%s'"), name);
1416 else if (reg->builtin)
1417 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1418 name);
1419 else
1420 {
1421 char *p;
1422 char *nbuf;
1423
1424 hash_delete (aarch64_reg_hsh, name, FALSE);
1425 free ((char *) reg->name);
1426 free (reg);
1427
1428 /* Also locate the all upper case and all lower case versions.
1429 Do not complain if we cannot find one or the other as it
1430 was probably deleted above. */
1431
1432 nbuf = strdup (name);
1433 for (p = nbuf; *p; p++)
1434 *p = TOUPPER (*p);
1435 reg = hash_find (aarch64_reg_hsh, nbuf);
1436 if (reg)
1437 {
1438 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1439 free ((char *) reg->name);
1440 free (reg);
1441 }
1442
1443 for (p = nbuf; *p; p++)
1444 *p = TOLOWER (*p);
1445 reg = hash_find (aarch64_reg_hsh, nbuf);
1446 if (reg)
1447 {
1448 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1449 free ((char *) reg->name);
1450 free (reg);
1451 }
1452
1453 free (nbuf);
1454 }
1455 }
1456
1457 *input_line_pointer = saved_char;
1458 demand_empty_rest_of_line ();
1459 }
1460
1461 /* Directives: Instruction set selection. */
1462
1463 #ifdef OBJ_ELF
1464 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1465 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1466 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1467 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1468
/* Create a new mapping symbol for the transition to STATE.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the AArch64 ELF mapping-symbol name for STATE: "$d" marks the
     start of data, "$x" the start of A64 instructions.  Both are
     untyped local symbols.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Discard the superseded symbol at offset 0.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag are added in increasing offset
	 order; an equal offset means the old symbol is superseded.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1524
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol at exactly this address,
     remove it; it is replaced by the $d emitted below.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed was also the frag's first map.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the BYTES of padding as data, then restore STATE at the
     (now aligned) address just past it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1552
1553 static void mapping_state_2 (enum mstate state, int max_chars);
1554
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* The bytes before this point were never marked; retroactively
	 label the start of the section as data.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Record the new state and emit its mapping symbol at the current
     position.  */
  mapping_state_2 (state, 0);
}
1596
1597 /* Same as mapping_state, but MAX_CHARS bytes have already been
1598 allocated. Put the mapping symbol that far back. */
1599
1600 static void
1601 mapping_state_2 (enum mstate state, int max_chars)
1602 {
1603 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1604
1605 if (!SEG_NORMAL (now_seg))
1606 return;
1607
1608 if (mapstate == state)
1609 /* The mapping symbol has already been emitted.
1610 There is nothing else to do. */
1611 return;
1612
1613 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1614 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1615 }
1616 #else
1617 #define mapping_state(x) /* nothing */
1618 #define mapping_state_2(x, y) /* nothing */
1619 #endif
1620
1621 /* Directives: sectioning and alignment. */
1622
1623 static void
1624 s_bss (int ignore ATTRIBUTE_UNUSED)
1625 {
1626 /* We don't support putting frags in the BSS segment, we fake it by
1627 marking in_bss, then looking at s_skip for clues. */
1628 subseg_set (bss_section, 0);
1629 demand_empty_rest_of_line ();
1630 mapping_state (MAP_DATA);
1631 }
1632
1633 static void
1634 s_even (int ignore ATTRIBUTE_UNUSED)
1635 {
1636 /* Never make frag if expect extra pass. */
1637 if (!need_pass_2)
1638 frag_align (1, 0, 0);
1639
1640 record_alignment (now_seg, 1);
1641
1642 demand_empty_rest_of_line ();
1643 }
1644
1645 /* Directives: Literal pools. */
1646
1647 static literal_pool *
1648 find_literal_pool (int size)
1649 {
1650 literal_pool *pool;
1651
1652 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1653 {
1654 if (pool->section == now_seg
1655 && pool->sub_section == now_subseg && pool->size == size)
1656 break;
1657 }
1658
1659 return pool;
1660 }
1661
1662 static literal_pool *
1663 find_or_make_literal_pool (int size)
1664 {
1665 /* Next literal pool ID number. */
1666 static unsigned int latest_pool_num = 1;
1667 literal_pool *pool;
1668
1669 pool = find_literal_pool (size);
1670
1671 if (pool == NULL)
1672 {
1673 /* Create a new pool. */
1674 pool = XNEW (literal_pool);
1675 if (!pool)
1676 return NULL;
1677
1678 /* Currently we always put the literal pool in the current text
1679 section. If we were generating "small" model code where we
1680 knew that all code and initialised data was within 1MB then
1681 we could output literals to mergeable, read-only data
1682 sections. */
1683
1684 pool->next_free_entry = 0;
1685 pool->section = now_seg;
1686 pool->sub_section = now_subseg;
1687 pool->size = size;
1688 pool->next = list_of_pools;
1689 pool->symbol = NULL;
1690
1691 /* Add it to the list. */
1692 list_of_pools = pool;
1693 }
1694
1695 /* New pools, and emptied pools, will have a NULL symbol. */
1696 if (pool->symbol == NULL)
1697 {
1698 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1699 (valueT) 0, &zero_address_frag);
1700 pool->id = latest_pool_num++;
1701 }
1702
1703 /* Done. */
1704 return pool;
1705 }
1706
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  On success, *EXP is
   rewritten to a symbol reference into the pool (pool label plus the
   entry's byte offset).  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  Constants are
     matched by value/signedness; symbolic expressions by symbol and
     addend.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Redirect the caller's expression at the pool entry: the pool's
     label symbol plus this entry's byte offset.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1766
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its own
     storage.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1817
1818
/* Handle the .ltorg/.pool directive: emit the contents of every
   pending literal pool for the current (sub)section at the current
   location, then mark the pools as empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Pools exist for 4-byte and 8-byte entries (align = 2 and 3);
     the loop bound of 4 also covers a potential 16-byte pool.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* Pool contents are data, not instructions.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* Define the pool's label symbol here; references created by
	 add_to_lit_pool resolve to this location.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      /* The saved bignum copy is no longer needed.  */
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1877
1878 #ifdef OBJ_ELF
1879 /* Forward declarations for functions below, in the MD interface
1880 section. */
1881 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1882 static struct reloc_table_entry * find_reloc_table_entry (char **);
1883
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Implement the data directives .word/.long (NBYTES == 4) and
   .xword/.dword (NBYTES == 8).  A relocation suffix (":name:") on a
   symbolic operand is currently recognized but rejected with a
   diagnostic.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* The emitted bytes are data, not instructions.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  skip_past_char (&input_line_pointer, '#');
	  /* A ':' here introduces a relocation suffix such as
	     ":lo12:sym".  */
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1939
1940 #endif /* OBJ_ELF */
1941
/* Output a 32-bit word, but mark as an instruction.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      /* Only literal instruction words are accepted.  */
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instructions are always little-endian; byte-swap the word when
	 assembling for a big-endian data target.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1994
1995 #ifdef OBJ_ELF
1996 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1997
1998 static void
1999 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2000 {
2001 expressionS exp;
2002
2003 expression (&exp);
2004 frag_grow (4);
2005 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2006 BFD_RELOC_AARCH64_TLSDESC_ADD);
2007
2008 demand_empty_rest_of_line ();
2009 }
2010
2011 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2012
2013 static void
2014 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2015 {
2016 expressionS exp;
2017
2018 /* Since we're just labelling the code, there's no need to define a
2019 mapping symbol. */
2020 expression (&exp);
2021 /* Make sure there is enough room in this frag for the following
2022 blr. This trick only works if the blr follows immediately after
2023 the .tlsdesc directive. */
2024 frag_grow (4);
2025 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2026 BFD_RELOC_AARCH64_TLSDESC_CALL);
2027
2028 demand_empty_rest_of_line ();
2029 }
2030
2031 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2032
2033 static void
2034 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2035 {
2036 expressionS exp;
2037
2038 expression (&exp);
2039 frag_grow (4);
2040 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2041 BFD_RELOC_AARCH64_TLSDESC_LDR);
2042
2043 demand_empty_rest_of_line ();
2044 }
2045 #endif /* OBJ_ELF */
2046
2047 static void s_aarch64_arch (int);
2048 static void s_aarch64_cpu (int);
2049 static void s_aarch64_arch_extension (int);
2050
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg": dump pending literal pools.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target selection directives.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
  /* TLS-descriptor relocation markers.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* Data directives; the integer argument is the element size.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
2080 \f
2081
2082 /* Check whether STR points to a register name followed by a comma or the
2083 end of line; REG_TYPE indicates which register types are checked
2084 against. Return TRUE if STR is such a register name; otherwise return
2085 FALSE. The function does not intend to produce any diagnostics, but since
2086 the register parser aarch64_reg_parse, which is called by this function,
2087 does produce diagnostics, we call clear_error to clear any diagnostics
2088 that may be generated by aarch64_reg_parse.
2089 Also, the function returns FALSE directly if there is any user error
2090 present at the function entry. This prevents the existing diagnostics
2091 state from being spoiled.
2092 The function currently serves parse_constant_immediate and
2093 parse_big_immediate only. */
2094 static bfd_boolean
2095 reg_name_p (char *str, aarch64_reg_type reg_type)
2096 {
2097 int reg;
2098
2099 /* Prevent the diagnostics state from being spoiled. */
2100 if (error_p ())
2101 return FALSE;
2102
2103 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2104
2105 /* Clear the parsing error that may be set by the reg parser. */
2106 clear_error ();
2107
2108 if (reg == PARSE_FAIL)
2109 return FALSE;
2110
2111 skip_whitespace (str);
2112 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2113 return TRUE;
2114
2115 return FALSE;
2116 }
2117
2118 /* Parser functions used exclusively in instruction operands. */
2119
2120 /* Parse an immediate expression which may not be constant.
2121
2122 To prevent the expression parser from pushing a register name
2123 into the symbol table as an undefined symbol, firstly a check is
2124 done to find out whether STR is a register of type REG_TYPE followed
2125 by a comma or the end of line. Return FALSE if STR is such a string. */
2126
2127 static bfd_boolean
2128 parse_immediate_expression (char **str, expressionS *exp,
2129 aarch64_reg_type reg_type)
2130 {
2131 if (reg_name_p (*str, reg_type))
2132 {
2133 set_recoverable_error (_("immediate operand required"));
2134 return FALSE;
2135 }
2136
2137 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2138
2139 if (exp->X_op == O_absent)
2140 {
2141 set_fatal_syntax_error (_("missing immediate expression"));
2142 return FALSE;
2143 }
2144
2145 return TRUE;
2146 }
2147
2148 /* Constant immediate-value read function for use in insn parsing.
2149 STR points to the beginning of the immediate (with the optional
2150 leading #); *VAL receives the value. REG_TYPE says which register
2151 names should be treated as registers rather than as symbolic immediates.
2152
2153 Return TRUE on success; otherwise return FALSE. */
2154
2155 static bfd_boolean
2156 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2157 {
2158 expressionS exp;
2159
2160 if (! parse_immediate_expression (str, &exp, reg_type))
2161 return FALSE;
2162
2163 if (exp.X_op != O_constant)
2164 {
2165 set_syntax_error (_("constant expression required"));
2166 return FALSE;
2167 }
2168
2169 *val = exp.X_add_number;
2170 return TRUE;
2171 }
2172
/* Compress the IEEE single-precision encoding IMM into the 8-bit
   AArch64 floating-point immediate field: the sign bit b[31] becomes
   b[7], and b[25:19] (low exponent bit plus top fraction bits) become
   b[6:0].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;		/* b[25:19] -> b[6:0] */
  uint32_t sign = (imm >> (31 - 7)) & 0x80;	/* b[31] -> b[7] */
  return low7 | sign;
}
2179
2180 /* Return TRUE if the single-precision floating-point value encoded in IMM
2181 can be expressed in the AArch64 8-bit signed floating-point format with
2182 3-bit exponent and normalized 4 bits of precision; in other words, the
2183 floating-point value must be expressable as
2184 (+/-) n / 16 * power (2, r)
2185 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2186
2187 static bfd_boolean
2188 aarch64_imm_float_p (uint32_t imm)
2189 {
2190 /* If a single-precision floating-point value has the following bit
2191 pattern, it can be expressed in the AArch64 8-bit floating-point
2192 format:
2193
2194 3 32222222 2221111111111
2195 1 09876543 21098765432109876543210
2196 n Eeeeeexx xxxx0000000000000000000
2197
2198 where n, e and each x are either 0 or 1 independently, with
2199 E == ~ e. */
2200
2201 uint32_t pattern;
2202
2203 /* Prepare the pattern for 'Eeeeee'. */
2204 if (((imm >> 30) & 0x1) == 0)
2205 pattern = 0x3e000000;
2206 else
2207 pattern = 0x40000000;
2208
2209 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2210 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2211 }
2212
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  */

static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s: they have no counterpart in the
     23-bit float fraction.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~: the exponent must fit the float's smaller exponent
     range, i.e. the three bits below E are the complement of E.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111: the float exponent must not collapse
     to the all-ones (infinity/NaN) encoding.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Re-pack the surviving fields into the 32-bit float layout.  */
  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));			/* 3 S bits.  */
  return TRUE;
}
2260
2261 /* Return true if we should treat OPERAND as a double-precision
2262 floating-point operand rather than a single-precision one. */
2263 static bfd_boolean
2264 double_precision_operand_p (const aarch64_opnd_info *operand)
2265 {
2266 /* Check for unsuffixed SVE registers, which are allowed
2267 for LDR and STR but not in instructions that require an
2268 immediate. We get better error messages if we arbitrarily
2269 pick one size, parse the immediate normally, and then
2270 report the match failure in the normal way. */
2271 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2272 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2273 }
2274
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   On failure, *CCP is left unchanged and a syntax error is recorded;
   a register name yields a recoverable error, anything else is fatal.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  /* An optional '#' may precede the immediate.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit encoding to 32 bits; fails if precision
	     would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision encoding must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name here is recoverable: the caller may retry the
	 operand as a register.  */
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal notation: let the generic IEEE converter produce the
	 single-precision littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2350
2351 /* Less-generic immediate-value read function with the possibility of loading
2352 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2353 instructions.
2354
2355 To prevent the expression parser from pushing a register name into the
2356 symbol table as an undefined symbol, a check is firstly done to find
2357 out whether STR is a register of type REG_TYPE followed by a comma or
2358 the end of line. Return FALSE if STR is such a register. */
2359
2360 static bfd_boolean
2361 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2362 {
2363 char *ptr = *str;
2364
2365 if (reg_name_p (ptr, reg_type))
2366 {
2367 set_syntax_error (_("immediate operand required"));
2368 return FALSE;
2369 }
2370
2371 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2372
2373 if (inst.reloc.exp.X_op == O_constant)
2374 *imm = inst.reloc.exp.X_add_number;
2375
2376 *str = ptr;
2377
2378 return TRUE;
2379 }
2380
2381 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2382 if NEED_LIBOPCODES is non-zero, the fixup will need
2383 assistance from the libopcodes. */
2384
2385 static inline void
2386 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2387 const aarch64_opnd_info *operand,
2388 int need_libopcodes_p)
2389 {
2390 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2391 reloc->opnd = operand->type;
2392 if (need_libopcodes_p)
2393 reloc->need_libopcodes_p = 1;
2394 };
2395
/* Return TRUE if the instruction needs to be fixed up later internally by
   the GAS; otherwise return FALSE.  Examines the relocation recorded for
   the current instruction in the global INST.  */

static inline bfd_boolean
aarch64_gas_internal_fixup_p (void)
{
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2404
2405 /* Assign the immediate value to the relevant field in *OPERAND if
2406 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2407 needs an internal fixup in a later stage.
2408 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2409 IMM.VALUE that may get assigned with the constant. */
2410 static inline void
2411 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2412 aarch64_opnd_info *operand,
2413 int addr_off_p,
2414 int need_libopcodes_p,
2415 int skip_p)
2416 {
2417 if (reloc->exp.X_op == O_constant)
2418 {
2419 if (addr_off_p)
2420 operand->addr.offset.imm = reloc->exp.X_add_number;
2421 else
2422 operand->imm.value = reloc->exp.X_add_number;
2423 reloc->type = BFD_RELOC_UNUSED;
2424 }
2425 else
2426 {
2427 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2428 /* Tell libopcodes to ignore this operand or not. This is helpful
2429 when one of the operands needs to be fixed up later but we need
2430 libopcodes to check the other operands. */
2431 operand->skip = skip_p;
2432 }
2433 }
2434
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;		/* Modifier text, without the colons.  */
  int pc_rel;			/* Non-zero for pc-relative relocations.  */
  /* BFD relocation to emit for each instruction class that can carry
     this modifier; 0 means the modifier is not allowed on that class.  */
  bfd_reloc_code_real_type adr_type;		/* ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Load literal.  */
};
2454
2455 static struct reloc_table_entry reloc_table[] = {
2456 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2457 {"lo12", 0,
2458 0, /* adr_type */
2459 0,
2460 0,
2461 BFD_RELOC_AARCH64_ADD_LO12,
2462 BFD_RELOC_AARCH64_LDST_LO12,
2463 0},
2464
2465 /* Higher 21 bits of pc-relative page offset: ADRP */
2466 {"pg_hi21", 1,
2467 0, /* adr_type */
2468 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2469 0,
2470 0,
2471 0,
2472 0},
2473
2474 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2475 {"pg_hi21_nc", 1,
2476 0, /* adr_type */
2477 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2478 0,
2479 0,
2480 0,
2481 0},
2482
2483 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2484 {"abs_g0", 0,
2485 0, /* adr_type */
2486 0,
2487 BFD_RELOC_AARCH64_MOVW_G0,
2488 0,
2489 0,
2490 0},
2491
2492 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2493 {"abs_g0_s", 0,
2494 0, /* adr_type */
2495 0,
2496 BFD_RELOC_AARCH64_MOVW_G0_S,
2497 0,
2498 0,
2499 0},
2500
2501 /* Less significant bits 0-15 of address/value: MOVK, no check */
2502 {"abs_g0_nc", 0,
2503 0, /* adr_type */
2504 0,
2505 BFD_RELOC_AARCH64_MOVW_G0_NC,
2506 0,
2507 0,
2508 0},
2509
2510 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2511 {"abs_g1", 0,
2512 0, /* adr_type */
2513 0,
2514 BFD_RELOC_AARCH64_MOVW_G1,
2515 0,
2516 0,
2517 0},
2518
2519 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2520 {"abs_g1_s", 0,
2521 0, /* adr_type */
2522 0,
2523 BFD_RELOC_AARCH64_MOVW_G1_S,
2524 0,
2525 0,
2526 0},
2527
2528 /* Less significant bits 16-31 of address/value: MOVK, no check */
2529 {"abs_g1_nc", 0,
2530 0, /* adr_type */
2531 0,
2532 BFD_RELOC_AARCH64_MOVW_G1_NC,
2533 0,
2534 0,
2535 0},
2536
2537 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2538 {"abs_g2", 0,
2539 0, /* adr_type */
2540 0,
2541 BFD_RELOC_AARCH64_MOVW_G2,
2542 0,
2543 0,
2544 0},
2545
2546 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2547 {"abs_g2_s", 0,
2548 0, /* adr_type */
2549 0,
2550 BFD_RELOC_AARCH64_MOVW_G2_S,
2551 0,
2552 0,
2553 0},
2554
2555 /* Less significant bits 32-47 of address/value: MOVK, no check */
2556 {"abs_g2_nc", 0,
2557 0, /* adr_type */
2558 0,
2559 BFD_RELOC_AARCH64_MOVW_G2_NC,
2560 0,
2561 0,
2562 0},
2563
2564 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2565 {"abs_g3", 0,
2566 0, /* adr_type */
2567 0,
2568 BFD_RELOC_AARCH64_MOVW_G3,
2569 0,
2570 0,
2571 0},
2572
2573 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2574 {"prel_g0", 1,
2575 0, /* adr_type */
2576 0,
2577 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2578 0,
2579 0,
2580 0},
2581
2582 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2583 {"prel_g0_nc", 1,
2584 0, /* adr_type */
2585 0,
2586 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2587 0,
2588 0,
2589 0},
2590
2591 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2592 {"prel_g1", 1,
2593 0, /* adr_type */
2594 0,
2595 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2596 0,
2597 0,
2598 0},
2599
2600 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2601 {"prel_g1_nc", 1,
2602 0, /* adr_type */
2603 0,
2604 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2605 0,
2606 0,
2607 0},
2608
2609 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2610 {"prel_g2", 1,
2611 0, /* adr_type */
2612 0,
2613 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2614 0,
2615 0,
2616 0},
2617
2618 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2619 {"prel_g2_nc", 1,
2620 0, /* adr_type */
2621 0,
2622 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2623 0,
2624 0,
2625 0},
2626
2627 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2628 {"prel_g3", 1,
2629 0, /* adr_type */
2630 0,
2631 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2632 0,
2633 0,
2634 0},
2635
2636 /* Get to the page containing GOT entry for a symbol. */
2637 {"got", 1,
2638 0, /* adr_type */
2639 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2640 0,
2641 0,
2642 0,
2643 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2644
2645 /* 12 bit offset into the page containing GOT entry for that symbol. */
2646 {"got_lo12", 0,
2647 0, /* adr_type */
2648 0,
2649 0,
2650 0,
2651 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2652 0},
2653
2654 /* 0-15 bits of address/value: MOVk, no check. */
2655 {"gotoff_g0_nc", 0,
2656 0, /* adr_type */
2657 0,
2658 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2659 0,
2660 0,
2661 0},
2662
2663 /* Most significant bits 16-31 of address/value: MOVZ. */
2664 {"gotoff_g1", 0,
2665 0, /* adr_type */
2666 0,
2667 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2668 0,
2669 0,
2670 0},
2671
2672 /* 15 bit offset into the page containing GOT entry for that symbol. */
2673 {"gotoff_lo15", 0,
2674 0, /* adr_type */
2675 0,
2676 0,
2677 0,
2678 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2679 0},
2680
2681 /* Get to the page containing GOT TLS entry for a symbol */
2682 {"gottprel_g0_nc", 0,
2683 0, /* adr_type */
2684 0,
2685 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2686 0,
2687 0,
2688 0},
2689
2690 /* Get to the page containing GOT TLS entry for a symbol */
2691 {"gottprel_g1", 0,
2692 0, /* adr_type */
2693 0,
2694 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2695 0,
2696 0,
2697 0},
2698
2699 /* Get to the page containing GOT TLS entry for a symbol */
2700 {"tlsgd", 0,
2701 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2702 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2703 0,
2704 0,
2705 0,
2706 0},
2707
2708 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2709 {"tlsgd_lo12", 0,
2710 0, /* adr_type */
2711 0,
2712 0,
2713 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2714 0,
2715 0},
2716
2717 /* Lower 16 bits address/value: MOVk. */
2718 {"tlsgd_g0_nc", 0,
2719 0, /* adr_type */
2720 0,
2721 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2722 0,
2723 0,
2724 0},
2725
2726 /* Most significant bits 16-31 of address/value: MOVZ. */
2727 {"tlsgd_g1", 0,
2728 0, /* adr_type */
2729 0,
2730 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2731 0,
2732 0,
2733 0},
2734
2735 /* Get to the page containing GOT TLS entry for a symbol */
2736 {"tlsdesc", 0,
2737 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2738 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2739 0,
2740 0,
2741 0,
2742 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2743
2744 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2745 {"tlsdesc_lo12", 0,
2746 0, /* adr_type */
2747 0,
2748 0,
2749 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2750 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2751 0},
2752
2753 /* Get to the page containing GOT TLS entry for a symbol.
2754 The same as GD, we allocate two consecutive GOT slots
2755 for module index and module offset, the only difference
2756 with GD is the module offset should be initialized to
2757 zero without any outstanding runtime relocation. */
2758 {"tlsldm", 0,
2759 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2760 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2761 0,
2762 0,
2763 0,
2764 0},
2765
2766 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2767 {"tlsldm_lo12_nc", 0,
2768 0, /* adr_type */
2769 0,
2770 0,
2771 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2772 0,
2773 0},
2774
2775 /* 12 bit offset into the module TLS base address. */
2776 {"dtprel_lo12", 0,
2777 0, /* adr_type */
2778 0,
2779 0,
2780 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2781 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2782 0},
2783
2784 /* Same as dtprel_lo12, no overflow check. */
2785 {"dtprel_lo12_nc", 0,
2786 0, /* adr_type */
2787 0,
2788 0,
2789 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2790 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2791 0},
2792
2793 /* bits[23:12] of offset to the module TLS base address. */
2794 {"dtprel_hi12", 0,
2795 0, /* adr_type */
2796 0,
2797 0,
2798 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2799 0,
2800 0},
2801
2802 /* bits[15:0] of offset to the module TLS base address. */
2803 {"dtprel_g0", 0,
2804 0, /* adr_type */
2805 0,
2806 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2807 0,
2808 0,
2809 0},
2810
2811 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2812 {"dtprel_g0_nc", 0,
2813 0, /* adr_type */
2814 0,
2815 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2816 0,
2817 0,
2818 0},
2819
2820 /* bits[31:16] of offset to the module TLS base address. */
2821 {"dtprel_g1", 0,
2822 0, /* adr_type */
2823 0,
2824 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2825 0,
2826 0,
2827 0},
2828
2829 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2830 {"dtprel_g1_nc", 0,
2831 0, /* adr_type */
2832 0,
2833 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2834 0,
2835 0,
2836 0},
2837
2838 /* bits[47:32] of offset to the module TLS base address. */
2839 {"dtprel_g2", 0,
2840 0, /* adr_type */
2841 0,
2842 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2843 0,
2844 0,
2845 0},
2846
2847 /* Lower 16 bit offset into GOT entry for a symbol */
2848 {"tlsdesc_off_g0_nc", 0,
2849 0, /* adr_type */
2850 0,
2851 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2852 0,
2853 0,
2854 0},
2855
2856 /* Higher 16 bit offset into GOT entry for a symbol */
2857 {"tlsdesc_off_g1", 0,
2858 0, /* adr_type */
2859 0,
2860 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2861 0,
2862 0,
2863 0},
2864
2865 /* Get to the page containing GOT TLS entry for a symbol */
2866 {"gottprel", 0,
2867 0, /* adr_type */
2868 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2869 0,
2870 0,
2871 0,
2872 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2873
2874 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2875 {"gottprel_lo12", 0,
2876 0, /* adr_type */
2877 0,
2878 0,
2879 0,
2880 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2881 0},
2882
2883 /* Get tp offset for a symbol. */
2884 {"tprel", 0,
2885 0, /* adr_type */
2886 0,
2887 0,
2888 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2889 0,
2890 0},
2891
2892 /* Get tp offset for a symbol. */
2893 {"tprel_lo12", 0,
2894 0, /* adr_type */
2895 0,
2896 0,
2897 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2898 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2899 0},
2900
2901 /* Get tp offset for a symbol. */
2902 {"tprel_hi12", 0,
2903 0, /* adr_type */
2904 0,
2905 0,
2906 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2907 0,
2908 0},
2909
2910 /* Get tp offset for a symbol. */
2911 {"tprel_lo12_nc", 0,
2912 0, /* adr_type */
2913 0,
2914 0,
2915 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2916 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2917 0},
2918
2919 /* Most significant bits 32-47 of address/value: MOVZ. */
2920 {"tprel_g2", 0,
2921 0, /* adr_type */
2922 0,
2923 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2924 0,
2925 0,
2926 0},
2927
2928 /* Most significant bits 16-31 of address/value: MOVZ. */
2929 {"tprel_g1", 0,
2930 0, /* adr_type */
2931 0,
2932 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2933 0,
2934 0,
2935 0},
2936
2937 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2938 {"tprel_g1_nc", 0,
2939 0, /* adr_type */
2940 0,
2941 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2942 0,
2943 0,
2944 0},
2945
2946 /* Most significant bits 0-15 of address/value: MOVZ. */
2947 {"tprel_g0", 0,
2948 0, /* adr_type */
2949 0,
2950 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2951 0,
2952 0,
2953 0},
2954
2955 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2956 {"tprel_g0_nc", 0,
2957 0, /* adr_type */
2958 0,
2959 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2960 0,
2961 0,
2962 0},
2963
2964 /* 15bit offset from got entry to base address of GOT table. */
2965 {"gotpage_lo15", 0,
2966 0,
2967 0,
2968 0,
2969 0,
2970 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2971 0},
2972
2973 /* 14bit offset from got entry to base address of GOT table. */
2974 {"gotpage_lo14", 0,
2975 0,
2976 0,
2977 0,
2978 0,
2979 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2980 0},
2981 };
2982
2983 /* Given the address of a pointer pointing to the textual name of a
2984 relocation as may appear in assembler source, attempt to find its
2985 details in reloc_table. The pointer will be updated to the character
2986 after the trailing colon. On failure, NULL will be returned;
2987 otherwise return the reloc_table_entry. */
2988
2989 static struct reloc_table_entry *
2990 find_reloc_table_entry (char **str)
2991 {
2992 unsigned int i;
2993 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2994 {
2995 int length = strlen (reloc_table[i].name);
2996
2997 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2998 && (*str)[length] == ':')
2999 {
3000 *str += (length + 1);
3001 return &reloc_table[i];
3002 }
3003 }
3004
3005 return NULL;
3006 }
3007
/* Mode argument to parse_shift and parse_shifter_operand, selecting
   which shift/extend operators are acceptable in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}" */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm" */
  SHIFTED_LSL,			/* bare "lsl #n" */
  SHIFTED_MUL,			/* bare "mul #n" */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n" */
  SHIFTED_MUL_VL,		/* "mul vl" */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n */
};
3022
/* Parse a <shift> operator on an AArch64 data processing instruction.
   MODE restricts which operators are acceptable (see parse_shift_mode).
   On success the operator kind and amount are recorded in
   OPERAND->shifter and *STR is advanced past the consumed text.
   Return TRUE on success; otherwise return FALSE.  */
static bfd_boolean
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan past the alphabetic operator name.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return FALSE;
    }

  /* Look the name up in the shift-operator hash table.  */
  shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return FALSE;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only valid where the caller explicitly allows it.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return FALSE;
    }

  /* Likewise MUL (and MUL VL) for the SVE modes.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return FALSE;
    }

  /* Mode-specific validation of the operator kind.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return FALSE;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return FALSE;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  A closing ']' in register-offset mode and the
     "MUL VL" form both mean there is no explicit amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only extend operators may omit the amount; a bare '#' with no
	 expression is always an error.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return FALSE;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return FALSE;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return FALSE;
    }
  else
    {
      /* An explicit constant amount: record it and flag its presence.  */
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return TRUE;
}
3195
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the arithmetic- and logical-immediate modes accept a bare
     immediate here.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return FALSE;

  p = *str;

  /* Accept an immediate expression; the result is left in
     inst.reloc.exp.  */
  if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
    return FALSE;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return FALSE;

  /* No shifter is accepted for logical immediate values: if one parses
     successfully after the comma, that is an error.  (NOTE(review): if
     parse_shift fails here, parsing falls through with P already past
     the comma — presumably relying on the later operand match to reject
     the leftover text; confirm before changing.)  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return FALSE;
    }

  *str = p;
  return TRUE;
}
3236
3237 /* Parse a <shifter_operand> for a data processing instruction:
3238
3239 <Rm>
3240 <Rm>, <shift>
3241 #<immediate>
3242 #<immediate>, LSL #imm
3243
3244 where <shift> is handled by parse_shift above, and the last two
3245 cases are handled by the function above.
3246
3247 Validation of immediate operands is deferred to md_apply_fix.
3248
3249 Return TRUE on success; otherwise return FALSE. */
3250
3251 static bfd_boolean
3252 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3253 enum parse_shift_mode mode)
3254 {
3255 const reg_entry *reg;
3256 aarch64_opnd_qualifier_t qualifier;
3257 enum aarch64_operand_class opd_class
3258 = aarch64_get_operand_class (operand->type);
3259
3260 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3261 if (reg)
3262 {
3263 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3264 {
3265 set_syntax_error (_("unexpected register in the immediate operand"));
3266 return FALSE;
3267 }
3268
3269 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3270 {
3271 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3272 return FALSE;
3273 }
3274
3275 operand->reg.regno = reg->number;
3276 operand->qualifier = qualifier;
3277
3278 /* Accept optional shift operation on register. */
3279 if (! skip_past_comma (str))
3280 return TRUE;
3281
3282 if (! parse_shift (str, operand, mode))
3283 return FALSE;
3284
3285 return TRUE;
3286 }
3287 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3288 {
3289 set_syntax_error
3290 (_("integer register expected in the extended/shifted operand "
3291 "register"));
3292 return FALSE;
3293 }
3294
3295 /* We have a shifted immediate variable. */
3296 return parse_shifter_operand_imm (str, operand, mode);
3297 }
3298
/* Parse a <shifter_operand> that may carry a leading relocation
   modifier (":rello:" or "#:rello:").  The ADD relocation variant is
   recorded in inst.reloc.  Return TRUE on success; return FALSE
   otherwise.  */

static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip the '#' and/or ':' introducer.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* The modifier must be valid on an ADD immediate.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3359
3360 /* Parse all forms of an address expression. Information is written
3361 to *OPERAND and/or inst.reloc.
3362
3363 The A64 instruction set has the following addressing modes:
3364
3365 Offset
3366 [base] // in SIMD ld/st structure
3367 [base{,#0}] // in ld/st exclusive
3368 [base{,#imm}]
3369 [base,Xm{,LSL #imm}]
3370 [base,Xm,SXTX {#imm}]
3371 [base,Wm,(S|U)XTW {#imm}]
3372 Pre-indexed
3373 [base,#imm]!
3374 Post-indexed
3375 [base],#imm
3376 [base],Xm // in SIMD ld/st structure
3377 PC-relative (literal)
3378 label
3379 SVE:
3380 [base,#imm,MUL VL]
3381 [base,Zm.D{,LSL #imm}]
3382 [base,Zm.S,(S|U)XTW {#imm}]
3383 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3384 [Zn.S,#imm]
3385 [Zn.D,#imm]
3386 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3387 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3388 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3389
3390 (As a convenience, the notation "=immediate" is permitted in conjunction
3391 with the pc-relative literal load instructions to automatically place an
3392 immediate value or symbolic address in a nearby literal pool and generate
3393 a hidden label which references it.)
3394
3395 Upon a successful parsing, the address structure in *OPERAND will be
3396 filled in the following way:
3397
3398 .base_regno = <base>
3399 .offset.is_reg // 1 if the offset is a register
3400 .offset.imm = <imm>
3401 .offset.regno = <Rm>
3402
3403 For different addressing modes defined in the A64 ISA:
3404
3405 Offset
3406 .pcrel=0; .preind=1; .postind=0; .writeback=0
3407 Pre-indexed
3408 .pcrel=0; .preind=1; .postind=0; .writeback=1
3409 Post-indexed
3410 .pcrel=0; .preind=0; .postind=1; .writeback=1
3411 PC-relative (literal)
3412 .pcrel=1; .preind=1; .postind=0; .writeback=0
3413
3414 The shift/extension information, if any, will be stored in .shifter.
3415 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3416 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3417 corresponding register.
3418
3419 BASE_TYPE says which types of base register should be accepted and
3420 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3421 is the type of shifter that is allowed for immediate offsets,
3422 or SHIFTED_NONE if none.
3423
3424 In all other respects, it is the caller's responsibility to check
3425 for addressing modes not supported by the instruction, and to set
3426 inst.reloc.type. */
3427
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  /* No leading '[' means this is not a register-based address: it is
     either a PC-relative label or an =immediate literal-pool request.  */
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* ADR takes the adr-style relocation; everything else here is
	     a load-literal.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  /* A zero entry means the modifier exists but is invalid for
	     this instruction class.  */
	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  /* Parse the base register; its type/qualifier constraints are supplied
     by the caller via BASE_TYPE.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      /* The offset is either a register (possibly shifted/extended) or
	 an immediate expression (possibly a relocation modifier).  */
      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return FALSE;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}] */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* LSL/SXTX (or no shifter) require a 64-bit offset that
		 matches the size of the base register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return FALSE;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* SXTW/UXTW only apply to a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return FALSE;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return FALSE;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  /* '!' selects pre-indexed writeback; a trailing ',' selects
     post-indexed writeback.  */
  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      /* The post-index operand is either a 64-bit GPR or an immediate.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return FALSE;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  /* Reject [Rn]! */
	  set_syntax_error (_("missing offset in the pre-indexed address"));
	  return FALSE;
	}

      operand->addr.preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }

  *str = p;
  return TRUE;
}
3700
3701 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3702 on success. */
3703 static bfd_boolean
3704 parse_address (char **str, aarch64_opnd_info *operand)
3705 {
3706 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3707 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3708 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3709 }
3710
3711 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3712 The arguments have the same meaning as for parse_address_main.
3713 Return TRUE on success. */
3714 static bfd_boolean
3715 parse_sve_address (char **str, aarch64_opnd_info *operand,
3716 aarch64_opnd_qualifier_t *base_qualifier,
3717 aarch64_opnd_qualifier_t *offset_qualifier)
3718 {
3719 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3720 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3721 SHIFTED_MUL_VL);
3722 }
3723
3724 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3725 Return TRUE on success; otherwise return FALSE. */
3726 static bfd_boolean
3727 parse_half (char **str, int *internal_fixup_p)
3728 {
3729 char *p = *str;
3730
3731 skip_past_char (&p, '#');
3732
3733 gas_assert (internal_fixup_p);
3734 *internal_fixup_p = 0;
3735
3736 if (*p == ':')
3737 {
3738 struct reloc_table_entry *entry;
3739
3740 /* Try to parse a relocation. Anything else is an error. */
3741 ++p;
3742 if (!(entry = find_reloc_table_entry (&p)))
3743 {
3744 set_syntax_error (_("unknown relocation modifier"));
3745 return FALSE;
3746 }
3747
3748 if (entry->movw_type == 0)
3749 {
3750 set_syntax_error
3751 (_("this relocation modifier is not allowed on this instruction"));
3752 return FALSE;
3753 }
3754
3755 inst.reloc.type = entry->movw_type;
3756 }
3757 else
3758 *internal_fixup_p = 1;
3759
3760 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3761 return FALSE;
3762
3763 *str = p;
3764 return TRUE;
3765 }
3766
3767 /* Parse an operand for an ADRP instruction:
3768 ADRP <Xd>, <label>
3769 Return TRUE on success; otherwise return FALSE. */
3770
3771 static bfd_boolean
3772 parse_adrp (char **str)
3773 {
3774 char *p;
3775
3776 p = *str;
3777 if (*p == ':')
3778 {
3779 struct reloc_table_entry *entry;
3780
3781 /* Try to parse a relocation. Anything else is an error. */
3782 ++p;
3783 if (!(entry = find_reloc_table_entry (&p)))
3784 {
3785 set_syntax_error (_("unknown relocation modifier"));
3786 return FALSE;
3787 }
3788
3789 if (entry->adrp_type == 0)
3790 {
3791 set_syntax_error
3792 (_("this relocation modifier is not allowed on this instruction"));
3793 return FALSE;
3794 }
3795
3796 inst.reloc.type = entry->adrp_type;
3797 }
3798 else
3799 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3800
3801 inst.reloc.pc_rel = 1;
3802
3803 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3804 return FALSE;
3805
3806 *str = p;
3807 return TRUE;
3808 }
3809
3810 /* Miscellaneous. */
3811
3812 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3813 of SIZE tokens in which index I gives the token for field value I,
3814 or is null if field value I is invalid. REG_TYPE says which register
3815 names should be treated as registers rather than as symbolic immediates.
3816
3817 Return true on success, moving *STR past the operand and storing the
3818 field value in *VAL. */
3819
3820 static int
3821 parse_enum_string (char **str, int64_t *val, const char *const *array,
3822 size_t size, aarch64_reg_type reg_type)
3823 {
3824 expressionS exp;
3825 char *p, *q;
3826 size_t i;
3827
3828 /* Match C-like tokens. */
3829 p = q = *str;
3830 while (ISALNUM (*q))
3831 q++;
3832
3833 for (i = 0; i < size; ++i)
3834 if (array[i]
3835 && strncasecmp (array[i], p, q - p) == 0
3836 && array[i][q - p] == 0)
3837 {
3838 *val = i;
3839 *str = q;
3840 return TRUE;
3841 }
3842
3843 if (!parse_immediate_expression (&p, &exp, reg_type))
3844 return FALSE;
3845
3846 if (exp.X_op == O_constant
3847 && (uint64_t) exp.X_add_number < size)
3848 {
3849 *val = exp.X_add_number;
3850 *str = p;
3851 return TRUE;
3852 }
3853
3854 /* Use the default error for this operand. */
3855 return FALSE;
3856 }
3857
3858 /* Parse an option for a preload instruction. Returns the encoding for the
3859 option, or PARSE_FAIL. */
3860
3861 static int
3862 parse_pldop (char **str)
3863 {
3864 char *p, *q;
3865 const struct aarch64_name_value_pair *o;
3866
3867 p = q = *str;
3868 while (ISALNUM (*q))
3869 q++;
3870
3871 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3872 if (!o)
3873 return PARSE_FAIL;
3874
3875 *str = q;
3876 return o->value;
3877 }
3878
3879 /* Parse an option for a barrier instruction. Returns the encoding for the
3880 option, or PARSE_FAIL. */
3881
3882 static int
3883 parse_barrier (char **str)
3884 {
3885 char *p, *q;
3886 const asm_barrier_opt *o;
3887
3888 p = q = *str;
3889 while (ISALPHA (*q))
3890 q++;
3891
3892 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3893 if (!o)
3894 return PARSE_FAIL;
3895
3896 *str = q;
3897 return o->value;
3898 }
3899
3900 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3901 return 0 if successful. Otherwise return PARSE_FAIL. */
3902
3903 static int
3904 parse_barrier_psb (char **str,
3905 const struct aarch64_name_value_pair ** hint_opt)
3906 {
3907 char *p, *q;
3908 const struct aarch64_name_value_pair *o;
3909
3910 p = q = *str;
3911 while (ISALPHA (*q))
3912 q++;
3913
3914 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3915 if (!o)
3916 {
3917 set_fatal_syntax_error
3918 ( _("unknown or missing option to PSB"));
3919 return PARSE_FAIL;
3920 }
3921
3922 if (o->value != 0x11)
3923 {
3924 /* PSB only accepts option name 'CSYNC'. */
3925 set_syntax_error
3926 (_("the specified option is not accepted for PSB"));
3927 return PARSE_FAIL;
3928 }
3929
3930 *str = q;
3931 *hint_opt = o;
3932 return 0;
3933 }
3934
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   On success, *FLAGS (when non-NULL) receives the register's flags, or 0
   for an implementation-defined S<...> name.
 */

static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy the name into BUF, lower-cased so the hash lookup is
     case-insensitive.  Characters beyond 31 are dropped (and caught by
     the assert below).  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF be large enough.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Field widths: op0:2, op1:3, CRn:4, CRm:4, op2:3.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the 16-bit MSR/MRS encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose uses not supported by the selected processor
	 (non-fatally) but still return the encoding.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4002
4003 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4004 for the option, or NULL. */
4005
4006 static const aarch64_sys_ins_reg *
4007 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
4008 {
4009 char *p, *q;
4010 char buf[32];
4011 const aarch64_sys_ins_reg *o;
4012
4013 p = buf;
4014 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4015 if (p < buf + 31)
4016 *p++ = TOLOWER (*q);
4017 *p = '\0';
4018
4019 o = hash_find (sys_ins_regs, buf);
4020 if (!o)
4021 return NULL;
4022
4023 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
4024 as_bad (_("selected processor does not support system register "
4025 "name '%s'"), buf);
4026
4027 *str = q;
4028 return o;
4029 }
4030 \f
/* Consume the single character CHR or jump to the enclosing function's
   FAILURE label.  These po_* macros assume local variables STR, VAL,
   RTYPE, REG, QUALIFIER, INFO and IMM_REG_TYPE in the caller.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into VAL/RTYPE, or fail.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register and record it in INFO, or fail.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL without range checking, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or fail.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic enum token (see parse_enum_string) into VAL, or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail when it yields false/zero.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4082 \f
/* Encode the 12-bit imm field of an add/sub immediate instruction;
   the field occupies bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned imm12_pos = 10;
  return imm << imm12_pos;
}

/* Encode the shift amount field of an add/sub immediate instruction;
   the field occupies bits [23:22].  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned shift_pos = 22;
  return cnt << shift_pos;
}


/* Encode the imm field of an ADR instruction, which is split into
   immlo and immhi.  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;		  /* [1:0] -> [30:29] */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;	  /* [20:2] -> [23:5] */
  return immlo | immhi;
}

/* Encode the immediate field of a move wide immediate instruction;
   the 16-bit field occupies bits [20:5].  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned imm16_pos = 5;
  return imm << imm16_pos;
}

/* Encode the 26-bit offset of an unconditional branch; the offset is
   masked to the field width and placed at bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask26 = (1 << 26) - 1;
  return ofs & mask26;
}

/* Encode the 19-bit offset of conditional branch and compare & branch;
   the field occupies bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask19 = (1 << 19) - 1;
  return (ofs & mask19) << 5;
}

/* Encode the 19-bit offset of a load-literal instruction; the field
   occupies bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t mask19 = (1 << 19) - 1;
  return (ofs & mask19) << 5;
}

/* Encode the 14-bit offset of test & branch; the field occupies
   bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask14 = (1 << 14) - 1;
  return (ofs & mask14) << 5;
}

/* Encode the 16-bit imm field of svc/hvc/smc; the field occupies
   bits [20:5].  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned imm16_pos = 5;
  return imm << imm16_pos;
}
4147
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling the op bit
   at position 30.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}

/* Force the MOVZ form by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode | opc_bit;
}

/* Force the MOVN form by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode & ~opc_bit;
}
4166
4167 /* Overall per-instruction processing. */
4168
4169 /* We need to be able to fix up arbitrary expressions in some statements.
4170 This is so that we can handle symbols that are an arbitrary distance from
4171 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4172 which returns part of an address in a form which will be valid for
4173 a data instruction. We do this by pushing the expression into a symbol
4174 in the expr_section, and creating a fix for that. */
4175
4176 static fixS *
4177 fix_new_aarch64 (fragS * frag,
4178 int where,
4179 short int size, expressionS * exp, int pc_rel, int reloc)
4180 {
4181 fixS *new_fix;
4182
4183 switch (exp->X_op)
4184 {
4185 case O_constant:
4186 case O_symbol:
4187 case O_add:
4188 case O_subtract:
4189 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4190 break;
4191
4192 default:
4193 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4194 pc_rel, reloc);
4195 break;
4196 }
4197 return new_fix;
4198 }
4199 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The entries must be
   kept in the same order as the enumerators of
   enum aarch64_operand_error_kind, since the enum value is used as the
   index into this table.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4221
4222 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4223
4224 When multiple errors of different kinds are found in the same assembly
4225 line, only the error of the highest severity will be picked up for
4226 issuing the diagnostics. */
4227
4228 static inline bfd_boolean
4229 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4230 enum aarch64_operand_error_kind rhs)
4231 {
4232 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4233 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
4234 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4235 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4236 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4237 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4238 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4239 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4240 return lhs > rhs;
4241 }
4242
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *end;

  /* Copy up to 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (end = mnemonic; is_part_of_name (*end); ++end)
    continue;

  *end = '\0';

  /* Append '...' to the truncated long name.  */
  if (end == mnemonic + 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4271
4272 static void
4273 reset_aarch64_instruction (aarch64_instruction *instruction)
4274 {
4275 memset (instruction, '\0', sizeof (aarch64_instruction));
4276 instruction->reloc.type = BFD_RELOC_UNUSED;
4277 }
4278
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  /* Opcode (instruction template) the error was recorded against.  */
  const aarch64_opcode *opcode;
  /* Kind, operand index and message of the error.  */
  aarch64_operand_error detail;
  /* Next record in the singly-linked list.  */
  struct operand_error_record *next;
};

typedef struct operand_error_record operand_error_record;

struct operand_errors
{
  /* Most recently inserted record (records are inserted at the head).  */
  operand_error_record *head;
  /* First record inserted for the current line.  */
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4310
4311 /* Initialize the data structure that stores the operand mismatch
4312 information on assembling one line of the assembly code. */
4313 static void
4314 init_operand_error_report (void)
4315 {
4316 if (operand_error_report.head != NULL)
4317 {
4318 gas_assert (operand_error_report.tail != NULL);
4319 operand_error_report.tail->next = free_opnd_error_record_nodes;
4320 free_opnd_error_record_nodes = operand_error_report.head;
4321 operand_error_report.head = NULL;
4322 operand_error_report.tail = NULL;
4323 return;
4324 }
4325 gas_assert (operand_error_report.tail == NULL);
4326 }
4327
4328 /* Return TRUE if some operand error has been recorded during the
4329 parsing of the current assembly line using the opcode *OPCODE;
4330 otherwise return FALSE. */
4331 static inline bfd_boolean
4332 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4333 {
4334 operand_error_record *record = operand_error_report.head;
4335 return record && record->opcode == opcode;
4336 }
4337
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  /* Otherwise RECORD (the list head) is the existing record for OPCODE;
     decide whether the new error should replace the stored one.  */
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Store (or overwrite) the error details in the record.  */
  record->detail = new_record->detail;
}
4389
4390 static inline void
4391 record_operand_error_info (const aarch64_opcode *opcode,
4392 aarch64_operand_error *error_info)
4393 {
4394 operand_error_record record;
4395 record.opcode = opcode;
4396 record.detail = *error_info;
4397 add_operand_error_record (&record);
4398 }
4399
4400 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4401 error message *ERROR, for operand IDX (count from 0). */
4402
4403 static void
4404 record_operand_error (const aarch64_opcode *opcode, int idx,
4405 enum aarch64_operand_error_kind kind,
4406 const char* error)
4407 {
4408 aarch64_operand_error info;
4409 memset(&info, 0, sizeof (info));
4410 info.index = idx;
4411 info.kind = kind;
4412 info.error = error;
4413 info.non_fatal = FALSE;
4414 record_operand_error_info (opcode, &info);
4415 }
4416
4417 static void
4418 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4419 enum aarch64_operand_error_kind kind,
4420 const char* error, const int *extra_data)
4421 {
4422 aarch64_operand_error info;
4423 info.index = idx;
4424 info.kind = kind;
4425 info.error = error;
4426 info.data[0] = extra_data[0];
4427 info.data[1] = extra_data[1];
4428 info.data[2] = extra_data[2];
4429 info.non_fatal = FALSE;
4430 record_operand_error_info (opcode, &info);
4431 }
4432
4433 static void
4434 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4435 const char* error, int lower_bound,
4436 int upper_bound)
4437 {
4438 int data[3] = {lower_bound, upper_bound, 0};
4439 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4440 error, data);
4441 }
4442
4443 /* Remove the operand error record for *OPCODE. */
4444 static void ATTRIBUTE_UNUSED
4445 remove_operand_error_record (const aarch64_opcode *opcode)
4446 {
4447 if (opcode_has_operand_error_p (opcode))
4448 {
4449 operand_error_record* record = operand_error_report.head;
4450 gas_assert (record != NULL && operand_error_report.tail != NULL);
4451 operand_error_report.head = record->next;
4452 record->next = free_opnd_error_record_nodes;
4453 free_opnd_error_record_nodes = record;
4454 if (operand_error_report.head == NULL)
4455 {
4456 gas_assert (operand_error_report.tail == record);
4457 operand_error_report.tail = NULL;
4458 }
4459 }
4460 }
4461
4462 /* Given the instruction in *INSTR, return the index of the best matched
4463 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4464
4465 Return -1 if there is no qualifier sequence; return the first match
4466 if there is multiple matches found. */
4467
4468 static int
4469 find_best_match (const aarch64_inst *instr,
4470 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4471 {
4472 int i, num_opnds, max_num_matched, idx;
4473
4474 num_opnds = aarch64_num_of_operands (instr->opcode);
4475 if (num_opnds == 0)
4476 {
4477 DEBUG_TRACE ("no operand");
4478 return -1;
4479 }
4480
4481 max_num_matched = 0;
4482 idx = 0;
4483
4484 /* For each pattern. */
4485 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4486 {
4487 int j, num_matched;
4488 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4489
4490 /* Most opcodes has much fewer patterns in the list. */
4491 if (empty_qualifier_sequence_p (qualifiers))
4492 {
4493 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4494 break;
4495 }
4496
4497 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4498 if (*qualifiers == instr->operands[j].qualifier)
4499 ++num_matched;
4500
4501 if (num_matched > max_num_matched)
4502 {
4503 max_num_matched = num_matched;
4504 idx = i;
4505 }
4506 }
4507
4508 DEBUG_TRACE ("return with %d", idx);
4509 return idx;
4510 }
4511
4512 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4513 corresponding operands in *INSTR. */
4514
4515 static inline void
4516 assign_qualifier_sequence (aarch64_inst *instr,
4517 const aarch64_opnd_qualifier_t *qualifiers)
4518 {
4519 int i = 0;
4520 int num_opnds = aarch64_num_of_operands (instr->opcode);
4521 gas_assert (num_opnds);
4522 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4523 instr->operands[i].qualifier = *qualifiers;
4524 }
4525
4526 /* Print operands for the diagnosis purpose. */
4527
4528 static void
4529 print_operands (char *buf, const aarch64_opcode *opcode,
4530 const aarch64_opnd_info *opnds)
4531 {
4532 int i;
4533
4534 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4535 {
4536 char str[128];
4537
4538 /* We regard the opcode operand info more, however we also look into
4539 the inst->operands to support the disassembling of the optional
4540 operand.
4541 The two operand code should be the same in all cases, apart from
4542 when the operand can be optional. */
4543 if (opcode->operands[i] == AARCH64_OPND_NIL
4544 || opnds[i].type == AARCH64_OPND_NIL)
4545 break;
4546
4547 /* Generate the operand string in STR. */
4548 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4549 NULL);
4550
4551 /* Delimiter. */
4552 if (str[0] != '\0')
4553 strcat (buf, i == 0 ? " " : ", ");
4554
4555 /* Append the operand string. */
4556 strcat (buf, str);
4557 }
4558 }
4559
/* Send to stderr a string as information.  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  /* Prefix the message with the current file and line, when known.  */
  file = as_where (&line);
  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4583
/* Output one operand error record.

   RECORD holds the error detail collected while matching the assembly
   line STR against one opcode template.  Non-fatal records are issued
   as warnings, everything else as errors.  For invalid-variant errors
   in verbose mode, a corrected instruction and the other valid
   qualifier variants are suggested.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* Index of the offending operand; -1 means the error is not tied to
     a particular operand.  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Warn for non-fatal records, error otherwise.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parse the operands after the mnemonic so the
	     IR reflects the user's input; parsing is expected to succeed
	     here (only encoding failed earlier).  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail again for this erroneous line.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the permitted range; when equal only a
	 single value is allowed.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] is the expected number of registers in the list.  */
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4760
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bfd_boolean non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      /* Only let a record raise the severity when it passes the
	 non-fatal filter.  */
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
4858 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Store the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
4869
/* Read a little-endian AARCH64 instruction from BUF and return it as a
   host-order 32-bit value.  Inverse of put_aarch64_insn.  */

static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *where = (unsigned char *) buf;
  uint32_t result;

  /* Widen each byte to uint32_t before shifting: WHERE[3] promotes to
     (signed) int, so "where[3] << 24" would shift into the sign bit and
     be undefined behavior whenever bit 7 of the top byte is set -- which
     it is for most AArch64 encodings.  */
  result = ((uint32_t) where[0]
	    | ((uint32_t) where[1] << 8)
	    | ((uint32_t) where[2] << 16)
	    | ((uint32_t) where[3] << 24));
  return result;
}
4878
/* Emit the instruction held in the global INST into the current frag
   and, when INST carries a relocation, create the corresponding fix-up.
   NEW_INST, when non-NULL, is attached to the fix-up so the instruction
   can be re-encoded when the fix is applied.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  to = frag_more (INSN_SIZE);

  /* Note that this frag now contains instruction data.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
4912
/* Link together opcodes of the same name.  */

struct templates
{
  aarch64_opcode *opcode;	/* One opcode entry for this mnemonic.  */
  struct templates *next;	/* Next entry sharing the same name.  */
};

typedef struct templates templates;
4922
4923 static templates *
4924 lookup_mnemonic (const char *start, int len)
4925 {
4926 templates *templ = NULL;
4927
4928 templ = hash_find_n (aarch64_ops_hsh, start, len);
4929 return templ;
4930 }
4931
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.  On success *STR is advanced past the
   mnemonic (and any recognized condition suffix); inst.cond is set as
   a side effect.  Return NULL/0 on failure.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  Remember the first '.' as the potential
     condition separator.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Empty mnemonic, or one that starts with '.', is invalid.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown suffix: leave *STR at the '.' for diagnostics.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional; len <= 13 guarantees
	 len + 2 fits in condname[16] (lookup_mnemonic takes an explicit
	 length, so no NUL terminator is needed).  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
4995
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Return AARCH64_OPND_QLF_NIL and
   record a syntax error on failure.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the vector-width offset is
     added to this below.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
  };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate-register qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Vector element register.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit vector arrangements are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width
	 by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5066
5067 /* Process an optional operand that is found omitted from the assembly line.
5068 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5069 instruction's opcode entry while IDX is the index of this omitted operand.
5070 */
5071
5072 static void
5073 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
5074 int idx, aarch64_opnd_info *operand)
5075 {
5076 aarch64_insn default_value = get_optional_operand_default_value (opcode);
5077 gas_assert (optional_operand_p (opcode, idx));
5078 gas_assert (!operand->present);
5079
5080 switch (type)
5081 {
5082 case AARCH64_OPND_Rd:
5083 case AARCH64_OPND_Rn:
5084 case AARCH64_OPND_Rm:
5085 case AARCH64_OPND_Rt:
5086 case AARCH64_OPND_Rt2:
5087 case AARCH64_OPND_Rs:
5088 case AARCH64_OPND_Ra:
5089 case AARCH64_OPND_Rt_SYS:
5090 case AARCH64_OPND_Rd_SP:
5091 case AARCH64_OPND_Rn_SP:
5092 case AARCH64_OPND_Rm_SP:
5093 case AARCH64_OPND_Fd:
5094 case AARCH64_OPND_Fn:
5095 case AARCH64_OPND_Fm:
5096 case AARCH64_OPND_Fa:
5097 case AARCH64_OPND_Ft:
5098 case AARCH64_OPND_Ft2:
5099 case AARCH64_OPND_Sd:
5100 case AARCH64_OPND_Sn:
5101 case AARCH64_OPND_Sm:
5102 case AARCH64_OPND_Va:
5103 case AARCH64_OPND_Vd:
5104 case AARCH64_OPND_Vn:
5105 case AARCH64_OPND_Vm:
5106 case AARCH64_OPND_VdD1:
5107 case AARCH64_OPND_VnD1:
5108 operand->reg.regno = default_value;
5109 break;
5110
5111 case AARCH64_OPND_Ed:
5112 case AARCH64_OPND_En:
5113 case AARCH64_OPND_Em:
5114 case AARCH64_OPND_Em16:
5115 case AARCH64_OPND_SM3_IMM2:
5116 operand->reglane.regno = default_value;
5117 break;
5118
5119 case AARCH64_OPND_IDX:
5120 case AARCH64_OPND_BIT_NUM:
5121 case AARCH64_OPND_IMMR:
5122 case AARCH64_OPND_IMMS:
5123 case AARCH64_OPND_SHLL_IMM:
5124 case AARCH64_OPND_IMM_VLSL:
5125 case AARCH64_OPND_IMM_VLSR:
5126 case AARCH64_OPND_CCMP_IMM:
5127 case AARCH64_OPND_FBITS:
5128 case AARCH64_OPND_UIMM4:
5129 case AARCH64_OPND_UIMM3_OP1:
5130 case AARCH64_OPND_UIMM3_OP2:
5131 case AARCH64_OPND_IMM:
5132 case AARCH64_OPND_IMM_2:
5133 case AARCH64_OPND_WIDTH:
5134 case AARCH64_OPND_UIMM7:
5135 case AARCH64_OPND_NZCV:
5136 case AARCH64_OPND_SVE_PATTERN:
5137 case AARCH64_OPND_SVE_PRFOP:
5138 operand->imm.value = default_value;
5139 break;
5140
5141 case AARCH64_OPND_SVE_PATTERN_SCALED:
5142 operand->imm.value = default_value;
5143 operand->shifter.kind = AARCH64_MOD_MUL;
5144 operand->shifter.amount = 1;
5145 break;
5146
5147 case AARCH64_OPND_EXCEPTION:
5148 inst.reloc.type = BFD_RELOC_UNUSED;
5149 break;
5150
5151 case AARCH64_OPND_BARRIER_ISB:
5152 operand->barrier = aarch64_barrier_options + default_value;
5153
5154 default:
5155 break;
5156 }
5157 }
5158
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   Rejects relocation types not allowed for MOVK and derives the
   implicit shift amount (0/16/32/48) from the Gn group of the
   relocation, storing it in operand 1's shifter.  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* The "signed"/PREL/TLS group relocations below make no sense for
     MOVK, which keeps the other bits of the destination.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Select the shift implied by the relocation's group number.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Groups 2 and 3 address bits above 31 and so require a 64-bit
	 destination register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5260
/* A primitive log calculator.  Return log2 of SIZE, where SIZE must be
   a power of two in the range [1, 16]; assert on any other input.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16: with SIZE == 0 the lookup
     "ls[size - 1]" would index with (unsigned)-1 and read far out of
     bounds.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5276
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The real type is chosen by the access size implied by operand 1's
   qualifier (LDST8..LDST128) and by which of the five pseudo LDST_LO12
   reloc families the parser recorded.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: one per pseudo reloc family (plain, DTPREL, DTPREL_NC, TPREL,
     TPREL_NC); columns: log2 of the access size (8..128 bits).  TLS
     entries have no 128-bit variant, hence BFD_RELOC_AARCH64_NONE.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1 has no qualifier yet, infer the expected one from
     operand 0's qualifier.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* TLS families above only cover up to 64-bit accesses.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5355
5356 /* Check whether a register list REGINFO is valid. The registers must be
5357 numbered in increasing order (modulo 32), in increments of one or two.
5358
5359 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5360 increments of two.
5361
5362 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5363
5364 static bfd_boolean
5365 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5366 {
5367 uint32_t i, nb_regs, prev_regno, incr;
5368
5369 nb_regs = 1 + (reginfo & 0x3);
5370 reginfo >>= 2;
5371 prev_regno = reginfo & 0x1f;
5372 incr = accept_alternate ? 2 : 1;
5373
5374 for (i = 1; i < nb_regs; ++i)
5375 {
5376 uint32_t curr_regno;
5377 reginfo >>= 5;
5378 curr_regno = reginfo & 0x1f;
5379 if (curr_regno != ((prev_regno + incr) & 0x1f))
5380 return FALSE;
5381 prev_regno = curr_regno;
5382 }
5383
5384 return TRUE;
5385 }
5386
5387 /* Generic instruction operand parser. This does no encoding and no
5388 semantic validation; it merely squirrels values away in the inst
5389 structure. Returns TRUE or FALSE depending on whether the
5390 specified grammar matched. */
5391
5392 static bfd_boolean
5393 parse_operands (char *str, const aarch64_opcode *opcode)
5394 {
5395 int i;
5396 char *backtrack_pos = 0;
5397 const enum aarch64_opnd *operands = opcode->operands;
5398 aarch64_reg_type imm_reg_type;
5399
5400 clear_error ();
5401 skip_whitespace (str);
5402
5403 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5404 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5405 else
5406 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5407
5408 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5409 {
5410 int64_t val;
5411 const reg_entry *reg;
5412 int comma_skipped_p = 0;
5413 aarch64_reg_type rtype;
5414 struct vector_type_el vectype;
5415 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5416 aarch64_opnd_info *info = &inst.base.operands[i];
5417 aarch64_reg_type reg_type;
5418
5419 DEBUG_TRACE ("parse operand %d", i);
5420
5421 /* Assign the operand code. */
5422 info->type = operands[i];
5423
5424 if (optional_operand_p (opcode, i))
5425 {
5426 /* Remember where we are in case we need to backtrack. */
5427 gas_assert (!backtrack_pos);
5428 backtrack_pos = str;
5429 }
5430
5431 /* Expect comma between operands; the backtrack mechanism will take
5432 care of cases of omitted optional operand. */
5433 if (i > 0 && ! skip_past_char (&str, ','))
5434 {
5435 set_syntax_error (_("comma expected between operands"));
5436 goto failure;
5437 }
5438 else
5439 comma_skipped_p = 1;
5440
5441 switch (operands[i])
5442 {
5443 case AARCH64_OPND_Rd:
5444 case AARCH64_OPND_Rn:
5445 case AARCH64_OPND_Rm:
5446 case AARCH64_OPND_Rt:
5447 case AARCH64_OPND_Rt2:
5448 case AARCH64_OPND_Rs:
5449 case AARCH64_OPND_Ra:
5450 case AARCH64_OPND_Rt_SYS:
5451 case AARCH64_OPND_PAIRREG:
5452 case AARCH64_OPND_SVE_Rm:
5453 po_int_reg_or_fail (REG_TYPE_R_Z);
5454 break;
5455
5456 case AARCH64_OPND_Rd_SP:
5457 case AARCH64_OPND_Rn_SP:
5458 case AARCH64_OPND_SVE_Rn_SP:
5459 case AARCH64_OPND_Rm_SP:
5460 po_int_reg_or_fail (REG_TYPE_R_SP);
5461 break;
5462
5463 case AARCH64_OPND_Rm_EXT:
5464 case AARCH64_OPND_Rm_SFT:
5465 po_misc_or_fail (parse_shifter_operand
5466 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5467 ? SHIFTED_ARITH_IMM
5468 : SHIFTED_LOGIC_IMM)));
5469 if (!info->shifter.operator_present)
5470 {
5471 /* Default to LSL if not present. Libopcodes prefers shifter
5472 kind to be explicit. */
5473 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5474 info->shifter.kind = AARCH64_MOD_LSL;
5475 /* For Rm_EXT, libopcodes will carry out further check on whether
5476 or not stack pointer is used in the instruction (Recall that
5477 "the extend operator is not optional unless at least one of
5478 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5479 }
5480 break;
5481
5482 case AARCH64_OPND_Fd:
5483 case AARCH64_OPND_Fn:
5484 case AARCH64_OPND_Fm:
5485 case AARCH64_OPND_Fa:
5486 case AARCH64_OPND_Ft:
5487 case AARCH64_OPND_Ft2:
5488 case AARCH64_OPND_Sd:
5489 case AARCH64_OPND_Sn:
5490 case AARCH64_OPND_Sm:
5491 case AARCH64_OPND_SVE_VZn:
5492 case AARCH64_OPND_SVE_Vd:
5493 case AARCH64_OPND_SVE_Vm:
5494 case AARCH64_OPND_SVE_Vn:
5495 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5496 if (val == PARSE_FAIL)
5497 {
5498 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5499 goto failure;
5500 }
5501 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5502
5503 info->reg.regno = val;
5504 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5505 break;
5506
5507 case AARCH64_OPND_SVE_Pd:
5508 case AARCH64_OPND_SVE_Pg3:
5509 case AARCH64_OPND_SVE_Pg4_5:
5510 case AARCH64_OPND_SVE_Pg4_10:
5511 case AARCH64_OPND_SVE_Pg4_16:
5512 case AARCH64_OPND_SVE_Pm:
5513 case AARCH64_OPND_SVE_Pn:
5514 case AARCH64_OPND_SVE_Pt:
5515 reg_type = REG_TYPE_PN;
5516 goto vector_reg;
5517
5518 case AARCH64_OPND_SVE_Za_5:
5519 case AARCH64_OPND_SVE_Za_16:
5520 case AARCH64_OPND_SVE_Zd:
5521 case AARCH64_OPND_SVE_Zm_5:
5522 case AARCH64_OPND_SVE_Zm_16:
5523 case AARCH64_OPND_SVE_Zn:
5524 case AARCH64_OPND_SVE_Zt:
5525 reg_type = REG_TYPE_ZN;
5526 goto vector_reg;
5527
5528 case AARCH64_OPND_Va:
5529 case AARCH64_OPND_Vd:
5530 case AARCH64_OPND_Vn:
5531 case AARCH64_OPND_Vm:
5532 reg_type = REG_TYPE_VN;
5533 vector_reg:
5534 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5535 if (val == PARSE_FAIL)
5536 {
5537 first_error (_(get_reg_expected_msg (reg_type)));
5538 goto failure;
5539 }
5540 if (vectype.defined & NTA_HASINDEX)
5541 goto failure;
5542
5543 info->reg.regno = val;
5544 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5545 && vectype.type == NT_invtype)
5546 /* Unqualified Pn and Zn registers are allowed in certain
5547 contexts. Rely on F_STRICT qualifier checking to catch
5548 invalid uses. */
5549 info->qualifier = AARCH64_OPND_QLF_NIL;
5550 else
5551 {
5552 info->qualifier = vectype_to_qualifier (&vectype);
5553 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5554 goto failure;
5555 }
5556 break;
5557
5558 case AARCH64_OPND_VdD1:
5559 case AARCH64_OPND_VnD1:
5560 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5561 if (val == PARSE_FAIL)
5562 {
5563 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5564 goto failure;
5565 }
5566 if (vectype.type != NT_d || vectype.index != 1)
5567 {
5568 set_fatal_syntax_error
5569 (_("the top half of a 128-bit FP/SIMD register is expected"));
5570 goto failure;
5571 }
5572 info->reg.regno = val;
5573 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5574 here; it is correct for the purpose of encoding/decoding since
5575 only the register number is explicitly encoded in the related
5576 instructions, although this appears a bit hacky. */
5577 info->qualifier = AARCH64_OPND_QLF_S_D;
5578 break;
5579
5580 case AARCH64_OPND_SVE_Zm3_INDEX:
5581 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5582 case AARCH64_OPND_SVE_Zm4_INDEX:
5583 case AARCH64_OPND_SVE_Zn_INDEX:
5584 reg_type = REG_TYPE_ZN;
5585 goto vector_reg_index;
5586
5587 case AARCH64_OPND_Ed:
5588 case AARCH64_OPND_En:
5589 case AARCH64_OPND_Em:
5590 case AARCH64_OPND_Em16:
5591 case AARCH64_OPND_SM3_IMM2:
5592 reg_type = REG_TYPE_VN;
5593 vector_reg_index:
5594 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5595 if (val == PARSE_FAIL)
5596 {
5597 first_error (_(get_reg_expected_msg (reg_type)));
5598 goto failure;
5599 }
5600 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5601 goto failure;
5602
5603 info->reglane.regno = val;
5604 info->reglane.index = vectype.index;
5605 info->qualifier = vectype_to_qualifier (&vectype);
5606 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5607 goto failure;
5608 break;
5609
5610 case AARCH64_OPND_SVE_ZnxN:
5611 case AARCH64_OPND_SVE_ZtxN:
5612 reg_type = REG_TYPE_ZN;
5613 goto vector_reg_list;
5614
5615 case AARCH64_OPND_LVn:
5616 case AARCH64_OPND_LVt:
5617 case AARCH64_OPND_LVt_AL:
5618 case AARCH64_OPND_LEt:
5619 reg_type = REG_TYPE_VN;
5620 vector_reg_list:
5621 if (reg_type == REG_TYPE_ZN
5622 && get_opcode_dependent_value (opcode) == 1
5623 && *str != '{')
5624 {
5625 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5626 if (val == PARSE_FAIL)
5627 {
5628 first_error (_(get_reg_expected_msg (reg_type)));
5629 goto failure;
5630 }
5631 info->reglist.first_regno = val;
5632 info->reglist.num_regs = 1;
5633 }
5634 else
5635 {
5636 val = parse_vector_reg_list (&str, reg_type, &vectype);
5637 if (val == PARSE_FAIL)
5638 goto failure;
5639 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5640 {
5641 set_fatal_syntax_error (_("invalid register list"));
5642 goto failure;
5643 }
5644 info->reglist.first_regno = (val >> 2) & 0x1f;
5645 info->reglist.num_regs = (val & 0x3) + 1;
5646 }
5647 if (operands[i] == AARCH64_OPND_LEt)
5648 {
5649 if (!(vectype.defined & NTA_HASINDEX))
5650 goto failure;
5651 info->reglist.has_index = 1;
5652 info->reglist.index = vectype.index;
5653 }
5654 else
5655 {
5656 if (vectype.defined & NTA_HASINDEX)
5657 goto failure;
5658 if (!(vectype.defined & NTA_HASTYPE))
5659 {
5660 if (reg_type == REG_TYPE_ZN)
5661 set_fatal_syntax_error (_("missing type suffix"));
5662 goto failure;
5663 }
5664 }
5665 info->qualifier = vectype_to_qualifier (&vectype);
5666 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5667 goto failure;
5668 break;
5669
5670 case AARCH64_OPND_CRn:
5671 case AARCH64_OPND_CRm:
5672 {
5673 char prefix = *(str++);
5674 if (prefix != 'c' && prefix != 'C')
5675 goto failure;
5676
5677 po_imm_nc_or_fail ();
5678 if (val > 15)
5679 {
5680 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5681 goto failure;
5682 }
5683 info->qualifier = AARCH64_OPND_QLF_CR;
5684 info->imm.value = val;
5685 break;
5686 }
5687
5688 case AARCH64_OPND_SHLL_IMM:
5689 case AARCH64_OPND_IMM_VLSR:
5690 po_imm_or_fail (1, 64);
5691 info->imm.value = val;
5692 break;
5693
5694 case AARCH64_OPND_CCMP_IMM:
5695 case AARCH64_OPND_SIMM5:
5696 case AARCH64_OPND_FBITS:
5697 case AARCH64_OPND_UIMM4:
5698 case AARCH64_OPND_UIMM3_OP1:
5699 case AARCH64_OPND_UIMM3_OP2:
5700 case AARCH64_OPND_IMM_VLSL:
5701 case AARCH64_OPND_IMM:
5702 case AARCH64_OPND_IMM_2:
5703 case AARCH64_OPND_WIDTH:
5704 case AARCH64_OPND_SVE_INV_LIMM:
5705 case AARCH64_OPND_SVE_LIMM:
5706 case AARCH64_OPND_SVE_LIMM_MOV:
5707 case AARCH64_OPND_SVE_SHLIMM_PRED:
5708 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5709 case AARCH64_OPND_SVE_SHRIMM_PRED:
5710 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5711 case AARCH64_OPND_SVE_SIMM5:
5712 case AARCH64_OPND_SVE_SIMM5B:
5713 case AARCH64_OPND_SVE_SIMM6:
5714 case AARCH64_OPND_SVE_SIMM8:
5715 case AARCH64_OPND_SVE_UIMM3:
5716 case AARCH64_OPND_SVE_UIMM7:
5717 case AARCH64_OPND_SVE_UIMM8:
5718 case AARCH64_OPND_SVE_UIMM8_53:
5719 case AARCH64_OPND_IMM_ROT1:
5720 case AARCH64_OPND_IMM_ROT2:
5721 case AARCH64_OPND_IMM_ROT3:
5722 case AARCH64_OPND_SVE_IMM_ROT1:
5723 case AARCH64_OPND_SVE_IMM_ROT2:
5724 po_imm_nc_or_fail ();
5725 info->imm.value = val;
5726 break;
5727
5728 case AARCH64_OPND_SVE_AIMM:
5729 case AARCH64_OPND_SVE_ASIMM:
5730 po_imm_nc_or_fail ();
5731 info->imm.value = val;
5732 skip_whitespace (str);
5733 if (skip_past_comma (&str))
5734 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5735 else
5736 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5737 break;
5738
5739 case AARCH64_OPND_SVE_PATTERN:
5740 po_enum_or_fail (aarch64_sve_pattern_array);
5741 info->imm.value = val;
5742 break;
5743
5744 case AARCH64_OPND_SVE_PATTERN_SCALED:
5745 po_enum_or_fail (aarch64_sve_pattern_array);
5746 info->imm.value = val;
5747 if (skip_past_comma (&str)
5748 && !parse_shift (&str, info, SHIFTED_MUL))
5749 goto failure;
5750 if (!info->shifter.operator_present)
5751 {
5752 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5753 info->shifter.kind = AARCH64_MOD_MUL;
5754 info->shifter.amount = 1;
5755 }
5756 break;
5757
5758 case AARCH64_OPND_SVE_PRFOP:
5759 po_enum_or_fail (aarch64_sve_prfop_array);
5760 info->imm.value = val;
5761 break;
5762
5763 case AARCH64_OPND_UIMM7:
5764 po_imm_or_fail (0, 127);
5765 info->imm.value = val;
5766 break;
5767
5768 case AARCH64_OPND_IDX:
5769 case AARCH64_OPND_MASK:
5770 case AARCH64_OPND_BIT_NUM:
5771 case AARCH64_OPND_IMMR:
5772 case AARCH64_OPND_IMMS:
5773 po_imm_or_fail (0, 63);
5774 info->imm.value = val;
5775 break;
5776
5777 case AARCH64_OPND_IMM0:
5778 po_imm_nc_or_fail ();
5779 if (val != 0)
5780 {
5781 set_fatal_syntax_error (_("immediate zero expected"));
5782 goto failure;
5783 }
5784 info->imm.value = 0;
5785 break;
5786
5787 case AARCH64_OPND_FPIMM0:
5788 {
5789 int qfloat;
5790 bfd_boolean res1 = FALSE, res2 = FALSE;
5791 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5792 it is probably not worth the effort to support it. */
5793 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5794 imm_reg_type))
5795 && (error_p ()
5796 || !(res2 = parse_constant_immediate (&str, &val,
5797 imm_reg_type))))
5798 goto failure;
5799 if ((res1 && qfloat == 0) || (res2 && val == 0))
5800 {
5801 info->imm.value = 0;
5802 info->imm.is_fp = 1;
5803 break;
5804 }
5805 set_fatal_syntax_error (_("immediate zero expected"));
5806 goto failure;
5807 }
5808
5809 case AARCH64_OPND_IMM_MOV:
5810 {
5811 char *saved = str;
5812 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5813 reg_name_p (str, REG_TYPE_VN))
5814 goto failure;
5815 str = saved;
5816 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5817 GE_OPT_PREFIX, 1));
5818 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5819 later. fix_mov_imm_insn will try to determine a machine
5820 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5821 message if the immediate cannot be moved by a single
5822 instruction. */
5823 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5824 inst.base.operands[i].skip = 1;
5825 }
5826 break;
5827
5828 case AARCH64_OPND_SIMD_IMM:
5829 case AARCH64_OPND_SIMD_IMM_SFT:
5830 if (! parse_big_immediate (&str, &val, imm_reg_type))
5831 goto failure;
5832 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5833 /* addr_off_p */ 0,
5834 /* need_libopcodes_p */ 1,
5835 /* skip_p */ 1);
5836 /* Parse shift.
5837 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5838 shift, we don't check it here; we leave the checking to
5839 the libopcodes (operand_general_constraint_met_p). By
5840 doing this, we achieve better diagnostics. */
5841 if (skip_past_comma (&str)
5842 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5843 goto failure;
5844 if (!info->shifter.operator_present
5845 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5846 {
5847 /* Default to LSL if not present. Libopcodes prefers shifter
5848 kind to be explicit. */
5849 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5850 info->shifter.kind = AARCH64_MOD_LSL;
5851 }
5852 break;
5853
5854 case AARCH64_OPND_FPIMM:
5855 case AARCH64_OPND_SIMD_FPIMM:
5856 case AARCH64_OPND_SVE_FPIMM8:
5857 {
5858 int qfloat;
5859 bfd_boolean dp_p;
5860
5861 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5862 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5863 || !aarch64_imm_float_p (qfloat))
5864 {
5865 if (!error_p ())
5866 set_fatal_syntax_error (_("invalid floating-point"
5867 " constant"));
5868 goto failure;
5869 }
5870 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5871 inst.base.operands[i].imm.is_fp = 1;
5872 }
5873 break;
5874
5875 case AARCH64_OPND_SVE_I1_HALF_ONE:
5876 case AARCH64_OPND_SVE_I1_HALF_TWO:
5877 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5878 {
5879 int qfloat;
5880 bfd_boolean dp_p;
5881
5882 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5883 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5884 {
5885 if (!error_p ())
5886 set_fatal_syntax_error (_("invalid floating-point"
5887 " constant"));
5888 goto failure;
5889 }
5890 inst.base.operands[i].imm.value = qfloat;
5891 inst.base.operands[i].imm.is_fp = 1;
5892 }
5893 break;
5894
5895 case AARCH64_OPND_LIMM:
5896 po_misc_or_fail (parse_shifter_operand (&str, info,
5897 SHIFTED_LOGIC_IMM));
5898 if (info->shifter.operator_present)
5899 {
5900 set_fatal_syntax_error
5901 (_("shift not allowed for bitmask immediate"));
5902 goto failure;
5903 }
5904 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5905 /* addr_off_p */ 0,
5906 /* need_libopcodes_p */ 1,
5907 /* skip_p */ 1);
5908 break;
5909
5910 case AARCH64_OPND_AIMM:
5911 if (opcode->op == OP_ADD)
5912 /* ADD may have relocation types. */
5913 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5914 SHIFTED_ARITH_IMM));
5915 else
5916 po_misc_or_fail (parse_shifter_operand (&str, info,
5917 SHIFTED_ARITH_IMM));
5918 switch (inst.reloc.type)
5919 {
5920 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5921 info->shifter.amount = 12;
5922 break;
5923 case BFD_RELOC_UNUSED:
5924 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5925 if (info->shifter.kind != AARCH64_MOD_NONE)
5926 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5927 inst.reloc.pc_rel = 0;
5928 break;
5929 default:
5930 break;
5931 }
5932 info->imm.value = 0;
5933 if (!info->shifter.operator_present)
5934 {
5935 /* Default to LSL if not present. Libopcodes prefers shifter
5936 kind to be explicit. */
5937 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5938 info->shifter.kind = AARCH64_MOD_LSL;
5939 }
5940 break;
5941
5942 case AARCH64_OPND_HALF:
5943 {
5944 /* #<imm16> or relocation. */
5945 int internal_fixup_p;
5946 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5947 if (internal_fixup_p)
5948 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5949 skip_whitespace (str);
5950 if (skip_past_comma (&str))
5951 {
5952 /* {, LSL #<shift>} */
5953 if (! aarch64_gas_internal_fixup_p ())
5954 {
5955 set_fatal_syntax_error (_("can't mix relocation modifier "
5956 "with explicit shift"));
5957 goto failure;
5958 }
5959 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5960 }
5961 else
5962 inst.base.operands[i].shifter.amount = 0;
5963 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5964 inst.base.operands[i].imm.value = 0;
5965 if (! process_movw_reloc_info ())
5966 goto failure;
5967 }
5968 break;
5969
5970 case AARCH64_OPND_EXCEPTION:
5971 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5972 imm_reg_type));
5973 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5974 /* addr_off_p */ 0,
5975 /* need_libopcodes_p */ 0,
5976 /* skip_p */ 1);
5977 break;
5978
5979 case AARCH64_OPND_NZCV:
5980 {
5981 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5982 if (nzcv != NULL)
5983 {
5984 str += 4;
5985 info->imm.value = nzcv->value;
5986 break;
5987 }
5988 po_imm_or_fail (0, 15);
5989 info->imm.value = val;
5990 }
5991 break;
5992
5993 case AARCH64_OPND_COND:
5994 case AARCH64_OPND_COND1:
5995 {
5996 char *start = str;
5997 do
5998 str++;
5999 while (ISALPHA (*str));
6000 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
6001 if (info->cond == NULL)
6002 {
6003 set_syntax_error (_("invalid condition"));
6004 goto failure;
6005 }
6006 else if (operands[i] == AARCH64_OPND_COND1
6007 && (info->cond->value & 0xe) == 0xe)
6008 {
6009 /* Do not allow AL or NV. */
6010 set_default_error ();
6011 goto failure;
6012 }
6013 }
6014 break;
6015
6016 case AARCH64_OPND_ADDR_ADRP:
6017 po_misc_or_fail (parse_adrp (&str));
6018 /* Clear the value as operand needs to be relocated. */
6019 info->imm.value = 0;
6020 break;
6021
6022 case AARCH64_OPND_ADDR_PCREL14:
6023 case AARCH64_OPND_ADDR_PCREL19:
6024 case AARCH64_OPND_ADDR_PCREL21:
6025 case AARCH64_OPND_ADDR_PCREL26:
6026 po_misc_or_fail (parse_address (&str, info));
6027 if (!info->addr.pcrel)
6028 {
6029 set_syntax_error (_("invalid pc-relative address"));
6030 goto failure;
6031 }
6032 if (inst.gen_lit_pool
6033 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6034 {
6035 /* Only permit "=value" in the literal load instructions.
6036 The literal will be generated by programmer_friendly_fixup. */
6037 set_syntax_error (_("invalid use of \"=immediate\""));
6038 goto failure;
6039 }
6040 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6041 {
6042 set_syntax_error (_("unrecognized relocation suffix"));
6043 goto failure;
6044 }
6045 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6046 {
6047 info->imm.value = inst.reloc.exp.X_add_number;
6048 inst.reloc.type = BFD_RELOC_UNUSED;
6049 }
6050 else
6051 {
6052 info->imm.value = 0;
6053 if (inst.reloc.type == BFD_RELOC_UNUSED)
6054 switch (opcode->iclass)
6055 {
6056 case compbranch:
6057 case condbranch:
6058 /* e.g. CBZ or B.COND */
6059 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6060 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6061 break;
6062 case testbranch:
6063 /* e.g. TBZ */
6064 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6065 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6066 break;
6067 case branch_imm:
6068 /* e.g. B or BL */
6069 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6070 inst.reloc.type =
6071 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6072 : BFD_RELOC_AARCH64_JUMP26;
6073 break;
6074 case loadlit:
6075 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6076 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6077 break;
6078 case pcreladdr:
6079 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6080 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6081 break;
6082 default:
6083 gas_assert (0);
6084 abort ();
6085 }
6086 inst.reloc.pc_rel = 1;
6087 }
6088 break;
6089
6090 case AARCH64_OPND_ADDR_SIMPLE:
6091 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6092 {
6093 /* [<Xn|SP>{, #<simm>}] */
6094 char *start = str;
6095 /* First use the normal address-parsing routines, to get
6096 the usual syntax errors. */
6097 po_misc_or_fail (parse_address (&str, info));
6098 if (info->addr.pcrel || info->addr.offset.is_reg
6099 || !info->addr.preind || info->addr.postind
6100 || info->addr.writeback)
6101 {
6102 set_syntax_error (_("invalid addressing mode"));
6103 goto failure;
6104 }
6105
6106 /* Then retry, matching the specific syntax of these addresses. */
6107 str = start;
6108 po_char_or_fail ('[');
6109 po_reg_or_fail (REG_TYPE_R64_SP);
6110 /* Accept optional ", #0". */
6111 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6112 && skip_past_char (&str, ','))
6113 {
6114 skip_past_char (&str, '#');
6115 if (! skip_past_char (&str, '0'))
6116 {
6117 set_fatal_syntax_error
6118 (_("the optional immediate offset can only be 0"));
6119 goto failure;
6120 }
6121 }
6122 po_char_or_fail (']');
6123 break;
6124 }
6125
6126 case AARCH64_OPND_ADDR_REGOFF:
6127 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6128 po_misc_or_fail (parse_address (&str, info));
6129 regoff_addr:
6130 if (info->addr.pcrel || !info->addr.offset.is_reg
6131 || !info->addr.preind || info->addr.postind
6132 || info->addr.writeback)
6133 {
6134 set_syntax_error (_("invalid addressing mode"));
6135 goto failure;
6136 }
6137 if (!info->shifter.operator_present)
6138 {
6139 /* Default to LSL if not present. Libopcodes prefers shifter
6140 kind to be explicit. */
6141 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6142 info->shifter.kind = AARCH64_MOD_LSL;
6143 }
6144 /* Qualifier to be deduced by libopcodes. */
6145 break;
6146
6147 case AARCH64_OPND_ADDR_SIMM7:
6148 po_misc_or_fail (parse_address (&str, info));
6149 if (info->addr.pcrel || info->addr.offset.is_reg
6150 || (!info->addr.preind && !info->addr.postind))
6151 {
6152 set_syntax_error (_("invalid addressing mode"));
6153 goto failure;
6154 }
6155 if (inst.reloc.type != BFD_RELOC_UNUSED)
6156 {
6157 set_syntax_error (_("relocation not allowed"));
6158 goto failure;
6159 }
6160 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6161 /* addr_off_p */ 1,
6162 /* need_libopcodes_p */ 1,
6163 /* skip_p */ 0);
6164 break;
6165
6166 case AARCH64_OPND_ADDR_SIMM9:
6167 case AARCH64_OPND_ADDR_SIMM9_2:
6168 po_misc_or_fail (parse_address (&str, info));
6169 if (info->addr.pcrel || info->addr.offset.is_reg
6170 || (!info->addr.preind && !info->addr.postind)
6171 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6172 && info->addr.writeback))
6173 {
6174 set_syntax_error (_("invalid addressing mode"));
6175 goto failure;
6176 }
6177 if (inst.reloc.type != BFD_RELOC_UNUSED)
6178 {
6179 set_syntax_error (_("relocation not allowed"));
6180 goto failure;
6181 }
6182 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6183 /* addr_off_p */ 1,
6184 /* need_libopcodes_p */ 1,
6185 /* skip_p */ 0);
6186 break;
6187
6188 case AARCH64_OPND_ADDR_SIMM10:
6189 case AARCH64_OPND_ADDR_OFFSET:
6190 po_misc_or_fail (parse_address (&str, info));
6191 if (info->addr.pcrel || info->addr.offset.is_reg
6192 || !info->addr.preind || info->addr.postind)
6193 {
6194 set_syntax_error (_("invalid addressing mode"));
6195 goto failure;
6196 }
6197 if (inst.reloc.type != BFD_RELOC_UNUSED)
6198 {
6199 set_syntax_error (_("relocation not allowed"));
6200 goto failure;
6201 }
6202 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6203 /* addr_off_p */ 1,
6204 /* need_libopcodes_p */ 1,
6205 /* skip_p */ 0);
6206 break;
6207
6208 case AARCH64_OPND_ADDR_UIMM12:
6209 po_misc_or_fail (parse_address (&str, info));
6210 if (info->addr.pcrel || info->addr.offset.is_reg
6211 || !info->addr.preind || info->addr.writeback)
6212 {
6213 set_syntax_error (_("invalid addressing mode"));
6214 goto failure;
6215 }
6216 if (inst.reloc.type == BFD_RELOC_UNUSED)
6217 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6218 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6219 || (inst.reloc.type
6220 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6221 || (inst.reloc.type
6222 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6223 || (inst.reloc.type
6224 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6225 || (inst.reloc.type
6226 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6227 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6228 /* Leave qualifier to be determined by libopcodes. */
6229 break;
6230
6231 case AARCH64_OPND_SIMD_ADDR_POST:
6232 /* [<Xn|SP>], <Xm|#<amount>> */
6233 po_misc_or_fail (parse_address (&str, info));
6234 if (!info->addr.postind || !info->addr.writeback)
6235 {
6236 set_syntax_error (_("invalid addressing mode"));
6237 goto failure;
6238 }
6239 if (!info->addr.offset.is_reg)
6240 {
6241 if (inst.reloc.exp.X_op == O_constant)
6242 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6243 else
6244 {
6245 set_fatal_syntax_error
6246 (_("writeback value must be an immediate constant"));
6247 goto failure;
6248 }
6249 }
6250 /* No qualifier. */
6251 break;
6252
6253 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6254 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6255 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6256 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6257 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6258 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6259 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6260 case AARCH64_OPND_SVE_ADDR_RI_U6:
6261 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6262 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6263 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6264 /* [X<n>{, #imm, MUL VL}]
6265 [X<n>{, #imm}]
6266 but recognizing SVE registers. */
6267 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6268 &offset_qualifier));
6269 if (base_qualifier != AARCH64_OPND_QLF_X)
6270 {
6271 set_syntax_error (_("invalid addressing mode"));
6272 goto failure;
6273 }
6274 sve_regimm:
6275 if (info->addr.pcrel || info->addr.offset.is_reg
6276 || !info->addr.preind || info->addr.writeback)
6277 {
6278 set_syntax_error (_("invalid addressing mode"));
6279 goto failure;
6280 }
6281 if (inst.reloc.type != BFD_RELOC_UNUSED
6282 || inst.reloc.exp.X_op != O_constant)
6283 {
6284 /* Make sure this has priority over
6285 "invalid addressing mode". */
6286 set_fatal_syntax_error (_("constant offset required"));
6287 goto failure;
6288 }
6289 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6290 break;
6291
6292 case AARCH64_OPND_SVE_ADDR_R:
6293 /* [<Xn|SP>{, <R><m>}]
6294 but recognizing SVE registers. */
6295 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6296 &offset_qualifier));
6297 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6298 {
6299 offset_qualifier = AARCH64_OPND_QLF_X;
6300 info->addr.offset.is_reg = 1;
6301 info->addr.offset.regno = 31;
6302 }
6303 else if (base_qualifier != AARCH64_OPND_QLF_X
6304 || offset_qualifier != AARCH64_OPND_QLF_X)
6305 {
6306 set_syntax_error (_("invalid addressing mode"));
6307 goto failure;
6308 }
6309 goto regoff_addr;
6310
6311 case AARCH64_OPND_SVE_ADDR_RR:
6312 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6313 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6314 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6315 case AARCH64_OPND_SVE_ADDR_RX:
6316 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6317 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6318 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6319 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6320 but recognizing SVE registers. */
6321 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6322 &offset_qualifier));
6323 if (base_qualifier != AARCH64_OPND_QLF_X
6324 || offset_qualifier != AARCH64_OPND_QLF_X)
6325 {
6326 set_syntax_error (_("invalid addressing mode"));
6327 goto failure;
6328 }
6329 goto regoff_addr;
6330
6331 case AARCH64_OPND_SVE_ADDR_RZ:
6332 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6333 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6334 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6335 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6336 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6337 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6338 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6339 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6340 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6341 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6342 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6343 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6344 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6345 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6346 &offset_qualifier));
6347 if (base_qualifier != AARCH64_OPND_QLF_X
6348 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6349 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6350 {
6351 set_syntax_error (_("invalid addressing mode"));
6352 goto failure;
6353 }
6354 info->qualifier = offset_qualifier;
6355 goto regoff_addr;
6356
6357 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6358 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6359 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6360 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6361 /* [Z<n>.<T>{, #imm}] */
6362 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6363 &offset_qualifier));
6364 if (base_qualifier != AARCH64_OPND_QLF_S_S
6365 && base_qualifier != AARCH64_OPND_QLF_S_D)
6366 {
6367 set_syntax_error (_("invalid addressing mode"));
6368 goto failure;
6369 }
6370 info->qualifier = base_qualifier;
6371 goto sve_regimm;
6372
6373 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6374 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6375 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6376 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6377 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6378
6379 We don't reject:
6380
6381 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6382
6383 here since we get better error messages by leaving it to
6384 the qualifier checking routines. */
6385 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6386 &offset_qualifier));
6387 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6388 && base_qualifier != AARCH64_OPND_QLF_S_D)
6389 || offset_qualifier != base_qualifier)
6390 {
6391 set_syntax_error (_("invalid addressing mode"));
6392 goto failure;
6393 }
6394 info->qualifier = base_qualifier;
6395 goto regoff_addr;
6396
6397 case AARCH64_OPND_SYSREG:
6398 {
6399 uint32_t sysreg_flags;
6400 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6401 &sysreg_flags)) == PARSE_FAIL)
6402 {
6403 set_syntax_error (_("unknown or missing system register name"));
6404 goto failure;
6405 }
6406 inst.base.operands[i].sysreg.value = val;
6407 inst.base.operands[i].sysreg.flags = sysreg_flags;
6408 break;
6409 }
6410
6411 case AARCH64_OPND_PSTATEFIELD:
6412 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6413 == PARSE_FAIL)
6414 {
6415 set_syntax_error (_("unknown or missing PSTATE field name"));
6416 goto failure;
6417 }
6418 inst.base.operands[i].pstatefield = val;
6419 break;
6420
6421 case AARCH64_OPND_SYSREG_IC:
6422 inst.base.operands[i].sysins_op =
6423 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6424 goto sys_reg_ins;
6425 case AARCH64_OPND_SYSREG_DC:
6426 inst.base.operands[i].sysins_op =
6427 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6428 goto sys_reg_ins;
6429 case AARCH64_OPND_SYSREG_AT:
6430 inst.base.operands[i].sysins_op =
6431 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6432 goto sys_reg_ins;
6433 case AARCH64_OPND_SYSREG_TLBI:
6434 inst.base.operands[i].sysins_op =
6435 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6436 sys_reg_ins:
6437 if (inst.base.operands[i].sysins_op == NULL)
6438 {
6439 set_fatal_syntax_error ( _("unknown or missing operation name"));
6440 goto failure;
6441 }
6442 break;
6443
6444 case AARCH64_OPND_BARRIER:
6445 case AARCH64_OPND_BARRIER_ISB:
6446 val = parse_barrier (&str);
6447 if (val != PARSE_FAIL
6448 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6449 {
6450 /* ISB only accepts options name 'sy'. */
6451 set_syntax_error
6452 (_("the specified option is not accepted in ISB"));
6453 /* Turn off backtrack as this optional operand is present. */
6454 backtrack_pos = 0;
6455 goto failure;
6456 }
6457 /* This is an extension to accept a 0..15 immediate. */
6458 if (val == PARSE_FAIL)
6459 po_imm_or_fail (0, 15);
6460 info->barrier = aarch64_barrier_options + val;
6461 break;
6462
6463 case AARCH64_OPND_PRFOP:
6464 val = parse_pldop (&str);
6465 /* This is an extension to accept a 0..31 immediate. */
6466 if (val == PARSE_FAIL)
6467 po_imm_or_fail (0, 31);
6468 inst.base.operands[i].prfop = aarch64_prfops + val;
6469 break;
6470
6471 case AARCH64_OPND_BARRIER_PSB:
6472 val = parse_barrier_psb (&str, &(info->hint_option));
6473 if (val == PARSE_FAIL)
6474 goto failure;
6475 break;
6476
6477 default:
6478 as_fatal (_("unhandled operand code %d"), operands[i]);
6479 }
6480
6481 /* If we get here, this operand was successfully parsed. */
6482 inst.base.operands[i].present = 1;
6483 continue;
6484
6485 failure:
6486 /* The parse routine should already have set the error, but in case
6487 not, set a default one here. */
6488 if (! error_p ())
6489 set_default_error ();
6490
6491 if (! backtrack_pos)
6492 goto parse_operands_return;
6493
6494 {
6495 /* We reach here because this operand is marked as optional, and
6496 either no operand was supplied or the operand was supplied but it
6497 was syntactically incorrect. In the latter case we report an
6498 error. In the former case we perform a few more checks before
6499 dropping through to the code to insert the default operand. */
6500
6501 char *tmp = backtrack_pos;
6502 char endchar = END_OF_INSN;
6503
6504 if (i != (aarch64_num_of_operands (opcode) - 1))
6505 endchar = ',';
6506 skip_past_char (&tmp, ',');
6507
6508 if (*tmp != endchar)
6509 /* The user has supplied an operand in the wrong format. */
6510 goto parse_operands_return;
6511
6512 /* Make sure there is not a comma before the optional operand.
6513 For example the fifth operand of 'sys' is optional:
6514
6515 sys #0,c0,c0,#0, <--- wrong
6516 sys #0,c0,c0,#0 <--- correct. */
6517 if (comma_skipped_p && i && endchar == END_OF_INSN)
6518 {
6519 set_fatal_syntax_error
6520 (_("unexpected comma before the omitted optional operand"));
6521 goto parse_operands_return;
6522 }
6523 }
6524
6525 /* Reaching here means we are dealing with an optional operand that is
6526 omitted from the assembly line. */
6527 gas_assert (optional_operand_p (opcode, i));
6528 info->present = 0;
6529 process_omitted_operand (operands[i], opcode, i, info);
6530
6531 /* Try again, skipping the optional operand at backtrack_pos. */
6532 str = backtrack_pos;
6533 backtrack_pos = 0;
6534
6535 /* Clear any error record after the omitted optional operand has been
6536 successfully handled. */
6537 clear_error ();
6538 }
6539
6540 /* Check if we have parsed all the operands. */
6541 if (*str != '\0' && ! error_p ())
6542 {
6543 /* Set I to the index of the last present operand; this is
6544 for the purpose of diagnostics. */
6545 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6546 ;
6547 set_fatal_syntax_error
6548 (_("unexpected characters following instruction"));
6549 }
6550
6551 parse_operands_return:
6552
6553 if (error_p ())
6554 {
6555 DEBUG_TRACE ("parsing FAIL: %s - %s",
6556 operand_mismatch_kind_names[get_error_kind ()],
6557 get_error_message ());
6558 /* Record the operand error properly; this is useful when there
6559 are multiple instruction templates for a mnemonic name, so that
6560 later on, we can select the error that most closely describes
6561 the problem. */
6562 record_operand_error (opcode, i, get_error_kind (),
6563 get_error_message ());
6564 return FALSE;
6565 }
6566 else
6567 {
6568 DEBUG_TRACE ("parsing SUCCESS");
6569 return TRUE;
6570 }
6571 }
6572
6573 /* It does some fix-up to provide some programmer friendly feature while
6574 keeping the libopcodes happy, i.e. libopcodes only accepts
6575 the preferred architectural syntax.
6576 Return FALSE if there is any failure; otherwise return TRUE. */
6577
static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* Dispatch on the instruction class; only a few classes need any
     programmer-friendly rewriting before encoding.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  if (operands[1].imm.value >= 32)
	    {
	      /* A W register can only test bits 0-31.  */
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* The architectural form always uses the X spelling.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  if (op == OP_LDRSW_LIT)
	    /* LDRSW loads a 32-bit literal regardless of Xt.  */
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6680
6681 /* Check for loads and stores that will cause unpredictable behavior. */
6682
static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.
	 Operand 0 is the transfer register, operand 1 the address.  SP as
	 base is exempt.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;
    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pairs the address is operand 2; either transfer register may
	 clash with the base.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.  (Bit 22 in these
	 pair encodings distinguishes loads from stores.)  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      /* It is unpredictable if the destination and status registers are the
	 same.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[1].reg.regno
	      || opnds[0].reg.regno == opnds[2].reg.regno))
	as_warn (_("unpredictable: identical transfer and status registers"
		   " --`%s'"),
		 str);

      break;

    default:
      break;
    }
}
6740
6741 /* A wrapper function to interface with libopcodes on encoding and
6742 record the error message if there is any.
6743
6744 Return TRUE on success; otherwise return FALSE. */
6745
6746 static bfd_boolean
6747 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6748 aarch64_insn *code)
6749 {
6750 aarch64_operand_error error_info;
6751 memset (&error_info, '\0', sizeof (error_info));
6752 error_info.kind = AARCH64_OPDE_NIL;
6753 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
6754 && !error_info.non_fatal)
6755 return TRUE;
6756
6757 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6758 record_operand_error_info (opcode, &error_info);
6759 return error_info.non_fatal;
6760 }
6761
6762 #ifdef DEBUG_AARCH64
6763 static inline void
6764 dump_opcode_operands (const aarch64_opcode *opcode)
6765 {
6766 int i = 0;
6767 while (opcode->operands[i] != AARCH64_OPND_NIL)
6768 {
6769 aarch64_verbose ("\t\t opnd%d: %s", i,
6770 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6771 ? aarch64_get_operand_name (opcode->operands[i])
6772 : aarch64_get_operand_desc (opcode->operands[i]));
6773 ++i;
6774 }
6775 }
6776 #endif /* DEBUG_AARCH64 */
6777
6778 /* This is the guts of the machine-dependent assembler. STR points to a
6779 machine dependent instruction. This function is supposed to emit
6780 the frags/bytes it assembles to. */
6781
void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* P is advanced past the mnemonic on success.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic across the reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; stop at the first template that takes.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      /* This template didn't match; reset and try the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, FALSE);
}
6917
6918 /* Various frobbings of labels and their addresses. */
6919
void
aarch64_start_line_hook (void)
{
  /* Forget the label from the previous line; md_assemble only re-anchors
     a label seen since the start of the current line.  */
  last_label_seen = NULL;
}
6925
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the most recently defined label so md_assemble can attach
     it to the instruction's frag and offset.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
6933
6934 int
6935 aarch64_data_in_code (void)
6936 {
6937 if (!strncmp (input_line_pointer + 1, "data:", 5))
6938 {
6939 *input_line_pointer = '/';
6940 input_line_pointer += 5;
6941 *input_line_pointer = 0;
6942 return 1;
6943 }
6944
6945 return 0;
6946 }
6947
/* Strip a trailing "/data" suffix from NAME in place, if present.
   Return NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t length = strlen (name);

  /* Only a suffix of a longer name counts; "/data" alone is left as-is.  */
  if (length > 5 && strcmp (name + length - 5, "/data") == 0)
    name[length - 5] = '\0';

  return name;
}
6958 \f
6959 /* Table of all register names defined by default. The user can
6960 define additional names with .req. Note that all register names
6961 should appear in both upper and lowercase variants. Some registers
6962 also have mixed-case names. */
6963
/* REGDEF creates an entry whose spelling is the canonical name for the
   register; REGDEF_ALIAS creates an alternative spelling (ip0/ip1/fp/lr)
   distinguished by its FALSE flag.  REGNUM pastes a prefix and number.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Entries for registers 0-15 with prefix P.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Entries for registers 0-30; number 31 is omitted (for x/w it is
   covered by the separate sp/wsp/xzr/wzr entries below).  */
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
/* Entries for registers 0-31.  */
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers (only p0-p15 exist).  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7028
/* Temporary helpers for spelling out the NZCV flags operand: an
   upper-case letter means the corresponding flag bit is set, lower-case
   means it is clear.  B packs the four bits into the 4-bit value.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 case-combinations of "nzcv", each mapped to its flag mask.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7066 \f
7067 /* MD interface: bits in the object file. */
7068
7069 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7070 for use in the a.out file, and stores them in the array pointed to by buf.
7071 This knows about the endian-ness of the target machine and does
7072 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7073 2 (short) and 4 (long) Floating numbers are put out as a series of
7074 LITTLENUMS (shorts, here at least). */
7075
7076 void
7077 md_number_to_chars (char *buf, valueT val, int n)
7078 {
7079 if (target_big_endian)
7080 number_to_chars_bigendian (buf, val, n);
7081 else
7082 number_to_chars_littleendian (buf, val, n);
7083 }
7084
7085 /* MD interface: Sections. */
7086
7087 /* Estimate the size of a frag before relaxing. Assume everything fits in
7088 4 bytes. */
7089
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every AArch64 instruction occupies exactly 4 bytes; there is nothing
     to relax.  */
  fragp->fr_var = 4;
  return 4;
}
7096
7097 /* Round up a section size to the appropriate boundary. */
7098
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No padding is added here; section sizes are left as-is.  */
  return size;
}
7104
7105 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7106 of an rs_align_code fragment.
7107
7108 Here we fill the frag with the appropriate info for padding the
7109 output stream. The resulting frag will consist of a fixed (fr_fix)
7110 and of a repeating (fr_var) part.
7111
7112 The fixed content is always emitted before the repeating content and
7113 these two parts are used as follows in constructing the output:
7114 - the fixed part will be used to align to a valid instruction word
7115 boundary, in case that we start at a misaligned address; as no
7116 executable instruction can live at the misaligned location, we
7117 simply fill with zeros;
7118 - the variable part will be used to cover the remaining padding and
7119 we fill using the AArch64 NOP instruction.
7120
7121 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7122 enough storage space for up to 3 bytes for padding the back to a valid
7123 instruction alignment and exactly 4 bytes to store the NOP pattern. */
7124
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed, and where the fixed part starts.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* FIX is the number of zero bytes needed to reach 4-byte alignment;
     no instruction can start at a misaligned address, so these bytes are
     marked as data via a mapping symbol.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The variable part repeats the NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
7162
7163 /* Perform target specific initialisation of a frag.
7164 Note - despite the name this initialisation is not done when the frag
7165 is created, but only when its type is assigned. A frag can be created
7166 and used a long time before its type is set, so beware of assuming that
7167 this initialisation is performed first. */
7168
7169 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets.  */
}
7175
7176 #else /* OBJ_ELF is defined. */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state appropriate to the frag's contents.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
7208 \f
7209 /* Initialize the DWARF-2 unwind information for this procedure. */
7210
void
tc_aarch64_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
7216 #endif /* OBJ_ELF */
7217
7218 /* Convert REGNAME to a DWARF-2 register number. */
7219
7220 int
7221 tc_aarch64_regname_to_dw2regnum (char *regname)
7222 {
7223 const reg_entry *reg = parse_reg (&regname);
7224 if (reg == NULL)
7225 return -1;
7226
7227 switch (reg->type)
7228 {
7229 case REG_TYPE_SP_32:
7230 case REG_TYPE_SP_64:
7231 case REG_TYPE_R_32:
7232 case REG_TYPE_R_64:
7233 return reg->number;
7234
7235 case REG_TYPE_FP_B:
7236 case REG_TYPE_FP_H:
7237 case REG_TYPE_FP_S:
7238 case REG_TYPE_FP_D:
7239 case REG_TYPE_FP_Q:
7240 return reg->number + 64;
7241
7242 default:
7243 break;
7244 }
7245 return -1;
7246 }
7247
7248 /* Implement DWARF2_ADDR_SIZE. */
7249
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7259
7260 /* MD interface: Symbol and relocation handling. */
7261
7262 /* Return the address within the segment that a PC-relative fixup is
7263 relative to. For AArch64 PC-relative fixups applied to instructions
7264 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7265
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     A fixup against a symbol in another segment, or one we are forcing
     to be relocated, falls in the "emit a relocation" case.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
7282
7283 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7284 Otherwise we have no need to default values of symbols. */
7285
symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap prefix test before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  /* Create and cache the GOT symbol on first use.  */
	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7308
7309 /* Return non-zero if the indicated VALUE has overflowed the maximum
7310 range expressible by a unsigned number with the indicated number of
7311 BITS. */
7312
7313 static bfd_boolean
7314 unsigned_overflow (valueT value, unsigned bits)
7315 {
7316 valueT lim;
7317 if (bits >= sizeof (valueT) * 8)
7318 return FALSE;
7319 lim = (valueT) 1 << bits;
7320 return (value >= lim);
7321 }
7322
7323
7324 /* Return non-zero if the indicated VALUE has overflowed the maximum
7325 range expressible by an signed number with the indicated number of
7326 BITS. */
7327
7328 static bfd_boolean
7329 signed_overflow (offsetT value, unsigned bits)
7330 {
7331 offsetT lim;
7332 if (bits >= sizeof (offsetT) * 8)
7333 return FALSE;
7334 lim = (offsetT) 1 << (bits - 1);
7335 return (value < -lim || value >= lim);
7336 }
7337
7338 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7339 unsigned immediate offset load/store instruction, try to encode it as
7340 an unscaled, 9-bit, signed immediate offset load/store instruction.
7341 Return TRUE if it is successful; otherwise return FALSE.
7342
7343 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7344 in response to the standard LDR/STR mnemonics when the immediate offset is
7345 unambiguous, i.e. when it is negative or unaligned. */
7346
static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled load/store to its unscaled LDUR/STUR counterpart;
     anything without a counterpart maps to OP_NIL.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return FALSE;

  return TRUE;
}
7400
7401 /* Called by fix_insn to fix a MOV immediate alias instruction.
7402
7403 Operand for a generic move immediate instruction, which is an alias
7404 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
7405 a 32-bit/64-bit immediate value into general register. An assembler error
7406 shall result if the immediate cannot be created by a single one of these
7407 instructions. If there is a choice, then to ensure reversability an
7408 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
7409
static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  MOVZ/MOVN cannot target SP;
     the ORR form cannot target ZR.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN); per the preference rule
	 above it is attempted only after MOVZ fails.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
7461
7462 /* An instruction operand which is immediate related may have symbol used
7463 in the assembly, e.g.
7464
7465 mov w0, u32
7466 .set u32, 0x00ffff00
7467
7468 At the time when the assembly instruction is parsed, a referenced symbol,
7469 like 'u32' in the above example may not have been seen; a fixS is created
7470 in such a case and is handled here after symbols have been resolved.
7471 Instruction is fixed up with VALUE using the information in *FIXP plus
7472 extra information in FLAGS.
7473
7474 This function is called by md_apply_fix to fix up instructions that need
7475 a fix-up described above but does not involve any linker-time relocation. */
7476
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit immediate of SVC and friends, patched in place.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6  Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6  Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6  Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6  Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift    imm12  Rn    Rd    ADD
	 31000000 sf 011|0001|shift    imm12  Rn    Rd    ADDS
	 51000000 sf 101|0001|shift    imm12  Rn    Rd    SUB
	 71000000 sf 111|0001|shift    imm12  Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encoded via libopcodes because the
	 immediate's field layout depends on the opcode.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Fall back to the LDUR/STUR form when the scaled offset does
	     not encode (negative or unaligned).  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7637
7638 /* Apply a fixup (fixP) to segment data, once it has been determined
7639 by our caller that we have all the info we need to fix it up.
7640
7641 Parameter valP is the pointer to the value of the bits. */
7642
void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Location of the bytes being fixed up within their frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* For BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP this carries the operand
     code stashed in fx_addnumber when the fixup was created.  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    /* Plain data relocations: write the resolved value directly when the
       fixup is fully resolved or the target uses REL-style addends.  */
    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
         in fact we generate an error because we do not have a reloc
         for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit word-scaled pc-relative offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit byte-granular pc-relative immediate.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* B.cond: 19-bit word-scaled branch offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ/TBNZ: 14-bit word-scaled branch offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* B/BL: 26-bit word-scaled branch offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVZ/MOVN/MOVK families: each group selects the 16-bit slice of the
       value to insert via SCALE, then falls into the shared handler.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
         aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
         aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    /* Pseudo relocs: rewritten here into the LP64 or ILP32 variant the
       linker actually understands, then left for the object file.  */
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
         aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
         aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* TLS relocations: mark the symbol thread-local and pass the reloc
       through to the linker untouched.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
         aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
         aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
         aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations for TLS descriptor sequences; nothing to
	 patch in the instruction itself.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst.
     N.B. currently only a very limited number of fix-up types actually use
     this field, so the impact on performance should be minimal.  */
  if (fixP->tc_fix_data.inst != NULL)
    free (fixP->tc_fix_data.inst);

  return;
}
8025
8026 /* Translate internal representation of relocation info to BFD target
8027 format. */
8028
8029 arelent *
8030 tc_gen_reloc (asection * section, fixS * fixp)
8031 {
8032 arelent *reloc;
8033 bfd_reloc_code_real_type code;
8034
8035 reloc = XNEW (arelent);
8036
8037 reloc->sym_ptr_ptr = XNEW (asymbol *);
8038 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8039 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8040
8041 if (fixp->fx_pcrel)
8042 {
8043 if (section->use_rela_p)
8044 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8045 else
8046 fixp->fx_offset = reloc->address;
8047 }
8048 reloc->addend = fixp->fx_offset;
8049
8050 code = fixp->fx_r_type;
8051 switch (code)
8052 {
8053 case BFD_RELOC_16:
8054 if (fixp->fx_pcrel)
8055 code = BFD_RELOC_16_PCREL;
8056 break;
8057
8058 case BFD_RELOC_32:
8059 if (fixp->fx_pcrel)
8060 code = BFD_RELOC_32_PCREL;
8061 break;
8062
8063 case BFD_RELOC_64:
8064 if (fixp->fx_pcrel)
8065 code = BFD_RELOC_64_PCREL;
8066 break;
8067
8068 default:
8069 break;
8070 }
8071
8072 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8073 if (reloc->howto == NULL)
8074 {
8075 as_bad_where (fixp->fx_file, fixp->fx_line,
8076 _
8077 ("cannot represent %s relocation in this object file format"),
8078 bfd_get_reloc_code_name (code));
8079 return NULL;
8080 }
8081
8082 return reloc;
8083 }
8084
8085 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8086
8087 void
8088 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8089 {
8090 bfd_reloc_code_real_type type;
8091 int pcrel = 0;
8092
8093 /* Pick a reloc.
8094 FIXME: @@ Should look at CPU word size. */
8095 switch (size)
8096 {
8097 case 1:
8098 type = BFD_RELOC_8;
8099 break;
8100 case 2:
8101 type = BFD_RELOC_16;
8102 break;
8103 case 4:
8104 type = BFD_RELOC_32;
8105 break;
8106 case 8:
8107 type = BFD_RELOC_64;
8108 break;
8109 default:
8110 as_bad (_("cannot do %u-byte relocation"), size);
8111 type = BFD_RELOC_UNUSED;
8112 break;
8113 }
8114
8115 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8116 }
8117
/* Decide whether the fixup FIXP must be kept as a relocation for the
   object file (non-zero) rather than resolved by the assembler.
   Falls back to the generic test for anything not listed here.  */
int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    /* GOT, page-relative and TLS relocations can only be resolved at
       link time, whatever the symbol looks like to the assembler.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  return generic_force_reloc (fixp);
}
8213
8214 #ifdef OBJ_ELF
8215
8216 /* Implement md_after_parse_args. This is the earliest time we need to decide
8217 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8218
8219 void
8220 aarch64_after_parse_args (void)
8221 {
8222 if (aarch64_abi != AARCH64_ABI_NONE)
8223 return;
8224
8225 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8226 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8227 aarch64_abi = AARCH64_ABI_ILP32;
8228 else
8229 aarch64_abi = AARCH64_ABI_LP64;
8230 }
8231
8232 const char *
8233 elf64_aarch64_target_format (void)
8234 {
8235 if (strcmp (TARGET_OS, "cloudabi") == 0)
8236 {
8237 /* FIXME: What to do for ilp32_p ? */
8238 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
8239 }
8240 if (target_big_endian)
8241 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8242 else
8243 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8244 }
8245
/* Frob SYMP just before it is written out; *PUNTP is set by the callee
   if the symbol should be punted.  No AArch64-specific processing is
   needed, so defer entirely to the generic ELF handling.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8251 #endif
8252
8253 /* MD interface: Finalization. */
8254
8255 /* A good place to do this, although this was probably not intended
8256 for this kind of use. We need to dump the literal pool before
8257 references are made to a null symbol pointer. */
8258
8259 void
8260 aarch64_cleanup (void)
8261 {
8262 literal_pool *pool;
8263
8264 for (pool = list_of_pools; pool; pool = pool->next)
8265 {
8266 /* Put it at the end of the relevant section. */
8267 subseg_set (pool->section, pool->sub_section);
8268 s_ltorg (0);
8269 }
8270 }
8271
8272 #ifdef OBJ_ELF
8273 /* Remove any excess mapping symbols generated for alignment frags in
8274 SEC. We may have created a mapping symbol before a zero byte
8275 alignment; remove it if there's a mapping symbol after the
8276 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Sections with no frag chain (no contents) need no processing.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section, looking at the last mapping symbol
     recorded for each one.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
         this point.  But if this was variable-sized to start with,
         there will be a fixed-size frag after it.  So don't handle
         next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly at the end of FRAGP.  Scan forward across any
	 empty frags to decide whether a following mapping symbol makes
	 SYM redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
	         one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
	         it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8336 #endif
8337
8338 /* Adjust the symbol table. */
8339
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
  /* Non-ELF object formats need no symbol-table adjustment.  */
}
8350
/* Insert (KEY, VALUE) into TABLE, diagnosing failure.

   hash_insert only fails on out-of-memory or a duplicate key, either of
   which indicates an internal error in the table set-up code.  Report
   it through gas's fatal-error machinery rather than printing to stdout
   and silently continuing with a missing table entry, which would later
   surface as inexplicable "unknown mnemonic/operand" errors.  */
static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err;

  hash_err = hash_insert (table, key, value);
  if (hash_err)
    as_fatal (_("internal error: can't hash `%s': %s"), key, hash_err);
}
8360
8361 static void
8362 fill_instruction_hash_table (void)
8363 {
8364 aarch64_opcode *opcode = aarch64_opcode_table;
8365
8366 while (opcode->name != NULL)
8367 {
8368 templates *templ, *new_templ;
8369 templ = hash_find (aarch64_ops_hsh, opcode->name);
8370
8371 new_templ = XNEW (templates);
8372 new_templ->opcode = opcode;
8373 new_templ->next = NULL;
8374
8375 if (!templ)
8376 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8377 else
8378 {
8379 new_templ->next = templ->next;
8380 templ->next = new_templ;
8381 }
8382 ++opcode;
8383 }
8384 }
8385
/* Copy at most NUM characters of SRC into DST, upper-casing each one,
   and NUL-terminate DST.  DST must have room for NUM + 1 bytes.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t n = 0;

  while (n < num && src[n] != '\0')
    {
      dst[n] = TOUPPER (src[n]);
      n++;
    }
  dst[n] = '\0';
}
8394
8395 /* Assume STR point to a lower-case string, allocate, convert and return
8396 the corresponding upper-case string. */
8397 static inline const char*
8398 get_upper_str (const char *str)
8399 {
8400 char *ret;
8401 size_t len = strlen (str);
8402 ret = XNEWVEC (char, len + 1);
8403 convert_to_upper (ret, str, len);
8404 return ret;
8405 }
8406
8407 /* MD interface: Initialization. */
8408
8409 void
8410 md_begin (void)
8411 {
8412 unsigned mach;
8413 unsigned int i;
8414
8415 if ((aarch64_ops_hsh = hash_new ()) == NULL
8416 || (aarch64_cond_hsh = hash_new ()) == NULL
8417 || (aarch64_shift_hsh = hash_new ()) == NULL
8418 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8419 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8420 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8421 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8422 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8423 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8424 || (aarch64_reg_hsh = hash_new ()) == NULL
8425 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8426 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8427 || (aarch64_pldop_hsh = hash_new ()) == NULL
8428 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8429 as_fatal (_("virtual memory exhausted"));
8430
8431 fill_instruction_hash_table ();
8432
8433 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8434 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8435 (void *) (aarch64_sys_regs + i));
8436
8437 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8438 checked_hash_insert (aarch64_pstatefield_hsh,
8439 aarch64_pstatefields[i].name,
8440 (void *) (aarch64_pstatefields + i));
8441
8442 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8443 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8444 aarch64_sys_regs_ic[i].name,
8445 (void *) (aarch64_sys_regs_ic + i));
8446
8447 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8448 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8449 aarch64_sys_regs_dc[i].name,
8450 (void *) (aarch64_sys_regs_dc + i));
8451
8452 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8453 checked_hash_insert (aarch64_sys_regs_at_hsh,
8454 aarch64_sys_regs_at[i].name,
8455 (void *) (aarch64_sys_regs_at + i));
8456
8457 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8458 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8459 aarch64_sys_regs_tlbi[i].name,
8460 (void *) (aarch64_sys_regs_tlbi + i));
8461
8462 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8463 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8464 (void *) (reg_names + i));
8465
8466 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8467 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8468 (void *) (nzcv_names + i));
8469
8470 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8471 {
8472 const char *name = aarch64_operand_modifiers[i].name;
8473 checked_hash_insert (aarch64_shift_hsh, name,
8474 (void *) (aarch64_operand_modifiers + i));
8475 /* Also hash the name in the upper case. */
8476 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8477 (void *) (aarch64_operand_modifiers + i));
8478 }
8479
8480 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8481 {
8482 unsigned int j;
8483 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8484 the same condition code. */
8485 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8486 {
8487 const char *name = aarch64_conds[i].names[j];
8488 if (name == NULL)
8489 break;
8490 checked_hash_insert (aarch64_cond_hsh, name,
8491 (void *) (aarch64_conds + i));
8492 /* Also hash the name in the upper case. */
8493 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8494 (void *) (aarch64_conds + i));
8495 }
8496 }
8497
8498 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8499 {
8500 const char *name = aarch64_barrier_options[i].name;
8501 /* Skip xx00 - the unallocated values of option. */
8502 if ((i & 0x3) == 0)
8503 continue;
8504 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8505 (void *) (aarch64_barrier_options + i));
8506 /* Also hash the name in the upper case. */
8507 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8508 (void *) (aarch64_barrier_options + i));
8509 }
8510
8511 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8512 {
8513 const char* name = aarch64_prfops[i].name;
8514 /* Skip the unallocated hint encodings. */
8515 if (name == NULL)
8516 continue;
8517 checked_hash_insert (aarch64_pldop_hsh, name,
8518 (void *) (aarch64_prfops + i));
8519 /* Also hash the name in the upper case. */
8520 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8521 (void *) (aarch64_prfops + i));
8522 }
8523
8524 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8525 {
8526 const char* name = aarch64_hint_options[i].name;
8527
8528 checked_hash_insert (aarch64_hint_opt_hsh, name,
8529 (void *) (aarch64_hint_options + i));
8530 /* Also hash the name in the upper case. */
8531 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8532 (void *) (aarch64_hint_options + i));
8533 }
8534
8535 /* Set the cpu variant based on the command-line options. */
8536 if (!mcpu_cpu_opt)
8537 mcpu_cpu_opt = march_cpu_opt;
8538
8539 if (!mcpu_cpu_opt)
8540 mcpu_cpu_opt = &cpu_default;
8541
8542 cpu_variant = *mcpu_cpu_opt;
8543
8544 /* Record the CPU type. */
8545 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8546
8547 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8548 }
8549
8550 /* Command line processing. */
8551
/* Short options: "m:" makes -m take an argument (e.g. -mabi=..., -mcpu=...),
   handled by md_parse_option.  */
const char *md_shortopts = "m:";

/* Define the -EB/-EL long-option codes; only the endiannesses the target
   actually supports get an option.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Long options table, terminated by an all-NULL entry.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8576
/* A simple -m<option> flag that stores VALUE into *VAR when matched;
   see the aarch64_opts table below.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
8585
/* Simple flag options recognised by md_parse_option; terminated by an
   all-NULL sentinel entry.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8599
/* One entry of the -mcpu= lookup table: a CPU name and the feature set
   it implies.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
8608
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  NOTE: the "all" entry must stay first;
   s_aarch64_cpu deliberately skips it by starting at aarch64_cpus + 1.
   Terminated by a NULL-name sentinel.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
8663
/* One entry of the -march= lookup table: an architecture name and the
   feature set it implies.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};
8669
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  NOTE: the "all" entry must stay first;
   s_aarch64_arch deliberately skips it by starting at aarch64_archs + 1.
   Terminated by a NULL-name sentinel.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {NULL, AARCH64_ARCH_NONE}
};
8681
/* ISA extensions.  */

/* One entry of the "+extension" lookup table: the extension name, the
   feature bits it provides, and the feature bits it depends on.  The
   REQUIRE field drives the transitive enable/disable closures in
   aarch64_feature_enable_set / aarch64_feature_disable_set.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};
8689
/* Architectural extensions accepted after "+" in -mcpu=/-march= and by
   .arch_extension; terminated by a NULL-name sentinel.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  /* "crypto" is a group alias: it turns on AES and SHA2 as well.  */
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO
					 | AARCH64_FEATURE_AES
					 | AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2
					 | AARCH64_FEATURE_SHA3, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
8740
/* A -m<option>=<value> style option whose value part is handed to a
   dedicated parser function; see aarch64_long_opts.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
8748
8749 /* Transitive closure of features depending on set. */
8750 static aarch64_feature_set
8751 aarch64_feature_disable_set (aarch64_feature_set set)
8752 {
8753 const struct aarch64_option_cpu_value_table *opt;
8754 aarch64_feature_set prev = 0;
8755
8756 while (prev != set) {
8757 prev = set;
8758 for (opt = aarch64_features; opt->name != NULL; opt++)
8759 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8760 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8761 }
8762 return set;
8763 }
8764
8765 /* Transitive closure of dependencies of set. */
8766 static aarch64_feature_set
8767 aarch64_feature_enable_set (aarch64_feature_set set)
8768 {
8769 const struct aarch64_option_cpu_value_table *opt;
8770 aarch64_feature_set prev = 0;
8771
8772 while (prev != set) {
8773 prev = set;
8774 for (opt = aarch64_features; opt->name != NULL; opt++)
8775 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8776 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8777 }
8778 return set;
8779 }
8780
8781 static int
8782 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8783 bfd_boolean ext_only)
8784 {
8785 /* We insist on extensions being added before being removed. We achieve
8786 this by using the ADDING_VALUE variable to indicate whether we are
8787 adding an extension (1) or removing it (0) and only allowing it to
8788 change in the order -1 -> 1 -> 0. */
8789 int adding_value = -1;
8790 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8791
8792 /* Copy the feature set, so that we can modify it. */
8793 *ext_set = **opt_p;
8794 *opt_p = ext_set;
8795
8796 while (str != NULL && *str != 0)
8797 {
8798 const struct aarch64_option_cpu_value_table *opt;
8799 const char *ext = NULL;
8800 int optlen;
8801
8802 if (!ext_only)
8803 {
8804 if (*str != '+')
8805 {
8806 as_bad (_("invalid architectural extension"));
8807 return 0;
8808 }
8809
8810 ext = strchr (++str, '+');
8811 }
8812
8813 if (ext != NULL)
8814 optlen = ext - str;
8815 else
8816 optlen = strlen (str);
8817
8818 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8819 {
8820 if (adding_value != 0)
8821 adding_value = 0;
8822 optlen -= 2;
8823 str += 2;
8824 }
8825 else if (optlen > 0)
8826 {
8827 if (adding_value == -1)
8828 adding_value = 1;
8829 else if (adding_value != 1)
8830 {
8831 as_bad (_("must specify extensions to add before specifying "
8832 "those to remove"));
8833 return FALSE;
8834 }
8835 }
8836
8837 if (optlen == 0)
8838 {
8839 as_bad (_("missing architectural extension"));
8840 return 0;
8841 }
8842
8843 gas_assert (adding_value != -1);
8844
8845 for (opt = aarch64_features; opt->name != NULL; opt++)
8846 if (strncmp (opt->name, str, optlen) == 0)
8847 {
8848 aarch64_feature_set set;
8849
8850 /* Add or remove the extension. */
8851 if (adding_value)
8852 {
8853 set = aarch64_feature_enable_set (opt->value);
8854 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8855 }
8856 else
8857 {
8858 set = aarch64_feature_disable_set (opt->value);
8859 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8860 }
8861 break;
8862 }
8863
8864 if (opt->name == NULL)
8865 {
8866 as_bad (_("unknown architectural extension `%s'"), str);
8867 return 0;
8868 }
8869
8870 str = ext;
8871 };
8872
8873 return 1;
8874 }
8875
8876 static int
8877 aarch64_parse_cpu (const char *str)
8878 {
8879 const struct aarch64_cpu_option_table *opt;
8880 const char *ext = strchr (str, '+');
8881 size_t optlen;
8882
8883 if (ext != NULL)
8884 optlen = ext - str;
8885 else
8886 optlen = strlen (str);
8887
8888 if (optlen == 0)
8889 {
8890 as_bad (_("missing cpu name `%s'"), str);
8891 return 0;
8892 }
8893
8894 for (opt = aarch64_cpus; opt->name != NULL; opt++)
8895 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8896 {
8897 mcpu_cpu_opt = &opt->value;
8898 if (ext != NULL)
8899 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
8900
8901 return 1;
8902 }
8903
8904 as_bad (_("unknown cpu `%s'"), str);
8905 return 0;
8906 }
8907
8908 static int
8909 aarch64_parse_arch (const char *str)
8910 {
8911 const struct aarch64_arch_option_table *opt;
8912 const char *ext = strchr (str, '+');
8913 size_t optlen;
8914
8915 if (ext != NULL)
8916 optlen = ext - str;
8917 else
8918 optlen = strlen (str);
8919
8920 if (optlen == 0)
8921 {
8922 as_bad (_("missing architecture name `%s'"), str);
8923 return 0;
8924 }
8925
8926 for (opt = aarch64_archs; opt->name != NULL; opt++)
8927 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8928 {
8929 march_cpu_opt = &opt->value;
8930 if (ext != NULL)
8931 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8932
8933 return 1;
8934 }
8935
8936 as_bad (_("unknown architecture `%s'\n"), str);
8937 return 0;
8938 }
8939
/* ABIs.  */

/* Mapping from -mabi= name to the internal ABI enumerator.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* ABIs accepted by -mabi=; scanned with ARRAY_SIZE, so no sentinel.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
8951
8952 static int
8953 aarch64_parse_abi (const char *str)
8954 {
8955 unsigned int i;
8956
8957 if (str[0] == '\0')
8958 {
8959 as_bad (_("missing abi name `%s'"), str);
8960 return 0;
8961 }
8962
8963 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8964 if (strcmp (str, aarch64_abis[i].name) == 0)
8965 {
8966 aarch64_abi = aarch64_abis[i].value;
8967 return 1;
8968 }
8969
8970 as_bad (_("unknown abi `%s'\n"), str);
8971 return 0;
8972 }
8973
/* Options of the form -m<prefix>=<value> whose value is decoded by a
   dedicated parser; terminated by an all-NULL sentinel entry.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
8985
8986 int
8987 md_parse_option (int c, const char *arg)
8988 {
8989 struct aarch64_option_table *opt;
8990 struct aarch64_long_option_table *lopt;
8991
8992 switch (c)
8993 {
8994 #ifdef OPTION_EB
8995 case OPTION_EB:
8996 target_big_endian = 1;
8997 break;
8998 #endif
8999
9000 #ifdef OPTION_EL
9001 case OPTION_EL:
9002 target_big_endian = 0;
9003 break;
9004 #endif
9005
9006 case 'a':
9007 /* Listing option. Just ignore these, we don't support additional
9008 ones. */
9009 return 0;
9010
9011 default:
9012 for (opt = aarch64_opts; opt->option != NULL; opt++)
9013 {
9014 if (c == opt->option[0]
9015 && ((arg == NULL && opt->option[1] == 0)
9016 || streq (arg, opt->option + 1)))
9017 {
9018 /* If the option is deprecated, tell the user. */
9019 if (opt->deprecated != NULL)
9020 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
9021 arg ? arg : "", _(opt->deprecated));
9022
9023 if (opt->var != NULL)
9024 *opt->var = opt->value;
9025
9026 return 1;
9027 }
9028 }
9029
9030 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9031 {
9032 /* These options are expected to have an argument. */
9033 if (c == lopt->option[0]
9034 && arg != NULL
9035 && strncmp (arg, lopt->option + 1,
9036 strlen (lopt->option + 1)) == 0)
9037 {
9038 /* If the option is deprecated, tell the user. */
9039 if (lopt->deprecated != NULL)
9040 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
9041 _(lopt->deprecated));
9042
9043 /* Call the sup-option parser. */
9044 return lopt->func (arg + strlen (lopt->option) - 1);
9045 }
9046 }
9047
9048 return 0;
9049 }
9050
9051 return 1;
9052 }
9053
9054 void
9055 md_show_usage (FILE * fp)
9056 {
9057 struct aarch64_option_table *opt;
9058 struct aarch64_long_option_table *lopt;
9059
9060 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9061
9062 for (opt = aarch64_opts; opt->option != NULL; opt++)
9063 if (opt->help != NULL)
9064 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9065
9066 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9067 if (lopt->help != NULL)
9068 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9069
9070 #ifdef OPTION_EB
9071 fprintf (fp, _("\
9072 -EB assemble code for a big-endian cpu\n"));
9073 #endif
9074
9075 #ifdef OPTION_EL
9076 fprintf (fp, _("\
9077 -EL assemble code for a little-endian cpu\n"));
9078 #endif
9079 }
9080
9081 /* Parse a .cpu directive. */
9082
9083 static void
9084 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9085 {
9086 const struct aarch64_cpu_option_table *opt;
9087 char saved_char;
9088 char *name;
9089 char *ext;
9090 size_t optlen;
9091
9092 name = input_line_pointer;
9093 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9094 input_line_pointer++;
9095 saved_char = *input_line_pointer;
9096 *input_line_pointer = 0;
9097
9098 ext = strchr (name, '+');
9099
9100 if (ext != NULL)
9101 optlen = ext - name;
9102 else
9103 optlen = strlen (name);
9104
9105 /* Skip the first "all" entry. */
9106 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9107 if (strlen (opt->name) == optlen
9108 && strncmp (name, opt->name, optlen) == 0)
9109 {
9110 mcpu_cpu_opt = &opt->value;
9111 if (ext != NULL)
9112 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9113 return;
9114
9115 cpu_variant = *mcpu_cpu_opt;
9116
9117 *input_line_pointer = saved_char;
9118 demand_empty_rest_of_line ();
9119 return;
9120 }
9121 as_bad (_("unknown cpu `%s'"), name);
9122 *input_line_pointer = saved_char;
9123 ignore_rest_of_line ();
9124 }
9125
9126
9127 /* Parse a .arch directive. */
9128
9129 static void
9130 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9131 {
9132 const struct aarch64_arch_option_table *opt;
9133 char saved_char;
9134 char *name;
9135 char *ext;
9136 size_t optlen;
9137
9138 name = input_line_pointer;
9139 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9140 input_line_pointer++;
9141 saved_char = *input_line_pointer;
9142 *input_line_pointer = 0;
9143
9144 ext = strchr (name, '+');
9145
9146 if (ext != NULL)
9147 optlen = ext - name;
9148 else
9149 optlen = strlen (name);
9150
9151 /* Skip the first "all" entry. */
9152 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9153 if (strlen (opt->name) == optlen
9154 && strncmp (name, opt->name, optlen) == 0)
9155 {
9156 mcpu_cpu_opt = &opt->value;
9157 if (ext != NULL)
9158 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9159 return;
9160
9161 cpu_variant = *mcpu_cpu_opt;
9162
9163 *input_line_pointer = saved_char;
9164 demand_empty_rest_of_line ();
9165 return;
9166 }
9167
9168 as_bad (_("unknown architecture `%s'\n"), name);
9169 *input_line_pointer = saved_char;
9170 ignore_rest_of_line ();
9171 }
9172
9173 /* Parse a .arch_extension directive. */
9174
9175 static void
9176 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9177 {
9178 char saved_char;
9179 char *ext = input_line_pointer;;
9180
9181 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9182 input_line_pointer++;
9183 saved_char = *input_line_pointer;
9184 *input_line_pointer = 0;
9185
9186 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9187 return;
9188
9189 cpu_variant = *mcpu_cpu_opt;
9190
9191 *input_line_pointer = saved_char;
9192 demand_empty_rest_of_line ();
9193 }
9194
/* Copy symbol information.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  /* Propagate the AArch64 target-private flag word from SRC to DEST.  */
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
This page took 0.26367 seconds and 4 git commands to generate.