[AArch64][SVE 22/32] Add qualifiers for merging and zeroing predication
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
/* Types of processor to assemble for.  */
#ifndef CPU_DEFAULT
#define CPU_DEFAULT AARCH64_ARCH_V8
#endif

/* String equality test.  */
#define streq(a, b)	  (strcmp (a, b) == 0)

/* Character terminating the text of an instruction.  */
#define END_OF_INSN '\0'

/* Feature set selected for the code being assembled; see also
   cpu_default below.  */
static aarch64_feature_set cpu_variant;

/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const aarch64_feature_set *mcpu_cpu_opt = NULL;
static const aarch64_feature_set *march_cpu_opt = NULL;

/* Constants for known architecture features.  */
static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
/* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
static symbolS *GOT_symbol;

/* Which ABI to use.  */
enum aarch64_abi_type
{
  AARCH64_ABI_LP64 = 0,
  AARCH64_ABI_ILP32 = 1
};

/* AArch64 ABI for the output file.  Defaults to LP64.  */
static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;

/* When non-zero, program to a 32-bit model, in which the C data types
   int, long and all pointer types are 32-bit objects (ILP32); or to a
   64-bit model, in which the C int type is 32-bits but the C long type
   and all pointer types are 64-bit objects (LP64).  */
#define ilp32_p		(aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
/* Classification of the element type of a vector register, or of an SVE
   predication suffix.  */
enum vector_el_type
{
  NT_invtype = -1,
  NT_b,
  NT_h,
  NT_s,
  NT_d,
  NT_q,
  NT_zero,	/* SVE "/z" (zeroing) predication suffix.  */
  NT_merge	/* SVE "/m" (merging) predication suffix.  */
};

/* Bits for DEFINED field in vector_type_el.  */
#define NTA_HASTYPE     1
#define NTA_HASINDEX    2
#define NTA_HASVARWIDTH 4

/* Parsed qualifier of a vector register or vector element: the element
   type, which of the fields are valid (a mask of NTA_* bits), the number
   of elements (WIDTH) and, when NTA_HASINDEX is set, the element index.  */
struct vector_type_el
{
  enum vector_el_type type;
  unsigned char defined;
  unsigned width;
  int64_t index;
};
103
#define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001

/* Relocation description for the instruction being assembled: the BFD
   reloc type, the expression it applies to, and the operand it was
   parsed from.  */
struct reloc
{
  bfd_reloc_code_real_type type;
  expressionS exp;
  int pc_rel;
  enum aarch64_opnd opnd;
  uint32_t flags;		/* Mask of FIXUP_F_* bits.  */
  unsigned need_libopcodes_p : 1;
};

struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
  {
    enum aarch64_operand_error_kind kind;
    const char *error;
  } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};

typedef struct aarch64_instruction aarch64_instruction;

/* The instruction currently being assembled; also holds the error state
   manipulated by the diagnostics helpers below.  */
static aarch64_instruction inst;

static bfd_boolean parse_operands (char *, const aarch64_opcode *);
static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
140
/* Diagnostics inline function utilities.

   These are lightweight utilities which should only be called by parse_operands
   and other parsers.  GAS processes each assembly line by parsing it against
   instruction template(s); in the case of multiple templates (for the same
   mnemonic name), those templates are tried one by one until one succeeds or
   all fail.  An assembly line may fail a few templates before being
   successfully parsed; an error saved here in most cases is not a user error
   but an error indicating the current template is not the right template.
   Therefore it is very important that errors can be saved at a low cost during
   the parsing; we don't want to slow down the whole parsing by recording
   non-user errors in detail.

   Remember that the objective is to help GAS pick up the most appropriate
   error message in the case of multiple templates, e.g. FMOV which has 8
   templates.  */
157
158 static inline void
159 clear_error (void)
160 {
161 inst.parsing_error.kind = AARCH64_OPDE_NIL;
162 inst.parsing_error.error = NULL;
163 }
164
165 static inline bfd_boolean
166 error_p (void)
167 {
168 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
169 }
170
171 static inline const char *
172 get_error_message (void)
173 {
174 return inst.parsing_error.error;
175 }
176
177 static inline enum aarch64_operand_error_kind
178 get_error_kind (void)
179 {
180 return inst.parsing_error.kind;
181 }
182
183 static inline void
184 set_error (enum aarch64_operand_error_kind kind, const char *error)
185 {
186 inst.parsing_error.kind = kind;
187 inst.parsing_error.error = error;
188 }
189
190 static inline void
191 set_recoverable_error (const char *error)
192 {
193 set_error (AARCH64_OPDE_RECOVERABLE, error);
194 }
195
196 /* Use the DESC field of the corresponding aarch64_operand entry to compose
197 the error message. */
198 static inline void
199 set_default_error (void)
200 {
201 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
202 }
203
204 static inline void
205 set_syntax_error (const char *error)
206 {
207 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
208 }
209
210 static inline void
211 set_first_syntax_error (const char *error)
212 {
213 if (! error_p ())
214 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
215 }
216
217 static inline void
218 set_fatal_syntax_error (const char *error)
219 {
220 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
221 }
222 \f
/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

/* Return value for certain parsers when the parsing fails; those parsers
   return the information of the parsed result, e.g. register number, on
   success.  */
#define PARSE_FAIL -1

/* This is an invalid condition code that means no conditional field is
   present.  */
#define COND_ALWAYS 0x10

/* Textual name of a barrier option and the value it encodes.  */
typedef struct
{
  const char *template;
  unsigned long value;
} asm_barrier_opt;

/* Textual name of an NZCV flag combination and the value it encodes.  */
typedef struct
{
  const char *template;
  uint32_t value;
} asm_nzcv;

/* Name of a relocation modifier and the BFD relocation code it selects.  */
struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};
252
/* Macros to define the register types and masks for the purpose
   of parsing.  */

/* X-macro list of register types.  BASIC_REG_TYPE(T) declares a single
   register class; MULTI_REG_TYPE(T, V) declares a class that accepts the
   union V of previously declared basic classes.  The list is expanded
   twice below: once to build the REG_TYPE_* enumerators and once to
   build the reg_type_masks[] acceptance table.  */

#undef AARCH64_REG_TYPES
#define AARCH64_REG_TYPES	\
  BASIC_REG_TYPE(R_32)	/* w[0-30] */	\
  BASIC_REG_TYPE(R_64)	/* x[0-30] */	\
  BASIC_REG_TYPE(SP_32)	/* wsp     */	\
  BASIC_REG_TYPE(SP_64)	/* sp      */	\
  BASIC_REG_TYPE(Z_32)	/* wzr     */	\
  BASIC_REG_TYPE(Z_64)	/* xzr     */	\
  BASIC_REG_TYPE(FP_B)	/* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
  BASIC_REG_TYPE(FP_H)	/* h[0-31] */	\
  BASIC_REG_TYPE(FP_S)	/* s[0-31] */	\
  BASIC_REG_TYPE(FP_D)	/* d[0-31] */	\
  BASIC_REG_TYPE(FP_Q)	/* q[0-31] */	\
  BASIC_REG_TYPE(CN)	/* c[0-7]  */	\
  BASIC_REG_TYPE(VN)	/* v[0-31] */	\
  BASIC_REG_TYPE(ZN)	/* z[0-31] */	\
  BASIC_REG_TYPE(PN)	/* p[0-15] */	\
  /* Typecheck: any 64-bit int reg (inc SP exc XZR).  */		\
  MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64))		\
  /* Typecheck: x[0-30], w[0-30] or [xw]zr.  */				\
  MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Typecheck: x[0-30], w[0-30] or {w}sp.  */				\
  MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64))			\
  /* Typecheck: any int (inc {W}SP inc [WX]ZR).  */			\
  MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Typecheck: any [BHSDQ]P FP.  */					\
  MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
  /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR).  */ \
  MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
  /* Any integer register; used for error messages only.  */		\
  MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Pseudo type to mark the end of the enumerator sequence.  */	\
  BASIC_REG_TYPE(MAX)

/* First expansion: each list entry becomes a REG_TYPE_* enumerator.  */
#undef BASIC_REG_TYPE
#define BASIC_REG_TYPE(T)	REG_TYPE_##T,
#undef MULTI_REG_TYPE
#define MULTI_REG_TYPE(T,V)	BASIC_REG_TYPE(T)

/* Register type enumerators.  */
typedef enum aarch64_reg_type_
{
  /* A list of REG_TYPE_*.  */
  AARCH64_REG_TYPES
} aarch64_reg_type;

/* Second expansion: each entry becomes the bitmask of basic register
   types that it accepts.  */
#undef BASIC_REG_TYPE
#define BASIC_REG_TYPE(T)	1 << REG_TYPE_##T,
#undef REG_TYPE
#define REG_TYPE(T)	(1 << REG_TYPE_##T)
#undef MULTI_REG_TYPE
#define MULTI_REG_TYPE(T,V)	V,

/* Structure for a hash table entry for a register.  */
typedef struct
{
  const char *name;
  unsigned char number;
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;
  unsigned char builtin;
} reg_entry;

/* Values indexed by aarch64_reg_type to assist the type checking.  */
static const unsigned reg_type_masks[] =
{
  AARCH64_REG_TYPES
};

#undef BASIC_REG_TYPE
#undef REG_TYPE
#undef MULTI_REG_TYPE
#undef AARCH64_REG_TYPES
338
339 /* Diagnostics used when we don't get a register of the expected type.
340 Note: this has to synchronized with aarch64_reg_type definitions
341 above. */
342 static const char *
343 get_reg_expected_msg (aarch64_reg_type reg_type)
344 {
345 const char *msg;
346
347 switch (reg_type)
348 {
349 case REG_TYPE_R_32:
350 msg = N_("integer 32-bit register expected");
351 break;
352 case REG_TYPE_R_64:
353 msg = N_("integer 64-bit register expected");
354 break;
355 case REG_TYPE_R_N:
356 msg = N_("integer register expected");
357 break;
358 case REG_TYPE_R64_SP:
359 msg = N_("64-bit integer or SP register expected");
360 break;
361 case REG_TYPE_R_Z:
362 msg = N_("integer or zero register expected");
363 break;
364 case REG_TYPE_R_SP:
365 msg = N_("integer or SP register expected");
366 break;
367 case REG_TYPE_R_Z_SP:
368 msg = N_("integer, zero or SP register expected");
369 break;
370 case REG_TYPE_FP_B:
371 msg = N_("8-bit SIMD scalar register expected");
372 break;
373 case REG_TYPE_FP_H:
374 msg = N_("16-bit SIMD scalar or floating-point half precision "
375 "register expected");
376 break;
377 case REG_TYPE_FP_S:
378 msg = N_("32-bit SIMD scalar or floating-point single precision "
379 "register expected");
380 break;
381 case REG_TYPE_FP_D:
382 msg = N_("64-bit SIMD scalar or floating-point double precision "
383 "register expected");
384 break;
385 case REG_TYPE_FP_Q:
386 msg = N_("128-bit SIMD scalar or floating-point quad precision "
387 "register expected");
388 break;
389 case REG_TYPE_CN:
390 msg = N_("C0 - C15 expected");
391 break;
392 case REG_TYPE_R_Z_BHSDQ_V:
393 msg = N_("register expected");
394 break;
395 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
396 msg = N_("SIMD scalar or floating-point register expected");
397 break;
398 case REG_TYPE_VN: /* any V reg */
399 msg = N_("vector register expected");
400 break;
401 case REG_TYPE_ZN:
402 msg = N_("SVE vector register expected");
403 break;
404 case REG_TYPE_PN:
405 msg = N_("SVE predicate register expected");
406 break;
407 default:
408 as_fatal (_("invalid register type %d"), reg_type);
409 }
410 return msg;
411 }
412
/* Some well known registers that we refer to directly elsewhere.  */
#define REG_SP 31

/* Instructions take 4 bytes in the object file.  */
#define INSN_SIZE	4

/* Per-namespace hash tables, keyed by name, consulted during operand
   and mnemonic parsing.  */
static struct hash_control *aarch64_ops_hsh;
static struct hash_control *aarch64_cond_hsh;
static struct hash_control *aarch64_shift_hsh;
static struct hash_control *aarch64_sys_regs_hsh;
static struct hash_control *aarch64_pstatefield_hsh;
static struct hash_control *aarch64_sys_regs_ic_hsh;
static struct hash_control *aarch64_sys_regs_dc_hsh;
static struct hash_control *aarch64_sys_regs_at_hsh;
static struct hash_control *aarch64_sys_regs_tlbi_hsh;
static struct hash_control *aarch64_reg_hsh;
static struct hash_control *aarch64_barrier_opt_hsh;
static struct hash_control *aarch64_nzcv_hsh;
static struct hash_control *aarch64_pldop_hsh;
static struct hash_control *aarch64_hint_opt_hsh;

/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	      <insn>  */

static symbolS *last_label_seen;
444
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;

typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  /* Presumably the index of the next unused slot in LITERALS — confirm
     against the pool-filling code.  */
  unsigned int next_free_entry;
  unsigned int id;
  symbolS *symbol;
  segT section;
  subsegT sub_section;
  int size;
  /* Link in the list_of_pools chain below.  */
  struct literal_pool *next;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
static literal_pool *list_of_pools = NULL;
470 \f
/* Pure syntax.  */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456 */
/* or    0d1.2345e12 */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix character that indicates the start of an immediate value.  */
#define is_immediate_prefix(C) ((C) == '#')

/* Separator character handling.  */

/* Skip at most one space character.  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
504
505 static inline bfd_boolean
506 skip_past_char (char **str, char c)
507 {
508 if (**str == c)
509 {
510 (*str)++;
511 return TRUE;
512 }
513 else
514 return FALSE;
515 }
516
#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).  */

/* TRUE while my_get_expression is running the generic expression code;
   tested by md_operand so that bad operands are flagged as O_illegal
   instead of being reported directly.  */
static bfd_boolean in_my_get_expression_p = FALSE;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_OPT_PREFIX 1
526
/* Return TRUE if the string pointed by *STR is successfully parsed
   as an valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   PREFIX_MODE is GE_OPT_PREFIX when an optional '#' immediate prefix
   may precede the expression, GE_NO_PREFIX otherwise.  When
   REJECT_ABSENT is non-zero an O_absent result is treated as failure.
   On success *STR is advanced past the expression.  */

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic expression parser with input_line_pointer temporarily
     redirected to *STR; in_my_get_expression_p makes md_operand mark bad
     operands as O_illegal instead of reporting them itself.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
593
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  Delegates to the generic IEEE helper,
   honouring the target's endianness.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
604
605 /* We handle all bad expressions here, so that we can report the faulty
606 instruction in the error message. */
607 void
608 md_operand (expressionS * exp)
609 {
610 if (in_my_get_expression_p)
611 exp->X_op = O_illegal;
612 }
613
614 /* Immediate values. */
615
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which
   is set will be the most meaningful.  Avoid overwriting it with later
   (cascading) errors by calling this function.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;

  set_syntax_error (error);
}
627
/* Similar to first_error, but this function accepts a formatted error
   message.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  /* Only format the message when no earlier (more meaningful) error has
     already been recorded.  */
  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
652
653 /* Register parsing. */
654
655 /* Generic register parser which is called by other specialized
656 register parsers.
657 CCP points to what should be the beginning of a register name.
658 If it is indeed a valid register name, advance CCP over it and
659 return the reg_entry structure; otherwise return NULL.
660 It does not issue diagnostics. */
661
662 static reg_entry *
663 parse_reg (char **ccp)
664 {
665 char *start = *ccp;
666 char *p;
667 reg_entry *reg;
668
669 #ifdef REGISTER_PREFIX
670 if (*start != REGISTER_PREFIX)
671 return NULL;
672 start++;
673 #endif
674
675 p = start;
676 if (!ISALPHA (*p) || !is_name_beginner (*p))
677 return NULL;
678
679 do
680 p++;
681 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
682
683 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
684
685 if (!reg)
686 return NULL;
687
688 *ccp = p;
689 return reg;
690 }
691
692 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
693 return FALSE. */
694 static bfd_boolean
695 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
696 {
697 return (reg_type_masks[type] & (1 << reg->type)) != 0;
698 }
699
700 /* Try to parse a base or offset register. Return the register entry
701 on success, setting *QUALIFIER to the register qualifier. Return null
702 otherwise.
703
704 Note that this function does not issue any diagnostics. */
705
706 static const reg_entry *
707 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
708 {
709 char *str = *ccp;
710 const reg_entry *reg = parse_reg (&str);
711
712 if (reg == NULL)
713 return NULL;
714
715 switch (reg->type)
716 {
717 case REG_TYPE_R_32:
718 case REG_TYPE_SP_32:
719 case REG_TYPE_Z_32:
720 *qualifier = AARCH64_OPND_QLF_W;
721 break;
722
723 case REG_TYPE_R_64:
724 case REG_TYPE_SP_64:
725 case REG_TYPE_Z_64:
726 *qualifier = AARCH64_OPND_QLF_X;
727 break;
728
729 default:
730 return NULL;
731 }
732
733 *ccp = str;
734
735 return reg;
736 }
737
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE Z/P registers, and suffixes with no leading element count, take
     a bare element size; WIDTH == 0 encodes that case.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid with an element count of 1 (i.e. "1q").  */
      if (width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* A counted vector must occupy 64 or 128 bits, except for the special
     2h (32-bit) arrangement.  */
  if (width != 0 && width * element_size != 64 && width * element_size != 128
      && !(width == 2 && element_size == 16))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
821
822 /* *STR contains an SVE zero/merge predication suffix. Parse it into
823 *PARSED_TYPE and point *STR at the end of the suffix. */
824
825 static bfd_boolean
826 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
827 {
828 char *ptr = *str;
829
830 /* Skip '/'. */
831 gas_assert (*ptr == '/');
832 ptr++;
833 switch (TOLOWER (*ptr))
834 {
835 case 'z':
836 parsed_type->type = NT_zero;
837 break;
838 case 'm':
839 parsed_type->type = NT_merge;
840 break;
841 default:
842 if (*ptr != '\0' && *ptr != ',')
843 first_error_fmt (_("unexpected character `%c' in predication type"),
844 *ptr);
845 else
846 first_error (_("missing predication type"));
847 return FALSE;
848 }
849 parsed_type->width = 0;
850 *str = ptr + 1;
851 return TRUE;
852 }
853
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start with an empty qualifier: no type, no index.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  type = reg->type;

  /* V/Z/P registers may take a ".<T>" suffix; a P register may instead
     take a "/z" or "/m" predication suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list == TRUE)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
989
990 /* Parse register.
991
992 Return the register number on success; return PARSE_FAIL otherwise.
993
994 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
995 the register (e.g. NEON double or quad reg when either has been requested).
996
997 If this is a NEON vector register with additional type information, fill
998 in the struct pointed to by VECTYPE (if non-NULL).
999
1000 This parser does not handle register list. */
1001
1002 static int
1003 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1004 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1005 {
1006 struct vector_type_el atype;
1007 char *str = *ccp;
1008 int reg = parse_typed_reg (&str, type, rtype, &atype,
1009 /*in_reg_list= */ FALSE);
1010
1011 if (reg == PARSE_FAIL)
1012 return PARSE_FAIL;
1013
1014 if (vectype)
1015 *vectype = atype;
1016
1017 *ccp = str;
1018
1019 return reg;
1020 }
1021
1022 static inline bfd_boolean
1023 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1024 {
1025 return
1026 e1.type == e2.type
1027 && e1.defined == e2.defined
1028 && e1.width == e2.width && e1.index == e2.index;
1029 }
1030
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
   4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  /* One iteration per register (or per endpoint of a "Vm-Vn" range).
     IN_RANGE is set to 1 by the loop condition below when the previous
     register is followed by '-'.  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  /* Skip the range start itself: it was already accumulated on
	     the previous iteration.  */
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue after a comma, or after '-' (flagging a range).  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* An element index after the list applies to all of its members.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1191
1192 /* Directives: register aliases. */
1193
1194 static reg_entry *
1195 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1196 {
1197 reg_entry *new;
1198 const char *name;
1199
1200 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1201 {
1202 if (new->builtin)
1203 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1204 str);
1205
1206 /* Only warn about a redefinition if it's not defined as the
1207 same register. */
1208 else if (new->number != number || new->type != type)
1209 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1210
1211 return NULL;
1212 }
1213
1214 name = xstrdup (str);
1215 new = XNEW (reg_entry);
1216
1217 new->name = name;
1218 new->number = number;
1219 new->type = type;
1220 new->builtin = FALSE;
1221
1222 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1223 abort ();
1224
1225 return new;
1226 }
1227
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.

   NEWNAME points to the start of the candidate alias name; P points
   just past it, at what should be the " .req " marker.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Return TRUE: the line was a .req, even though it failed.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* NB: P is reused below as a scratch cursor over NBUF.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the case-folded variant if it differs from the
	 name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1307
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reaching here means ".req" appeared with no alias name before it,
     which is invalid syntax.  */
  as_bad (_("invalid syntax for .req directive"));
}
1315
/* The .unreq directive deletes an alias which was previously defined
   by .req.  For example:

       my_alias .req r11
       .unreq my_alias

   The alias itself plus its all-uppercase and all-lowercase variants
   (created by create_register_alias) are all removed.  */

static void
s_unreq (int a ATTRIBUTE_UNUSED)
{
  char *name;
  char saved_char;

  name = input_line_pointer;

  /* Scan to the end of the alias name.  */
  while (*input_line_pointer != 0
	 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
    ++input_line_pointer;

  /* Temporarily NUL-terminate the name in the input buffer; the
     original character is restored before returning.  */
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  if (!*name)
    as_bad (_("invalid syntax for .unreq directive"));
  else
    {
      reg_entry *reg = hash_find (aarch64_reg_hsh, name);

      if (!reg)
	as_bad (_("unknown register alias '%s'"), name);
      else if (reg->builtin)
	as_warn (_("ignoring attempt to undefine built-in register '%s'"),
		 name);
      else
	{
	  char *p;
	  char *nbuf;

	  hash_delete (aarch64_reg_hsh, name, FALSE);
	  free ((char *) reg->name);
	  free (reg);

	  /* Also locate the all upper case and all lower case versions.
	     Do not complain if we cannot find one or the other as it
	     was probably deleted above.  */

	  nbuf = strdup (name);
	  for (p = nbuf; *p; p++)
	    *p = TOUPPER (*p);
	  reg = hash_find (aarch64_reg_hsh, nbuf);
	  if (reg)
	    {
	      hash_delete (aarch64_reg_hsh, nbuf, FALSE);
	      free ((char *) reg->name);
	      free (reg);
	    }

	  for (p = nbuf; *p; p++)
	    *p = TOLOWER (*p);
	  reg = hash_find (aarch64_reg_hsh, nbuf);
	  if (reg)
	    {
	      hash_delete (aarch64_reg_hsh, nbuf, FALSE);
	      free ((char *) reg->name);
	      free (reg);
	    }

	  free (nbuf);
	}
    }

  /* Restore the input buffer and consume the rest of the line.  */
  *input_line_pointer = saved_char;
  demand_empty_rest_of_line ();
}
1389
1390 /* Directives: Instruction set selection. */
1391
1392 #ifdef OBJ_ELF
1393 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1394 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1395 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1396 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1397
/* Create a new mapping symbol for the transition to STATE at offset
   VALUE within FRAG.  Emits "$d" for data regions and "$x" for
   instruction regions, per the AArch64 ELF mapping-symbol convention.
   Also maintains the frag's first_map/last_map bookkeeping so that two
   mapping symbols never share an offset within one frag.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Discard the superseded symbol at offset 0.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* A previous symbol at the same offset is replaced by this one.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1453
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Mark BYTES bytes starting at offset VALUE of FRAG as data ("$d"),
   then resume STATE at VALUE + BYTES.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Bracket the padding: data starts at VALUE, STATE resumes after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1481
1482 static void mapping_state_2 (enum mstate state, int max_chars);
1483
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  Records section alignment for
   instructions and emits any mapping symbol the transition needs.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Record the new state and place the mapping symbol.  */
  mapping_state_2 (state, 0);
}
1525
/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Mapping symbols are only meaningful in normal sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Place the symbol before the already-allocated bytes.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
1545 #else
1546 #define mapping_state(x) /* nothing */
1547 #define mapping_state_2(x, y) /* nothing */
1548 #endif
1549
1550 /* Directives: sectioning and alignment. */
1551
/* Handle the .bss directive: switch to the BSS section and mark the
   current position as data for mapping-symbol purposes.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1561
/* Handle the .even directive: align the current position to a 2-byte
   boundary and record that alignment for the section.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1573
1574 /* Directives: Literal pools. */
1575
1576 static literal_pool *
1577 find_literal_pool (int size)
1578 {
1579 literal_pool *pool;
1580
1581 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1582 {
1583 if (pool->section == now_seg
1584 && pool->sub_section == now_subseg && pool->size == size)
1585 break;
1586 }
1587
1588 return pool;
1589 }
1590
1591 static literal_pool *
1592 find_or_make_literal_pool (int size)
1593 {
1594 /* Next literal pool ID number. */
1595 static unsigned int latest_pool_num = 1;
1596 literal_pool *pool;
1597
1598 pool = find_literal_pool (size);
1599
1600 if (pool == NULL)
1601 {
1602 /* Create a new pool. */
1603 pool = XNEW (literal_pool);
1604 if (!pool)
1605 return NULL;
1606
1607 /* Currently we always put the literal pool in the current text
1608 section. If we were generating "small" model code where we
1609 knew that all code and initialised data was within 1MB then
1610 we could output literals to mergeable, read-only data
1611 sections. */
1612
1613 pool->next_free_entry = 0;
1614 pool->section = now_seg;
1615 pool->sub_section = now_subseg;
1616 pool->size = size;
1617 pool->next = list_of_pools;
1618 pool->symbol = NULL;
1619
1620 /* Add it to the list. */
1621 list_of_pools = pool;
1622 }
1623
1624 /* New pools, and emptied pools, will have a NULL symbol. */
1625 if (pool->symbol == NULL)
1626 {
1627 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1628 (valueT) 0, &zero_address_frag);
1629 pool->id = latest_pool_num++;
1630 }
1631
1632 /* Done. */
1633 return pool;
1634 }
1635
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  On success *EXP is
   rewritten to reference the pool entry (symbol + offset).  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  Constants
     and symbolic expressions are deduplicated separately.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP to point at the pool entry: the pool's anchor symbol
     plus the entry's byte offset.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1695
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  Thats what these functions do.

   Give the previously-created SYMBOLP its NAME, SEGMENT, value VALU
   and owning FRAG, then append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1746
1747
/* Handle the .ltorg/.pool directives: dump every non-empty literal
   pool (4-byte and 8-byte entry sizes) for the current section at the
   current position, then mark the pools as empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* align 2..4 covers entry sizes 4, 8 and 16 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 in the name keeps it out of the user namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Resolve the pool's anchor symbol to this spot.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1806
1807 #ifdef OBJ_ELF
1808 /* Forward declarations for functions below, in the MD interface
1809 section. */
1810 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1811 static struct reloc_table_entry * find_reloc_table_entry (char **);
1812
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Handle .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8):
   emit a comma-separated list of NBYTES-wide data values.  A ":reloc:"
   suffix on a symbol operand is recognized but currently rejected.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc:" suffix on the operand.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1868
1869 #endif /* OBJ_ELF */
1870
1871 /* Output a 32-bit word, but mark as an instruction. */
1872
1873 static void
1874 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1875 {
1876 expressionS exp;
1877
1878 #ifdef md_flush_pending_output
1879 md_flush_pending_output ();
1880 #endif
1881
1882 if (is_it_end_of_statement ())
1883 {
1884 demand_empty_rest_of_line ();
1885 return;
1886 }
1887
1888 /* Sections are assumed to start aligned. In executable section, there is no
1889 MAP_DATA symbol pending. So we only align the address during
1890 MAP_DATA --> MAP_INSN transition.
1891 For other sections, this is not guaranteed. */
1892 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1893 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1894 frag_align_code (2, 0);
1895
1896 #ifdef OBJ_ELF
1897 mapping_state (MAP_INSN);
1898 #endif
1899
1900 do
1901 {
1902 expression (&exp);
1903 if (exp.X_op != O_constant)
1904 {
1905 as_bad (_("constant expression required"));
1906 ignore_rest_of_line ();
1907 return;
1908 }
1909
1910 if (target_big_endian)
1911 {
1912 unsigned int val = exp.X_add_number;
1913 exp.X_add_number = SWAP_32 (val);
1914 }
1915 emit_expr (&exp, 4);
1916 }
1917 while (*input_line_pointer++ == ',');
1918
1919 /* Put terminator back into stream. */
1920 input_line_pointer--;
1921 demand_empty_rest_of_line ();
1922 }
1923
1924 #ifdef OBJ_ELF
1925 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1926
1927 static void
1928 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
1929 {
1930 expressionS exp;
1931
1932 expression (&exp);
1933 frag_grow (4);
1934 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1935 BFD_RELOC_AARCH64_TLSDESC_ADD);
1936
1937 demand_empty_rest_of_line ();
1938 }
1939
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
1959
1960 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
1961
1962 static void
1963 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
1964 {
1965 expressionS exp;
1966
1967 expression (&exp);
1968 frag_grow (4);
1969 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1970 BFD_RELOC_AARCH64_TLSDESC_LDR);
1971
1972 demand_empty_rest_of_line ();
1973 }
1974 #endif /* OBJ_ELF */
1975
1976 static void s_aarch64_arch (int);
1977 static void s_aarch64_cpu (int);
1978 static void s_aarch64_arch_extension (int);
1979
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The argument is the data size in bytes: .word/.long emit 4 bytes,
     .xword/.dword emit 8.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
2009 \f
2010
2011 /* Check whether STR points to a register name followed by a comma or the
2012 end of line; REG_TYPE indicates which register types are checked
2013 against. Return TRUE if STR is such a register name; otherwise return
2014 FALSE. The function does not intend to produce any diagnostics, but since
2015 the register parser aarch64_reg_parse, which is called by this function,
2016 does produce diagnostics, we call clear_error to clear any diagnostics
2017 that may be generated by aarch64_reg_parse.
2018 Also, the function returns FALSE directly if there is any user error
2019 present at the function entry. This prevents the existing diagnostics
2020 state from being spoiled.
2021 The function currently serves parse_constant_immediate and
2022 parse_big_immediate only. */
2023 static bfd_boolean
2024 reg_name_p (char *str, aarch64_reg_type reg_type)
2025 {
2026 int reg;
2027
2028 /* Prevent the diagnostics state from being spoiled. */
2029 if (error_p ())
2030 return FALSE;
2031
2032 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2033
2034 /* Clear the parsing error that may be set by the reg parser. */
2035 clear_error ();
2036
2037 if (reg == PARSE_FAIL)
2038 return FALSE;
2039
2040 skip_whitespace (str);
2041 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2042 return TRUE;
2043
2044 return FALSE;
2045 }
2046
2047 /* Parser functions used exclusively in instruction operands. */
2048
2049 /* Parse an immediate expression which may not be constant.
2050
2051 To prevent the expression parser from pushing a register name
2052 into the symbol table as an undefined symbol, firstly a check is
2053 done to find out whether STR is a register of type REG_TYPE followed
2054 by a comma or the end of line. Return FALSE if STR is such a string. */
2055
2056 static bfd_boolean
2057 parse_immediate_expression (char **str, expressionS *exp,
2058 aarch64_reg_type reg_type)
2059 {
2060 if (reg_name_p (*str, reg_type))
2061 {
2062 set_recoverable_error (_("immediate operand required"));
2063 return FALSE;
2064 }
2065
2066 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2067
2068 if (exp->X_op == O_absent)
2069 {
2070 set_fatal_syntax_error (_("missing immediate expression"));
2071 return FALSE;
2072 }
2073
2074 return TRUE;
2075 }
2076
2077 /* Constant immediate-value read function for use in insn parsing.
2078 STR points to the beginning of the immediate (with the optional
2079 leading #); *VAL receives the value. REG_TYPE says which register
2080 names should be treated as registers rather than as symbolic immediates.
2081
2082 Return TRUE on success; otherwise return FALSE. */
2083
2084 static bfd_boolean
2085 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2086 {
2087 expressionS exp;
2088
2089 if (! parse_immediate_expression (str, &exp, reg_type))
2090 return FALSE;
2091
2092 if (exp.X_op != O_constant)
2093 {
2094 set_syntax_error (_("constant expression required"));
2095 return FALSE;
2096 }
2097
2098 *val = exp.X_add_number;
2099 return TRUE;
2100 }
2101
/* Compress the IEEE single-precision word IMM into the 8-bit AArch64
   FP immediate encoding: the sign bit (b[31]) becomes b[7] and the
   exponent/fraction bits b[25:19] become b[6:0].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t sign_bit = (imm >> 31) & 0x1;	/* b[31] -> b[7].  */
  uint32_t exp_frac = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */

  return (sign_bit << 7) | exp_frac;
}
2108
2109 /* Return TRUE if the single-precision floating-point value encoded in IMM
2110 can be expressed in the AArch64 8-bit signed floating-point format with
2111 3-bit exponent and normalized 4 bits of precision; in other words, the
2112 floating-point value must be expressable as
2113 (+/-) n / 16 * power (2, r)
2114 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2115
2116 static bfd_boolean
2117 aarch64_imm_float_p (uint32_t imm)
2118 {
2119 /* If a single-precision floating-point value has the following bit
2120 pattern, it can be expressed in the AArch64 8-bit floating-point
2121 format:
2122
2123 3 32222222 2221111111111
2124 1 09876543 21098765432109876543210
2125 n Eeeeeexx xxxx0000000000000000000
2126
2127 where n, e and each x are either 0 or 1 independently, with
2128 E == ~ e. */
2129
2130 uint32_t pattern;
2131
2132 /* Prepare the pattern for 'Eeeeee'. */
2133 if (((imm >> 30) & 0x1) == 0)
2134 pattern = 0x3e000000;
2135 else
2136 pattern = 0x40000000;
2137
2138 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2139 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2140 }
2141
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  On FALSE return, *FPWORD is left untouched.  */

static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s; they would be lost in the narrower
     23-bit float fraction.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~: the exponent must be in the range a float can hold.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111: exclude infinities and NaNs, whose
     float exponent would be all ones.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Repack sign, exponent and significand into the float layout.  */
  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));			/* 3 S bits.  */
  return TRUE;
}
2189
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   Two input forms are accepted: a hexadecimal IEEE754 encoding
   ("0x...") or a decimal floating-point literal (which must contain
   '.', 'e' or 'E' to distinguish it from an integer).

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A double encoding is only accepted if it converts to a
	     float exactly.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else
    {
      if (reg_name_p (str, reg_type))
	{
	  set_recoverable_error (_("immediate operand required"));
	  return FALSE;
	}

      /* We must not accidentally parse an integer as a floating-point number.
	 Make sure that the value we parse is not an integer by checking for
	 special characters '.' or 'e'.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Convert the decimal literal to IEEE single-precision.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2282
2283 /* Less-generic immediate-value read function with the possibility of loading
2284 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2285 instructions.
2286
2287 To prevent the expression parser from pushing a register name into the
2288 symbol table as an undefined symbol, a check is firstly done to find
2289 out whether STR is a register of type REG_TYPE followed by a comma or
2290 the end of line. Return FALSE if STR is such a register. */
2291
2292 static bfd_boolean
2293 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2294 {
2295 char *ptr = *str;
2296
2297 if (reg_name_p (ptr, reg_type))
2298 {
2299 set_syntax_error (_("immediate operand required"));
2300 return FALSE;
2301 }
2302
2303 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2304
2305 if (inst.reloc.exp.X_op == O_constant)
2306 *imm = inst.reloc.exp.X_add_number;
2307
2308 *str = ptr;
2309
2310 return TRUE;
2311 }
2312
2313 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2314 if NEED_LIBOPCODES is non-zero, the fixup will need
2315 assistance from the libopcodes. */
2316
2317 static inline void
2318 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2319 const aarch64_opnd_info *operand,
2320 int need_libopcodes_p)
2321 {
2322 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2323 reloc->opnd = operand->type;
2324 if (need_libopcodes_p)
2325 reloc->need_libopcodes_p = 1;
2326 };
2327
2328 /* Return TRUE if the instruction needs to be fixed up later internally by
2329 the GAS; otherwise return FALSE. */
2330
2331 static inline bfd_boolean
2332 aarch64_gas_internal_fixup_p (void)
2333 {
2334 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2335 }
2336
/* Assign the immediate value to the relevant field in *OPERAND if
   RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
   needs an internal fixup in a later stage.
   ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
   IMM.VALUE that may get assigned with the constant.
   NEED_LIBOPCODES_P and SKIP_P are only consulted in the fixup case;
   see aarch64_set_gas_internal_fixup and the comment on SKIP below.  */
static inline void
assign_imm_if_const_or_fixup_later (struct reloc *reloc,
				    aarch64_opnd_info *operand,
				    int addr_off_p,
				    int need_libopcodes_p,
				    int skip_p)
{
  if (reloc->exp.X_op == O_constant)
    {
      if (addr_off_p)
	operand->addr.offset.imm = reloc->exp.X_add_number;
      else
	operand->imm.value = reloc->exp.X_add_number;
      /* The value is known now, so no relocation is required.  */
      reloc->type = BFD_RELOC_UNUSED;
    }
  else
    {
      aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
      /* Tell libopcodes to ignore this operand or not.  This is helpful
	 when one of the operands needs to be fixed up later but we need
	 libopcodes to check the other operands.  */
      operand->skip = skip_p;
    }
}
2366
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  /* Modifier name as written in the source, without the colons.  */
  const char *name;
  /* Non-zero if the resulting relocation is PC-relative.  */
  int pc_rel;
  /* Relocation code to use for each instruction context in which the
     modifier may appear; 0 means the modifier is not allowed there.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2386
/* Table of the recognized relocation modifiers; a 0 in a field means
   the modifier is not allowed in that instruction context (see
   struct reloc_table_entry).  */
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVK, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVK.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   0,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15-bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14-bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
2851
2852 /* Given the address of a pointer pointing to the textual name of a
2853 relocation as may appear in assembler source, attempt to find its
2854 details in reloc_table. The pointer will be updated to the character
2855 after the trailing colon. On failure, NULL will be returned;
2856 otherwise return the reloc_table_entry. */
2857
2858 static struct reloc_table_entry *
2859 find_reloc_table_entry (char **str)
2860 {
2861 unsigned int i;
2862 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2863 {
2864 int length = strlen (reloc_table[i].name);
2865
2866 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2867 && (*str)[length] == ':')
2868 {
2869 *str += (length + 1);
2870 return &reloc_table[i];
2871 }
2872 }
2873
2874 return NULL;
2875 }
2876
/* Mode argument to parse_shift and parse_shifter_operand.  */
enum parse_shift_mode
{
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
2888
2889 /* Parse a <shift> operator on an AArch64 data processing instruction.
2890 Return TRUE on success; otherwise return FALSE. */
2891 static bfd_boolean
2892 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2893 {
2894 const struct aarch64_name_value_pair *shift_op;
2895 enum aarch64_modifier_kind kind;
2896 expressionS exp;
2897 int exp_has_prefix;
2898 char *s = *str;
2899 char *p = s;
2900
2901 for (p = *str; ISALPHA (*p); p++)
2902 ;
2903
2904 if (p == *str)
2905 {
2906 set_syntax_error (_("shift expression expected"));
2907 return FALSE;
2908 }
2909
2910 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2911
2912 if (shift_op == NULL)
2913 {
2914 set_syntax_error (_("shift operator expected"));
2915 return FALSE;
2916 }
2917
2918 kind = aarch64_get_operand_modifier (shift_op);
2919
2920 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2921 {
2922 set_syntax_error (_("invalid use of 'MSL'"));
2923 return FALSE;
2924 }
2925
2926 switch (mode)
2927 {
2928 case SHIFTED_LOGIC_IMM:
2929 if (aarch64_extend_operator_p (kind) == TRUE)
2930 {
2931 set_syntax_error (_("extending shift is not permitted"));
2932 return FALSE;
2933 }
2934 break;
2935
2936 case SHIFTED_ARITH_IMM:
2937 if (kind == AARCH64_MOD_ROR)
2938 {
2939 set_syntax_error (_("'ROR' shift is not permitted"));
2940 return FALSE;
2941 }
2942 break;
2943
2944 case SHIFTED_LSL:
2945 if (kind != AARCH64_MOD_LSL)
2946 {
2947 set_syntax_error (_("only 'LSL' shift is permitted"));
2948 return FALSE;
2949 }
2950 break;
2951
2952 case SHIFTED_REG_OFFSET:
2953 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2954 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2955 {
2956 set_fatal_syntax_error
2957 (_("invalid shift for the register offset addressing mode"));
2958 return FALSE;
2959 }
2960 break;
2961
2962 case SHIFTED_LSL_MSL:
2963 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2964 {
2965 set_syntax_error (_("invalid shift operator"));
2966 return FALSE;
2967 }
2968 break;
2969
2970 default:
2971 abort ();
2972 }
2973
2974 /* Whitespace can appear here if the next thing is a bare digit. */
2975 skip_whitespace (p);
2976
2977 /* Parse shift amount. */
2978 exp_has_prefix = 0;
2979 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2980 exp.X_op = O_absent;
2981 else
2982 {
2983 if (is_immediate_prefix (*p))
2984 {
2985 p++;
2986 exp_has_prefix = 1;
2987 }
2988 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2989 }
2990 if (exp.X_op == O_absent)
2991 {
2992 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2993 {
2994 set_syntax_error (_("missing shift amount"));
2995 return FALSE;
2996 }
2997 operand->shifter.amount = 0;
2998 }
2999 else if (exp.X_op != O_constant)
3000 {
3001 set_syntax_error (_("constant shift amount required"));
3002 return FALSE;
3003 }
3004 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
3005 {
3006 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3007 return FALSE;
3008 }
3009 else
3010 {
3011 operand->shifter.amount = exp.X_add_number;
3012 operand->shifter.amount_present = 1;
3013 }
3014
3015 operand->shifter.operator_present = 1;
3016 operand->shifter.kind = kind;
3017
3018 *str = p;
3019 return TRUE;
3020 }
3021
3022 /* Parse a <shifter_operand> for a data processing instruction:
3023
3024 #<immediate>
3025 #<immediate>, LSL #imm
3026
3027 Validation of immediate operands is deferred to md_apply_fix.
3028
3029 Return TRUE on success; otherwise return FALSE. */
3030
3031 static bfd_boolean
3032 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3033 enum parse_shift_mode mode)
3034 {
3035 char *p;
3036
3037 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3038 return FALSE;
3039
3040 p = *str;
3041
3042 /* Accept an immediate expression. */
3043 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3044 return FALSE;
3045
3046 /* Accept optional LSL for arithmetic immediate values. */
3047 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3048 if (! parse_shift (&p, operand, SHIFTED_LSL))
3049 return FALSE;
3050
3051 /* Not accept any shifter for logical immediate values. */
3052 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3053 && parse_shift (&p, operand, mode))
3054 {
3055 set_syntax_error (_("unexpected shift operator"));
3056 return FALSE;
3057 }
3058
3059 *str = p;
3060 return TRUE;
3061 }
3062
3063 /* Parse a <shifter_operand> for a data processing instruction:
3064
3065 <Rm>
3066 <Rm>, <shift>
3067 #<immediate>
3068 #<immediate>, LSL #imm
3069
3070 where <shift> is handled by parse_shift above, and the last two
3071 cases are handled by the function above.
3072
3073 Validation of immediate operands is deferred to md_apply_fix.
3074
3075 Return TRUE on success; otherwise return FALSE. */
3076
3077 static bfd_boolean
3078 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3079 enum parse_shift_mode mode)
3080 {
3081 const reg_entry *reg;
3082 aarch64_opnd_qualifier_t qualifier;
3083 enum aarch64_operand_class opd_class
3084 = aarch64_get_operand_class (operand->type);
3085
3086 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3087 if (reg)
3088 {
3089 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3090 {
3091 set_syntax_error (_("unexpected register in the immediate operand"));
3092 return FALSE;
3093 }
3094
3095 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3096 {
3097 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3098 return FALSE;
3099 }
3100
3101 operand->reg.regno = reg->number;
3102 operand->qualifier = qualifier;
3103
3104 /* Accept optional shift operation on register. */
3105 if (! skip_past_comma (str))
3106 return TRUE;
3107
3108 if (! parse_shift (str, operand, mode))
3109 return FALSE;
3110
3111 return TRUE;
3112 }
3113 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3114 {
3115 set_syntax_error
3116 (_("integer register expected in the extended/shifted operand "
3117 "register"));
3118 return FALSE;
3119 }
3120
3121 /* We have a shifted immediate variable. */
3122 return parse_shifter_operand_imm (str, operand, mode);
3123 }
3124
/* Parse a shifter operand that may be prefixed by a relocation
   modifier (":rello:" or "#:rello:"); anything without such a prefix
   is punted to parse_shifter_operand.  On a relocation, the ADD
   variant of the modifier is recorded in inst.reloc.
   Return TRUE on success; return FALSE otherwise.  */

static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3185
/* Parse all forms of an address expression.  Information is written
   to *OPERAND and/or inst.reloc.

   The A64 instruction set has the following addressing modes:

   Offset
     [base]			// in SIMD ld/st structure
     [base{,#0}]		// in ld/st exclusive
     [base{,#imm}]
     [base,Xm{,LSL #imm}]
     [base,Xm,SXTX {#imm}]
     [base,Wm,(S|U)XTW {#imm}]
   Pre-indexed
     [base,#imm]!
   Post-indexed
     [base],#imm
     [base],Xm			// in SIMD ld/st structure
   PC-relative (literal)
     label
     =immediate

   (As a convenience, the notation "=immediate" is permitted in conjunction
   with the pc-relative literal load instructions to automatically place an
   immediate value or symbolic address in a nearby literal pool and generate
   a hidden label which references it.)

   Upon a successful parsing, the address structure in *OPERAND will be
   filled in the following way:

     .base_regno = <base>
     .offset.is_reg	// 1 if the offset is a register
     .offset.imm = <imm>
     .offset.regno = <Rm>

   For different addressing modes defined in the A64 ISA:

   Offset
     .pcrel=0; .preind=1; .postind=0; .writeback=0
   Pre-indexed
     .pcrel=0; .preind=1; .postind=0; .writeback=1
   Post-indexed
     .pcrel=0; .preind=0; .postind=1; .writeback=1
   PC-relative (literal)
     .pcrel=1; .preind=1; .postind=0; .writeback=0

   The shift/extension information, if any, will be stored in .shifter.

   It is the caller's responsibility to check for addressing modes not
   supported by the instruction, and to set inst.reloc.type.  */

static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand)
{
  char *p = *str;
  const reg_entry *reg;
  aarch64_opnd_qualifier_t base_qualifier;
  aarch64_opnd_qualifier_t offset_qualifier;
  /* Shorthand: any expression parsed here lands in inst.reloc.exp.  */
  expressionS *exp = &inst.reloc.exp;

  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* Pick the relocation variant appropriate to the operand.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  /* A 64-bit base register (or SP) is mandatory inside brackets.  */
  reg = aarch64_reg_parse_32_64 (&p, &base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R64_SP))
    {
      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R64_SP)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_reg_parse_32_64 (&p, &offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
	      return FALSE;
	    }

	  /* [Xn,Rm  */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm,  */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifter kinds require a 64-bit index register.  */
	      if (offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	    }
	  else if (offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* The remaining (extending) kinds require a 32-bit index.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid expression in the address"));
	      return FALSE;
	    }
	  /* [Xn,<expr>  */
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      reg = aarch64_reg_parse_32_64 (&p, &offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return FALSE;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  /* Reject [Rn]! */
	  set_syntax_error (_("missing offset in the pre-indexed address"));
	  return FALSE;
	}
      operand->addr.preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }

  *str = p;
  return TRUE;
}
3490
3491 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3492 on success. */
3493 static bfd_boolean
3494 parse_address (char **str, aarch64_opnd_info *operand)
3495 {
3496 return parse_address_main (str, operand);
3497 }
3498
/* Parse an operand for a MOVZ, MOVN or MOVK instruction.
   Return TRUE on success; otherwise return FALSE.

   On success the expression is left in inst.reloc.exp and
   *INTERNAL_FIXUP_P says whether GAS must resolve the value itself
   (no relocation modifier was given).  */
static bfd_boolean
parse_half (char **str, int *internal_fixup_p)
{
  char *p = *str;

  /* The immediate may optionally be prefixed by '#'.  */
  skip_past_char (&p, '#');

  gas_assert (internal_fixup_p);
  *internal_fixup_p = 0;

  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* The modifier must be one that is valid on MOVZ/MOVN/MOVK.  */
      if (entry->movw_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      inst.reloc.type = entry->movw_type;
    }
  else
    /* No relocation modifier: GAS will resolve the fixup internally.  */
    *internal_fixup_p = 1;

  if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
    return FALSE;

  *str = p;
  return TRUE;
}
3541
/* Parse an operand for an ADRP instruction:
   ADRP <Xd>, <label>
   Return TRUE on success; otherwise return FALSE.

   Sets inst.reloc.type (defaulting to the plain ADR_HI21 pc-relative
   relocation when no modifier is given), marks the relocation
   pc-relative and leaves the label expression in inst.reloc.exp.  */

static bfd_boolean
parse_adrp (char **str)
{
  char *p;

  p = *str;
  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* The modifier must be one that is valid on ADRP.  */
      if (entry->adrp_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      inst.reloc.type = entry->adrp_type;
    }
  else
    inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;

  inst.reloc.pc_rel = 1;

  if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
    return FALSE;

  *str = p;
  return TRUE;
}
3584
3585 /* Miscellaneous. */
3586
3587 /* Parse an option for a preload instruction. Returns the encoding for the
3588 option, or PARSE_FAIL. */
3589
3590 static int
3591 parse_pldop (char **str)
3592 {
3593 char *p, *q;
3594 const struct aarch64_name_value_pair *o;
3595
3596 p = q = *str;
3597 while (ISALNUM (*q))
3598 q++;
3599
3600 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3601 if (!o)
3602 return PARSE_FAIL;
3603
3604 *str = q;
3605 return o->value;
3606 }
3607
3608 /* Parse an option for a barrier instruction. Returns the encoding for the
3609 option, or PARSE_FAIL. */
3610
3611 static int
3612 parse_barrier (char **str)
3613 {
3614 char *p, *q;
3615 const asm_barrier_opt *o;
3616
3617 p = q = *str;
3618 while (ISALPHA (*q))
3619 q++;
3620
3621 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3622 if (!o)
3623 return PARSE_FAIL;
3624
3625 *str = q;
3626 return o->value;
3627 }
3628
/* Parse an operand for a PSB barrier.  Set *HINT_OPT to the hint-option record
   return 0 if successful.  Otherwise return PARSE_FAIL.  */

static int
parse_barrier_psb (char **str,
		   const struct aarch64_name_value_pair ** hint_opt)
{
  char *p, *q;
  const struct aarch64_name_value_pair *o;

  /* The option name is a run of alphabetic characters.  */
  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
  if (!o)
    {
      set_fatal_syntax_error
	( _("unknown or missing option to PSB"));
      return PARSE_FAIL;
    }

  /* 0x11 is the hint-option encoding of CSYNC.  */
  if (o->value != 0x11)
    {
      /* PSB only accepts option name 'CSYNC'.  */
      set_syntax_error
	(_("the specified option is not accepted for PSB"));
      return PARSE_FAIL;
    }

  *str = q;
  *hint_opt = o;
  return 0;
}
3663
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.
*/

static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
	       int imple_defined_p, int pstatefield_p)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy the register name into BUF, lower-cased, for the hash lookup.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF be large enough.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Range-check each field of the implementation-defined name.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the MSR/MRS operand encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	}
    }
  else
    {
      /* Known name: diagnose uses not supported by the selected CPU,
	 then use the table encoding.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
    }

  *str = q;
  return value;
}
3726
/* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
   for the option, or NULL.  */

static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_ins_reg *o;

  /* Copy the name into BUF, lower-cased, for the hash lookup;
     silently truncate past 31 characters (no valid name is longer).  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';

  o = hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  /* Diagnose but still return the entry, so assembly can continue.  */
  if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
    as_bad (_("selected processor does not support system register "
	      "name '%s'"), buf);

  *str = q;
  return o;
}
3754 \f
/* Parsing macros used by parse_operands.  They all assume local
   variables STR (the parse cursor) and a FAILURE label in the caller;
   most also use VAL, REG, RTYPE, QUALIFIER, INFO or IMM_REG_TYPE.  */

/* Require the literal character CHR next in the input.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into VAL, or fail.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE and record its
   number and qualifier in INFO, or fail.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL without range checking.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Evaluate EXPR (typically a parse call) and fail if it is false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
3800 \f
/* Encode the 12-bit immediate of an add/sub-immediate instruction;
   the value occupies bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t value)
{
  return value << 10;
}
3807
/* Encode the shift-amount field of an add/sub-immediate instruction;
   the count occupies bits starting at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t count)
{
  return count << 22;
}
3814
3815
/* Encode the 21-bit immediate of an ADR instruction, which is split
   between immlo (bits [30:29]) and immhi (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t value)
{
  uint32_t immlo = value & 0x3;		   /* [1:0] -> [30:29] */
  uint32_t immhi = value & (0x7ffff << 2); /* [20:2] -> [23:5]  */

  return (immlo << 29) | (immhi << 3);
}
3823
/* Encode the 16-bit immediate of a move-wide instruction; the value
   occupies bits [20:5].  */
static inline uint32_t
encode_movw_imm (uint32_t value)
{
  return value << 5;
}
3830
/* Encode the 26-bit offset of an unconditional branch; the offset
   occupies bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t offset)
{
  return offset & 0x3ffffff;
}
3837
/* Encode the 19-bit offset of a conditional branch or compare-and-branch;
   the offset occupies bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t offset)
{
  return (offset & 0x7ffff) << 5;
}
3844
/* Encode the 19-bit offset of a load-literal instruction; the offset
   occupies bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t offset)
{
  return (offset & 0x7ffff) << 5;
}
3851
/* Encode the 14-bit offset of a test-and-branch instruction; the offset
   occupies bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t offset)
{
  return (offset & 0x3fff) << 5;
}
3858
/* Encode the 16-bit immediate of SVC/HVC/SMC; the value occupies
   bits [20:5].  */
static inline uint32_t
encode_svc_imm (uint32_t value)
{
  return value << 5;
}
3865
/* Re-encode add(s) to sub(s), or sub(s) to add(s), by flipping the
   op bit (bit 30) of the encoding.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ 0x40000000;
}
3872
/* Force a MOVZ/MOVN-family encoding to MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000;
}
3878
/* Force a MOVZ/MOVN-family encoding to MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & 0xbfffffffu;
}
3884
3885 /* Overall per-instruction processing. */
3886
3887 /* We need to be able to fix up arbitrary expressions in some statements.
3888 This is so that we can handle symbols that are an arbitrary distance from
3889 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3890 which returns part of an address in a form which will be valid for
3891 a data instruction. We do this by pushing the expression into a symbol
3892 in the expr_section, and creating a fix for that. */
3893
3894 static fixS *
3895 fix_new_aarch64 (fragS * frag,
3896 int where,
3897 short int size, expressionS * exp, int pc_rel, int reloc)
3898 {
3899 fixS *new_fix;
3900
3901 switch (exp->X_op)
3902 {
3903 case O_constant:
3904 case O_symbol:
3905 case O_add:
3906 case O_subtract:
3907 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3908 break;
3909
3910 default:
3911 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3912 pc_rel, reloc);
3913 break;
3914 }
3915 return new_fix;
3916 }
3917 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;
3923
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.

   Indexed by enum aarch64_operand_error_kind; the entries must be kept
   in sync with the enum declaration order.  AARCH64_OPDE_UNTIED_OPERAND
   was missing, which shifted every subsequent name by one.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
3939
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* Severity is simply the enum's declaration order; the asserts both
     document and verify that order.
     NOTE(review): AARCH64_OPDE_UNTIED_OPERAND is handled by the error
     printer but has no assert here -- confirm its intended position in
     the severity ordering.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
3960
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  /* Static buffer: the returned pointer is only valid until the next
     call, which is fine for one-shot diagnostics.  */
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
3989
3990 static void
3991 reset_aarch64_instruction (aarch64_instruction *instruction)
3992 {
3993 memset (instruction, '\0', sizeof (aarch64_instruction));
3994 instruction->reloc.type = BFD_RELOC_UNUSED;
3995 }
3996
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Template the error applies to.  */
  aarch64_operand_error detail;		/* Kind, operand index, message.  */
  struct operand_error_record *next;	/* Singly-linked list link.  */
};

typedef struct operand_error_record operand_error_record;

/* Head/tail of the per-line list of operand error records.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
4016
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled across assembly lines to avoid
   re-allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4028
4029 /* Initialize the data structure that stores the operand mismatch
4030 information on assembling one line of the assembly code. */
4031 static void
4032 init_operand_error_report (void)
4033 {
4034 if (operand_error_report.head != NULL)
4035 {
4036 gas_assert (operand_error_report.tail != NULL);
4037 operand_error_report.tail->next = free_opnd_error_record_nodes;
4038 free_opnd_error_record_nodes = operand_error_report.head;
4039 operand_error_report.head = NULL;
4040 operand_error_report.tail = NULL;
4041 return;
4042 }
4043 gas_assert (operand_error_report.tail == NULL);
4044 }
4045
/* Return TRUE if some operand error has been recorded during the
   parsing of the current assembly line using the opcode *OPCODE;
   otherwise return FALSE.  */
static inline bfd_boolean
opcode_has_operand_error_p (const aarch64_opcode *opcode)
{
  /* Records are inserted at the head, so only the head can belong to
     the opcode currently being tried.  */
  operand_error_record *record = operand_error_report.head;
  return record && record->opcode == opcode;
}
4055
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Recycle a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  /* Here RECORD is the head record, which (by the check above) is the
     one already created for this opcode.  */
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Overwrite (or initialize) the detail of the record.  */
  record->detail = new_record->detail;
}
4107
4108 static inline void
4109 record_operand_error_info (const aarch64_opcode *opcode,
4110 aarch64_operand_error *error_info)
4111 {
4112 operand_error_record record;
4113 record.opcode = opcode;
4114 record.detail = *error_info;
4115 add_operand_error_record (&record);
4116 }
4117
4118 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4119 error message *ERROR, for operand IDX (count from 0). */
4120
4121 static void
4122 record_operand_error (const aarch64_opcode *opcode, int idx,
4123 enum aarch64_operand_error_kind kind,
4124 const char* error)
4125 {
4126 aarch64_operand_error info;
4127 memset(&info, 0, sizeof (info));
4128 info.index = idx;
4129 info.kind = kind;
4130 info.error = error;
4131 record_operand_error_info (opcode, &info);
4132 }
4133
4134 static void
4135 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4136 enum aarch64_operand_error_kind kind,
4137 const char* error, const int *extra_data)
4138 {
4139 aarch64_operand_error info;
4140 info.index = idx;
4141 info.kind = kind;
4142 info.error = error;
4143 info.data[0] = extra_data[0];
4144 info.data[1] = extra_data[1];
4145 info.data[2] = extra_data[2];
4146 record_operand_error_info (opcode, &info);
4147 }
4148
4149 static void
4150 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4151 const char* error, int lower_bound,
4152 int upper_bound)
4153 {
4154 int data[3] = {lower_bound, upper_bound, 0};
4155 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4156 error, data);
4157 }
4158
/* Remove the operand error record for *OPCODE.  Only the head record can
   belong to OPCODE (see opcode_has_operand_error_p); it is unlinked and
   returned to the free list.  */
static void ATTRIBUTE_UNUSED
remove_operand_error_record (const aarch64_opcode *opcode)
{
  if (opcode_has_operand_error_p (opcode))
    {
      operand_error_record* record = operand_error_report.head;
      gas_assert (record != NULL && operand_error_report.tail != NULL);
      operand_error_report.head = record->next;
      /* Recycle the node.  */
      record->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = record;
      if (operand_error_report.head == NULL)
	{
	  gas_assert (operand_error_report.tail == record);
	  operand_error_report.tail = NULL;
	}
    }
}
4177
4178 /* Given the instruction in *INSTR, return the index of the best matched
4179 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4180
4181 Return -1 if there is no qualifier sequence; return the first match
4182 if there is multiple matches found. */
4183
4184 static int
4185 find_best_match (const aarch64_inst *instr,
4186 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4187 {
4188 int i, num_opnds, max_num_matched, idx;
4189
4190 num_opnds = aarch64_num_of_operands (instr->opcode);
4191 if (num_opnds == 0)
4192 {
4193 DEBUG_TRACE ("no operand");
4194 return -1;
4195 }
4196
4197 max_num_matched = 0;
4198 idx = 0;
4199
4200 /* For each pattern. */
4201 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4202 {
4203 int j, num_matched;
4204 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4205
4206 /* Most opcodes has much fewer patterns in the list. */
4207 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
4208 {
4209 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4210 break;
4211 }
4212
4213 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4214 if (*qualifiers == instr->operands[j].qualifier)
4215 ++num_matched;
4216
4217 if (num_matched > max_num_matched)
4218 {
4219 max_num_matched = num_matched;
4220 idx = i;
4221 }
4222 }
4223
4224 DEBUG_TRACE ("return with %d", idx);
4225 return idx;
4226 }
4227
4228 /* Assign qualifiers in the qualifier seqence (headed by QUALIFIERS) to the
4229 corresponding operands in *INSTR. */
4230
4231 static inline void
4232 assign_qualifier_sequence (aarch64_inst *instr,
4233 const aarch64_opnd_qualifier_t *qualifiers)
4234 {
4235 int i = 0;
4236 int num_opnds = aarch64_num_of_operands (instr->opcode);
4237 gas_assert (num_opnds);
4238 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4239 instr->operands[i].qualifier = *qualifiers;
4240 }
4241
/* Print operands for the diagnosis purpose.  Appends a comma-separated
   operand list (preceded by one space) to BUF.
   NOTE(review): BUF is grown with strcat without a size bound -- callers
   appear to pass a 2048-byte buffer; confirm that is always ample.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ",");

      /* Append the operand string.  */
      strcat (buf, str);
    }
}
4274
/* Send to stderr a string as information, prefixed with the current
   file name and line number (when known) and "Info: ".  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));

  /* Forward the variadic payload to vfprintf.  */
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);

  (void) putc ('\n', stderr);
}
4298
/* Output one operand error record.  STR is the user's assembly line,
   quoted verbatim in the diagnostics.  For AARCH64_OPDE_INVALID_VARIANT
   this may re-parse STR (via the global `inst') to suggest corrected
   variants.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX may be -1, meaning the error is not tied to one operand.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    as_bad (_("%s -- `%s'"), detail->error, str);
	  else
	    as_bad (_("%s at operand %d -- `%s'"),
		    detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
		  aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      as_bad (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parsing must succeed: it already did once, the
	     only failure was the variant mismatch being reported here.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      as_bad (_("operand %d must be the same register as operand 1 -- `%s'"),
	      detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the inclusive lower/upper bounds.  */
      if (detail->data[0] != detail->data[1])
	as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], detail->data[1], idx + 1, str);
      else
	as_bad (_("%s expected to be %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0] == 1)
	as_bad (_("invalid number of registers in the list; "
		  "only 1 register is expected at operand %d -- `%s'"),
		idx + 1, str);
      else
	as_bad (_("invalid number of registers in the list; "
		  "%d registers are expected at operand %d -- `%s'"),
	      detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      as_bad (_("immediate value should be a multiple of "
		"%d at operand %d -- `%s'"),
	      detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4473
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of mulitple instruction templates; output the
   error message that most closely describes the problem.  */

static void
output_operand_error_report (char *str)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      DEBUG_TRACE ("single opcode entry with error kind: %s",
		   operand_mismatch_kind_names[head->detail.kind]);
      output_operand_error_record (head, str);
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entres with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind))
	kind = curr->detail.kind;
    }
  gas_assert (kind != AARCH64_OPDE_NIL);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      if (curr->detail.kind != kind)
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
4545 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
4556
/* Read a little-endian AARCH64 instruction from BUF and return its
   32-bit value.  */

static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *where = (unsigned char *) buf;
  uint32_t result;

  /* Assemble the value in unsigned arithmetic.  Without the casts,
     where[3] is promoted to (signed) int, and shifting it left by 24
     when bit 7 is set (as in most AArch64 opcodes) shifts into the
     sign bit -- undefined behavior per C11 6.5.7.  */
  result = ((uint32_t) where[0]
	    | ((uint32_t) where[1] << 8)
	    | ((uint32_t) where[2] << 16)
	    | ((uint32_t) where[3] << 24));
  return result;
}
4565
/* Emit the instruction currently held in the global INST into the
   output frag, creating a fix-up if a relocation is pending.
   NEW_INST, when non-NULL, is attached to the fix-up so the fixed-up
   instruction can be re-encoded later (in md_apply_fix).  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one 4-byte instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  /* Mark this frag as containing instructions (used for mapping
     symbol bookkeeping).  */
  frag_now->tc_frag_data.recorded = 1;

  /* Instructions are always written little-endian.  */
  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix(). */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* GAS-internal fix-ups carry the operand index and flags so
	     md_apply_fix can resolve them without a BFD reloc type.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
4599
/* Link together opcodes of the same name.  The mnemonic hash table
   maps each mnemonic to a chain of these nodes.  */

struct templates
{
  /* One opcode entry sharing the mnemonic.  */
  aarch64_opcode *opcode;
  /* Next opcode with the same mnemonic, or NULL at the end of the
     chain.  */
  struct templates *next;
};

typedef struct templates templates;
4609
4610 static templates *
4611 lookup_mnemonic (const char *start, int len)
4612 {
4613 templates *templ = NULL;
4614
4615 templ = hash_find_n (aarch64_ops_hsh, start, len);
4616 return templ;
4617 }
4618
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic; on return *STR is advanced past the
   mnemonic (and any ".<cond>" suffix).  Sets inst.cond as a side
   effect.  Returns NULL/0 on failure.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.')
      break;

  /* Empty mnemonic.  */
  if (end == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (end[0] == '.')
    {
      /* Condition names are exactly two characters long.  */
      cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
      if (cond)
	{
	  inst.cond = cond->value;
	  /* Skip the '.' plus the two condition characters.  */
	  *str = end + 3;
	}
      else
	{
	  *str = end;
	  return 0;
	}
    }
  else
    *str = end;

  len = end - base;

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* The 13-character limit keeps BASE + ".c" within the 16-byte
         CONDNAME buffer; no NUL terminator is needed because
         lookup_mnemonic takes an explicit length.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
4679
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (and records a syntax error via first_error) on failure.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type. */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the final qualifier is
     computed as an offset from this base (see below).  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_8B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predication suffixes /z and /m map directly to the zeroing
     and merging qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    /* Vector element register.  The scalar qualifiers are laid out
       consecutively starting at QLF_S_B, in the same order as the
       NT_b..NT_q element types.  */
    return AARCH64_OPND_QLF_S_B + vectype->type;
  else
    {
      /* Vector register. */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit vector arrangements exist.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type. The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount. */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 4;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_8B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
4744
4745 /* Process an optional operand that is found omitted from the assembly line.
4746 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4747 instruction's opcode entry while IDX is the index of this omitted operand.
4748 */
4749
4750 static void
4751 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4752 int idx, aarch64_opnd_info *operand)
4753 {
4754 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4755 gas_assert (optional_operand_p (opcode, idx));
4756 gas_assert (!operand->present);
4757
4758 switch (type)
4759 {
4760 case AARCH64_OPND_Rd:
4761 case AARCH64_OPND_Rn:
4762 case AARCH64_OPND_Rm:
4763 case AARCH64_OPND_Rt:
4764 case AARCH64_OPND_Rt2:
4765 case AARCH64_OPND_Rs:
4766 case AARCH64_OPND_Ra:
4767 case AARCH64_OPND_Rt_SYS:
4768 case AARCH64_OPND_Rd_SP:
4769 case AARCH64_OPND_Rn_SP:
4770 case AARCH64_OPND_Fd:
4771 case AARCH64_OPND_Fn:
4772 case AARCH64_OPND_Fm:
4773 case AARCH64_OPND_Fa:
4774 case AARCH64_OPND_Ft:
4775 case AARCH64_OPND_Ft2:
4776 case AARCH64_OPND_Sd:
4777 case AARCH64_OPND_Sn:
4778 case AARCH64_OPND_Sm:
4779 case AARCH64_OPND_Vd:
4780 case AARCH64_OPND_Vn:
4781 case AARCH64_OPND_Vm:
4782 case AARCH64_OPND_VdD1:
4783 case AARCH64_OPND_VnD1:
4784 operand->reg.regno = default_value;
4785 break;
4786
4787 case AARCH64_OPND_Ed:
4788 case AARCH64_OPND_En:
4789 case AARCH64_OPND_Em:
4790 operand->reglane.regno = default_value;
4791 break;
4792
4793 case AARCH64_OPND_IDX:
4794 case AARCH64_OPND_BIT_NUM:
4795 case AARCH64_OPND_IMMR:
4796 case AARCH64_OPND_IMMS:
4797 case AARCH64_OPND_SHLL_IMM:
4798 case AARCH64_OPND_IMM_VLSL:
4799 case AARCH64_OPND_IMM_VLSR:
4800 case AARCH64_OPND_CCMP_IMM:
4801 case AARCH64_OPND_FBITS:
4802 case AARCH64_OPND_UIMM4:
4803 case AARCH64_OPND_UIMM3_OP1:
4804 case AARCH64_OPND_UIMM3_OP2:
4805 case AARCH64_OPND_IMM:
4806 case AARCH64_OPND_WIDTH:
4807 case AARCH64_OPND_UIMM7:
4808 case AARCH64_OPND_NZCV:
4809 operand->imm.value = default_value;
4810 break;
4811
4812 case AARCH64_OPND_EXCEPTION:
4813 inst.reloc.type = BFD_RELOC_UNUSED;
4814 break;
4815
4816 case AARCH64_OPND_BARRIER_ISB:
4817 operand->barrier = aarch64_barrier_options + default_value;
4818
4819 default:
4820 break;
4821 }
4822 }
4823
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.  On success the
   implicit LSL shift amount for operand 1 is filled in from the
   relocation type.  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* A W-register destination selects the 32-bit variant.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* These relocation types are meaningful only for MOVZ/MOVN
     sequences; reject them on MOVK.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Map the relocation group (G0/G1/G2/G3) to the implicit shift of
     the 16-bit immediate.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* G2 and above only make sense for 64-bit destinations.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS. */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser. */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
4914
/* A primitive log calculator: return log2 of SIZE for the power-of-two
   byte sizes 1..16.  Asserts (and returns (unsigned int) -1 when
   assertions are disabled) for any other value.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* log2 indexed by SIZE - 1; (unsigned char) -1 marks sizes that are
     not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16: the original guard only
     caught the latter, so a zero SIZE read ls[-1] out of bounds.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
4930
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.
   The concrete relocation is selected by the transfer size implied by
   the second operand's qualifier (byte through quadword).  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: LDST_LO12, TLSLD_LDST_DTPREL_LO12, TLSLD_LDST_DTPREL_LO12_NC.
     Columns: log2 of the transfer size (1, 2, 4, 8, 16 bytes).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the qualifier was left open, deduce it from operand 0's
     qualifier via the opcode's qualifier list.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* The TLS DTPREL relocations have no 128-bit variant.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calcuation
     below relies on this. */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
4989
4990 /* Check whether a register list REGINFO is valid. The registers must be
4991 numbered in increasing order (modulo 32), in increments of one or two.
4992
4993 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4994 increments of two.
4995
4996 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4997
4998 static bfd_boolean
4999 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5000 {
5001 uint32_t i, nb_regs, prev_regno, incr;
5002
5003 nb_regs = 1 + (reginfo & 0x3);
5004 reginfo >>= 2;
5005 prev_regno = reginfo & 0x1f;
5006 incr = accept_alternate ? 2 : 1;
5007
5008 for (i = 1; i < nb_regs; ++i)
5009 {
5010 uint32_t curr_regno;
5011 reginfo >>= 5;
5012 curr_regno = reginfo & 0x1f;
5013 if (curr_regno != ((prev_regno + incr) & 0x1f))
5014 return FALSE;
5015 prev_regno = curr_regno;
5016 }
5017
5018 return TRUE;
5019 }
5020
5021 /* Generic instruction operand parser. This does no encoding and no
5022 semantic validation; it merely squirrels values away in the inst
5023 structure. Returns TRUE or FALSE depending on whether the
5024 specified grammar matched. */
5025
5026 static bfd_boolean
5027 parse_operands (char *str, const aarch64_opcode *opcode)
5028 {
5029 int i;
5030 char *backtrack_pos = 0;
5031 const enum aarch64_opnd *operands = opcode->operands;
5032 aarch64_reg_type imm_reg_type;
5033
5034 clear_error ();
5035 skip_whitespace (str);
5036
5037 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5038
5039 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5040 {
5041 int64_t val;
5042 const reg_entry *reg;
5043 int comma_skipped_p = 0;
5044 aarch64_reg_type rtype;
5045 struct vector_type_el vectype;
5046 aarch64_opnd_qualifier_t qualifier;
5047 aarch64_opnd_info *info = &inst.base.operands[i];
5048 aarch64_reg_type reg_type;
5049
5050 DEBUG_TRACE ("parse operand %d", i);
5051
5052 /* Assign the operand code. */
5053 info->type = operands[i];
5054
5055 if (optional_operand_p (opcode, i))
5056 {
5057 /* Remember where we are in case we need to backtrack. */
5058 gas_assert (!backtrack_pos);
5059 backtrack_pos = str;
5060 }
5061
5062 /* Expect comma between operands; the backtrack mechanizm will take
5063 care of cases of omitted optional operand. */
5064 if (i > 0 && ! skip_past_char (&str, ','))
5065 {
5066 set_syntax_error (_("comma expected between operands"));
5067 goto failure;
5068 }
5069 else
5070 comma_skipped_p = 1;
5071
5072 switch (operands[i])
5073 {
5074 case AARCH64_OPND_Rd:
5075 case AARCH64_OPND_Rn:
5076 case AARCH64_OPND_Rm:
5077 case AARCH64_OPND_Rt:
5078 case AARCH64_OPND_Rt2:
5079 case AARCH64_OPND_Rs:
5080 case AARCH64_OPND_Ra:
5081 case AARCH64_OPND_Rt_SYS:
5082 case AARCH64_OPND_PAIRREG:
5083 po_int_reg_or_fail (REG_TYPE_R_Z);
5084 break;
5085
5086 case AARCH64_OPND_Rd_SP:
5087 case AARCH64_OPND_Rn_SP:
5088 po_int_reg_or_fail (REG_TYPE_R_SP);
5089 break;
5090
5091 case AARCH64_OPND_Rm_EXT:
5092 case AARCH64_OPND_Rm_SFT:
5093 po_misc_or_fail (parse_shifter_operand
5094 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5095 ? SHIFTED_ARITH_IMM
5096 : SHIFTED_LOGIC_IMM)));
5097 if (!info->shifter.operator_present)
5098 {
5099 /* Default to LSL if not present. Libopcodes prefers shifter
5100 kind to be explicit. */
5101 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5102 info->shifter.kind = AARCH64_MOD_LSL;
5103 /* For Rm_EXT, libopcodes will carry out further check on whether
5104 or not stack pointer is used in the instruction (Recall that
5105 "the extend operator is not optional unless at least one of
5106 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5107 }
5108 break;
5109
5110 case AARCH64_OPND_Fd:
5111 case AARCH64_OPND_Fn:
5112 case AARCH64_OPND_Fm:
5113 case AARCH64_OPND_Fa:
5114 case AARCH64_OPND_Ft:
5115 case AARCH64_OPND_Ft2:
5116 case AARCH64_OPND_Sd:
5117 case AARCH64_OPND_Sn:
5118 case AARCH64_OPND_Sm:
5119 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5120 if (val == PARSE_FAIL)
5121 {
5122 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5123 goto failure;
5124 }
5125 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5126
5127 info->reg.regno = val;
5128 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5129 break;
5130
5131 case AARCH64_OPND_SVE_Pd:
5132 case AARCH64_OPND_SVE_Pg3:
5133 case AARCH64_OPND_SVE_Pg4_5:
5134 case AARCH64_OPND_SVE_Pg4_10:
5135 case AARCH64_OPND_SVE_Pg4_16:
5136 case AARCH64_OPND_SVE_Pm:
5137 case AARCH64_OPND_SVE_Pn:
5138 case AARCH64_OPND_SVE_Pt:
5139 reg_type = REG_TYPE_PN;
5140 goto vector_reg;
5141
5142 case AARCH64_OPND_SVE_Za_5:
5143 case AARCH64_OPND_SVE_Za_16:
5144 case AARCH64_OPND_SVE_Zd:
5145 case AARCH64_OPND_SVE_Zm_5:
5146 case AARCH64_OPND_SVE_Zm_16:
5147 case AARCH64_OPND_SVE_Zn:
5148 case AARCH64_OPND_SVE_Zt:
5149 reg_type = REG_TYPE_ZN;
5150 goto vector_reg;
5151
5152 case AARCH64_OPND_Vd:
5153 case AARCH64_OPND_Vn:
5154 case AARCH64_OPND_Vm:
5155 reg_type = REG_TYPE_VN;
5156 vector_reg:
5157 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5158 if (val == PARSE_FAIL)
5159 {
5160 first_error (_(get_reg_expected_msg (reg_type)));
5161 goto failure;
5162 }
5163 if (vectype.defined & NTA_HASINDEX)
5164 goto failure;
5165
5166 info->reg.regno = val;
5167 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5168 && vectype.type == NT_invtype)
5169 /* Unqualified Pn and Zn registers are allowed in certain
5170 contexts. Rely on F_STRICT qualifier checking to catch
5171 invalid uses. */
5172 info->qualifier = AARCH64_OPND_QLF_NIL;
5173 else
5174 {
5175 info->qualifier = vectype_to_qualifier (&vectype);
5176 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5177 goto failure;
5178 }
5179 break;
5180
5181 case AARCH64_OPND_VdD1:
5182 case AARCH64_OPND_VnD1:
5183 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5184 if (val == PARSE_FAIL)
5185 {
5186 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5187 goto failure;
5188 }
5189 if (vectype.type != NT_d || vectype.index != 1)
5190 {
5191 set_fatal_syntax_error
5192 (_("the top half of a 128-bit FP/SIMD register is expected"));
5193 goto failure;
5194 }
5195 info->reg.regno = val;
5196 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5197 here; it is correct for the purpose of encoding/decoding since
5198 only the register number is explicitly encoded in the related
5199 instructions, although this appears a bit hacky. */
5200 info->qualifier = AARCH64_OPND_QLF_S_D;
5201 break;
5202
5203 case AARCH64_OPND_SVE_Zn_INDEX:
5204 reg_type = REG_TYPE_ZN;
5205 goto vector_reg_index;
5206
5207 case AARCH64_OPND_Ed:
5208 case AARCH64_OPND_En:
5209 case AARCH64_OPND_Em:
5210 reg_type = REG_TYPE_VN;
5211 vector_reg_index:
5212 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5213 if (val == PARSE_FAIL)
5214 {
5215 first_error (_(get_reg_expected_msg (reg_type)));
5216 goto failure;
5217 }
5218 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5219 goto failure;
5220
5221 info->reglane.regno = val;
5222 info->reglane.index = vectype.index;
5223 info->qualifier = vectype_to_qualifier (&vectype);
5224 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5225 goto failure;
5226 break;
5227
5228 case AARCH64_OPND_SVE_ZnxN:
5229 case AARCH64_OPND_SVE_ZtxN:
5230 reg_type = REG_TYPE_ZN;
5231 goto vector_reg_list;
5232
5233 case AARCH64_OPND_LVn:
5234 case AARCH64_OPND_LVt:
5235 case AARCH64_OPND_LVt_AL:
5236 case AARCH64_OPND_LEt:
5237 reg_type = REG_TYPE_VN;
5238 vector_reg_list:
5239 if (reg_type == REG_TYPE_ZN
5240 && get_opcode_dependent_value (opcode) == 1
5241 && *str != '{')
5242 {
5243 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5244 if (val == PARSE_FAIL)
5245 {
5246 first_error (_(get_reg_expected_msg (reg_type)));
5247 goto failure;
5248 }
5249 info->reglist.first_regno = val;
5250 info->reglist.num_regs = 1;
5251 }
5252 else
5253 {
5254 val = parse_vector_reg_list (&str, reg_type, &vectype);
5255 if (val == PARSE_FAIL)
5256 goto failure;
5257 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5258 {
5259 set_fatal_syntax_error (_("invalid register list"));
5260 goto failure;
5261 }
5262 info->reglist.first_regno = (val >> 2) & 0x1f;
5263 info->reglist.num_regs = (val & 0x3) + 1;
5264 }
5265 if (operands[i] == AARCH64_OPND_LEt)
5266 {
5267 if (!(vectype.defined & NTA_HASINDEX))
5268 goto failure;
5269 info->reglist.has_index = 1;
5270 info->reglist.index = vectype.index;
5271 }
5272 else
5273 {
5274 if (vectype.defined & NTA_HASINDEX)
5275 goto failure;
5276 if (!(vectype.defined & NTA_HASTYPE))
5277 {
5278 if (reg_type == REG_TYPE_ZN)
5279 set_fatal_syntax_error (_("missing type suffix"));
5280 goto failure;
5281 }
5282 }
5283 info->qualifier = vectype_to_qualifier (&vectype);
5284 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5285 goto failure;
5286 break;
5287
5288 case AARCH64_OPND_Cn:
5289 case AARCH64_OPND_Cm:
5290 po_reg_or_fail (REG_TYPE_CN);
5291 if (val > 15)
5292 {
5293 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
5294 goto failure;
5295 }
5296 inst.base.operands[i].reg.regno = val;
5297 break;
5298
5299 case AARCH64_OPND_SHLL_IMM:
5300 case AARCH64_OPND_IMM_VLSR:
5301 po_imm_or_fail (1, 64);
5302 info->imm.value = val;
5303 break;
5304
5305 case AARCH64_OPND_CCMP_IMM:
5306 case AARCH64_OPND_FBITS:
5307 case AARCH64_OPND_UIMM4:
5308 case AARCH64_OPND_UIMM3_OP1:
5309 case AARCH64_OPND_UIMM3_OP2:
5310 case AARCH64_OPND_IMM_VLSL:
5311 case AARCH64_OPND_IMM:
5312 case AARCH64_OPND_WIDTH:
5313 po_imm_nc_or_fail ();
5314 info->imm.value = val;
5315 break;
5316
5317 case AARCH64_OPND_UIMM7:
5318 po_imm_or_fail (0, 127);
5319 info->imm.value = val;
5320 break;
5321
5322 case AARCH64_OPND_IDX:
5323 case AARCH64_OPND_BIT_NUM:
5324 case AARCH64_OPND_IMMR:
5325 case AARCH64_OPND_IMMS:
5326 po_imm_or_fail (0, 63);
5327 info->imm.value = val;
5328 break;
5329
5330 case AARCH64_OPND_IMM0:
5331 po_imm_nc_or_fail ();
5332 if (val != 0)
5333 {
5334 set_fatal_syntax_error (_("immediate zero expected"));
5335 goto failure;
5336 }
5337 info->imm.value = 0;
5338 break;
5339
5340 case AARCH64_OPND_FPIMM0:
5341 {
5342 int qfloat;
5343 bfd_boolean res1 = FALSE, res2 = FALSE;
5344 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5345 it is probably not worth the effort to support it. */
5346 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5347 imm_reg_type))
5348 && (error_p ()
5349 || !(res2 = parse_constant_immediate (&str, &val,
5350 imm_reg_type))))
5351 goto failure;
5352 if ((res1 && qfloat == 0) || (res2 && val == 0))
5353 {
5354 info->imm.value = 0;
5355 info->imm.is_fp = 1;
5356 break;
5357 }
5358 set_fatal_syntax_error (_("immediate zero expected"));
5359 goto failure;
5360 }
5361
5362 case AARCH64_OPND_IMM_MOV:
5363 {
5364 char *saved = str;
5365 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5366 reg_name_p (str, REG_TYPE_VN))
5367 goto failure;
5368 str = saved;
5369 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5370 GE_OPT_PREFIX, 1));
5371 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5372 later. fix_mov_imm_insn will try to determine a machine
5373 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5374 message if the immediate cannot be moved by a single
5375 instruction. */
5376 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5377 inst.base.operands[i].skip = 1;
5378 }
5379 break;
5380
5381 case AARCH64_OPND_SIMD_IMM:
5382 case AARCH64_OPND_SIMD_IMM_SFT:
5383 if (! parse_big_immediate (&str, &val, imm_reg_type))
5384 goto failure;
5385 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5386 /* addr_off_p */ 0,
5387 /* need_libopcodes_p */ 1,
5388 /* skip_p */ 1);
5389 /* Parse shift.
5390 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5391 shift, we don't check it here; we leave the checking to
5392 the libopcodes (operand_general_constraint_met_p). By
5393 doing this, we achieve better diagnostics. */
5394 if (skip_past_comma (&str)
5395 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5396 goto failure;
5397 if (!info->shifter.operator_present
5398 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5399 {
5400 /* Default to LSL if not present. Libopcodes prefers shifter
5401 kind to be explicit. */
5402 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5403 info->shifter.kind = AARCH64_MOD_LSL;
5404 }
5405 break;
5406
5407 case AARCH64_OPND_FPIMM:
5408 case AARCH64_OPND_SIMD_FPIMM:
5409 {
5410 int qfloat;
5411 bfd_boolean dp_p
5412 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5413 == 8);
5414 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5415 || !aarch64_imm_float_p (qfloat))
5416 {
5417 if (!error_p ())
5418 set_fatal_syntax_error (_("invalid floating-point"
5419 " constant"));
5420 goto failure;
5421 }
5422 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5423 inst.base.operands[i].imm.is_fp = 1;
5424 }
5425 break;
5426
5427 case AARCH64_OPND_LIMM:
5428 po_misc_or_fail (parse_shifter_operand (&str, info,
5429 SHIFTED_LOGIC_IMM));
5430 if (info->shifter.operator_present)
5431 {
5432 set_fatal_syntax_error
5433 (_("shift not allowed for bitmask immediate"));
5434 goto failure;
5435 }
5436 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5437 /* addr_off_p */ 0,
5438 /* need_libopcodes_p */ 1,
5439 /* skip_p */ 1);
5440 break;
5441
5442 case AARCH64_OPND_AIMM:
5443 if (opcode->op == OP_ADD)
5444 /* ADD may have relocation types. */
5445 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5446 SHIFTED_ARITH_IMM));
5447 else
5448 po_misc_or_fail (parse_shifter_operand (&str, info,
5449 SHIFTED_ARITH_IMM));
5450 switch (inst.reloc.type)
5451 {
5452 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5453 info->shifter.amount = 12;
5454 break;
5455 case BFD_RELOC_UNUSED:
5456 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5457 if (info->shifter.kind != AARCH64_MOD_NONE)
5458 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5459 inst.reloc.pc_rel = 0;
5460 break;
5461 default:
5462 break;
5463 }
5464 info->imm.value = 0;
5465 if (!info->shifter.operator_present)
5466 {
5467 /* Default to LSL if not present. Libopcodes prefers shifter
5468 kind to be explicit. */
5469 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5470 info->shifter.kind = AARCH64_MOD_LSL;
5471 }
5472 break;
5473
5474 case AARCH64_OPND_HALF:
5475 {
5476 /* #<imm16> or relocation. */
5477 int internal_fixup_p;
5478 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5479 if (internal_fixup_p)
5480 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5481 skip_whitespace (str);
5482 if (skip_past_comma (&str))
5483 {
5484 /* {, LSL #<shift>} */
5485 if (! aarch64_gas_internal_fixup_p ())
5486 {
5487 set_fatal_syntax_error (_("can't mix relocation modifier "
5488 "with explicit shift"));
5489 goto failure;
5490 }
5491 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5492 }
5493 else
5494 inst.base.operands[i].shifter.amount = 0;
5495 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5496 inst.base.operands[i].imm.value = 0;
5497 if (! process_movw_reloc_info ())
5498 goto failure;
5499 }
5500 break;
5501
5502 case AARCH64_OPND_EXCEPTION:
5503 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5504 imm_reg_type));
5505 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5506 /* addr_off_p */ 0,
5507 /* need_libopcodes_p */ 0,
5508 /* skip_p */ 1);
5509 break;
5510
5511 case AARCH64_OPND_NZCV:
5512 {
5513 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5514 if (nzcv != NULL)
5515 {
5516 str += 4;
5517 info->imm.value = nzcv->value;
5518 break;
5519 }
5520 po_imm_or_fail (0, 15);
5521 info->imm.value = val;
5522 }
5523 break;
5524
5525 case AARCH64_OPND_COND:
5526 case AARCH64_OPND_COND1:
5527 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5528 str += 2;
5529 if (info->cond == NULL)
5530 {
5531 set_syntax_error (_("invalid condition"));
5532 goto failure;
5533 }
5534 else if (operands[i] == AARCH64_OPND_COND1
5535 && (info->cond->value & 0xe) == 0xe)
5536 {
5537 /* Not allow AL or NV. */
5538 set_default_error ();
5539 goto failure;
5540 }
5541 break;
5542
5543 case AARCH64_OPND_ADDR_ADRP:
5544 po_misc_or_fail (parse_adrp (&str));
5545 /* Clear the value as operand needs to be relocated. */
5546 info->imm.value = 0;
5547 break;
5548
5549 case AARCH64_OPND_ADDR_PCREL14:
5550 case AARCH64_OPND_ADDR_PCREL19:
5551 case AARCH64_OPND_ADDR_PCREL21:
5552 case AARCH64_OPND_ADDR_PCREL26:
5553 po_misc_or_fail (parse_address (&str, info));
5554 if (!info->addr.pcrel)
5555 {
5556 set_syntax_error (_("invalid pc-relative address"));
5557 goto failure;
5558 }
5559 if (inst.gen_lit_pool
5560 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5561 {
5562 /* Only permit "=value" in the literal load instructions.
5563 The literal will be generated by programmer_friendly_fixup. */
5564 set_syntax_error (_("invalid use of \"=immediate\""));
5565 goto failure;
5566 }
5567 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5568 {
5569 set_syntax_error (_("unrecognized relocation suffix"));
5570 goto failure;
5571 }
5572 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5573 {
5574 info->imm.value = inst.reloc.exp.X_add_number;
5575 inst.reloc.type = BFD_RELOC_UNUSED;
5576 }
5577 else
5578 {
5579 info->imm.value = 0;
5580 if (inst.reloc.type == BFD_RELOC_UNUSED)
5581 switch (opcode->iclass)
5582 {
5583 case compbranch:
5584 case condbranch:
5585 /* e.g. CBZ or B.COND */
5586 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5587 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5588 break;
5589 case testbranch:
5590 /* e.g. TBZ */
5591 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5592 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5593 break;
5594 case branch_imm:
5595 /* e.g. B or BL */
5596 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5597 inst.reloc.type =
5598 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5599 : BFD_RELOC_AARCH64_JUMP26;
5600 break;
5601 case loadlit:
5602 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5603 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5604 break;
5605 case pcreladdr:
5606 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5607 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5608 break;
5609 default:
5610 gas_assert (0);
5611 abort ();
5612 }
5613 inst.reloc.pc_rel = 1;
5614 }
5615 break;
5616
5617 case AARCH64_OPND_ADDR_SIMPLE:
5618 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5619 {
5620 /* [<Xn|SP>{, #<simm>}] */
5621 char *start = str;
5622 /* First use the normal address-parsing routines, to get
5623 the usual syntax errors. */
5624 po_misc_or_fail (parse_address (&str, info));
5625 if (info->addr.pcrel || info->addr.offset.is_reg
5626 || !info->addr.preind || info->addr.postind
5627 || info->addr.writeback)
5628 {
5629 set_syntax_error (_("invalid addressing mode"));
5630 goto failure;
5631 }
5632
5633 /* Then retry, matching the specific syntax of these addresses. */
5634 str = start;
5635 po_char_or_fail ('[');
5636 po_reg_or_fail (REG_TYPE_R64_SP);
5637 /* Accept optional ", #0". */
5638 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5639 && skip_past_char (&str, ','))
5640 {
5641 skip_past_char (&str, '#');
5642 if (! skip_past_char (&str, '0'))
5643 {
5644 set_fatal_syntax_error
5645 (_("the optional immediate offset can only be 0"));
5646 goto failure;
5647 }
5648 }
5649 po_char_or_fail (']');
5650 break;
5651 }
5652
5653 case AARCH64_OPND_ADDR_REGOFF:
5654 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5655 po_misc_or_fail (parse_address (&str, info));
5656 if (info->addr.pcrel || !info->addr.offset.is_reg
5657 || !info->addr.preind || info->addr.postind
5658 || info->addr.writeback)
5659 {
5660 set_syntax_error (_("invalid addressing mode"));
5661 goto failure;
5662 }
5663 if (!info->shifter.operator_present)
5664 {
5665 /* Default to LSL if not present. Libopcodes prefers shifter
5666 kind to be explicit. */
5667 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5668 info->shifter.kind = AARCH64_MOD_LSL;
5669 }
5670 /* Qualifier to be deduced by libopcodes. */
5671 break;
5672
5673 case AARCH64_OPND_ADDR_SIMM7:
5674 po_misc_or_fail (parse_address (&str, info));
5675 if (info->addr.pcrel || info->addr.offset.is_reg
5676 || (!info->addr.preind && !info->addr.postind))
5677 {
5678 set_syntax_error (_("invalid addressing mode"));
5679 goto failure;
5680 }
5681 if (inst.reloc.type != BFD_RELOC_UNUSED)
5682 {
5683 set_syntax_error (_("relocation not allowed"));
5684 goto failure;
5685 }
5686 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5687 /* addr_off_p */ 1,
5688 /* need_libopcodes_p */ 1,
5689 /* skip_p */ 0);
5690 break;
5691
5692 case AARCH64_OPND_ADDR_SIMM9:
5693 case AARCH64_OPND_ADDR_SIMM9_2:
5694 po_misc_or_fail (parse_address (&str, info));
5695 if (info->addr.pcrel || info->addr.offset.is_reg
5696 || (!info->addr.preind && !info->addr.postind)
5697 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5698 && info->addr.writeback))
5699 {
5700 set_syntax_error (_("invalid addressing mode"));
5701 goto failure;
5702 }
5703 if (inst.reloc.type != BFD_RELOC_UNUSED)
5704 {
5705 set_syntax_error (_("relocation not allowed"));
5706 goto failure;
5707 }
5708 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5709 /* addr_off_p */ 1,
5710 /* need_libopcodes_p */ 1,
5711 /* skip_p */ 0);
5712 break;
5713
5714 case AARCH64_OPND_ADDR_UIMM12:
5715 po_misc_or_fail (parse_address (&str, info));
5716 if (info->addr.pcrel || info->addr.offset.is_reg
5717 || !info->addr.preind || info->addr.writeback)
5718 {
5719 set_syntax_error (_("invalid addressing mode"));
5720 goto failure;
5721 }
5722 if (inst.reloc.type == BFD_RELOC_UNUSED)
5723 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5724 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5725 || (inst.reloc.type
5726 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
5727 || (inst.reloc.type
5728 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
5729 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5730 /* Leave qualifier to be determined by libopcodes. */
5731 break;
5732
5733 case AARCH64_OPND_SIMD_ADDR_POST:
5734 /* [<Xn|SP>], <Xm|#<amount>> */
5735 po_misc_or_fail (parse_address (&str, info));
5736 if (!info->addr.postind || !info->addr.writeback)
5737 {
5738 set_syntax_error (_("invalid addressing mode"));
5739 goto failure;
5740 }
5741 if (!info->addr.offset.is_reg)
5742 {
5743 if (inst.reloc.exp.X_op == O_constant)
5744 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5745 else
5746 {
5747 set_fatal_syntax_error
5748 (_("writeback value should be an immediate constant"));
5749 goto failure;
5750 }
5751 }
5752 /* No qualifier. */
5753 break;
5754
5755 case AARCH64_OPND_SYSREG:
5756 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
5757 == PARSE_FAIL)
5758 {
5759 set_syntax_error (_("unknown or missing system register name"));
5760 goto failure;
5761 }
5762 inst.base.operands[i].sysreg = val;
5763 break;
5764
5765 case AARCH64_OPND_PSTATEFIELD:
5766 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
5767 == PARSE_FAIL)
5768 {
5769 set_syntax_error (_("unknown or missing PSTATE field name"));
5770 goto failure;
5771 }
5772 inst.base.operands[i].pstatefield = val;
5773 break;
5774
5775 case AARCH64_OPND_SYSREG_IC:
5776 inst.base.operands[i].sysins_op =
5777 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5778 goto sys_reg_ins;
5779 case AARCH64_OPND_SYSREG_DC:
5780 inst.base.operands[i].sysins_op =
5781 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5782 goto sys_reg_ins;
5783 case AARCH64_OPND_SYSREG_AT:
5784 inst.base.operands[i].sysins_op =
5785 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5786 goto sys_reg_ins;
5787 case AARCH64_OPND_SYSREG_TLBI:
5788 inst.base.operands[i].sysins_op =
5789 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5790 sys_reg_ins:
5791 if (inst.base.operands[i].sysins_op == NULL)
5792 {
5793 set_fatal_syntax_error ( _("unknown or missing operation name"));
5794 goto failure;
5795 }
5796 break;
5797
5798 case AARCH64_OPND_BARRIER:
5799 case AARCH64_OPND_BARRIER_ISB:
5800 val = parse_barrier (&str);
5801 if (val != PARSE_FAIL
5802 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5803 {
5804 /* ISB only accepts options name 'sy'. */
5805 set_syntax_error
5806 (_("the specified option is not accepted in ISB"));
5807 /* Turn off backtrack as this optional operand is present. */
5808 backtrack_pos = 0;
5809 goto failure;
5810 }
5811 /* This is an extension to accept a 0..15 immediate. */
5812 if (val == PARSE_FAIL)
5813 po_imm_or_fail (0, 15);
5814 info->barrier = aarch64_barrier_options + val;
5815 break;
5816
5817 case AARCH64_OPND_PRFOP:
5818 val = parse_pldop (&str);
5819 /* This is an extension to accept a 0..31 immediate. */
5820 if (val == PARSE_FAIL)
5821 po_imm_or_fail (0, 31);
5822 inst.base.operands[i].prfop = aarch64_prfops + val;
5823 break;
5824
5825 case AARCH64_OPND_BARRIER_PSB:
5826 val = parse_barrier_psb (&str, &(info->hint_option));
5827 if (val == PARSE_FAIL)
5828 goto failure;
5829 break;
5830
5831 default:
5832 as_fatal (_("unhandled operand code %d"), operands[i]);
5833 }
5834
5835 /* If we get here, this operand was successfully parsed. */
5836 inst.base.operands[i].present = 1;
5837 continue;
5838
5839 failure:
5840 /* The parse routine should already have set the error, but in case
5841 not, set a default one here. */
5842 if (! error_p ())
5843 set_default_error ();
5844
5845 if (! backtrack_pos)
5846 goto parse_operands_return;
5847
5848 {
5849 /* We reach here because this operand is marked as optional, and
5850 either no operand was supplied or the operand was supplied but it
5851 was syntactically incorrect. In the latter case we report an
5852 error. In the former case we perform a few more checks before
5853 dropping through to the code to insert the default operand. */
5854
5855 char *tmp = backtrack_pos;
5856 char endchar = END_OF_INSN;
5857
5858 if (i != (aarch64_num_of_operands (opcode) - 1))
5859 endchar = ',';
5860 skip_past_char (&tmp, ',');
5861
5862 if (*tmp != endchar)
5863 /* The user has supplied an operand in the wrong format. */
5864 goto parse_operands_return;
5865
5866 /* Make sure there is not a comma before the optional operand.
5867 For example the fifth operand of 'sys' is optional:
5868
5869 sys #0,c0,c0,#0, <--- wrong
5870 sys #0,c0,c0,#0 <--- correct. */
5871 if (comma_skipped_p && i && endchar == END_OF_INSN)
5872 {
5873 set_fatal_syntax_error
5874 (_("unexpected comma before the omitted optional operand"));
5875 goto parse_operands_return;
5876 }
5877 }
5878
5879 /* Reaching here means we are dealing with an optional operand that is
5880 omitted from the assembly line. */
5881 gas_assert (optional_operand_p (opcode, i));
5882 info->present = 0;
5883 process_omitted_operand (operands[i], opcode, i, info);
5884
5885 /* Try again, skipping the optional operand at backtrack_pos. */
5886 str = backtrack_pos;
5887 backtrack_pos = 0;
5888
5889 /* Clear any error record after the omitted optional operand has been
5890 successfully handled. */
5891 clear_error ();
5892 }
5893
5894 /* Check if we have parsed all the operands. */
5895 if (*str != '\0' && ! error_p ())
5896 {
5897 /* Set I to the index of the last present operand; this is
5898 for the purpose of diagnostics. */
5899 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5900 ;
5901 set_fatal_syntax_error
5902 (_("unexpected characters following instruction"));
5903 }
5904
5905 parse_operands_return:
5906
5907 if (error_p ())
5908 {
5909 DEBUG_TRACE ("parsing FAIL: %s - %s",
5910 operand_mismatch_kind_names[get_error_kind ()],
5911 get_error_message ());
5912 /* Record the operand error properly; this is useful when there
5913 are multiple instruction templates for a mnemonic name, so that
5914 later on, we can select the error that most closely describes
5915 the problem. */
5916 record_operand_error (opcode, i, get_error_kind (),
5917 get_error_message ());
5918 return FALSE;
5919 }
5920 else
5921 {
5922 DEBUG_TRACE ("parsing SUCCESS");
5923 return TRUE;
5924 }
5925 }
5926
/* Perform a few fix-ups to accept programmer-friendly notation while
   keeping libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE. */
5931
static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32. */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* The W form only makes sense for bit numbers 0-31; reject
	     anything larger before touching the qualifier.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* Promote Wn to Xn for encoding; per the comment above, the bit
	     number alone determines how the register is disassembled.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value. */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW reads 4 bytes from the pool even though the destination
	     qualifier is 8 bytes wide.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6034
6035 /* Check for loads and stores that will cause unpredictable behavior. */
6036
6037 static void
6038 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6039 {
6040 aarch64_inst *base = &instr->base;
6041 const aarch64_opcode *opcode = base->opcode;
6042 const aarch64_opnd_info *opnds = base->operands;
6043 switch (opcode->iclass)
6044 {
6045 case ldst_pos:
6046 case ldst_imm9:
6047 case ldst_unscaled:
6048 case ldst_unpriv:
6049 /* Loading/storing the base register is unpredictable if writeback. */
6050 if ((aarch64_get_operand_class (opnds[0].type)
6051 == AARCH64_OPND_CLASS_INT_REG)
6052 && opnds[0].reg.regno == opnds[1].addr.base_regno
6053 && opnds[1].addr.base_regno != REG_SP
6054 && opnds[1].addr.writeback)
6055 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6056 break;
6057 case ldstpair_off:
6058 case ldstnapair_offs:
6059 case ldstpair_indexed:
6060 /* Loading/storing the base register is unpredictable if writeback. */
6061 if ((aarch64_get_operand_class (opnds[0].type)
6062 == AARCH64_OPND_CLASS_INT_REG)
6063 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6064 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6065 && opnds[2].addr.base_regno != REG_SP
6066 && opnds[2].addr.writeback)
6067 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6068 /* Load operations must load different registers. */
6069 if ((opcode->opcode & (1 << 22))
6070 && opnds[0].reg.regno == opnds[1].reg.regno)
6071 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6072 break;
6073 default:
6074 break;
6075 }
6076 }
6077
6078 /* A wrapper function to interface with libopcodes on encoding and
6079 record the error message if there is any.
6080
6081 Return TRUE on success; otherwise return FALSE. */
6082
6083 static bfd_boolean
6084 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6085 aarch64_insn *code)
6086 {
6087 aarch64_operand_error error_info;
6088 error_info.kind = AARCH64_OPDE_NIL;
6089 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
6090 return TRUE;
6091 else
6092 {
6093 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6094 record_operand_error_info (opcode, &error_info);
6095 return FALSE;
6096 }
6097 }
6098
#ifdef DEBUG_AARCH64
/* Debug helper: print the name (or, failing that, the description) of
   each operand of OPCODE.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int i;

  for (i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *text = aarch64_get_operand_name (opcode->operands[i]);

      if (text[0] == '\0')
	text = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, text);
    }
}
#endif /* DEBUG_AARCH64 */
6114
6115 /* This is the guts of the machine-dependent assembler. STR points to a
6116 machine dependent instruction. This function is supposed to emit
6117 the frags/bytes it assembles to. */
6118
void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed. */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive. */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed. */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* inst.cond was filled in before this point (NOTE(review): presumably by
     opcode_lookup for b.cond-style mnemonics — confirm); keep it across the
     reset of the shared instruction state.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name. */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond. */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional. */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; any of the three steps may record an
	 operand error and make us fall through to the next template.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU. */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up. */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }
	  return;
	}

      /* This template didn't match; reset the shared instruction state
	 before retrying with the next candidate for the mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any. */
  output_operand_error_report (str);
}
6248
6249 /* Various frobbings of labels and their addresses. */
6250
void
aarch64_start_line_hook (void)
{
  /* Forget any label from a previous line; md_assemble only re-anchors a
     label seen on the current line.  */
  last_label_seen = NULL;
}
6256
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the label so md_assemble can move it onto the frag of the
     next instruction (see the alignment code at the top of md_assemble).  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
6264
6265 int
6266 aarch64_data_in_code (void)
6267 {
6268 if (!strncmp (input_line_pointer + 1, "data:", 5))
6269 {
6270 *input_line_pointer = '/';
6271 input_line_pointer += 5;
6272 *input_line_pointer = 0;
6273 return 1;
6274 }
6275
6276 return 0;
6277 }
6278
/* Canonicalize NAME in place by stripping a trailing "/data" marker
   (see aarch64_data_in_code); return NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  /* Only strip when something precedes the marker, hence > 5.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
6289 \f
6290 /* Table of all register names defined by default. The user can
6291 define additional names with .req. Note that all register names
6292 should appear in both upper and lowercase variants. Some registers
6293 also have mixed-case names. */
6294
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Registers <p>0 .. <p>15 of type REG_TYPE_<t>.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Registers <p>0 .. <p>30 of type REG_TYPE_<t>.  */
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
/* Registers <p>0 .. <p>31 of type REG_TYPE_<t>.  */
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table. */
static const reg_entry reg_names[] = {
  /* Integer registers.  x31/w31 are deliberately absent: register
     number 31 is only reachable through the sp/wsp and zr aliases
     defined below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* Stack-pointer spellings of register number 31.  */
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  /* Zero-register spellings of register number 31.  */
  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Coprocessor register numbers. */
  REGSET (c, CN), REGSET (C, CN),

  /* Floating-point single precision registers. */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers. */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers. */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers. */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers. */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers. */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers. */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers; only p0-p15 are defined.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
6356
/* Lower-case letters stand for a clear (0) flag, upper-case for a set
   (1) flag; B packs the four N/Z/C/V bits into a 4-bit value.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All sixteen mixed-case spellings of the NZCV flags operand, each
   mapped to its 4-bit immediate value.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
6394 \f
6395 /* MD interface: bits in the object file. */
6396
6397 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6398 for use in the a.out file, and stores them in the array pointed to by buf.
6399 This knows about the endian-ness of the target machine and does
6400 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
6401 2 (short) and 4 (long) Floating numbers are put out as a series of
6402 LITTLENUMS (shorts, here at least). */
6403
6404 void
6405 md_number_to_chars (char *buf, valueT val, int n)
6406 {
6407 if (target_big_endian)
6408 number_to_chars_bigendian (buf, val, n);
6409 else
6410 number_to_chars_littleendian (buf, val, n);
6411 }
6412
6413 /* MD interface: Sections. */
6414
6415 /* Estimate the size of a frag before relaxing. Assume everything fits in
6416 4 bytes. */
6417
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Per the comment above, everything is assumed to fit in 4 bytes, so
     both the variable part and the returned estimate are fixed at 4.  */
  fragp->fr_var = 4;
  return 4;
}
6424
6425 /* Round up a section size to the appropriate boundary. */
6426
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No rounding is applied; the section size is returned unchanged.  */
  return size;
}
6432
6433 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6434 of an rs_align_code fragment.
6435
6436 Here we fill the frag with the appropriate info for padding the
6437 output stream. The resulting frag will consist of a fixed (fr_fix)
6438 and of a repeating (fr_var) part.
6439
6440 The fixed content is always emitted before the repeating content and
6441 these two parts are used as follows in constructing the output:
6442 - the fixed part will be used to align to a valid instruction word
6443 boundary, in case that we start at a misaligned address; as no
6444 executable instruction can live at the misaligned location, we
6445 simply fill with zeros;
6446 - the variable part will be used to cover the remaining padding and
6447 we fill using the AArch64 NOP instruction.
6448
6449 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6450 enough storage space for up to 3 bytes for padding the back to a valid
6451 instruction alignment and exactly 4 bytes to store the NOP pattern. */
6452
6453 void
6454 aarch64_handle_align (fragS * fragP)
6455 {
6456 /* NOP = d503201f */
6457 /* AArch64 instructions are always little-endian. */
6458 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6459
6460 int bytes, fix, noop_size;
6461 char *p;
6462
6463 if (fragP->fr_type != rs_align_code)
6464 return;
6465
6466 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6467 p = fragP->fr_literal + fragP->fr_fix;
6468
6469 #ifdef OBJ_ELF
6470 gas_assert (fragP->tc_frag_data.recorded);
6471 #endif
6472
6473 noop_size = sizeof (aarch64_noop);
6474
6475 fix = bytes & (noop_size - 1);
6476 if (fix)
6477 {
6478 #ifdef OBJ_ELF
6479 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6480 #endif
6481 memset (p, 0, fix);
6482 p += fix;
6483 fragP->fr_fix += fix;
6484 }
6485
6486 if (noop_size)
6487 memcpy (p, aarch64_noop, noop_size);
6488 fragP->fr_var = noop_size;
6489 }
6490
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned. A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first. */
6496
6497 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets.  */
}
6503
6504 #else /* OBJ_ELF is defined. */
6505 void
6506 aarch64_init_frag (fragS * fragP, int max_chars)
6507 {
6508 /* Record a mapping symbol for alignment frags. We will delete this
6509 later if the alignment ends up empty. */
6510 if (!fragP->tc_frag_data.recorded)
6511 fragP->tc_frag_data.recorded = 1;
6512
6513 switch (fragP->fr_type)
6514 {
6515 case rs_align_test:
6516 case rs_fill:
6517 mapping_state_2 (MAP_DATA, max_chars);
6518 break;
6519 case rs_align:
6520 /* PR 20364: We can get alignment frags in code sections,
6521 so do not just assume that we should use the MAP_DATA state. */
6522 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
6523 break;
6524 case rs_align_code:
6525 mapping_state_2 (MAP_INSN, max_chars);
6526 break;
6527 default:
6528 break;
6529 }
6530 }
6531 \f
6532 /* Initialize the DWARF-2 unwind information for this procedure. */
6533
void
tc_aarch64_frame_initial_instructions (void)
{
  /* On entry the CFA is the stack pointer with a zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
6539 #endif /* OBJ_ELF */
6540
6541 /* Convert REGNAME to a DWARF-2 register number. */
6542
6543 int
6544 tc_aarch64_regname_to_dw2regnum (char *regname)
6545 {
6546 const reg_entry *reg = parse_reg (&regname);
6547 if (reg == NULL)
6548 return -1;
6549
6550 switch (reg->type)
6551 {
6552 case REG_TYPE_SP_32:
6553 case REG_TYPE_SP_64:
6554 case REG_TYPE_R_32:
6555 case REG_TYPE_R_64:
6556 return reg->number;
6557
6558 case REG_TYPE_FP_B:
6559 case REG_TYPE_FP_H:
6560 case REG_TYPE_FP_S:
6561 case REG_TYPE_FP_D:
6562 case REG_TYPE_FP_Q:
6563 return reg->number + 64;
6564
6565 default:
6566 break;
6567 }
6568 return -1;
6569 }
6570
6571 /* Implement DWARF2_ADDR_SIZE. */
6572
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses despite the 64-bit architecture.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
6582
6583 /* MD interface: Symbol and relocation handling. */
6584
6585 /* Return the address within the segment that a PC-relative fixup is
6586 relative to. For AArch64 PC-relative fixups applied to instructions
6587 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6588
6589 long
6590 md_pcrel_from_section (fixS * fixP, segT seg)
6591 {
6592 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6593
6594 /* If this is pc-relative and we are going to emit a relocation
6595 then we just want to put out any pipeline compensation that the linker
6596 will need. Otherwise we want to use the calculated base. */
6597 if (fixP->fx_pcrel
6598 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6599 || aarch64_force_relocation (fixP)))
6600 base = 0;
6601
6602 /* AArch64 should be consistent for all pc-relative relocations. */
6603 return base + AARCH64_PCREL_OFFSET;
6604 }
6605
6606 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
6607 Otherwise we have no need to default values of symbols. */
6608
6609 symbolS *
6610 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6611 {
6612 #ifdef OBJ_ELF
6613 if (name[0] == '_' && name[1] == 'G'
6614 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6615 {
6616 if (!GOT_symbol)
6617 {
6618 if (symbol_find (name))
6619 as_bad (_("GOT already in the symbol table"));
6620
6621 GOT_symbol = symbol_new (name, undefined_section,
6622 (valueT) 0, &zero_address_frag);
6623 }
6624
6625 return GOT_symbol;
6626 }
6627 #endif
6628
6629 return 0;
6630 }
6631
/* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by an unsigned number with the indicated number of
   BITS. */
6635
6636 static bfd_boolean
6637 unsigned_overflow (valueT value, unsigned bits)
6638 {
6639 valueT lim;
6640 if (bits >= sizeof (valueT) * 8)
6641 return FALSE;
6642 lim = (valueT) 1 << bits;
6643 return (value >= lim);
6644 }
6645
6646
/* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by a signed number with the indicated number of
   BITS. */
6650
6651 static bfd_boolean
6652 signed_overflow (offsetT value, unsigned bits)
6653 {
6654 offsetT lim;
6655 if (bits >= sizeof (offsetT) * 8)
6656 return FALSE;
6657 lim = (offsetT) 1 << (bits - 1);
6658 return (value < -lim || value >= lim);
6659 }
6660
6661 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6662 unsigned immediate offset load/store instruction, try to encode it as
6663 an unscaled, 9-bit, signed immediate offset load/store instruction.
6664 Return TRUE if it is successful; otherwise return FALSE.
6665
6666 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
6667 in response to the standard LDR/STR mnemonics when the immediate offset is
6668 unambiguous, i.e. when it is negative or unaligned. */
6669
6670 static bfd_boolean
6671 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6672 {
6673 int idx;
6674 enum aarch64_op new_op;
6675 const aarch64_opcode *new_opcode;
6676
6677 gas_assert (instr->opcode->iclass == ldst_pos);
6678
6679 switch (instr->opcode->op)
6680 {
6681 case OP_LDRB_POS:new_op = OP_LDURB; break;
6682 case OP_STRB_POS: new_op = OP_STURB; break;
6683 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6684 case OP_LDRH_POS: new_op = OP_LDURH; break;
6685 case OP_STRH_POS: new_op = OP_STURH; break;
6686 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6687 case OP_LDR_POS: new_op = OP_LDUR; break;
6688 case OP_STR_POS: new_op = OP_STUR; break;
6689 case OP_LDRF_POS: new_op = OP_LDURV; break;
6690 case OP_STRF_POS: new_op = OP_STURV; break;
6691 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6692 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6693 default: new_op = OP_NIL; break;
6694 }
6695
6696 if (new_op == OP_NIL)
6697 return FALSE;
6698
6699 new_opcode = aarch64_get_opcode (new_op);
6700 gas_assert (new_opcode != NULL);
6701
6702 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6703 instr->opcode->op, new_opcode->op);
6704
6705 aarch64_replace_opcode (instr, new_opcode);
6706
6707 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6708 qualifier matching may fail because the out-of-date qualifier will
6709 prevent the operand being updated with a new and correct qualifier. */
6710 idx = aarch64_operand_index (instr->opcode->operands,
6711 AARCH64_OPND_ADDR_SIMM9);
6712 gas_assert (idx == 1);
6713 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6714
6715 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6716
6717 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6718 return FALSE;
6719
6720 return TRUE;
6721 }
6722
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode, which overwrites the opcode (and
     hence the operand constraints) in *INSTR.  MOVZ/MOVN cannot target SP;
     ORR cannot target ZR.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias first, per the preference order above.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN, the inverted wide move).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias (logical immediate / bitmask form).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* None of MOVZ/MOVN/ORR could materialise VALUE in one instruction.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
6783
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit exception/system immediate; patched directly into the
	 already-emitted instruction word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6  Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6  Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6  Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6  Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2   221111111111
		  1  098|7654|3 2   109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub: fold a negative offset into the opposite op.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only valid when
	     the low 12 bits are zero and the result fits in 12 bits.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate: re-encode via the opcode table rather than
	 patching bits, since the logical-immediate encoding is nontrivial.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate (MOVZ/MOVN/MOVK payload).  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_UIMM12:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  If the scaled unsigned-offset encoding fails,
	 fall back to the programmer-friendly unscaled LDUR/STUR form.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
6958
6959 /* Apply a fixup (fixP) to segment data, once it has been determined
6960 by our caller that we have all the info we need to fix it up.
6961
6962 Parameter valP is the pointer to the value of the bits. */
6963
6964 void
6965 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6966 {
6967 offsetT value = *valP;
6968 uint32_t insn;
6969 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6970 int scale;
6971 unsigned flags = fixP->fx_addnumber;
6972
6973 DEBUG_TRACE ("\n\n");
6974 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6975 DEBUG_TRACE ("Enter md_apply_fix");
6976
6977 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6978
6979 /* Note whether this will delete the relocation. */
6980
6981 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6982 fixP->fx_done = 1;
6983
6984 /* Process the relocations. */
6985 switch (fixP->fx_r_type)
6986 {
6987 case BFD_RELOC_NONE:
6988 /* This will need to go in the object file. */
6989 fixP->fx_done = 0;
6990 break;
6991
6992 case BFD_RELOC_8:
6993 case BFD_RELOC_8_PCREL:
6994 if (fixP->fx_done || !seg->use_rela_p)
6995 md_number_to_chars (buf, value, 1);
6996 break;
6997
6998 case BFD_RELOC_16:
6999 case BFD_RELOC_16_PCREL:
7000 if (fixP->fx_done || !seg->use_rela_p)
7001 md_number_to_chars (buf, value, 2);
7002 break;
7003
7004 case BFD_RELOC_32:
7005 case BFD_RELOC_32_PCREL:
7006 if (fixP->fx_done || !seg->use_rela_p)
7007 md_number_to_chars (buf, value, 4);
7008 break;
7009
7010 case BFD_RELOC_64:
7011 case BFD_RELOC_64_PCREL:
7012 if (fixP->fx_done || !seg->use_rela_p)
7013 md_number_to_chars (buf, value, 8);
7014 break;
7015
7016 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7017 /* We claim that these fixups have been processed here, even if
7018 in fact we generate an error because we do not have a reloc
7019 for them, so tc_gen_reloc() will reject them. */
7020 fixP->fx_done = 1;
7021 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7022 {
7023 as_bad_where (fixP->fx_file, fixP->fx_line,
7024 _("undefined symbol %s used as an immediate value"),
7025 S_GET_NAME (fixP->fx_addsy));
7026 goto apply_fix_return;
7027 }
7028 fix_insn (fixP, flags, value);
7029 break;
7030
7031 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7032 if (fixP->fx_done || !seg->use_rela_p)
7033 {
7034 if (value & 3)
7035 as_bad_where (fixP->fx_file, fixP->fx_line,
7036 _("pc-relative load offset not word aligned"));
7037 if (signed_overflow (value, 21))
7038 as_bad_where (fixP->fx_file, fixP->fx_line,
7039 _("pc-relative load offset out of range"));
7040 insn = get_aarch64_insn (buf);
7041 insn |= encode_ld_lit_ofs_19 (value >> 2);
7042 put_aarch64_insn (buf, insn);
7043 }
7044 break;
7045
7046 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7047 if (fixP->fx_done || !seg->use_rela_p)
7048 {
7049 if (signed_overflow (value, 21))
7050 as_bad_where (fixP->fx_file, fixP->fx_line,
7051 _("pc-relative address offset out of range"));
7052 insn = get_aarch64_insn (buf);
7053 insn |= encode_adr_imm (value);
7054 put_aarch64_insn (buf, insn);
7055 }
7056 break;
7057
7058 case BFD_RELOC_AARCH64_BRANCH19:
7059 if (fixP->fx_done || !seg->use_rela_p)
7060 {
7061 if (value & 3)
7062 as_bad_where (fixP->fx_file, fixP->fx_line,
7063 _("conditional branch target not word aligned"));
7064 if (signed_overflow (value, 21))
7065 as_bad_where (fixP->fx_file, fixP->fx_line,
7066 _("conditional branch out of range"));
7067 insn = get_aarch64_insn (buf);
7068 insn |= encode_cond_branch_ofs_19 (value >> 2);
7069 put_aarch64_insn (buf, insn);
7070 }
7071 break;
7072
7073 case BFD_RELOC_AARCH64_TSTBR14:
7074 if (fixP->fx_done || !seg->use_rela_p)
7075 {
7076 if (value & 3)
7077 as_bad_where (fixP->fx_file, fixP->fx_line,
7078 _("conditional branch target not word aligned"));
7079 if (signed_overflow (value, 16))
7080 as_bad_where (fixP->fx_file, fixP->fx_line,
7081 _("conditional branch out of range"));
7082 insn = get_aarch64_insn (buf);
7083 insn |= encode_tst_branch_ofs_14 (value >> 2);
7084 put_aarch64_insn (buf, insn);
7085 }
7086 break;
7087
7088 case BFD_RELOC_AARCH64_CALL26:
7089 case BFD_RELOC_AARCH64_JUMP26:
7090 if (fixP->fx_done || !seg->use_rela_p)
7091 {
7092 if (value & 3)
7093 as_bad_where (fixP->fx_file, fixP->fx_line,
7094 _("branch target not word aligned"));
7095 if (signed_overflow (value, 28))
7096 as_bad_where (fixP->fx_file, fixP->fx_line,
7097 _("branch out of range"));
7098 insn = get_aarch64_insn (buf);
7099 insn |= encode_branch_ofs_26 (value >> 2);
7100 put_aarch64_insn (buf, insn);
7101 }
7102 break;
7103
7104 case BFD_RELOC_AARCH64_MOVW_G0:
7105 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7106 case BFD_RELOC_AARCH64_MOVW_G0_S:
7107 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7108 scale = 0;
7109 goto movw_common;
7110 case BFD_RELOC_AARCH64_MOVW_G1:
7111 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7112 case BFD_RELOC_AARCH64_MOVW_G1_S:
7113 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7114 scale = 16;
7115 goto movw_common;
7116 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7117 scale = 0;
7118 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7119 /* Should always be exported to object file, see
7120 aarch64_force_relocation(). */
7121 gas_assert (!fixP->fx_done);
7122 gas_assert (seg->use_rela_p);
7123 goto movw_common;
7124 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7125 scale = 16;
7126 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7127 /* Should always be exported to object file, see
7128 aarch64_force_relocation(). */
7129 gas_assert (!fixP->fx_done);
7130 gas_assert (seg->use_rela_p);
7131 goto movw_common;
7132 case BFD_RELOC_AARCH64_MOVW_G2:
7133 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7134 case BFD_RELOC_AARCH64_MOVW_G2_S:
7135 scale = 32;
7136 goto movw_common;
7137 case BFD_RELOC_AARCH64_MOVW_G3:
7138 scale = 48;
7139 movw_common:
7140 if (fixP->fx_done || !seg->use_rela_p)
7141 {
7142 insn = get_aarch64_insn (buf);
7143
7144 if (!fixP->fx_done)
7145 {
7146 /* REL signed addend must fit in 16 bits */
7147 if (signed_overflow (value, 16))
7148 as_bad_where (fixP->fx_file, fixP->fx_line,
7149 _("offset out of range"));
7150 }
7151 else
7152 {
7153 /* Check for overflow and scale. */
7154 switch (fixP->fx_r_type)
7155 {
7156 case BFD_RELOC_AARCH64_MOVW_G0:
7157 case BFD_RELOC_AARCH64_MOVW_G1:
7158 case BFD_RELOC_AARCH64_MOVW_G2:
7159 case BFD_RELOC_AARCH64_MOVW_G3:
7160 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7161 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7162 if (unsigned_overflow (value, scale + 16))
7163 as_bad_where (fixP->fx_file, fixP->fx_line,
7164 _("unsigned value out of range"));
7165 break;
7166 case BFD_RELOC_AARCH64_MOVW_G0_S:
7167 case BFD_RELOC_AARCH64_MOVW_G1_S:
7168 case BFD_RELOC_AARCH64_MOVW_G2_S:
7169 /* NOTE: We can only come here with movz or movn. */
7170 if (signed_overflow (value, scale + 16))
7171 as_bad_where (fixP->fx_file, fixP->fx_line,
7172 _("signed value out of range"));
7173 if (value < 0)
7174 {
7175 /* Force use of MOVN. */
7176 value = ~value;
7177 insn = reencode_movzn_to_movn (insn);
7178 }
7179 else
7180 {
7181 /* Force use of MOVZ. */
7182 insn = reencode_movzn_to_movz (insn);
7183 }
7184 break;
7185 default:
7186 /* Unchecked relocations. */
7187 break;
7188 }
7189 value >>= scale;
7190 }
7191
7192 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7193 insn |= encode_movw_imm (value & 0xffff);
7194
7195 put_aarch64_insn (buf, insn);
7196 }
7197 break;
7198
7199 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7200 fixP->fx_r_type = (ilp32_p
7201 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7202 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7203 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7204 /* Should always be exported to object file, see
7205 aarch64_force_relocation(). */
7206 gas_assert (!fixP->fx_done);
7207 gas_assert (seg->use_rela_p);
7208 break;
7209
7210 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7211 fixP->fx_r_type = (ilp32_p
7212 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7213 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
7214 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7215 /* Should always be exported to object file, see
7216 aarch64_force_relocation(). */
7217 gas_assert (!fixP->fx_done);
7218 gas_assert (seg->use_rela_p);
7219 break;
7220
7221 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7222 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7223 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7224 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7225 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7226 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7227 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7228 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7229 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7230 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7231 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7232 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7233 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7234 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7235 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7236 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7237 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7238 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7239 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7240 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7241 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7242 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7243 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7244 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7245 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7246 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7247 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7248 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7249 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7250 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7251 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7252 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7253 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7254 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7255 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7256 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7257 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7258 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7259 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7260 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7261 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7262 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7263 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7264 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7265 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7266 /* Should always be exported to object file, see
7267 aarch64_force_relocation(). */
7268 gas_assert (!fixP->fx_done);
7269 gas_assert (seg->use_rela_p);
7270 break;
7271
7272 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7273 /* Should always be exported to object file, see
7274 aarch64_force_relocation(). */
7275 fixP->fx_r_type = (ilp32_p
7276 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7277 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7278 gas_assert (!fixP->fx_done);
7279 gas_assert (seg->use_rela_p);
7280 break;
7281
7282 case BFD_RELOC_AARCH64_ADD_LO12:
7283 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7284 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7285 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7286 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7287 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7288 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7289 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7290 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7291 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7292 case BFD_RELOC_AARCH64_LDST128_LO12:
7293 case BFD_RELOC_AARCH64_LDST16_LO12:
7294 case BFD_RELOC_AARCH64_LDST32_LO12:
7295 case BFD_RELOC_AARCH64_LDST64_LO12:
7296 case BFD_RELOC_AARCH64_LDST8_LO12:
7297 /* Should always be exported to object file, see
7298 aarch64_force_relocation(). */
7299 gas_assert (!fixP->fx_done);
7300 gas_assert (seg->use_rela_p);
7301 break;
7302
7303 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7304 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7305 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7306 break;
7307
7308 case BFD_RELOC_UNUSED:
7309 /* An error will already have been reported. */
7310 break;
7311
7312 default:
7313 as_bad_where (fixP->fx_file, fixP->fx_line,
7314 _("unexpected %s fixup"),
7315 bfd_get_reloc_code_name (fixP->fx_r_type));
7316 break;
7317 }
7318
7319 apply_fix_return:
7320 /* Free the allocated the struct aarch64_inst.
7321 N.B. currently there are very limited number of fix-up types actually use
7322 this field, so the impact on the performance should be minimal . */
7323 if (fixP->tc_fix_data.inst != NULL)
7324 free (fixP->tc_fix_data.inst);
7325
7326 return;
7327 }
7328
7329 /* Translate internal representation of relocation info to BFD target
7330 format. */
7331
7332 arelent *
7333 tc_gen_reloc (asection * section, fixS * fixp)
7334 {
7335 arelent *reloc;
7336 bfd_reloc_code_real_type code;
7337
7338 reloc = XNEW (arelent);
7339
7340 reloc->sym_ptr_ptr = XNEW (asymbol *);
7341 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7342 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7343
7344 if (fixp->fx_pcrel)
7345 {
7346 if (section->use_rela_p)
7347 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7348 else
7349 fixp->fx_offset = reloc->address;
7350 }
7351 reloc->addend = fixp->fx_offset;
7352
7353 code = fixp->fx_r_type;
7354 switch (code)
7355 {
7356 case BFD_RELOC_16:
7357 if (fixp->fx_pcrel)
7358 code = BFD_RELOC_16_PCREL;
7359 break;
7360
7361 case BFD_RELOC_32:
7362 if (fixp->fx_pcrel)
7363 code = BFD_RELOC_32_PCREL;
7364 break;
7365
7366 case BFD_RELOC_64:
7367 if (fixp->fx_pcrel)
7368 code = BFD_RELOC_64_PCREL;
7369 break;
7370
7371 default:
7372 break;
7373 }
7374
7375 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7376 if (reloc->howto == NULL)
7377 {
7378 as_bad_where (fixp->fx_file, fixp->fx_line,
7379 _
7380 ("cannot represent %s relocation in this object file format"),
7381 bfd_get_reloc_code_name (code));
7382 return NULL;
7383 }
7384
7385 return reloc;
7386 }
7387
7388 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7389
7390 void
7391 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7392 {
7393 bfd_reloc_code_real_type type;
7394 int pcrel = 0;
7395
7396 /* Pick a reloc.
7397 FIXME: @@ Should look at CPU word size. */
7398 switch (size)
7399 {
7400 case 1:
7401 type = BFD_RELOC_8;
7402 break;
7403 case 2:
7404 type = BFD_RELOC_16;
7405 break;
7406 case 4:
7407 type = BFD_RELOC_32;
7408 break;
7409 case 8:
7410 type = BFD_RELOC_64;
7411 break;
7412 default:
7413 as_bad (_("cannot do %u-byte relocation"), size);
7414 type = BFD_RELOC_UNUSED;
7415 break;
7416 }
7417
7418 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7419 }
7420
/* Decide whether the fixup FIXP must be kept as a relocation for the
   linker (return non-zero) or may be resolved by the assembler.  */

int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  /* Fall back to the generic policy (e.g. weak/extern symbols).  */
  return generic_force_reloc (fixp);
}
7508
7509 #ifdef OBJ_ELF
7510
7511 const char *
7512 elf64_aarch64_target_format (void)
7513 {
7514 if (strcmp (TARGET_OS, "cloudabi") == 0)
7515 {
7516 /* FIXME: What to do for ilp32_p ? */
7517 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
7518 }
7519 if (target_big_endian)
7520 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7521 else
7522 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7523 }
7524
/* Target hook wrapping the generic ELF symbol frobbing; SYMP is the
   symbol being finalized and *PUNTP is set if it should be punted.  */

void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
7530 #endif
7531
/* MD interface: Finalization.  */

/* A good place to do this, although this was probably not intended
   for this kind of use.  We need to dump the literal pool before
   references are made to a null symbol pointer.  */

void
aarch64_cleanup (void)
{
  literal_pool *pool;

  /* Flush every outstanding literal pool via the .ltorg handler.  */
  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
      s_ltorg (0);
    }
}
7550
7551 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections without frag chains.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded inside this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly at the frag boundary; walk the following
	 (possibly empty) frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
7615 #endif
7616
/* Adjust the symbol table.  Target hook run before the symbol table is
   written out.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
7629
/* Insert (KEY, VALUE) into TABLE, reporting any insertion failure.
   NOTE(review): failure is reported with a bare printf and assembly
   continues with the entry missing -- confirm as_fatal would not be
   more appropriate here.  */
static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err;

  hash_err = hash_insert (table, key, value);
  if (hash_err)
    printf ("Internal Error: Can't hash %s\n", key);
}
7639
7640 static void
7641 fill_instruction_hash_table (void)
7642 {
7643 aarch64_opcode *opcode = aarch64_opcode_table;
7644
7645 while (opcode->name != NULL)
7646 {
7647 templates *templ, *new_templ;
7648 templ = hash_find (aarch64_ops_hsh, opcode->name);
7649
7650 new_templ = XNEW (templates);
7651 new_templ->opcode = opcode;
7652 new_templ->next = NULL;
7653
7654 if (!templ)
7655 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7656 else
7657 {
7658 new_templ->next = templ->next;
7659 templ->next = new_templ;
7660 }
7661 ++opcode;
7662 }
7663 }
7664
/* Copy at most NUM characters of the NUL-terminated string SRC into
   DST, upper-casing each one, and NUL-terminate the result.  DST must
   have room for NUM + 1 characters.  */

static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t remaining = num;

  while (remaining-- > 0 && *src != '\0')
    *dst++ = TOUPPER (*src++);
  *dst = '\0';
}
7673
7674 /* Assume STR point to a lower-case string, allocate, convert and return
7675 the corresponding upper-case string. */
7676 static inline const char*
7677 get_upper_str (const char *str)
7678 {
7679 char *ret;
7680 size_t len = strlen (str);
7681 ret = XNEWVEC (char, len + 1);
7682 convert_to_upper (ret, str, len);
7683 return ret;
7684 }
7685
7686 /* MD interface: Initialization. */
7687
7688 void
7689 md_begin (void)
7690 {
7691 unsigned mach;
7692 unsigned int i;
7693
7694 if ((aarch64_ops_hsh = hash_new ()) == NULL
7695 || (aarch64_cond_hsh = hash_new ()) == NULL
7696 || (aarch64_shift_hsh = hash_new ()) == NULL
7697 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7698 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7699 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7700 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7701 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7702 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7703 || (aarch64_reg_hsh = hash_new ()) == NULL
7704 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7705 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7706 || (aarch64_pldop_hsh = hash_new ()) == NULL
7707 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
7708 as_fatal (_("virtual memory exhausted"));
7709
7710 fill_instruction_hash_table ();
7711
7712 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7713 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7714 (void *) (aarch64_sys_regs + i));
7715
7716 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7717 checked_hash_insert (aarch64_pstatefield_hsh,
7718 aarch64_pstatefields[i].name,
7719 (void *) (aarch64_pstatefields + i));
7720
7721 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
7722 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7723 aarch64_sys_regs_ic[i].name,
7724 (void *) (aarch64_sys_regs_ic + i));
7725
7726 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
7727 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7728 aarch64_sys_regs_dc[i].name,
7729 (void *) (aarch64_sys_regs_dc + i));
7730
7731 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
7732 checked_hash_insert (aarch64_sys_regs_at_hsh,
7733 aarch64_sys_regs_at[i].name,
7734 (void *) (aarch64_sys_regs_at + i));
7735
7736 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
7737 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7738 aarch64_sys_regs_tlbi[i].name,
7739 (void *) (aarch64_sys_regs_tlbi + i));
7740
7741 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7742 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7743 (void *) (reg_names + i));
7744
7745 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7746 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7747 (void *) (nzcv_names + i));
7748
7749 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7750 {
7751 const char *name = aarch64_operand_modifiers[i].name;
7752 checked_hash_insert (aarch64_shift_hsh, name,
7753 (void *) (aarch64_operand_modifiers + i));
7754 /* Also hash the name in the upper case. */
7755 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7756 (void *) (aarch64_operand_modifiers + i));
7757 }
7758
7759 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7760 {
7761 unsigned int j;
7762 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7763 the same condition code. */
7764 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7765 {
7766 const char *name = aarch64_conds[i].names[j];
7767 if (name == NULL)
7768 break;
7769 checked_hash_insert (aarch64_cond_hsh, name,
7770 (void *) (aarch64_conds + i));
7771 /* Also hash the name in the upper case. */
7772 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7773 (void *) (aarch64_conds + i));
7774 }
7775 }
7776
7777 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7778 {
7779 const char *name = aarch64_barrier_options[i].name;
7780 /* Skip xx00 - the unallocated values of option. */
7781 if ((i & 0x3) == 0)
7782 continue;
7783 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7784 (void *) (aarch64_barrier_options + i));
7785 /* Also hash the name in the upper case. */
7786 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7787 (void *) (aarch64_barrier_options + i));
7788 }
7789
7790 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7791 {
7792 const char* name = aarch64_prfops[i].name;
7793 /* Skip the unallocated hint encodings. */
7794 if (name == NULL)
7795 continue;
7796 checked_hash_insert (aarch64_pldop_hsh, name,
7797 (void *) (aarch64_prfops + i));
7798 /* Also hash the name in the upper case. */
7799 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7800 (void *) (aarch64_prfops + i));
7801 }
7802
7803 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
7804 {
7805 const char* name = aarch64_hint_options[i].name;
7806
7807 checked_hash_insert (aarch64_hint_opt_hsh, name,
7808 (void *) (aarch64_hint_options + i));
7809 /* Also hash the name in the upper case. */
7810 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7811 (void *) (aarch64_hint_options + i));
7812 }
7813
7814 /* Set the cpu variant based on the command-line options. */
7815 if (!mcpu_cpu_opt)
7816 mcpu_cpu_opt = march_cpu_opt;
7817
7818 if (!mcpu_cpu_opt)
7819 mcpu_cpu_opt = &cpu_default;
7820
7821 cpu_variant = *mcpu_cpu_opt;
7822
7823 /* Record the CPU type. */
7824 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7825
7826 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7827 }
7828
/* Command line processing.  */

/* Short options: only -m<arg> is accepted here; the argument is decoded
   against the tables below in md_parse_option.  */
const char *md_shortopts = "m:";

/* Define only the endianness option(s) the target supports: both for a
   bi-endian gas, otherwise just the one matching the configured default
   byte order.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
7855
/* A simple on/off assembler option: when OPTION is matched,
   *VAR is set to VALUE.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Table of the simple -m options; terminated by an all-NULL entry.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
7878
/* A CPU selectable with -mcpu=; VALUE is the full feature set the CPU
   implies.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Terminated by a NULL-name entry.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Qualcomm QDF24XX"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
7925
/* An architecture selectable with -march=; VALUE is the feature set
   the architecture implies.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Terminated by a NULL-name entry.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {NULL, AARCH64_ARCH_NONE}
};
7941
/* ISA extensions.  */
/* An extension usable in the '+ext'/'+noext' syntax: VALUE is the
   feature bit(s) the extension controls and REQUIRE the features it
   depends on.  REQUIRE drives the transitive closures computed by
   aarch64_feature_enable_set/aarch64_feature_disable_set.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* Extension table; terminated by a NULL-name entry.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_ARCH_NONE},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_ARCH_NONE},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
7975
/* An option taking an argument, e.g. -mcpu=<name>: OPTION is matched
   as a prefix and FUNC is handed the text that follows it.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
7983
7984 /* Transitive closure of features depending on set. */
7985 static aarch64_feature_set
7986 aarch64_feature_disable_set (aarch64_feature_set set)
7987 {
7988 const struct aarch64_option_cpu_value_table *opt;
7989 aarch64_feature_set prev = 0;
7990
7991 while (prev != set) {
7992 prev = set;
7993 for (opt = aarch64_features; opt->name != NULL; opt++)
7994 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
7995 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
7996 }
7997 return set;
7998 }
7999
8000 /* Transitive closure of dependencies of set. */
8001 static aarch64_feature_set
8002 aarch64_feature_enable_set (aarch64_feature_set set)
8003 {
8004 const struct aarch64_option_cpu_value_table *opt;
8005 aarch64_feature_set prev = 0;
8006
8007 while (prev != set) {
8008 prev = set;
8009 for (opt = aarch64_features; opt->name != NULL; opt++)
8010 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8011 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8012 }
8013 return set;
8014 }
8015
8016 static int
8017 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8018 bfd_boolean ext_only)
8019 {
8020 /* We insist on extensions being added before being removed. We achieve
8021 this by using the ADDING_VALUE variable to indicate whether we are
8022 adding an extension (1) or removing it (0) and only allowing it to
8023 change in the order -1 -> 1 -> 0. */
8024 int adding_value = -1;
8025 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8026
8027 /* Copy the feature set, so that we can modify it. */
8028 *ext_set = **opt_p;
8029 *opt_p = ext_set;
8030
8031 while (str != NULL && *str != 0)
8032 {
8033 const struct aarch64_option_cpu_value_table *opt;
8034 const char *ext = NULL;
8035 int optlen;
8036
8037 if (!ext_only)
8038 {
8039 if (*str != '+')
8040 {
8041 as_bad (_("invalid architectural extension"));
8042 return 0;
8043 }
8044
8045 ext = strchr (++str, '+');
8046 }
8047
8048 if (ext != NULL)
8049 optlen = ext - str;
8050 else
8051 optlen = strlen (str);
8052
8053 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8054 {
8055 if (adding_value != 0)
8056 adding_value = 0;
8057 optlen -= 2;
8058 str += 2;
8059 }
8060 else if (optlen > 0)
8061 {
8062 if (adding_value == -1)
8063 adding_value = 1;
8064 else if (adding_value != 1)
8065 {
8066 as_bad (_("must specify extensions to add before specifying "
8067 "those to remove"));
8068 return FALSE;
8069 }
8070 }
8071
8072 if (optlen == 0)
8073 {
8074 as_bad (_("missing architectural extension"));
8075 return 0;
8076 }
8077
8078 gas_assert (adding_value != -1);
8079
8080 for (opt = aarch64_features; opt->name != NULL; opt++)
8081 if (strncmp (opt->name, str, optlen) == 0)
8082 {
8083 aarch64_feature_set set;
8084
8085 /* Add or remove the extension. */
8086 if (adding_value)
8087 {
8088 set = aarch64_feature_enable_set (opt->value);
8089 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8090 }
8091 else
8092 {
8093 set = aarch64_feature_disable_set (opt->value);
8094 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8095 }
8096 break;
8097 }
8098
8099 if (opt->name == NULL)
8100 {
8101 as_bad (_("unknown architectural extension `%s'"), str);
8102 return 0;
8103 }
8104
8105 str = ext;
8106 };
8107
8108 return 1;
8109 }
8110
8111 static int
8112 aarch64_parse_cpu (const char *str)
8113 {
8114 const struct aarch64_cpu_option_table *opt;
8115 const char *ext = strchr (str, '+');
8116 size_t optlen;
8117
8118 if (ext != NULL)
8119 optlen = ext - str;
8120 else
8121 optlen = strlen (str);
8122
8123 if (optlen == 0)
8124 {
8125 as_bad (_("missing cpu name `%s'"), str);
8126 return 0;
8127 }
8128
8129 for (opt = aarch64_cpus; opt->name != NULL; opt++)
8130 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8131 {
8132 mcpu_cpu_opt = &opt->value;
8133 if (ext != NULL)
8134 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
8135
8136 return 1;
8137 }
8138
8139 as_bad (_("unknown cpu `%s'"), str);
8140 return 0;
8141 }
8142
8143 static int
8144 aarch64_parse_arch (const char *str)
8145 {
8146 const struct aarch64_arch_option_table *opt;
8147 const char *ext = strchr (str, '+');
8148 size_t optlen;
8149
8150 if (ext != NULL)
8151 optlen = ext - str;
8152 else
8153 optlen = strlen (str);
8154
8155 if (optlen == 0)
8156 {
8157 as_bad (_("missing architecture name `%s'"), str);
8158 return 0;
8159 }
8160
8161 for (opt = aarch64_archs; opt->name != NULL; opt++)
8162 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8163 {
8164 march_cpu_opt = &opt->value;
8165 if (ext != NULL)
8166 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8167
8168 return 1;
8169 }
8170
8171 as_bad (_("unknown architecture `%s'\n"), str);
8172 return 0;
8173 }
8174
/* ABIs.  */
/* A data-model name usable with -mabi=.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* Note: unlike the other option tables this one has no NULL
   terminator; it is walked with ARRAY_SIZE.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32",	AARCH64_ABI_ILP32},
  {"lp64",	AARCH64_ABI_LP64},
};
8186
8187 static int
8188 aarch64_parse_abi (const char *str)
8189 {
8190 unsigned int i;
8191
8192 if (str[0] == '\0')
8193 {
8194 as_bad (_("missing abi name `%s'"), str);
8195 return 0;
8196 }
8197
8198 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8199 if (strcmp (str, aarch64_abis[i].name) == 0)
8200 {
8201 aarch64_abi = aarch64_abis[i].value;
8202 return 1;
8203 }
8204
8205 as_bad (_("unknown abi `%s'\n"), str);
8206 return 0;
8207 }
8208
/* Options taking an argument; the matched prefix is stripped and the
   remainder handed to the per-option parser.  Terminated by an
   all-NULL entry.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
8220
/* Handle one command-line option.  C is the option character (for the
   table-driven cases, the first character of the option name) and ARG
   its argument, if any.  Returns nonzero when the option was
   recognized and consumed.  */

int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple on/off options: C must be the option's
	 first character and ARG must exactly match the remainder.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text after the
		 matched prefix; the "- 1" accounts for ARG lacking the
		 leading option character held in C.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
8288
8289 void
8290 md_show_usage (FILE * fp)
8291 {
8292 struct aarch64_option_table *opt;
8293 struct aarch64_long_option_table *lopt;
8294
8295 fprintf (fp, _(" AArch64-specific assembler options:\n"));
8296
8297 for (opt = aarch64_opts; opt->option != NULL; opt++)
8298 if (opt->help != NULL)
8299 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
8300
8301 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8302 if (lopt->help != NULL)
8303 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
8304
8305 #ifdef OPTION_EB
8306 fprintf (fp, _("\
8307 -EB assemble code for a big-endian cpu\n"));
8308 #endif
8309
8310 #ifdef OPTION_EL
8311 fprintf (fp, _("\
8312 -EL assemble code for a little-endian cpu\n"));
8313 #endif
8314 }
8315
8316 /* Parse a .cpu directive. */
8317
8318 static void
8319 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
8320 {
8321 const struct aarch64_cpu_option_table *opt;
8322 char saved_char;
8323 char *name;
8324 char *ext;
8325 size_t optlen;
8326
8327 name = input_line_pointer;
8328 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8329 input_line_pointer++;
8330 saved_char = *input_line_pointer;
8331 *input_line_pointer = 0;
8332
8333 ext = strchr (name, '+');
8334
8335 if (ext != NULL)
8336 optlen = ext - name;
8337 else
8338 optlen = strlen (name);
8339
8340 /* Skip the first "all" entry. */
8341 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
8342 if (strlen (opt->name) == optlen
8343 && strncmp (name, opt->name, optlen) == 0)
8344 {
8345 mcpu_cpu_opt = &opt->value;
8346 if (ext != NULL)
8347 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8348 return;
8349
8350 cpu_variant = *mcpu_cpu_opt;
8351
8352 *input_line_pointer = saved_char;
8353 demand_empty_rest_of_line ();
8354 return;
8355 }
8356 as_bad (_("unknown cpu `%s'"), name);
8357 *input_line_pointer = saved_char;
8358 ignore_rest_of_line ();
8359 }
8360
8361
8362 /* Parse a .arch directive. */
8363
8364 static void
8365 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8366 {
8367 const struct aarch64_arch_option_table *opt;
8368 char saved_char;
8369 char *name;
8370 char *ext;
8371 size_t optlen;
8372
8373 name = input_line_pointer;
8374 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8375 input_line_pointer++;
8376 saved_char = *input_line_pointer;
8377 *input_line_pointer = 0;
8378
8379 ext = strchr (name, '+');
8380
8381 if (ext != NULL)
8382 optlen = ext - name;
8383 else
8384 optlen = strlen (name);
8385
8386 /* Skip the first "all" entry. */
8387 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8388 if (strlen (opt->name) == optlen
8389 && strncmp (name, opt->name, optlen) == 0)
8390 {
8391 mcpu_cpu_opt = &opt->value;
8392 if (ext != NULL)
8393 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8394 return;
8395
8396 cpu_variant = *mcpu_cpu_opt;
8397
8398 *input_line_pointer = saved_char;
8399 demand_empty_rest_of_line ();
8400 return;
8401 }
8402
8403 as_bad (_("unknown architecture `%s'\n"), name);
8404 *input_line_pointer = saved_char;
8405 ignore_rest_of_line ();
8406 }
8407
8408 /* Parse a .arch_extension directive. */
8409
8410 static void
8411 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8412 {
8413 char saved_char;
8414 char *ext = input_line_pointer;;
8415
8416 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8417 input_line_pointer++;
8418 saved_char = *input_line_pointer;
8419 *input_line_pointer = 0;
8420
8421 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8422 return;
8423
8424 cpu_variant = *mcpu_cpu_opt;
8425
8426 *input_line_pointer = saved_char;
8427 demand_empty_rest_of_line ();
8428 }
8429
/* Copy symbol information.  */

/* Copy the AArch64-specific symbol flag word from SRC to DEST.
   NOTE(review): presumably invoked via the target's
   COPY_SYMBOL_ATTRIBUTES hook when one symbol is set from another —
   confirm against tc-aarch64.h.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
This page took 0.200749 seconds and 5 git commands to generate.