[GAS][AARCH64]Add BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC support.
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
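/* For illustration only (a sketch, not taken from this file): the two data
   models differ in the width of `long' and of pointers, e.g.

     sizeof (int)     LP64: 4   ILP32: 4
     sizeof (long)    LP64: 8   ILP32: 4
     sizeof (void *)  LP64: 8   ILP32: 4

   so code elsewhere typically tests `ilp32_p' whenever an address-sized
   choice has to be made.  */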
78
79 enum neon_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q
87 };
88
89 /* Bits for DEFINED field in neon_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92
93 struct neon_type_el
94 {
95 enum neon_el_type type;
96 unsigned char defined;
97 unsigned width;
98 int64_t index;
99 };
100
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
102
103 struct reloc
104 {
105 bfd_reloc_code_real_type type;
106 expressionS exp;
107 int pc_rel;
108 enum aarch64_opnd opnd;
109 uint32_t flags;
110 unsigned need_libopcodes_p : 1;
111 };
112
113 struct aarch64_instruction
114 {
115 /* libopcodes structure for instruction intermediate representation. */
116 aarch64_inst base;
117 /* Record assembly errors found during the parsing. */
118 struct
119 {
120 enum aarch64_operand_error_kind kind;
121 const char *error;
122 } parsing_error;
123 /* The condition that appears in the assembly line. */
124 int cond;
125 /* Relocation information (including the GAS internal fixup). */
126 struct reloc reloc;
127 /* Need to generate an immediate in the literal pool. */
128 unsigned gen_lit_pool : 1;
129 };
130
131 typedef struct aarch64_instruction aarch64_instruction;
132
133 static aarch64_instruction inst;
134
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
138 /* Diagnostics inline function utilities.
139
140 These are lightweight utilities which should only be called by parse_operands
141 and other parsers. GAS processes each assembly line by parsing it against
142 instruction template(s); in the case of multiple templates (for the same
143 mnemonic name), those templates are tried one by one until one succeeds or
144 all fail. An assembly line may fail a few templates before being
145 successfully parsed; an error saved here in most cases is not a user error
146 but an error indicating that the current template is not the right template.
147 Therefore it is very important that errors can be saved at a low cost during
148 parsing; we don't want to slow down the whole parse by recording
149 non-user errors in detail.
150
151 Remember that the objective is to help GAS pick the most appropriate
152 error message in the case of multiple templates, e.g. FMOV, which has 8
153 templates. */
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline enum aarch64_operand_error_kind
175 get_error_kind (void)
176 {
177 return inst.parsing_error.kind;
178 }
179
180 static inline void
181 set_error (enum aarch64_operand_error_kind kind, const char *error)
182 {
183 inst.parsing_error.kind = kind;
184 inst.parsing_error.error = error;
185 }
186
187 static inline void
188 set_recoverable_error (const char *error)
189 {
190 set_error (AARCH64_OPDE_RECOVERABLE, error);
191 }
192
193 /* Use the DESC field of the corresponding aarch64_operand entry to compose
194 the error message. */
195 static inline void
196 set_default_error (void)
197 {
198 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
199 }
200
201 static inline void
202 set_syntax_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
205 }
206
207 static inline void
208 set_first_syntax_error (const char *error)
209 {
210 if (! error_p ())
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
212 }
213
214 static inline void
215 set_fatal_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
218 }
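/* A minimal usage sketch (illustrative only, not a real parser in this
   file): a parser records at most one error per template attempt, e.g.

     if (! skip_past_char (&str, ','))
       {
         set_first_syntax_error (_("expected a comma"));
         return FALSE;
       }

   and the driver later inspects error_p () / get_error_message () to decide
   which diagnostic, if any, to hand to as_bad.  */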
219 \f
220 /* Number of littlenums required to hold an extended precision number. */
221 #define MAX_LITTLENUMS 6
222
223 /* Return value for certain parsers when the parsing fails; those parsers
224 return the information of the parsed result, e.g. register number, on
225 success. */
226 #define PARSE_FAIL -1
227
228 /* This is an invalid condition code that means no conditional field is
229 present. */
230 #define COND_ALWAYS 0x10
231
232 typedef struct
233 {
234 const char *template;
235 unsigned long value;
236 } asm_barrier_opt;
237
238 typedef struct
239 {
240 const char *template;
241 uint32_t value;
242 } asm_nzcv;
243
244 struct reloc_entry
245 {
246 char *name;
247 bfd_reloc_code_real_type reloc;
248 };
249
250 /* Structure for a hash table entry for a register. */
251 typedef struct
252 {
253 const char *name;
254 unsigned char number;
255 unsigned char type;
256 unsigned char builtin;
257 } reg_entry;
258
259 /* Macros to define the register types and masks for the purpose
260 of parsing. */
261
262 #undef AARCH64_REG_TYPES
263 #define AARCH64_REG_TYPES \
264 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
265 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
266 BASIC_REG_TYPE(SP_32) /* wsp */ \
267 BASIC_REG_TYPE(SP_64) /* sp */ \
268 BASIC_REG_TYPE(Z_32) /* wzr */ \
269 BASIC_REG_TYPE(Z_64) /* xzr */ \
270 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
271 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
272 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
273 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
274 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
275 BASIC_REG_TYPE(CN) /* c[0-7] */ \
276 BASIC_REG_TYPE(VN) /* v[0-31] */ \
277 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
278 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
279 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
280 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
281 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
282 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
283 /* Typecheck: any [BHSDQ]P FP. */ \
284 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
285 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
286 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
287 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
289 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
290 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
291 /* Any integer register; used for error messages only. */ \
292 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
293 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Pseudo type to mark the end of the enumerator sequence. */ \
296 BASIC_REG_TYPE(MAX)
297
298 #undef BASIC_REG_TYPE
299 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
300 #undef MULTI_REG_TYPE
301 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
302
303 /* Register type enumerators. */
304 typedef enum
305 {
306 /* A list of REG_TYPE_*. */
307 AARCH64_REG_TYPES
308 } aarch64_reg_type;
309
310 #undef BASIC_REG_TYPE
311 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
312 #undef REG_TYPE
313 #define REG_TYPE(T) (1 << REG_TYPE_##T)
314 #undef MULTI_REG_TYPE
315 #define MULTI_REG_TYPE(T,V) V,
316
317 /* Values indexed by aarch64_reg_type to assist the type checking. */
318 static const unsigned reg_type_masks[] =
319 {
320 AARCH64_REG_TYPES
321 };
322
323 #undef BASIC_REG_TYPE
324 #undef REG_TYPE
325 #undef MULTI_REG_TYPE
326 #undef AARCH64_REG_TYPES
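/* To illustrate how the X-macro above expands (sketch): for the entry
   MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) the first
   expansion contributes the enumerator REG_TYPE_R64_SP, while the second
   contributes the mask value

     (1 << REG_TYPE_R_64) | (1 << REG_TYPE_SP_64)

   at the same index in reg_type_masks, so reg_type_masks[REG_TYPE_R64_SP]
   describes exactly the register classes that R64_SP accepts.  */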
327
328 /* Diagnostics used when we don't get a register of the expected type.
329 Note: this has to be kept synchronized with the aarch64_reg_type definitions
330 above. */
331 static const char *
332 get_reg_expected_msg (aarch64_reg_type reg_type)
333 {
334 const char *msg;
335
336 switch (reg_type)
337 {
338 case REG_TYPE_R_32:
339 msg = N_("integer 32-bit register expected");
340 break;
341 case REG_TYPE_R_64:
342 msg = N_("integer 64-bit register expected");
343 break;
344 case REG_TYPE_R_N:
345 msg = N_("integer register expected");
346 break;
347 case REG_TYPE_R_Z_SP:
348 msg = N_("integer, zero or SP register expected");
349 break;
350 case REG_TYPE_FP_B:
351 msg = N_("8-bit SIMD scalar register expected");
352 break;
353 case REG_TYPE_FP_H:
354 msg = N_("16-bit SIMD scalar or floating-point half precision "
355 "register expected");
356 break;
357 case REG_TYPE_FP_S:
358 msg = N_("32-bit SIMD scalar or floating-point single precision "
359 "register expected");
360 break;
361 case REG_TYPE_FP_D:
362 msg = N_("64-bit SIMD scalar or floating-point double precision "
363 "register expected");
364 break;
365 case REG_TYPE_FP_Q:
366 msg = N_("128-bit SIMD scalar or floating-point quad precision "
367 "register expected");
368 break;
369 case REG_TYPE_CN:
370 msg = N_("C0 - C15 expected");
371 break;
372 case REG_TYPE_R_Z_BHSDQ_V:
373 msg = N_("register expected");
374 break;
375 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
376 msg = N_("SIMD scalar or floating-point register expected");
377 break;
378 case REG_TYPE_VN: /* any V reg */
379 msg = N_("vector register expected");
380 break;
381 default:
382 as_fatal (_("invalid register type %d"), reg_type);
383 }
384 return msg;
385 }
386
387 /* Some well known registers that we refer to directly elsewhere. */
388 #define REG_SP 31
389
390 /* Instructions take 4 bytes in the object file. */
391 #define INSN_SIZE 4
392
393 /* Define some common error messages. */
394 #define BAD_SP _("SP not allowed here")
395
396 static struct hash_control *aarch64_ops_hsh;
397 static struct hash_control *aarch64_cond_hsh;
398 static struct hash_control *aarch64_shift_hsh;
399 static struct hash_control *aarch64_sys_regs_hsh;
400 static struct hash_control *aarch64_pstatefield_hsh;
401 static struct hash_control *aarch64_sys_regs_ic_hsh;
402 static struct hash_control *aarch64_sys_regs_dc_hsh;
403 static struct hash_control *aarch64_sys_regs_at_hsh;
404 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
405 static struct hash_control *aarch64_reg_hsh;
406 static struct hash_control *aarch64_barrier_opt_hsh;
407 static struct hash_control *aarch64_nzcv_hsh;
408 static struct hash_control *aarch64_pldop_hsh;
409
410 /* Stuff needed to resolve the label ambiguity
411 As:
412 ...
413 label: <insn>
414 may differ from:
415 ...
416 label:
417 <insn> */
418
419 static symbolS *last_label_seen;
420
421 /* Literal pool structure. Held on a per-section
422 and per-sub-section basis. */
423
424 #define MAX_LITERAL_POOL_SIZE 1024
425 typedef struct literal_expression
426 {
427 expressionS exp;
428 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
429 LITTLENUM_TYPE * bignum;
430 } literal_expression;
431
432 typedef struct literal_pool
433 {
434 literal_expression literals[MAX_LITERAL_POOL_SIZE];
435 unsigned int next_free_entry;
436 unsigned int id;
437 symbolS *symbol;
438 segT section;
439 subsegT sub_section;
440 int size;
441 struct literal_pool *next;
442 } literal_pool;
443
444 /* Pointer to a linked list of literal pools. */
445 static literal_pool *list_of_pools = NULL;
446 \f
447 /* Pure syntax. */
448
449 /* This array holds the chars that always start a comment. If the
450 pre-processor is disabled, these aren't very useful. */
451 const char comment_chars[] = "";
452
453 /* This array holds the chars that only start a comment at the beginning of
454 a line. If the line seems to have the form '# 123 filename'
455 .line and .file directives will appear in the pre-processed output. */
456 /* Note that input_file.c hand checks for '#' at the beginning of the
457 first line of the input file. This is because the compiler outputs
458 #NO_APP at the beginning of its output. */
459 /* Also note that comments like this one will always work. */
460 const char line_comment_chars[] = "#";
461
462 const char line_separator_chars[] = ";";
463
464 /* Chars that can be used to separate the mantissa
465 from the exponent in floating point numbers. */
466 const char EXP_CHARS[] = "eE";
467
468 /* Chars that mean this number is a floating point constant. */
469 /* As in 0f12.456 */
470 /* or 0d1.2345e12 */
471
472 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
473
474 /* Prefix character that indicates the start of an immediate value. */
475 #define is_immediate_prefix(C) ((C) == '#')
476
477 /* Separator character handling. */
478
479 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
480
481 static inline bfd_boolean
482 skip_past_char (char **str, char c)
483 {
484 if (**str == c)
485 {
486 (*str)++;
487 return TRUE;
488 }
489 else
490 return FALSE;
491 }
492
493 #define skip_past_comma(str) skip_past_char (str, ',')
494
495 /* Arithmetic expressions (possibly involving symbols). */
496
497 static bfd_boolean in_my_get_expression_p = FALSE;
498
499 /* Third argument to my_get_expression. */
500 #define GE_NO_PREFIX 0
501 #define GE_OPT_PREFIX 1
502
503 /* Return TRUE if the string pointed to by *STR is successfully parsed
504 as a valid expression; *EP will be filled with the information of
505 such an expression. Otherwise return FALSE. */
506
507 static bfd_boolean
508 my_get_expression (expressionS * ep, char **str, int prefix_mode,
509 int reject_absent)
510 {
511 char *save_in;
512 segT seg;
513 int prefix_present_p = 0;
514
515 switch (prefix_mode)
516 {
517 case GE_NO_PREFIX:
518 break;
519 case GE_OPT_PREFIX:
520 if (is_immediate_prefix (**str))
521 {
522 (*str)++;
523 prefix_present_p = 1;
524 }
525 break;
526 default:
527 abort ();
528 }
529
530 memset (ep, 0, sizeof (expressionS));
531
532 save_in = input_line_pointer;
533 input_line_pointer = *str;
534 in_my_get_expression_p = TRUE;
535 seg = expression (ep);
536 in_my_get_expression_p = FALSE;
537
538 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
539 {
540 /* We found a bad expression in md_operand(). */
541 *str = input_line_pointer;
542 input_line_pointer = save_in;
543 if (prefix_present_p && ! error_p ())
544 set_fatal_syntax_error (_("bad expression"));
545 else
546 set_first_syntax_error (_("bad expression"));
547 return FALSE;
548 }
549
550 #ifdef OBJ_AOUT
551 if (seg != absolute_section
552 && seg != text_section
553 && seg != data_section
554 && seg != bss_section && seg != undefined_section)
555 {
556 set_syntax_error (_("bad segment"));
557 *str = input_line_pointer;
558 input_line_pointer = save_in;
559 return FALSE;
560 }
561 #else
562 (void) seg;
563 #endif
564
565 *str = input_line_pointer;
566 input_line_pointer = save_in;
567 return TRUE;
568 }
569
570 /* Turn a string in input_line_pointer into a floating point constant
571 of type TYPE, and store the appropriate bytes in *LITP. The number
572 of LITTLENUMS emitted is stored in *SIZEP. An error message is
573 returned, or NULL on OK. */
574
575 char *
576 md_atof (int type, char *litP, int *sizeP)
577 {
578 return ieee_md_atof (type, litP, sizeP, target_big_endian);
579 }
580
581 /* We handle all bad expressions here, so that we can report the faulty
582 instruction in the error message. */
583 void
584 md_operand (expressionS * exp)
585 {
586 if (in_my_get_expression_p)
587 exp->X_op = O_illegal;
588 }
589
590 /* Immediate values. */
591
592 /* Errors may be set multiple times during parsing or bit encoding
593 (particularly in the Neon bits), but usually the earliest error which is set
594 will be the most meaningful. Avoid overwriting it with later (cascading)
595 errors by calling this function. */
596
597 static void
598 first_error (const char *error)
599 {
600 if (! error_p ())
601 set_syntax_error (error);
602 }
603
604 /* Similar to first_error, but this function accepts a formatted error
605 message. */
606 static void
607 first_error_fmt (const char *format, ...)
608 {
609 va_list args;
610 enum
611 { size = 100 };
612 /* N.B. this single buffer will not cause error messages for different
613 instructions to pollute each other; this is because at the end of
614 processing of each assembly line, the error message, if any, will be
615 collected by as_bad. */
616 static char buffer[size];
617
618 if (! error_p ())
619 {
620 int ret ATTRIBUTE_UNUSED;
621 va_start (args, format);
622 ret = vsnprintf (buffer, size, format, args);
623 know (ret <= size - 1 && ret >= 0);
624 va_end (args);
625 set_syntax_error (buffer);
626 }
627 }
628
629 /* Register parsing. */
630
631 /* Generic register parser which is called by other specialized
632 register parsers.
633 CCP points to what should be the beginning of a register name.
634 If it is indeed a valid register name, advance CCP over it and
635 return the reg_entry structure; otherwise return NULL.
636 It does not issue diagnostics. */
637
638 static reg_entry *
639 parse_reg (char **ccp)
640 {
641 char *start = *ccp;
642 char *p;
643 reg_entry *reg;
644
645 #ifdef REGISTER_PREFIX
646 if (*start != REGISTER_PREFIX)
647 return NULL;
648 start++;
649 #endif
650
651 p = start;
652 if (!ISALPHA (*p) || !is_name_beginner (*p))
653 return NULL;
654
655 do
656 p++;
657 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
658
659 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
660
661 if (!reg)
662 return NULL;
663
664 *ccp = p;
665 return reg;
666 }
667
668 /* Return TRUE if REG->TYPE is compatible with the required register
669 type TYPE; otherwise return FALSE. */
670 static bfd_boolean
671 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
672 {
673 if (reg->type == type)
674 return TRUE;
675
676 switch (type)
677 {
678 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
679 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
680 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
681 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
682 case REG_TYPE_VN: /* Vector register. */
683 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
684 return ((reg_type_masks[reg->type] & reg_type_masks[type])
685 == reg_type_masks[reg->type]);
686 default:
687 as_fatal ("unhandled type %d", type);
688 abort ();
689 }
690 }
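/* Worked example (illustrative): for `reg' parsed from "xzr" we have
   reg->type == REG_TYPE_Z_64, so checking it against REG_TYPE_R_Z_SP
   computes

     reg_type_masks[REG_TYPE_Z_64] & reg_type_masks[REG_TYPE_R_Z_SP]
       == reg_type_masks[REG_TYPE_Z_64]

   which holds because R_Z_SP includes Z_64, and the check succeeds; the
   same register checked against REG_TYPE_R64_SP fails, since that mask
   excludes the zero registers.  */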
691
692 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
693 Return the register number otherwise. *ISREG32 is set to one if the
694 register is 32-bit wide; *ISREGZERO is set to one if the register is
695 of type Z_32 or Z_64.
696 Note that this function does not issue any diagnostics. */
697
698 static int
699 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
700 int *isreg32, int *isregzero)
701 {
702 char *str = *ccp;
703 const reg_entry *reg = parse_reg (&str);
704
705 if (reg == NULL)
706 return PARSE_FAIL;
707
708 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
709 return PARSE_FAIL;
710
711 switch (reg->type)
712 {
713 case REG_TYPE_SP_32:
714 case REG_TYPE_SP_64:
715 if (reject_sp)
716 return PARSE_FAIL;
717 *isreg32 = reg->type == REG_TYPE_SP_32;
718 *isregzero = 0;
719 break;
720 case REG_TYPE_R_32:
721 case REG_TYPE_R_64:
722 *isreg32 = reg->type == REG_TYPE_R_32;
723 *isregzero = 0;
724 break;
725 case REG_TYPE_Z_32:
726 case REG_TYPE_Z_64:
727 if (reject_rz)
728 return PARSE_FAIL;
729 *isreg32 = reg->type == REG_TYPE_Z_32;
730 *isregzero = 1;
731 break;
732 default:
733 return PARSE_FAIL;
734 }
735
736 *ccp = str;
737
738 return reg->number;
739 }
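/* Example (sketch): with STR pointing at "w3, w4", the call

     regno = aarch64_reg_parse_32_64 (&str, 0, 0, &is32, &iszero);

   returns 3, sets is32 = 1 and iszero = 0, and leaves STR pointing at
   ", w4"; parsing "sp" with REJECT_SP non-zero returns PARSE_FAIL.  */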
740
741 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
742 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
743 otherwise return FALSE.
744
745 Accept only one occurrence of:
746 8b 16b 4h 8h 2s 4s 1d 2d
747 b h s d q */
748 static bfd_boolean
749 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
750 {
751 char *ptr = *str;
752 unsigned width;
753 unsigned element_size;
754 enum neon_el_type type;
755
756 /* skip '.' */
757 ptr++;
758
759 if (!ISDIGIT (*ptr))
760 {
761 width = 0;
762 goto elt_size;
763 }
764 width = strtoul (ptr, &ptr, 10);
765 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
766 {
767 first_error_fmt (_("bad size %d in vector width specifier"), width);
768 return FALSE;
769 }
770
771 elt_size:
772 switch (TOLOWER (*ptr))
773 {
774 case 'b':
775 type = NT_b;
776 element_size = 8;
777 break;
778 case 'h':
779 type = NT_h;
780 element_size = 16;
781 break;
782 case 's':
783 type = NT_s;
784 element_size = 32;
785 break;
786 case 'd':
787 type = NT_d;
788 element_size = 64;
789 break;
790 case 'q':
791 if (width == 1)
792 {
793 type = NT_q;
794 element_size = 128;
795 break;
796 }
797 /* fall through. */
798 default:
799 if (*ptr != '\0')
800 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
801 else
802 first_error (_("missing element size"));
803 return FALSE;
804 }
805 if (width != 0 && width * element_size != 64 && width * element_size != 128)
806 {
807 first_error_fmt (_
808 ("invalid element size %d and vector size combination %c"),
809 width, *ptr);
810 return FALSE;
811 }
812 ptr++;
813
814 parsed_type->type = type;
815 parsed_type->width = width;
816
817 *str = ptr;
818
819 return TRUE;
820 }
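/* Worked example (illustrative): for the qualifier ".4s" the parser skips
   the '.', reads width = 4, maps 's' to type NT_s with element_size = 32,
   and accepts it because 4 * 32 == 128; ".3s" would be rejected by the
   width check above, and ".4d" because 4 * 64 is neither 64 nor 128.  */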
821
822 /* Parse a single type, e.g. ".8b", leading period included.
823 Only applicable to Vn registers.
824
825 Return TRUE on success; otherwise return FALSE. */
826 static bfd_boolean
827 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
828 {
829 char *str = *ccp;
830
831 if (*str == '.')
832 {
833 if (! parse_neon_type_for_operand (vectype, &str))
834 {
835 first_error (_("vector type expected"));
836 return FALSE;
837 }
838 }
839 else
840 return FALSE;
841
842 *ccp = str;
843
844 return TRUE;
845 }
846
847 /* Parse a register of the type TYPE.
848
849 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
850 name or the parsed register is not of TYPE.
851
852 Otherwise return the register number, and optionally fill in the actual
853 type of the register in *RTYPE when multiple alternatives were given, and
854 return the register shape and element index information in *TYPEINFO.
855
856 IN_REG_LIST should be set with TRUE if the caller is parsing a register
857 list. */
858
859 static int
860 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
861 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
862 {
863 char *str = *ccp;
864 const reg_entry *reg = parse_reg (&str);
865 struct neon_type_el atype;
866 struct neon_type_el parsetype;
867 bfd_boolean is_typed_vecreg = FALSE;
868
869 atype.defined = 0;
870 atype.type = NT_invtype;
871 atype.width = -1;
872 atype.index = 0;
873
874 if (reg == NULL)
875 {
876 if (typeinfo)
877 *typeinfo = atype;
878 set_default_error ();
879 return PARSE_FAIL;
880 }
881
882 if (! aarch64_check_reg_type (reg, type))
883 {
884 DEBUG_TRACE ("reg type check failed");
885 set_default_error ();
886 return PARSE_FAIL;
887 }
888 type = reg->type;
889
890 if (type == REG_TYPE_VN
891 && parse_neon_operand_type (&parsetype, &str))
892 {
893 /* Register is of the form Vn.[bhsdq]. */
894 is_typed_vecreg = TRUE;
895
896 if (parsetype.width == 0)
897 /* Expect index. In the new scheme we cannot have
898 Vn.[bhsdq] represent a scalar. Therefore any
899 Vn.[bhsdq] should have an index following it.
900 Except in reglists, of course. */
901 atype.defined |= NTA_HASINDEX;
902 else
903 atype.defined |= NTA_HASTYPE;
904
905 atype.type = parsetype.type;
906 atype.width = parsetype.width;
907 }
908
909 if (skip_past_char (&str, '['))
910 {
911 expressionS exp;
912
913 /* Reject Sn[index] syntax. */
914 if (!is_typed_vecreg)
915 {
916 first_error (_("this type of register can't be indexed"));
917 return PARSE_FAIL;
918 }
919
920 if (in_reg_list == TRUE)
921 {
922 first_error (_("index not allowed inside register list"));
923 return PARSE_FAIL;
924 }
925
926 atype.defined |= NTA_HASINDEX;
927
928 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
929
930 if (exp.X_op != O_constant)
931 {
932 first_error (_("constant expression required"));
933 return PARSE_FAIL;
934 }
935
936 if (! skip_past_char (&str, ']'))
937 return PARSE_FAIL;
938
939 atype.index = exp.X_add_number;
940 }
941 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
942 {
943 /* Indexed vector register expected. */
944 first_error (_("indexed vector register expected"));
945 return PARSE_FAIL;
946 }
947
948 /* A vector reg Vn should be typed or indexed. */
949 if (type == REG_TYPE_VN && atype.defined == 0)
950 {
951 first_error (_("invalid use of vector register"));
952 }
953
954 if (typeinfo)
955 *typeinfo = atype;
956
957 if (rtype)
958 *rtype = type;
959
960 *ccp = str;
961
962 return reg->number;
963 }
964
965 /* Parse register.
966
967 Return the register number on success; return PARSE_FAIL otherwise.
968
969 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
970 the register (e.g. NEON double or quad reg when either has been requested).
971
972 If this is a NEON vector register with additional type information, fill
973 in the struct pointed to by VECTYPE (if non-NULL).
974
975 This parser does not handle register lists. */
976
977 static int
978 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
979 aarch64_reg_type *rtype, struct neon_type_el *vectype)
980 {
981 struct neon_type_el atype;
982 char *str = *ccp;
983 int reg = parse_typed_reg (&str, type, rtype, &atype,
984 /*in_reg_list= */ FALSE);
985
986 if (reg == PARSE_FAIL)
987 return PARSE_FAIL;
988
989 if (vectype)
990 *vectype = atype;
991
992 *ccp = str;
993
994 return reg;
995 }
996
997 static inline bfd_boolean
998 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
999 {
1000 return
1001 e1.type == e2.type
1002 && e1.defined == e2.defined
1003 && e1.width == e2.width && e1.index == e2.index;
1004 }
1005
1006 /* This function parses the NEON register list. On success, it returns
1007 the parsed register list information in the following encoded format:
1008
1009 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1010 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1011
1012 The information of the register shape and/or index is returned in
1013 *VECTYPE.
1014
1015 It returns PARSE_FAIL if the register list is invalid.
1016
1017 The list contains one to four registers.
1018 Each register can be one of:
1019 <Vt>.<T>[<index>]
1020 <Vt>.<T>
1021 All <T> should be identical.
1022 All <index> should be identical.
1023 There are restrictions on <Vt> numbers which are checked later
1024 (by reg_list_valid_p). */
1025
1026 static int
1027 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1028 {
1029 char *str = *ccp;
1030 int nb_regs;
1031 struct neon_type_el typeinfo, typeinfo_first;
1032 int val, val_range;
1033 int in_range;
1034 int ret_val;
1035 int i;
1036 bfd_boolean error = FALSE;
1037 bfd_boolean expect_index = FALSE;
1038
1039 if (*str != '{')
1040 {
1041 set_syntax_error (_("expecting {"));
1042 return PARSE_FAIL;
1043 }
1044 str++;
1045
1046 nb_regs = 0;
1047 typeinfo_first.defined = 0;
1048 typeinfo_first.type = NT_invtype;
1049 typeinfo_first.width = -1;
1050 typeinfo_first.index = 0;
1051 ret_val = 0;
1052 val = -1;
1053 val_range = -1;
1054 in_range = 0;
1055 do
1056 {
1057 if (in_range)
1058 {
1059 str++; /* skip over '-' */
1060 val_range = val;
1061 }
1062 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1063 /*in_reg_list= */ TRUE);
1064 if (val == PARSE_FAIL)
1065 {
1066 set_first_syntax_error (_("invalid vector register in list"));
1067 error = TRUE;
1068 continue;
1069 }
1070 /* reject [bhsd]n */
1071 if (typeinfo.defined == 0)
1072 {
1073 set_first_syntax_error (_("invalid scalar register in list"));
1074 error = TRUE;
1075 continue;
1076 }
1077
1078 if (typeinfo.defined & NTA_HASINDEX)
1079 expect_index = TRUE;
1080
1081 if (in_range)
1082 {
1083 if (val < val_range)
1084 {
1085 set_first_syntax_error
1086 (_("invalid range in vector register list"));
1087 error = TRUE;
1088 }
1089 val_range++;
1090 }
1091 else
1092 {
1093 val_range = val;
1094 if (nb_regs == 0)
1095 typeinfo_first = typeinfo;
1096 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1097 {
1098 set_first_syntax_error
1099 (_("type mismatch in vector register list"));
1100 error = TRUE;
1101 }
1102 }
1103 if (! error)
1104 for (i = val_range; i <= val; i++)
1105 {
1106 ret_val |= i << (5 * nb_regs);
1107 nb_regs++;
1108 }
1109 in_range = 0;
1110 }
1111 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1112
1113 skip_whitespace (str);
1114 if (*str != '}')
1115 {
1116 set_first_syntax_error (_("end of vector register list not found"));
1117 error = TRUE;
1118 }
1119 str++;
1120
1121 skip_whitespace (str);
1122
1123 if (expect_index)
1124 {
1125 if (skip_past_char (&str, '['))
1126 {
1127 expressionS exp;
1128
1129 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1130 if (exp.X_op != O_constant)
1131 {
1132 set_first_syntax_error (_("constant expression required."));
1133 error = TRUE;
1134 }
1135 if (! skip_past_char (&str, ']'))
1136 error = TRUE;
1137 else
1138 typeinfo_first.index = exp.X_add_number;
1139 }
1140 else
1141 {
1142 set_first_syntax_error (_("expected index"));
1143 error = TRUE;
1144 }
1145 }
1146
1147 if (nb_regs > 4)
1148 {
1149 set_first_syntax_error (_("too many registers in vector register list"));
1150 error = TRUE;
1151 }
1152 else if (nb_regs == 0)
1153 {
1154 set_first_syntax_error (_("empty vector register list"));
1155 error = TRUE;
1156 }
1157
1158 *ccp = str;
1159 if (! error)
1160 *vectype = typeinfo_first;
1161
1162 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1163 }
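/* Worked example (illustrative): for the list "{v2.4s, v3.4s}" the two
   registers parse to val = 2 and val = 3, giving

     ret_val = 2 | (3 << 5) = 0x62,  nb_regs = 2

   so the function returns (0x62 << 2) | (2 - 1) = 0x189, matching the
   packed layout documented above: first regno in bits 2-6, second in bits
   7-11, and the register count (encoded as nb_regs - 1) in bits 0-1.  */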
1164
1165 /* Directives: register aliases. */
1166
1167 static reg_entry *
1168 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1169 {
1170 reg_entry *new;
1171 const char *name;
1172
1173 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1174 {
1175 if (new->builtin)
1176 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1177 str);
1178
1179 /* Only warn about a redefinition if it's not defined as the
1180 same register. */
1181 else if (new->number != number || new->type != type)
1182 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1183
1184 return NULL;
1185 }
1186
1187 name = xstrdup (str);
1188 new = xmalloc (sizeof (reg_entry));
1189
1190 new->name = name;
1191 new->number = number;
1192 new->type = type;
1193 new->builtin = FALSE;
1194
1195 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1196 abort ();
1197
1198 return new;
1199 }
1200
1201 /* Look for the .req directive. This is of the form:
1202
1203 new_register_name .req existing_register_name
1204
1205 If we find one, or if it looks sufficiently like one that we want to
1206 handle any error here, return TRUE. Otherwise return FALSE. */
1207
1208 static bfd_boolean
1209 create_register_alias (char *newname, char *p)
1210 {
1211 const reg_entry *old;
1212 char *oldname, *nbuf;
1213 size_t nlen;
1214
1215 /* The input scrubber ensures that whitespace after the mnemonic is
1216 collapsed to single spaces. */
1217 oldname = p;
1218 if (strncmp (oldname, " .req ", 6) != 0)
1219 return FALSE;
1220
1221 oldname += 6;
1222 if (*oldname == '\0')
1223 return FALSE;
1224
1225 old = hash_find (aarch64_reg_hsh, oldname);
1226 if (!old)
1227 {
1228 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1229 return TRUE;
1230 }
1231
1232 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1233 the desired alias name, and p points to its end. If not, then
1234 the desired alias name is in the global original_case_string. */
1235 #ifdef TC_CASE_SENSITIVE
1236 nlen = p - newname;
1237 #else
1238 newname = original_case_string;
1239 nlen = strlen (newname);
1240 #endif
1241
1242 nbuf = alloca (nlen + 1);
1243 memcpy (nbuf, newname, nlen);
1244 nbuf[nlen] = '\0';
1245
1246 /* Create aliases under the new name as stated; an all-lowercase
1247 version of the new name; and an all-uppercase version of the new
1248 name. */
1249 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1250 {
1251 for (p = nbuf; *p; p++)
1252 *p = TOUPPER (*p);
1253
1254 if (strncmp (nbuf, newname, nlen))
1255 {
1256 /* If this attempt to create an additional alias fails, do not bother
1257 trying to create the all-lower case alias. We will fail and issue
1258 a second, duplicate error message. This situation arises when the
1259 programmer does something like:
1260 foo .req r0
1261 Foo .req r1
1262 The second .req creates the "Foo" alias but then fails to create
1263 the artificial FOO alias because it has already been created by the
1264 first .req. */
1265 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1266 return TRUE;
1267 }
1268
1269 for (p = nbuf; *p; p++)
1270 *p = TOLOWER (*p);
1271
1272 if (strncmp (nbuf, newname, nlen))
1273 insert_reg_alias (nbuf, old->number, old->type);
1274 }
1275
1276 return TRUE;
1277 }
1278
1279 /* Should never be called, as .req goes between the alias and the
1280 register name, not at the beginning of the line. */
1281 static void
1282 s_req (int a ATTRIBUTE_UNUSED)
1283 {
1284 as_bad (_("invalid syntax for .req directive"));
1285 }
1286
1287 /* The .unreq directive deletes an alias which was previously defined
1288 by .req. For example:
1289
1290 my_alias .req r11
1291 .unreq my_alias */
1292
1293 static void
1294 s_unreq (int a ATTRIBUTE_UNUSED)
1295 {
1296 char *name;
1297 char saved_char;
1298
1299 name = input_line_pointer;
1300
1301 while (*input_line_pointer != 0
1302 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1303 ++input_line_pointer;
1304
1305 saved_char = *input_line_pointer;
1306 *input_line_pointer = 0;
1307
1308 if (!*name)
1309 as_bad (_("invalid syntax for .unreq directive"));
1310 else
1311 {
1312 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1313
1314 if (!reg)
1315 as_bad (_("unknown register alias '%s'"), name);
1316 else if (reg->builtin)
1317 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1318 name);
1319 else
1320 {
1321 char *p;
1322 char *nbuf;
1323
1324 hash_delete (aarch64_reg_hsh, name, FALSE);
1325 free ((char *) reg->name);
1326 free (reg);
1327
1328 /* Also locate the all upper case and all lower case versions.
1329 Do not complain if we cannot find one or the other as it
1330 was probably deleted above. */
1331
1332 nbuf = strdup (name);
1333 for (p = nbuf; *p; p++)
1334 *p = TOUPPER (*p);
1335 reg = hash_find (aarch64_reg_hsh, nbuf);
1336 if (reg)
1337 {
1338 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1339 free ((char *) reg->name);
1340 free (reg);
1341 }
1342
1343 for (p = nbuf; *p; p++)
1344 *p = TOLOWER (*p);
1345 reg = hash_find (aarch64_reg_hsh, nbuf);
1346 if (reg)
1347 {
1348 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1349 free ((char *) reg->name);
1350 free (reg);
1351 }
1352
1353 free (nbuf);
1354 }
1355 }
1356
1357 *input_line_pointer = saved_char;
1358 demand_empty_rest_of_line ();
1359 }
1360
1361 /* Directives: Instruction set selection. */
1362
1363 #ifdef OBJ_ELF
1364 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1365 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1366 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1367 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1368
1369 /* Create a new mapping symbol for the transition to STATE. */
1370
1371 static void
1372 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1373 {
1374 symbolS *symbolP;
1375 const char *symname;
1376 int type;
1377
1378 switch (state)
1379 {
1380 case MAP_DATA:
1381 symname = "$d";
1382 type = BSF_NO_FLAGS;
1383 break;
1384 case MAP_INSN:
1385 symname = "$x";
1386 type = BSF_NO_FLAGS;
1387 break;
1388 default:
1389 abort ();
1390 }
1391
1392 symbolP = symbol_new (symname, now_seg, value, frag);
1393 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1394
1395 /* Save the mapping symbols for future reference. Also check that
1396 we do not place two mapping symbols at the same offset within a
1397 frag. We'll handle overlap between frags in
1398 check_mapping_symbols.
1399
1400 If .fill or other data filling directive generates zero sized data,
1401 the mapping symbol for the following code will have the same value
1402 as the one generated for the data filling directive. In this case,
1403 we replace the old symbol with the new one at the same address. */
1404 if (value == 0)
1405 {
1406 if (frag->tc_frag_data.first_map != NULL)
1407 {
1408 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1409 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1410 &symbol_lastP);
1411 }
1412 frag->tc_frag_data.first_map = symbolP;
1413 }
1414 if (frag->tc_frag_data.last_map != NULL)
1415 {
1416 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1417 S_GET_VALUE (symbolP));
1418 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1419 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1420 &symbol_lastP);
1421 }
1422 frag->tc_frag_data.last_map = symbolP;
1423 }
1424
1425 /* We must sometimes convert a region marked as code to data during
1426 code alignment, if an odd number of bytes have to be padded. The
1427 code mapping symbol is pushed to an aligned address. */
1428
1429 static void
1430 insert_data_mapping_symbol (enum mstate state,
1431 valueT value, fragS * frag, offsetT bytes)
1432 {
1433 /* If there was already a mapping symbol, remove it. */
1434 if (frag->tc_frag_data.last_map != NULL
1435 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1436 frag->fr_address + value)
1437 {
1438 symbolS *symp = frag->tc_frag_data.last_map;
1439
1440 if (value == 0)
1441 {
1442 know (frag->tc_frag_data.first_map == symp);
1443 frag->tc_frag_data.first_map = NULL;
1444 }
1445 frag->tc_frag_data.last_map = NULL;
1446 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1447 }
1448
1449 make_mapping_symbol (MAP_DATA, value, frag);
1450 make_mapping_symbol (state, value + bytes, frag);
1451 }
1452
1453 static void mapping_state_2 (enum mstate state, int max_chars);
1454
1455 /* Set the mapping state to STATE. Only call this when about to
1456 emit some STATE bytes to the file. */
1457
1458 void
1459 mapping_state (enum mstate state)
1460 {
1461 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1462
1463 if (state == MAP_INSN)
1464 /* AArch64 instructions require 4-byte alignment. When emitting
1465 instructions into any section, record the appropriate section
1466 alignment. */
1467 record_alignment (now_seg, 2);
1468
1469 if (mapstate == state)
1470 /* The mapping symbol has already been emitted.
1471 There is nothing else to do. */
1472 return;
1473
1474 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1475 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1476 /* Emit MAP_DATA within executable section in order. Otherwise, it will be
1477 evaluated later in the next else. */
1478 return;
1479 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1480 {
1481 /* Only add the symbol if the offset is > 0:
1482 if we're at the first frag, check it's size > 0;
1483 if we're not at the first frag, then for sure
1484 the offset is > 0. */
1485 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1486 const int add_symbol = (frag_now != frag_first)
1487 || (frag_now_fix () > 0);
1488
1489 if (add_symbol)
1490 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1491 }
1492 #undef TRANSITION
1493
1494 mapping_state_2 (state, 0);
1495 }
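/* Illustrative sketch of the effect (assembly input, not part of this file):
   for

       .text
       add  x0, x1, x2
       .word 0x12345678
       add  x3, x4, x5

   a "$x" mapping symbol is emitted before the first ADD, a "$d" before the
   .word data, and another "$x" before the second ADD, so that consumers
   such as disassemblers can tell code from data.  */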
1496
1497 /* Same as mapping_state, but MAX_CHARS bytes have already been
1498 allocated. Put the mapping symbol that far back. */
1499
1500 static void
1501 mapping_state_2 (enum mstate state, int max_chars)
1502 {
1503 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1504
1505 if (!SEG_NORMAL (now_seg))
1506 return;
1507
1508 if (mapstate == state)
1509 /* The mapping symbol has already been emitted.
1510 There is nothing else to do. */
1511 return;
1512
1513 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1514 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1515 }
1516 #else
1517 #define mapping_state(x) /* nothing */
1518 #define mapping_state_2(x, y) /* nothing */
1519 #endif
1520
1521 /* Directives: sectioning and alignment. */
1522
1523 static void
1524 s_bss (int ignore ATTRIBUTE_UNUSED)
1525 {
1526 /* We don't support putting frags in the BSS segment, we fake it by
1527 marking in_bss, then looking at s_skip for clues. */
1528 subseg_set (bss_section, 0);
1529 demand_empty_rest_of_line ();
1530 mapping_state (MAP_DATA);
1531 }
1532
1533 static void
1534 s_even (int ignore ATTRIBUTE_UNUSED)
1535 {
1536 /* Never make a frag if we expect an extra pass. */
1537 if (!need_pass_2)
1538 frag_align (1, 0, 0);
1539
1540 record_alignment (now_seg, 1);
1541
1542 demand_empty_rest_of_line ();
1543 }
1544
1545 /* Directives: Literal pools. */
1546
1547 static literal_pool *
1548 find_literal_pool (int size)
1549 {
1550 literal_pool *pool;
1551
1552 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1553 {
1554 if (pool->section == now_seg
1555 && pool->sub_section == now_subseg && pool->size == size)
1556 break;
1557 }
1558
1559 return pool;
1560 }
1561
1562 static literal_pool *
1563 find_or_make_literal_pool (int size)
1564 {
1565 /* Next literal pool ID number. */
1566 static unsigned int latest_pool_num = 1;
1567 literal_pool *pool;
1568
1569 pool = find_literal_pool (size);
1570
1571 if (pool == NULL)
1572 {
1573 /* Create a new pool. */
1574 pool = xmalloc (sizeof (*pool));
1575 if (!pool)
1576 return NULL;
1577
1578 /* Currently we always put the literal pool in the current text
1579 section. If we were generating "small" model code where we
1580 knew that all code and initialised data was within 1MB then
1581 we could output literals to mergeable, read-only data
1582 sections. */
1583
1584 pool->next_free_entry = 0;
1585 pool->section = now_seg;
1586 pool->sub_section = now_subseg;
1587 pool->size = size;
1588 pool->next = list_of_pools;
1589 pool->symbol = NULL;
1590
1591 /* Add it to the list. */
1592 list_of_pools = pool;
1593 }
1594
1595 /* New pools, and emptied pools, will have a NULL symbol. */
1596 if (pool->symbol == NULL)
1597 {
1598 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1599 (valueT) 0, &zero_address_frag);
1600 pool->id = latest_pool_num++;
1601 }
1602
1603 /* Done. */
1604 return pool;
1605 }
1606
1607 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1608 Return TRUE on success, otherwise return FALSE. */
1609 static bfd_boolean
1610 add_to_lit_pool (expressionS *exp, int size)
1611 {
1612 literal_pool *pool;
1613 unsigned int entry;
1614
1615 pool = find_or_make_literal_pool (size);
1616
1617 /* Check if this literal value is already in the pool. */
1618 for (entry = 0; entry < pool->next_free_entry; entry++)
1619 {
1620 expressionS * litexp = & pool->literals[entry].exp;
1621
1622 if ((litexp->X_op == exp->X_op)
1623 && (exp->X_op == O_constant)
1624 && (litexp->X_add_number == exp->X_add_number)
1625 && (litexp->X_unsigned == exp->X_unsigned))
1626 break;
1627
1628 if ((litexp->X_op == exp->X_op)
1629 && (exp->X_op == O_symbol)
1630 && (litexp->X_add_number == exp->X_add_number)
1631 && (litexp->X_add_symbol == exp->X_add_symbol)
1632 && (litexp->X_op_symbol == exp->X_op_symbol))
1633 break;
1634 }
1635
1636 /* Do we need to create a new entry? */
1637 if (entry == pool->next_free_entry)
1638 {
1639 if (entry >= MAX_LITERAL_POOL_SIZE)
1640 {
1641 set_syntax_error (_("literal pool overflow"));
1642 return FALSE;
1643 }
1644
1645 pool->literals[entry].exp = *exp;
1646 pool->next_free_entry += 1;
1647 if (exp->X_op == O_big)
1648 {
1649 /* PR 16688: Bignums are held in a single global array. We must
1650 copy and preserve that value now, before it is overwritten. */
1651 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1652 memcpy (pool->literals[entry].bignum, generic_bignum,
1653 CHARS_PER_LITTLENUM * exp->X_add_number);
1654 }
1655 else
1656 pool->literals[entry].bignum = NULL;
1657 }
1658
1659 exp->X_op = O_symbol;
1660 exp->X_add_number = ((int) entry) * size;
1661 exp->X_add_symbol = pool->symbol;
1662
1663 return TRUE;
1664 }
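/* Usage sketch (illustrative): the `ldr Xt, =imm' pseudo-instruction is a
   typical client of the pool. For example

       ldr  x0, =0x1122334455667788

   places the 8-byte constant into the size-8 pool via add_to_lit_pool and
   rewrites the operand as `pool_symbol + entry * 8', which becomes a
   PC-relative literal load once the pool is dumped by .ltorg or .pool.  */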
1665
1666 /* Can't use symbol_new here, so have to create a symbol and then at
1667 a later date assign it a value. That's what these functions do. */
1668
1669 static void
1670 symbol_locate (symbolS * symbolP,
1671 const char *name,/* It is copied, the caller can modify. */
1672 segT segment, /* Segment identifier (SEG_<something>). */
1673 valueT valu, /* Symbol value. */
1674 fragS * frag) /* Associated fragment. */
1675 {
1676 size_t name_length;
1677 char *preserved_copy_of_name;
1678
1679 name_length = strlen (name) + 1; /* +1 for \0. */
1680 obstack_grow (&notes, name, name_length);
1681 preserved_copy_of_name = obstack_finish (&notes);
1682
1683 #ifdef tc_canonicalize_symbol_name
1684 preserved_copy_of_name =
1685 tc_canonicalize_symbol_name (preserved_copy_of_name);
1686 #endif
1687
1688 S_SET_NAME (symbolP, preserved_copy_of_name);
1689
1690 S_SET_SEGMENT (symbolP, segment);
1691 S_SET_VALUE (symbolP, valu);
1692 symbol_clear_list_pointers (symbolP);
1693
1694 symbol_set_frag (symbolP, frag);
1695
1696 /* Link to end of symbol chain. */
1697 {
1698 extern int symbol_table_frozen;
1699
1700 if (symbol_table_frozen)
1701 abort ();
1702 }
1703
1704 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1705
1706 obj_symbol_new_hook (symbolP);
1707
1708 #ifdef tc_symbol_new_hook
1709 tc_symbol_new_hook (symbolP);
1710 #endif
1711
1712 #ifdef DEBUG_SYMS
1713 verify_symbol_chain (symbol_rootP, symbol_lastP);
1714 #endif /* DEBUG_SYMS */
1715 }
1716
1717
1718 static void
1719 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1720 {
1721 unsigned int entry;
1722 literal_pool *pool;
1723 char sym_name[20];
1724 int align;
1725
1726 for (align = 2; align <= 4; align++)
1727 {
1728 int size = 1 << align;
1729
1730 pool = find_literal_pool (size);
1731 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1732 continue;
1733
1734 mapping_state (MAP_DATA);
1735
1736 /* Align the pool as appropriate for word-sized accesses.
1737 Only make a frag if we have to. */
1738 if (!need_pass_2)
1739 frag_align (align, 0, 0);
1740
1741 record_alignment (now_seg, align);
1742
1743 sprintf (sym_name, "$$lit_\002%x", pool->id);
1744
1745 symbol_locate (pool->symbol, sym_name, now_seg,
1746 (valueT) frag_now_fix (), frag_now);
1747 symbol_table_insert (pool->symbol);
1748
1749 for (entry = 0; entry < pool->next_free_entry; entry++)
1750 {
1751 expressionS * exp = & pool->literals[entry].exp;
1752
1753 if (exp->X_op == O_big)
1754 {
1755 /* PR 16688: Restore the global bignum value. */
1756 gas_assert (pool->literals[entry].bignum != NULL);
1757 memcpy (generic_bignum, pool->literals[entry].bignum,
1758 CHARS_PER_LITTLENUM * exp->X_add_number);
1759 }
1760
1761 /* First output the expression in the instruction to the pool. */
1762 emit_expr (exp, size); /* .word|.xword */
1763
1764 if (exp->X_op == O_big)
1765 {
1766 free (pool->literals[entry].bignum);
1767 pool->literals[entry].bignum = NULL;
1768 }
1769 }
1770
1771 /* Mark the pool as empty. */
1772 pool->next_free_entry = 0;
1773 pool->symbol = NULL;
1774 }
1775 }
1776
1777 #ifdef OBJ_ELF
1778 /* Forward declarations for functions below, in the MD interface
1779 section. */
1780 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1781 static struct reloc_table_entry * find_reloc_table_entry (char **);
1782
1783 /* Directives: Data. */
1784 /* N.B. the support for relocation suffix in this directive needs to be
1785 implemented properly. */
1786
1787 static void
1788 s_aarch64_elf_cons (int nbytes)
1789 {
1790 expressionS exp;
1791
1792 #ifdef md_flush_pending_output
1793 md_flush_pending_output ();
1794 #endif
1795
1796 if (is_it_end_of_statement ())
1797 {
1798 demand_empty_rest_of_line ();
1799 return;
1800 }
1801
1802 #ifdef md_cons_align
1803 md_cons_align (nbytes);
1804 #endif
1805
1806 mapping_state (MAP_DATA);
1807 do
1808 {
1809 struct reloc_table_entry *reloc;
1810
1811 expression (&exp);
1812
1813 if (exp.X_op != O_symbol)
1814 emit_expr (&exp, (unsigned int) nbytes);
1815 else
1816 {
1817 skip_past_char (&input_line_pointer, '#');
1818 if (skip_past_char (&input_line_pointer, ':'))
1819 {
1820 reloc = find_reloc_table_entry (&input_line_pointer);
1821 if (reloc == NULL)
1822 as_bad (_("unrecognized relocation suffix"));
1823 else
1824 as_bad (_("unimplemented relocation suffix"));
1825 ignore_rest_of_line ();
1826 return;
1827 }
1828 else
1829 emit_expr (&exp, (unsigned int) nbytes);
1830 }
1831 }
1832 while (*input_line_pointer++ == ',');
1833
1834 /* Put terminator back into stream. */
1835 input_line_pointer--;
1836 demand_empty_rest_of_line ();
1837 }
1838
1839 #endif /* OBJ_ELF */
1840
1841 /* Output a 32-bit word, but mark as an instruction. */
1842
1843 static void
1844 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1845 {
1846 expressionS exp;
1847
1848 #ifdef md_flush_pending_output
1849 md_flush_pending_output ();
1850 #endif
1851
1852 if (is_it_end_of_statement ())
1853 {
1854 demand_empty_rest_of_line ();
1855 return;
1856 }
1857
1858 /* Sections are assumed to start aligned. In an executable section, there is no
1859 MAP_DATA symbol pending. So we only align the address during
1860 MAP_DATA --> MAP_INSN transition.
1861 For other sections, this is not guaranteed. */
1862 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1863 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1864 frag_align_code (2, 0);
1865
1866 #ifdef OBJ_ELF
1867 mapping_state (MAP_INSN);
1868 #endif
1869
1870 do
1871 {
1872 expression (&exp);
1873 if (exp.X_op != O_constant)
1874 {
1875 as_bad (_("constant expression required"));
1876 ignore_rest_of_line ();
1877 return;
1878 }
1879
1880 if (target_big_endian)
1881 {
1882 unsigned int val = exp.X_add_number;
1883 exp.X_add_number = SWAP_32 (val);
1884 }
1885 emit_expr (&exp, 4);
1886 }
1887 while (*input_line_pointer++ == ',');
1888
1889 /* Put terminator back into stream. */
1890 input_line_pointer--;
1891 demand_empty_rest_of_line ();
1892 }
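/* Example use of the directive (illustrative):

       .inst 0xd503201f    // encoding of NOP

   emits the 32-bit word as code (mapping state MAP_INSN) rather than as
   data, and pre-swaps it on big-endian targets so that the stored
   instruction image is the same either way.  */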
1893
1894 #ifdef OBJ_ELF
1895 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1896
1897 static void
1898 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1899 {
1900 expressionS exp;
1901
1902 /* Since we're just labelling the code, there's no need to define a
1903 mapping symbol. */
1904 expression (&exp);
1905 /* Make sure there is enough room in this frag for the following
1906 blr. This trick only works if the blr follows immediately after
1907 the .tlsdesc directive. */
1908 frag_grow (4);
1909 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1910 BFD_RELOC_AARCH64_TLSDESC_CALL);
1911
1912 demand_empty_rest_of_line ();
1913 }
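/* Typical use (illustrative sketch of the TLS descriptor call sequence):

       adrp x0, :tlsdesc:var
       ldr  x1, [x0, #:tlsdesc_lo12:var]
       add  x0, x0, :tlsdesc_lo12:var
       .tlsdesccall var
       blr  x1

   the directive attaches a BFD_RELOC_AARCH64_TLSDESC_CALL fix at the
   address of the following BLR so the linker can relax the sequence.  */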
1914 #endif /* OBJ_ELF */
1915
1916 static void s_aarch64_arch (int);
1917 static void s_aarch64_cpu (int);
1918 static void s_aarch64_arch_extension (int);
1919
1920 /* This table describes all the machine specific pseudo-ops the assembler
1921 has to support. The fields are:
1922 pseudo-op name without dot
1923 function to call to execute this pseudo-op
1924 Integer arg to pass to the function. */
1925
1926 const pseudo_typeS md_pseudo_table[] = {
1927 /* Never called because '.req' does not start a line. */
1928 {"req", s_req, 0},
1929 {"unreq", s_unreq, 0},
1930 {"bss", s_bss, 0},
1931 {"even", s_even, 0},
1932 {"ltorg", s_ltorg, 0},
1933 {"pool", s_ltorg, 0},
1934 {"cpu", s_aarch64_cpu, 0},
1935 {"arch", s_aarch64_arch, 0},
1936 {"arch_extension", s_aarch64_arch_extension, 0},
1937 {"inst", s_aarch64_inst, 0},
1938 #ifdef OBJ_ELF
1939 {"tlsdesccall", s_tlsdesccall, 0},
1940 {"word", s_aarch64_elf_cons, 4},
1941 {"long", s_aarch64_elf_cons, 4},
1942 {"xword", s_aarch64_elf_cons, 8},
1943 {"dword", s_aarch64_elf_cons, 8},
1944 #endif
1945 {0, 0, 0}
1946 };
1947 \f
1948
1949 /* Check whether STR points to a register name followed by a comma or the
1950 end of line; REG_TYPE indicates which register types are checked
1951 against. Return TRUE if STR is such a register name; otherwise return
1952 FALSE. The function does not intend to produce any diagnostics, but since
1953 the register parser aarch64_reg_parse, which is called by this function,
1954 does produce diagnostics, we call clear_error to clear any diagnostics
1955 that may be generated by aarch64_reg_parse.
1956 Also, the function returns FALSE directly if there is any user error
1957 present at the function entry. This prevents the existing diagnostics
1958 state from being spoiled.
1959 The function currently serves parse_constant_immediate and
1960 parse_big_immediate only. */
1961 static bfd_boolean
1962 reg_name_p (char *str, aarch64_reg_type reg_type)
1963 {
1964 int reg;
1965
1966 /* Prevent the diagnostics state from being spoiled. */
1967 if (error_p ())
1968 return FALSE;
1969
1970 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1971
1972 /* Clear the parsing error that may be set by the reg parser. */
1973 clear_error ();
1974
1975 if (reg == PARSE_FAIL)
1976 return FALSE;
1977
1978 skip_whitespace (str);
1979 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1980 return TRUE;
1981
1982 return FALSE;
1983 }
1984
1985 /* Parser functions used exclusively in instruction operands. */
1986
1987 /* Parse an immediate expression which may not be constant.
1988
1989 To prevent the expression parser from pushing a register name
1990 into the symbol table as an undefined symbol, a check is first made
1991 to determine whether STR is a valid register name followed
1992 by a comma or the end of line. Return FALSE if STR is such a
1993 string. */
1994
1995 static bfd_boolean
1996 parse_immediate_expression (char **str, expressionS *exp)
1997 {
1998 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1999 {
2000 set_recoverable_error (_("immediate operand required"));
2001 return FALSE;
2002 }
2003
2004 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2005
2006 if (exp->X_op == O_absent)
2007 {
2008 set_fatal_syntax_error (_("missing immediate expression"));
2009 return FALSE;
2010 }
2011
2012 return TRUE;
2013 }
2014
2015 /* Constant immediate-value read function for use in insn parsing.
2016 STR points to the beginning of the immediate (with the optional
2017 leading #); *VAL receives the value.
2018
2019 Return TRUE on success; otherwise return FALSE. */
2020
2021 static bfd_boolean
2022 parse_constant_immediate (char **str, int64_t * val)
2023 {
2024 expressionS exp;
2025
2026 if (! parse_immediate_expression (str, &exp))
2027 return FALSE;
2028
2029 if (exp.X_op != O_constant)
2030 {
2031 set_syntax_error (_("constant expression required"));
2032 return FALSE;
2033 }
2034
2035 *val = exp.X_add_number;
2036 return TRUE;
2037 }
2038
2039 static uint32_t
2040 encode_imm_float_bits (uint32_t imm)
2041 {
2042 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2043 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2044 }
2045
2046 /* Return TRUE if the single-precision floating-point value encoded in IMM
2047 can be expressed in the AArch64 8-bit signed floating-point format with
2048 a 3-bit exponent and normalized 4 bits of precision; in other words, the
2049 floating-point value must be expressible as
2050 (+/-) n / 16 * power (2, r)
2051 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
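/* For example, 2.0 = 16/16 * 2^1 (n = 16, r = 1) qualifies: its IEEE754
single-precision encoding is 0x40000000, which has bit 31 clear, bit 30
set, bits 29-25 clear and the low 19 bits zero, matching the test below. */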
2052
2053 static bfd_boolean
2054 aarch64_imm_float_p (uint32_t imm)
2055 {
2056 /* If a single-precision floating-point value has the following bit
2057 pattern, it can be expressed in the AArch64 8-bit floating-point
2058 format:
2059
2060 3 32222222 2221111111111
2061 1 09876543 21098765432109876543210
2062 n Eeeeeexx xxxx0000000000000000000
2063
2064 where n, e and each x are either 0 or 1 independently, with
2065 E == ~ e. */
2066
2067 uint32_t pattern;
2068
2069 /* Prepare the pattern for 'Eeeeee'. */
2070 if (((imm >> 30) & 0x1) == 0)
2071 pattern = 0x3e000000;
2072 else
2073 pattern = 0x40000000;
2074
2075 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2076 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2077 }
2078
2079 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2080
2081 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2082 8-bit signed floating-point format with a 3-bit exponent and normalized 4
2083 bits of precision (i.e. can be used in an FMOV instruction); return the
2084 equivalent single-precision encoding in *FPWORD.
2085
2086 Otherwise return FALSE. */
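/* For example, the double-precision encoding of 2.0 is 0x4000000000000000:
the lower 32 bits are zero and the high word 0x40000000 matches the
pattern below, so *FPWORD becomes the single-precision encoding
0x40000000. */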
2087
2088 static bfd_boolean
2089 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2090 {
2091 /* If a double-precision floating-point value has the following bit
2092 pattern, it can be expressed in the AArch64 8-bit floating-point
2093 format:
2094
2095 6 66655555555 554444444...21111111111
2096 3 21098765432 109876543...098765432109876543210
2097 n Eeeeeeeeexx xxxx00000...000000000000000000000
2098
2099 where n, e and each x are either 0 or 1 independently, with
2100 E == ~ e. */
2101
2102 uint32_t pattern;
2103 uint32_t high32 = imm >> 32;
2104
2105 /* Lower 32 bits need to be 0s. */
2106 if ((imm & 0xffffffff) != 0)
2107 return FALSE;
2108
2109 /* Prepare the pattern for 'Eeeeeeeee'. */
2110 if (((high32 >> 30) & 0x1) == 0)
2111 pattern = 0x3fc00000;
2112 else
2113 pattern = 0x40000000;
2114
2115 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2116 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2117 {
2118 /* Convert to the single-precision encoding.
2119 i.e. convert
2120 n Eeeeeeeeexx xxxx00000...000000000000000000000
2121 to
2122 n Eeeeeexx xxxx0000000000000000000. */
2123 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2124 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2125 return TRUE;
2126 }
2127 else
2128 return FALSE;
2129 }
2130
2131 /* Parse a floating-point immediate. On success, return TRUE and store the
2132 value in *IMMED as an IEEE754 single-precision encoding.
2133 *CCP points to the start of the string; DP_P is TRUE when the immediate
2134 is expected to be in double-precision (N.B. this only matters when
2135 hexadecimal representation is involved).
2136
2137 N.B. 0.0 is accepted by this function. */
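/* For example, "#2.0" is parsed into the single-precision encoding
0x40000000; a hexadecimal IEEE754 encoding such as "#0x40000000" (or
"#0x4000000000000000" when DP_P is TRUE) is accepted as well, as handled
below. */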
2138
2139 static bfd_boolean
2140 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2141 {
2142 char *str = *ccp;
2143 char *fpnum;
2144 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2145 int found_fpchar = 0;
2146 int64_t val = 0;
2147 unsigned fpword = 0;
2148 bfd_boolean hex_p = FALSE;
2149
2150 skip_past_char (&str, '#');
2151
2152 fpnum = str;
2153 skip_whitespace (fpnum);
2154
2155 if (strncmp (fpnum, "0x", 2) == 0)
2156 {
2157 /* Support the hexadecimal representation of the IEEE754 encoding.
2158 Double-precision is expected when DP_P is TRUE, otherwise the
2159 representation should be in single-precision. */
2160 if (! parse_constant_immediate (&str, &val))
2161 goto invalid_fp;
2162
2163 if (dp_p)
2164 {
2165 if (! aarch64_double_precision_fmovable (val, &fpword))
2166 goto invalid_fp;
2167 }
2168 else if ((uint64_t) val > 0xffffffff)
2169 goto invalid_fp;
2170 else
2171 fpword = val;
2172
2173 hex_p = TRUE;
2174 }
2175 else
2176 {
2177 /* We must not accidentally parse an integer as a floating-point number.
2178 Make sure that the value we parse is not an integer by checking for
2179 special characters '.' or 'e'. */
2180 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2181 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2182 {
2183 found_fpchar = 1;
2184 break;
2185 }
2186
2187 if (!found_fpchar)
2188 return FALSE;
2189 }
2190
2191 if (! hex_p)
2192 {
2193 int i;
2194
2195 if ((str = atof_ieee (str, 's', words)) == NULL)
2196 goto invalid_fp;
2197
2198 /* Our FP word must be 32 bits (single-precision FP). */
2199 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2200 {
2201 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2202 fpword |= words[i];
2203 }
2204 }
2205
2206 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2207 {
2208 *immed = fpword;
2209 *ccp = str;
2210 return TRUE;
2211 }
2212
2213 invalid_fp:
2214 set_fatal_syntax_error (_("invalid floating-point constant"));
2215 return FALSE;
2216 }
2217
2218 /* Less-generic immediate-value read function with the possibility of loading
2219 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2220 instructions.
2221
2222 To prevent the expression parser from pushing a register name into the
2223 symbol table as an undefined symbol, a check is first made to determine
2224 whether STR is a valid register name followed by a comma or the end
2225 of line. Return FALSE if STR is such a register name. */
2226
2227 static bfd_boolean
2228 parse_big_immediate (char **str, int64_t *imm)
2229 {
2230 char *ptr = *str;
2231
2232 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2233 {
2234 set_syntax_error (_("immediate operand required"));
2235 return FALSE;
2236 }
2237
2238 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2239
2240 if (inst.reloc.exp.X_op == O_constant)
2241 *imm = inst.reloc.exp.X_add_number;
2242
2243 *str = ptr;
2244
2245 return TRUE;
2246 }
2247
2248 /* Flag in *RELOC that the operand *OPERAND needs a GAS internal fixup.
2249 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2250 assistance from libopcodes. */
2251
2252 static inline void
2253 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2254 const aarch64_opnd_info *operand,
2255 int need_libopcodes_p)
2256 {
2257 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2258 reloc->opnd = operand->type;
2259 if (need_libopcodes_p)
2260 reloc->need_libopcodes_p = 1;
2261 }
2262
2263 /* Return TRUE if the instruction needs to be fixed up later internally by
2264 GAS; otherwise return FALSE. */
2265
2266 static inline bfd_boolean
2267 aarch64_gas_internal_fixup_p (void)
2268 {
2269 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2270 }
2271
2272 /* Assign the immediate value to the relevant field in *OPERAND if
2273 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2274 needs an internal fixup in a later stage.
2275 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2276 IMM.VALUE that may get assigned with the constant. */
2277 static inline void
2278 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2279 aarch64_opnd_info *operand,
2280 int addr_off_p,
2281 int need_libopcodes_p,
2282 int skip_p)
2283 {
2284 if (reloc->exp.X_op == O_constant)
2285 {
2286 if (addr_off_p)
2287 operand->addr.offset.imm = reloc->exp.X_add_number;
2288 else
2289 operand->imm.value = reloc->exp.X_add_number;
2290 reloc->type = BFD_RELOC_UNUSED;
2291 }
2292 else
2293 {
2294 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2295 /* Tell libopcodes whether or not to ignore this operand. This is helpful
2296 when one of the operands needs to be fixed up later but we need
2297 libopcodes to check the other operands. */
2298 operand->skip = skip_p;
2299 }
2300 }
2301
2302 /* Relocation modifiers. Each entry in the table contains the textual
2303 name for the relocation which may be placed before a symbol used as
2304 a load/store offset or an add immediate. It must be surrounded by a
2305 leading and trailing colon, for example:
2306
2307 ldr x0, [x1, #:rello:varsym]
2308 add x0, x1, #:rello:varsym */
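/* For example, a typical GOT-indirect load of the address of a symbol
combines the "got" and "got_lo12" entries below:

adrp x0, :got:varsym
ldr x0, [x0, #:got_lo12:varsym] */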
2309
2310 struct reloc_table_entry
2311 {
2312 const char *name;
2313 int pc_rel;
2314 bfd_reloc_code_real_type adr_type;
2315 bfd_reloc_code_real_type adrp_type;
2316 bfd_reloc_code_real_type movw_type;
2317 bfd_reloc_code_real_type add_type;
2318 bfd_reloc_code_real_type ldst_type;
2319 bfd_reloc_code_real_type ld_literal_type;
2320 };
2321
2322 static struct reloc_table_entry reloc_table[] = {
2323 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2324 {"lo12", 0,
2325 0, /* adr_type */
2326 0,
2327 0,
2328 BFD_RELOC_AARCH64_ADD_LO12,
2329 BFD_RELOC_AARCH64_LDST_LO12,
2330 0},
2331
2332 /* Higher 21 bits of pc-relative page offset: ADRP */
2333 {"pg_hi21", 1,
2334 0, /* adr_type */
2335 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2336 0,
2337 0,
2338 0,
2339 0},
2340
2341 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2342 {"pg_hi21_nc", 1,
2343 0, /* adr_type */
2344 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2345 0,
2346 0,
2347 0,
2348 0},
2349
2350 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2351 {"abs_g0", 0,
2352 0, /* adr_type */
2353 0,
2354 BFD_RELOC_AARCH64_MOVW_G0,
2355 0,
2356 0,
2357 0},
2358
2359 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2360 {"abs_g0_s", 0,
2361 0, /* adr_type */
2362 0,
2363 BFD_RELOC_AARCH64_MOVW_G0_S,
2364 0,
2365 0,
2366 0},
2367
2368 /* Less significant bits 0-15 of address/value: MOVK, no check */
2369 {"abs_g0_nc", 0,
2370 0, /* adr_type */
2371 0,
2372 BFD_RELOC_AARCH64_MOVW_G0_NC,
2373 0,
2374 0,
2375 0},
2376
2377 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2378 {"abs_g1", 0,
2379 0, /* adr_type */
2380 0,
2381 BFD_RELOC_AARCH64_MOVW_G1,
2382 0,
2383 0,
2384 0},
2385
2386 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2387 {"abs_g1_s", 0,
2388 0, /* adr_type */
2389 0,
2390 BFD_RELOC_AARCH64_MOVW_G1_S,
2391 0,
2392 0,
2393 0},
2394
2395 /* Less significant bits 16-31 of address/value: MOVK, no check */
2396 {"abs_g1_nc", 0,
2397 0, /* adr_type */
2398 0,
2399 BFD_RELOC_AARCH64_MOVW_G1_NC,
2400 0,
2401 0,
2402 0},
2403
2404 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2405 {"abs_g2", 0,
2406 0, /* adr_type */
2407 0,
2408 BFD_RELOC_AARCH64_MOVW_G2,
2409 0,
2410 0,
2411 0},
2412
2413 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2414 {"abs_g2_s", 0,
2415 0, /* adr_type */
2416 0,
2417 BFD_RELOC_AARCH64_MOVW_G2_S,
2418 0,
2419 0,
2420 0},
2421
2422 /* Less significant bits 32-47 of address/value: MOVK, no check */
2423 {"abs_g2_nc", 0,
2424 0, /* adr_type */
2425 0,
2426 BFD_RELOC_AARCH64_MOVW_G2_NC,
2427 0,
2428 0,
2429 0},
2430
2431 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2432 {"abs_g3", 0,
2433 0, /* adr_type */
2434 0,
2435 BFD_RELOC_AARCH64_MOVW_G3,
2436 0,
2437 0,
2438 0},
2439
2440 /* Get to the page containing GOT entry for a symbol. */
2441 {"got", 1,
2442 0, /* adr_type */
2443 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2444 0,
2445 0,
2446 0,
2447 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2448
2449 /* 12 bit offset into the page containing GOT entry for that symbol. */
2450 {"got_lo12", 0,
2451 0, /* adr_type */
2452 0,
2453 0,
2454 0,
2455 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2456 0},
2457
2458 /* Bits 0-15 of address/value: MOVK, no check. */
2459 {"gotoff_g0_nc", 0,
2460 0, /* adr_type */
2461 0,
2462 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2463 0,
2464 0,
2465 0},
2466
2467 /* Most significant bits 16-31 of address/value: MOVZ. */
2468 {"gotoff_g1", 0,
2469 0, /* adr_type */
2470 0,
2471 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2472 0,
2473 0,
2474 0},
2475
2476 /* 15 bit offset into the page containing GOT entry for that symbol. */
2477 {"gotoff_lo15", 0,
2478 0, /* adr_type */
2479 0,
2480 0,
2481 0,
2482 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2483 0},
2484
2485 /* Get to the page containing GOT TLS entry for a symbol */
2486 {"tlsgd", 0,
2487 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2488 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2489 0,
2490 0,
2491 0,
2492 0},
2493
2494 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2495 {"tlsgd_lo12", 0,
2496 0, /* adr_type */
2497 0,
2498 0,
2499 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2500 0,
2501 0},
2502
2503 /* Lower 16 bits of address/value: MOVK. */
2504 {"tlsgd_g0_nc", 0,
2505 0, /* adr_type */
2506 0,
2507 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2508 0,
2509 0,
2510 0},
2511
2512 /* Most significant bits 16-31 of address/value: MOVZ. */
2513 {"tlsgd_g1", 0,
2514 0, /* adr_type */
2515 0,
2516 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2517 0,
2518 0,
2519 0},
2520
2521 /* Get to the page containing GOT TLS entry for a symbol */
2522 {"tlsdesc", 0,
2523 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2524 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2525 0,
2526 0,
2527 0,
2528 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2529
2530 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2531 {"tlsdesc_lo12", 0,
2532 0, /* adr_type */
2533 0,
2534 0,
2535 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2536 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2537 0},
2538
2539 /* Get to the page containing GOT TLS entry for a symbol.
2540 As with GD, we allocate two consecutive GOT slots
2541 for the module index and module offset; the only difference
2542 from GD is that the module offset should be initialized to
2543 zero without any outstanding runtime relocation. */
2544 {"tlsldm", 0,
2545 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2546 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2547 0,
2548 0,
2549 0,
2550 0},
2551
2552 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2553 {"tlsldm_lo12_nc", 0,
2554 0, /* adr_type */
2555 0,
2556 0,
2557 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2558 0,
2559 0},
2560
2561 /* 12 bit offset into the module TLS base address. */
2562 {"dtprel_lo12", 0,
2563 0, /* adr_type */
2564 0,
2565 0,
2566 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2567 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2568 0},
2569
2570 /* Same as dtprel_lo12, no overflow check. */
2571 {"dtprel_lo12_nc", 0,
2572 0, /* adr_type */
2573 0,
2574 0,
2575 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2576 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2577 0},
2578
2579 /* bits[23:12] of offset to the module TLS base address. */
2580 {"dtprel_hi12", 0,
2581 0, /* adr_type */
2582 0,
2583 0,
2584 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2585 0,
2586 0},
2587
2588 /* bits[15:0] of offset to the module TLS base address. */
2589 {"dtprel_g0", 0,
2590 0, /* adr_type */
2591 0,
2592 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2593 0,
2594 0,
2595 0},
2596
2597 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2598 {"dtprel_g0_nc", 0,
2599 0, /* adr_type */
2600 0,
2601 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2602 0,
2603 0,
2604 0},
2605
2606 /* bits[31:16] of offset to the module TLS base address. */
2607 {"dtprel_g1", 0,
2608 0, /* adr_type */
2609 0,
2610 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2611 0,
2612 0,
2613 0},
2614
2615 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2616 {"dtprel_g1_nc", 0,
2617 0, /* adr_type */
2618 0,
2619 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2620 0,
2621 0,
2622 0},
2623
2624 /* bits[47:32] of offset to the module TLS base address. */
2625 {"dtprel_g2", 0,
2626 0, /* adr_type */
2627 0,
2628 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2629 0,
2630 0,
2631 0},
2632
2633 /* Get to the page containing GOT TLS entry for a symbol */
2634 {"gottprel", 0,
2635 0, /* adr_type */
2636 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2637 0,
2638 0,
2639 0,
2640 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2641
2642 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2643 {"gottprel_lo12", 0,
2644 0, /* adr_type */
2645 0,
2646 0,
2647 0,
2648 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2649 0},
2650
2651 /* Get tp offset for a symbol. */
2652 {"tprel", 0,
2653 0, /* adr_type */
2654 0,
2655 0,
2656 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2657 0,
2658 0},
2659
2660 /* Get tp offset for a symbol. */
2661 {"tprel_lo12", 0,
2662 0, /* adr_type */
2663 0,
2664 0,
2665 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2666 0,
2667 0},
2668
2669 /* Get tp offset for a symbol. */
2670 {"tprel_hi12", 0,
2671 0, /* adr_type */
2672 0,
2673 0,
2674 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2675 0,
2676 0},
2677
2678 /* Get tp offset for a symbol. */
2679 {"tprel_lo12_nc", 0,
2680 0, /* adr_type */
2681 0,
2682 0,
2683 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2684 0,
2685 0},
2686
2687 /* Most significant bits 32-47 of address/value: MOVZ. */
2688 {"tprel_g2", 0,
2689 0, /* adr_type */
2690 0,
2691 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2692 0,
2693 0,
2694 0},
2695
2696 /* Most significant bits 16-31 of address/value: MOVZ. */
2697 {"tprel_g1", 0,
2698 0, /* adr_type */
2699 0,
2700 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2701 0,
2702 0,
2703 0},
2704
2705 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2706 {"tprel_g1_nc", 0,
2707 0, /* adr_type */
2708 0,
2709 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2710 0,
2711 0,
2712 0},
2713
2714 /* Most significant bits 0-15 of address/value: MOVZ. */
2715 {"tprel_g0", 0,
2716 0, /* adr_type */
2717 0,
2718 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2719 0,
2720 0,
2721 0},
2722
2723 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2724 {"tprel_g0_nc", 0,
2725 0, /* adr_type */
2726 0,
2727 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2728 0,
2729 0,
2730 0},
2731
2732 /* 15-bit offset from the GOT entry to the base address of the GOT table. */
2733 {"gotpage_lo15", 0,
2734 0,
2735 0,
2736 0,
2737 0,
2738 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2739 0},
2740
2741 /* 14-bit offset from the GOT entry to the base address of the GOT table. */
2742 {"gotpage_lo14", 0,
2743 0,
2744 0,
2745 0,
2746 0,
2747 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2748 0},
2749 };
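
/* As an illustration, the TLS descriptor access sequence for a symbol
combines several of the entries above (register choice and scheduling
may vary):

adrp x0, :tlsdesc:varsym
ldr x1, [x0, #:tlsdesc_lo12:varsym]
add x0, x0, :tlsdesc_lo12:varsym
.tlsdesccall varsym
blr x1 */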
2750
2751 /* Given the address of a pointer pointing to the textual name of a
2752 relocation as may appear in assembler source, attempt to find its
2753 details in reloc_table. The pointer will be updated to the character
2754 after the trailing colon. On failure, NULL will be returned;
2755 otherwise return the reloc_table_entry. */
2756
2757 static struct reloc_table_entry *
2758 find_reloc_table_entry (char **str)
2759 {
2760 unsigned int i;
2761 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2762 {
2763 int length = strlen (reloc_table[i].name);
2764
2765 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2766 && (*str)[length] == ':')
2767 {
2768 *str += (length + 1);
2769 return &reloc_table[i];
2770 }
2771 }
2772
2773 return NULL;
2774 }
2775
2776 /* Mode argument to parse_shift and parse_shifter_operand. */
2777 enum parse_shift_mode
2778 {
2779 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2780 "#imm{,lsl #n}" */
2781 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2782 "#imm" */
2783 SHIFTED_LSL, /* bare "lsl #n" */
2784 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2785 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2786 };
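
/* For example, the "#1, lsl #12" in "add x0, x1, #1, lsl #12" is a
SHIFTED_ARITH_IMM form, the "w2, sxtw #3" in "ldr x0, [x1, w2, sxtw #3]"
is a SHIFTED_REG_OFFSET form, and the "msl #8" in
"movi v0.4s, #0xab, msl #8" is a SHIFTED_LSL_MSL form. */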
2787
2788 /* Parse a <shift> operator on an AArch64 data processing instruction.
2789 Return TRUE on success; otherwise return FALSE. */
2790 static bfd_boolean
2791 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2792 {
2793 const struct aarch64_name_value_pair *shift_op;
2794 enum aarch64_modifier_kind kind;
2795 expressionS exp;
2796 int exp_has_prefix;
2797 char *s = *str;
2798 char *p = s;
2799
2800 for (p = *str; ISALPHA (*p); p++)
2801 ;
2802
2803 if (p == *str)
2804 {
2805 set_syntax_error (_("shift expression expected"));
2806 return FALSE;
2807 }
2808
2809 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2810
2811 if (shift_op == NULL)
2812 {
2813 set_syntax_error (_("shift operator expected"));
2814 return FALSE;
2815 }
2816
2817 kind = aarch64_get_operand_modifier (shift_op);
2818
2819 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2820 {
2821 set_syntax_error (_("invalid use of 'MSL'"));
2822 return FALSE;
2823 }
2824
2825 switch (mode)
2826 {
2827 case SHIFTED_LOGIC_IMM:
2828 if (aarch64_extend_operator_p (kind) == TRUE)
2829 {
2830 set_syntax_error (_("extending shift is not permitted"));
2831 return FALSE;
2832 }
2833 break;
2834
2835 case SHIFTED_ARITH_IMM:
2836 if (kind == AARCH64_MOD_ROR)
2837 {
2838 set_syntax_error (_("'ROR' shift is not permitted"));
2839 return FALSE;
2840 }
2841 break;
2842
2843 case SHIFTED_LSL:
2844 if (kind != AARCH64_MOD_LSL)
2845 {
2846 set_syntax_error (_("only 'LSL' shift is permitted"));
2847 return FALSE;
2848 }
2849 break;
2850
2851 case SHIFTED_REG_OFFSET:
2852 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2853 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2854 {
2855 set_fatal_syntax_error
2856 (_("invalid shift for the register offset addressing mode"));
2857 return FALSE;
2858 }
2859 break;
2860
2861 case SHIFTED_LSL_MSL:
2862 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2863 {
2864 set_syntax_error (_("invalid shift operator"));
2865 return FALSE;
2866 }
2867 break;
2868
2869 default:
2870 abort ();
2871 }
2872
2873 /* Whitespace can appear here if the next thing is a bare digit. */
2874 skip_whitespace (p);
2875
2876 /* Parse shift amount. */
2877 exp_has_prefix = 0;
2878 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2879 exp.X_op = O_absent;
2880 else
2881 {
2882 if (is_immediate_prefix (*p))
2883 {
2884 p++;
2885 exp_has_prefix = 1;
2886 }
2887 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2888 }
2889 if (exp.X_op == O_absent)
2890 {
2891 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2892 {
2893 set_syntax_error (_("missing shift amount"));
2894 return FALSE;
2895 }
2896 operand->shifter.amount = 0;
2897 }
2898 else if (exp.X_op != O_constant)
2899 {
2900 set_syntax_error (_("constant shift amount required"));
2901 return FALSE;
2902 }
2903 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2904 {
2905 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2906 return FALSE;
2907 }
2908 else
2909 {
2910 operand->shifter.amount = exp.X_add_number;
2911 operand->shifter.amount_present = 1;
2912 }
2913
2914 operand->shifter.operator_present = 1;
2915 operand->shifter.kind = kind;
2916
2917 *str = p;
2918 return TRUE;
2919 }
2920
2921 /* Parse a <shifter_operand> for a data processing instruction:
2922
2923 #<immediate>
2924 #<immediate>, LSL #imm
2925
2926 Validation of immediate operands is deferred to md_apply_fix.
2927
2928 Return TRUE on success; otherwise return FALSE. */
2929
2930 static bfd_boolean
2931 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2932 enum parse_shift_mode mode)
2933 {
2934 char *p;
2935
2936 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2937 return FALSE;
2938
2939 p = *str;
2940
2941 /* Accept an immediate expression. */
2942 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2943 return FALSE;
2944
2945 /* Accept optional LSL for arithmetic immediate values. */
2946 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2947 if (! parse_shift (&p, operand, SHIFTED_LSL))
2948 return FALSE;
2949
2950 /* Do not accept any shifter for logical immediate values. */
2951 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2952 && parse_shift (&p, operand, mode))
2953 {
2954 set_syntax_error (_("unexpected shift operator"));
2955 return FALSE;
2956 }
2957
2958 *str = p;
2959 return TRUE;
2960 }
2961
2962 /* Parse a <shifter_operand> for a data processing instruction:
2963
2964 <Rm>
2965 <Rm>, <shift>
2966 #<immediate>
2967 #<immediate>, LSL #imm
2968
2969 where <shift> is handled by parse_shift above, and the last two
2970 cases are handled by the function above.
2971
2972 Validation of immediate operands is deferred to md_apply_fix.
2973
2974 Return TRUE on success; otherwise return FALSE. */
2975
2976 static bfd_boolean
2977 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2978 enum parse_shift_mode mode)
2979 {
2980 int reg;
2981 int isreg32, isregzero;
2982 enum aarch64_operand_class opd_class
2983 = aarch64_get_operand_class (operand->type);
2984
2985 if ((reg =
2986 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2987 {
2988 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2989 {
2990 set_syntax_error (_("unexpected register in the immediate operand"));
2991 return FALSE;
2992 }
2993
2994 if (!isregzero && reg == REG_SP)
2995 {
2996 set_syntax_error (BAD_SP);
2997 return FALSE;
2998 }
2999
3000 operand->reg.regno = reg;
3001 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
3002
3003 /* Accept optional shift operation on register. */
3004 if (! skip_past_comma (str))
3005 return TRUE;
3006
3007 if (! parse_shift (str, operand, mode))
3008 return FALSE;
3009
3010 return TRUE;
3011 }
3012 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3013 {
3014 set_syntax_error
3015 (_("integer register expected in the extended/shifted operand "
3016 "register"));
3017 return FALSE;
3018 }
3019
3020 /* We have a shifted immediate operand. */
3021 return parse_shifter_operand_imm (str, operand, mode);
3022 }
3023
3024 /* Return TRUE on success; return FALSE otherwise. */
3025
3026 static bfd_boolean
3027 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3028 enum parse_shift_mode mode)
3029 {
3030 char *p = *str;
3031
3032 /* Determine if we have the sequence of characters #: or just :
3033 coming next. If we do, then we check for a :rello: relocation
3034 modifier. If we don't, punt the whole lot to
3035 parse_shifter_operand. */
3036
3037 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3038 {
3039 struct reloc_table_entry *entry;
3040
3041 if (p[0] == '#')
3042 p += 2;
3043 else
3044 p++;
3045 *str = p;
3046
3047 /* Try to parse a relocation. Anything else is an error. */
3048 if (!(entry = find_reloc_table_entry (str)))
3049 {
3050 set_syntax_error (_("unknown relocation modifier"));
3051 return FALSE;
3052 }
3053
3054 if (entry->add_type == 0)
3055 {
3056 set_syntax_error
3057 (_("this relocation modifier is not allowed on this instruction"));
3058 return FALSE;
3059 }
3060
3061 /* Save str before we decompose it. */
3062 p = *str;
3063
3064 /* Next, we parse the expression. */
3065 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3066 return FALSE;
3067
3068 /* Record the relocation type (use the ADD variant here). */
3069 inst.reloc.type = entry->add_type;
3070 inst.reloc.pc_rel = entry->pc_rel;
3071
3072 /* If str is empty, we've reached the end; stop here. */
3073 if (**str == '\0')
3074 return TRUE;
3075
3076 /* Otherwise, we have a shifted reloc modifier, so rewind to
3077 recover the variable name and continue parsing for the shifter. */
3078 *str = p;
3079 return parse_shifter_operand_imm (str, operand, mode);
3080 }
3081
3082 return parse_shifter_operand (str, operand, mode);
3083 }
3084
3085 /* Parse all forms of an address expression. Information is written
3086 to *OPERAND and/or inst.reloc.
3087
3088 The A64 instruction set has the following addressing modes:
3089
3090 Offset
3091 [base] // in SIMD ld/st structure
3092 [base{,#0}] // in ld/st exclusive
3093 [base{,#imm}]
3094 [base,Xm{,LSL #imm}]
3095 [base,Xm,SXTX {#imm}]
3096 [base,Wm,(S|U)XTW {#imm}]
3097 Pre-indexed
3098 [base,#imm]!
3099 Post-indexed
3100 [base],#imm
3101 [base],Xm // in SIMD ld/st structure
3102 PC-relative (literal)
3103 label
3104 =immediate
3105
3106 (As a convenience, the notation "=immediate" is permitted in conjunction
3107 with the pc-relative literal load instructions to automatically place an
3108 immediate value or symbolic address in a nearby literal pool and generate
3109 a hidden label which references it.)
3110
3111 Upon a successful parsing, the address structure in *OPERAND will be
3112 filled in the following way:
3113
3114 .base_regno = <base>
3115 .offset.is_reg // 1 if the offset is a register
3116 .offset.imm = <imm>
3117 .offset.regno = <Rm>
3118
3119 For different addressing modes defined in the A64 ISA:
3120
3121 Offset
3122 .pcrel=0; .preind=1; .postind=0; .writeback=0
3123 Pre-indexed
3124 .pcrel=0; .preind=1; .postind=0; .writeback=1
3125 Post-indexed
3126 .pcrel=0; .preind=0; .postind=1; .writeback=1
3127 PC-relative (literal)
3128 .pcrel=1; .preind=1; .postind=0; .writeback=0
3129
3130 The shift/extension information, if any, will be stored in .shifter.
3131
3132 It is the caller's responsibility to check for addressing modes not
3133 supported by the instruction, and to set inst.reloc.type. */
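
/* Illustrative forms as they appear in assembly source:

ldr x0, [x1, #8] (offset)
ldr x0, [x1, x2, lsl #3] (register offset)
ldr x0, [x1, #8]! (pre-indexed)
ldr x0, [x1], #8 (post-indexed)
ldr x0, =0x1234 (PC-relative, via the "=immediate" convenience) */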
3134
3135 static bfd_boolean
3136 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
3137 int accept_reg_post_index)
3138 {
3139 char *p = *str;
3140 int reg;
3141 int isreg32, isregzero;
3142 expressionS *exp = &inst.reloc.exp;
3143
3144 if (! skip_past_char (&p, '['))
3145 {
3146 /* =immediate or label. */
3147 operand->addr.pcrel = 1;
3148 operand->addr.preind = 1;
3149
3150 /* #:<reloc_op>:<symbol> */
3151 skip_past_char (&p, '#');
3152 if (reloc && skip_past_char (&p, ':'))
3153 {
3154 bfd_reloc_code_real_type ty;
3155 struct reloc_table_entry *entry;
3156
3157 /* Try to parse a relocation modifier. Anything else is
3158 an error. */
3159 entry = find_reloc_table_entry (&p);
3160 if (! entry)
3161 {
3162 set_syntax_error (_("unknown relocation modifier"));
3163 return FALSE;
3164 }
3165
3166 switch (operand->type)
3167 {
3168 case AARCH64_OPND_ADDR_PCREL21:
3169 /* adr */
3170 ty = entry->adr_type;
3171 break;
3172
3173 default:
3174 ty = entry->ld_literal_type;
3175 break;
3176 }
3177
3178 if (ty == 0)
3179 {
3180 set_syntax_error
3181 (_("this relocation modifier is not allowed on this "
3182 "instruction"));
3183 return FALSE;
3184 }
3185
3186 /* #:<reloc_op>: */
3187 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3188 {
3189 set_syntax_error (_("invalid relocation expression"));
3190 return FALSE;
3191 }
3192
3193 /* #:<reloc_op>:<expr> */
3194 /* Record the relocation type. */
3195 inst.reloc.type = ty;
3196 inst.reloc.pc_rel = entry->pc_rel;
3197 }
3198 else
3199 {
3200
3201 if (skip_past_char (&p, '='))
3202 /* =immediate; need to generate the literal in the literal pool. */
3203 inst.gen_lit_pool = 1;
3204
3205 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3206 {
3207 set_syntax_error (_("invalid address"));
3208 return FALSE;
3209 }
3210 }
3211
3212 *str = p;
3213 return TRUE;
3214 }
3215
3216 /* [ */
3217
3218 /* Accept SP and reject ZR */
3219 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
3220 if (reg == PARSE_FAIL || isreg32)
3221 {
3222 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3223 return FALSE;
3224 }
3225 operand->addr.base_regno = reg;
3226
3227 /* [Xn */
3228 if (skip_past_comma (&p))
3229 {
3230 /* [Xn, */
3231 operand->addr.preind = 1;
3232
3233 /* Reject SP and accept ZR */
3234 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3235 if (reg != PARSE_FAIL)
3236 {
3237 /* [Xn,Rm */
3238 operand->addr.offset.regno = reg;
3239 operand->addr.offset.is_reg = 1;
3240 /* Shifted index. */
3241 if (skip_past_comma (&p))
3242 {
3243 /* [Xn,Rm, */
3244 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3245 /* Use the diagnostics set in parse_shift, so do not set a new
3246 error message here. */
3247 return FALSE;
3248 }
3249 /* We only accept:
3250 [base,Xm{,LSL #imm}]
3251 [base,Xm,SXTX {#imm}]
3252 [base,Wm,(S|U)XTW {#imm}] */
3253 if (operand->shifter.kind == AARCH64_MOD_NONE
3254 || operand->shifter.kind == AARCH64_MOD_LSL
3255 || operand->shifter.kind == AARCH64_MOD_SXTX)
3256 {
3257 if (isreg32)
3258 {
3259 set_syntax_error (_("invalid use of 32-bit register offset"));
3260 return FALSE;
3261 }
3262 }
3263 else if (!isreg32)
3264 {
3265 set_syntax_error (_("invalid use of 64-bit register offset"));
3266 return FALSE;
3267 }
3268 }
3269 else
3270 {
3271 /* [Xn,#:<reloc_op>:<symbol> */
3272 skip_past_char (&p, '#');
3273 if (reloc && skip_past_char (&p, ':'))
3274 {
3275 struct reloc_table_entry *entry;
3276
3277 /* Try to parse a relocation modifier. Anything else is
3278 an error. */
3279 if (!(entry = find_reloc_table_entry (&p)))
3280 {
3281 set_syntax_error (_("unknown relocation modifier"));
3282 return FALSE;
3283 }
3284
3285 if (entry->ldst_type == 0)
3286 {
3287 set_syntax_error
3288 (_("this relocation modifier is not allowed on this "
3289 "instruction"));
3290 return FALSE;
3291 }
3292
3293 /* [Xn,#:<reloc_op>: */
3294 /* We now have the group relocation table entry corresponding to
3295 the name in the assembler source. Next, we parse the
3296 expression. */
3297 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3298 {
3299 set_syntax_error (_("invalid relocation expression"));
3300 return FALSE;
3301 }
3302
3303 /* [Xn,#:<reloc_op>:<expr> */
3304 /* Record the load/store relocation type. */
3305 inst.reloc.type = entry->ldst_type;
3306 inst.reloc.pc_rel = entry->pc_rel;
3307 }
3308 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3309 {
3310 set_syntax_error (_("invalid expression in the address"));
3311 return FALSE;
3312 }
3313 /* [Xn,<expr> */
3314 }
3315 }
3316
3317 if (! skip_past_char (&p, ']'))
3318 {
3319 set_syntax_error (_("']' expected"));
3320 return FALSE;
3321 }
3322
3323 if (skip_past_char (&p, '!'))
3324 {
3325 if (operand->addr.preind && operand->addr.offset.is_reg)
3326 {
3327 set_syntax_error (_("register offset not allowed in pre-indexed "
3328 "addressing mode"));
3329 return FALSE;
3330 }
3331 /* [Xn]! */
3332 operand->addr.writeback = 1;
3333 }
3334 else if (skip_past_comma (&p))
3335 {
3336 /* [Xn], */
3337 operand->addr.postind = 1;
3338 operand->addr.writeback = 1;
3339
3340 if (operand->addr.preind)
3341 {
3342 set_syntax_error (_("cannot combine pre- and post-indexing"));
3343 return FALSE;
3344 }
3345
3346 if (accept_reg_post_index
3347 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3348 &isregzero)) != PARSE_FAIL)
3349 {
3350 /* [Xn],Xm */
3351 if (isreg32)
3352 {
3353 set_syntax_error (_("invalid 32-bit register offset"));
3354 return FALSE;
3355 }
3356 operand->addr.offset.regno = reg;
3357 operand->addr.offset.is_reg = 1;
3358 }
3359 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3360 {
3361 /* [Xn],#expr */
3362 set_syntax_error (_("invalid expression in the address"));
3363 return FALSE;
3364 }
3365 }
3366
3367 /* If at this point neither .preind nor .postind is set, we have a
3368 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3369 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3370 {
3371 if (operand->addr.writeback)
3372 {
3373 /* Reject [Rn]! */
3374 set_syntax_error (_("missing offset in the pre-indexed address"));
3375 return FALSE;
3376 }
3377 operand->addr.preind = 1;
3378 inst.reloc.exp.X_op = O_constant;
3379 inst.reloc.exp.X_add_number = 0;
3380 }
3381
3382 *str = p;
3383 return TRUE;
3384 }
3385
3386 /* Return TRUE on success; otherwise return FALSE. */
3387 static bfd_boolean
3388 parse_address (char **str, aarch64_opnd_info *operand,
3389 int accept_reg_post_index)
3390 {
3391 return parse_address_main (str, operand, 0, accept_reg_post_index);
3392 }
3393
3394 /* Return TRUE on success; otherwise return FALSE. */
3395 static bfd_boolean
3396 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3397 {
3398 return parse_address_main (str, operand, 1, 0);
3399 }
3400
3401 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3402 Return TRUE on success; otherwise return FALSE. */
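/* For example, the immediate operands in "movz x0, #:abs_g1:varsym" and
"movk x0, #:abs_g0_nc:varsym" are handled here, using the movw_type
relocations from reloc_table. */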
3403 static bfd_boolean
3404 parse_half (char **str, int *internal_fixup_p)
3405 {
3406 char *p, *saved;
3407 int dummy;
3408
3409 p = *str;
3410 skip_past_char (&p, '#');
3411
3412 gas_assert (internal_fixup_p);
3413 *internal_fixup_p = 0;
3414
3415 if (*p == ':')
3416 {
3417 struct reloc_table_entry *entry;
3418
3419 /* Try to parse a relocation. Anything else is an error. */
3420 ++p;
3421 if (!(entry = find_reloc_table_entry (&p)))
3422 {
3423 set_syntax_error (_("unknown relocation modifier"));
3424 return FALSE;
3425 }
3426
3427 if (entry->movw_type == 0)
3428 {
3429 set_syntax_error
3430 (_("this relocation modifier is not allowed on this instruction"));
3431 return FALSE;
3432 }
3433
3434 inst.reloc.type = entry->movw_type;
3435 }
3436 else
3437 *internal_fixup_p = 1;
3438
3439 /* Avoid parsing a register as a general symbol. */
3440 saved = p;
3441 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3442 return FALSE;
3443 p = saved;
3444
3445 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3446 return FALSE;
3447
3448 *str = p;
3449 return TRUE;
3450 }
3451
3452 /* Parse an operand for an ADRP instruction:
3453 ADRP <Xd>, <label>
3454 Return TRUE on success; otherwise return FALSE. */
3455
3456 static bfd_boolean
3457 parse_adrp (char **str)
3458 {
3459 char *p;
3460
3461 p = *str;
3462 if (*p == ':')
3463 {
3464 struct reloc_table_entry *entry;
3465
3466 /* Try to parse a relocation. Anything else is an error. */
3467 ++p;
3468 if (!(entry = find_reloc_table_entry (&p)))
3469 {
3470 set_syntax_error (_("unknown relocation modifier"));
3471 return FALSE;
3472 }
3473
3474 if (entry->adrp_type == 0)
3475 {
3476 set_syntax_error
3477 (_("this relocation modifier is not allowed on this instruction"));
3478 return FALSE;
3479 }
3480
3481 inst.reloc.type = entry->adrp_type;
3482 }
3483 else
3484 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3485
3486 inst.reloc.pc_rel = 1;
3487
3488 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3489 return FALSE;
3490
3491 *str = p;
3492 return TRUE;
3493 }
3494
3495 /* Miscellaneous. */
3496
3497 /* Parse an option for a preload instruction. Returns the encoding for the
3498 option, or PARSE_FAIL. */
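/* For example, the "pldl1keep" in "prfm pldl1keep, [x0, #8]". */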
3499
3500 static int
3501 parse_pldop (char **str)
3502 {
3503 char *p, *q;
3504 const struct aarch64_name_value_pair *o;
3505
3506 p = q = *str;
3507 while (ISALNUM (*q))
3508 q++;
3509
3510 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3511 if (!o)
3512 return PARSE_FAIL;
3513
3514 *str = q;
3515 return o->value;
3516 }
3517
3518 /* Parse an option for a barrier instruction. Returns the encoding for the
3519 option, or PARSE_FAIL. */
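/* For example, the "ish" in "dmb ish" or the "sy" in "dsb sy". */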
3520
3521 static int
3522 parse_barrier (char **str)
3523 {
3524 char *p, *q;
3525 const asm_barrier_opt *o;
3526
3527 p = q = *str;
3528 while (ISALPHA (*q))
3529 q++;
3530
3531 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3532 if (!o)
3533 return PARSE_FAIL;
3534
3535 *str = q;
3536 return o->value;
3537 }
3538
3539 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3540 Returns the encoding for the option, or PARSE_FAIL.
3541
3542 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3543 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3544
3545 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3546 field, otherwise as a system register.
3547 */
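/* For example, "mrs x0, s3_1_c15_c2_0" names the implementation defined
register with op0=3, op1=1, CRn=15, CRm=2, op2=0, which is encoded below
as (3 << 14) | (1 << 11) | (15 << 7) | (2 << 3) | 0 = 0xcf90. */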
3548
3549 static int
3550 parse_sys_reg (char **str, struct hash_control *sys_regs,
3551 int imple_defined_p, int pstatefield_p)
3552 {
3553 char *p, *q;
3554 char buf[32];
3555 const aarch64_sys_reg *o;
3556 int value;
3557
3558 p = buf;
3559 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3560 if (p < buf + 31)
3561 *p++ = TOLOWER (*q);
3562 *p = '\0';
3563 /* Assert that BUF is large enough. */
3564 gas_assert (p - buf == q - *str);
3565
3566 o = hash_find (sys_regs, buf);
3567 if (!o)
3568 {
3569 if (!imple_defined_p)
3570 return PARSE_FAIL;
3571 else
3572 {
3573 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3574 unsigned int op0, op1, cn, cm, op2;
3575
3576 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3577 != 5)
3578 return PARSE_FAIL;
3579 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3580 return PARSE_FAIL;
3581 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3582 }
3583 }
3584 else
3585 {
3586 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3587 as_bad (_("selected processor does not support PSTATE field "
3588 "name '%s'"), buf);
3589 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3590 as_bad (_("selected processor does not support system register "
3591 "name '%s'"), buf);
3592 if (aarch64_sys_reg_deprecated_p (o))
3593 as_warn (_("system register name '%s' is deprecated and may be "
3594 "removed in a future release"), buf);
3595 value = o->value;
3596 }
3597
3598 *str = q;
3599 return value;
3600 }
3601
3602 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3603 for the option, or NULL. */
3604
3605 static const aarch64_sys_ins_reg *
3606 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3607 {
3608 char *p, *q;
3609 char buf[32];
3610 const aarch64_sys_ins_reg *o;
3611
3612 p = buf;
3613 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3614 if (p < buf + 31)
3615 *p++ = TOLOWER (*q);
3616 *p = '\0';
3617
3618 o = hash_find (sys_ins_regs, buf);
3619 if (!o)
3620 return NULL;
3621
3622 *str = q;
3623 return o;
3624 }
3625 \f
3626 #define po_char_or_fail(chr) do { \
3627 if (! skip_past_char (&str, chr)) \
3628 goto failure; \
3629 } while (0)
3630
3631 #define po_reg_or_fail(regtype) do { \
3632 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3633 if (val == PARSE_FAIL) \
3634 { \
3635 set_default_error (); \
3636 goto failure; \
3637 } \
3638 } while (0)
3639
3640 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3641 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3642 &isreg32, &isregzero); \
3643 if (val == PARSE_FAIL) \
3644 { \
3645 set_default_error (); \
3646 goto failure; \
3647 } \
3648 info->reg.regno = val; \
3649 if (isreg32) \
3650 info->qualifier = AARCH64_OPND_QLF_W; \
3651 else \
3652 info->qualifier = AARCH64_OPND_QLF_X; \
3653 } while (0)
3654
3655 #define po_imm_nc_or_fail() do { \
3656 if (! parse_constant_immediate (&str, &val)) \
3657 goto failure; \
3658 } while (0)
3659
3660 #define po_imm_or_fail(min, max) do { \
3661 if (! parse_constant_immediate (&str, &val)) \
3662 goto failure; \
3663 if (val < min || val > max) \
3664 { \
3665 set_fatal_syntax_error (_("immediate value out of range "\
3666 #min " to "#max)); \
3667 goto failure; \
3668 } \
3669 } while (0)
3670
3671 #define po_misc_or_fail(expr) do { \
3672 if (!expr) \
3673 goto failure; \
3674 } while (0)
3675 \f
3676 /* encode the 12-bit imm field of Add/sub immediate */
3677 static inline uint32_t
3678 encode_addsub_imm (uint32_t imm)
3679 {
3680 return imm << 10;
3681 }
3682
3683 /* encode the shift amount field of Add/sub immediate */
3684 static inline uint32_t
3685 encode_addsub_imm_shift_amount (uint32_t cnt)
3686 {
3687 return cnt << 22;
3688 }
3689
3690
3691 /* encode the imm field of Adr instruction */
3692 static inline uint32_t
3693 encode_adr_imm (uint32_t imm)
3694 {
3695 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3696 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3697 }
3698
3699 /* encode the immediate field of Move wide immediate */
3700 static inline uint32_t
3701 encode_movw_imm (uint32_t imm)
3702 {
3703 return imm << 5;
3704 }
3705
3706 /* encode the 26-bit offset of unconditional branch */
3707 static inline uint32_t
3708 encode_branch_ofs_26 (uint32_t ofs)
3709 {
3710 return ofs & ((1 << 26) - 1);
3711 }
3712
3713 /* encode the 19-bit offset of conditional branch and compare & branch */
3714 static inline uint32_t
3715 encode_cond_branch_ofs_19 (uint32_t ofs)
3716 {
3717 return (ofs & ((1 << 19) - 1)) << 5;
3718 }
3719
3720 /* encode the 19-bit offset of ld literal */
3721 static inline uint32_t
3722 encode_ld_lit_ofs_19 (uint32_t ofs)
3723 {
3724 return (ofs & ((1 << 19) - 1)) << 5;
3725 }
3726
3727 /* Encode the 14-bit offset of test & branch. */
3728 static inline uint32_t
3729 encode_tst_branch_ofs_14 (uint32_t ofs)
3730 {
3731 return (ofs & ((1 << 14) - 1)) << 5;
3732 }
3733
3734 /* Encode the 16-bit imm field of svc/hvc/smc. */
3735 static inline uint32_t
3736 encode_svc_imm (uint32_t imm)
3737 {
3738 return imm << 5;
3739 }
3740
3741 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3742 static inline uint32_t
3743 reencode_addsub_switch_add_sub (uint32_t opcode)
3744 {
3745 return opcode ^ (1 << 30);
3746 }
3747
3748 static inline uint32_t
3749 reencode_movzn_to_movz (uint32_t opcode)
3750 {
3751 return opcode | (1 << 30);
3752 }
3753
3754 static inline uint32_t
3755 reencode_movzn_to_movn (uint32_t opcode)
3756 {
3757 return opcode & ~(1 << 30);
3758 }
3759
3760 /* Overall per-instruction processing. */
3761
3762 /* We need to be able to fix up arbitrary expressions in some statements.
3763 This is so that we can handle symbols that are an arbitrary distance from
3764 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3765 which returns part of an address in a form which will be valid for
3766 a data instruction. We do this by pushing the expression into a symbol
3767 in the expr_section, and creating a fix for that. */
3768
3769 static fixS *
3770 fix_new_aarch64 (fragS * frag,
3771 int where,
3772 short int size, expressionS * exp, int pc_rel, int reloc)
3773 {
3774 fixS *new_fix;
3775
3776 switch (exp->X_op)
3777 {
3778 case O_constant:
3779 case O_symbol:
3780 case O_add:
3781 case O_subtract:
3782 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3783 break;
3784
3785 default:
3786 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3787 pc_rel, reloc);
3788 break;
3789 }
3790 return new_fix;
3791 }
3792 \f
3793 /* Diagnostics on operands errors. */
3794
3795 /* By default, output a verbose error message.
3796 Verbose error messages can be disabled with -mno-verbose-error. */
3797 static int verbose_error_p = 1;
3798
3799 #ifdef DEBUG_AARCH64
3800 /* N.B. this is only for the purpose of debugging. */
3801 const char* operand_mismatch_kind_names[] =
3802 {
3803 "AARCH64_OPDE_NIL",
3804 "AARCH64_OPDE_RECOVERABLE",
3805 "AARCH64_OPDE_SYNTAX_ERROR",
3806 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3807 "AARCH64_OPDE_INVALID_VARIANT",
3808 "AARCH64_OPDE_OUT_OF_RANGE",
3809 "AARCH64_OPDE_UNALIGNED",
3810 "AARCH64_OPDE_REG_LIST",
3811 "AARCH64_OPDE_OTHER_ERROR",
3812 };
3813 #endif /* DEBUG_AARCH64 */
3814
3815 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3816
3817 When multiple errors of different kinds are found in the same assembly
3818 line, only the error of the highest severity will be picked up for
3819 issuing the diagnostics. */
3820
3821 static inline bfd_boolean
3822 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3823 enum aarch64_operand_error_kind rhs)
3824 {
3825 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3826 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3827 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3828 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3829 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3830 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3831 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3832 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3833 return lhs > rhs;
3834 }
3835
3836 /* Helper routine to get the mnemonic name from the assembly instruction
3837 line; should only be called for diagnostic purposes, as a string copy
3838 operation is involved, which may affect the runtime
3839 performance if used elsewhere. */
3840
3841 static const char*
3842 get_mnemonic_name (const char *str)
3843 {
3844 static char mnemonic[32];
3845 char *ptr;
3846
3847 /* Get the first 31 bytes and assume that the full name is included. */
3848 strncpy (mnemonic, str, 31);
3849 mnemonic[31] = '\0';
3850
3851 /* Scan up to the end of the mnemonic, which must end in white space,
3852 '.', or end of string. */
3853 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3854 ;
3855
3856 *ptr = '\0';
3857
3858 /* Append '...' to the truncated long name. */
3859 if (ptr - mnemonic == 31)
3860 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3861
3862 return mnemonic;
3863 }
3864
3865 static void
3866 reset_aarch64_instruction (aarch64_instruction *instruction)
3867 {
3868 memset (instruction, '\0', sizeof (aarch64_instruction));
3869 instruction->reloc.type = BFD_RELOC_UNUSED;
3870 }
3871
3872 /* Data structures storing one user error in the assembly code related to
3873 operands. */
3874
3875 struct operand_error_record
3876 {
3877 const aarch64_opcode *opcode;
3878 aarch64_operand_error detail;
3879 struct operand_error_record *next;
3880 };
3881
3882 typedef struct operand_error_record operand_error_record;
3883
3884 struct operand_errors
3885 {
3886 operand_error_record *head;
3887 operand_error_record *tail;
3888 };
3889
3890 typedef struct operand_errors operand_errors;
3891
3892 /* Top-level data structure reporting user errors for the current line of
3893 the assembly code.
3894 The way md_assemble works is that all opcodes sharing the same mnemonic
3895 name are iterated over to find a match for the assembly line. In this data
3896 structure, each such opcode will have one operand_error_record
3897 allocated and inserted. In other words, excessive errors related to
3898 a single opcode are disregarded. */
3899 operand_errors operand_error_report;
3900
3901 /* Free record nodes. */
3902 static operand_error_record *free_opnd_error_record_nodes = NULL;
3903
3904 /* Initialize the data structure that stores the operand mismatch
3905 information on assembling one line of the assembly code. */
3906 static void
3907 init_operand_error_report (void)
3908 {
3909 if (operand_error_report.head != NULL)
3910 {
3911 gas_assert (operand_error_report.tail != NULL);
3912 operand_error_report.tail->next = free_opnd_error_record_nodes;
3913 free_opnd_error_record_nodes = operand_error_report.head;
3914 operand_error_report.head = NULL;
3915 operand_error_report.tail = NULL;
3916 return;
3917 }
3918 gas_assert (operand_error_report.tail == NULL);
3919 }
3920
3921 /* Return TRUE if some operand error has been recorded during the
3922 parsing of the current assembly line using the opcode *OPCODE;
3923 otherwise return FALSE. */
3924 static inline bfd_boolean
3925 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3926 {
3927 operand_error_record *record = operand_error_report.head;
3928 return record && record->opcode == opcode;
3929 }
3930
3931 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3932 OPCODE field is initialized with OPCODE.
3933 N.B. there is only one record for each opcode, i.e. at most one error is
3934 recorded for each instruction template. */
3935
3936 static void
3937 add_operand_error_record (const operand_error_record* new_record)
3938 {
3939 const aarch64_opcode *opcode = new_record->opcode;
3940 operand_error_record* record = operand_error_report.head;
3941
3942 /* A record may already have been created for this opcode; if not, we
3943 need to prepare one. */
3944 if (! opcode_has_operand_error_p (opcode))
3945 {
3946 /* Get one empty record. */
3947 if (free_opnd_error_record_nodes == NULL)
3948 {
3949 record = xmalloc (sizeof (operand_error_record));
3950 if (record == NULL)
3951 abort ();
3952 }
3953 else
3954 {
3955 record = free_opnd_error_record_nodes;
3956 free_opnd_error_record_nodes = record->next;
3957 }
3958 record->opcode = opcode;
3959 /* Insert at the head. */
3960 record->next = operand_error_report.head;
3961 operand_error_report.head = record;
3962 if (operand_error_report.tail == NULL)
3963 operand_error_report.tail = record;
3964 }
3965 else if (record->detail.kind != AARCH64_OPDE_NIL
3966 && record->detail.index <= new_record->detail.index
3967 && operand_error_higher_severity_p (record->detail.kind,
3968 new_record->detail.kind))
3969 {
3970 /* In the case of multiple errors found on operands related to a
3971 single opcode, only record the error of the leftmost operand, and
3972 only if it is of higher severity. */
3973 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3974 " the existing error %s on operand %d",
3975 operand_mismatch_kind_names[new_record->detail.kind],
3976 new_record->detail.index,
3977 operand_mismatch_kind_names[record->detail.kind],
3978 record->detail.index);
3979 return;
3980 }
3981
3982 record->detail = new_record->detail;
3983 }
3984
3985 static inline void
3986 record_operand_error_info (const aarch64_opcode *opcode,
3987 aarch64_operand_error *error_info)
3988 {
3989 operand_error_record record;
3990 record.opcode = opcode;
3991 record.detail = *error_info;
3992 add_operand_error_record (&record);
3993 }
3994
3995 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3996 error message *ERROR, for operand IDX (count from 0). */
3997
3998 static void
3999 record_operand_error (const aarch64_opcode *opcode, int idx,
4000 enum aarch64_operand_error_kind kind,
4001 const char* error)
4002 {
4003 aarch64_operand_error info;
4004 memset(&info, 0, sizeof (info));
4005 info.index = idx;
4006 info.kind = kind;
4007 info.error = error;
4008 record_operand_error_info (opcode, &info);
4009 }
4010
4011 static void
4012 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4013 enum aarch64_operand_error_kind kind,
4014 const char* error, const int *extra_data)
4015 {
4016 aarch64_operand_error info;
4017 info.index = idx;
4018 info.kind = kind;
4019 info.error = error;
4020 info.data[0] = extra_data[0];
4021 info.data[1] = extra_data[1];
4022 info.data[2] = extra_data[2];
4023 record_operand_error_info (opcode, &info);
4024 }
4025
4026 static void
4027 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4028 const char* error, int lower_bound,
4029 int upper_bound)
4030 {
4031 int data[3] = {lower_bound, upper_bound, 0};
4032 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4033 error, data);
4034 }
4035
4036 /* Remove the operand error record for *OPCODE. */
4037 static void ATTRIBUTE_UNUSED
4038 remove_operand_error_record (const aarch64_opcode *opcode)
4039 {
4040 if (opcode_has_operand_error_p (opcode))
4041 {
4042 operand_error_record* record = operand_error_report.head;
4043 gas_assert (record != NULL && operand_error_report.tail != NULL);
4044 operand_error_report.head = record->next;
4045 record->next = free_opnd_error_record_nodes;
4046 free_opnd_error_record_nodes = record;
4047 if (operand_error_report.head == NULL)
4048 {
4049 gas_assert (operand_error_report.tail == record);
4050 operand_error_report.tail = NULL;
4051 }
4052 }
4053 }
4054
4055 /* Given the instruction in *INSTR, return the index of the best matched
4056 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4057
4058 Return -1 if there is no qualifier sequence; return the first match
4059 if multiple matches are found. */
4060
4061 static int
4062 find_best_match (const aarch64_inst *instr,
4063 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4064 {
4065 int i, num_opnds, max_num_matched, idx;
4066
4067 num_opnds = aarch64_num_of_operands (instr->opcode);
4068 if (num_opnds == 0)
4069 {
4070 DEBUG_TRACE ("no operand");
4071 return -1;
4072 }
4073
4074 max_num_matched = 0;
4075 idx = -1;
4076
4077 /* For each pattern. */
4078 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4079 {
4080 int j, num_matched;
4081 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4082
4083 /* Most opcodes have far fewer patterns in the list. */
4084 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
4085 {
4086 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4087 if (i != 0 && idx == -1)
4088 /* If nothing has been matched, return the 1st sequence. */
4089 idx = 0;
4090 break;
4091 }
4092
4093 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4094 if (*qualifiers == instr->operands[j].qualifier)
4095 ++num_matched;
4096
4097 if (num_matched > max_num_matched)
4098 {
4099 max_num_matched = num_matched;
4100 idx = i;
4101 }
4102 }
4103
4104 DEBUG_TRACE ("return with %d", idx);
4105 return idx;
4106 }
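
An illustrative, standalone sketch of the selection rule used by find_best_match: count, for each candidate qualifier sequence, how many positions match the qualifiers parsed into the instruction, and keep the first sequence with the highest count. Small ints stand in for aarch64_opnd_qualifier_t; the empty-sequence sentinel and the no-operand case handled above are omitted, and the demo_* names are invented for this example only.

#include <stdio.h>

#define NUM_OPNDS 3
#define NUM_SEQS  3

static int
demo_find_best_match (const int actual[NUM_OPNDS],
		      const int seqs[NUM_SEQS][NUM_OPNDS])
{
  int best = -1, best_matched = 0, i, j;

  for (i = 0; i < NUM_SEQS; i++)
    {
      int matched = 0;
      for (j = 0; j < NUM_OPNDS; j++)
	if (seqs[i][j] == actual[j])
	  matched++;
      /* Strictly greater, so the first sequence with the best count wins.  */
      if (matched > best_matched)
	{
	  best_matched = matched;
	  best = i;
	}
    }
  return best;
}

int
main (void)
{
  int actual[NUM_OPNDS] = { 1, 2, 2 };
  int seqs[NUM_SEQS][NUM_OPNDS] = { { 1, 1, 1 }, { 1, 2, 1 }, { 2, 2, 2 } };
  /* Sequences 1 and 2 both match two positions; index 1 is reported.  */
  printf ("best sequence index: %d\n", demo_find_best_match (actual, seqs));
  return 0;
}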
4107
4108 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4109 corresponding operands in *INSTR. */
4110
4111 static inline void
4112 assign_qualifier_sequence (aarch64_inst *instr,
4113 const aarch64_opnd_qualifier_t *qualifiers)
4114 {
4115 int i = 0;
4116 int num_opnds = aarch64_num_of_operands (instr->opcode);
4117 gas_assert (num_opnds);
4118 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4119 instr->operands[i].qualifier = *qualifiers;
4120 }
4121
4122 /* Print operands for diagnostic purposes. */
4123
4124 static void
4125 print_operands (char *buf, const aarch64_opcode *opcode,
4126 const aarch64_opnd_info *opnds)
4127 {
4128 int i;
4129
4130 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4131 {
4132 const size_t size = 128;
4133 char str[size];
4134
4135 /* We rely mainly on the opcode operand info; however, we also look
4136 into inst->operands to support printing instructions where an
4137 optional operand is involved.
4138 The two operand codes should be the same in all cases, apart from
4139 when the operand can be optional. */
4140 if (opcode->operands[i] == AARCH64_OPND_NIL
4141 || opnds[i].type == AARCH64_OPND_NIL)
4142 break;
4143
4144 /* Generate the operand string in STR. */
4145 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
4146
4147 /* Delimiter. */
4148 if (str[0] != '\0')
4149 strcat (buf, i == 0 ? " " : ",");
4150
4151 /* Append the operand string. */
4152 strcat (buf, str);
4153 }
4154 }
4155
4156 /* Send a string to stderr as information. */
4157
4158 static void
4159 output_info (const char *format, ...)
4160 {
4161 char *file;
4162 unsigned int line;
4163 va_list args;
4164
4165 as_where (&file, &line);
4166 if (file)
4167 {
4168 if (line != 0)
4169 fprintf (stderr, "%s:%u: ", file, line);
4170 else
4171 fprintf (stderr, "%s: ", file);
4172 }
4173 fprintf (stderr, _("Info: "));
4174 va_start (args, format);
4175 vfprintf (stderr, format, args);
4176 va_end (args);
4177 (void) putc ('\n', stderr);
4178 }
4179
4180 /* Output one operand error record. */
4181
4182 static void
4183 output_operand_error_record (const operand_error_record *record, char *str)
4184 {
4185 const aarch64_operand_error *detail = &record->detail;
4186 int idx = detail->index;
4187 const aarch64_opcode *opcode = record->opcode;
4188 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4189 : AARCH64_OPND_NIL);
4190
4191 switch (detail->kind)
4192 {
4193 case AARCH64_OPDE_NIL:
4194 gas_assert (0);
4195 break;
4196
4197 case AARCH64_OPDE_SYNTAX_ERROR:
4198 case AARCH64_OPDE_RECOVERABLE:
4199 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4200 case AARCH64_OPDE_OTHER_ERROR:
4201 /* Use the prepared error message if there is one; otherwise use the
4202 operand description string to describe the error. */
4203 if (detail->error != NULL)
4204 {
4205 if (idx < 0)
4206 as_bad (_("%s -- `%s'"), detail->error, str);
4207 else
4208 as_bad (_("%s at operand %d -- `%s'"),
4209 detail->error, idx + 1, str);
4210 }
4211 else
4212 {
4213 gas_assert (idx >= 0);
4214 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4215 aarch64_get_operand_desc (opd_code), str);
4216 }
4217 break;
4218
4219 case AARCH64_OPDE_INVALID_VARIANT:
4220 as_bad (_("operand mismatch -- `%s'"), str);
4221 if (verbose_error_p)
4222 {
4223 /* We will try to correct the erroneous instruction and also provide
4224 more information e.g. all other valid variants.
4225
4226 The string representation of the corrected instruction and other
4227 valid variants are generated by
4228
4229 1) obtaining the intermediate representation of the erroneous
4230 instruction;
4231 2) manipulating the IR, e.g. replacing the operand qualifier;
4232 3) printing out the instruction by calling the printer functions
4233 shared with the disassembler.
4234
4235 The limitation of this method is that the exact input assembly
4236 line cannot be accurately reproduced in some cases, for example an
4237 optional operand present in the actual assembly line will be
4238 omitted in the output; likewise for the optional syntax rules,
4239 e.g. the # before the immediate. Another limitation is that the
4240 assembly symbols and relocation operations in the assembly line
4241 currently cannot be printed out in the error report. Last but not
4242 least, when other errors co-exist with this error, the
4243 'corrected' instruction may still be incorrect, e.g. given
4244 'ldnp h0,h1,[x0,#6]!'
4245 this diagnosis will provide the version:
4246 'ldnp s0,s1,[x0,#6]!'
4247 which is still not right. */
4248 size_t len = strlen (get_mnemonic_name (str));
4249 int i, qlf_idx;
4250 bfd_boolean result;
4251 const size_t size = 2048;
4252 char buf[size];
4253 aarch64_inst *inst_base = &inst.base;
4254 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4255
4256 /* Init inst. */
4257 reset_aarch64_instruction (&inst);
4258 inst_base->opcode = opcode;
4259
4260 /* Reset the error report so that there is no side effect on the
4261 following operand parsing. */
4262 init_operand_error_report ();
4263
4264 /* Fill inst. */
4265 result = parse_operands (str + len, opcode)
4266 && programmer_friendly_fixup (&inst);
4267 gas_assert (result);
4268 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4269 NULL, NULL);
4270 gas_assert (!result);
4271
4272 /* Find the most matched qualifier sequence. */
4273 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4274 gas_assert (qlf_idx > -1);
4275
4276 /* Assign the qualifiers. */
4277 assign_qualifier_sequence (inst_base,
4278 opcode->qualifiers_list[qlf_idx]);
4279
4280 /* Print the hint. */
4281 output_info (_(" did you mean this?"));
4282 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4283 print_operands (buf, opcode, inst_base->operands);
4284 output_info (_(" %s"), buf);
4285
4286 /* Print out other variant(s) if there is any. */
4287 if (qlf_idx != 0 ||
4288 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4289 output_info (_(" other valid variant(s):"));
4290
4291 /* For each pattern. */
4292 qualifiers_list = opcode->qualifiers_list;
4293 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4294 {
4295 /* Most opcodes have far fewer patterns in the list.
4296 The first NIL qualifier indicates the end of the list. */
4297 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4298 break;
4299
4300 if (i != qlf_idx)
4301 {
4302 /* Mnemonics name. */
4303 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4304
4305 /* Assign the qualifiers. */
4306 assign_qualifier_sequence (inst_base, *qualifiers_list);
4307
4308 /* Print instruction. */
4309 print_operands (buf, opcode, inst_base->operands);
4310
4311 output_info (_(" %s"), buf);
4312 }
4313 }
4314 }
4315 break;
4316
4317 case AARCH64_OPDE_OUT_OF_RANGE:
4318 if (detail->data[0] != detail->data[1])
4319 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4320 detail->error ? detail->error : _("immediate value"),
4321 detail->data[0], detail->data[1], idx + 1, str);
4322 else
4323 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4324 detail->error ? detail->error : _("immediate value"),
4325 detail->data[0], idx + 1, str);
4326 break;
4327
4328 case AARCH64_OPDE_REG_LIST:
4329 if (detail->data[0] == 1)
4330 as_bad (_("invalid number of registers in the list; "
4331 "only 1 register is expected at operand %d -- `%s'"),
4332 idx + 1, str);
4333 else
4334 as_bad (_("invalid number of registers in the list; "
4335 "%d registers are expected at operand %d -- `%s'"),
4336 detail->data[0], idx + 1, str);
4337 break;
4338
4339 case AARCH64_OPDE_UNALIGNED:
4340 as_bad (_("immediate value should be a multiple of "
4341 "%d at operand %d -- `%s'"),
4342 detail->data[0], idx + 1, str);
4343 break;
4344
4345 default:
4346 gas_assert (0);
4347 break;
4348 }
4349 }
4350
4351 /* Process and output the error message about the operand mismatching.
4352
4353 When this function is called, the operand error information has
4354 been collected for an assembly line and there will be multiple
4355 errors in the case of multiple instruction templates; output the
4356 error message that most closely describes the problem. */
4357
4358 static void
4359 output_operand_error_report (char *str)
4360 {
4361 int largest_error_pos;
4362 const char *msg = NULL;
4363 enum aarch64_operand_error_kind kind;
4364 operand_error_record *curr;
4365 operand_error_record *head = operand_error_report.head;
4366 operand_error_record *record = NULL;
4367
4368 /* No error to report. */
4369 if (head == NULL)
4370 return;
4371
4372 gas_assert (head != NULL && operand_error_report.tail != NULL);
4373
4374 /* Only one error. */
4375 if (head == operand_error_report.tail)
4376 {
4377 DEBUG_TRACE ("single opcode entry with error kind: %s",
4378 operand_mismatch_kind_names[head->detail.kind]);
4379 output_operand_error_record (head, str);
4380 return;
4381 }
4382
4383 /* Find the error kind of the highest severity. */
4384 DEBUG_TRACE ("multiple opcode entries with error kind");
4385 kind = AARCH64_OPDE_NIL;
4386 for (curr = head; curr != NULL; curr = curr->next)
4387 {
4388 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4389 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4390 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4391 kind = curr->detail.kind;
4392 }
4393 gas_assert (kind != AARCH64_OPDE_NIL);
4394
4395 /* Pick one of the errors of KIND to report. */
4396 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4397 for (curr = head; curr != NULL; curr = curr->next)
4398 {
4399 if (curr->detail.kind != kind)
4400 continue;
4401 /* If there are multiple errors, pick the one with the highest
4402 mismatching operand index. If several errors share that
4403 highest index, pick the first one, or the first one with a
4404 non-NULL error message. */
4405 if (curr->detail.index > largest_error_pos
4406 || (curr->detail.index == largest_error_pos && msg == NULL
4407 && curr->detail.error != NULL))
4408 {
4409 largest_error_pos = curr->detail.index;
4410 record = curr;
4411 msg = record->detail.error;
4412 }
4413 }
4414
4415 gas_assert (largest_error_pos != -2 && record != NULL);
4416 DEBUG_TRACE ("Pick up error kind %s to report",
4417 operand_mismatch_kind_names[record->detail.kind]);
4418
4419 /* Output. */
4420 output_operand_error_record (record, str);
4421 }
4422 \f
4423 /* Write an AARCH64 instruction to buf - always little-endian. */
4424 static void
4425 put_aarch64_insn (char *buf, uint32_t insn)
4426 {
4427 unsigned char *where = (unsigned char *) buf;
4428 where[0] = insn;
4429 where[1] = insn >> 8;
4430 where[2] = insn >> 16;
4431 where[3] = insn >> 24;
4432 }
4433
4434 static uint32_t
4435 get_aarch64_insn (char *buf)
4436 {
4437 unsigned char *where = (unsigned char *) buf;
4438 uint32_t result;
4439 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4440 return result;
4441 }
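
An illustrative, standalone sketch (not part of tc-aarch64.c): it reproduces the little-endian byte ordering used by put_aarch64_insn/get_aarch64_insn above, regardless of host byte order. The demo_* names are invented for this example; 0xd503201f is the AArch64 NOP encoding.

#include <stdint.h>
#include <stdio.h>

static void
demo_put_insn (unsigned char *where, uint32_t insn)
{
  where[0] = insn;		/* Least significant byte first.  */
  where[1] = insn >> 8;
  where[2] = insn >> 16;
  where[3] = insn >> 24;
}

static uint32_t
demo_get_insn (const unsigned char *where)
{
  return where[0] | (where[1] << 8) | (where[2] << 16)
	 | ((uint32_t) where[3] << 24);
}

int
main (void)
{
  unsigned char buf[4];

  demo_put_insn (buf, 0xd503201f);	/* NOP.  */
  /* Prints "1f 20 03 d5 -> 0xd503201f": a lossless round trip.  */
  printf ("%02x %02x %02x %02x -> 0x%08x\n",
	  buf[0], buf[1], buf[2], buf[3], demo_get_insn (buf));
  return 0;
}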
4442
4443 static void
4444 output_inst (struct aarch64_inst *new_inst)
4445 {
4446 char *to = NULL;
4447
4448 to = frag_more (INSN_SIZE);
4449
4450 frag_now->tc_frag_data.recorded = 1;
4451
4452 put_aarch64_insn (to, inst.base.value);
4453
4454 if (inst.reloc.type != BFD_RELOC_UNUSED)
4455 {
4456 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4457 INSN_SIZE, &inst.reloc.exp,
4458 inst.reloc.pc_rel,
4459 inst.reloc.type);
4460 DEBUG_TRACE ("Prepared relocation fix up");
4461 /* Don't check the addend value against the instruction size,
4462 that's the job of our code in md_apply_fix(). */
4463 fixp->fx_no_overflow = 1;
4464 if (new_inst != NULL)
4465 fixp->tc_fix_data.inst = new_inst;
4466 if (aarch64_gas_internal_fixup_p ())
4467 {
4468 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4469 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4470 fixp->fx_addnumber = inst.reloc.flags;
4471 }
4472 }
4473
4474 dwarf2_emit_insn (INSN_SIZE);
4475 }
4476
4477 /* Link together opcodes of the same name. */
4478
4479 struct templates
4480 {
4481 aarch64_opcode *opcode;
4482 struct templates *next;
4483 };
4484
4485 typedef struct templates templates;
4486
4487 static templates *
4488 lookup_mnemonic (const char *start, int len)
4489 {
4490 templates *templ = NULL;
4491
4492 templ = hash_find_n (aarch64_ops_hsh, start, len);
4493 return templ;
4494 }
4495
4496 /* Subroutine of md_assemble, responsible for looking up the primary
4497 opcode from the mnemonic the user wrote. STR points to the
4498 beginning of the mnemonic. */
4499
4500 static templates *
4501 opcode_lookup (char **str)
4502 {
4503 char *end, *base;
4504 const aarch64_cond *cond;
4505 char condname[16];
4506 int len;
4507
4508 /* Scan up to the end of the mnemonic, which must end in white space,
4509 '.', or end of string. */
4510 for (base = end = *str; is_part_of_name (*end); end++)
4511 if (*end == '.')
4512 break;
4513
4514 if (end == base)
4515 return 0;
4516
4517 inst.cond = COND_ALWAYS;
4518
4519 /* Handle a possible condition. */
4520 if (end[0] == '.')
4521 {
4522 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4523 if (cond)
4524 {
4525 inst.cond = cond->value;
4526 *str = end + 3;
4527 }
4528 else
4529 {
4530 *str = end;
4531 return 0;
4532 }
4533 }
4534 else
4535 *str = end;
4536
4537 len = end - base;
4538
4539 if (inst.cond == COND_ALWAYS)
4540 {
4541 /* Look for unaffixed mnemonic. */
4542 return lookup_mnemonic (base, len);
4543 }
4544 else if (len <= 13)
4545 {
4546 /* Append ".c" to the mnemonic if conditional. */
4547 memcpy (condname, base, len);
4548 memcpy (condname + len, ".c", 2);
4549 base = condname;
4550 len += 2;
4551 return lookup_mnemonic (base, len);
4552 }
4553
4554 return NULL;
4555 }
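
opcode_lookup splits a conditional mnemonic such as "b.eq" into the base name plus a two-character condition, then looks up the hash key formed by appending ".c". A standalone sketch of that key construction follows; demo_split_cond and the plain-string handling are invented for illustration, whereas the real code uses the aarch64_cond_hsh and aarch64_ops_hsh hash tables.

#include <stdio.h>
#include <string.h>

/* Split "b.eq" into the lookup key "b.c" and the condition name "eq",
   mirroring how opcode_lookup appends ".c" for conditional mnemonics.  */
static void
demo_split_cond (const char *mnemonic)
{
  const char *dot = strchr (mnemonic, '.');
  char key[16];

  if (dot == NULL || strlen (dot + 1) != 2)
    {
      printf ("%-6s -> key \"%s\", unconditional\n", mnemonic, mnemonic);
      return;
    }
  snprintf (key, sizeof key, "%.*s.c", (int) (dot - mnemonic), mnemonic);
  printf ("%-6s -> key \"%s\", condition \"%s\"\n", mnemonic, key, dot + 1);
}

int
main (void)
{
  demo_split_cond ("b.eq");	/* key "b.c", condition "eq" */
  demo_split_cond ("add");	/* no condition suffix */
  return 0;
}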
4556
4557 /* Internal helper routine converting a vector neon_type_el structure
4558 *VECTYPE to a corresponding operand qualifier. */
4559
4560 static inline aarch64_opnd_qualifier_t
4561 vectype_to_qualifier (const struct neon_type_el *vectype)
4562 {
4563 /* Element size in bytes indexed by neon_el_type. */
4564 const unsigned char ele_size[5]
4565 = {1, 2, 4, 8, 16};
4566
4567 if (!vectype->defined || vectype->type == NT_invtype)
4568 goto vectype_conversion_fail;
4569
4570 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4571
4572 if (vectype->defined & NTA_HASINDEX)
4573 /* Vector element register. */
4574 return AARCH64_OPND_QLF_S_B + vectype->type;
4575 else
4576 {
4577 /* Vector register. */
4578 int reg_size = ele_size[vectype->type] * vectype->width;
4579 unsigned offset;
4580 if (reg_size != 16 && reg_size != 8)
4581 goto vectype_conversion_fail;
4582 /* The conversion is calculated based on the relation of the order of
4583 qualifiers to the vector element size and vector register size. */
4584 offset = (vectype->type == NT_q)
4585 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4586 gas_assert (offset <= 8);
4587 return AARCH64_OPND_QLF_V_8B + offset;
4588 }
4589
4590 vectype_conversion_fail:
4591 first_error (_("bad vector arrangement type"));
4592 return AARCH64_OPND_QLF_NIL;
4593 }
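
A standalone sketch of the offset arithmetic used by vectype_to_qualifier for vector registers, assuming (as the computation above relies on) that the V_8B .. V_1Q qualifiers are consecutive in the order 8B, 16B, 4H, 8H, 2S, 4S, 1D, 2D, 1Q. The demo_* enum mirrors neon_el_type's NT_b..NT_q ordering and is invented for this example.

#include <stdio.h>

enum demo_el { DEMO_B, DEMO_H, DEMO_S, DEMO_D, DEMO_Q };

static int
demo_vec_qlf_offset (enum demo_el type, unsigned width)
{
  static const unsigned char ele_size[5] = { 1, 2, 4, 8, 16 };
  unsigned reg_size = ele_size[type] * width;

  if (reg_size != 8 && reg_size != 16)
    return -1;			/* Invalid vector arrangement.  */
  /* 64-bit arrangements get the even offset, 128-bit the odd one.  */
  return type == DEMO_Q ? 8 : ((int) type << 1) + (reg_size >> 4);
}

int
main (void)
{
  /* Expected offsets: 8B->0, 16B->1, 4H->2, 8H->3, 2S->4, 4S->5,
     1D->6, 2D->7, 1Q->8.  */
  printf ("v4s offset = %d\n", demo_vec_qlf_offset (DEMO_S, 4));   /* 5 */
  printf ("v8b offset = %d\n", demo_vec_qlf_offset (DEMO_B, 8));   /* 0 */
  return 0;
}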
4594
4595 /* Process an optional operand that has been omitted from the assembly line.
4596 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4597 instruction's opcode entry while IDX is the index of this omitted operand.
4598 */
4599
4600 static void
4601 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4602 int idx, aarch64_opnd_info *operand)
4603 {
4604 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4605 gas_assert (optional_operand_p (opcode, idx));
4606 gas_assert (!operand->present);
4607
4608 switch (type)
4609 {
4610 case AARCH64_OPND_Rd:
4611 case AARCH64_OPND_Rn:
4612 case AARCH64_OPND_Rm:
4613 case AARCH64_OPND_Rt:
4614 case AARCH64_OPND_Rt2:
4615 case AARCH64_OPND_Rs:
4616 case AARCH64_OPND_Ra:
4617 case AARCH64_OPND_Rt_SYS:
4618 case AARCH64_OPND_Rd_SP:
4619 case AARCH64_OPND_Rn_SP:
4620 case AARCH64_OPND_Fd:
4621 case AARCH64_OPND_Fn:
4622 case AARCH64_OPND_Fm:
4623 case AARCH64_OPND_Fa:
4624 case AARCH64_OPND_Ft:
4625 case AARCH64_OPND_Ft2:
4626 case AARCH64_OPND_Sd:
4627 case AARCH64_OPND_Sn:
4628 case AARCH64_OPND_Sm:
4629 case AARCH64_OPND_Vd:
4630 case AARCH64_OPND_Vn:
4631 case AARCH64_OPND_Vm:
4632 case AARCH64_OPND_VdD1:
4633 case AARCH64_OPND_VnD1:
4634 operand->reg.regno = default_value;
4635 break;
4636
4637 case AARCH64_OPND_Ed:
4638 case AARCH64_OPND_En:
4639 case AARCH64_OPND_Em:
4640 operand->reglane.regno = default_value;
4641 break;
4642
4643 case AARCH64_OPND_IDX:
4644 case AARCH64_OPND_BIT_NUM:
4645 case AARCH64_OPND_IMMR:
4646 case AARCH64_OPND_IMMS:
4647 case AARCH64_OPND_SHLL_IMM:
4648 case AARCH64_OPND_IMM_VLSL:
4649 case AARCH64_OPND_IMM_VLSR:
4650 case AARCH64_OPND_CCMP_IMM:
4651 case AARCH64_OPND_FBITS:
4652 case AARCH64_OPND_UIMM4:
4653 case AARCH64_OPND_UIMM3_OP1:
4654 case AARCH64_OPND_UIMM3_OP2:
4655 case AARCH64_OPND_IMM:
4656 case AARCH64_OPND_WIDTH:
4657 case AARCH64_OPND_UIMM7:
4658 case AARCH64_OPND_NZCV:
4659 operand->imm.value = default_value;
4660 break;
4661
4662 case AARCH64_OPND_EXCEPTION:
4663 inst.reloc.type = BFD_RELOC_UNUSED;
4664 break;
4665
4666 case AARCH64_OPND_BARRIER_ISB:
4667 operand->barrier = aarch64_barrier_options + default_value;
4668 break;
4669 default:
4670 break;
4671 }
4672 }
4673
4674 /* Process the relocation type for move wide instructions.
4675 Return TRUE on success; otherwise return FALSE. */
4676
4677 static bfd_boolean
4678 process_movw_reloc_info (void)
4679 {
4680 int is32;
4681 unsigned shift;
4682
4683 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4684
4685 if (inst.base.opcode->op == OP_MOVK)
4686 switch (inst.reloc.type)
4687 {
4688 case BFD_RELOC_AARCH64_MOVW_G0_S:
4689 case BFD_RELOC_AARCH64_MOVW_G1_S:
4690 case BFD_RELOC_AARCH64_MOVW_G2_S:
4691 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4692 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4693 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4694 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4695 set_syntax_error
4696 (_("the specified relocation type is not allowed for MOVK"));
4697 return FALSE;
4698 default:
4699 break;
4700 }
4701
4702 switch (inst.reloc.type)
4703 {
4704 case BFD_RELOC_AARCH64_MOVW_G0:
4705 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4706 case BFD_RELOC_AARCH64_MOVW_G0_S:
4707 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
4708 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
4709 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
4710 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
4711 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4712 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4713 shift = 0;
4714 break;
4715 case BFD_RELOC_AARCH64_MOVW_G1:
4716 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4717 case BFD_RELOC_AARCH64_MOVW_G1_S:
4718 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
4719 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4720 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
4721 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
4722 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4723 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4724 shift = 16;
4725 break;
4726 case BFD_RELOC_AARCH64_MOVW_G2:
4727 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4728 case BFD_RELOC_AARCH64_MOVW_G2_S:
4729 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
4730 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4731 if (is32)
4732 {
4733 set_fatal_syntax_error
4734 (_("the specified relocation type is not allowed for 32-bit "
4735 "register"));
4736 return FALSE;
4737 }
4738 shift = 32;
4739 break;
4740 case BFD_RELOC_AARCH64_MOVW_G3:
4741 if (is32)
4742 {
4743 set_fatal_syntax_error
4744 (_("the specified relocation type is not allowed for 32-bit "
4745 "register"));
4746 return FALSE;
4747 }
4748 shift = 48;
4749 break;
4750 default:
4751 /* More cases should be added when more MOVW-related relocation types
4752 are supported in GAS. */
4753 gas_assert (aarch64_gas_internal_fixup_p ());
4754 /* The shift amount should have already been set by the parser. */
4755 return TRUE;
4756 }
4757 inst.base.operands[1].shifter.amount = shift;
4758 return TRUE;
4759 }
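
The switch above maps each MOVW-family relocation to the 16-bit group it addresses: G0 at shift 0, G1 at 16, G2 at 32 and G3 at 48. A standalone, illustrative sketch of how those groups slice a 64-bit value:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t value = 0x123456789abcdef0ULL;
  int g;

  /* G0..G3 select successive 16-bit slices; shift = 16 * n.  */
  for (g = 0; g < 4; g++)
    printf ("G%d: shift %2d, slice 0x%04x\n",
	    g, g * 16, (unsigned) ((value >> (g * 16)) & 0xffff));
  return 0;
}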
4760
4761 /* A primitive log2 calculator. */
4762
4763 static inline unsigned int
4764 get_logsz (unsigned int size)
4765 {
4766 const unsigned char ls[16] =
4767 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4768 if (size > 16)
4769 {
4770 gas_assert (0);
4771 return -1;
4772 }
4773 gas_assert (ls[size - 1] != (unsigned char)-1);
4774 return ls[size - 1];
4775 }
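
get_logsz is a table-driven log2 for the supported transfer sizes 1, 2, 4, 8 and 16 bytes. An equivalent loop-based sketch, standalone and for illustration only (not the table used above):

#include <stdio.h>

static int
demo_logsz (unsigned int size)
{
  int log = 0;

  /* Reject zero, non-powers of two and anything above 16 bytes.  */
  if (size == 0 || (size & (size - 1)) != 0 || size > 16)
    return -1;
  while (size >>= 1)
    log++;
  return log;
}

int
main (void)
{
  unsigned int s;

  /* Powers of two print 0..4; every other size prints -1.  */
  for (s = 1; s <= 16; s++)
    printf ("size %2u -> logsz %d\n", s, demo_logsz (s));
  return 0;
}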
4776
4777 /* Determine and return the real reloc type code for an instruction
4778 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4779
4780 static inline bfd_reloc_code_real_type
4781 ldst_lo12_determine_real_reloc_type (void)
4782 {
4783 unsigned logsz;
4784 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4785 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4786
4787 const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
4788 {
4789 BFD_RELOC_AARCH64_LDST8_LO12,
4790 BFD_RELOC_AARCH64_LDST16_LO12,
4791 BFD_RELOC_AARCH64_LDST32_LO12,
4792 BFD_RELOC_AARCH64_LDST64_LO12,
4793 BFD_RELOC_AARCH64_LDST128_LO12
4794 },
4795 {
4796 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
4797 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
4798 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
4799 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
4800 BFD_RELOC_AARCH64_NONE
4801 },
4802 {
4803 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
4804 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
4805 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
4806 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
4807 BFD_RELOC_AARCH64_NONE
4808 }
4809 };
4810
4811 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
4812 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
4813 || (inst.reloc.type
4814 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
4815 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4816
4817 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4818 opd1_qlf =
4819 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4820 1, opd0_qlf, 0);
4821 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4822
4823 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4824 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
4825 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
4826 gas_assert (logsz <= 3);
4827 else
4828 gas_assert (logsz <= 4);
4829
4830 /* In reloc.c, these pseudo relocation types should be defined in the same
4831 order as the reloc_ldst_lo12 array above, because the array index
4832 calculation below relies on this. */
4833 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
4834 }
4835
4836 /* Check whether a register list REGINFO is valid. The registers must be
4837 numbered in increasing order (modulo 32), in increments of one or two.
4838
4839 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4840 increments of two.
4841
4842 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4843
4844 static bfd_boolean
4845 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4846 {
4847 uint32_t i, nb_regs, prev_regno, incr;
4848
4849 nb_regs = 1 + (reginfo & 0x3);
4850 reginfo >>= 2;
4851 prev_regno = reginfo & 0x1f;
4852 incr = accept_alternate ? 2 : 1;
4853
4854 for (i = 1; i < nb_regs; ++i)
4855 {
4856 uint32_t curr_regno;
4857 reginfo >>= 5;
4858 curr_regno = reginfo & 0x1f;
4859 if (curr_regno != ((prev_regno + incr) & 0x1f))
4860 return FALSE;
4861 prev_regno = curr_regno;
4862 }
4863
4864 return TRUE;
4865 }
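
A standalone sketch of the packed REGINFO layout checked above, inferred from the shifts used here and in parse_operands (bits [1:0] hold the register count minus one, and each register number occupies the next 5 bits, the first register at bits [6:2]). The demo_* helpers are invented for this example and take the increment directly instead of an accept_alternate flag.

#include <stdint.h>
#include <stdio.h>

static uint32_t
demo_pack_list (const unsigned *regs, unsigned n)
{
  uint32_t info = (n - 1) & 0x3;
  unsigned i;

  for (i = 0; i < n; i++)
    info |= (regs[i] & 0x1f) << (2 + 5 * i);
  return info;
}

static int
demo_list_valid (uint32_t reginfo, unsigned incr)
{
  uint32_t nb_regs = 1 + (reginfo & 0x3);
  uint32_t prev = (reginfo >>= 2) & 0x1f;
  uint32_t i;

  for (i = 1; i < nb_regs; i++)
    {
      uint32_t curr = (reginfo >>= 5) & 0x1f;
      if (curr != ((prev + incr) & 0x1f))
	return 0;
      prev = curr;
    }
  return 1;
}

int
main (void)
{
  unsigned ok[] = { 30, 31, 0, 1 };	/* Consecutive, wrapping modulo 32.  */
  unsigned alt[] = { 1, 3, 5, 7 };	/* Increments of two.  */

  printf ("{30,31,0,1} incr 1: %d\n", demo_list_valid (demo_pack_list (ok, 4), 1));
  printf ("{1,3,5,7}   incr 1: %d\n", demo_list_valid (demo_pack_list (alt, 4), 1));
  printf ("{1,3,5,7}   incr 2: %d\n", demo_list_valid (demo_pack_list (alt, 4), 2));
  return 0;
}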
4866
4867 /* Generic instruction operand parser. This does no encoding and no
4868 semantic validation; it merely squirrels values away in the inst
4869 structure. Returns TRUE or FALSE depending on whether the
4870 specified grammar matched. */
4871
4872 static bfd_boolean
4873 parse_operands (char *str, const aarch64_opcode *opcode)
4874 {
4875 int i;
4876 char *backtrack_pos = 0;
4877 const enum aarch64_opnd *operands = opcode->operands;
4878
4879 clear_error ();
4880 skip_whitespace (str);
4881
4882 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4883 {
4884 int64_t val;
4885 int isreg32, isregzero;
4886 int comma_skipped_p = 0;
4887 aarch64_reg_type rtype;
4888 struct neon_type_el vectype;
4889 aarch64_opnd_info *info = &inst.base.operands[i];
4890
4891 DEBUG_TRACE ("parse operand %d", i);
4892
4893 /* Assign the operand code. */
4894 info->type = operands[i];
4895
4896 if (optional_operand_p (opcode, i))
4897 {
4898 /* Remember where we are in case we need to backtrack. */
4899 gas_assert (!backtrack_pos);
4900 backtrack_pos = str;
4901 }
4902
4903 /* Expect a comma between operands; the backtrack mechanism will take
4904 care of cases of an omitted optional operand. */
4905 if (i > 0 && ! skip_past_char (&str, ','))
4906 {
4907 set_syntax_error (_("comma expected between operands"));
4908 goto failure;
4909 }
4910 else
4911 comma_skipped_p = 1;
4912
4913 switch (operands[i])
4914 {
4915 case AARCH64_OPND_Rd:
4916 case AARCH64_OPND_Rn:
4917 case AARCH64_OPND_Rm:
4918 case AARCH64_OPND_Rt:
4919 case AARCH64_OPND_Rt2:
4920 case AARCH64_OPND_Rs:
4921 case AARCH64_OPND_Ra:
4922 case AARCH64_OPND_Rt_SYS:
4923 case AARCH64_OPND_PAIRREG:
4924 po_int_reg_or_fail (1, 0);
4925 break;
4926
4927 case AARCH64_OPND_Rd_SP:
4928 case AARCH64_OPND_Rn_SP:
4929 po_int_reg_or_fail (0, 1);
4930 break;
4931
4932 case AARCH64_OPND_Rm_EXT:
4933 case AARCH64_OPND_Rm_SFT:
4934 po_misc_or_fail (parse_shifter_operand
4935 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4936 ? SHIFTED_ARITH_IMM
4937 : SHIFTED_LOGIC_IMM)));
4938 if (!info->shifter.operator_present)
4939 {
4940 /* Default to LSL if not present. Libopcodes prefers shifter
4941 kind to be explicit. */
4942 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4943 info->shifter.kind = AARCH64_MOD_LSL;
4944 /* For Rm_EXT, libopcodes will carry out further check on whether
4945 or not stack pointer is used in the instruction (Recall that
4946 "the extend operator is not optional unless at least one of
4947 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4948 }
4949 break;
4950
4951 case AARCH64_OPND_Fd:
4952 case AARCH64_OPND_Fn:
4953 case AARCH64_OPND_Fm:
4954 case AARCH64_OPND_Fa:
4955 case AARCH64_OPND_Ft:
4956 case AARCH64_OPND_Ft2:
4957 case AARCH64_OPND_Sd:
4958 case AARCH64_OPND_Sn:
4959 case AARCH64_OPND_Sm:
4960 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4961 if (val == PARSE_FAIL)
4962 {
4963 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4964 goto failure;
4965 }
4966 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4967
4968 info->reg.regno = val;
4969 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4970 break;
4971
4972 case AARCH64_OPND_Vd:
4973 case AARCH64_OPND_Vn:
4974 case AARCH64_OPND_Vm:
4975 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4976 if (val == PARSE_FAIL)
4977 {
4978 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4979 goto failure;
4980 }
4981 if (vectype.defined & NTA_HASINDEX)
4982 goto failure;
4983
4984 info->reg.regno = val;
4985 info->qualifier = vectype_to_qualifier (&vectype);
4986 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4987 goto failure;
4988 break;
4989
4990 case AARCH64_OPND_VdD1:
4991 case AARCH64_OPND_VnD1:
4992 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4993 if (val == PARSE_FAIL)
4994 {
4995 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4996 goto failure;
4997 }
4998 if (vectype.type != NT_d || vectype.index != 1)
4999 {
5000 set_fatal_syntax_error
5001 (_("the top half of a 128-bit FP/SIMD register is expected"));
5002 goto failure;
5003 }
5004 info->reg.regno = val;
5005 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
5006 here; it is correct for the purpose of encoding/decoding since
5007 only the register number is explicitly encoded in the related
5008 instructions, although this appears a bit hacky. */
5009 info->qualifier = AARCH64_OPND_QLF_S_D;
5010 break;
5011
5012 case AARCH64_OPND_Ed:
5013 case AARCH64_OPND_En:
5014 case AARCH64_OPND_Em:
5015 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5016 if (val == PARSE_FAIL)
5017 {
5018 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5019 goto failure;
5020 }
5021 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5022 goto failure;
5023
5024 info->reglane.regno = val;
5025 info->reglane.index = vectype.index;
5026 info->qualifier = vectype_to_qualifier (&vectype);
5027 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5028 goto failure;
5029 break;
5030
5031 case AARCH64_OPND_LVn:
5032 case AARCH64_OPND_LVt:
5033 case AARCH64_OPND_LVt_AL:
5034 case AARCH64_OPND_LEt:
5035 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
5036 goto failure;
5037 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5038 {
5039 set_fatal_syntax_error (_("invalid register list"));
5040 goto failure;
5041 }
5042 info->reglist.first_regno = (val >> 2) & 0x1f;
5043 info->reglist.num_regs = (val & 0x3) + 1;
5044 if (operands[i] == AARCH64_OPND_LEt)
5045 {
5046 if (!(vectype.defined & NTA_HASINDEX))
5047 goto failure;
5048 info->reglist.has_index = 1;
5049 info->reglist.index = vectype.index;
5050 }
5051 else if (!(vectype.defined & NTA_HASTYPE))
5052 goto failure;
5053 info->qualifier = vectype_to_qualifier (&vectype);
5054 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5055 goto failure;
5056 break;
5057
5058 case AARCH64_OPND_Cn:
5059 case AARCH64_OPND_Cm:
5060 po_reg_or_fail (REG_TYPE_CN);
5061 if (val > 15)
5062 {
5063 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
5064 goto failure;
5065 }
5066 inst.base.operands[i].reg.regno = val;
5067 break;
5068
5069 case AARCH64_OPND_SHLL_IMM:
5070 case AARCH64_OPND_IMM_VLSR:
5071 po_imm_or_fail (1, 64);
5072 info->imm.value = val;
5073 break;
5074
5075 case AARCH64_OPND_CCMP_IMM:
5076 case AARCH64_OPND_FBITS:
5077 case AARCH64_OPND_UIMM4:
5078 case AARCH64_OPND_UIMM3_OP1:
5079 case AARCH64_OPND_UIMM3_OP2:
5080 case AARCH64_OPND_IMM_VLSL:
5081 case AARCH64_OPND_IMM:
5082 case AARCH64_OPND_WIDTH:
5083 po_imm_nc_or_fail ();
5084 info->imm.value = val;
5085 break;
5086
5087 case AARCH64_OPND_UIMM7:
5088 po_imm_or_fail (0, 127);
5089 info->imm.value = val;
5090 break;
5091
5092 case AARCH64_OPND_IDX:
5093 case AARCH64_OPND_BIT_NUM:
5094 case AARCH64_OPND_IMMR:
5095 case AARCH64_OPND_IMMS:
5096 po_imm_or_fail (0, 63);
5097 info->imm.value = val;
5098 break;
5099
5100 case AARCH64_OPND_IMM0:
5101 po_imm_nc_or_fail ();
5102 if (val != 0)
5103 {
5104 set_fatal_syntax_error (_("immediate zero expected"));
5105 goto failure;
5106 }
5107 info->imm.value = 0;
5108 break;
5109
5110 case AARCH64_OPND_FPIMM0:
5111 {
5112 int qfloat;
5113 bfd_boolean res1 = FALSE, res2 = FALSE;
5114 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5115 it is probably not worth the effort to support it. */
5116 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
5117 && !(res2 = parse_constant_immediate (&str, &val)))
5118 goto failure;
5119 if ((res1 && qfloat == 0) || (res2 && val == 0))
5120 {
5121 info->imm.value = 0;
5122 info->imm.is_fp = 1;
5123 break;
5124 }
5125 set_fatal_syntax_error (_("immediate zero expected"));
5126 goto failure;
5127 }
5128
5129 case AARCH64_OPND_IMM_MOV:
5130 {
5131 char *saved = str;
5132 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5133 reg_name_p (str, REG_TYPE_VN))
5134 goto failure;
5135 str = saved;
5136 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5137 GE_OPT_PREFIX, 1));
5138 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5139 later. fix_mov_imm_insn will try to determine a machine
5140 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5141 message if the immediate cannot be moved by a single
5142 instruction. */
5143 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5144 inst.base.operands[i].skip = 1;
5145 }
5146 break;
5147
5148 case AARCH64_OPND_SIMD_IMM:
5149 case AARCH64_OPND_SIMD_IMM_SFT:
5150 if (! parse_big_immediate (&str, &val))
5151 goto failure;
5152 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5153 /* addr_off_p */ 0,
5154 /* need_libopcodes_p */ 1,
5155 /* skip_p */ 1);
5156 /* Parse shift.
5157 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5158 shift, we don't check it here; we leave the checking to
5159 the libopcodes (operand_general_constraint_met_p). By
5160 doing this, we achieve better diagnostics. */
5161 if (skip_past_comma (&str)
5162 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5163 goto failure;
5164 if (!info->shifter.operator_present
5165 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5166 {
5167 /* Default to LSL if not present. Libopcodes prefers shifter
5168 kind to be explicit. */
5169 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5170 info->shifter.kind = AARCH64_MOD_LSL;
5171 }
5172 break;
5173
5174 case AARCH64_OPND_FPIMM:
5175 case AARCH64_OPND_SIMD_FPIMM:
5176 {
5177 int qfloat;
5178 bfd_boolean dp_p
5179 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5180 == 8);
5181 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
5182 goto failure;
5183 if (qfloat == 0)
5184 {
5185 set_fatal_syntax_error (_("invalid floating-point constant"));
5186 goto failure;
5187 }
5188 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5189 inst.base.operands[i].imm.is_fp = 1;
5190 }
5191 break;
5192
5193 case AARCH64_OPND_LIMM:
5194 po_misc_or_fail (parse_shifter_operand (&str, info,
5195 SHIFTED_LOGIC_IMM));
5196 if (info->shifter.operator_present)
5197 {
5198 set_fatal_syntax_error
5199 (_("shift not allowed for bitmask immediate"));
5200 goto failure;
5201 }
5202 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5203 /* addr_off_p */ 0,
5204 /* need_libopcodes_p */ 1,
5205 /* skip_p */ 1);
5206 break;
5207
5208 case AARCH64_OPND_AIMM:
5209 if (opcode->op == OP_ADD)
5210 /* ADD may have relocation types. */
5211 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5212 SHIFTED_ARITH_IMM));
5213 else
5214 po_misc_or_fail (parse_shifter_operand (&str, info,
5215 SHIFTED_ARITH_IMM));
5216 switch (inst.reloc.type)
5217 {
5218 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5219 info->shifter.amount = 12;
5220 break;
5221 case BFD_RELOC_UNUSED:
5222 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5223 if (info->shifter.kind != AARCH64_MOD_NONE)
5224 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5225 inst.reloc.pc_rel = 0;
5226 break;
5227 default:
5228 break;
5229 }
5230 info->imm.value = 0;
5231 if (!info->shifter.operator_present)
5232 {
5233 /* Default to LSL if not present. Libopcodes prefers shifter
5234 kind to be explicit. */
5235 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5236 info->shifter.kind = AARCH64_MOD_LSL;
5237 }
5238 break;
5239
5240 case AARCH64_OPND_HALF:
5241 {
5242 /* #<imm16> or relocation. */
5243 int internal_fixup_p;
5244 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5245 if (internal_fixup_p)
5246 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5247 skip_whitespace (str);
5248 if (skip_past_comma (&str))
5249 {
5250 /* {, LSL #<shift>} */
5251 if (! aarch64_gas_internal_fixup_p ())
5252 {
5253 set_fatal_syntax_error (_("can't mix relocation modifier "
5254 "with explicit shift"));
5255 goto failure;
5256 }
5257 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5258 }
5259 else
5260 inst.base.operands[i].shifter.amount = 0;
5261 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5262 inst.base.operands[i].imm.value = 0;
5263 if (! process_movw_reloc_info ())
5264 goto failure;
5265 }
5266 break;
5267
5268 case AARCH64_OPND_EXCEPTION:
5269 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
5270 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5271 /* addr_off_p */ 0,
5272 /* need_libopcodes_p */ 0,
5273 /* skip_p */ 1);
5274 break;
5275
5276 case AARCH64_OPND_NZCV:
5277 {
5278 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5279 if (nzcv != NULL)
5280 {
5281 str += 4;
5282 info->imm.value = nzcv->value;
5283 break;
5284 }
5285 po_imm_or_fail (0, 15);
5286 info->imm.value = val;
5287 }
5288 break;
5289
5290 case AARCH64_OPND_COND:
5291 case AARCH64_OPND_COND1:
5292 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5293 str += 2;
5294 if (info->cond == NULL)
5295 {
5296 set_syntax_error (_("invalid condition"));
5297 goto failure;
5298 }
5299 else if (operands[i] == AARCH64_OPND_COND1
5300 && (info->cond->value & 0xe) == 0xe)
5301 {
5302 /* Do not allow AL or NV. */
5303 set_default_error ();
5304 goto failure;
5305 }
5306 break;
5307
5308 case AARCH64_OPND_ADDR_ADRP:
5309 po_misc_or_fail (parse_adrp (&str));
5310 /* Clear the value, as the operand needs to be relocated. */
5311 info->imm.value = 0;
5312 break;
5313
5314 case AARCH64_OPND_ADDR_PCREL14:
5315 case AARCH64_OPND_ADDR_PCREL19:
5316 case AARCH64_OPND_ADDR_PCREL21:
5317 case AARCH64_OPND_ADDR_PCREL26:
5318 po_misc_or_fail (parse_address_reloc (&str, info));
5319 if (!info->addr.pcrel)
5320 {
5321 set_syntax_error (_("invalid pc-relative address"));
5322 goto failure;
5323 }
5324 if (inst.gen_lit_pool
5325 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5326 {
5327 /* Only permit "=value" in the literal load instructions.
5328 The literal will be generated by programmer_friendly_fixup. */
5329 set_syntax_error (_("invalid use of \"=immediate\""));
5330 goto failure;
5331 }
5332 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5333 {
5334 set_syntax_error (_("unrecognized relocation suffix"));
5335 goto failure;
5336 }
5337 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5338 {
5339 info->imm.value = inst.reloc.exp.X_add_number;
5340 inst.reloc.type = BFD_RELOC_UNUSED;
5341 }
5342 else
5343 {
5344 info->imm.value = 0;
5345 if (inst.reloc.type == BFD_RELOC_UNUSED)
5346 switch (opcode->iclass)
5347 {
5348 case compbranch:
5349 case condbranch:
5350 /* e.g. CBZ or B.COND */
5351 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5352 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5353 break;
5354 case testbranch:
5355 /* e.g. TBZ */
5356 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5357 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5358 break;
5359 case branch_imm:
5360 /* e.g. B or BL */
5361 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5362 inst.reloc.type =
5363 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5364 : BFD_RELOC_AARCH64_JUMP26;
5365 break;
5366 case loadlit:
5367 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5368 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5369 break;
5370 case pcreladdr:
5371 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5372 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5373 break;
5374 default:
5375 gas_assert (0);
5376 abort ();
5377 }
5378 inst.reloc.pc_rel = 1;
5379 }
5380 break;
5381
5382 case AARCH64_OPND_ADDR_SIMPLE:
5383 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5384 /* [<Xn|SP>{, #<simm>}] */
5385 po_char_or_fail ('[');
5386 po_reg_or_fail (REG_TYPE_R64_SP);
5387 /* Accept optional ", #0". */
5388 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5389 && skip_past_char (&str, ','))
5390 {
5391 skip_past_char (&str, '#');
5392 if (! skip_past_char (&str, '0'))
5393 {
5394 set_fatal_syntax_error
5395 (_("the optional immediate offset can only be 0"));
5396 goto failure;
5397 }
5398 }
5399 po_char_or_fail (']');
5400 info->addr.base_regno = val;
5401 break;
5402
5403 case AARCH64_OPND_ADDR_REGOFF:
5404 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5405 po_misc_or_fail (parse_address (&str, info, 0));
5406 if (info->addr.pcrel || !info->addr.offset.is_reg
5407 || !info->addr.preind || info->addr.postind
5408 || info->addr.writeback)
5409 {
5410 set_syntax_error (_("invalid addressing mode"));
5411 goto failure;
5412 }
5413 if (!info->shifter.operator_present)
5414 {
5415 /* Default to LSL if not present. Libopcodes prefers shifter
5416 kind to be explicit. */
5417 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5418 info->shifter.kind = AARCH64_MOD_LSL;
5419 }
5420 /* Qualifier to be deduced by libopcodes. */
5421 break;
5422
5423 case AARCH64_OPND_ADDR_SIMM7:
5424 po_misc_or_fail (parse_address (&str, info, 0));
5425 if (info->addr.pcrel || info->addr.offset.is_reg
5426 || (!info->addr.preind && !info->addr.postind))
5427 {
5428 set_syntax_error (_("invalid addressing mode"));
5429 goto failure;
5430 }
5431 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5432 /* addr_off_p */ 1,
5433 /* need_libopcodes_p */ 1,
5434 /* skip_p */ 0);
5435 break;
5436
5437 case AARCH64_OPND_ADDR_SIMM9:
5438 case AARCH64_OPND_ADDR_SIMM9_2:
5439 po_misc_or_fail (parse_address_reloc (&str, info));
5440 if (info->addr.pcrel || info->addr.offset.is_reg
5441 || (!info->addr.preind && !info->addr.postind)
5442 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5443 && info->addr.writeback))
5444 {
5445 set_syntax_error (_("invalid addressing mode"));
5446 goto failure;
5447 }
5448 if (inst.reloc.type != BFD_RELOC_UNUSED)
5449 {
5450 set_syntax_error (_("relocation not allowed"));
5451 goto failure;
5452 }
5453 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5454 /* addr_off_p */ 1,
5455 /* need_libopcodes_p */ 1,
5456 /* skip_p */ 0);
5457 break;
5458
5459 case AARCH64_OPND_ADDR_UIMM12:
5460 po_misc_or_fail (parse_address_reloc (&str, info));
5461 if (info->addr.pcrel || info->addr.offset.is_reg
5462 || !info->addr.preind || info->addr.writeback)
5463 {
5464 set_syntax_error (_("invalid addressing mode"));
5465 goto failure;
5466 }
5467 if (inst.reloc.type == BFD_RELOC_UNUSED)
5468 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5469 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5470 || (inst.reloc.type
5471 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
5472 || (inst.reloc.type
5473 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
5474 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5475 /* Leave qualifier to be determined by libopcodes. */
5476 break;
5477
5478 case AARCH64_OPND_SIMD_ADDR_POST:
5479 /* [<Xn|SP>], <Xm|#<amount>> */
5480 po_misc_or_fail (parse_address (&str, info, 1));
5481 if (!info->addr.postind || !info->addr.writeback)
5482 {
5483 set_syntax_error (_("invalid addressing mode"));
5484 goto failure;
5485 }
5486 if (!info->addr.offset.is_reg)
5487 {
5488 if (inst.reloc.exp.X_op == O_constant)
5489 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5490 else
5491 {
5492 set_fatal_syntax_error
5493 (_("writeback value should be an immediate constant"));
5494 goto failure;
5495 }
5496 }
5497 /* No qualifier. */
5498 break;
5499
5500 case AARCH64_OPND_SYSREG:
5501 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
5502 == PARSE_FAIL)
5503 {
5504 set_syntax_error (_("unknown or missing system register name"));
5505 goto failure;
5506 }
5507 inst.base.operands[i].sysreg = val;
5508 break;
5509
5510 case AARCH64_OPND_PSTATEFIELD:
5511 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
5512 == PARSE_FAIL)
5513 {
5514 set_syntax_error (_("unknown or missing PSTATE field name"));
5515 goto failure;
5516 }
5517 inst.base.operands[i].pstatefield = val;
5518 break;
5519
5520 case AARCH64_OPND_SYSREG_IC:
5521 inst.base.operands[i].sysins_op =
5522 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5523 goto sys_reg_ins;
5524 case AARCH64_OPND_SYSREG_DC:
5525 inst.base.operands[i].sysins_op =
5526 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5527 goto sys_reg_ins;
5528 case AARCH64_OPND_SYSREG_AT:
5529 inst.base.operands[i].sysins_op =
5530 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5531 goto sys_reg_ins;
5532 case AARCH64_OPND_SYSREG_TLBI:
5533 inst.base.operands[i].sysins_op =
5534 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5535 sys_reg_ins:
5536 if (inst.base.operands[i].sysins_op == NULL)
5537 {
5538 set_fatal_syntax_error ( _("unknown or missing operation name"));
5539 goto failure;
5540 }
5541 break;
5542
5543 case AARCH64_OPND_BARRIER:
5544 case AARCH64_OPND_BARRIER_ISB:
5545 val = parse_barrier (&str);
5546 if (val != PARSE_FAIL
5547 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5548 {
5549 /* ISB only accepts the option name 'sy'. */
5550 set_syntax_error
5551 (_("the specified option is not accepted in ISB"));
5552 /* Turn off backtrack as this optional operand is present. */
5553 backtrack_pos = 0;
5554 goto failure;
5555 }
5556 /* This is an extension to accept a 0..15 immediate. */
5557 if (val == PARSE_FAIL)
5558 po_imm_or_fail (0, 15);
5559 info->barrier = aarch64_barrier_options + val;
5560 break;
5561
5562 case AARCH64_OPND_PRFOP:
5563 val = parse_pldop (&str);
5564 /* This is an extension to accept a 0..31 immediate. */
5565 if (val == PARSE_FAIL)
5566 po_imm_or_fail (0, 31);
5567 inst.base.operands[i].prfop = aarch64_prfops + val;
5568 break;
5569
5570 default:
5571 as_fatal (_("unhandled operand code %d"), operands[i]);
5572 }
5573
5574 /* If we get here, this operand was successfully parsed. */
5575 inst.base.operands[i].present = 1;
5576 continue;
5577
5578 failure:
5579 /* The parse routine should already have set the error, but in case
5580 not, set a default one here. */
5581 if (! error_p ())
5582 set_default_error ();
5583
5584 if (! backtrack_pos)
5585 goto parse_operands_return;
5586
5587 {
5588 /* We reach here because this operand is marked as optional, and
5589 either no operand was supplied or the operand was supplied but it
5590 was syntactically incorrect. In the latter case we report an
5591 error. In the former case we perform a few more checks before
5592 dropping through to the code to insert the default operand. */
5593
5594 char *tmp = backtrack_pos;
5595 char endchar = END_OF_INSN;
5596
5597 if (i != (aarch64_num_of_operands (opcode) - 1))
5598 endchar = ',';
5599 skip_past_char (&tmp, ',');
5600
5601 if (*tmp != endchar)
5602 /* The user has supplied an operand in the wrong format. */
5603 goto parse_operands_return;
5604
5605 /* Make sure there is not a comma before the optional operand.
5606 For example the fifth operand of 'sys' is optional:
5607
5608 sys #0,c0,c0,#0, <--- wrong
5609 sys #0,c0,c0,#0 <--- correct. */
5610 if (comma_skipped_p && i && endchar == END_OF_INSN)
5611 {
5612 set_fatal_syntax_error
5613 (_("unexpected comma before the omitted optional operand"));
5614 goto parse_operands_return;
5615 }
5616 }
5617
5618 /* Reaching here means we are dealing with an optional operand that is
5619 omitted from the assembly line. */
5620 gas_assert (optional_operand_p (opcode, i));
5621 info->present = 0;
5622 process_omitted_operand (operands[i], opcode, i, info);
5623
5624 /* Try again, skipping the optional operand at backtrack_pos. */
5625 str = backtrack_pos;
5626 backtrack_pos = 0;
5627
5628 /* Clear any error record after the omitted optional operand has been
5629 successfully handled. */
5630 clear_error ();
5631 }
5632
5633 /* Check if we have parsed all the operands. */
5634 if (*str != '\0' && ! error_p ())
5635 {
5636 /* Set I to the index of the last present operand; this is
5637 for the purpose of diagnostics. */
5638 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5639 ;
5640 set_fatal_syntax_error
5641 (_("unexpected characters following instruction"));
5642 }
5643
5644 parse_operands_return:
5645
5646 if (error_p ())
5647 {
5648 DEBUG_TRACE ("parsing FAIL: %s - %s",
5649 operand_mismatch_kind_names[get_error_kind ()],
5650 get_error_message ());
5651 /* Record the operand error properly; this is useful when there
5652 are multiple instruction templates for a mnemonic name, so that
5653 later on, we can select the error that most closely describes
5654 the problem. */
5655 record_operand_error (opcode, i, get_error_kind (),
5656 get_error_message ());
5657 return FALSE;
5658 }
5659 else
5660 {
5661 DEBUG_TRACE ("parsing SUCCESS");
5662 return TRUE;
5663 }
5664 }
5665
5666 /* Perform some fix-ups to provide programmer-friendly features while
5667 keeping libopcodes happy, i.e. libopcodes only accepts
5668 the preferred architectural syntax.
5669 Return FALSE if there is any failure; otherwise return TRUE. */
5670
5671 static bfd_boolean
5672 programmer_friendly_fixup (aarch64_instruction *instr)
5673 {
5674 aarch64_inst *base = &instr->base;
5675 const aarch64_opcode *opcode = base->opcode;
5676 enum aarch64_op op = opcode->op;
5677 aarch64_opnd_info *operands = base->operands;
5678
5679 DEBUG_TRACE ("enter");
5680
5681 switch (opcode->iclass)
5682 {
5683 case testbranch:
5684 /* TBNZ Xn|Wn, #uimm6, label
5685 Test and Branch Not Zero: conditionally jumps to label if bit number
5686 uimm6 in register Xn is not zero. The bit number implies the width of
5687 the register, which may be written and should be disassembled as Wn if
5688 uimm is less than 32. */
5689 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5690 {
5691 if (operands[1].imm.value >= 32)
5692 {
5693 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5694 0, 31);
5695 return FALSE;
5696 }
5697 operands[0].qualifier = AARCH64_OPND_QLF_X;
5698 }
5699 break;
5700 case loadlit:
5701 /* LDR Wt, label | =value
5702 As a convenience, assemblers will typically permit the notation
5703 "=value" in conjunction with the pc-relative literal load instructions
5704 to automatically place an immediate value or symbolic address in a
5705 nearby literal pool and generate a hidden label which references it.
5706 ISREG has been set to 0 in the case of =value. */
5707 if (instr->gen_lit_pool
5708 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5709 {
5710 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5711 if (op == OP_LDRSW_LIT)
5712 size = 4;
5713 if (instr->reloc.exp.X_op != O_constant
5714 && instr->reloc.exp.X_op != O_big
5715 && instr->reloc.exp.X_op != O_symbol)
5716 {
5717 record_operand_error (opcode, 1,
5718 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5719 _("constant expression expected"));
5720 return FALSE;
5721 }
5722 if (! add_to_lit_pool (&instr->reloc.exp, size))
5723 {
5724 record_operand_error (opcode, 1,
5725 AARCH64_OPDE_OTHER_ERROR,
5726 _("literal pool insertion failed"));
5727 return FALSE;
5728 }
5729 }
5730 break;
5731 case log_shift:
5732 case bitfield:
5733 /* UXT[BHW] Wd, Wn
5734 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5735 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5736 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5737 A programmer-friendly assembler should accept a destination Xd in
5738 place of Wd, however that is not the preferred form for disassembly.
5739 */
5740 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5741 && operands[1].qualifier == AARCH64_OPND_QLF_W
5742 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5743 operands[0].qualifier = AARCH64_OPND_QLF_W;
5744 break;
5745
5746 case addsub_ext:
5747 {
5748 /* In the 64-bit form, the final register operand is written as Wm
5749 for all but the (possibly omitted) UXTX/LSL and SXTX
5750 operators.
5751 As a programmer-friendly assembler, we accept e.g.
5752 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5753 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5754 int idx = aarch64_operand_index (opcode->operands,
5755 AARCH64_OPND_Rm_EXT);
5756 gas_assert (idx == 1 || idx == 2);
5757 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5758 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5759 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5760 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5761 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5762 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5763 }
5764 break;
5765
5766 default:
5767 break;
5768 }
5769
5770 DEBUG_TRACE ("exit with SUCCESS");
5771 return TRUE;
5772 }
5773
5774 /* Check for loads and stores that will cause unpredictable behavior. */
5775
5776 static void
5777 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5778 {
5779 aarch64_inst *base = &instr->base;
5780 const aarch64_opcode *opcode = base->opcode;
5781 const aarch64_opnd_info *opnds = base->operands;
5782 switch (opcode->iclass)
5783 {
5784 case ldst_pos:
5785 case ldst_imm9:
5786 case ldst_unscaled:
5787 case ldst_unpriv:
5788 /* Loading/storing the base register is unpredictable if writeback. */
5789 if ((aarch64_get_operand_class (opnds[0].type)
5790 == AARCH64_OPND_CLASS_INT_REG)
5791 && opnds[0].reg.regno == opnds[1].addr.base_regno
5792 && opnds[1].addr.base_regno != REG_SP
5793 && opnds[1].addr.writeback)
5794 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5795 break;
5796 case ldstpair_off:
5797 case ldstnapair_offs:
5798 case ldstpair_indexed:
5799 /* Loading/storing the base register is unpredictable if writeback. */
5800 if ((aarch64_get_operand_class (opnds[0].type)
5801 == AARCH64_OPND_CLASS_INT_REG)
5802 && (opnds[0].reg.regno == opnds[2].addr.base_regno
5803 || opnds[1].reg.regno == opnds[2].addr.base_regno)
5804 && opnds[2].addr.base_regno != REG_SP
5805 && opnds[2].addr.writeback)
5806 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5807 /* Load operations must load different registers. */
5808 if ((opcode->opcode & (1 << 22))
5809 && opnds[0].reg.regno == opnds[1].reg.regno)
5810 as_warn (_("unpredictable load of register pair -- `%s'"), str);
5811 break;
5812 default:
5813 break;
5814 }
5815 }
5816
5817 /* A wrapper function to interface with libopcodes for encoding; it
5818 records the error message if there is any.
5819
5820 Return TRUE on success; otherwise return FALSE. */
5821
5822 static bfd_boolean
5823 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5824 aarch64_insn *code)
5825 {
5826 aarch64_operand_error error_info;
5827 error_info.kind = AARCH64_OPDE_NIL;
5828 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5829 return TRUE;
5830 else
5831 {
5832 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5833 record_operand_error_info (opcode, &error_info);
5834 return FALSE;
5835 }
5836 }
5837
5838 #ifdef DEBUG_AARCH64
5839 static inline void
5840 dump_opcode_operands (const aarch64_opcode *opcode)
5841 {
5842 int i = 0;
5843 while (opcode->operands[i] != AARCH64_OPND_NIL)
5844 {
5845 aarch64_verbose ("\t\t opnd%d: %s", i,
5846 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5847 ? aarch64_get_operand_name (opcode->operands[i])
5848 : aarch64_get_operand_desc (opcode->operands[i]));
5849 ++i;
5850 }
5851 }
5852 #endif /* DEBUG_AARCH64 */
5853
5854 /* This is the guts of the machine-dependent assembler. STR points to a
5855 machine dependent instruction. This function is supposed to emit
5856 the frags/bytes it assembles to. */
5857
5858 void
5859 md_assemble (char *str)
5860 {
5861 char *p = str;
5862 templates *template;
5863 aarch64_opcode *opcode;
5864 aarch64_inst *inst_base;
5865 unsigned saved_cond;
5866
5867 /* Align the previous label if needed. */
5868 if (last_label_seen != NULL)
5869 {
5870 symbol_set_frag (last_label_seen, frag_now);
5871 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5872 S_SET_SEGMENT (last_label_seen, now_seg);
5873 }
5874
5875 inst.reloc.type = BFD_RELOC_UNUSED;
5876
5877 DEBUG_TRACE ("\n\n");
5878 DEBUG_TRACE ("==============================");
5879 DEBUG_TRACE ("Enter md_assemble with %s", str);
5880
5881 template = opcode_lookup (&p);
5882 if (!template)
5883 {
5884 /* It wasn't an instruction, but it might be a register alias
5885 created by an "alias .req reg" directive. */
5886 if (!create_register_alias (str, p))
5887 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5888 str);
5889 return;
5890 }
5891
5892 skip_whitespace (p);
5893 if (*p == ',')
5894 {
5895 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5896 get_mnemonic_name (str), str);
5897 return;
5898 }
5899
5900 init_operand_error_report ();
5901
5902 /* Sections are assumed to start aligned. In an executable section, there is
5903 no MAP_DATA symbol pending, so we only align the address during the
5904 MAP_DATA --> MAP_INSN transition.
5905 For other sections, this is not guaranteed. */
5906 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
5907 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
5908 frag_align_code (2, 0);
5909
5910 saved_cond = inst.cond;
5911 reset_aarch64_instruction (&inst);
5912 inst.cond = saved_cond;
5913
5914 /* Iterate through all opcode entries with the same mnemonic name. */
5915 do
5916 {
5917 opcode = template->opcode;
5918
5919 DEBUG_TRACE ("opcode %s found", opcode->name);
5920 #ifdef DEBUG_AARCH64
5921 if (debug_dump)
5922 dump_opcode_operands (opcode);
5923 #endif /* DEBUG_AARCH64 */
5924
5925 mapping_state (MAP_INSN);
5926
5927 inst_base = &inst.base;
5928 inst_base->opcode = opcode;
5929
5930 /* Truly conditionally executed instructions, e.g. b.cond. */
5931 if (opcode->flags & F_COND)
5932 {
5933 gas_assert (inst.cond != COND_ALWAYS);
5934 inst_base->cond = get_cond_from_value (inst.cond);
5935 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5936 }
5937 else if (inst.cond != COND_ALWAYS)
5938 {
5939 /* We shouldn't arrive here: the assembly looks like a conditional
5940 instruction but the opcode found is unconditional. */
5941 gas_assert (0);
5942 continue;
5943 }
5944
5945 if (parse_operands (p, opcode)
5946 && programmer_friendly_fixup (&inst)
5947 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5948 {
5949 /* Check that this instruction is supported for this CPU. */
5950 if (!opcode->avariant
5951 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5952 {
5953 as_bad (_("selected processor does not support `%s'"), str);
5954 return;
5955 }
5956
5957 warn_unpredictable_ldst (&inst, str);
5958
5959 if (inst.reloc.type == BFD_RELOC_UNUSED
5960 || !inst.reloc.need_libopcodes_p)
5961 output_inst (NULL);
5962 else
5963 {
5964 /* If there is a relocation generated for the instruction,
5965 store the instruction information for the future fix-up. */
5966 struct aarch64_inst *copy;
5967 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5968 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5969 abort ();
5970 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5971 output_inst (copy);
5972 }
5973 return;
5974 }
5975
5976 template = template->next;
5977 if (template != NULL)
5978 {
5979 reset_aarch64_instruction (&inst);
5980 inst.cond = saved_cond;
5981 }
5982 }
5983 while (template != NULL);
5984
5985 /* Issue the error messages if any. */
5986 output_operand_error_report (str);
5987 }
5988
5989 /* Various frobbings of labels and their addresses. */
5990
5991 void
5992 aarch64_start_line_hook (void)
5993 {
5994 last_label_seen = NULL;
5995 }
5996
5997 void
5998 aarch64_frob_label (symbolS * sym)
5999 {
6000 last_label_seen = sym;
6001
6002 dwarf2_emit_label (sym);
6003 }
6004
6005 int
6006 aarch64_data_in_code (void)
6007 {
6008 if (!strncmp (input_line_pointer + 1, "data:", 5))
6009 {
6010 *input_line_pointer = '/';
6011 input_line_pointer += 5;
6012 *input_line_pointer = 0;
6013 return 1;
6014 }
6015
6016 return 0;
6017 }
6018
6019 char *
6020 aarch64_canonicalize_symbol_name (char *name)
6021 {
6022 int len;
6023
6024 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
6025 *(name + len - 5) = 0;
6026
6027 return name;
6028 }
6029 \f
6030 /* Table of all register names defined by default. The user can
6031 define additional names with .req. Note that all register names
6032 should appear in both upper and lowercase variants. Some registers
6033 also have mixed-case names. */
6034
6035 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
6036 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
6037 #define REGSET31(p,t) \
6038 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
6039 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
6040 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
6041 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
6042 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
6043 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
6044 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
6045 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
6046 #define REGSET(p,t) \
6047 REGSET31(p,t), REGNUM(p,31,t)
6048
6049 /* These go into aarch64_reg_hsh hash-table. */
6050 static const reg_entry reg_names[] = {
6051 /* Integer registers. */
6052 REGSET31 (x, R_64), REGSET31 (X, R_64),
6053 REGSET31 (w, R_32), REGSET31 (W, R_32),
6054
6055 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
6056 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
6057
6058 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
6059 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
6060
6061 /* Coprocessor register numbers. */
6062 REGSET (c, CN), REGSET (C, CN),
6063
6064 /* Floating-point single precision registers. */
6065 REGSET (s, FP_S), REGSET (S, FP_S),
6066
6067 /* Floating-point double precision registers. */
6068 REGSET (d, FP_D), REGSET (D, FP_D),
6069
6070 /* Floating-point half precision registers. */
6071 REGSET (h, FP_H), REGSET (H, FP_H),
6072
6073 /* Floating-point byte precision registers. */
6074 REGSET (b, FP_B), REGSET (B, FP_B),
6075
6076 /* Floating-point quad precision registers. */
6077 REGSET (q, FP_Q), REGSET (Q, FP_Q),
6078
6079 /* FP/SIMD registers. */
6080 REGSET (v, VN), REGSET (V, VN),
6081 };
6082
6083 #undef REGDEF
6084 #undef REGNUM
6085 #undef REGSET
6086
6087 #define N 1
6088 #define n 0
6089 #define Z 1
6090 #define z 0
6091 #define C 1
6092 #define c 0
6093 #define V 1
6094 #define v 0
6095 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
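/* Illustrative note (not part of the original sources): each entry below
   packs the N, Z, C and V flags into the 4-bit flags immediate consumed by
   the conditional compare instructions (CCMP/CCMN/FCCMP/FCCMPE); e.g.
   "nZCv" encodes as B (n, Z, C, v) == 0b0110, the same value as writing
   the operand as #6.  */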
6096 static const asm_nzcv nzcv_names[] = {
6097 {"nzcv", B (n, z, c, v)},
6098 {"nzcV", B (n, z, c, V)},
6099 {"nzCv", B (n, z, C, v)},
6100 {"nzCV", B (n, z, C, V)},
6101 {"nZcv", B (n, Z, c, v)},
6102 {"nZcV", B (n, Z, c, V)},
6103 {"nZCv", B (n, Z, C, v)},
6104 {"nZCV", B (n, Z, C, V)},
6105 {"Nzcv", B (N, z, c, v)},
6106 {"NzcV", B (N, z, c, V)},
6107 {"NzCv", B (N, z, C, v)},
6108 {"NzCV", B (N, z, C, V)},
6109 {"NZcv", B (N, Z, c, v)},
6110 {"NZcV", B (N, Z, c, V)},
6111 {"NZCv", B (N, Z, C, v)},
6112 {"NZCV", B (N, Z, C, V)}
6113 };
6114
6115 #undef N
6116 #undef n
6117 #undef Z
6118 #undef z
6119 #undef C
6120 #undef c
6121 #undef V
6122 #undef v
6123 #undef B
6124 \f
6125 /* MD interface: bits in the object file. */
6126
6127 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6128 for use in the a.out file, and store them in the array pointed to by buf.
6129 This knows about the endian-ness of the target machine and does
6130 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
6131 2 (short) and 4 (long). Floating point numbers are put out as a series of
6132 LITTLENUMS (shorts, here at least). */
6133
6134 void
6135 md_number_to_chars (char *buf, valueT val, int n)
6136 {
6137 if (target_big_endian)
6138 number_to_chars_bigendian (buf, val, n);
6139 else
6140 number_to_chars_littleendian (buf, val, n);
6141 }
6142
6143 /* MD interface: Sections. */
6144
6145 /* Estimate the size of a frag before relaxing. Assume everything fits in
6146 4 bytes. */
6147
6148 int
6149 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6150 {
6151 fragp->fr_var = 4;
6152 return 4;
6153 }
6154
6155 /* Round up a section size to the appropriate boundary. */
6156
6157 valueT
6158 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
6159 {
6160 return size;
6161 }
6162
6163 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6164 of an rs_align_code fragment.
6165
6166 Here we fill the frag with the appropriate info for padding the
6167 output stream. The resulting frag will consist of a fixed (fr_fix)
6168 and of a repeating (fr_var) part.
6169
6170 The fixed content is always emitted before the repeating content and
6171 these two parts are used as follows in constructing the output:
6172 - the fixed part will be used to align to a valid instruction word
6173 boundary, in case that we start at a misaligned address; as no
6174 executable instruction can live at the misaligned location, we
6175 simply fill with zeros;
6176 - the variable part will be used to cover the remaining padding and
6177 we fill using the AArch64 NOP instruction.
6178
6179 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6180 enough storage space for up to 3 bytes for padding back to a valid
6181 instruction alignment and exactly 4 bytes to store the NOP pattern. */
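/* Worked example (illustrative, not from the original comment): if 10 bytes
   of padding are needed, FIX is 10 & 3 == 2, so two zero bytes are emitted
   to reach a 4-byte boundary (covered by a data mapping symbol under ELF)
   and the remaining 8 bytes are filled by repeating the 4-byte NOP pattern
   held in the variable part of the frag.  */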
6182
6183 void
6184 aarch64_handle_align (fragS * fragP)
6185 {
6186 /* NOP = d503201f */
6187 /* AArch64 instructions are always little-endian. */
6188 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6189
6190 int bytes, fix, noop_size;
6191 char *p;
6192
6193 if (fragP->fr_type != rs_align_code)
6194 return;
6195
6196 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6197 p = fragP->fr_literal + fragP->fr_fix;
6198
6199 #ifdef OBJ_ELF
6200 gas_assert (fragP->tc_frag_data.recorded);
6201 #endif
6202
6203 noop_size = sizeof (aarch64_noop);
6204
6205 fix = bytes & (noop_size - 1);
6206 if (fix)
6207 {
6208 #ifdef OBJ_ELF
6209 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6210 #endif
6211 memset (p, 0, fix);
6212 p += fix;
6213 fragP->fr_fix += fix;
6214 }
6215
6216 if (noop_size)
6217 memcpy (p, aarch64_noop, noop_size);
6218 fragP->fr_var = noop_size;
6219 }
6220
6221 /* Perform target specific initialisation of a frag.
6222 Note - despite the name this initialisation is not done when the frag
6223 is created, but only when its type is assigned. A frag can be created
6224 and used a long time before its type is set, so beware of assuming that
6225 this initialisation is performed first. */
6226
6227 #ifndef OBJ_ELF
6228 void
6229 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6230 int max_chars ATTRIBUTE_UNUSED)
6231 {
6232 }
6233
6234 #else /* OBJ_ELF is defined. */
6235 void
6236 aarch64_init_frag (fragS * fragP, int max_chars)
6237 {
6238 /* Record a mapping symbol for alignment frags. We will delete this
6239 later if the alignment ends up empty. */
6240 if (!fragP->tc_frag_data.recorded)
6241 fragP->tc_frag_data.recorded = 1;
6242
6243 switch (fragP->fr_type)
6244 {
6245 case rs_align:
6246 case rs_align_test:
6247 case rs_fill:
6248 mapping_state_2 (MAP_DATA, max_chars);
6249 break;
6250 case rs_align_code:
6251 mapping_state_2 (MAP_INSN, max_chars);
6252 break;
6253 default:
6254 break;
6255 }
6256 }
6257 \f
6258 /* Initialize the DWARF-2 unwind information for this procedure. */
6259
6260 void
6261 tc_aarch64_frame_initial_instructions (void)
6262 {
6263 cfi_add_CFA_def_cfa (REG_SP, 0);
6264 }
6265 #endif /* OBJ_ELF */
6266
6267 /* Convert REGNAME to a DWARF-2 register number. */
6268
6269 int
6270 tc_aarch64_regname_to_dw2regnum (char *regname)
6271 {
6272 const reg_entry *reg = parse_reg (&regname);
6273 if (reg == NULL)
6274 return -1;
6275
6276 switch (reg->type)
6277 {
6278 case REG_TYPE_SP_32:
6279 case REG_TYPE_SP_64:
6280 case REG_TYPE_R_32:
6281 case REG_TYPE_R_64:
6282 return reg->number;
6283
6284 case REG_TYPE_FP_B:
6285 case REG_TYPE_FP_H:
6286 case REG_TYPE_FP_S:
6287 case REG_TYPE_FP_D:
6288 case REG_TYPE_FP_Q:
6289 return reg->number + 64;
6290
6291 default:
6292 break;
6293 }
6294 return -1;
6295 }
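/* For example (illustrative): ".cfi_offset x19, -16" resolves "x19" to DWARF
   register 19 via this hook, "sp" resolves to 31, and "d8" to 8 + 64 == 72,
   matching the AArch64 DWARF numbering of the FP/SIMD bank.  */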
6296
6297 /* Implement DWARF2_ADDR_SIZE. */
6298
6299 int
6300 aarch64_dwarf2_addr_size (void)
6301 {
6302 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6303 if (ilp32_p)
6304 return 4;
6305 #endif
6306 return bfd_arch_bits_per_address (stdoutput) / 8;
6307 }
6308
6309 /* MD interface: Symbol and relocation handling. */
6310
6311 /* Return the address within the segment that a PC-relative fixup is
6312 relative to. For AArch64, PC-relative fixups applied to instructions
6313 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6314
6315 long
6316 md_pcrel_from_section (fixS * fixP, segT seg)
6317 {
6318 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6319
6320 /* If this is pc-relative and we are going to emit a relocation
6321 then we just want to put out any pipeline compensation that the linker
6322 will need. Otherwise we want to use the calculated base. */
6323 if (fixP->fx_pcrel
6324 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6325 || aarch64_force_relocation (fixP)))
6326 base = 0;
6327
6328 /* AArch64 should be consistent for all pc-relative relocations. */
6329 return base + AARCH64_PCREL_OFFSET;
6330 }
6331
6332 /* Under ELF we need to default the _GLOBAL_OFFSET_TABLE_ symbol.
6333 Otherwise we have no need to default the values of symbols. */
6334
6335 symbolS *
6336 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6337 {
6338 #ifdef OBJ_ELF
6339 if (name[0] == '_' && name[1] == 'G'
6340 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6341 {
6342 if (!GOT_symbol)
6343 {
6344 if (symbol_find (name))
6345 as_bad (_("GOT already in the symbol table"));
6346
6347 GOT_symbol = symbol_new (name, undefined_section,
6348 (valueT) 0, &zero_address_frag);
6349 }
6350
6351 return GOT_symbol;
6352 }
6353 #endif
6354
6355 return 0;
6356 }
6357
6358 /* Return non-zero if the indicated VALUE has overflowed the maximum
6359 range expressible by an unsigned number with the indicated number of
6360 BITS. */
6361
6362 static bfd_boolean
6363 unsigned_overflow (valueT value, unsigned bits)
6364 {
6365 valueT lim;
6366 if (bits >= sizeof (valueT) * 8)
6367 return FALSE;
6368 lim = (valueT) 1 << bits;
6369 return (value >= lim);
6370 }
6371
6372
6373 /* Return non-zero if the indicated VALUE has overflowed the maximum
6374 range expressible by a signed number with the indicated number of
6375 BITS. */
6376
6377 static bfd_boolean
6378 signed_overflow (offsetT value, unsigned bits)
6379 {
6380 offsetT lim;
6381 if (bits >= sizeof (offsetT) * 8)
6382 return FALSE;
6383 lim = (offsetT) 1 << (bits - 1);
6384 return (value < -lim || value >= lim);
6385 }
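/* For example (illustrative): unsigned_overflow (0x10000, 16) is TRUE while
   unsigned_overflow (0xffff, 16) is FALSE; signed_overflow (32768, 16) is
   TRUE while signed_overflow (-32768, 16) is FALSE.  */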
6386
6387 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6388 unsigned immediate offset load/store instruction, try to encode it as
6389 an unscaled, 9-bit, signed immediate offset load/store instruction.
6390 Return TRUE if it is successful; otherwise return FALSE.
6391
6392 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
6393 in response to the standard LDR/STR mnemonics when the immediate offset is
6394 unambiguous, i.e. when it is negative or unaligned. */
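/* Illustrative example (hypothetical source, not from the original comment):

	ldr	x0, [x1, #off]
	.set	off, -8

   The offset only becomes known at fix-up time and resolves to a negative
   value, so it cannot be encoded as LDR (unsigned offset); the conversion
   below re-encodes it as LDUR x0, [x1, #-8].  An aligned, non-negative
   offset such as #8 would stay as LDR with imm12 == 1 after scaling by the
   8-byte access size.  */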
6395
6396 static bfd_boolean
6397 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6398 {
6399 int idx;
6400 enum aarch64_op new_op;
6401 const aarch64_opcode *new_opcode;
6402
6403 gas_assert (instr->opcode->iclass == ldst_pos);
6404
6405 switch (instr->opcode->op)
6406 {
6407 case OP_LDRB_POS:new_op = OP_LDURB; break;
6408 case OP_STRB_POS: new_op = OP_STURB; break;
6409 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6410 case OP_LDRH_POS: new_op = OP_LDURH; break;
6411 case OP_STRH_POS: new_op = OP_STURH; break;
6412 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6413 case OP_LDR_POS: new_op = OP_LDUR; break;
6414 case OP_STR_POS: new_op = OP_STUR; break;
6415 case OP_LDRF_POS: new_op = OP_LDURV; break;
6416 case OP_STRF_POS: new_op = OP_STURV; break;
6417 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6418 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6419 default: new_op = OP_NIL; break;
6420 }
6421
6422 if (new_op == OP_NIL)
6423 return FALSE;
6424
6425 new_opcode = aarch64_get_opcode (new_op);
6426 gas_assert (new_opcode != NULL);
6427
6428 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6429 instr->opcode->op, new_opcode->op);
6430
6431 aarch64_replace_opcode (instr, new_opcode);
6432
6433 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6434 qualifier matching may fail because the out-of-date qualifier will
6435 prevent the operand being updated with a new and correct qualifier. */
6436 idx = aarch64_operand_index (instr->opcode->operands,
6437 AARCH64_OPND_ADDR_SIMM9);
6438 gas_assert (idx == 1);
6439 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6440
6441 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6442
6443 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6444 return FALSE;
6445
6446 return TRUE;
6447 }
6448
6449 /* Called by fix_insn to fix a MOV immediate alias instruction.
6450
6451 Operand for a generic move immediate instruction, which is an alias
6452 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6453 a 32-bit/64-bit immediate value into a general register. An assembler error
6454 shall result if the immediate cannot be created by a single one of these
6455 instructions. If there is a choice, then to ensure reversibility an
6456 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
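/* Illustrative examples (hypothetical, not from the original comment):

	mov	x0, #0x10000			// -> movz x0, #0x1, lsl #16
	mov	x0, #-1				// -> movn x0, #0x0
	mov	x0, #0xff00ff00ff00ff00		// -> orr x0, xzr, #0xff00ff00ff00ff00
	mov	x0, #0x12345			// error: not encodable in one insn

   The function below applies the same preference order (MOVZ, then the
   MOVN-based alias, then the ORR bitmask form) when the immediate only
   becomes known at fix-up time.  */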
6457
6458 static void
6459 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6460 {
6461 const aarch64_opcode *opcode;
6462
6463 /* Need to check if the destination is SP/ZR. The check has to be done
6464 before any aarch64_replace_opcode. */
6465 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6466 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6467
6468 instr->operands[1].imm.value = value;
6469 instr->operands[1].skip = 0;
6470
6471 if (try_mov_wide_p)
6472 {
6473 /* Try the MOVZ alias. */
6474 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6475 aarch64_replace_opcode (instr, opcode);
6476 if (aarch64_opcode_encode (instr->opcode, instr,
6477 &instr->value, NULL, NULL))
6478 {
6479 put_aarch64_insn (buf, instr->value);
6480 return;
6481 }
6482 /* Try the MOVN alias. */
6483 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6484 aarch64_replace_opcode (instr, opcode);
6485 if (aarch64_opcode_encode (instr->opcode, instr,
6486 &instr->value, NULL, NULL))
6487 {
6488 put_aarch64_insn (buf, instr->value);
6489 return;
6490 }
6491 }
6492
6493 if (try_mov_bitmask_p)
6494 {
6495 /* Try the ORR alias. */
6496 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6497 aarch64_replace_opcode (instr, opcode);
6498 if (aarch64_opcode_encode (instr->opcode, instr,
6499 &instr->value, NULL, NULL))
6500 {
6501 put_aarch64_insn (buf, instr->value);
6502 return;
6503 }
6504 }
6505
6506 as_bad_where (fixP->fx_file, fixP->fx_line,
6507 _("immediate cannot be moved by a single instruction"));
6508 }
6509
6510 /* An instruction operand which is immediate-related may have a symbol used
6511 in the assembly, e.g.
6512
6513 mov w0, u32
6514 .set u32, 0x00ffff00
6515
6516 At the time the assembly instruction is parsed, a referenced symbol,
6517 like 'u32' in the above example, may not yet have been seen; a fixS is
6518 created in such a case and is handled here after symbols have been
6519 resolved. The instruction is fixed up with VALUE using the information
6520 in *FIXP plus extra information in FLAGS.
6521
6522 This function is called by md_apply_fix to fix up instructions that need
6523 a fix-up described above but do not involve any linker-time relocation. */
6524
6525 static void
6526 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6527 {
6528 int idx;
6529 uint32_t insn;
6530 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6531 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6532 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6533
6534 if (new_inst)
6535 {
6536 /* Now the instruction is about to be fixed-up, so the operand that
6537 was previously marked as 'ignored' needs to be unmarked in order
6538 to get the encoding done properly. */
6539 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6540 new_inst->operands[idx].skip = 0;
6541 }
6542
6543 gas_assert (opnd != AARCH64_OPND_NIL);
6544
6545 switch (opnd)
6546 {
6547 case AARCH64_OPND_EXCEPTION:
6548 if (unsigned_overflow (value, 16))
6549 as_bad_where (fixP->fx_file, fixP->fx_line,
6550 _("immediate out of range"));
6551 insn = get_aarch64_insn (buf);
6552 insn |= encode_svc_imm (value);
6553 put_aarch64_insn (buf, insn);
6554 break;
6555
6556 case AARCH64_OPND_AIMM:
6557 /* ADD or SUB with immediate.
6558 NOTE this assumes we come here with an add/sub shifted reg encoding
6559 3 322|2222|2 2 2 21111 111111
6560 1 098|7654|3 2 1 09876 543210 98765 43210
6561 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6562 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6563 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6564 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6565 ->
6566 3 322|2222|2 2 221111111111
6567 1 098|7654|3 2 109876543210 98765 43210
6568 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6569 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6570 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6571 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6572 Fields sf Rn Rd are already set. */
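      /* Worked example (illustrative, not from the original comment): for
	 "add x0, x1, #imm" with imm resolving to 0x5000, the value does not
	 fit in 12 bits, but 0x5000 == 0x5 << 12, so the code below encodes
	 imm12 == 5 together with the LSL #12 shift flag.  A negative value
	 such as -16 is first turned into the opposite operation, i.e.
	 "sub x0, x1, #16", by reencode_addsub_switch_add_sub.  */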
6573 insn = get_aarch64_insn (buf);
6574 if (value < 0)
6575 {
6576 /* Add <-> sub. */
6577 insn = reencode_addsub_switch_add_sub (insn);
6578 value = -value;
6579 }
6580
6581 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6582 && unsigned_overflow (value, 12))
6583 {
6584 /* Try to shift the value by 12 to make it fit. */
6585 if (((value >> 12) << 12) == value
6586 && ! unsigned_overflow (value, 12 + 12))
6587 {
6588 value >>= 12;
6589 insn |= encode_addsub_imm_shift_amount (1);
6590 }
6591 }
6592
6593 if (unsigned_overflow (value, 12))
6594 as_bad_where (fixP->fx_file, fixP->fx_line,
6595 _("immediate out of range"));
6596
6597 insn |= encode_addsub_imm (value);
6598
6599 put_aarch64_insn (buf, insn);
6600 break;
6601
6602 case AARCH64_OPND_SIMD_IMM:
6603 case AARCH64_OPND_SIMD_IMM_SFT:
6604 case AARCH64_OPND_LIMM:
6605 /* Bit mask immediate. */
6606 gas_assert (new_inst != NULL);
6607 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6608 new_inst->operands[idx].imm.value = value;
6609 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6610 &new_inst->value, NULL, NULL))
6611 put_aarch64_insn (buf, new_inst->value);
6612 else
6613 as_bad_where (fixP->fx_file, fixP->fx_line,
6614 _("invalid immediate"));
6615 break;
6616
6617 case AARCH64_OPND_HALF:
6618 /* 16-bit unsigned immediate. */
6619 if (unsigned_overflow (value, 16))
6620 as_bad_where (fixP->fx_file, fixP->fx_line,
6621 _("immediate out of range"));
6622 insn = get_aarch64_insn (buf);
6623 insn |= encode_movw_imm (value & 0xffff);
6624 put_aarch64_insn (buf, insn);
6625 break;
6626
6627 case AARCH64_OPND_IMM_MOV:
6628 /* Operand for a generic move immediate instruction, which is
6629 an alias instruction that generates a single MOVZ, MOVN or ORR
6630 instruction to loads a 32-bit/64-bit immediate value into general
6631 register. An assembler error shall result if the immediate cannot be
6632 created by a single one of these instructions. If there is a choice,
6633 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
6634 and MOVZ or MOVN to ORR. */
6635 gas_assert (new_inst != NULL);
6636 fix_mov_imm_insn (fixP, buf, new_inst, value);
6637 break;
6638
6639 case AARCH64_OPND_ADDR_SIMM7:
6640 case AARCH64_OPND_ADDR_SIMM9:
6641 case AARCH64_OPND_ADDR_SIMM9_2:
6642 case AARCH64_OPND_ADDR_UIMM12:
6643 /* Immediate offset in an address. */
6644 insn = get_aarch64_insn (buf);
6645
6646 gas_assert (new_inst != NULL && new_inst->value == insn);
6647 gas_assert (new_inst->opcode->operands[1] == opnd
6648 || new_inst->opcode->operands[2] == opnd);
6649
6650 /* Get the index of the address operand. */
6651 if (new_inst->opcode->operands[1] == opnd)
6652 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6653 idx = 1;
6654 else
6655 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6656 idx = 2;
6657
6658 /* Update the resolved offset value. */
6659 new_inst->operands[idx].addr.offset.imm = value;
6660
6661 /* Encode/fix-up. */
6662 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6663 &new_inst->value, NULL, NULL))
6664 {
6665 put_aarch64_insn (buf, new_inst->value);
6666 break;
6667 }
6668 else if (new_inst->opcode->iclass == ldst_pos
6669 && try_to_encode_as_unscaled_ldst (new_inst))
6670 {
6671 put_aarch64_insn (buf, new_inst->value);
6672 break;
6673 }
6674
6675 as_bad_where (fixP->fx_file, fixP->fx_line,
6676 _("immediate offset out of range"));
6677 break;
6678
6679 default:
6680 gas_assert (0);
6681 as_fatal (_("unhandled operand code %d"), opnd);
6682 }
6683 }
6684
6685 /* Apply a fixup (fixP) to segment data, once it has been determined
6686 by our caller that we have all the info we need to fix it up.
6687
6688 Parameter valP is the pointer to the value of the bits. */
6689
6690 void
6691 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6692 {
6693 offsetT value = *valP;
6694 uint32_t insn;
6695 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6696 int scale;
6697 unsigned flags = fixP->fx_addnumber;
6698
6699 DEBUG_TRACE ("\n\n");
6700 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6701 DEBUG_TRACE ("Enter md_apply_fix");
6702
6703 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6704
6705 /* Note whether this will delete the relocation. */
6706
6707 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6708 fixP->fx_done = 1;
6709
6710 /* Process the relocations. */
6711 switch (fixP->fx_r_type)
6712 {
6713 case BFD_RELOC_NONE:
6714 /* This will need to go in the object file. */
6715 fixP->fx_done = 0;
6716 break;
6717
6718 case BFD_RELOC_8:
6719 case BFD_RELOC_8_PCREL:
6720 if (fixP->fx_done || !seg->use_rela_p)
6721 md_number_to_chars (buf, value, 1);
6722 break;
6723
6724 case BFD_RELOC_16:
6725 case BFD_RELOC_16_PCREL:
6726 if (fixP->fx_done || !seg->use_rela_p)
6727 md_number_to_chars (buf, value, 2);
6728 break;
6729
6730 case BFD_RELOC_32:
6731 case BFD_RELOC_32_PCREL:
6732 if (fixP->fx_done || !seg->use_rela_p)
6733 md_number_to_chars (buf, value, 4);
6734 break;
6735
6736 case BFD_RELOC_64:
6737 case BFD_RELOC_64_PCREL:
6738 if (fixP->fx_done || !seg->use_rela_p)
6739 md_number_to_chars (buf, value, 8);
6740 break;
6741
6742 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6743 /* We claim that these fixups have been processed here, even if
6744 in fact we generate an error because we do not have a reloc
6745 for them, so tc_gen_reloc() will reject them. */
6746 fixP->fx_done = 1;
6747 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6748 {
6749 as_bad_where (fixP->fx_file, fixP->fx_line,
6750 _("undefined symbol %s used as an immediate value"),
6751 S_GET_NAME (fixP->fx_addsy));
6752 goto apply_fix_return;
6753 }
6754 fix_insn (fixP, flags, value);
6755 break;
6756
6757 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6758 if (fixP->fx_done || !seg->use_rela_p)
6759 {
6760 if (value & 3)
6761 as_bad_where (fixP->fx_file, fixP->fx_line,
6762 _("pc-relative load offset not word aligned"));
6763 if (signed_overflow (value, 21))
6764 as_bad_where (fixP->fx_file, fixP->fx_line,
6765 _("pc-relative load offset out of range"));
6766 insn = get_aarch64_insn (buf);
6767 insn |= encode_ld_lit_ofs_19 (value >> 2);
6768 put_aarch64_insn (buf, insn);
6769 }
6770 break;
6771
6772 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6773 if (fixP->fx_done || !seg->use_rela_p)
6774 {
6775 if (signed_overflow (value, 21))
6776 as_bad_where (fixP->fx_file, fixP->fx_line,
6777 _("pc-relative address offset out of range"));
6778 insn = get_aarch64_insn (buf);
6779 insn |= encode_adr_imm (value);
6780 put_aarch64_insn (buf, insn);
6781 }
6782 break;
6783
6784 case BFD_RELOC_AARCH64_BRANCH19:
6785 if (fixP->fx_done || !seg->use_rela_p)
6786 {
6787 if (value & 3)
6788 as_bad_where (fixP->fx_file, fixP->fx_line,
6789 _("conditional branch target not word aligned"));
6790 if (signed_overflow (value, 21))
6791 as_bad_where (fixP->fx_file, fixP->fx_line,
6792 _("conditional branch out of range"));
6793 insn = get_aarch64_insn (buf);
6794 insn |= encode_cond_branch_ofs_19 (value >> 2);
6795 put_aarch64_insn (buf, insn);
6796 }
6797 break;
6798
6799 case BFD_RELOC_AARCH64_TSTBR14:
6800 if (fixP->fx_done || !seg->use_rela_p)
6801 {
6802 if (value & 3)
6803 as_bad_where (fixP->fx_file, fixP->fx_line,
6804 _("conditional branch target not word aligned"));
6805 if (signed_overflow (value, 16))
6806 as_bad_where (fixP->fx_file, fixP->fx_line,
6807 _("conditional branch out of range"));
6808 insn = get_aarch64_insn (buf);
6809 insn |= encode_tst_branch_ofs_14 (value >> 2);
6810 put_aarch64_insn (buf, insn);
6811 }
6812 break;
6813
6814 case BFD_RELOC_AARCH64_CALL26:
6815 case BFD_RELOC_AARCH64_JUMP26:
6816 if (fixP->fx_done || !seg->use_rela_p)
6817 {
6818 if (value & 3)
6819 as_bad_where (fixP->fx_file, fixP->fx_line,
6820 _("branch target not word aligned"));
6821 if (signed_overflow (value, 28))
6822 as_bad_where (fixP->fx_file, fixP->fx_line,
6823 _("branch out of range"));
6824 insn = get_aarch64_insn (buf);
6825 insn |= encode_branch_ofs_26 (value >> 2);
6826 put_aarch64_insn (buf, insn);
6827 }
6828 break;
6829
6830 case BFD_RELOC_AARCH64_MOVW_G0:
6831 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6832 case BFD_RELOC_AARCH64_MOVW_G0_S:
6833 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6834 scale = 0;
6835 goto movw_common;
6836 case BFD_RELOC_AARCH64_MOVW_G1:
6837 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6838 case BFD_RELOC_AARCH64_MOVW_G1_S:
6839 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6840 scale = 16;
6841 goto movw_common;
6842 case BFD_RELOC_AARCH64_MOVW_G2:
6843 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6844 case BFD_RELOC_AARCH64_MOVW_G2_S:
6845 scale = 32;
6846 goto movw_common;
6847 case BFD_RELOC_AARCH64_MOVW_G3:
6848 scale = 48;
6849 movw_common:
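      /* Illustrative example (values chosen for this note): a locally
	 resolved BFD_RELOC_AARCH64_MOVW_G1 fixup with value 0x12345678
	 passes the unsigned overflow check against scale + 16 == 32 bits,
	 is shifted right by scale (16) to 0x1234, and that halfword is
	 inserted into the MOVZ/MOVN/MOVK immediate field below.  */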
6850 if (fixP->fx_done || !seg->use_rela_p)
6851 {
6852 insn = get_aarch64_insn (buf);
6853
6854 if (!fixP->fx_done)
6855 {
6856 /* REL signed addend must fit in 16 bits */
6857 if (signed_overflow (value, 16))
6858 as_bad_where (fixP->fx_file, fixP->fx_line,
6859 _("offset out of range"));
6860 }
6861 else
6862 {
6863 /* Check for overflow and scale. */
6864 switch (fixP->fx_r_type)
6865 {
6866 case BFD_RELOC_AARCH64_MOVW_G0:
6867 case BFD_RELOC_AARCH64_MOVW_G1:
6868 case BFD_RELOC_AARCH64_MOVW_G2:
6869 case BFD_RELOC_AARCH64_MOVW_G3:
6870 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6871 if (unsigned_overflow (value, scale + 16))
6872 as_bad_where (fixP->fx_file, fixP->fx_line,
6873 _("unsigned value out of range"));
6874 break;
6875 case BFD_RELOC_AARCH64_MOVW_G0_S:
6876 case BFD_RELOC_AARCH64_MOVW_G1_S:
6877 case BFD_RELOC_AARCH64_MOVW_G2_S:
6878 /* NOTE: We can only come here with movz or movn. */
6879 if (signed_overflow (value, scale + 16))
6880 as_bad_where (fixP->fx_file, fixP->fx_line,
6881 _("signed value out of range"));
6882 if (value < 0)
6883 {
6884 /* Force use of MOVN. */
6885 value = ~value;
6886 insn = reencode_movzn_to_movn (insn);
6887 }
6888 else
6889 {
6890 /* Force use of MOVZ. */
6891 insn = reencode_movzn_to_movz (insn);
6892 }
6893 break;
6894 default:
6895 /* Unchecked relocations. */
6896 break;
6897 }
6898 value >>= scale;
6899 }
6900
6901 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6902 insn |= encode_movw_imm (value & 0xffff);
6903
6904 put_aarch64_insn (buf, insn);
6905 }
6906 break;
6907
6908 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6909 fixP->fx_r_type = (ilp32_p
6910 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6911 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6912 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6913 /* Should always be exported to object file, see
6914 aarch64_force_relocation(). */
6915 gas_assert (!fixP->fx_done);
6916 gas_assert (seg->use_rela_p);
6917 break;
6918
6919 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6920 fixP->fx_r_type = (ilp32_p
6921 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6922 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6923 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6924 /* Should always be exported to object file, see
6925 aarch64_force_relocation(). */
6926 gas_assert (!fixP->fx_done);
6927 gas_assert (seg->use_rela_p);
6928 break;
6929
6930 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6931 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6932 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6933 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6934 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6935 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6936 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6937 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6938 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6939 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6940 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6941 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6942 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6943 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6944 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6945 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
6946 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
6947 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6948 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6949 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6950 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6951 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
6952 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
6953 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
6954 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
6955 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
6956 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
6957 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
6958 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
6959 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
6960 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6961 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
6962 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
6963 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
6964 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6965 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6966 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6967 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6968 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6969 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6970 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6971 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6972 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6973 /* Should always be exported to object file, see
6974 aarch64_force_relocation(). */
6975 gas_assert (!fixP->fx_done);
6976 gas_assert (seg->use_rela_p);
6977 break;
6978
6979 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6980 /* Should always be exported to object file, see
6981 aarch64_force_relocation(). */
6982 fixP->fx_r_type = (ilp32_p
6983 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6984 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6985 gas_assert (!fixP->fx_done);
6986 gas_assert (seg->use_rela_p);
6987 break;
6988
6989 case BFD_RELOC_AARCH64_ADD_LO12:
6990 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6991 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6992 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6993 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6994 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6995 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6996 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
6997 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6998 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6999 case BFD_RELOC_AARCH64_LDST128_LO12:
7000 case BFD_RELOC_AARCH64_LDST16_LO12:
7001 case BFD_RELOC_AARCH64_LDST32_LO12:
7002 case BFD_RELOC_AARCH64_LDST64_LO12:
7003 case BFD_RELOC_AARCH64_LDST8_LO12:
7004 /* Should always be exported to object file, see
7005 aarch64_force_relocation(). */
7006 gas_assert (!fixP->fx_done);
7007 gas_assert (seg->use_rela_p);
7008 break;
7009
7010 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7011 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7012 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7013 break;
7014
7015 case BFD_RELOC_UNUSED:
7016 /* An error will already have been reported. */
7017 break;
7018
7019 default:
7020 as_bad_where (fixP->fx_file, fixP->fx_line,
7021 _("unexpected %s fixup"),
7022 bfd_get_reloc_code_name (fixP->fx_r_type));
7023 break;
7024 }
7025
7026 apply_fix_return:
7027 /* Free the allocated struct aarch64_inst.
7028 N.B. currently only a very limited number of fix-up types actually use
7029 this field, so the impact on performance should be minimal. */
7030 if (fixP->tc_fix_data.inst != NULL)
7031 free (fixP->tc_fix_data.inst);
7032
7033 return;
7034 }
7035
7036 /* Translate internal representation of relocation info to BFD target
7037 format. */
7038
7039 arelent *
7040 tc_gen_reloc (asection * section, fixS * fixp)
7041 {
7042 arelent *reloc;
7043 bfd_reloc_code_real_type code;
7044
7045 reloc = xmalloc (sizeof (arelent));
7046
7047 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
7048 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7049 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7050
7051 if (fixp->fx_pcrel)
7052 {
7053 if (section->use_rela_p)
7054 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7055 else
7056 fixp->fx_offset = reloc->address;
7057 }
7058 reloc->addend = fixp->fx_offset;
7059
7060 code = fixp->fx_r_type;
7061 switch (code)
7062 {
7063 case BFD_RELOC_16:
7064 if (fixp->fx_pcrel)
7065 code = BFD_RELOC_16_PCREL;
7066 break;
7067
7068 case BFD_RELOC_32:
7069 if (fixp->fx_pcrel)
7070 code = BFD_RELOC_32_PCREL;
7071 break;
7072
7073 case BFD_RELOC_64:
7074 if (fixp->fx_pcrel)
7075 code = BFD_RELOC_64_PCREL;
7076 break;
7077
7078 default:
7079 break;
7080 }
7081
7082 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7083 if (reloc->howto == NULL)
7084 {
7085 as_bad_where (fixp->fx_file, fixp->fx_line,
7086 _
7087 ("cannot represent %s relocation in this object file format"),
7088 bfd_get_reloc_code_name (code));
7089 return NULL;
7090 }
7091
7092 return reloc;
7093 }
7094
7095 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7096
7097 void
7098 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7099 {
7100 bfd_reloc_code_real_type type;
7101 int pcrel = 0;
7102
7103 /* Pick a reloc.
7104 FIXME: @@ Should look at CPU word size. */
7105 switch (size)
7106 {
7107 case 1:
7108 type = BFD_RELOC_8;
7109 break;
7110 case 2:
7111 type = BFD_RELOC_16;
7112 break;
7113 case 4:
7114 type = BFD_RELOC_32;
7115 break;
7116 case 8:
7117 type = BFD_RELOC_64;
7118 break;
7119 default:
7120 as_bad (_("cannot do %u-byte relocation"), size);
7121 type = BFD_RELOC_UNUSED;
7122 break;
7123 }
7124
7125 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7126 }
7127
7128 int
7129 aarch64_force_relocation (struct fix *fixp)
7130 {
7131 switch (fixp->fx_r_type)
7132 {
7133 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7134 /* Perform these "immediate" internal relocations
7135 even if the symbol is extern or weak. */
7136 return 0;
7137
7138 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7139 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7140 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7141 /* Pseudo relocs that need to be fixed up according to
7142 ilp32_p. */
7143 return 0;
7144
7145 case BFD_RELOC_AARCH64_ADD_LO12:
7146 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7147 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7148 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7149 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7150 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7151 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7152 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7153 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7154 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7155 case BFD_RELOC_AARCH64_LDST128_LO12:
7156 case BFD_RELOC_AARCH64_LDST16_LO12:
7157 case BFD_RELOC_AARCH64_LDST32_LO12:
7158 case BFD_RELOC_AARCH64_LDST64_LO12:
7159 case BFD_RELOC_AARCH64_LDST8_LO12:
7160 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7161 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7162 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7163 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7164 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7165 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7166 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7167 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7168 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7169 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7170 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7171 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7172 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7173 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7174 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7175 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7176 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7177 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7178 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7179 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7180 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7181 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7182 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7183 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7184 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7185 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7186 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7187 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7188 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7189 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7190 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7191 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7192 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7193 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7194 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7195 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7196 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7197 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7198 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7199 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7200 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7201 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7202 /* Always leave these relocations for the linker. */
7203 return 1;
7204
7205 default:
7206 break;
7207 }
7208
7209 return generic_force_reloc (fixp);
7210 }
7211
7212 #ifdef OBJ_ELF
7213
7214 const char *
7215 elf64_aarch64_target_format (void)
7216 {
7217 if (target_big_endian)
7218 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7219 else
7220 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7221 }
7222
7223 void
7224 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
7225 {
7226 elf_frob_symbol (symp, puntp);
7227 }
7228 #endif
7229
7230 /* MD interface: Finalization. */
7231
7232 /* A good place to do this, although this was probably not intended
7233 for this kind of use. We need to dump the literal pool before
7234 references are made to a null symbol pointer. */
7235
7236 void
7237 aarch64_cleanup (void)
7238 {
7239 literal_pool *pool;
7240
7241 for (pool = list_of_pools; pool; pool = pool->next)
7242 {
7243 /* Put it at the end of the relevant section. */
7244 subseg_set (pool->section, pool->sub_section);
7245 s_ltorg (0);
7246 }
7247 }
7248
7249 #ifdef OBJ_ELF
7250 /* Remove any excess mapping symbols generated for alignment frags in
7251 SEC. We may have created a mapping symbol before a zero byte
7252 alignment; remove it if there's a mapping symbol after the
7253 alignment. */
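/* Illustrative note (an assumption about the common case, not from the
   original comment): an alignment directive in a code section typically
   emits a "$d" mapping symbol ahead of its zero padding; if that padding
   turns out to be empty and the following frag already starts with its own
   "$x"/"$d" symbol at the same address, the earlier symbol is redundant and
   is discarded here.  */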
7254 static void
7255 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
7256 void *dummy ATTRIBUTE_UNUSED)
7257 {
7258 segment_info_type *seginfo = seg_info (sec);
7259 fragS *fragp;
7260
7261 if (seginfo == NULL || seginfo->frchainP == NULL)
7262 return;
7263
7264 for (fragp = seginfo->frchainP->frch_root;
7265 fragp != NULL; fragp = fragp->fr_next)
7266 {
7267 symbolS *sym = fragp->tc_frag_data.last_map;
7268 fragS *next = fragp->fr_next;
7269
7270 /* Variable-sized frags have been converted to fixed size by
7271 this point. But if this was variable-sized to start with,
7272 there will be a fixed-size frag after it. So don't handle
7273 next == NULL. */
7274 if (sym == NULL || next == NULL)
7275 continue;
7276
7277 if (S_GET_VALUE (sym) < next->fr_address)
7278 /* Not at the end of this frag. */
7279 continue;
7280 know (S_GET_VALUE (sym) == next->fr_address);
7281
7282 do
7283 {
7284 if (next->tc_frag_data.first_map != NULL)
7285 {
7286 /* Next frag starts with a mapping symbol. Discard this
7287 one. */
7288 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7289 break;
7290 }
7291
7292 if (next->fr_next == NULL)
7293 {
7294 /* This mapping symbol is at the end of the section. Discard
7295 it. */
7296 know (next->fr_fix == 0 && next->fr_var == 0);
7297 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7298 break;
7299 }
7300
7301 /* As long as we have empty frags without any mapping symbols,
7302 keep looking. */
7303 /* If the next frag is non-empty and does not start with a
7304 mapping symbol, then this mapping symbol is required. */
7305 if (next->fr_address != next->fr_next->fr_address)
7306 break;
7307
7308 next = next->fr_next;
7309 }
7310 while (next != NULL);
7311 }
7312 }
7313 #endif
7314
7315 /* Adjust the symbol table. */
7316
7317 void
7318 aarch64_adjust_symtab (void)
7319 {
7320 #ifdef OBJ_ELF
7321 /* Remove any overlapping mapping symbols generated by alignment frags. */
7322 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7323 /* Now do generic ELF adjustments. */
7324 elf_adjust_symtab ();
7325 #endif
7326 }
7327
7328 static void
7329 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7330 {
7331 const char *hash_err;
7332
7333 hash_err = hash_insert (table, key, value);
7334 if (hash_err)
7335 printf ("Internal Error: Can't hash %s\n", key);
7336 }
7337
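/* Illustrative note (not in the original sources): every opcode table entry
   sharing a mnemonic is chained onto a single "templates" list keyed by that
   mnemonic, e.g. the various "ldr" forms (literal, unsigned offset, register
   offset, ...) all hang off the one "ldr" hash entry, and md_assemble walks
   the chain via template->next until one of them encodes successfully.  */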
7338 static void
7339 fill_instruction_hash_table (void)
7340 {
7341 aarch64_opcode *opcode = aarch64_opcode_table;
7342
7343 while (opcode->name != NULL)
7344 {
7345 templates *templ, *new_templ;
7346 templ = hash_find (aarch64_ops_hsh, opcode->name);
7347
7348 new_templ = (templates *) xmalloc (sizeof (templates));
7349 new_templ->opcode = opcode;
7350 new_templ->next = NULL;
7351
7352 if (!templ)
7353 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7354 else
7355 {
7356 new_templ->next = templ->next;
7357 templ->next = new_templ;
7358 }
7359 ++opcode;
7360 }
7361 }
7362
7363 static inline void
7364 convert_to_upper (char *dst, const char *src, size_t num)
7365 {
7366 unsigned int i;
7367 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7368 *dst = TOUPPER (*src);
7369 *dst = '\0';
7370 }
7371
7372 /* Assume STR points to a lower-case string; allocate, convert and return
7373 the corresponding upper-case string. */
7374 static inline const char*
7375 get_upper_str (const char *str)
7376 {
7377 char *ret;
7378 size_t len = strlen (str);
7379 if ((ret = xmalloc (len + 1)) == NULL)
7380 abort ();
7381 convert_to_upper (ret, str, len);
7382 return ret;
7383 }
7384
7385 /* MD interface: Initialization. */
7386
7387 void
7388 md_begin (void)
7389 {
7390 unsigned mach;
7391 unsigned int i;
7392
7393 if ((aarch64_ops_hsh = hash_new ()) == NULL
7394 || (aarch64_cond_hsh = hash_new ()) == NULL
7395 || (aarch64_shift_hsh = hash_new ()) == NULL
7396 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7397 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7398 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7399 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7400 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7401 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7402 || (aarch64_reg_hsh = hash_new ()) == NULL
7403 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7404 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7405 || (aarch64_pldop_hsh = hash_new ()) == NULL)
7406 as_fatal (_("virtual memory exhausted"));
7407
7408 fill_instruction_hash_table ();
7409
7410 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7411 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7412 (void *) (aarch64_sys_regs + i));
7413
7414 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7415 checked_hash_insert (aarch64_pstatefield_hsh,
7416 aarch64_pstatefields[i].name,
7417 (void *) (aarch64_pstatefields + i));
7418
7419 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
7420 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7421 aarch64_sys_regs_ic[i].template,
7422 (void *) (aarch64_sys_regs_ic + i));
7423
7424 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
7425 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7426 aarch64_sys_regs_dc[i].template,
7427 (void *) (aarch64_sys_regs_dc + i));
7428
7429 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
7430 checked_hash_insert (aarch64_sys_regs_at_hsh,
7431 aarch64_sys_regs_at[i].template,
7432 (void *) (aarch64_sys_regs_at + i));
7433
7434 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
7435 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7436 aarch64_sys_regs_tlbi[i].template,
7437 (void *) (aarch64_sys_regs_tlbi + i));
7438
7439 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7440 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7441 (void *) (reg_names + i));
7442
7443 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7444 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7445 (void *) (nzcv_names + i));
7446
7447 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7448 {
7449 const char *name = aarch64_operand_modifiers[i].name;
7450 checked_hash_insert (aarch64_shift_hsh, name,
7451 (void *) (aarch64_operand_modifiers + i));
7452 /* Also hash the name in the upper case. */
7453 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7454 (void *) (aarch64_operand_modifiers + i));
7455 }
7456
7457 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7458 {
7459 unsigned int j;
7460 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7461 the same condition code. */
7462 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7463 {
7464 const char *name = aarch64_conds[i].names[j];
7465 if (name == NULL)
7466 break;
7467 checked_hash_insert (aarch64_cond_hsh, name,
7468 (void *) (aarch64_conds + i));
7469 /* Also hash the name in the upper case. */
7470 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7471 (void *) (aarch64_conds + i));
7472 }
7473 }
7474
7475 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7476 {
7477 const char *name = aarch64_barrier_options[i].name;
7478 /* Skip xx00 - the unallocated values of option. */
7479 if ((i & 0x3) == 0)
7480 continue;
7481 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7482 (void *) (aarch64_barrier_options + i));
7483 /* Also hash the name in the upper case. */
7484 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7485 (void *) (aarch64_barrier_options + i));
7486 }
7487
7488 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7489 {
7490 const char* name = aarch64_prfops[i].name;
7491 /* Skip the unallocated hint encodings. */
7492 if (name == NULL)
7493 continue;
7494 checked_hash_insert (aarch64_pldop_hsh, name,
7495 (void *) (aarch64_prfops + i));
7496 /* Also hash the name in the upper case. */
7497 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7498 (void *) (aarch64_prfops + i));
7499 }
7500
7501 /* Set the cpu variant based on the command-line options. */
7502 if (!mcpu_cpu_opt)
7503 mcpu_cpu_opt = march_cpu_opt;
7504
7505 if (!mcpu_cpu_opt)
7506 mcpu_cpu_opt = &cpu_default;
7507
7508 cpu_variant = *mcpu_cpu_opt;
7509
7510 /* Record the CPU type. */
7511 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7512
7513 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7514 }
7515
7516 /* Command line processing. */
7517
7518 const char *md_shortopts = "m:";
7519
7520 #ifdef AARCH64_BI_ENDIAN
7521 #define OPTION_EB (OPTION_MD_BASE + 0)
7522 #define OPTION_EL (OPTION_MD_BASE + 1)
7523 #else
7524 #if TARGET_BYTES_BIG_ENDIAN
7525 #define OPTION_EB (OPTION_MD_BASE + 0)
7526 #else
7527 #define OPTION_EL (OPTION_MD_BASE + 1)
7528 #endif
7529 #endif
7530
7531 struct option md_longopts[] = {
7532 #ifdef OPTION_EB
7533 {"EB", no_argument, NULL, OPTION_EB},
7534 #endif
7535 #ifdef OPTION_EL
7536 {"EL", no_argument, NULL, OPTION_EL},
7537 #endif
7538 {NULL, no_argument, NULL, 0}
7539 };
7540
7541 size_t md_longopts_size = sizeof (md_longopts);
7542
7543 struct aarch64_option_table
7544 {
7545 char *option; /* Option name to match. */
7546 char *help; /* Help information. */
7547 int *var; /* Variable to change. */
7548 int value; /* What to change it to. */
7549 char *deprecated; /* If non-null, print this message. */
7550 };
7551
7552 static struct aarch64_option_table aarch64_opts[] = {
7553 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7554 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7555 NULL},
7556 #ifdef DEBUG_AARCH64
7557 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7558 #endif /* DEBUG_AARCH64 */
7559 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7560 NULL},
7561 {"mno-verbose-error", N_("do not output verbose error messages"),
7562 &verbose_error_p, 0, NULL},
7563 {NULL, NULL, NULL, 0, NULL}
7564 };
7565
7566 struct aarch64_cpu_option_table
7567 {
7568 char *name;
7569 const aarch64_feature_set value;
7570 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7571 case. */
7572 const char *canonical_name;
7573 };
7574
7575 /* This list should, at a minimum, contain all the cpu names
7576 recognized by GCC. */
7577 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7578 {"all", AARCH64_ANY, NULL},
7579 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7580 AARCH64_FEATURE_CRC), "Cortex-A53"},
7581 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7582 AARCH64_FEATURE_CRC), "Cortex-A57"},
7583 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7584 AARCH64_FEATURE_CRC), "Cortex-A72"},
7585 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
7586 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7587 "Samsung Exynos M1"},
7588 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7589 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7590 "Cavium ThunderX"},
7591 /* The 'xgene-1' name is an older name for 'xgene1', which was used
7592 in earlier releases and is superseded by 'xgene1' in all
7593 tools. */
7594 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7595 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7596 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7597 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7598 {"generic", AARCH64_ARCH_V8, NULL},
7599
7600 {NULL, AARCH64_ARCH_NONE, NULL}
7601 };
7602
7603 struct aarch64_arch_option_table
7604 {
7605 char *name;
7606 const aarch64_feature_set value;
7607 };
7608
7609 /* This list should, at a minimum, contain all the architecture names
7610 recognized by GCC. */
7611 static const struct aarch64_arch_option_table aarch64_archs[] = {
7612 {"all", AARCH64_ANY},
7613 {"armv8-a", AARCH64_ARCH_V8},
7614 {"armv8.1-a", AARCH64_ARCH_V8_1},
7615 {NULL, AARCH64_ARCH_NONE}
7616 };
7617
7618 /* ISA extensions. */
7619 struct aarch64_option_cpu_value_table
7620 {
7621 char *name;
7622 const aarch64_feature_set value;
7623 };
7624
7625 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7626 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7627 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7628 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7629 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
7630 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7631 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0)},
7632 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0)},
7633 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_SIMD
7634 | AARCH64_FEATURE_RDMA, 0)},
7635 {NULL, AARCH64_ARCH_NONE}
7636 };
7637
7638 struct aarch64_long_option_table
7639 {
7640 char *option; /* Substring to match. */
7641 char *help; /* Help information. */
7642 int (*func) (char *subopt); /* Function to decode sub-option. */
7643 char *deprecated; /* If non-null, print this message. */
7644 };
7645
7646 static int
7647 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p,
7648 bfd_boolean ext_only)
7649 {
7650 /* We insist on extensions being added before being removed. We achieve
7651 this by using the ADDING_VALUE variable to indicate whether we are
7652 adding an extension (1) or removing it (0) and only allowing it to
7653 change in the order -1 -> 1 -> 0. */
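  /* Illustrative example (hypothetical command line): for
     "-mcpu=cortex-a57+crc+nosimd" the loop below first merges the CRC
     feature bits and then clears the SIMD bits, whereas
     "-mcpu=cortex-a57+nosimd+crc" is rejected because an extension is added
     after one has already been removed.  */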
7654 int adding_value = -1;
7655 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7656
7657 /* Copy the feature set, so that we can modify it. */
7658 *ext_set = **opt_p;
7659 *opt_p = ext_set;
7660
7661 while (str != NULL && *str != 0)
7662 {
7663 const struct aarch64_option_cpu_value_table *opt;
7664 char *ext = NULL;
7665 int optlen;
7666
7667 if (!ext_only)
7668 {
7669 if (*str != '+')
7670 {
7671 as_bad (_("invalid architectural extension"));
7672 return 0;
7673 }
7674
7675 ext = strchr (++str, '+');
7676 }
7677
7678 if (ext != NULL)
7679 optlen = ext - str;
7680 else
7681 optlen = strlen (str);
7682
7683 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7684 {
7685 if (adding_value != 0)
7686 adding_value = 0;
7687 optlen -= 2;
7688 str += 2;
7689 }
7690 else if (optlen > 0)
7691 {
7692 if (adding_value == -1)
7693 adding_value = 1;
7694 else if (adding_value != 1)
7695 {
7696 as_bad (_("must specify extensions to add before specifying "
7697 "those to remove"));
7698 return FALSE;
7699 }
7700 }
7701
7702 if (optlen == 0)
7703 {
7704 as_bad (_("missing architectural extension"));
7705 return 0;
7706 }
7707
7708 gas_assert (adding_value != -1);
7709
7710 for (opt = aarch64_features; opt->name != NULL; opt++)
7711 if (strncmp (opt->name, str, optlen) == 0)
7712 {
7713 /* Add or remove the extension. */
7714 if (adding_value)
7715 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7716 else
7717 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7718 break;
7719 }
7720
7721 if (opt->name == NULL)
7722 {
7723 as_bad (_("unknown architectural extension `%s'"), str);
7724 return 0;
7725 }
7726
7727 str = ext;
7728 }
7729
7730 return 1;
7731 }
7732
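/* Handle -mcpu=<cpu>[+ext...], e.g. "-mcpu=xgene1+crc": the CPU name
   is looked up in aarch64_cpus, mcpu_cpu_opt is pointed at its
   feature set, and any "+ext" suffix is handed to
   aarch64_parse_features.  Returns non-zero on success.  */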
7733 static int
7734 aarch64_parse_cpu (char *str)
7735 {
7736 const struct aarch64_cpu_option_table *opt;
7737 char *ext = strchr (str, '+');
7738 size_t optlen;
7739
7740 if (ext != NULL)
7741 optlen = ext - str;
7742 else
7743 optlen = strlen (str);
7744
7745 if (optlen == 0)
7746 {
7747 as_bad (_("missing cpu name `%s'"), str);
7748 return 0;
7749 }
7750
7751 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7752 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7753 {
7754 mcpu_cpu_opt = &opt->value;
7755 if (ext != NULL)
7756 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7757
7758 return 1;
7759 }
7760
7761 as_bad (_("unknown cpu `%s'"), str);
7762 return 0;
7763 }
7764
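/* Handle -march=<arch>[+ext...], e.g. "-march=armv8.1-a+lse": the
   architecture name is looked up in aarch64_archs and march_cpu_opt
   is pointed at its feature set.  Returns non-zero on success.  */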
7765 static int
7766 aarch64_parse_arch (char *str)
7767 {
7768 const struct aarch64_arch_option_table *opt;
7769 char *ext = strchr (str, '+');
7770 size_t optlen;
7771
7772 if (ext != NULL)
7773 optlen = ext - str;
7774 else
7775 optlen = strlen (str);
7776
7777 if (optlen == 0)
7778 {
7779 as_bad (_("missing architecture name `%s'"), str);
7780 return 0;
7781 }
7782
7783 for (opt = aarch64_archs; opt->name != NULL; opt++)
7784 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7785 {
7786 march_cpu_opt = &opt->value;
7787 if (ext != NULL)
7788 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
7789
7790 return 1;
7791 }
7792
7793 as_bad (_("unknown architecture `%s'"), str);
7794 return 0;
7795 }
7796
7797 /* ABIs. */
7798 struct aarch64_option_abi_value_table
7799 {
7800 char *name;
7801 enum aarch64_abi_type value;
7802 };
7803
7804 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7805 {"ilp32", AARCH64_ABI_ILP32},
7806 {"lp64", AARCH64_ABI_LP64},
7807 {NULL, 0}
7808 };
7809
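/* Handle -mabi=<abi>, where <abi> is "lp64" or "ilp32"; the chosen
   value is recorded in aarch64_abi.  Returns non-zero on success.  */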
7810 static int
7811 aarch64_parse_abi (char *str)
7812 {
7813 const struct aarch64_option_abi_value_table *opt;
7814 size_t optlen = strlen (str);
7815
7816 if (optlen == 0)
7817 {
7818 as_bad (_("missing abi name `%s'"), str);
7819 return 0;
7820 }
7821
7822 for (opt = aarch64_abis; opt->name != NULL; opt++)
7823 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7824 {
7825 aarch64_abi = opt->value;
7826 return 1;
7827 }
7828
7829 as_bad (_("unknown abi `%s'"), str);
7830 return 0;
7831 }
7832
7833 static struct aarch64_long_option_table aarch64_long_opts[] = {
7834 #ifdef OBJ_ELF
7835 {"mabi=", N_("<abi name>\t specify ABI <abi name>"),
7836 aarch64_parse_abi, NULL},
7837 #endif /* OBJ_ELF */
7838 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7839 aarch64_parse_cpu, NULL},
7840 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7841 aarch64_parse_arch, NULL},
7842 {NULL, NULL, 0, NULL}
7843 };
7844
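/* Target hook called by the GAS option parser: handle the
   AArch64-specific option C with argument ARG.  Single-character
   options come from aarch64_opts, prefix options such as -mcpu=,
   -march= and -mabi= from aarch64_long_opts.  Returns non-zero if the
   option was recognized.  */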
7845 int
7846 md_parse_option (int c, char *arg)
7847 {
7848 struct aarch64_option_table *opt;
7849 struct aarch64_long_option_table *lopt;
7850
7851 switch (c)
7852 {
7853 #ifdef OPTION_EB
7854 case OPTION_EB:
7855 target_big_endian = 1;
7856 break;
7857 #endif
7858
7859 #ifdef OPTION_EL
7860 case OPTION_EL:
7861 target_big_endian = 0;
7862 break;
7863 #endif
7864
7865 case 'a':
7866 /* Listing option. Just ignore it; we don't support any additional
7867 listing options. */
7868 return 0;
7869
7870 default:
7871 for (opt = aarch64_opts; opt->option != NULL; opt++)
7872 {
7873 if (c == opt->option[0]
7874 && ((arg == NULL && opt->option[1] == 0)
7875 || streq (arg, opt->option + 1)))
7876 {
7877 /* If the option is deprecated, tell the user. */
7878 if (opt->deprecated != NULL)
7879 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7880 arg ? arg : "", _(opt->deprecated));
7881
7882 if (opt->var != NULL)
7883 *opt->var = opt->value;
7884
7885 return 1;
7886 }
7887 }
7888
7889 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7890 {
7891 /* These options are expected to have an argument. */
7892 if (c == lopt->option[0]
7893 && arg != NULL
7894 && strncmp (arg, lopt->option + 1,
7895 strlen (lopt->option + 1)) == 0)
7896 {
7897 /* If the option is deprecated, tell the user. */
7898 if (lopt->deprecated != NULL)
7899 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7900 _(lopt->deprecated));
7901
7902 /* Call the sub-option parser. */
7903 return lopt->func (arg + strlen (lopt->option) - 1);
7904 }
7905 }
7906
7907 return 0;
7908 }
7909
7910 return 1;
7911 }
7912
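/* Target hook: print the AArch64-specific option summary shown by
   "as --help" to FP.  */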
7913 void
7914 md_show_usage (FILE * fp)
7915 {
7916 struct aarch64_option_table *opt;
7917 struct aarch64_long_option_table *lopt;
7918
7919 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7920
7921 for (opt = aarch64_opts; opt->option != NULL; opt++)
7922 if (opt->help != NULL)
7923 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7924
7925 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7926 if (lopt->help != NULL)
7927 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7928
7929 #ifdef OPTION_EB
7930 fprintf (fp, _("\
7931 -EB assemble code for a big-endian cpu\n"));
7932 #endif
7933
7934 #ifdef OPTION_EL
7935 fprintf (fp, _("\
7936 -EL assemble code for a little-endian cpu\n"));
7937 #endif
7938 }
7939
7940 /* Parse a .cpu directive. */
7941
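/* For example, ".cpu xgene1+crc" selects the X-Gene 1 feature set
   plus the CRC extension for the code that follows.  */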
7942 static void
7943 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7944 {
7945 const struct aarch64_cpu_option_table *opt;
7946 char saved_char;
7947 char *name;
7948 char *ext;
7949 size_t optlen;
7950
7951 name = input_line_pointer;
7952 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7953 input_line_pointer++;
7954 saved_char = *input_line_pointer;
7955 *input_line_pointer = 0;
7956
7957 ext = strchr (name, '+');
7958
7959 if (ext != NULL)
7960 optlen = ext - name;
7961 else
7962 optlen = strlen (name);
7963
7964 /* Skip the first "all" entry. */
7965 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7966 if (strlen (opt->name) == optlen
7967 && strncmp (name, opt->name, optlen) == 0)
7968 {
7969 mcpu_cpu_opt = &opt->value;
7970 if (ext != NULL)
7971 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7972 return;
7973
7974 cpu_variant = *mcpu_cpu_opt;
7975
7976 *input_line_pointer = saved_char;
7977 demand_empty_rest_of_line ();
7978 return;
7979 }
7980 as_bad (_("unknown cpu `%s'"), name);
7981 *input_line_pointer = saved_char;
7982 ignore_rest_of_line ();
7983 }
7984
7985
7986 /* Parse a .arch directive. */
7987
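/* For example, ".arch armv8-a+crc" replaces the current feature set
   with ARMv8-A plus the CRC extension.  */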
7988 static void
7989 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7990 {
7991 const struct aarch64_arch_option_table *opt;
7992 char saved_char;
7993 char *name;
7994 char *ext;
7995 size_t optlen;
7996
7997 name = input_line_pointer;
7998 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7999 input_line_pointer++;
8000 saved_char = *input_line_pointer;
8001 *input_line_pointer = 0;
8002
8003 ext = strchr (name, '+');
8004
8005 if (ext != NULL)
8006 optlen = ext - name;
8007 else
8008 optlen = strlen (name);
8009
8010 /* Skip the first "all" entry. */
8011 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8012 if (strlen (opt->name) == optlen
8013 && strncmp (name, opt->name, optlen) == 0)
8014 {
8015 mcpu_cpu_opt = &opt->value;
8016 if (ext != NULL)
8017 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8018 return;
8019
8020 cpu_variant = *mcpu_cpu_opt;
8021
8022 *input_line_pointer = saved_char;
8023 demand_empty_rest_of_line ();
8024 return;
8025 }
8026
8027 as_bad (_("unknown architecture `%s'"), name);
8028 *input_line_pointer = saved_char;
8029 ignore_rest_of_line ();
8030 }
8031
8032 /* Parse a .arch_extension directive. */
8033
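/* Unlike .cpu and .arch, the argument is a single bare extension
   name, e.g. ".arch_extension crc" or ".arch_extension nocrypto",
   applied on top of the currently selected feature set.  */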
8034 static void
8035 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8036 {
8037 char saved_char;
8038 char *ext = input_line_pointer;
8039
8040 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8041 input_line_pointer++;
8042 saved_char = *input_line_pointer;
8043 *input_line_pointer = 0;
8044
8045 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8046 return;
8047
8048 cpu_variant = *mcpu_cpu_opt;
8049
8050 *input_line_pointer = saved_char;
8051 demand_empty_rest_of_line ();
8052 }
8053
8054 /* Copy symbol information. */
8055
8056 void
8057 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
8058 {
8059 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
8060 }