[GAS][AARCH64] Add BFD_RELOC_AARCH64_TLSGD_MOVW_G1 support.
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum neon_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q
87 };
88
89 /* Bits for DEFINED field in neon_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92
93 struct neon_type_el
94 {
95 enum neon_el_type type;
96 unsigned char defined;
97 unsigned width;
98 int64_t index;
99 };
100
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
102
103 struct reloc
104 {
105 bfd_reloc_code_real_type type;
106 expressionS exp;
107 int pc_rel;
108 enum aarch64_opnd opnd;
109 uint32_t flags;
110 unsigned need_libopcodes_p : 1;
111 };
112
113 struct aarch64_instruction
114 {
115 /* libopcodes structure for instruction intermediate representation. */
116 aarch64_inst base;
117 /* Record assembly errors found during the parsing. */
118 struct
119 {
120 enum aarch64_operand_error_kind kind;
121 const char *error;
122 } parsing_error;
123 /* The condition that appears in the assembly line. */
124 int cond;
125 /* Relocation information (including the GAS internal fixup). */
126 struct reloc reloc;
127 /* Need to generate an immediate in the literal pool. */
128 unsigned gen_lit_pool : 1;
129 };
130
131 typedef struct aarch64_instruction aarch64_instruction;
132
133 static aarch64_instruction inst;
134
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
138 /* Diagnostics inline function utilities.
139
140 These are lightweight utilities which should only be called by parse_operands
141 and other parsers. GAS processes each assembly line by parsing it against
142 instruction template(s), in the case of multiple templates (for the same
143 mnemonic name), those templates are tried one by one until one succeeds or
144 all fail. An assembly line may fail a few templates before being
145 successfully parsed; an error saved here in most cases is not a user error
146 but an error indicating the current template is not the right template.
147 Therefore it is very important that errors can be saved at a low cost during
148 the parsing; we don't want to slow down the whole parsing by recording
149 non-user errors in detail.
150
151 Remember that the objective is to help GAS pick up the most appropriate
152 error message in the case of multiple templates, e.g. FMOV which has 8
153 templates. */
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline enum aarch64_operand_error_kind
175 get_error_kind (void)
176 {
177 return inst.parsing_error.kind;
178 }
179
180 static inline void
181 set_error (enum aarch64_operand_error_kind kind, const char *error)
182 {
183 inst.parsing_error.kind = kind;
184 inst.parsing_error.error = error;
185 }
186
187 static inline void
188 set_recoverable_error (const char *error)
189 {
190 set_error (AARCH64_OPDE_RECOVERABLE, error);
191 }
192
193 /* Use the DESC field of the corresponding aarch64_operand entry to compose
194 the error message. */
195 static inline void
196 set_default_error (void)
197 {
198 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
199 }
200
201 static inline void
202 set_syntax_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
205 }
206
207 static inline void
208 set_first_syntax_error (const char *error)
209 {
210 if (! error_p ())
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
212 }
213
214 static inline void
215 set_fatal_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
218 }
219 \f
220 /* Number of littlenums required to hold an extended precision number. */
221 #define MAX_LITTLENUMS 6
222
223 /* Return value for certain parsers when the parsing fails; those parsers
224 return the information of the parsed result, e.g. register number, on
225 success. */
226 #define PARSE_FAIL -1
227
228 /* This is an invalid condition code that means no conditional field is
229 present. */
230 #define COND_ALWAYS 0x10
231
232 typedef struct
233 {
234 const char *template;
235 unsigned long value;
236 } asm_barrier_opt;
237
238 typedef struct
239 {
240 const char *template;
241 uint32_t value;
242 } asm_nzcv;
243
244 struct reloc_entry
245 {
246 char *name;
247 bfd_reloc_code_real_type reloc;
248 };
249
250 /* Structure for a hash table entry for a register. */
251 typedef struct
252 {
253 const char *name;
254 unsigned char number;
255 unsigned char type;
256 unsigned char builtin;
257 } reg_entry;
258
259 /* Macros to define the register types and masks for the purpose
260 of parsing. */
261
262 #undef AARCH64_REG_TYPES
263 #define AARCH64_REG_TYPES \
264 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
265 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
266 BASIC_REG_TYPE(SP_32) /* wsp */ \
267 BASIC_REG_TYPE(SP_64) /* sp */ \
268 BASIC_REG_TYPE(Z_32) /* wzr */ \
269 BASIC_REG_TYPE(Z_64) /* xzr */ \
270 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
271 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
272 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
273 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
274 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
275 BASIC_REG_TYPE(CN) /* c[0-7] */ \
276 BASIC_REG_TYPE(VN) /* v[0-31] */ \
277 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
278 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
279 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
280 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
281 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
282 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
283 /* Typecheck: any [BHSDQ]P FP. */ \
284 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
285 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
286 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
287 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
289 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
290 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
291 /* Any integer register; used for error messages only. */ \
292 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
293 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Pseudo type to mark the end of the enumerator sequence. */ \
296 BASIC_REG_TYPE(MAX)
297
298 #undef BASIC_REG_TYPE
299 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
300 #undef MULTI_REG_TYPE
301 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
302
303 /* Register type enumerators. */
304 typedef enum
305 {
306 /* A list of REG_TYPE_*. */
307 AARCH64_REG_TYPES
308 } aarch64_reg_type;
309
310 #undef BASIC_REG_TYPE
311 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
312 #undef REG_TYPE
313 #define REG_TYPE(T) (1 << REG_TYPE_##T)
314 #undef MULTI_REG_TYPE
315 #define MULTI_REG_TYPE(T,V) V,
316
317 /* Values indexed by aarch64_reg_type to assist the type checking. */
318 static const unsigned reg_type_masks[] =
319 {
320 AARCH64_REG_TYPES
321 };
322
323 #undef BASIC_REG_TYPE
324 #undef REG_TYPE
325 #undef MULTI_REG_TYPE
326 #undef AARCH64_REG_TYPES
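/* To illustrate the X-macro expansion above: the first set of definitions
   expands AARCH64_REG_TYPES into the enumerators REG_TYPE_R_32, ...,
   REG_TYPE_MAX, while the second set expands it into the bit masks, e.g.
   reg_type_masks[REG_TYPE_R_Z_SP] == (1 << REG_TYPE_R_32)
   | (1 << REG_TYPE_R_64) | (1 << REG_TYPE_SP_32) | (1 << REG_TYPE_SP_64)
   | (1 << REG_TYPE_Z_32) | (1 << REG_TYPE_Z_64); aarch64_check_reg_type
   below relies on these masks.  */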
327
328 /* Diagnostics used when we don't get a register of the expected type.
329 Note: this has to be synchronized with aarch64_reg_type definitions
330 above. */
331 static const char *
332 get_reg_expected_msg (aarch64_reg_type reg_type)
333 {
334 const char *msg;
335
336 switch (reg_type)
337 {
338 case REG_TYPE_R_32:
339 msg = N_("integer 32-bit register expected");
340 break;
341 case REG_TYPE_R_64:
342 msg = N_("integer 64-bit register expected");
343 break;
344 case REG_TYPE_R_N:
345 msg = N_("integer register expected");
346 break;
347 case REG_TYPE_R_Z_SP:
348 msg = N_("integer, zero or SP register expected");
349 break;
350 case REG_TYPE_FP_B:
351 msg = N_("8-bit SIMD scalar register expected");
352 break;
353 case REG_TYPE_FP_H:
354 msg = N_("16-bit SIMD scalar or floating-point half precision "
355 "register expected");
356 break;
357 case REG_TYPE_FP_S:
358 msg = N_("32-bit SIMD scalar or floating-point single precision "
359 "register expected");
360 break;
361 case REG_TYPE_FP_D:
362 msg = N_("64-bit SIMD scalar or floating-point double precision "
363 "register expected");
364 break;
365 case REG_TYPE_FP_Q:
366 msg = N_("128-bit SIMD scalar or floating-point quad precision "
367 "register expected");
368 break;
369 case REG_TYPE_CN:
370 msg = N_("C0 - C15 expected");
371 break;
372 case REG_TYPE_R_Z_BHSDQ_V:
373 msg = N_("register expected");
374 break;
375 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
376 msg = N_("SIMD scalar or floating-point register expected");
377 break;
378 case REG_TYPE_VN: /* any V reg */
379 msg = N_("vector register expected");
380 break;
381 default:
382 as_fatal (_("invalid register type %d"), reg_type);
383 }
384 return msg;
385 }
386
387 /* Some well known registers that we refer to directly elsewhere. */
388 #define REG_SP 31
389
390 /* Instructions take 4 bytes in the object file. */
391 #define INSN_SIZE 4
392
393 /* Define some common error messages. */
394 #define BAD_SP _("SP not allowed here")
395
396 static struct hash_control *aarch64_ops_hsh;
397 static struct hash_control *aarch64_cond_hsh;
398 static struct hash_control *aarch64_shift_hsh;
399 static struct hash_control *aarch64_sys_regs_hsh;
400 static struct hash_control *aarch64_pstatefield_hsh;
401 static struct hash_control *aarch64_sys_regs_ic_hsh;
402 static struct hash_control *aarch64_sys_regs_dc_hsh;
403 static struct hash_control *aarch64_sys_regs_at_hsh;
404 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
405 static struct hash_control *aarch64_reg_hsh;
406 static struct hash_control *aarch64_barrier_opt_hsh;
407 static struct hash_control *aarch64_nzcv_hsh;
408 static struct hash_control *aarch64_pldop_hsh;
409
410 /* Stuff needed to resolve the label ambiguity
411 As:
412 ...
413 label: <insn>
414 may differ from:
415 ...
416 label:
417 <insn> */
418
419 static symbolS *last_label_seen;
420
421 /* Literal pool structure. Held on a per-section
422 and per-sub-section basis. */
423
424 #define MAX_LITERAL_POOL_SIZE 1024
425 typedef struct literal_expression
426 {
427 expressionS exp;
428 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
429 LITTLENUM_TYPE * bignum;
430 } literal_expression;
431
432 typedef struct literal_pool
433 {
434 literal_expression literals[MAX_LITERAL_POOL_SIZE];
435 unsigned int next_free_entry;
436 unsigned int id;
437 symbolS *symbol;
438 segT section;
439 subsegT sub_section;
440 int size;
441 struct literal_pool *next;
442 } literal_pool;
443
444 /* Pointer to a linked list of literal pools. */
445 static literal_pool *list_of_pools = NULL;
446 \f
447 /* Pure syntax. */
448
449 /* This array holds the chars that always start a comment. If the
450 pre-processor is disabled, these aren't very useful. */
451 const char comment_chars[] = "";
452
453 /* This array holds the chars that only start a comment at the beginning of
454 a line. If the line seems to have the form '# 123 filename'
455 .line and .file directives will appear in the pre-processed output. */
456 /* Note that input_file.c hand checks for '#' at the beginning of the
457 first line of the input file. This is because the compiler outputs
458 #NO_APP at the beginning of its output. */
459 /* Also note that comments like this one will always work. */
460 const char line_comment_chars[] = "#";
461
462 const char line_separator_chars[] = ";";
463
464 /* Chars that can be used to separate mant
465 from exp in floating point numbers. */
466 const char EXP_CHARS[] = "eE";
467
468 /* Chars that mean this number is a floating point constant. */
469 /* As in 0f12.456 */
470 /* or 0d1.2345e12 */
471
472 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
473
474 /* Prefix character that indicates the start of an immediate value. */
475 #define is_immediate_prefix(C) ((C) == '#')
476
477 /* Separator character handling. */
478
479 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
480
481 static inline bfd_boolean
482 skip_past_char (char **str, char c)
483 {
484 if (**str == c)
485 {
486 (*str)++;
487 return TRUE;
488 }
489 else
490 return FALSE;
491 }
492
493 #define skip_past_comma(str) skip_past_char (str, ',')
494
495 /* Arithmetic expressions (possibly involving symbols). */
496
497 static bfd_boolean in_my_get_expression_p = FALSE;
498
499 /* Third argument to my_get_expression. */
500 #define GE_NO_PREFIX 0
501 #define GE_OPT_PREFIX 1
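/* GE_NO_PREFIX: no immediate prefix '#' is expected or consumed.
   GE_OPT_PREFIX: a leading '#' is permitted and is skipped if present
   before the expression is parsed.  */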
502
503 /* Return TRUE if the string pointed to by *STR is successfully parsed
504 as a valid expression; *EP will be filled with the information of
505 such an expression. Otherwise return FALSE. */
506
507 static bfd_boolean
508 my_get_expression (expressionS * ep, char **str, int prefix_mode,
509 int reject_absent)
510 {
511 char *save_in;
512 segT seg;
513 int prefix_present_p = 0;
514
515 switch (prefix_mode)
516 {
517 case GE_NO_PREFIX:
518 break;
519 case GE_OPT_PREFIX:
520 if (is_immediate_prefix (**str))
521 {
522 (*str)++;
523 prefix_present_p = 1;
524 }
525 break;
526 default:
527 abort ();
528 }
529
530 memset (ep, 0, sizeof (expressionS));
531
532 save_in = input_line_pointer;
533 input_line_pointer = *str;
534 in_my_get_expression_p = TRUE;
535 seg = expression (ep);
536 in_my_get_expression_p = FALSE;
537
538 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
539 {
540 /* We found a bad expression in md_operand(). */
541 *str = input_line_pointer;
542 input_line_pointer = save_in;
543 if (prefix_present_p && ! error_p ())
544 set_fatal_syntax_error (_("bad expression"));
545 else
546 set_first_syntax_error (_("bad expression"));
547 return FALSE;
548 }
549
550 #ifdef OBJ_AOUT
551 if (seg != absolute_section
552 && seg != text_section
553 && seg != data_section
554 && seg != bss_section && seg != undefined_section)
555 {
556 set_syntax_error (_("bad segment"));
557 *str = input_line_pointer;
558 input_line_pointer = save_in;
559 return FALSE;
560 }
561 #else
562 (void) seg;
563 #endif
564
565 *str = input_line_pointer;
566 input_line_pointer = save_in;
567 return TRUE;
568 }
569
570 /* Turn a string in input_line_pointer into a floating point constant
571 of type TYPE, and store the appropriate bytes in *LITP. The number
572 of LITTLENUMS emitted is stored in *SIZEP. An error message is
573 returned, or NULL on OK. */
574
575 char *
576 md_atof (int type, char *litP, int *sizeP)
577 {
578 return ieee_md_atof (type, litP, sizeP, target_big_endian);
579 }
580
581 /* We handle all bad expressions here, so that we can report the faulty
582 instruction in the error message. */
583 void
584 md_operand (expressionS * exp)
585 {
586 if (in_my_get_expression_p)
587 exp->X_op = O_illegal;
588 }
589
590 /* Immediate values. */
591
592 /* Errors may be set multiple times during parsing or bit encoding
593 (particularly in the Neon bits), but usually the earliest error which is set
594 will be the most meaningful. Avoid overwriting it with later (cascading)
595 errors by calling this function. */
596
597 static void
598 first_error (const char *error)
599 {
600 if (! error_p ())
601 set_syntax_error (error);
602 }
603
604 /* Similar to first_error, but this function accepts a formatted error
605 message. */
606 static void
607 first_error_fmt (const char *format, ...)
608 {
609 va_list args;
610 enum
611 { size = 100 };
612 /* N.B. this single buffer will not cause error messages for different
613 instructions to pollute each other; this is because at the end of
614 processing of each assembly line, the error message, if any, will be
615 collected by as_bad. */
616 static char buffer[size];
617
618 if (! error_p ())
619 {
620 int ret ATTRIBUTE_UNUSED;
621 va_start (args, format);
622 ret = vsnprintf (buffer, size, format, args);
623 know (ret <= size - 1 && ret >= 0);
624 va_end (args);
625 set_syntax_error (buffer);
626 }
627 }
628
629 /* Register parsing. */
630
631 /* Generic register parser which is called by other specialized
632 register parsers.
633 CCP points to what should be the beginning of a register name.
634 If it is indeed a valid register name, advance CCP over it and
635 return the reg_entry structure; otherwise return NULL.
636 It does not issue diagnostics. */
637
638 static reg_entry *
639 parse_reg (char **ccp)
640 {
641 char *start = *ccp;
642 char *p;
643 reg_entry *reg;
644
645 #ifdef REGISTER_PREFIX
646 if (*start != REGISTER_PREFIX)
647 return NULL;
648 start++;
649 #endif
650
651 p = start;
652 if (!ISALPHA (*p) || !is_name_beginner (*p))
653 return NULL;
654
655 do
656 p++;
657 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
658
659 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
660
661 if (!reg)
662 return NULL;
663
664 *ccp = p;
665 return reg;
666 }
667
668 /* Return TRUE if REG->TYPE is compatible with the required type TYPE; otherwise
669 return FALSE. */
670 static bfd_boolean
671 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
672 {
673 if (reg->type == type)
674 return TRUE;
675
676 switch (type)
677 {
678 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
679 case REG_TYPE_R_Z_SP: /* Integer reg (inc {W}SP inc [WX]ZR). */
680 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
681 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
682 case REG_TYPE_VN: /* Vector register. */
683 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
684 return ((reg_type_masks[reg->type] & reg_type_masks[type])
685 == reg_type_masks[reg->type]);
686 default:
687 as_fatal ("unhandled type %d", type);
688 abort ();
689 }
690 }
691
692 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
693 Return the register number otherwise. *ISREG32 is set to one if the
694 register is 32-bit wide; *ISREGZERO is set to one if the register is
695 of type Z_32 or Z_64.
696 Note that this function does not issue any diagnostics. */
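/* For example, parsing "w3" returns 3 with *ISREG32 == 1 and *ISREGZERO == 0,
   while parsing "sp" (with REJECT_SP clear) returns 31 with *ISREG32 == 0 and
   *ISREGZERO == 0.  */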
697
698 static int
699 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
700 int *isreg32, int *isregzero)
701 {
702 char *str = *ccp;
703 const reg_entry *reg = parse_reg (&str);
704
705 if (reg == NULL)
706 return PARSE_FAIL;
707
708 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
709 return PARSE_FAIL;
710
711 switch (reg->type)
712 {
713 case REG_TYPE_SP_32:
714 case REG_TYPE_SP_64:
715 if (reject_sp)
716 return PARSE_FAIL;
717 *isreg32 = reg->type == REG_TYPE_SP_32;
718 *isregzero = 0;
719 break;
720 case REG_TYPE_R_32:
721 case REG_TYPE_R_64:
722 *isreg32 = reg->type == REG_TYPE_R_32;
723 *isregzero = 0;
724 break;
725 case REG_TYPE_Z_32:
726 case REG_TYPE_Z_64:
727 if (reject_rz)
728 return PARSE_FAIL;
729 *isreg32 = reg->type == REG_TYPE_Z_32;
730 *isregzero = 1;
731 break;
732 default:
733 return PARSE_FAIL;
734 }
735
736 *ccp = str;
737
738 return reg->number;
739 }
740
741 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
742 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
743 otherwise return FALSE.
744
745 Accept only one occurrence of:
746 8b 16b 4h 8h 2s 4s 1d 2d
747 b h s d q */
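/* For example, "4s" fills in *PARSED_TYPE as { type = NT_s, width = 4 }
   (a 4 x 32-bit arrangement, 128 bits in total), whereas a bare "d" yields
   { type = NT_d, width = 0 }, i.e. an element size with no vector width.  */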
748 static bfd_boolean
749 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
750 {
751 char *ptr = *str;
752 unsigned width;
753 unsigned element_size;
754 enum neon_el_type type;
755
756 /* skip '.' */
757 ptr++;
758
759 if (!ISDIGIT (*ptr))
760 {
761 width = 0;
762 goto elt_size;
763 }
764 width = strtoul (ptr, &ptr, 10);
765 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
766 {
767 first_error_fmt (_("bad size %d in vector width specifier"), width);
768 return FALSE;
769 }
770
771 elt_size:
772 switch (TOLOWER (*ptr))
773 {
774 case 'b':
775 type = NT_b;
776 element_size = 8;
777 break;
778 case 'h':
779 type = NT_h;
780 element_size = 16;
781 break;
782 case 's':
783 type = NT_s;
784 element_size = 32;
785 break;
786 case 'd':
787 type = NT_d;
788 element_size = 64;
789 break;
790 case 'q':
791 if (width == 1)
792 {
793 type = NT_q;
794 element_size = 128;
795 break;
796 }
797 /* fall through. */
798 default:
799 if (*ptr != '\0')
800 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
801 else
802 first_error (_("missing element size"));
803 return FALSE;
804 }
805 if (width != 0 && width * element_size != 64 && width * element_size != 128)
806 {
807 first_error_fmt (_
808 ("invalid element size %d and vector size combination %c"),
809 width, *ptr);
810 return FALSE;
811 }
812 ptr++;
813
814 parsed_type->type = type;
815 parsed_type->width = width;
816
817 *str = ptr;
818
819 return TRUE;
820 }
821
822 /* Parse a single type, e.g. ".8b", leading period included.
823 Only applicable to Vn registers.
824
825 Return TRUE on success; otherwise return FALSE. */
826 static bfd_boolean
827 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
828 {
829 char *str = *ccp;
830
831 if (*str == '.')
832 {
833 if (! parse_neon_type_for_operand (vectype, &str))
834 {
835 first_error (_("vector type expected"));
836 return FALSE;
837 }
838 }
839 else
840 return FALSE;
841
842 *ccp = str;
843
844 return TRUE;
845 }
846
847 /* Parse a register of the type TYPE.
848
849 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
850 name or the parsed register is not of TYPE.
851
852 Otherwise return the register number, and optionally fill in the actual
853 type of the register in *RTYPE when multiple alternatives were given, and
854 return the register shape and element index information in *TYPEINFO.
855
856 IN_REG_LIST should be set with TRUE if the caller is parsing a register
857 list. */
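/* For example, with TYPE == REG_TYPE_VN, parsing "v17.4s[1]" returns 17 and
   fills in *TYPEINFO with { type = NT_s, width = 4, index = 1 } and both
   NTA_HASTYPE and NTA_HASINDEX set in the DEFINED field.  */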
858
859 static int
860 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
861 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
862 {
863 char *str = *ccp;
864 const reg_entry *reg = parse_reg (&str);
865 struct neon_type_el atype;
866 struct neon_type_el parsetype;
867 bfd_boolean is_typed_vecreg = FALSE;
868
869 atype.defined = 0;
870 atype.type = NT_invtype;
871 atype.width = -1;
872 atype.index = 0;
873
874 if (reg == NULL)
875 {
876 if (typeinfo)
877 *typeinfo = atype;
878 set_default_error ();
879 return PARSE_FAIL;
880 }
881
882 if (! aarch64_check_reg_type (reg, type))
883 {
884 DEBUG_TRACE ("reg type check failed");
885 set_default_error ();
886 return PARSE_FAIL;
887 }
888 type = reg->type;
889
890 if (type == REG_TYPE_VN
891 && parse_neon_operand_type (&parsetype, &str))
892 {
893 /* Register is of the form Vn.[bhsdq]. */
894 is_typed_vecreg = TRUE;
895
896 if (parsetype.width == 0)
897 /* Expect index. In the new scheme we cannot have
898 Vn.[bhsdq] represent a scalar. Therefore any
899 Vn.[bhsdq] should have an index following it.
900 Except in register lists, of course. */
901 atype.defined |= NTA_HASINDEX;
902 else
903 atype.defined |= NTA_HASTYPE;
904
905 atype.type = parsetype.type;
906 atype.width = parsetype.width;
907 }
908
909 if (skip_past_char (&str, '['))
910 {
911 expressionS exp;
912
913 /* Reject Sn[index] syntax. */
914 if (!is_typed_vecreg)
915 {
916 first_error (_("this type of register can't be indexed"));
917 return PARSE_FAIL;
918 }
919
920 if (in_reg_list == TRUE)
921 {
922 first_error (_("index not allowed inside register list"));
923 return PARSE_FAIL;
924 }
925
926 atype.defined |= NTA_HASINDEX;
927
928 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
929
930 if (exp.X_op != O_constant)
931 {
932 first_error (_("constant expression required"));
933 return PARSE_FAIL;
934 }
935
936 if (! skip_past_char (&str, ']'))
937 return PARSE_FAIL;
938
939 atype.index = exp.X_add_number;
940 }
941 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
942 {
943 /* Indexed vector register expected. */
944 first_error (_("indexed vector register expected"));
945 return PARSE_FAIL;
946 }
947
948 /* A vector reg Vn should be typed or indexed. */
949 if (type == REG_TYPE_VN && atype.defined == 0)
950 {
951 first_error (_("invalid use of vector register"));
952 }
953
954 if (typeinfo)
955 *typeinfo = atype;
956
957 if (rtype)
958 *rtype = type;
959
960 *ccp = str;
961
962 return reg->number;
963 }
964
965 /* Parse register.
966
967 Return the register number on success; return PARSE_FAIL otherwise.
968
969 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
970 the register (e.g. NEON double or quad reg when either has been requested).
971
972 If this is a NEON vector register with additional type information, fill
973 in the struct pointed to by VECTYPE (if non-NULL).
974
975 This parser does not handle register list. */
976
977 static int
978 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
979 aarch64_reg_type *rtype, struct neon_type_el *vectype)
980 {
981 struct neon_type_el atype;
982 char *str = *ccp;
983 int reg = parse_typed_reg (&str, type, rtype, &atype,
984 /*in_reg_list= */ FALSE);
985
986 if (reg == PARSE_FAIL)
987 return PARSE_FAIL;
988
989 if (vectype)
990 *vectype = atype;
991
992 *ccp = str;
993
994 return reg;
995 }
996
997 static inline bfd_boolean
998 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
999 {
1000 return
1001 e1.type == e2.type
1002 && e1.defined == e2.defined
1003 && e1.width == e2.width && e1.index == e2.index;
1004 }
1005
1006 /* This function parses the NEON register list. On success, it returns
1007 the parsed register list information in the following encoded format:
1008
1009 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1010 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1011
1012 The information of the register shape and/or index is returned in
1013 *VECTYPE.
1014
1015 It returns PARSE_FAIL if the register list is invalid.
1016
1017 The list contains one to four registers.
1018 Each register can be one of:
1019 <Vt>.<T>[<index>]
1020 <Vt>.<T>
1021 All <T> should be identical.
1022 All <index> should be identical.
1023 There are restrictions on <Vt> numbers which are checked later
1024 (by reg_list_valid_p). */
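/* For example, "{v2.8b, v3.8b}" parses as two registers and returns
   ((2 | (3 << 5)) << 2) | (2 - 1) == 0x189: bits 0-1 hold num_of_reg - 1,
   bits 2-6 hold the first register number (2), bits 7-11 the second (3),
   and *VECTYPE describes the common .8b arrangement.  */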
1025
1026 static int
1027 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1028 {
1029 char *str = *ccp;
1030 int nb_regs;
1031 struct neon_type_el typeinfo, typeinfo_first;
1032 int val, val_range;
1033 int in_range;
1034 int ret_val;
1035 int i;
1036 bfd_boolean error = FALSE;
1037 bfd_boolean expect_index = FALSE;
1038
1039 if (*str != '{')
1040 {
1041 set_syntax_error (_("expecting {"));
1042 return PARSE_FAIL;
1043 }
1044 str++;
1045
1046 nb_regs = 0;
1047 typeinfo_first.defined = 0;
1048 typeinfo_first.type = NT_invtype;
1049 typeinfo_first.width = -1;
1050 typeinfo_first.index = 0;
1051 ret_val = 0;
1052 val = -1;
1053 val_range = -1;
1054 in_range = 0;
1055 do
1056 {
1057 if (in_range)
1058 {
1059 str++; /* skip over '-' */
1060 val_range = val;
1061 }
1062 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1063 /*in_reg_list= */ TRUE);
1064 if (val == PARSE_FAIL)
1065 {
1066 set_first_syntax_error (_("invalid vector register in list"));
1067 error = TRUE;
1068 continue;
1069 }
1070 /* reject [bhsd]n */
1071 if (typeinfo.defined == 0)
1072 {
1073 set_first_syntax_error (_("invalid scalar register in list"));
1074 error = TRUE;
1075 continue;
1076 }
1077
1078 if (typeinfo.defined & NTA_HASINDEX)
1079 expect_index = TRUE;
1080
1081 if (in_range)
1082 {
1083 if (val < val_range)
1084 {
1085 set_first_syntax_error
1086 (_("invalid range in vector register list"));
1087 error = TRUE;
1088 }
1089 val_range++;
1090 }
1091 else
1092 {
1093 val_range = val;
1094 if (nb_regs == 0)
1095 typeinfo_first = typeinfo;
1096 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1097 {
1098 set_first_syntax_error
1099 (_("type mismatch in vector register list"));
1100 error = TRUE;
1101 }
1102 }
1103 if (! error)
1104 for (i = val_range; i <= val; i++)
1105 {
1106 ret_val |= i << (5 * nb_regs);
1107 nb_regs++;
1108 }
1109 in_range = 0;
1110 }
1111 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1112
1113 skip_whitespace (str);
1114 if (*str != '}')
1115 {
1116 set_first_syntax_error (_("end of vector register list not found"));
1117 error = TRUE;
1118 }
1119 str++;
1120
1121 skip_whitespace (str);
1122
1123 if (expect_index)
1124 {
1125 if (skip_past_char (&str, '['))
1126 {
1127 expressionS exp;
1128
1129 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1130 if (exp.X_op != O_constant)
1131 {
1132 set_first_syntax_error (_("constant expression required."));
1133 error = TRUE;
1134 }
1135 if (! skip_past_char (&str, ']'))
1136 error = TRUE;
1137 else
1138 typeinfo_first.index = exp.X_add_number;
1139 }
1140 else
1141 {
1142 set_first_syntax_error (_("expected index"));
1143 error = TRUE;
1144 }
1145 }
1146
1147 if (nb_regs > 4)
1148 {
1149 set_first_syntax_error (_("too many registers in vector register list"));
1150 error = TRUE;
1151 }
1152 else if (nb_regs == 0)
1153 {
1154 set_first_syntax_error (_("empty vector register list"));
1155 error = TRUE;
1156 }
1157
1158 *ccp = str;
1159 if (! error)
1160 *vectype = typeinfo_first;
1161
1162 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1163 }
1164
1165 /* Directives: register aliases. */
1166
1167 static reg_entry *
1168 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1169 {
1170 reg_entry *new;
1171 const char *name;
1172
1173 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1174 {
1175 if (new->builtin)
1176 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1177 str);
1178
1179 /* Only warn about a redefinition if it's not defined as the
1180 same register. */
1181 else if (new->number != number || new->type != type)
1182 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1183
1184 return NULL;
1185 }
1186
1187 name = xstrdup (str);
1188 new = xmalloc (sizeof (reg_entry));
1189
1190 new->name = name;
1191 new->number = number;
1192 new->type = type;
1193 new->builtin = FALSE;
1194
1195 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1196 abort ();
1197
1198 return new;
1199 }
1200
1201 /* Look for the .req directive. This is of the form:
1202
1203 new_register_name .req existing_register_name
1204
1205 If we find one, or if it looks sufficiently like one that we want to
1206 handle any error here, return TRUE. Otherwise return FALSE. */
1207
1208 static bfd_boolean
1209 create_register_alias (char *newname, char *p)
1210 {
1211 const reg_entry *old;
1212 char *oldname, *nbuf;
1213 size_t nlen;
1214
1215 /* The input scrubber ensures that whitespace after the mnemonic is
1216 collapsed to single spaces. */
1217 oldname = p;
1218 if (strncmp (oldname, " .req ", 6) != 0)
1219 return FALSE;
1220
1221 oldname += 6;
1222 if (*oldname == '\0')
1223 return FALSE;
1224
1225 old = hash_find (aarch64_reg_hsh, oldname);
1226 if (!old)
1227 {
1228 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1229 return TRUE;
1230 }
1231
1232 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1233 the desired alias name, and p points to its end. If not, then
1234 the desired alias name is in the global original_case_string. */
1235 #ifdef TC_CASE_SENSITIVE
1236 nlen = p - newname;
1237 #else
1238 newname = original_case_string;
1239 nlen = strlen (newname);
1240 #endif
1241
1242 nbuf = alloca (nlen + 1);
1243 memcpy (nbuf, newname, nlen);
1244 nbuf[nlen] = '\0';
1245
1246 /* Create aliases under the new name as stated; an all-lowercase
1247 version of the new name; and an all-uppercase version of the new
1248 name. */
1249 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1250 {
1251 for (p = nbuf; *p; p++)
1252 *p = TOUPPER (*p);
1253
1254 if (strncmp (nbuf, newname, nlen))
1255 {
1256 /* If this attempt to create an additional alias fails, do not bother
1257 trying to create the all-lower case alias. We will fail and issue
1258 a second, duplicate error message. This situation arises when the
1259 programmer does something like:
1260 foo .req r0
1261 Foo .req r1
1262 The second .req creates the "Foo" alias but then fails to create
1263 the artificial FOO alias because it has already been created by the
1264 first .req. */
1265 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1266 return TRUE;
1267 }
1268
1269 for (p = nbuf; *p; p++)
1270 *p = TOLOWER (*p);
1271
1272 if (strncmp (nbuf, newname, nlen))
1273 insert_reg_alias (nbuf, old->number, old->type);
1274 }
1275
1276 return TRUE;
1277 }
1278
1279 /* Should never be called, as .req goes between the alias and the
1280 register name, not at the beginning of the line. */
1281 static void
1282 s_req (int a ATTRIBUTE_UNUSED)
1283 {
1284 as_bad (_("invalid syntax for .req directive"));
1285 }
1286
1287 /* The .unreq directive deletes an alias which was previously defined
1288 by .req. For example:
1289
1290 my_alias .req r11
1291 .unreq my_alias */
1292
1293 static void
1294 s_unreq (int a ATTRIBUTE_UNUSED)
1295 {
1296 char *name;
1297 char saved_char;
1298
1299 name = input_line_pointer;
1300
1301 while (*input_line_pointer != 0
1302 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1303 ++input_line_pointer;
1304
1305 saved_char = *input_line_pointer;
1306 *input_line_pointer = 0;
1307
1308 if (!*name)
1309 as_bad (_("invalid syntax for .unreq directive"));
1310 else
1311 {
1312 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1313
1314 if (!reg)
1315 as_bad (_("unknown register alias '%s'"), name);
1316 else if (reg->builtin)
1317 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1318 name);
1319 else
1320 {
1321 char *p;
1322 char *nbuf;
1323
1324 hash_delete (aarch64_reg_hsh, name, FALSE);
1325 free ((char *) reg->name);
1326 free (reg);
1327
1328 /* Also locate the all upper case and all lower case versions.
1329 Do not complain if we cannot find one or the other as it
1330 was probably deleted above. */
1331
1332 nbuf = strdup (name);
1333 for (p = nbuf; *p; p++)
1334 *p = TOUPPER (*p);
1335 reg = hash_find (aarch64_reg_hsh, nbuf);
1336 if (reg)
1337 {
1338 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1339 free ((char *) reg->name);
1340 free (reg);
1341 }
1342
1343 for (p = nbuf; *p; p++)
1344 *p = TOLOWER (*p);
1345 reg = hash_find (aarch64_reg_hsh, nbuf);
1346 if (reg)
1347 {
1348 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1349 free ((char *) reg->name);
1350 free (reg);
1351 }
1352
1353 free (nbuf);
1354 }
1355 }
1356
1357 *input_line_pointer = saved_char;
1358 demand_empty_rest_of_line ();
1359 }
1360
1361 /* Directives: Instruction set selection. */
1362
1363 #ifdef OBJ_ELF
1364 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1365 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1366 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1367 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped.
1368
1369 /* Create a new mapping symbol for the transition to STATE. */
1370
1371 static void
1372 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1373 {
1374 symbolS *symbolP;
1375 const char *symname;
1376 int type;
1377
1378 switch (state)
1379 {
1380 case MAP_DATA:
1381 symname = "$d";
1382 type = BSF_NO_FLAGS;
1383 break;
1384 case MAP_INSN:
1385 symname = "$x";
1386 type = BSF_NO_FLAGS;
1387 break;
1388 default:
1389 abort ();
1390 }
1391
1392 symbolP = symbol_new (symname, now_seg, value, frag);
1393 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1394
1395 /* Save the mapping symbols for future reference. Also check that
1396 we do not place two mapping symbols at the same offset within a
1397 frag. We'll handle overlap between frags in
1398 check_mapping_symbols.
1399
1400 If .fill or other data filling directive generates zero sized data,
1401 the mapping symbol for the following code will have the same value
1402 as the one generated for the data filling directive. In this case,
1403 we replace the old symbol with the new one at the same address. */
1404 if (value == 0)
1405 {
1406 if (frag->tc_frag_data.first_map != NULL)
1407 {
1408 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1409 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1410 &symbol_lastP);
1411 }
1412 frag->tc_frag_data.first_map = symbolP;
1413 }
1414 if (frag->tc_frag_data.last_map != NULL)
1415 {
1416 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1417 S_GET_VALUE (symbolP));
1418 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1419 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1420 &symbol_lastP);
1421 }
1422 frag->tc_frag_data.last_map = symbolP;
1423 }
1424
1425 /* We must sometimes convert a region marked as code to data during
1426 code alignment, if an odd number of bytes have to be padded. The
1427 code mapping symbol is pushed to an aligned address. */
1428
1429 static void
1430 insert_data_mapping_symbol (enum mstate state,
1431 valueT value, fragS * frag, offsetT bytes)
1432 {
1433 /* If there was already a mapping symbol, remove it. */
1434 if (frag->tc_frag_data.last_map != NULL
1435 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1436 frag->fr_address + value)
1437 {
1438 symbolS *symp = frag->tc_frag_data.last_map;
1439
1440 if (value == 0)
1441 {
1442 know (frag->tc_frag_data.first_map == symp);
1443 frag->tc_frag_data.first_map = NULL;
1444 }
1445 frag->tc_frag_data.last_map = NULL;
1446 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1447 }
1448
1449 make_mapping_symbol (MAP_DATA, value, frag);
1450 make_mapping_symbol (state, value + bytes, frag);
1451 }
1452
1453 static void mapping_state_2 (enum mstate state, int max_chars);
1454
1455 /* Set the mapping state to STATE. Only call this when about to
1456 emit some STATE bytes to the file. */
1457
1458 void
1459 mapping_state (enum mstate state)
1460 {
1461 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1462
1463 if (state == MAP_INSN)
1464 /* AArch64 instructions require 4-byte alignment. When emitting
1465 instructions into any section, record the appropriate section
1466 alignment. */
1467 record_alignment (now_seg, 2);
1468
1469 if (mapstate == state)
1470 /* The mapping symbol has already been emitted.
1471 There is nothing else to do. */
1472 return;
1473
1474 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1475 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1476 /* Emit MAP_DATA within executable section in order. Otherwise, it will be
1477 evaluated later in the next else. */
1478 return;
1479 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1480 {
1481 /* Only add the symbol if the offset is > 0:
1482 if we're at the first frag, check its size > 0;
1483 if we're not at the first frag, then for sure
1484 the offset is > 0. */
1485 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1486 const int add_symbol = (frag_now != frag_first)
1487 || (frag_now_fix () > 0);
1488
1489 if (add_symbol)
1490 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1491 }
1492 #undef TRANSITION
1493
1494 mapping_state_2 (state, 0);
1495 }
1496
1497 /* Same as mapping_state, but MAX_CHARS bytes have already been
1498 allocated. Put the mapping symbol that far back. */
1499
1500 static void
1501 mapping_state_2 (enum mstate state, int max_chars)
1502 {
1503 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1504
1505 if (!SEG_NORMAL (now_seg))
1506 return;
1507
1508 if (mapstate == state)
1509 /* The mapping symbol has already been emitted.
1510 There is nothing else to do. */
1511 return;
1512
1513 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1514 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1515 }
1516 #else
1517 #define mapping_state(x) /* nothing */
1518 #define mapping_state_2(x, y) /* nothing */
1519 #endif
1520
1521 /* Directives: sectioning and alignment. */
1522
1523 static void
1524 s_bss (int ignore ATTRIBUTE_UNUSED)
1525 {
1526 /* We don't support putting frags in the BSS segment, we fake it by
1527 marking in_bss, then looking at s_skip for clues. */
1528 subseg_set (bss_section, 0);
1529 demand_empty_rest_of_line ();
1530 mapping_state (MAP_DATA);
1531 }
1532
1533 static void
1534 s_even (int ignore ATTRIBUTE_UNUSED)
1535 {
1536 /* Never make frag if expect extra pass. */
1537 if (!need_pass_2)
1538 frag_align (1, 0, 0);
1539
1540 record_alignment (now_seg, 1);
1541
1542 demand_empty_rest_of_line ();
1543 }
1544
1545 /* Directives: Literal pools. */
1546
1547 static literal_pool *
1548 find_literal_pool (int size)
1549 {
1550 literal_pool *pool;
1551
1552 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1553 {
1554 if (pool->section == now_seg
1555 && pool->sub_section == now_subseg && pool->size == size)
1556 break;
1557 }
1558
1559 return pool;
1560 }
1561
1562 static literal_pool *
1563 find_or_make_literal_pool (int size)
1564 {
1565 /* Next literal pool ID number. */
1566 static unsigned int latest_pool_num = 1;
1567 literal_pool *pool;
1568
1569 pool = find_literal_pool (size);
1570
1571 if (pool == NULL)
1572 {
1573 /* Create a new pool. */
1574 pool = xmalloc (sizeof (*pool));
1575 if (!pool)
1576 return NULL;
1577
1578 /* Currently we always put the literal pool in the current text
1579 section. If we were generating "small" model code where we
1580 knew that all code and initialised data was within 1MB then
1581 we could output literals to mergeable, read-only data
1582 sections. */
1583
1584 pool->next_free_entry = 0;
1585 pool->section = now_seg;
1586 pool->sub_section = now_subseg;
1587 pool->size = size;
1588 pool->next = list_of_pools;
1589 pool->symbol = NULL;
1590
1591 /* Add it to the list. */
1592 list_of_pools = pool;
1593 }
1594
1595 /* New pools, and emptied pools, will have a NULL symbol. */
1596 if (pool->symbol == NULL)
1597 {
1598 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1599 (valueT) 0, &zero_address_frag);
1600 pool->id = latest_pool_num++;
1601 }
1602
1603 /* Done. */
1604 return pool;
1605 }
1606
1607 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1608 On success, rewrite *EXP to refer to the pool entry and return TRUE; otherwise return FALSE. */
1609 static bfd_boolean
1610 add_to_lit_pool (expressionS *exp, int size)
1611 {
1612 literal_pool *pool;
1613 unsigned int entry;
1614
1615 pool = find_or_make_literal_pool (size);
1616
1617 /* Check if this literal value is already in the pool. */
1618 for (entry = 0; entry < pool->next_free_entry; entry++)
1619 {
1620 expressionS * litexp = & pool->literals[entry].exp;
1621
1622 if ((litexp->X_op == exp->X_op)
1623 && (exp->X_op == O_constant)
1624 && (litexp->X_add_number == exp->X_add_number)
1625 && (litexp->X_unsigned == exp->X_unsigned))
1626 break;
1627
1628 if ((litexp->X_op == exp->X_op)
1629 && (exp->X_op == O_symbol)
1630 && (litexp->X_add_number == exp->X_add_number)
1631 && (litexp->X_add_symbol == exp->X_add_symbol)
1632 && (litexp->X_op_symbol == exp->X_op_symbol))
1633 break;
1634 }
1635
1636 /* Do we need to create a new entry? */
1637 if (entry == pool->next_free_entry)
1638 {
1639 if (entry >= MAX_LITERAL_POOL_SIZE)
1640 {
1641 set_syntax_error (_("literal pool overflow"));
1642 return FALSE;
1643 }
1644
1645 pool->literals[entry].exp = *exp;
1646 pool->next_free_entry += 1;
1647 if (exp->X_op == O_big)
1648 {
1649 /* PR 16688: Bignums are held in a single global array. We must
1650 copy and preserve that value now, before it is overwritten. */
1651 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1652 memcpy (pool->literals[entry].bignum, generic_bignum,
1653 CHARS_PER_LITTLENUM * exp->X_add_number);
1654 }
1655 else
1656 pool->literals[entry].bignum = NULL;
1657 }
1658
1659 exp->X_op = O_symbol;
1660 exp->X_add_number = ((int) entry) * size;
1661 exp->X_add_symbol = pool->symbol;
1662
1663 return TRUE;
1664 }
1665
1666 /* Can't use symbol_new here, so have to create a symbol and then at
1667 a later date assign it a value. That's what these functions do. */
1668
1669 static void
1670 symbol_locate (symbolS * symbolP,
1671 const char *name,/* It is copied, the caller can modify. */
1672 segT segment, /* Segment identifier (SEG_<something>). */
1673 valueT valu, /* Symbol value. */
1674 fragS * frag) /* Associated fragment. */
1675 {
1676 size_t name_length;
1677 char *preserved_copy_of_name;
1678
1679 name_length = strlen (name) + 1; /* +1 for \0. */
1680 obstack_grow (&notes, name, name_length);
1681 preserved_copy_of_name = obstack_finish (&notes);
1682
1683 #ifdef tc_canonicalize_symbol_name
1684 preserved_copy_of_name =
1685 tc_canonicalize_symbol_name (preserved_copy_of_name);
1686 #endif
1687
1688 S_SET_NAME (symbolP, preserved_copy_of_name);
1689
1690 S_SET_SEGMENT (symbolP, segment);
1691 S_SET_VALUE (symbolP, valu);
1692 symbol_clear_list_pointers (symbolP);
1693
1694 symbol_set_frag (symbolP, frag);
1695
1696 /* Link to end of symbol chain. */
1697 {
1698 extern int symbol_table_frozen;
1699
1700 if (symbol_table_frozen)
1701 abort ();
1702 }
1703
1704 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1705
1706 obj_symbol_new_hook (symbolP);
1707
1708 #ifdef tc_symbol_new_hook
1709 tc_symbol_new_hook (symbolP);
1710 #endif
1711
1712 #ifdef DEBUG_SYMS
1713 verify_symbol_chain (symbol_rootP, symbol_lastP);
1714 #endif /* DEBUG_SYMS */
1715 }
1716
1717
1718 static void
1719 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1720 {
1721 unsigned int entry;
1722 literal_pool *pool;
1723 char sym_name[20];
1724 int align;
1725
1726 for (align = 2; align <= 4; align++)
1727 {
1728 int size = 1 << align;
1729
1730 pool = find_literal_pool (size);
1731 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1732 continue;
1733
1734 mapping_state (MAP_DATA);
1735
1736 /* Align pool as you have word accesses.
1737 Only make a frag if we have to. */
1738 if (!need_pass_2)
1739 frag_align (align, 0, 0);
1740
1741 record_alignment (now_seg, align);
1742
1743 sprintf (sym_name, "$$lit_\002%x", pool->id);
1744
1745 symbol_locate (pool->symbol, sym_name, now_seg,
1746 (valueT) frag_now_fix (), frag_now);
1747 symbol_table_insert (pool->symbol);
1748
1749 for (entry = 0; entry < pool->next_free_entry; entry++)
1750 {
1751 expressionS * exp = & pool->literals[entry].exp;
1752
1753 if (exp->X_op == O_big)
1754 {
1755 /* PR 16688: Restore the global bignum value. */
1756 gas_assert (pool->literals[entry].bignum != NULL);
1757 memcpy (generic_bignum, pool->literals[entry].bignum,
1758 CHARS_PER_LITTLENUM * exp->X_add_number);
1759 }
1760
1761 /* First output the expression in the instruction to the pool. */
1762 emit_expr (exp, size); /* .word|.xword */
1763
1764 if (exp->X_op == O_big)
1765 {
1766 free (pool->literals[entry].bignum);
1767 pool->literals[entry].bignum = NULL;
1768 }
1769 }
1770
1771 /* Mark the pool as empty. */
1772 pool->next_free_entry = 0;
1773 pool->symbol = NULL;
1774 }
1775 }
1776
1777 #ifdef OBJ_ELF
1778 /* Forward declarations for functions below, in the MD interface
1779 section. */
1780 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1781 static struct reloc_table_entry * find_reloc_table_entry (char **);
1782
1783 /* Directives: Data. */
1784 /* N.B. the support for relocation suffix in this directive needs to be
1785 implemented properly. */
1786
1787 static void
1788 s_aarch64_elf_cons (int nbytes)
1789 {
1790 expressionS exp;
1791
1792 #ifdef md_flush_pending_output
1793 md_flush_pending_output ();
1794 #endif
1795
1796 if (is_it_end_of_statement ())
1797 {
1798 demand_empty_rest_of_line ();
1799 return;
1800 }
1801
1802 #ifdef md_cons_align
1803 md_cons_align (nbytes);
1804 #endif
1805
1806 mapping_state (MAP_DATA);
1807 do
1808 {
1809 struct reloc_table_entry *reloc;
1810
1811 expression (&exp);
1812
1813 if (exp.X_op != O_symbol)
1814 emit_expr (&exp, (unsigned int) nbytes);
1815 else
1816 {
1817 skip_past_char (&input_line_pointer, '#');
1818 if (skip_past_char (&input_line_pointer, ':'))
1819 {
1820 reloc = find_reloc_table_entry (&input_line_pointer);
1821 if (reloc == NULL)
1822 as_bad (_("unrecognized relocation suffix"));
1823 else
1824 as_bad (_("unimplemented relocation suffix"));
1825 ignore_rest_of_line ();
1826 return;
1827 }
1828 else
1829 emit_expr (&exp, (unsigned int) nbytes);
1830 }
1831 }
1832 while (*input_line_pointer++ == ',');
1833
1834 /* Put terminator back into stream. */
1835 input_line_pointer--;
1836 demand_empty_rest_of_line ();
1837 }
1838
1839 #endif /* OBJ_ELF */
1840
1841 /* Output a 32-bit word, but mark as an instruction. */
1842
1843 static void
1844 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1845 {
1846 expressionS exp;
1847
1848 #ifdef md_flush_pending_output
1849 md_flush_pending_output ();
1850 #endif
1851
1852 if (is_it_end_of_statement ())
1853 {
1854 demand_empty_rest_of_line ();
1855 return;
1856 }
1857
1858 /* Sections are assumed to start aligned. In an executable section, there is no
1859 MAP_DATA symbol pending, so we only align the address during the
1860 MAP_DATA --> MAP_INSN transition.
1861 For other sections, this is not guaranteed. */
1862 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1863 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1864 frag_align_code (2, 0);
1865
1866 #ifdef OBJ_ELF
1867 mapping_state (MAP_INSN);
1868 #endif
1869
1870 do
1871 {
1872 expression (&exp);
1873 if (exp.X_op != O_constant)
1874 {
1875 as_bad (_("constant expression required"));
1876 ignore_rest_of_line ();
1877 return;
1878 }
1879
1880 if (target_big_endian)
1881 {
1882 unsigned int val = exp.X_add_number;
1883 exp.X_add_number = SWAP_32 (val);
1884 }
1885 emit_expr (&exp, 4);
1886 }
1887 while (*input_line_pointer++ == ',');
1888
1889 /* Put terminator back into stream. */
1890 input_line_pointer--;
1891 demand_empty_rest_of_line ();
1892 }
1893
1894 #ifdef OBJ_ELF
1895 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1896
1897 static void
1898 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1899 {
1900 expressionS exp;
1901
1902 /* Since we're just labelling the code, there's no need to define a
1903 mapping symbol. */
1904 expression (&exp);
1905 /* Make sure there is enough room in this frag for the following
1906 blr. This trick only works if the blr follows immediately after
1907 the .tlsdesc directive. */
1908 frag_grow (4);
1909 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1910 BFD_RELOC_AARCH64_TLSDESC_CALL);
1911
1912 demand_empty_rest_of_line ();
1913 }
1914 #endif /* OBJ_ELF */
1915
1916 static void s_aarch64_arch (int);
1917 static void s_aarch64_cpu (int);
1918 static void s_aarch64_arch_extension (int);
1919
1920 /* This table describes all the machine specific pseudo-ops the assembler
1921 has to support. The fields are:
1922 pseudo-op name without dot
1923 function to call to execute this pseudo-op
1924 Integer arg to pass to the function. */
1925
1926 const pseudo_typeS md_pseudo_table[] = {
1927 /* Never called because '.req' does not start a line. */
1928 {"req", s_req, 0},
1929 {"unreq", s_unreq, 0},
1930 {"bss", s_bss, 0},
1931 {"even", s_even, 0},
1932 {"ltorg", s_ltorg, 0},
1933 {"pool", s_ltorg, 0},
1934 {"cpu", s_aarch64_cpu, 0},
1935 {"arch", s_aarch64_arch, 0},
1936 {"arch_extension", s_aarch64_arch_extension, 0},
1937 {"inst", s_aarch64_inst, 0},
1938 #ifdef OBJ_ELF
1939 {"tlsdesccall", s_tlsdesccall, 0},
1940 {"word", s_aarch64_elf_cons, 4},
1941 {"long", s_aarch64_elf_cons, 4},
1942 {"xword", s_aarch64_elf_cons, 8},
1943 {"dword", s_aarch64_elf_cons, 8},
1944 #endif
1945 {0, 0, 0}
1946 };
1947 \f
1948
1949 /* Check whether STR points to a register name followed by a comma or the
1950 end of line; REG_TYPE indicates which register types are checked
1951 against. Return TRUE if STR is such a register name; otherwise return
1952 FALSE. The function does not intend to produce any diagnostics, but since
1953 the register parser aarch64_reg_parse, which is called by this function,
1954 does produce diagnostics, we call clear_error to clear any diagnostics
1955 that may be generated by aarch64_reg_parse.
1956 Also, the function returns FALSE directly if there is any user error
1957 present at the function entry. This prevents the existing diagnostics
1958 state from being spoiled.
1959 The function currently serves parse_constant_immediate and
1960 parse_big_immediate only. */
1961 static bfd_boolean
1962 reg_name_p (char *str, aarch64_reg_type reg_type)
1963 {
1964 int reg;
1965
1966 /* Prevent the diagnostics state from being spoiled. */
1967 if (error_p ())
1968 return FALSE;
1969
1970 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1971
1972 /* Clear the parsing error that may be set by the reg parser. */
1973 clear_error ();
1974
1975 if (reg == PARSE_FAIL)
1976 return FALSE;
1977
1978 skip_whitespace (str);
1979 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1980 return TRUE;
1981
1982 return FALSE;
1983 }
1984
1985 /* Parser functions used exclusively in instruction operands. */
1986
1987 /* Parse an immediate expression which may not be constant.
1988
1989 To prevent the expression parser from pushing a register name
1990 into the symbol table as an undefined symbol, a check is first
1991 made to find out whether STR is a valid register name followed
1992 by a comma or the end of line. Return FALSE if STR is such a
1993 string. */
1994
1995 static bfd_boolean
1996 parse_immediate_expression (char **str, expressionS *exp)
1997 {
1998 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1999 {
2000 set_recoverable_error (_("immediate operand required"));
2001 return FALSE;
2002 }
2003
2004 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2005
2006 if (exp->X_op == O_absent)
2007 {
2008 set_fatal_syntax_error (_("missing immediate expression"));
2009 return FALSE;
2010 }
2011
2012 return TRUE;
2013 }
2014
2015 /* Constant immediate-value read function for use in insn parsing.
2016 STR points to the beginning of the immediate (with the optional
2017 leading #); *VAL receives the value.
2018
2019 Return TRUE on success; otherwise return FALSE. */
2020
2021 static bfd_boolean
2022 parse_constant_immediate (char **str, int64_t * val)
2023 {
2024 expressionS exp;
2025
2026 if (! parse_immediate_expression (str, &exp))
2027 return FALSE;
2028
2029 if (exp.X_op != O_constant)
2030 {
2031 set_syntax_error (_("constant expression required"));
2032 return FALSE;
2033 }
2034
2035 *val = exp.X_add_number;
2036 return TRUE;
2037 }
2038
2039 static uint32_t
2040 encode_imm_float_bits (uint32_t imm)
2041 {
2042 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2043 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2044 }
2045
2046 /* Return TRUE if the single-precision floating-point value encoded in IMM
2047 can be expressed in the AArch64 8-bit signed floating-point format with
2048 3-bit exponent and normalized 4 bits of precision; in other words, the
2049 floating-point value must be expressible as
2050 (+/-) n / 16 * power (2, r)
2051 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2052
2053 static bfd_boolean
2054 aarch64_imm_float_p (uint32_t imm)
2055 {
2056 /* If a single-precision floating-point value has the following bit
2057 pattern, it can be expressed in the AArch64 8-bit floating-point
2058 format:
2059
2060 3 32222222 2221111111111
2061 1 09876543 21098765432109876543210
2062 n Eeeeeexx xxxx0000000000000000000
2063
2064 where n, e and each x are either 0 or 1 independently, with
2065 E == ~ e. */
2066
2067 uint32_t pattern;
2068
2069 /* Prepare the pattern for 'Eeeeee'. */
2070 if (((imm >> 30) & 0x1) == 0)
2071 pattern = 0x3e000000;
2072 else
2073 pattern = 0x40000000;
2074
2075 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2076 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2077 }
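/* For example, 1.0f has the IEEE754 single-precision encoding 0x3f800000,
   which matches the pattern above (low 19 bits zero, bits 25-29 being the
   complement of bit 30); aarch64_imm_float_p accepts it and
   encode_imm_float_bits maps it to the 8-bit FMOV immediate 0x70.
   Similarly, 2.0f (0x40000000) maps to the 8-bit immediate 0x00.  */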
2078
2079 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2080
2081 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2082 8-bit signed floating-point format with 3-bit exponent and normalized 4
2083 bits of precision (i.e. can be used in an FMOV instruction); return the
2084 equivalent single-precision encoding in *FPWORD.
2085
2086 Otherwise return FALSE. */
2087
2088 static bfd_boolean
2089 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2090 {
2091 /* If a double-precision floating-point value has the following bit
2092 pattern, it can be expressed in the AArch64 8-bit floating-point
2093 format:
2094
2095 6 66655555555 554444444...21111111111
2096 3 21098765432 109876543...098765432109876543210
2097 n Eeeeeeeeexx xxxx00000...000000000000000000000
2098
2099 where n, e and each x are either 0 or 1 independently, with
2100 E == ~ e. */
2101
2102 uint32_t pattern;
2103 uint32_t high32 = imm >> 32;
2104
2105 /* Lower 32 bits need to be 0s. */
2106 if ((imm & 0xffffffff) != 0)
2107 return FALSE;
2108
2109 /* Prepare the pattern for 'Eeeeeeeee'. */
2110 if (((high32 >> 30) & 0x1) == 0)
2111 pattern = 0x3fc00000;
2112 else
2113 pattern = 0x40000000;
2114
2115 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2116 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2117 {
2118 /* Convert to the single-precision encoding.
2119 i.e. convert
2120 n Eeeeeeeeexx xxxx00000...000000000000000000000
2121 to
2122 n Eeeeeexx xxxx0000000000000000000. */
2123 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2124 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2125 return TRUE;
2126 }
2127 else
2128 return FALSE;
2129 }
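/* For example, 1.0 in double precision is 0x3ff0000000000000: the low 32
   bits are zero and the high word 0x3ff00000 matches the pattern above, so
   the conversion succeeds and *FPWORD receives the single-precision
   equivalent 0x3f800000.  */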
2130
2131 /* Parse a floating-point immediate. Return TRUE on success and return the
2132 value in *IMMED in the format of IEEE754 single-precision encoding.
2133 *CCP points to the start of the string; DP_P is TRUE when the immediate
2134 is expected to be in double-precision (N.B. this only matters when
2135 hexadecimal representation is involved).
2136
2137 N.B. 0.0 is accepted by this function. */
2138
2139 static bfd_boolean
2140 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2141 {
2142 char *str = *ccp;
2143 char *fpnum;
2144 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2145 int found_fpchar = 0;
2146 int64_t val = 0;
2147 unsigned fpword = 0;
2148 bfd_boolean hex_p = FALSE;
2149
2150 skip_past_char (&str, '#');
2151
2152 fpnum = str;
2153 skip_whitespace (fpnum);
2154
2155 if (strncmp (fpnum, "0x", 2) == 0)
2156 {
2157 /* Support the hexadecimal representation of the IEEE754 encoding.
2158 Double-precision is expected when DP_P is TRUE, otherwise the
2159 representation should be in single-precision. */
2160 if (! parse_constant_immediate (&str, &val))
2161 goto invalid_fp;
2162
2163 if (dp_p)
2164 {
2165 if (! aarch64_double_precision_fmovable (val, &fpword))
2166 goto invalid_fp;
2167 }
2168 else if ((uint64_t) val > 0xffffffff)
2169 goto invalid_fp;
2170 else
2171 fpword = val;
2172
2173 hex_p = TRUE;
2174 }
2175 else
2176 {
2177 /* We must not accidentally parse an integer as a floating-point number.
2178 Make sure that the value we parse is not an integer by checking for
2179 special characters '.', 'e' or 'E'. */
2180 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2181 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2182 {
2183 found_fpchar = 1;
2184 break;
2185 }
2186
2187 if (!found_fpchar)
2188 return FALSE;
2189 }
2190
2191 if (! hex_p)
2192 {
2193 int i;
2194
2195 if ((str = atof_ieee (str, 's', words)) == NULL)
2196 goto invalid_fp;
2197
2198 /* Our FP word must be 32 bits (single-precision FP). */
2199 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2200 {
2201 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2202 fpword |= words[i];
2203 }
2204 }
2205
2206 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2207 {
2208 *immed = fpword;
2209 *ccp = str;
2210 return TRUE;
2211 }
2212
2213 invalid_fp:
2214 set_fatal_syntax_error (_("invalid floating-point constant"));
2215 return FALSE;
2216 }
2217
2218 /* Less-generic immediate-value read function with the possibility of loading
2219 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2220 instructions.
2221
2222 To prevent the expression parser from pushing a register name into the
2223 symbol table as an undefined symbol, a check is first made to determine
2224 whether STR is a valid register name followed by a comma or the end
2225 of line. Return FALSE if STR is such a register. */
2226
2227 static bfd_boolean
2228 parse_big_immediate (char **str, int64_t *imm)
2229 {
2230 char *ptr = *str;
2231
2232 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2233 {
2234 set_syntax_error (_("immediate operand required"));
2235 return FALSE;
2236 }
2237
2238 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2239
2240 if (inst.reloc.exp.X_op == O_constant)
2241 *imm = inst.reloc.exp.X_add_number;
2242
2243 *str = ptr;
2244
2245 return TRUE;
2246 }
2247
2248 /* Record in *RELOC that operand *OPERAND needs a GAS internal fixup.
2249 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2250 assistance from libopcodes. */
2251
2252 static inline void
2253 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2254 const aarch64_opnd_info *operand,
2255 int need_libopcodes_p)
2256 {
2257 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2258 reloc->opnd = operand->type;
2259 if (need_libopcodes_p)
2260 reloc->need_libopcodes_p = 1;
2261 }
2262
2263 /* Return TRUE if the instruction needs to be fixed up later internally by
2264 the GAS; otherwise return FALSE. */
2265
2266 static inline bfd_boolean
2267 aarch64_gas_internal_fixup_p (void)
2268 {
2269 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2270 }
2271
2272 /* Assign the immediate value to the relevant field in *OPERAND if
2273 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2274 needs an internal fixup in a later stage.
2275 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2276 IMM.VALUE that may get assigned with the constant. */
2277 static inline void
2278 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2279 aarch64_opnd_info *operand,
2280 int addr_off_p,
2281 int need_libopcodes_p,
2282 int skip_p)
2283 {
2284 if (reloc->exp.X_op == O_constant)
2285 {
2286 if (addr_off_p)
2287 operand->addr.offset.imm = reloc->exp.X_add_number;
2288 else
2289 operand->imm.value = reloc->exp.X_add_number;
2290 reloc->type = BFD_RELOC_UNUSED;
2291 }
2292 else
2293 {
2294 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2295 /* Tell libopcodes to ignore this operand or not. This is helpful
2296 when one of the operands needs to be fixed up later but we need
2297 libopcodes to check the other operands. */
2298 operand->skip = skip_p;
2299 }
2300 }
2301
2302 /* Relocation modifiers. Each entry in the table contains the textual
2303 name for the relocation which may be placed before a symbol used as
2304 a load/store offset, or add immediate. It must be surrounded by a
2305 leading and trailing colon, for example:
2306
2307 ldr x0, [x1, #:rello:varsym]
2308 add x0, x1, #:rello:varsym */
2309
2310 struct reloc_table_entry
2311 {
2312 const char *name;
2313 int pc_rel;
2314 bfd_reloc_code_real_type adr_type;
2315 bfd_reloc_code_real_type adrp_type;
2316 bfd_reloc_code_real_type movw_type;
2317 bfd_reloc_code_real_type add_type;
2318 bfd_reloc_code_real_type ldst_type;
2319 bfd_reloc_code_real_type ld_literal_type;
2320 };
2321
2322 static struct reloc_table_entry reloc_table[] = {
2323 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2324 {"lo12", 0,
2325 0, /* adr_type */
2326 0,
2327 0,
2328 BFD_RELOC_AARCH64_ADD_LO12,
2329 BFD_RELOC_AARCH64_LDST_LO12,
2330 0},
2331
2332 /* Higher 21 bits of pc-relative page offset: ADRP */
2333 {"pg_hi21", 1,
2334 0, /* adr_type */
2335 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2336 0,
2337 0,
2338 0,
2339 0},
2340
2341 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2342 {"pg_hi21_nc", 1,
2343 0, /* adr_type */
2344 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2345 0,
2346 0,
2347 0,
2348 0},
2349
2350 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2351 {"abs_g0", 0,
2352 0, /* adr_type */
2353 0,
2354 BFD_RELOC_AARCH64_MOVW_G0,
2355 0,
2356 0,
2357 0},
2358
2359 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2360 {"abs_g0_s", 0,
2361 0, /* adr_type */
2362 0,
2363 BFD_RELOC_AARCH64_MOVW_G0_S,
2364 0,
2365 0,
2366 0},
2367
2368 /* Less significant bits 0-15 of address/value: MOVK, no check */
2369 {"abs_g0_nc", 0,
2370 0, /* adr_type */
2371 0,
2372 BFD_RELOC_AARCH64_MOVW_G0_NC,
2373 0,
2374 0,
2375 0},
2376
2377 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2378 {"abs_g1", 0,
2379 0, /* adr_type */
2380 0,
2381 BFD_RELOC_AARCH64_MOVW_G1,
2382 0,
2383 0,
2384 0},
2385
2386 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2387 {"abs_g1_s", 0,
2388 0, /* adr_type */
2389 0,
2390 BFD_RELOC_AARCH64_MOVW_G1_S,
2391 0,
2392 0,
2393 0},
2394
2395 /* Less significant bits 16-31 of address/value: MOVK, no check */
2396 {"abs_g1_nc", 0,
2397 0, /* adr_type */
2398 0,
2399 BFD_RELOC_AARCH64_MOVW_G1_NC,
2400 0,
2401 0,
2402 0},
2403
2404 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2405 {"abs_g2", 0,
2406 0, /* adr_type */
2407 0,
2408 BFD_RELOC_AARCH64_MOVW_G2,
2409 0,
2410 0,
2411 0},
2412
2413 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2414 {"abs_g2_s", 0,
2415 0, /* adr_type */
2416 0,
2417 BFD_RELOC_AARCH64_MOVW_G2_S,
2418 0,
2419 0,
2420 0},
2421
2422 /* Less significant bits 32-47 of address/value: MOVK, no check */
2423 {"abs_g2_nc", 0,
2424 0, /* adr_type */
2425 0,
2426 BFD_RELOC_AARCH64_MOVW_G2_NC,
2427 0,
2428 0,
2429 0},
2430
2431 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2432 {"abs_g3", 0,
2433 0, /* adr_type */
2434 0,
2435 BFD_RELOC_AARCH64_MOVW_G3,
2436 0,
2437 0,
2438 0},
2439
2440 /* Get to the page containing GOT entry for a symbol. */
2441 {"got", 1,
2442 0, /* adr_type */
2443 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2444 0,
2445 0,
2446 0,
2447 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2448
2449 /* 12 bit offset into the page containing GOT entry for that symbol. */
2450 {"got_lo12", 0,
2451 0, /* adr_type */
2452 0,
2453 0,
2454 0,
2455 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2456 0},
2457
2458 /* Bits 0-15 of address/value: MOVK, no check. */
2459 {"gotoff_g0_nc", 0,
2460 0, /* adr_type */
2461 0,
2462 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2463 0,
2464 0,
2465 0},
2466
2467 /* Most significant bits 16-31 of address/value: MOVZ. */
2468 {"gotoff_g1", 0,
2469 0, /* adr_type */
2470 0,
2471 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2472 0,
2473 0,
2474 0},
2475
2476 /* 15 bit offset into the page containing GOT entry for that symbol. */
2477 {"gotoff_lo15", 0,
2478 0, /* adr_type */
2479 0,
2480 0,
2481 0,
2482 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2483 0},
2484
2485 /* Get to the page containing GOT TLS entry for a symbol */
2486 {"tlsgd", 0,
2487 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2488 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2489 0,
2490 0,
2491 0,
2492 0},
2493
2494 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2495 {"tlsgd_lo12", 0,
2496 0, /* adr_type */
2497 0,
2498 0,
2499 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2500 0,
2501 0},
2502
2503 /* Most significant bits 16-31 of address/value: MOVZ. */
2504 {"tlsgd_g1", 0,
2505 0, /* adr_type */
2506 0,
2507 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2508 0,
2509 0,
2510 0},
2511
2512 /* Get to the page containing GOT TLS entry for a symbol */
2513 {"tlsdesc", 0,
2514 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2515 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2516 0,
2517 0,
2518 0,
2519 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2520
2521 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2522 {"tlsdesc_lo12", 0,
2523 0, /* adr_type */
2524 0,
2525 0,
2526 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2527 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2528 0},
2529
2530 /* Get to the page containing GOT TLS entry for a symbol.
2531 As with GD, we allocate two consecutive GOT slots
2532 for the module index and module offset; the only difference
2533 from GD is that the module offset should be initialized to
2534 zero without any outstanding runtime relocation. */
2535 {"tlsldm", 0,
2536 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2537 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2538 0,
2539 0,
2540 0,
2541 0},
2542
2543 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2544 {"tlsldm_lo12_nc", 0,
2545 0, /* adr_type */
2546 0,
2547 0,
2548 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2549 0,
2550 0},
2551
2552 /* 12 bit offset into the module TLS base address. */
2553 {"dtprel_lo12", 0,
2554 0, /* adr_type */
2555 0,
2556 0,
2557 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2558 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2559 0},
2560
2561 /* Same as dtprel_lo12, no overflow check. */
2562 {"dtprel_lo12_nc", 0,
2563 0, /* adr_type */
2564 0,
2565 0,
2566 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2567 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2568 0},
2569
2570 /* bits[23:12] of offset to the module TLS base address. */
2571 {"dtprel_hi12", 0,
2572 0, /* adr_type */
2573 0,
2574 0,
2575 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2576 0,
2577 0},
2578
2579 /* bits[15:0] of offset to the module TLS base address. */
2580 {"dtprel_g0", 0,
2581 0, /* adr_type */
2582 0,
2583 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2584 0,
2585 0,
2586 0},
2587
2588 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2589 {"dtprel_g0_nc", 0,
2590 0, /* adr_type */
2591 0,
2592 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2593 0,
2594 0,
2595 0},
2596
2597 /* bits[31:16] of offset to the module TLS base address. */
2598 {"dtprel_g1", 0,
2599 0, /* adr_type */
2600 0,
2601 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2602 0,
2603 0,
2604 0},
2605
2606 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2607 {"dtprel_g1_nc", 0,
2608 0, /* adr_type */
2609 0,
2610 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2611 0,
2612 0,
2613 0},
2614
2615 /* bits[47:32] of offset to the module TLS base address. */
2616 {"dtprel_g2", 0,
2617 0, /* adr_type */
2618 0,
2619 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2620 0,
2621 0,
2622 0},
2623
2624 /* Get to the page containing GOT TLS entry for a symbol */
2625 {"gottprel", 0,
2626 0, /* adr_type */
2627 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2628 0,
2629 0,
2630 0,
2631 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2632
2633 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2634 {"gottprel_lo12", 0,
2635 0, /* adr_type */
2636 0,
2637 0,
2638 0,
2639 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2640 0},
2641
2642 /* Get tp offset for a symbol. */
2643 {"tprel", 0,
2644 0, /* adr_type */
2645 0,
2646 0,
2647 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2648 0,
2649 0},
2650
2651 /* Get tp offset for a symbol. */
2652 {"tprel_lo12", 0,
2653 0, /* adr_type */
2654 0,
2655 0,
2656 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2657 0,
2658 0},
2659
2660 /* Get tp offset for a symbol. */
2661 {"tprel_hi12", 0,
2662 0, /* adr_type */
2663 0,
2664 0,
2665 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2666 0,
2667 0},
2668
2669 /* Get tp offset for a symbol. */
2670 {"tprel_lo12_nc", 0,
2671 0, /* adr_type */
2672 0,
2673 0,
2674 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2675 0,
2676 0},
2677
2678 /* Most significant bits 32-47 of address/value: MOVZ. */
2679 {"tprel_g2", 0,
2680 0, /* adr_type */
2681 0,
2682 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2683 0,
2684 0,
2685 0},
2686
2687 /* Most significant bits 16-31 of address/value: MOVZ. */
2688 {"tprel_g1", 0,
2689 0, /* adr_type */
2690 0,
2691 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2692 0,
2693 0,
2694 0},
2695
2696 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2697 {"tprel_g1_nc", 0,
2698 0, /* adr_type */
2699 0,
2700 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2701 0,
2702 0,
2703 0},
2704
2705 /* Most significant bits 0-15 of address/value: MOVZ. */
2706 {"tprel_g0", 0,
2707 0, /* adr_type */
2708 0,
2709 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2710 0,
2711 0,
2712 0},
2713
2714 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2715 {"tprel_g0_nc", 0,
2716 0, /* adr_type */
2717 0,
2718 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2719 0,
2720 0,
2721 0},
2722
2723 /* 15-bit offset from the GOT entry to the base address of the GOT table. */
2724 {"gotpage_lo15", 0,
2725 0,
2726 0,
2727 0,
2728 0,
2729 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2730 0},
2731
2732 /* 14-bit offset from the GOT entry to the base address of the GOT table. */
2733 {"gotpage_lo14", 0,
2734 0,
2735 0,
2736 0,
2737 0,
2738 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2739 0},
2740 };
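/* To illustrate how the table above is used, operands such as the
   following (the register choices are arbitrary) select these members:

     add  x0, x0, #:lo12:sym          -> add_type (BFD_RELOC_AARCH64_ADD_LO12)
     ldr  x0, [x0, #:got_lo12:sym]    -> ldst_type (BFD_RELOC_AARCH64_LD_GOT_LO12_NC)
     movz x0, #:tlsgd_g1:var          -> movw_type (BFD_RELOC_AARCH64_TLSGD_MOVW_G1)

   Which member is consulted depends on the kind of instruction being
   assembled (ADD, LDR/STR, MOVZ/MOVK, ADR, ADRP or a load literal).  */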
2741
2742 /* Given the address of a pointer pointing to the textual name of a
2743 relocation as may appear in assembler source, attempt to find its
2744 details in reloc_table. The pointer will be updated to the character
2745 after the trailing colon. On failure, NULL will be returned;
2746 otherwise return the reloc_table_entry. */
2747
2748 static struct reloc_table_entry *
2749 find_reloc_table_entry (char **str)
2750 {
2751 unsigned int i;
2752 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2753 {
2754 int length = strlen (reloc_table[i].name);
2755
2756 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2757 && (*str)[length] == ':')
2758 {
2759 *str += (length + 1);
2760 return &reloc_table[i];
2761 }
2762 }
2763
2764 return NULL;
2765 }
2766
2767 /* Mode argument to parse_shift and parse_shifter_operand. */
2768 enum parse_shift_mode
2769 {
2770 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2771 "#imm{,lsl #n}" */
2772 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2773 "#imm" */
2774 SHIFTED_LSL, /* bare "lsl #n" */
2775 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2776 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2777 };
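/* As a rough illustration, the shift/extend syntax accepted by each mode
   corresponds to operands such as:

     SHIFTED_ARITH_IMM   add  x0, x1, x2, lsl #3    add x0, x1, w2, uxtw #2
     SHIFTED_LOGIC_IMM   orr  x0, x1, x2, ror #4
     SHIFTED_LSL         movk x0, #0x1234, lsl #16
     SHIFTED_LSL_MSL     movi v0.4s, #0xab, msl #8
     SHIFTED_REG_OFFSET  ldr  x0, [x1, x2, lsl #3]  ldr w0, [x1, w2, sxtw #2]  */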
2778
2779 /* Parse a <shift> operator on an AArch64 data processing instruction.
2780 Return TRUE on success; otherwise return FALSE. */
2781 static bfd_boolean
2782 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2783 {
2784 const struct aarch64_name_value_pair *shift_op;
2785 enum aarch64_modifier_kind kind;
2786 expressionS exp;
2787 int exp_has_prefix;
2788 char *s = *str;
2789 char *p = s;
2790
2791 for (p = *str; ISALPHA (*p); p++)
2792 ;
2793
2794 if (p == *str)
2795 {
2796 set_syntax_error (_("shift expression expected"));
2797 return FALSE;
2798 }
2799
2800 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2801
2802 if (shift_op == NULL)
2803 {
2804 set_syntax_error (_("shift operator expected"));
2805 return FALSE;
2806 }
2807
2808 kind = aarch64_get_operand_modifier (shift_op);
2809
2810 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2811 {
2812 set_syntax_error (_("invalid use of 'MSL'"));
2813 return FALSE;
2814 }
2815
2816 switch (mode)
2817 {
2818 case SHIFTED_LOGIC_IMM:
2819 if (aarch64_extend_operator_p (kind) == TRUE)
2820 {
2821 set_syntax_error (_("extending shift is not permitted"));
2822 return FALSE;
2823 }
2824 break;
2825
2826 case SHIFTED_ARITH_IMM:
2827 if (kind == AARCH64_MOD_ROR)
2828 {
2829 set_syntax_error (_("'ROR' shift is not permitted"));
2830 return FALSE;
2831 }
2832 break;
2833
2834 case SHIFTED_LSL:
2835 if (kind != AARCH64_MOD_LSL)
2836 {
2837 set_syntax_error (_("only 'LSL' shift is permitted"));
2838 return FALSE;
2839 }
2840 break;
2841
2842 case SHIFTED_REG_OFFSET:
2843 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2844 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2845 {
2846 set_fatal_syntax_error
2847 (_("invalid shift for the register offset addressing mode"));
2848 return FALSE;
2849 }
2850 break;
2851
2852 case SHIFTED_LSL_MSL:
2853 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2854 {
2855 set_syntax_error (_("invalid shift operator"));
2856 return FALSE;
2857 }
2858 break;
2859
2860 default:
2861 abort ();
2862 }
2863
2864 /* Whitespace can appear here if the next thing is a bare digit. */
2865 skip_whitespace (p);
2866
2867 /* Parse shift amount. */
2868 exp_has_prefix = 0;
2869 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2870 exp.X_op = O_absent;
2871 else
2872 {
2873 if (is_immediate_prefix (*p))
2874 {
2875 p++;
2876 exp_has_prefix = 1;
2877 }
2878 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2879 }
2880 if (exp.X_op == O_absent)
2881 {
2882 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2883 {
2884 set_syntax_error (_("missing shift amount"));
2885 return FALSE;
2886 }
2887 operand->shifter.amount = 0;
2888 }
2889 else if (exp.X_op != O_constant)
2890 {
2891 set_syntax_error (_("constant shift amount required"));
2892 return FALSE;
2893 }
2894 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2895 {
2896 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2897 return FALSE;
2898 }
2899 else
2900 {
2901 operand->shifter.amount = exp.X_add_number;
2902 operand->shifter.amount_present = 1;
2903 }
2904
2905 operand->shifter.operator_present = 1;
2906 operand->shifter.kind = kind;
2907
2908 *str = p;
2909 return TRUE;
2910 }
2911
2912 /* Parse a <shifter_operand> for a data processing instruction:
2913
2914 #<immediate>
2915 #<immediate>, LSL #imm
2916
2917 Validation of immediate operands is deferred to md_apply_fix.
2918
2919 Return TRUE on success; otherwise return FALSE. */
2920
2921 static bfd_boolean
2922 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2923 enum parse_shift_mode mode)
2924 {
2925 char *p;
2926
2927 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2928 return FALSE;
2929
2930 p = *str;
2931
2932 /* Accept an immediate expression. */
2933 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2934 return FALSE;
2935
2936 /* Accept optional LSL for arithmetic immediate values. */
2937 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2938 if (! parse_shift (&p, operand, SHIFTED_LSL))
2939 return FALSE;
2940
2941 /* Do not accept any shifter for logical immediate values. */
2942 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2943 && parse_shift (&p, operand, mode))
2944 {
2945 set_syntax_error (_("unexpected shift operator"));
2946 return FALSE;
2947 }
2948
2949 *str = p;
2950 return TRUE;
2951 }
2952
2953 /* Parse a <shifter_operand> for a data processing instruction:
2954
2955 <Rm>
2956 <Rm>, <shift>
2957 #<immediate>
2958 #<immediate>, LSL #imm
2959
2960 where <shift> is handled by parse_shift above, and the last two
2961 cases are handled by the function above.
2962
2963 Validation of immediate operands is deferred to md_apply_fix.
2964
2965 Return TRUE on success; otherwise return FALSE. */
2966
2967 static bfd_boolean
2968 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2969 enum parse_shift_mode mode)
2970 {
2971 int reg;
2972 int isreg32, isregzero;
2973 enum aarch64_operand_class opd_class
2974 = aarch64_get_operand_class (operand->type);
2975
2976 if ((reg =
2977 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2978 {
2979 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2980 {
2981 set_syntax_error (_("unexpected register in the immediate operand"));
2982 return FALSE;
2983 }
2984
2985 if (!isregzero && reg == REG_SP)
2986 {
2987 set_syntax_error (BAD_SP);
2988 return FALSE;
2989 }
2990
2991 operand->reg.regno = reg;
2992 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2993
2994 /* Accept optional shift operation on register. */
2995 if (! skip_past_comma (str))
2996 return TRUE;
2997
2998 if (! parse_shift (str, operand, mode))
2999 return FALSE;
3000
3001 return TRUE;
3002 }
3003 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3004 {
3005 set_syntax_error
3006 (_("integer register expected in the extended/shifted operand "
3007 "register"));
3008 return FALSE;
3009 }
3010
3011 /* We have a shifted immediate variable. */
3012 return parse_shifter_operand_imm (str, operand, mode);
3013 }
3014
3015 /* Return TRUE on success; return FALSE otherwise. */
3016
3017 static bfd_boolean
3018 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3019 enum parse_shift_mode mode)
3020 {
3021 char *p = *str;
3022
3023 /* Determine if we have the sequence of characters #: or just :
3024 coming next. If we do, then we check for a :rello: relocation
3025 modifier. If we don't, punt the whole lot to
3026 parse_shifter_operand. */
3027
3028 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3029 {
3030 struct reloc_table_entry *entry;
3031
3032 if (p[0] == '#')
3033 p += 2;
3034 else
3035 p++;
3036 *str = p;
3037
3038 /* Try to parse a relocation. Anything else is an error. */
3039 if (!(entry = find_reloc_table_entry (str)))
3040 {
3041 set_syntax_error (_("unknown relocation modifier"));
3042 return FALSE;
3043 }
3044
3045 if (entry->add_type == 0)
3046 {
3047 set_syntax_error
3048 (_("this relocation modifier is not allowed on this instruction"));
3049 return FALSE;
3050 }
3051
3052 /* Save str before we decompose it. */
3053 p = *str;
3054
3055 /* Next, we parse the expression. */
3056 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3057 return FALSE;
3058
3059 /* Record the relocation type (use the ADD variant here). */
3060 inst.reloc.type = entry->add_type;
3061 inst.reloc.pc_rel = entry->pc_rel;
3062
3063 /* If str is empty, we've reached the end; stop here. */
3064 if (**str == '\0')
3065 return TRUE;
3066
3067 /* Otherwise, we have a shifted reloc modifier, so rewind to
3068 recover the variable name and continue parsing for the shifter. */
3069 *str = p;
3070 return parse_shifter_operand_imm (str, operand, mode);
3071 }
3072
3073 return parse_shifter_operand (str, operand, mode);
3074 }
3075
3076 /* Parse all forms of an address expression. Information is written
3077 to *OPERAND and/or inst.reloc.
3078
3079 The A64 instruction set has the following addressing modes:
3080
3081 Offset
3082 [base] // in SIMD ld/st structure
3083 [base{,#0}] // in ld/st exclusive
3084 [base{,#imm}]
3085 [base,Xm{,LSL #imm}]
3086 [base,Xm,SXTX {#imm}]
3087 [base,Wm,(S|U)XTW {#imm}]
3088 Pre-indexed
3089 [base,#imm]!
3090 Post-indexed
3091 [base],#imm
3092 [base],Xm // in SIMD ld/st structure
3093 PC-relative (literal)
3094 label
3095 =immediate
3096
3097 (As a convenience, the notation "=immediate" is permitted in conjunction
3098 with the pc-relative literal load instructions to automatically place an
3099 immediate value or symbolic address in a nearby literal pool and generate
3100 a hidden label which references it.)
3101
3102 Upon a successful parsing, the address structure in *OPERAND will be
3103 filled in the following way:
3104
3105 .base_regno = <base>
3106 .offset.is_reg // 1 if the offset is a register
3107 .offset.imm = <imm>
3108 .offset.regno = <Rm>
3109
3110 For different addressing modes defined in the A64 ISA:
3111
3112 Offset
3113 .pcrel=0; .preind=1; .postind=0; .writeback=0
3114 Pre-indexed
3115 .pcrel=0; .preind=1; .postind=0; .writeback=1
3116 Post-indexed
3117 .pcrel=0; .preind=0; .postind=1; .writeback=1
3118 PC-relative (literal)
3119 .pcrel=1; .preind=1; .postind=0; .writeback=0
3120
3121 The shift/extension information, if any, will be stored in .shifter.
3122
3123 It is the caller's responsibility to check for addressing modes not
3124 supported by the instruction, and to set inst.reloc.type. */
3125
3126 static bfd_boolean
3127 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
3128 int accept_reg_post_index)
3129 {
3130 char *p = *str;
3131 int reg;
3132 int isreg32, isregzero;
3133 expressionS *exp = &inst.reloc.exp;
3134
3135 if (! skip_past_char (&p, '['))
3136 {
3137 /* =immediate or label. */
3138 operand->addr.pcrel = 1;
3139 operand->addr.preind = 1;
3140
3141 /* #:<reloc_op>:<symbol> */
3142 skip_past_char (&p, '#');
3143 if (reloc && skip_past_char (&p, ':'))
3144 {
3145 bfd_reloc_code_real_type ty;
3146 struct reloc_table_entry *entry;
3147
3148 /* Try to parse a relocation modifier. Anything else is
3149 an error. */
3150 entry = find_reloc_table_entry (&p);
3151 if (! entry)
3152 {
3153 set_syntax_error (_("unknown relocation modifier"));
3154 return FALSE;
3155 }
3156
3157 switch (operand->type)
3158 {
3159 case AARCH64_OPND_ADDR_PCREL21:
3160 /* adr */
3161 ty = entry->adr_type;
3162 break;
3163
3164 default:
3165 ty = entry->ld_literal_type;
3166 break;
3167 }
3168
3169 if (ty == 0)
3170 {
3171 set_syntax_error
3172 (_("this relocation modifier is not allowed on this "
3173 "instruction"));
3174 return FALSE;
3175 }
3176
3177 /* #:<reloc_op>: */
3178 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3179 {
3180 set_syntax_error (_("invalid relocation expression"));
3181 return FALSE;
3182 }
3183
3184 /* #:<reloc_op>:<expr> */
3185 /* Record the relocation type. */
3186 inst.reloc.type = ty;
3187 inst.reloc.pc_rel = entry->pc_rel;
3188 }
3189 else
3190 {
3191
3192 if (skip_past_char (&p, '='))
3193 /* =immediate; need to generate the literal in the literal pool. */
3194 inst.gen_lit_pool = 1;
3195
3196 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3197 {
3198 set_syntax_error (_("invalid address"));
3199 return FALSE;
3200 }
3201 }
3202
3203 *str = p;
3204 return TRUE;
3205 }
3206
3207 /* [ */
3208
3209 /* Accept SP and reject ZR */
3210 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
3211 if (reg == PARSE_FAIL || isreg32)
3212 {
3213 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3214 return FALSE;
3215 }
3216 operand->addr.base_regno = reg;
3217
3218 /* [Xn */
3219 if (skip_past_comma (&p))
3220 {
3221 /* [Xn, */
3222 operand->addr.preind = 1;
3223
3224 /* Reject SP and accept ZR */
3225 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3226 if (reg != PARSE_FAIL)
3227 {
3228 /* [Xn,Rm */
3229 operand->addr.offset.regno = reg;
3230 operand->addr.offset.is_reg = 1;
3231 /* Shifted index. */
3232 if (skip_past_comma (&p))
3233 {
3234 /* [Xn,Rm, */
3235 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3236 /* Use the diagnostics set in parse_shift, so do not set a new
3237 error message here. */
3238 return FALSE;
3239 }
3240 /* We only accept:
3241 [base,Xm{,LSL #imm}]
3242 [base,Xm,SXTX {#imm}]
3243 [base,Wm,(S|U)XTW {#imm}] */
3244 if (operand->shifter.kind == AARCH64_MOD_NONE
3245 || operand->shifter.kind == AARCH64_MOD_LSL
3246 || operand->shifter.kind == AARCH64_MOD_SXTX)
3247 {
3248 if (isreg32)
3249 {
3250 set_syntax_error (_("invalid use of 32-bit register offset"));
3251 return FALSE;
3252 }
3253 }
3254 else if (!isreg32)
3255 {
3256 set_syntax_error (_("invalid use of 64-bit register offset"));
3257 return FALSE;
3258 }
3259 }
3260 else
3261 {
3262 /* [Xn,#:<reloc_op>:<symbol> */
3263 skip_past_char (&p, '#');
3264 if (reloc && skip_past_char (&p, ':'))
3265 {
3266 struct reloc_table_entry *entry;
3267
3268 /* Try to parse a relocation modifier. Anything else is
3269 an error. */
3270 if (!(entry = find_reloc_table_entry (&p)))
3271 {
3272 set_syntax_error (_("unknown relocation modifier"));
3273 return FALSE;
3274 }
3275
3276 if (entry->ldst_type == 0)
3277 {
3278 set_syntax_error
3279 (_("this relocation modifier is not allowed on this "
3280 "instruction"));
3281 return FALSE;
3282 }
3283
3284 /* [Xn,#:<reloc_op>: */
3285 /* We now have the group relocation table entry corresponding to
3286 the name in the assembler source. Next, we parse the
3287 expression. */
3288 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3289 {
3290 set_syntax_error (_("invalid relocation expression"));
3291 return FALSE;
3292 }
3293
3294 /* [Xn,#:<reloc_op>:<expr> */
3295 /* Record the load/store relocation type. */
3296 inst.reloc.type = entry->ldst_type;
3297 inst.reloc.pc_rel = entry->pc_rel;
3298 }
3299 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3300 {
3301 set_syntax_error (_("invalid expression in the address"));
3302 return FALSE;
3303 }
3304 /* [Xn,<expr> */
3305 }
3306 }
3307
3308 if (! skip_past_char (&p, ']'))
3309 {
3310 set_syntax_error (_("']' expected"));
3311 return FALSE;
3312 }
3313
3314 if (skip_past_char (&p, '!'))
3315 {
3316 if (operand->addr.preind && operand->addr.offset.is_reg)
3317 {
3318 set_syntax_error (_("register offset not allowed in pre-indexed "
3319 "addressing mode"));
3320 return FALSE;
3321 }
3322 /* [Xn]! */
3323 operand->addr.writeback = 1;
3324 }
3325 else if (skip_past_comma (&p))
3326 {
3327 /* [Xn], */
3328 operand->addr.postind = 1;
3329 operand->addr.writeback = 1;
3330
3331 if (operand->addr.preind)
3332 {
3333 set_syntax_error (_("cannot combine pre- and post-indexing"));
3334 return FALSE;
3335 }
3336
3337 if (accept_reg_post_index
3338 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3339 &isregzero)) != PARSE_FAIL)
3340 {
3341 /* [Xn],Xm */
3342 if (isreg32)
3343 {
3344 set_syntax_error (_("invalid 32-bit register offset"));
3345 return FALSE;
3346 }
3347 operand->addr.offset.regno = reg;
3348 operand->addr.offset.is_reg = 1;
3349 }
3350 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3351 {
3352 /* [Xn],#expr */
3353 set_syntax_error (_("invalid expression in the address"));
3354 return FALSE;
3355 }
3356 }
3357
3358 /* If at this point neither .preind nor .postind is set, we have a
3359 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3360 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3361 {
3362 if (operand->addr.writeback)
3363 {
3364 /* Reject [Rn]! */
3365 set_syntax_error (_("missing offset in the pre-indexed address"));
3366 return FALSE;
3367 }
3368 operand->addr.preind = 1;
3369 inst.reloc.exp.X_op = O_constant;
3370 inst.reloc.exp.X_add_number = 0;
3371 }
3372
3373 *str = p;
3374 return TRUE;
3375 }
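/* For example, the abstract addressing forms documented above
   parse_address_main correspond to concrete operands such as:

     ldr x0, [x1]              offset, [base]
     ldr x0, [x1, #8]          offset, [base,#imm]
     ldr x0, [x1, x2, lsl #3]  offset, [base,Xm{,LSL #imm}]
     ldr x0, [x1, #8]!         pre-indexed
     ldr x0, [x1], #8          post-indexed
     ldr x0, =0xcafe           literal pool (sets inst.gen_lit_pool)
     ldr x0, label             pc-relative literal  */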
3376
3377 /* Return TRUE on success; otherwise return FALSE. */
3378 static bfd_boolean
3379 parse_address (char **str, aarch64_opnd_info *operand,
3380 int accept_reg_post_index)
3381 {
3382 return parse_address_main (str, operand, 0, accept_reg_post_index);
3383 }
3384
3385 /* Return TRUE on success; otherwise return FALSE. */
3386 static bfd_boolean
3387 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3388 {
3389 return parse_address_main (str, operand, 1, 0);
3390 }
3391
3392 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3393 Return TRUE on success; otherwise return FALSE. */
3394 static bfd_boolean
3395 parse_half (char **str, int *internal_fixup_p)
3396 {
3397 char *p, *saved;
3398 int dummy;
3399
3400 p = *str;
3401 skip_past_char (&p, '#');
3402
3403 gas_assert (internal_fixup_p);
3404 *internal_fixup_p = 0;
3405
3406 if (*p == ':')
3407 {
3408 struct reloc_table_entry *entry;
3409
3410 /* Try to parse a relocation. Anything else is an error. */
3411 ++p;
3412 if (!(entry = find_reloc_table_entry (&p)))
3413 {
3414 set_syntax_error (_("unknown relocation modifier"));
3415 return FALSE;
3416 }
3417
3418 if (entry->movw_type == 0)
3419 {
3420 set_syntax_error
3421 (_("this relocation modifier is not allowed on this instruction"));
3422 return FALSE;
3423 }
3424
3425 inst.reloc.type = entry->movw_type;
3426 }
3427 else
3428 *internal_fixup_p = 1;
3429
3430 /* Avoid parsing a register as a general symbol. */
3431 saved = p;
3432 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3433 return FALSE;
3434 p = saved;
3435
3436 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3437 return FALSE;
3438
3439 *str = p;
3440 return TRUE;
3441 }
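/* For example, parse_half handles MOVZ/MOVN/MOVK operands such as:

     movz x0, #0x1234            plain immediate, *INTERNAL_FIXUP_P set
     movz x0, #:abs_g1:sym       BFD_RELOC_AARCH64_MOVW_G1
     movk x0, #:abs_g0_nc:sym    BFD_RELOC_AARCH64_MOVW_G0_NC  */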
3442
3443 /* Parse an operand for an ADRP instruction:
3444 ADRP <Xd>, <label>
3445 Return TRUE on success; otherwise return FALSE. */
3446
3447 static bfd_boolean
3448 parse_adrp (char **str)
3449 {
3450 char *p;
3451
3452 p = *str;
3453 if (*p == ':')
3454 {
3455 struct reloc_table_entry *entry;
3456
3457 /* Try to parse a relocation. Anything else is an error. */
3458 ++p;
3459 if (!(entry = find_reloc_table_entry (&p)))
3460 {
3461 set_syntax_error (_("unknown relocation modifier"));
3462 return FALSE;
3463 }
3464
3465 if (entry->adrp_type == 0)
3466 {
3467 set_syntax_error
3468 (_("this relocation modifier is not allowed on this instruction"));
3469 return FALSE;
3470 }
3471
3472 inst.reloc.type = entry->adrp_type;
3473 }
3474 else
3475 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3476
3477 inst.reloc.pc_rel = 1;
3478
3479 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3480 return FALSE;
3481
3482 *str = p;
3483 return TRUE;
3484 }
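/* For example:

     adrp x0, sym            defaults to BFD_RELOC_AARCH64_ADR_HI21_PCREL
     adrp x0, :got:sym       BFD_RELOC_AARCH64_ADR_GOT_PAGE
     adrp x0, :tlsdesc:sym   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21  */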
3485
3486 /* Miscellaneous. */
3487
3488 /* Parse an option for a preload instruction. Returns the encoding for the
3489 option, or PARSE_FAIL. */
3490
3491 static int
3492 parse_pldop (char **str)
3493 {
3494 char *p, *q;
3495 const struct aarch64_name_value_pair *o;
3496
3497 p = q = *str;
3498 while (ISALNUM (*q))
3499 q++;
3500
3501 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3502 if (!o)
3503 return PARSE_FAIL;
3504
3505 *str = q;
3506 return o->value;
3507 }
3508
3509 /* Parse an option for a barrier instruction. Returns the encoding for the
3510 option, or PARSE_FAIL. */
3511
3512 static int
3513 parse_barrier (char **str)
3514 {
3515 char *p, *q;
3516 const asm_barrier_opt *o;
3517
3518 p = q = *str;
3519 while (ISALPHA (*q))
3520 q++;
3521
3522 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3523 if (!o)
3524 return PARSE_FAIL;
3525
3526 *str = q;
3527 return o->value;
3528 }
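/* For example, the option names in "dmb ish", "dsb sy" and "dmb ishld" are
   looked up in aarch64_barrier_opt_hsh and their encodings returned.  */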
3529
3530 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3531 Returns the encoding for the option, or PARSE_FAIL.
3532
3533 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3534 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3535
3536 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3537 field, otherwise as a system register.
3538 */
3539
3540 static int
3541 parse_sys_reg (char **str, struct hash_control *sys_regs,
3542 int imple_defined_p, int pstatefield_p)
3543 {
3544 char *p, *q;
3545 char buf[32];
3546 const aarch64_sys_reg *o;
3547 int value;
3548
3549 p = buf;
3550 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3551 if (p < buf + 31)
3552 *p++ = TOLOWER (*q);
3553 *p = '\0';
3554 /* Assert that BUF is large enough. */
3555 gas_assert (p - buf == q - *str);
3556
3557 o = hash_find (sys_regs, buf);
3558 if (!o)
3559 {
3560 if (!imple_defined_p)
3561 return PARSE_FAIL;
3562 else
3563 {
3564 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3565 unsigned int op0, op1, cn, cm, op2;
3566
3567 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3568 != 5)
3569 return PARSE_FAIL;
3570 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3571 return PARSE_FAIL;
3572 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3573 }
3574 }
3575 else
3576 {
3577 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3578 as_bad (_("selected processor does not support PSTATE field "
3579 "name '%s'"), buf);
3580 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3581 as_bad (_("selected processor does not support system register "
3582 "name '%s'"), buf);
3583 if (aarch64_sys_reg_deprecated_p (o))
3584 as_warn (_("system register name '%s' is deprecated and may be "
3585 "removed in a future release"), buf);
3586 value = o->value;
3587 }
3588
3589 *str = q;
3590 return value;
3591 }
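/* For example, "mrs x0, s3_0_c15_c2_0" uses the implementation-defined
   form; with op0 = 3, op1 = 0, Cn = 15, Cm = 2 and op2 = 0 the value
   computed above is (3 << 14) | (0 << 11) | (15 << 7) | (2 << 3) | 0,
   i.e. 0xc790.  */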
3592
3593 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3594 for the option, or NULL. */
3595
3596 static const aarch64_sys_ins_reg *
3597 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3598 {
3599 char *p, *q;
3600 char buf[32];
3601 const aarch64_sys_ins_reg *o;
3602
3603 p = buf;
3604 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3605 if (p < buf + 31)
3606 *p++ = TOLOWER (*q);
3607 *p = '\0';
3608
3609 o = hash_find (sys_ins_regs, buf);
3610 if (!o)
3611 return NULL;
3612
3613 *str = q;
3614 return o;
3615 }
3616 \f
3617 #define po_char_or_fail(chr) do { \
3618 if (! skip_past_char (&str, chr)) \
3619 goto failure; \
3620 } while (0)
3621
3622 #define po_reg_or_fail(regtype) do { \
3623 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3624 if (val == PARSE_FAIL) \
3625 { \
3626 set_default_error (); \
3627 goto failure; \
3628 } \
3629 } while (0)
3630
3631 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3632 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3633 &isreg32, &isregzero); \
3634 if (val == PARSE_FAIL) \
3635 { \
3636 set_default_error (); \
3637 goto failure; \
3638 } \
3639 info->reg.regno = val; \
3640 if (isreg32) \
3641 info->qualifier = AARCH64_OPND_QLF_W; \
3642 else \
3643 info->qualifier = AARCH64_OPND_QLF_X; \
3644 } while (0)
3645
3646 #define po_imm_nc_or_fail() do { \
3647 if (! parse_constant_immediate (&str, &val)) \
3648 goto failure; \
3649 } while (0)
3650
3651 #define po_imm_or_fail(min, max) do { \
3652 if (! parse_constant_immediate (&str, &val)) \
3653 goto failure; \
3654 if (val < min || val > max) \
3655 { \
3656 set_fatal_syntax_error (_("immediate value out of range "\
3657 #min " to "#max)); \
3658 goto failure; \
3659 } \
3660 } while (0)
3661
3662 #define po_misc_or_fail(expr) do { \
3663 if (!expr) \
3664 goto failure; \
3665 } while (0)
3666 \f
3667 /* encode the 12-bit imm field of Add/sub immediate */
3668 static inline uint32_t
3669 encode_addsub_imm (uint32_t imm)
3670 {
3671 return imm << 10;
3672 }
3673
3674 /* encode the shift amount field of Add/sub immediate */
3675 static inline uint32_t
3676 encode_addsub_imm_shift_amount (uint32_t cnt)
3677 {
3678 return cnt << 22;
3679 }
3680
3681
3682 /* encode the imm field of Adr instruction */
3683 static inline uint32_t
3684 encode_adr_imm (uint32_t imm)
3685 {
3686 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3687 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3688 }
3689
3690 /* encode the immediate field of Move wide immediate */
3691 static inline uint32_t
3692 encode_movw_imm (uint32_t imm)
3693 {
3694 return imm << 5;
3695 }
3696
3697 /* encode the 26-bit offset of unconditional branch */
3698 static inline uint32_t
3699 encode_branch_ofs_26 (uint32_t ofs)
3700 {
3701 return ofs & ((1 << 26) - 1);
3702 }
3703
3704 /* encode the 19-bit offset of conditional branch and compare & branch */
3705 static inline uint32_t
3706 encode_cond_branch_ofs_19 (uint32_t ofs)
3707 {
3708 return (ofs & ((1 << 19) - 1)) << 5;
3709 }
3710
3711 /* encode the 19-bit offset of ld literal */
3712 static inline uint32_t
3713 encode_ld_lit_ofs_19 (uint32_t ofs)
3714 {
3715 return (ofs & ((1 << 19) - 1)) << 5;
3716 }
3717
3718 /* Encode the 14-bit offset of test & branch. */
3719 static inline uint32_t
3720 encode_tst_branch_ofs_14 (uint32_t ofs)
3721 {
3722 return (ofs & ((1 << 14) - 1)) << 5;
3723 }
3724
3725 /* Encode the 16-bit imm field of svc/hvc/smc. */
3726 static inline uint32_t
3727 encode_svc_imm (uint32_t imm)
3728 {
3729 return imm << 5;
3730 }
3731
3732 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3733 static inline uint32_t
3734 reencode_addsub_switch_add_sub (uint32_t opcode)
3735 {
3736 return opcode ^ (1 << 30);
3737 }
3738
3739 static inline uint32_t
3740 reencode_movzn_to_movz (uint32_t opcode)
3741 {
3742 return opcode | (1 << 30);
3743 }
3744
3745 static inline uint32_t
3746 reencode_movzn_to_movn (uint32_t opcode)
3747 {
3748 return opcode & ~(1 << 30);
3749 }
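/* For example, in the add/sub immediate class bit 30 is the op bit
   (0 for ADD, 1 for SUB), and in the move wide class bit 30 of the opc
   field distinguishes MOVN (0) from MOVZ (1); toggling or forcing that
   single bit is enough to switch between the variants.  */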
3750
3751 /* Overall per-instruction processing. */
3752
3753 /* We need to be able to fix up arbitrary expressions in some statements.
3754 This is so that we can handle symbols that are an arbitrary distance from
3755 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3756 which returns part of an address in a form which will be valid for
3757 a data instruction. We do this by pushing the expression into a symbol
3758 in the expr_section, and creating a fix for that. */
3759
3760 static fixS *
3761 fix_new_aarch64 (fragS * frag,
3762 int where,
3763 short int size, expressionS * exp, int pc_rel, int reloc)
3764 {
3765 fixS *new_fix;
3766
3767 switch (exp->X_op)
3768 {
3769 case O_constant:
3770 case O_symbol:
3771 case O_add:
3772 case O_subtract:
3773 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3774 break;
3775
3776 default:
3777 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3778 pc_rel, reloc);
3779 break;
3780 }
3781 return new_fix;
3782 }
3783 \f
3784 /* Diagnostics on operands errors. */
3785
3786 /* By default, output a verbose error message.
3787 Use -mno-verbose-error to disable it. */
3788 static int verbose_error_p = 1;
3789
3790 #ifdef DEBUG_AARCH64
3791 /* N.B. this is only for the purpose of debugging. */
3792 const char* operand_mismatch_kind_names[] =
3793 {
3794 "AARCH64_OPDE_NIL",
3795 "AARCH64_OPDE_RECOVERABLE",
3796 "AARCH64_OPDE_SYNTAX_ERROR",
3797 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3798 "AARCH64_OPDE_INVALID_VARIANT",
3799 "AARCH64_OPDE_OUT_OF_RANGE",
3800 "AARCH64_OPDE_UNALIGNED",
3801 "AARCH64_OPDE_REG_LIST",
3802 "AARCH64_OPDE_OTHER_ERROR",
3803 };
3804 #endif /* DEBUG_AARCH64 */
3805
3806 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3807
3808 When multiple errors of different kinds are found in the same assembly
3809 line, only the error of the highest severity will be picked up for
3810 issuing the diagnostics. */
3811
3812 static inline bfd_boolean
3813 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3814 enum aarch64_operand_error_kind rhs)
3815 {
3816 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3817 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3818 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3819 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3820 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3821 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3822 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3823 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3824 return lhs > rhs;
3825 }
3826
3827 /* Helper routine to get the mnemonic name from the assembly instruction
3828 line; it should only be called for diagnostic purposes, as a string
3829 copy operation is involved, which may affect runtime performance if
3830 used elsewhere. */
3831
3832 static const char*
3833 get_mnemonic_name (const char *str)
3834 {
3835 static char mnemonic[32];
3836 char *ptr;
3837
3838 /* Copy the first 31 bytes and assume that the full name is included. */
3839 strncpy (mnemonic, str, 31);
3840 mnemonic[31] = '\0';
3841
3842 /* Scan up to the end of the mnemonic, which must end in white space,
3843 '.', or end of string. */
3844 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3845 ;
3846
3847 *ptr = '\0';
3848
3849 /* Append '...' to the truncated long name. */
3850 if (ptr - mnemonic == 31)
3851 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3852
3853 return mnemonic;
3854 }
3855
3856 static void
3857 reset_aarch64_instruction (aarch64_instruction *instruction)
3858 {
3859 memset (instruction, '\0', sizeof (aarch64_instruction));
3860 instruction->reloc.type = BFD_RELOC_UNUSED;
3861 }
3862
3863 /* Data structures storing one user error in the assembly code related to
3864 operands. */
3865
3866 struct operand_error_record
3867 {
3868 const aarch64_opcode *opcode;
3869 aarch64_operand_error detail;
3870 struct operand_error_record *next;
3871 };
3872
3873 typedef struct operand_error_record operand_error_record;
3874
3875 struct operand_errors
3876 {
3877 operand_error_record *head;
3878 operand_error_record *tail;
3879 };
3880
3881 typedef struct operand_errors operand_errors;
3882
3883 /* Top-level data structure reporting user errors for the current line of
3884 the assembly code.
3885 The way md_assemble works is that all opcodes sharing the same mnemonic
3886 name are iterated over to find a match to the assembly line. In this
3887 data structure, each such opcode will have one operand_error_record
3888 allocated and inserted. In other words, excessive errors related to
3889 a single opcode are disregarded.
3890 operand_errors operand_error_report;
3891
3892 /* Free record nodes. */
3893 static operand_error_record *free_opnd_error_record_nodes = NULL;
3894
3895 /* Initialize the data structure that stores the operand mismatch
3896 information on assembling one line of the assembly code. */
3897 static void
3898 init_operand_error_report (void)
3899 {
3900 if (operand_error_report.head != NULL)
3901 {
3902 gas_assert (operand_error_report.tail != NULL);
3903 operand_error_report.tail->next = free_opnd_error_record_nodes;
3904 free_opnd_error_record_nodes = operand_error_report.head;
3905 operand_error_report.head = NULL;
3906 operand_error_report.tail = NULL;
3907 return;
3908 }
3909 gas_assert (operand_error_report.tail == NULL);
3910 }
3911
3912 /* Return TRUE if some operand error has been recorded during the
3913 parsing of the current assembly line using the opcode *OPCODE;
3914 otherwise return FALSE. */
3915 static inline bfd_boolean
3916 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3917 {
3918 operand_error_record *record = operand_error_report.head;
3919 return record && record->opcode == opcode;
3920 }
3921
3922 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3923 OPCODE field is initialized with OPCODE.
3924 N.B. there is only one record for each opcode, i.e. at most one error is
3925 recorded for each instruction template. */
3926
3927 static void
3928 add_operand_error_record (const operand_error_record* new_record)
3929 {
3930 const aarch64_opcode *opcode = new_record->opcode;
3931 operand_error_record* record = operand_error_report.head;
3932
3933 /* The record may have been created for this opcode. If not, we need
3934 to prepare one. */
3935 if (! opcode_has_operand_error_p (opcode))
3936 {
3937 /* Get one empty record. */
3938 if (free_opnd_error_record_nodes == NULL)
3939 {
3940 record = xmalloc (sizeof (operand_error_record));
3941 if (record == NULL)
3942 abort ();
3943 }
3944 else
3945 {
3946 record = free_opnd_error_record_nodes;
3947 free_opnd_error_record_nodes = record->next;
3948 }
3949 record->opcode = opcode;
3950 /* Insert at the head. */
3951 record->next = operand_error_report.head;
3952 operand_error_report.head = record;
3953 if (operand_error_report.tail == NULL)
3954 operand_error_report.tail = record;
3955 }
3956 else if (record->detail.kind != AARCH64_OPDE_NIL
3957 && record->detail.index <= new_record->detail.index
3958 && operand_error_higher_severity_p (record->detail.kind,
3959 new_record->detail.kind))
3960 {
3961 /* In the case of multiple errors found on operands related to a
3962 single opcode, only record the error of the leftmost operand and
3963 only if the error is of higher severity. */
3964 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3965 " the existing error %s on operand %d",
3966 operand_mismatch_kind_names[new_record->detail.kind],
3967 new_record->detail.index,
3968 operand_mismatch_kind_names[record->detail.kind],
3969 record->detail.index);
3970 return;
3971 }
3972
3973 record->detail = new_record->detail;
3974 }
3975
3976 static inline void
3977 record_operand_error_info (const aarch64_opcode *opcode,
3978 aarch64_operand_error *error_info)
3979 {
3980 operand_error_record record;
3981 record.opcode = opcode;
3982 record.detail = *error_info;
3983 add_operand_error_record (&record);
3984 }
3985
3986 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3987 error message *ERROR, for operand IDX (count from 0). */
3988
3989 static void
3990 record_operand_error (const aarch64_opcode *opcode, int idx,
3991 enum aarch64_operand_error_kind kind,
3992 const char* error)
3993 {
3994 aarch64_operand_error info;
3995 memset(&info, 0, sizeof (info));
3996 info.index = idx;
3997 info.kind = kind;
3998 info.error = error;
3999 record_operand_error_info (opcode, &info);
4000 }
4001
4002 static void
4003 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4004 enum aarch64_operand_error_kind kind,
4005 const char* error, const int *extra_data)
4006 {
4007 aarch64_operand_error info;
4008 info.index = idx;
4009 info.kind = kind;
4010 info.error = error;
4011 info.data[0] = extra_data[0];
4012 info.data[1] = extra_data[1];
4013 info.data[2] = extra_data[2];
4014 record_operand_error_info (opcode, &info);
4015 }
4016
4017 static void
4018 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4019 const char* error, int lower_bound,
4020 int upper_bound)
4021 {
4022 int data[3] = {lower_bound, upper_bound, 0};
4023 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4024 error, data);
4025 }
4026
4027 /* Remove the operand error record for *OPCODE. */
4028 static void ATTRIBUTE_UNUSED
4029 remove_operand_error_record (const aarch64_opcode *opcode)
4030 {
4031 if (opcode_has_operand_error_p (opcode))
4032 {
4033 operand_error_record* record = operand_error_report.head;
4034 gas_assert (record != NULL && operand_error_report.tail != NULL);
4035 operand_error_report.head = record->next;
4036 record->next = free_opnd_error_record_nodes;
4037 free_opnd_error_record_nodes = record;
4038 if (operand_error_report.head == NULL)
4039 {
4040 gas_assert (operand_error_report.tail == record);
4041 operand_error_report.tail = NULL;
4042 }
4043 }
4044 }
4045
4046 /* Given the instruction in *INSTR, return the index of the best matched
4047 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4048
4049 Return -1 if there is no qualifier sequence; return the first match
4050 if multiple matches are found. */
4051
4052 static int
4053 find_best_match (const aarch64_inst *instr,
4054 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4055 {
4056 int i, num_opnds, max_num_matched, idx;
4057
4058 num_opnds = aarch64_num_of_operands (instr->opcode);
4059 if (num_opnds == 0)
4060 {
4061 DEBUG_TRACE ("no operand");
4062 return -1;
4063 }
4064
4065 max_num_matched = 0;
4066 idx = -1;
4067
4068 /* For each pattern. */
4069 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4070 {
4071 int j, num_matched;
4072 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4073
4074 /* Most opcodes have far fewer patterns in the list. */
4075 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
4076 {
4077 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4078 if (i != 0 && idx == -1)
4079 /* If nothing has been matched, return the 1st sequence. */
4080 idx = 0;
4081 break;
4082 }
4083
4084 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4085 if (*qualifiers == instr->operands[j].qualifier)
4086 ++num_matched;
4087
4088 if (num_matched > max_num_matched)
4089 {
4090 max_num_matched = num_matched;
4091 idx = i;
4092 }
4093 }
4094
4095 DEBUG_TRACE ("return with %d", idx);
4096 return idx;
4097 }
4098
4099 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4100 corresponding operands in *INSTR. */
4101
4102 static inline void
4103 assign_qualifier_sequence (aarch64_inst *instr,
4104 const aarch64_opnd_qualifier_t *qualifiers)
4105 {
4106 int i = 0;
4107 int num_opnds = aarch64_num_of_operands (instr->opcode);
4108 gas_assert (num_opnds);
4109 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4110 instr->operands[i].qualifier = *qualifiers;
4111 }
4112
4113 /* Print operands for diagnostic purposes. */
4114
4115 static void
4116 print_operands (char *buf, const aarch64_opcode *opcode,
4117 const aarch64_opnd_info *opnds)
4118 {
4119 int i;
4120
4121 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4122 {
4123 const size_t size = 128;
4124 char str[size];
4125
4126 /* The opcode operand info is the primary reference; however we also
4127 look into inst->operands so that an optional operand supplied in the
4128 assembly line can still be printed.
4129 The two operand codes should be the same in all cases, apart from
4130 when the operand can be optional. */
4131 if (opcode->operands[i] == AARCH64_OPND_NIL
4132 || opnds[i].type == AARCH64_OPND_NIL)
4133 break;
4134
4135 /* Generate the operand string in STR. */
4136 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
4137
4138 /* Delimiter. */
4139 if (str[0] != '\0')
4140 strcat (buf, i == 0 ? " " : ",");
4141
4142 /* Append the operand string. */
4143 strcat (buf, str);
4144 }
4145 }
4146
4147 /* Send to stderr a string as information. */
4148
4149 static void
4150 output_info (const char *format, ...)
4151 {
4152 char *file;
4153 unsigned int line;
4154 va_list args;
4155
4156 as_where (&file, &line);
4157 if (file)
4158 {
4159 if (line != 0)
4160 fprintf (stderr, "%s:%u: ", file, line);
4161 else
4162 fprintf (stderr, "%s: ", file);
4163 }
4164 fprintf (stderr, _("Info: "));
4165 va_start (args, format);
4166 vfprintf (stderr, format, args);
4167 va_end (args);
4168 (void) putc ('\n', stderr);
4169 }
4170
4171 /* Output one operand error record. */
4172
4173 static void
4174 output_operand_error_record (const operand_error_record *record, char *str)
4175 {
4176 const aarch64_operand_error *detail = &record->detail;
4177 int idx = detail->index;
4178 const aarch64_opcode *opcode = record->opcode;
4179 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4180 : AARCH64_OPND_NIL);
4181
4182 switch (detail->kind)
4183 {
4184 case AARCH64_OPDE_NIL:
4185 gas_assert (0);
4186 break;
4187
4188 case AARCH64_OPDE_SYNTAX_ERROR:
4189 case AARCH64_OPDE_RECOVERABLE:
4190 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4191 case AARCH64_OPDE_OTHER_ERROR:
4192 /* Use the prepared error message if there is one; otherwise use the
4193 operand description string to describe the error. */
4194 if (detail->error != NULL)
4195 {
4196 if (idx < 0)
4197 as_bad (_("%s -- `%s'"), detail->error, str);
4198 else
4199 as_bad (_("%s at operand %d -- `%s'"),
4200 detail->error, idx + 1, str);
4201 }
4202 else
4203 {
4204 gas_assert (idx >= 0);
4205 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4206 aarch64_get_operand_desc (opd_code), str);
4207 }
4208 break;
4209
4210 case AARCH64_OPDE_INVALID_VARIANT:
4211 as_bad (_("operand mismatch -- `%s'"), str);
4212 if (verbose_error_p)
4213 {
4214 /* We will try to correct the erroneous instruction and also provide
4215 more information e.g. all other valid variants.
4216
4217 The string representation of the corrected instruction and other
4218 valid variants are generated by
4219
4220 1) obtaining the intermediate representation of the erroneous
4221 instruction;
4222 2) manipulating the IR, e.g. replacing the operand qualifier;
4223 3) printing out the instruction by calling the printer functions
4224 shared with the disassembler.
4225
4226 The limitation of this method is that the exact input assembly
4227 line cannot be accurately reproduced in some cases, for example an
4228 optional operand present in the actual assembly line will be
4229 omitted in the output; likewise for the optional syntax rules,
4230 e.g. the # before the immediate. Another limitation is that the
4231 assembly symbols and relocation operations in the assembly line
4232 currently cannot be printed out in the error report. Last but not
4233 least, when other errors co-exist with this error, the
4234 'corrected' instruction may still be incorrect, e.g. given
4235 'ldnp h0,h1,[x0,#6]!'
4236 this diagnosis will provide the version:
4237 'ldnp s0,s1,[x0,#6]!'
4238 which is still not right. */
4239 size_t len = strlen (get_mnemonic_name (str));
4240 int i, qlf_idx;
4241 bfd_boolean result;
4242 const size_t size = 2048;
4243 char buf[size];
4244 aarch64_inst *inst_base = &inst.base;
4245 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4246
4247 /* Init inst. */
4248 reset_aarch64_instruction (&inst);
4249 inst_base->opcode = opcode;
4250
4251 /* Reset the error report so that there is no side effect on the
4252 following operand parsing. */
4253 init_operand_error_report ();
4254
4255 /* Fill inst. */
4256 result = parse_operands (str + len, opcode)
4257 && programmer_friendly_fixup (&inst);
4258 gas_assert (result);
4259 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4260 NULL, NULL);
4261 gas_assert (!result);
4262
4263 /* Find the most matched qualifier sequence. */
4264 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4265 gas_assert (qlf_idx > -1);
4266
4267 /* Assign the qualifiers. */
4268 assign_qualifier_sequence (inst_base,
4269 opcode->qualifiers_list[qlf_idx]);
4270
4271 /* Print the hint. */
4272 output_info (_(" did you mean this?"));
4273 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4274 print_operands (buf, opcode, inst_base->operands);
4275 output_info (_(" %s"), buf);
4276
4277 /* Print out other valid variant(s) if there are any. */
4278 if (qlf_idx != 0 ||
4279 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4280 output_info (_(" other valid variant(s):"));
4281
4282 /* For each pattern. */
4283 qualifiers_list = opcode->qualifiers_list;
4284 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4285 {
4286 /* Most opcodes have far fewer patterns in the list. The first
4287 NIL qualifier indicates the end of the list. */
4288 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4289 break;
4290
4291 if (i != qlf_idx)
4292 {
4293 /* Mnemonics name. */
4294 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4295
4296 /* Assign the qualifiers. */
4297 assign_qualifier_sequence (inst_base, *qualifiers_list);
4298
4299 /* Print instruction. */
4300 print_operands (buf, opcode, inst_base->operands);
4301
4302 output_info (_(" %s"), buf);
4303 }
4304 }
4305 }
4306 break;
4307
4308 case AARCH64_OPDE_OUT_OF_RANGE:
4309 if (detail->data[0] != detail->data[1])
4310 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4311 detail->error ? detail->error : _("immediate value"),
4312 detail->data[0], detail->data[1], idx + 1, str);
4313 else
4314 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4315 detail->error ? detail->error : _("immediate value"),
4316 detail->data[0], idx + 1, str);
4317 break;
4318
4319 case AARCH64_OPDE_REG_LIST:
4320 if (detail->data[0] == 1)
4321 as_bad (_("invalid number of registers in the list; "
4322 "only 1 register is expected at operand %d -- `%s'"),
4323 idx + 1, str);
4324 else
4325 as_bad (_("invalid number of registers in the list; "
4326 "%d registers are expected at operand %d -- `%s'"),
4327 detail->data[0], idx + 1, str);
4328 break;
4329
4330 case AARCH64_OPDE_UNALIGNED:
4331 as_bad (_("immediate value should be a multiple of "
4332 "%d at operand %d -- `%s'"),
4333 detail->data[0], idx + 1, str);
4334 break;
4335
4336 default:
4337 gas_assert (0);
4338 break;
4339 }
4340 }
4341
4342 /* Process and output the error message about the operand mismatching.
4343
4344 When this function is called, the operand error information has
4345 already been collected for an assembly line; there will be multiple
4346 errors in the case of multiple instruction templates. Output the
4347 error message that most closely describes the problem. */
4348
4349 static void
4350 output_operand_error_report (char *str)
4351 {
4352 int largest_error_pos;
4353 const char *msg = NULL;
4354 enum aarch64_operand_error_kind kind;
4355 operand_error_record *curr;
4356 operand_error_record *head = operand_error_report.head;
4357 operand_error_record *record = NULL;
4358
4359 /* No error to report. */
4360 if (head == NULL)
4361 return;
4362
4363 gas_assert (head != NULL && operand_error_report.tail != NULL);
4364
4365 /* Only one error. */
4366 if (head == operand_error_report.tail)
4367 {
4368 DEBUG_TRACE ("single opcode entry with error kind: %s",
4369 operand_mismatch_kind_names[head->detail.kind]);
4370 output_operand_error_record (head, str);
4371 return;
4372 }
4373
4374 /* Find the error kind of the highest severity. */
4375 DEBUG_TRACE ("multiple opcode entries with error kind");
4376 kind = AARCH64_OPDE_NIL;
4377 for (curr = head; curr != NULL; curr = curr->next)
4378 {
4379 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4380 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4381 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4382 kind = curr->detail.kind;
4383 }
4384 gas_assert (kind != AARCH64_OPDE_NIL);
4385
4386 /* Pick one of the errors of KIND to report. */
4387 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4388 for (curr = head; curr != NULL; curr = curr->next)
4389 {
4390 if (curr->detail.kind != kind)
4391 continue;
4392 /* If there are multiple errors, pick up the one with the highest
4393 mismatching operand index. In the case of multiple errors with
4394 the equally highest operand index, pick up the first one or the
4395 first one with non-NULL error message. */
4396 if (curr->detail.index > largest_error_pos
4397 || (curr->detail.index == largest_error_pos && msg == NULL
4398 && curr->detail.error != NULL))
4399 {
4400 largest_error_pos = curr->detail.index;
4401 record = curr;
4402 msg = record->detail.error;
4403 }
4404 }
4405
4406 gas_assert (largest_error_pos != -2 && record != NULL);
4407 DEBUG_TRACE ("Pick up error kind %s to report",
4408 operand_mismatch_kind_names[record->detail.kind]);
4409
4410 /* Output. */
4411 output_operand_error_record (record, str);
4412 }
4413 \f
4414 /* Write an AARCH64 instruction to buf - always little-endian. */
4415 static void
4416 put_aarch64_insn (char *buf, uint32_t insn)
4417 {
4418 unsigned char *where = (unsigned char *) buf;
4419 where[0] = insn;
4420 where[1] = insn >> 8;
4421 where[2] = insn >> 16;
4422 where[3] = insn >> 24;
4423 }
4424
4425 static uint32_t
4426 get_aarch64_insn (char *buf)
4427 {
4428 unsigned char *where = (unsigned char *) buf;
4429 uint32_t result;
4430 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4431 return result;
4432 }
4433
4434 static void
4435 output_inst (struct aarch64_inst *new_inst)
4436 {
4437 char *to = NULL;
4438
4439 to = frag_more (INSN_SIZE);
4440
4441 frag_now->tc_frag_data.recorded = 1;
4442
4443 put_aarch64_insn (to, inst.base.value);
4444
4445 if (inst.reloc.type != BFD_RELOC_UNUSED)
4446 {
4447 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4448 INSN_SIZE, &inst.reloc.exp,
4449 inst.reloc.pc_rel,
4450 inst.reloc.type);
4451 DEBUG_TRACE ("Prepared relocation fix up");
4452 /* Don't check the addend value against the instruction size,
4453 that's the job of our code in md_apply_fix(). */
4454 fixp->fx_no_overflow = 1;
4455 if (new_inst != NULL)
4456 fixp->tc_fix_data.inst = new_inst;
4457 if (aarch64_gas_internal_fixup_p ())
4458 {
4459 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4460 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4461 fixp->fx_addnumber = inst.reloc.flags;
4462 }
4463 }
4464
4465 dwarf2_emit_insn (INSN_SIZE);
4466 }
4467
4468 /* Link together opcodes of the same name. */
4469
4470 struct templates
4471 {
4472 aarch64_opcode *opcode;
4473 struct templates *next;
4474 };
4475
4476 typedef struct templates templates;
4477
4478 static templates *
4479 lookup_mnemonic (const char *start, int len)
4480 {
4481 templates *templ = NULL;
4482
4483 templ = hash_find_n (aarch64_ops_hsh, start, len);
4484 return templ;
4485 }
4486
4487 /* Subroutine of md_assemble, responsible for looking up the primary
4488 opcode from the mnemonic the user wrote. STR points to the
4489 beginning of the mnemonic. */
4490
4491 static templates *
4492 opcode_lookup (char **str)
4493 {
4494 char *end, *base;
4495 const aarch64_cond *cond;
4496 char condname[16];
4497 int len;
4498
4499 /* Scan up to the end of the mnemonic, which must end in white space,
4500 '.', or end of string. */
4501 for (base = end = *str; is_part_of_name(*end); end++)
4502 if (*end == '.')
4503 break;
4504
4505 if (end == base)
4506 return 0;
4507
4508 inst.cond = COND_ALWAYS;
4509
4510 /* Handle a possible condition. */
4511 if (end[0] == '.')
4512 {
4513 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4514 if (cond)
4515 {
4516 inst.cond = cond->value;
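/* Skip over the '.' and the two-character condition name.  */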
4517 *str = end + 3;
4518 }
4519 else
4520 {
4521 *str = end;
4522 return 0;
4523 }
4524 }
4525 else
4526 *str = end;
4527
4528 len = end - base;
4529
4530 if (inst.cond == COND_ALWAYS)
4531 {
4532 /* Look for unaffixed mnemonic. */
4533 return lookup_mnemonic (base, len);
4534 }
4535 else if (len <= 13)
4536 {
4537 /* Append ".c" to the mnemonic if conditional. */
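/* N.B. the limit of 13 keeps len + 2 within condname[16] below.  */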
4538 memcpy (condname, base, len);
4539 memcpy (condname + len, ".c", 2);
4540 base = condname;
4541 len += 2;
4542 return lookup_mnemonic (base, len);
4543 }
4544
4545 return NULL;
4546 }
4547
4548 /* Internal helper routine converting a vector neon_type_el structure
4549 *VECTYPE to a corresponding operand qualifier. */
4550
4551 static inline aarch64_opnd_qualifier_t
4552 vectype_to_qualifier (const struct neon_type_el *vectype)
4553 {
4554 /* Element size in bytes indexed by neon_el_type. */
4555 const unsigned char ele_size[5]
4556 = {1, 2, 4, 8, 16};
4557
4558 if (!vectype->defined || vectype->type == NT_invtype)
4559 goto vectype_conversion_fail;
4560
4561 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4562
4563 if (vectype->defined & NTA_HASINDEX)
4564 /* Vector element register. */
4565 return AARCH64_OPND_QLF_S_B + vectype->type;
4566 else
4567 {
4568 /* Vector register. */
4569 int reg_size = ele_size[vectype->type] * vectype->width;
4570 unsigned offset;
4571 if (reg_size != 16 && reg_size != 8)
4572 goto vectype_conversion_fail;
4573 /* The conversion is calculated based on the relation of the order of
4574 qualifiers to the vector element size and vector register size. */
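/* For example, ".4s" has type NT_s (2) and width 4, giving reg_size 16
 and offset (2 << 1) + (16 >> 4) = 5, i.e. AARCH64_OPND_QLF_V_8B + 5.  */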
4575 offset = (vectype->type == NT_q)
4576 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4577 gas_assert (offset <= 8);
4578 return AARCH64_OPND_QLF_V_8B + offset;
4579 }
4580
4581 vectype_conversion_fail:
4582 first_error (_("bad vector arrangement type"));
4583 return AARCH64_OPND_QLF_NIL;
4584 }
4585
4586 /* Process an optional operand that has been omitted from the assembly line.
4587 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4588 instruction's opcode entry while IDX is the index of this omitted operand.
4589 */
4590
4591 static void
4592 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4593 int idx, aarch64_opnd_info *operand)
4594 {
4595 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4596 gas_assert (optional_operand_p (opcode, idx));
4597 gas_assert (!operand->present);
4598
4599 switch (type)
4600 {
4601 case AARCH64_OPND_Rd:
4602 case AARCH64_OPND_Rn:
4603 case AARCH64_OPND_Rm:
4604 case AARCH64_OPND_Rt:
4605 case AARCH64_OPND_Rt2:
4606 case AARCH64_OPND_Rs:
4607 case AARCH64_OPND_Ra:
4608 case AARCH64_OPND_Rt_SYS:
4609 case AARCH64_OPND_Rd_SP:
4610 case AARCH64_OPND_Rn_SP:
4611 case AARCH64_OPND_Fd:
4612 case AARCH64_OPND_Fn:
4613 case AARCH64_OPND_Fm:
4614 case AARCH64_OPND_Fa:
4615 case AARCH64_OPND_Ft:
4616 case AARCH64_OPND_Ft2:
4617 case AARCH64_OPND_Sd:
4618 case AARCH64_OPND_Sn:
4619 case AARCH64_OPND_Sm:
4620 case AARCH64_OPND_Vd:
4621 case AARCH64_OPND_Vn:
4622 case AARCH64_OPND_Vm:
4623 case AARCH64_OPND_VdD1:
4624 case AARCH64_OPND_VnD1:
4625 operand->reg.regno = default_value;
4626 break;
4627
4628 case AARCH64_OPND_Ed:
4629 case AARCH64_OPND_En:
4630 case AARCH64_OPND_Em:
4631 operand->reglane.regno = default_value;
4632 break;
4633
4634 case AARCH64_OPND_IDX:
4635 case AARCH64_OPND_BIT_NUM:
4636 case AARCH64_OPND_IMMR:
4637 case AARCH64_OPND_IMMS:
4638 case AARCH64_OPND_SHLL_IMM:
4639 case AARCH64_OPND_IMM_VLSL:
4640 case AARCH64_OPND_IMM_VLSR:
4641 case AARCH64_OPND_CCMP_IMM:
4642 case AARCH64_OPND_FBITS:
4643 case AARCH64_OPND_UIMM4:
4644 case AARCH64_OPND_UIMM3_OP1:
4645 case AARCH64_OPND_UIMM3_OP2:
4646 case AARCH64_OPND_IMM:
4647 case AARCH64_OPND_WIDTH:
4648 case AARCH64_OPND_UIMM7:
4649 case AARCH64_OPND_NZCV:
4650 operand->imm.value = default_value;
4651 break;
4652
4653 case AARCH64_OPND_EXCEPTION:
4654 inst.reloc.type = BFD_RELOC_UNUSED;
4655 break;
4656
4657 case AARCH64_OPND_BARRIER_ISB:
4658 operand->barrier = aarch64_barrier_options + default_value;
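/* Fall through.  */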
4659
4660 default:
4661 break;
4662 }
4663 }
4664
4665 /* Process the relocation type for move wide instructions.
4666 Return TRUE on success; otherwise return FALSE. */
4667
4668 static bfd_boolean
4669 process_movw_reloc_info (void)
4670 {
4671 int is32;
4672 unsigned shift;
4673
4674 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4675
4676 if (inst.base.opcode->op == OP_MOVK)
4677 switch (inst.reloc.type)
4678 {
4679 case BFD_RELOC_AARCH64_MOVW_G0_S:
4680 case BFD_RELOC_AARCH64_MOVW_G1_S:
4681 case BFD_RELOC_AARCH64_MOVW_G2_S:
4682 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4683 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4684 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4685 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4686 set_syntax_error
4687 (_("the specified relocation type is not allowed for MOVK"));
4688 return FALSE;
4689 default:
4690 break;
4691 }
4692
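/* Each MOVW relocation group covers one 16-bit chunk of the value, so the
 group number determines the implicit LSL amount applied below:
 G0 -> 0, G1 -> 16, G2 -> 32, G3 -> 48.  */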
4693 switch (inst.reloc.type)
4694 {
4695 case BFD_RELOC_AARCH64_MOVW_G0:
4696 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4697 case BFD_RELOC_AARCH64_MOVW_G0_S:
4698 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
4699 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
4700 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
4701 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4702 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4703 shift = 0;
4704 break;
4705 case BFD_RELOC_AARCH64_MOVW_G1:
4706 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4707 case BFD_RELOC_AARCH64_MOVW_G1_S:
4708 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
4709 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4710 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
4711 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
4712 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4713 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4714 shift = 16;
4715 break;
4716 case BFD_RELOC_AARCH64_MOVW_G2:
4717 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4718 case BFD_RELOC_AARCH64_MOVW_G2_S:
4719 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
4720 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4721 if (is32)
4722 {
4723 set_fatal_syntax_error
4724 (_("the specified relocation type is not allowed for 32-bit "
4725 "register"));
4726 return FALSE;
4727 }
4728 shift = 32;
4729 break;
4730 case BFD_RELOC_AARCH64_MOVW_G3:
4731 if (is32)
4732 {
4733 set_fatal_syntax_error
4734 (_("the specified relocation type is not allowed for 32-bit "
4735 "register"));
4736 return FALSE;
4737 }
4738 shift = 48;
4739 break;
4740 default:
4741 /* More cases should be added when more MOVW-related relocation types
4742 are supported in GAS. */
4743 gas_assert (aarch64_gas_internal_fixup_p ());
4744 /* The shift amount should have already been set by the parser. */
4745 return TRUE;
4746 }
4747 inst.base.operands[1].shifter.amount = shift;
4748 return TRUE;
4749 }
4750
4751 /* A primitive base-2 log calculator. */
4752
4753 static inline unsigned int
4754 get_logsz (unsigned int size)
4755 {
4756 const unsigned char ls[16] =
4757 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
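/* ls[size - 1] is log2 (size) for the valid transfer sizes 1, 2, 4, 8 and
 16 bytes; the (unsigned char) -1 entries mark the invalid sizes.  */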
4758 if (size > 16)
4759 {
4760 gas_assert (0);
4761 return -1;
4762 }
4763 gas_assert (ls[size - 1] != (unsigned char)-1);
4764 return ls[size - 1];
4765 }
4766
4767 /* Determine and return the real reloc type code for an instruction
4768 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4769
4770 static inline bfd_reloc_code_real_type
4771 ldst_lo12_determine_real_reloc_type (void)
4772 {
4773 unsigned logsz;
4774 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4775 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4776
4777 const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
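/* Rows correspond to the three pseudo reloc types accepted here
 (LDST_LO12, TLSLD_LDST_DTPREL_LO12, TLSLD_LDST_DTPREL_LO12_NC, in that
 order); the column index is the log2 of the transfer size.  */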
4778 {
4779 BFD_RELOC_AARCH64_LDST8_LO12,
4780 BFD_RELOC_AARCH64_LDST16_LO12,
4781 BFD_RELOC_AARCH64_LDST32_LO12,
4782 BFD_RELOC_AARCH64_LDST64_LO12,
4783 BFD_RELOC_AARCH64_LDST128_LO12
4784 },
4785 {
4786 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
4787 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
4788 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
4789 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
4790 BFD_RELOC_AARCH64_NONE
4791 },
4792 {
4793 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
4794 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
4795 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
4796 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
4797 BFD_RELOC_AARCH64_NONE
4798 }
4799 };
4800
4801 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
4802 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
4803 || (inst.reloc.type
4804 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
4805 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4806
4807 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4808 opd1_qlf =
4809 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4810 1, opd0_qlf, 0);
4811 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4812
4813 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4814 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
4815 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
4816 gas_assert (logsz <= 3);
4817 else
4818 gas_assert (logsz <= 4);
4819
4820 /* In reloc.c, these pseudo relocation types should be defined in the
4821 same order as in the reloc_ldst_lo12 array above, because the array
4822 index calculation below relies on this. */
4823 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
4824 }
4825
4826 /* Check whether a register list REGINFO is valid. The registers must be
4827 numbered in increasing order (modulo 32), in increments of one or two.
4828
4829 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4830 increments of two.
4831
4832 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4833
4834 static bfd_boolean
4835 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4836 {
4837 uint32_t i, nb_regs, prev_regno, incr;
4838
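/* REGINFO is encoded as produced by parse_neon_reg_list: bits [1:0] hold
 the number of registers minus one, and each successive 5-bit field above
 that holds a register number, first register in the lowest field.  */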
4839 nb_regs = 1 + (reginfo & 0x3);
4840 reginfo >>= 2;
4841 prev_regno = reginfo & 0x1f;
4842 incr = accept_alternate ? 2 : 1;
4843
4844 for (i = 1; i < nb_regs; ++i)
4845 {
4846 uint32_t curr_regno;
4847 reginfo >>= 5;
4848 curr_regno = reginfo & 0x1f;
4849 if (curr_regno != ((prev_regno + incr) & 0x1f))
4850 return FALSE;
4851 prev_regno = curr_regno;
4852 }
4853
4854 return TRUE;
4855 }
4856
4857 /* Generic instruction operand parser. This does no encoding and no
4858 semantic validation; it merely squirrels values away in the inst
4859 structure. Returns TRUE or FALSE depending on whether the
4860 specified grammar matched. */
4861
4862 static bfd_boolean
4863 parse_operands (char *str, const aarch64_opcode *opcode)
4864 {
4865 int i;
4866 char *backtrack_pos = 0;
4867 const enum aarch64_opnd *operands = opcode->operands;
4868
4869 clear_error ();
4870 skip_whitespace (str);
4871
4872 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4873 {
4874 int64_t val;
4875 int isreg32, isregzero;
4876 int comma_skipped_p = 0;
4877 aarch64_reg_type rtype;
4878 struct neon_type_el vectype;
4879 aarch64_opnd_info *info = &inst.base.operands[i];
4880
4881 DEBUG_TRACE ("parse operand %d", i);
4882
4883 /* Assign the operand code. */
4884 info->type = operands[i];
4885
4886 if (optional_operand_p (opcode, i))
4887 {
4888 /* Remember where we are in case we need to backtrack. */
4889 gas_assert (!backtrack_pos);
4890 backtrack_pos = str;
4891 }
4892
4893 /* Expect a comma between operands; the backtrack mechanism will take
4894 care of cases of an omitted optional operand. */
4895 if (i > 0 && ! skip_past_char (&str, ','))
4896 {
4897 set_syntax_error (_("comma expected between operands"));
4898 goto failure;
4899 }
4900 else
4901 comma_skipped_p = 1;
4902
4903 switch (operands[i])
4904 {
4905 case AARCH64_OPND_Rd:
4906 case AARCH64_OPND_Rn:
4907 case AARCH64_OPND_Rm:
4908 case AARCH64_OPND_Rt:
4909 case AARCH64_OPND_Rt2:
4910 case AARCH64_OPND_Rs:
4911 case AARCH64_OPND_Ra:
4912 case AARCH64_OPND_Rt_SYS:
4913 case AARCH64_OPND_PAIRREG:
4914 po_int_reg_or_fail (1, 0);
4915 break;
4916
4917 case AARCH64_OPND_Rd_SP:
4918 case AARCH64_OPND_Rn_SP:
4919 po_int_reg_or_fail (0, 1);
4920 break;
4921
4922 case AARCH64_OPND_Rm_EXT:
4923 case AARCH64_OPND_Rm_SFT:
4924 po_misc_or_fail (parse_shifter_operand
4925 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4926 ? SHIFTED_ARITH_IMM
4927 : SHIFTED_LOGIC_IMM)));
4928 if (!info->shifter.operator_present)
4929 {
4930 /* Default to LSL if not present. Libopcodes prefers shifter
4931 kind to be explicit. */
4932 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4933 info->shifter.kind = AARCH64_MOD_LSL;
4934 /* For Rm_EXT, libopcodes will carry out a further check on whether
4935 or not the stack pointer is used in the instruction (recall that
4936 "the extend operator is not optional unless at least one of
4937 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4938 }
4939 break;
4940
4941 case AARCH64_OPND_Fd:
4942 case AARCH64_OPND_Fn:
4943 case AARCH64_OPND_Fm:
4944 case AARCH64_OPND_Fa:
4945 case AARCH64_OPND_Ft:
4946 case AARCH64_OPND_Ft2:
4947 case AARCH64_OPND_Sd:
4948 case AARCH64_OPND_Sn:
4949 case AARCH64_OPND_Sm:
4950 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4951 if (val == PARSE_FAIL)
4952 {
4953 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4954 goto failure;
4955 }
4956 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4957
4958 info->reg.regno = val;
4959 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4960 break;
4961
4962 case AARCH64_OPND_Vd:
4963 case AARCH64_OPND_Vn:
4964 case AARCH64_OPND_Vm:
4965 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4966 if (val == PARSE_FAIL)
4967 {
4968 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4969 goto failure;
4970 }
4971 if (vectype.defined & NTA_HASINDEX)
4972 goto failure;
4973
4974 info->reg.regno = val;
4975 info->qualifier = vectype_to_qualifier (&vectype);
4976 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4977 goto failure;
4978 break;
4979
4980 case AARCH64_OPND_VdD1:
4981 case AARCH64_OPND_VnD1:
4982 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4983 if (val == PARSE_FAIL)
4984 {
4985 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4986 goto failure;
4987 }
4988 if (vectype.type != NT_d || vectype.index != 1)
4989 {
4990 set_fatal_syntax_error
4991 (_("the top half of a 128-bit FP/SIMD register is expected"));
4992 goto failure;
4993 }
4994 info->reg.regno = val;
4995 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
4996 here; it is correct for the purpose of encoding/decoding since
4997 only the register number is explicitly encoded in the related
4998 instructions, although this appears a bit hacky. */
4999 info->qualifier = AARCH64_OPND_QLF_S_D;
5000 break;
5001
5002 case AARCH64_OPND_Ed:
5003 case AARCH64_OPND_En:
5004 case AARCH64_OPND_Em:
5005 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5006 if (val == PARSE_FAIL)
5007 {
5008 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5009 goto failure;
5010 }
5011 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5012 goto failure;
5013
5014 info->reglane.regno = val;
5015 info->reglane.index = vectype.index;
5016 info->qualifier = vectype_to_qualifier (&vectype);
5017 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5018 goto failure;
5019 break;
5020
5021 case AARCH64_OPND_LVn:
5022 case AARCH64_OPND_LVt:
5023 case AARCH64_OPND_LVt_AL:
5024 case AARCH64_OPND_LEt:
5025 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
5026 goto failure;
5027 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5028 {
5029 set_fatal_syntax_error (_("invalid register list"));
5030 goto failure;
5031 }
5032 info->reglist.first_regno = (val >> 2) & 0x1f;
5033 info->reglist.num_regs = (val & 0x3) + 1;
5034 if (operands[i] == AARCH64_OPND_LEt)
5035 {
5036 if (!(vectype.defined & NTA_HASINDEX))
5037 goto failure;
5038 info->reglist.has_index = 1;
5039 info->reglist.index = vectype.index;
5040 }
5041 else if (!(vectype.defined & NTA_HASTYPE))
5042 goto failure;
5043 info->qualifier = vectype_to_qualifier (&vectype);
5044 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5045 goto failure;
5046 break;
5047
5048 case AARCH64_OPND_Cn:
5049 case AARCH64_OPND_Cm:
5050 po_reg_or_fail (REG_TYPE_CN);
5051 if (val > 15)
5052 {
5053 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
5054 goto failure;
5055 }
5056 inst.base.operands[i].reg.regno = val;
5057 break;
5058
5059 case AARCH64_OPND_SHLL_IMM:
5060 case AARCH64_OPND_IMM_VLSR:
5061 po_imm_or_fail (1, 64);
5062 info->imm.value = val;
5063 break;
5064
5065 case AARCH64_OPND_CCMP_IMM:
5066 case AARCH64_OPND_FBITS:
5067 case AARCH64_OPND_UIMM4:
5068 case AARCH64_OPND_UIMM3_OP1:
5069 case AARCH64_OPND_UIMM3_OP2:
5070 case AARCH64_OPND_IMM_VLSL:
5071 case AARCH64_OPND_IMM:
5072 case AARCH64_OPND_WIDTH:
5073 po_imm_nc_or_fail ();
5074 info->imm.value = val;
5075 break;
5076
5077 case AARCH64_OPND_UIMM7:
5078 po_imm_or_fail (0, 127);
5079 info->imm.value = val;
5080 break;
5081
5082 case AARCH64_OPND_IDX:
5083 case AARCH64_OPND_BIT_NUM:
5084 case AARCH64_OPND_IMMR:
5085 case AARCH64_OPND_IMMS:
5086 po_imm_or_fail (0, 63);
5087 info->imm.value = val;
5088 break;
5089
5090 case AARCH64_OPND_IMM0:
5091 po_imm_nc_or_fail ();
5092 if (val != 0)
5093 {
5094 set_fatal_syntax_error (_("immediate zero expected"));
5095 goto failure;
5096 }
5097 info->imm.value = 0;
5098 break;
5099
5100 case AARCH64_OPND_FPIMM0:
5101 {
5102 int qfloat;
5103 bfd_boolean res1 = FALSE, res2 = FALSE;
5104 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5105 it is probably not worth the effort to support it. */
5106 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
5107 && !(res2 = parse_constant_immediate (&str, &val)))
5108 goto failure;
5109 if ((res1 && qfloat == 0) || (res2 && val == 0))
5110 {
5111 info->imm.value = 0;
5112 info->imm.is_fp = 1;
5113 break;
5114 }
5115 set_fatal_syntax_error (_("immediate zero expected"));
5116 goto failure;
5117 }
5118
5119 case AARCH64_OPND_IMM_MOV:
5120 {
5121 char *saved = str;
5122 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5123 reg_name_p (str, REG_TYPE_VN))
5124 goto failure;
5125 str = saved;
5126 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5127 GE_OPT_PREFIX, 1));
5128 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5129 later. fix_mov_imm_insn will try to determine a machine
5130 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5131 message if the immediate cannot be moved by a single
5132 instruction. */
5133 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5134 inst.base.operands[i].skip = 1;
5135 }
5136 break;
5137
5138 case AARCH64_OPND_SIMD_IMM:
5139 case AARCH64_OPND_SIMD_IMM_SFT:
5140 if (! parse_big_immediate (&str, &val))
5141 goto failure;
5142 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5143 /* addr_off_p */ 0,
5144 /* need_libopcodes_p */ 1,
5145 /* skip_p */ 1);
5146 /* Parse shift.
5147 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5148 shift, we don't check it here; we leave the checking to
5149 the libopcodes (operand_general_constraint_met_p). By
5150 doing this, we achieve better diagnostics. */
5151 if (skip_past_comma (&str)
5152 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5153 goto failure;
5154 if (!info->shifter.operator_present
5155 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5156 {
5157 /* Default to LSL if not present. Libopcodes prefers shifter
5158 kind to be explicit. */
5159 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5160 info->shifter.kind = AARCH64_MOD_LSL;
5161 }
5162 break;
5163
5164 case AARCH64_OPND_FPIMM:
5165 case AARCH64_OPND_SIMD_FPIMM:
5166 {
5167 int qfloat;
5168 bfd_boolean dp_p
5169 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5170 == 8);
5171 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
5172 goto failure;
5173 if (qfloat == 0)
5174 {
5175 set_fatal_syntax_error (_("invalid floating-point constant"));
5176 goto failure;
5177 }
5178 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5179 inst.base.operands[i].imm.is_fp = 1;
5180 }
5181 break;
5182
5183 case AARCH64_OPND_LIMM:
5184 po_misc_or_fail (parse_shifter_operand (&str, info,
5185 SHIFTED_LOGIC_IMM));
5186 if (info->shifter.operator_present)
5187 {
5188 set_fatal_syntax_error
5189 (_("shift not allowed for bitmask immediate"));
5190 goto failure;
5191 }
5192 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5193 /* addr_off_p */ 0,
5194 /* need_libopcodes_p */ 1,
5195 /* skip_p */ 1);
5196 break;
5197
5198 case AARCH64_OPND_AIMM:
5199 if (opcode->op == OP_ADD)
5200 /* ADD may have relocation types. */
5201 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5202 SHIFTED_ARITH_IMM));
5203 else
5204 po_misc_or_fail (parse_shifter_operand (&str, info,
5205 SHIFTED_ARITH_IMM));
5206 switch (inst.reloc.type)
5207 {
5208 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5209 info->shifter.amount = 12;
5210 break;
5211 case BFD_RELOC_UNUSED:
5212 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5213 if (info->shifter.kind != AARCH64_MOD_NONE)
5214 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5215 inst.reloc.pc_rel = 0;
5216 break;
5217 default:
5218 break;
5219 }
5220 info->imm.value = 0;
5221 if (!info->shifter.operator_present)
5222 {
5223 /* Default to LSL if not present. Libopcodes prefers shifter
5224 kind to be explicit. */
5225 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5226 info->shifter.kind = AARCH64_MOD_LSL;
5227 }
5228 break;
5229
5230 case AARCH64_OPND_HALF:
5231 {
5232 /* #<imm16> or relocation. */
5233 int internal_fixup_p;
5234 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5235 if (internal_fixup_p)
5236 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5237 skip_whitespace (str);
5238 if (skip_past_comma (&str))
5239 {
5240 /* {, LSL #<shift>} */
5241 if (! aarch64_gas_internal_fixup_p ())
5242 {
5243 set_fatal_syntax_error (_("can't mix relocation modifier "
5244 "with explicit shift"));
5245 goto failure;
5246 }
5247 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5248 }
5249 else
5250 inst.base.operands[i].shifter.amount = 0;
5251 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5252 inst.base.operands[i].imm.value = 0;
5253 if (! process_movw_reloc_info ())
5254 goto failure;
5255 }
5256 break;
5257
5258 case AARCH64_OPND_EXCEPTION:
5259 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
5260 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5261 /* addr_off_p */ 0,
5262 /* need_libopcodes_p */ 0,
5263 /* skip_p */ 1);
5264 break;
5265
5266 case AARCH64_OPND_NZCV:
5267 {
5268 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5269 if (nzcv != NULL)
5270 {
5271 str += 4;
5272 info->imm.value = nzcv->value;
5273 break;
5274 }
5275 po_imm_or_fail (0, 15);
5276 info->imm.value = val;
5277 }
5278 break;
5279
5280 case AARCH64_OPND_COND:
5281 case AARCH64_OPND_COND1:
5282 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5283 str += 2;
5284 if (info->cond == NULL)
5285 {
5286 set_syntax_error (_("invalid condition"));
5287 goto failure;
5288 }
5289 else if (operands[i] == AARCH64_OPND_COND1
5290 && (info->cond->value & 0xe) == 0xe)
5291 {
5292 /* Do not allow AL or NV. */
5293 set_default_error ();
5294 goto failure;
5295 }
5296 break;
5297
5298 case AARCH64_OPND_ADDR_ADRP:
5299 po_misc_or_fail (parse_adrp (&str));
5300 /* Clear the value as operand needs to be relocated. */
5301 info->imm.value = 0;
5302 break;
5303
5304 case AARCH64_OPND_ADDR_PCREL14:
5305 case AARCH64_OPND_ADDR_PCREL19:
5306 case AARCH64_OPND_ADDR_PCREL21:
5307 case AARCH64_OPND_ADDR_PCREL26:
5308 po_misc_or_fail (parse_address_reloc (&str, info));
5309 if (!info->addr.pcrel)
5310 {
5311 set_syntax_error (_("invalid pc-relative address"));
5312 goto failure;
5313 }
5314 if (inst.gen_lit_pool
5315 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5316 {
5317 /* Only permit "=value" in the literal load instructions.
5318 The literal will be generated by programmer_friendly_fixup. */
5319 set_syntax_error (_("invalid use of \"=immediate\""));
5320 goto failure;
5321 }
5322 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5323 {
5324 set_syntax_error (_("unrecognized relocation suffix"));
5325 goto failure;
5326 }
5327 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5328 {
5329 info->imm.value = inst.reloc.exp.X_add_number;
5330 inst.reloc.type = BFD_RELOC_UNUSED;
5331 }
5332 else
5333 {
5334 info->imm.value = 0;
5335 if (inst.reloc.type == BFD_RELOC_UNUSED)
5336 switch (opcode->iclass)
5337 {
5338 case compbranch:
5339 case condbranch:
5340 /* e.g. CBZ or B.COND */
5341 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5342 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5343 break;
5344 case testbranch:
5345 /* e.g. TBZ */
5346 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5347 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5348 break;
5349 case branch_imm:
5350 /* e.g. B or BL */
5351 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5352 inst.reloc.type =
5353 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5354 : BFD_RELOC_AARCH64_JUMP26;
5355 break;
5356 case loadlit:
5357 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5358 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5359 break;
5360 case pcreladdr:
5361 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5362 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5363 break;
5364 default:
5365 gas_assert (0);
5366 abort ();
5367 }
5368 inst.reloc.pc_rel = 1;
5369 }
5370 break;
5371
5372 case AARCH64_OPND_ADDR_SIMPLE:
5373 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5374 /* [<Xn|SP>{, #<simm>}] */
5375 po_char_or_fail ('[');
5376 po_reg_or_fail (REG_TYPE_R64_SP);
5377 /* Accept optional ", #0". */
5378 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5379 && skip_past_char (&str, ','))
5380 {
5381 skip_past_char (&str, '#');
5382 if (! skip_past_char (&str, '0'))
5383 {
5384 set_fatal_syntax_error
5385 (_("the optional immediate offset can only be 0"));
5386 goto failure;
5387 }
5388 }
5389 po_char_or_fail (']');
5390 info->addr.base_regno = val;
5391 break;
5392
5393 case AARCH64_OPND_ADDR_REGOFF:
5394 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5395 po_misc_or_fail (parse_address (&str, info, 0));
5396 if (info->addr.pcrel || !info->addr.offset.is_reg
5397 || !info->addr.preind || info->addr.postind
5398 || info->addr.writeback)
5399 {
5400 set_syntax_error (_("invalid addressing mode"));
5401 goto failure;
5402 }
5403 if (!info->shifter.operator_present)
5404 {
5405 /* Default to LSL if not present. Libopcodes prefers shifter
5406 kind to be explicit. */
5407 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5408 info->shifter.kind = AARCH64_MOD_LSL;
5409 }
5410 /* Qualifier to be deduced by libopcodes. */
5411 break;
5412
5413 case AARCH64_OPND_ADDR_SIMM7:
5414 po_misc_or_fail (parse_address (&str, info, 0));
5415 if (info->addr.pcrel || info->addr.offset.is_reg
5416 || (!info->addr.preind && !info->addr.postind))
5417 {
5418 set_syntax_error (_("invalid addressing mode"));
5419 goto failure;
5420 }
5421 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5422 /* addr_off_p */ 1,
5423 /* need_libopcodes_p */ 1,
5424 /* skip_p */ 0);
5425 break;
5426
5427 case AARCH64_OPND_ADDR_SIMM9:
5428 case AARCH64_OPND_ADDR_SIMM9_2:
5429 po_misc_or_fail (parse_address_reloc (&str, info));
5430 if (info->addr.pcrel || info->addr.offset.is_reg
5431 || (!info->addr.preind && !info->addr.postind)
5432 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5433 && info->addr.writeback))
5434 {
5435 set_syntax_error (_("invalid addressing mode"));
5436 goto failure;
5437 }
5438 if (inst.reloc.type != BFD_RELOC_UNUSED)
5439 {
5440 set_syntax_error (_("relocation not allowed"));
5441 goto failure;
5442 }
5443 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5444 /* addr_off_p */ 1,
5445 /* need_libopcodes_p */ 1,
5446 /* skip_p */ 0);
5447 break;
5448
5449 case AARCH64_OPND_ADDR_UIMM12:
5450 po_misc_or_fail (parse_address_reloc (&str, info));
5451 if (info->addr.pcrel || info->addr.offset.is_reg
5452 || !info->addr.preind || info->addr.writeback)
5453 {
5454 set_syntax_error (_("invalid addressing mode"));
5455 goto failure;
5456 }
5457 if (inst.reloc.type == BFD_RELOC_UNUSED)
5458 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5459 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5460 || (inst.reloc.type
5461 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
5462 || (inst.reloc.type
5463 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
5464 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5465 /* Leave qualifier to be determined by libopcodes. */
5466 break;
5467
5468 case AARCH64_OPND_SIMD_ADDR_POST:
5469 /* [<Xn|SP>], <Xm|#<amount>> */
5470 po_misc_or_fail (parse_address (&str, info, 1));
5471 if (!info->addr.postind || !info->addr.writeback)
5472 {
5473 set_syntax_error (_("invalid addressing mode"));
5474 goto failure;
5475 }
5476 if (!info->addr.offset.is_reg)
5477 {
5478 if (inst.reloc.exp.X_op == O_constant)
5479 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5480 else
5481 {
5482 set_fatal_syntax_error
5483 (_("writeback value should be an immediate constant"));
5484 goto failure;
5485 }
5486 }
5487 /* No qualifier. */
5488 break;
5489
5490 case AARCH64_OPND_SYSREG:
5491 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
5492 == PARSE_FAIL)
5493 {
5494 set_syntax_error (_("unknown or missing system register name"));
5495 goto failure;
5496 }
5497 inst.base.operands[i].sysreg = val;
5498 break;
5499
5500 case AARCH64_OPND_PSTATEFIELD:
5501 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
5502 == PARSE_FAIL)
5503 {
5504 set_syntax_error (_("unknown or missing PSTATE field name"));
5505 goto failure;
5506 }
5507 inst.base.operands[i].pstatefield = val;
5508 break;
5509
5510 case AARCH64_OPND_SYSREG_IC:
5511 inst.base.operands[i].sysins_op =
5512 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5513 goto sys_reg_ins;
5514 case AARCH64_OPND_SYSREG_DC:
5515 inst.base.operands[i].sysins_op =
5516 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5517 goto sys_reg_ins;
5518 case AARCH64_OPND_SYSREG_AT:
5519 inst.base.operands[i].sysins_op =
5520 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5521 goto sys_reg_ins;
5522 case AARCH64_OPND_SYSREG_TLBI:
5523 inst.base.operands[i].sysins_op =
5524 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5525 sys_reg_ins:
5526 if (inst.base.operands[i].sysins_op == NULL)
5527 {
5528 set_fatal_syntax_error ( _("unknown or missing operation name"));
5529 goto failure;
5530 }
5531 break;
5532
5533 case AARCH64_OPND_BARRIER:
5534 case AARCH64_OPND_BARRIER_ISB:
5535 val = parse_barrier (&str);
5536 if (val != PARSE_FAIL
5537 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5538 {
5539 /* ISB only accepts the option name 'sy'. */
5540 set_syntax_error
5541 (_("the specified option is not accepted in ISB"));
5542 /* Turn off backtrack as this optional operand is present. */
5543 backtrack_pos = 0;
5544 goto failure;
5545 }
5546 /* This is an extension to accept a 0..15 immediate. */
5547 if (val == PARSE_FAIL)
5548 po_imm_or_fail (0, 15);
5549 info->barrier = aarch64_barrier_options + val;
5550 break;
5551
5552 case AARCH64_OPND_PRFOP:
5553 val = parse_pldop (&str);
5554 /* This is an extension to accept a 0..31 immediate. */
5555 if (val == PARSE_FAIL)
5556 po_imm_or_fail (0, 31);
5557 inst.base.operands[i].prfop = aarch64_prfops + val;
5558 break;
5559
5560 default:
5561 as_fatal (_("unhandled operand code %d"), operands[i]);
5562 }
5563
5564 /* If we get here, this operand was successfully parsed. */
5565 inst.base.operands[i].present = 1;
5566 continue;
5567
5568 failure:
5569 /* The parse routine should already have set the error, but in case
5570 not, set a default one here. */
5571 if (! error_p ())
5572 set_default_error ();
5573
5574 if (! backtrack_pos)
5575 goto parse_operands_return;
5576
5577 {
5578 /* We reach here because this operand is marked as optional, and
5579 either no operand was supplied or the operand was supplied but it
5580 was syntactically incorrect. In the latter case we report an
5581 error. In the former case we perform a few more checks before
5582 dropping through to the code to insert the default operand. */
5583
5584 char *tmp = backtrack_pos;
5585 char endchar = END_OF_INSN;
5586
5587 if (i != (aarch64_num_of_operands (opcode) - 1))
5588 endchar = ',';
5589 skip_past_char (&tmp, ',');
5590
5591 if (*tmp != endchar)
5592 /* The user has supplied an operand in the wrong format. */
5593 goto parse_operands_return;
5594
5595 /* Make sure there is not a comma before the optional operand.
5596 For example the fifth operand of 'sys' is optional:
5597
5598 sys #0,c0,c0,#0, <--- wrong
5599 sys #0,c0,c0,#0 <--- correct. */
5600 if (comma_skipped_p && i && endchar == END_OF_INSN)
5601 {
5602 set_fatal_syntax_error
5603 (_("unexpected comma before the omitted optional operand"));
5604 goto parse_operands_return;
5605 }
5606 }
5607
5608 /* Reaching here means we are dealing with an optional operand that is
5609 omitted from the assembly line. */
5610 gas_assert (optional_operand_p (opcode, i));
5611 info->present = 0;
5612 process_omitted_operand (operands[i], opcode, i, info);
5613
5614 /* Try again, skipping the optional operand at backtrack_pos. */
5615 str = backtrack_pos;
5616 backtrack_pos = 0;
5617
5618 /* Clear any error record after the omitted optional operand has been
5619 successfully handled. */
5620 clear_error ();
5621 }
5622
5623 /* Check if we have parsed all the operands. */
5624 if (*str != '\0' && ! error_p ())
5625 {
5626 /* Set I to the index of the last present operand; this is
5627 for the purpose of diagnostics. */
5628 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5629 ;
5630 set_fatal_syntax_error
5631 (_("unexpected characters following instruction"));
5632 }
5633
5634 parse_operands_return:
5635
5636 if (error_p ())
5637 {
5638 DEBUG_TRACE ("parsing FAIL: %s - %s",
5639 operand_mismatch_kind_names[get_error_kind ()],
5640 get_error_message ());
5641 /* Record the operand error properly; this is useful when there
5642 are multiple instruction templates for a mnemonic name, so that
5643 later on, we can select the error that most closely describes
5644 the problem. */
5645 record_operand_error (opcode, i, get_error_kind (),
5646 get_error_message ());
5647 return FALSE;
5648 }
5649 else
5650 {
5651 DEBUG_TRACE ("parsing SUCCESS");
5652 return TRUE;
5653 }
5654 }
5655
5656 /* Do some fix-ups to provide programmer-friendly features while
5657 keeping libopcodes happy, i.e. libopcodes only accepts
5658 the preferred architectural syntax.
5659 Return FALSE if there is any failure; otherwise return TRUE. */
5660
5661 static bfd_boolean
5662 programmer_friendly_fixup (aarch64_instruction *instr)
5663 {
5664 aarch64_inst *base = &instr->base;
5665 const aarch64_opcode *opcode = base->opcode;
5666 enum aarch64_op op = opcode->op;
5667 aarch64_opnd_info *operands = base->operands;
5668
5669 DEBUG_TRACE ("enter");
5670
5671 switch (opcode->iclass)
5672 {
5673 case testbranch:
5674 /* TBNZ Xn|Wn, #uimm6, label
5675 Test and Branch Not Zero: conditionally jumps to label if bit number
5676 uimm6 in register Xn is not zero. The bit number implies the width of
5677 the register, which may be written and should be disassembled as Wn if
5678 uimm is less than 32. */
5679 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5680 {
5681 if (operands[1].imm.value >= 32)
5682 {
5683 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5684 0, 31);
5685 return FALSE;
5686 }
5687 operands[0].qualifier = AARCH64_OPND_QLF_X;
5688 }
5689 break;
5690 case loadlit:
5691 /* LDR Wt, label | =value
5692 As a convenience, assemblers will typically permit the notation
5693 "=value" in conjunction with the pc-relative literal load instructions
5694 to automatically place an immediate value or symbolic address in a
5695 nearby literal pool and generate a hidden label which references it.
5696 ISREG has been set to 0 in the case of =value. */
5697 if (instr->gen_lit_pool
5698 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5699 {
5700 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5701 if (op == OP_LDRSW_LIT)
5702 size = 4;
5703 if (instr->reloc.exp.X_op != O_constant
5704 && instr->reloc.exp.X_op != O_big
5705 && instr->reloc.exp.X_op != O_symbol)
5706 {
5707 record_operand_error (opcode, 1,
5708 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5709 _("constant expression expected"));
5710 return FALSE;
5711 }
5712 if (! add_to_lit_pool (&instr->reloc.exp, size))
5713 {
5714 record_operand_error (opcode, 1,
5715 AARCH64_OPDE_OTHER_ERROR,
5716 _("literal pool insertion failed"));
5717 return FALSE;
5718 }
5719 }
5720 break;
5721 case log_shift:
5722 case bitfield:
5723 /* UXT[BHW] Wd, Wn
5724 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5725 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5726 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5727 A programmer-friendly assembler should accept a destination Xd in
5728 place of Wd, however that is not the preferred form for disassembly.
5729 */
5730 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5731 && operands[1].qualifier == AARCH64_OPND_QLF_W
5732 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5733 operands[0].qualifier = AARCH64_OPND_QLF_W;
5734 break;
5735
5736 case addsub_ext:
5737 {
5738 /* In the 64-bit form, the final register operand is written as Wm
5739 for all but the (possibly omitted) UXTX/LSL and SXTX
5740 operators.
5741 As a programmer-friendly assembler, we accept e.g.
5742 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5743 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5744 int idx = aarch64_operand_index (opcode->operands,
5745 AARCH64_OPND_Rm_EXT);
5746 gas_assert (idx == 1 || idx == 2);
5747 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5748 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5749 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5750 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5751 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5752 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5753 }
5754 break;
5755
5756 default:
5757 break;
5758 }
5759
5760 DEBUG_TRACE ("exit with SUCCESS");
5761 return TRUE;
5762 }
5763
5764 /* Check for loads and stores that will cause unpredictable behavior. */
5765
5766 static void
5767 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5768 {
5769 aarch64_inst *base = &instr->base;
5770 const aarch64_opcode *opcode = base->opcode;
5771 const aarch64_opnd_info *opnds = base->operands;
5772 switch (opcode->iclass)
5773 {
5774 case ldst_pos:
5775 case ldst_imm9:
5776 case ldst_unscaled:
5777 case ldst_unpriv:
5778 /* Loading/storing the base register is unpredictable if writeback. */
5779 if ((aarch64_get_operand_class (opnds[0].type)
5780 == AARCH64_OPND_CLASS_INT_REG)
5781 && opnds[0].reg.regno == opnds[1].addr.base_regno
5782 && opnds[1].addr.base_regno != REG_SP
5783 && opnds[1].addr.writeback)
5784 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5785 break;
5786 case ldstpair_off:
5787 case ldstnapair_offs:
5788 case ldstpair_indexed:
5789 /* Loading/storing the base register is unpredictable if writeback. */
5790 if ((aarch64_get_operand_class (opnds[0].type)
5791 == AARCH64_OPND_CLASS_INT_REG)
5792 && (opnds[0].reg.regno == opnds[2].addr.base_regno
5793 || opnds[1].reg.regno == opnds[2].addr.base_regno)
5794 && opnds[2].addr.base_regno != REG_SP
5795 && opnds[2].addr.writeback)
5796 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5797 /* Load operations must load different registers. */
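/* Bit 22 of the encoding is the L bit, which distinguishes a load pair
 from a store pair.  */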
5798 if ((opcode->opcode & (1 << 22))
5799 && opnds[0].reg.regno == opnds[1].reg.regno)
5800 as_warn (_("unpredictable load of register pair -- `%s'"), str);
5801 break;
5802 default:
5803 break;
5804 }
5805 }
5806
5807 /* A wrapper function to interface with libopcodes on encoding and
5808 record the error message if there is any.
5809
5810 Return TRUE on success; otherwise return FALSE. */
5811
5812 static bfd_boolean
5813 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5814 aarch64_insn *code)
5815 {
5816 aarch64_operand_error error_info;
5817 error_info.kind = AARCH64_OPDE_NIL;
5818 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5819 return TRUE;
5820 else
5821 {
5822 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5823 record_operand_error_info (opcode, &error_info);
5824 return FALSE;
5825 }
5826 }
5827
5828 #ifdef DEBUG_AARCH64
5829 static inline void
5830 dump_opcode_operands (const aarch64_opcode *opcode)
5831 {
5832 int i = 0;
5833 while (opcode->operands[i] != AARCH64_OPND_NIL)
5834 {
5835 aarch64_verbose ("\t\t opnd%d: %s", i,
5836 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5837 ? aarch64_get_operand_name (opcode->operands[i])
5838 : aarch64_get_operand_desc (opcode->operands[i]));
5839 ++i;
5840 }
5841 }
5842 #endif /* DEBUG_AARCH64 */
5843
5844 /* This is the guts of the machine-dependent assembler. STR points to a
5845 machine dependent instruction. This function is supposed to emit
5846 the frags/bytes it assembles to. */
5847
5848 void
5849 md_assemble (char *str)
5850 {
5851 char *p = str;
5852 templates *template;
5853 aarch64_opcode *opcode;
5854 aarch64_inst *inst_base;
5855 unsigned saved_cond;
5856
5857 /* Align the previous label if needed. */
5858 if (last_label_seen != NULL)
5859 {
5860 symbol_set_frag (last_label_seen, frag_now);
5861 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5862 S_SET_SEGMENT (last_label_seen, now_seg);
5863 }
5864
5865 inst.reloc.type = BFD_RELOC_UNUSED;
5866
5867 DEBUG_TRACE ("\n\n");
5868 DEBUG_TRACE ("==============================");
5869 DEBUG_TRACE ("Enter md_assemble with %s", str);
5870
5871 template = opcode_lookup (&p);
5872 if (!template)
5873 {
5874 /* It wasn't an instruction, but it might be a register alias created
5875 by a directive of the form "alias .req reg". */
5876 if (!create_register_alias (str, p))
5877 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5878 str);
5879 return;
5880 }
5881
5882 skip_whitespace (p);
5883 if (*p == ',')
5884 {
5885 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5886 get_mnemonic_name (str), str);
5887 return;
5888 }
5889
5890 init_operand_error_report ();
5891
5892 /* Sections are assumed to start aligned. In an executable section, there
5893 is no MAP_DATA symbol pending, so we only align the address during the
5894 MAP_DATA --> MAP_INSN transition.
5895 For other sections, this is not guaranteed. */
5896 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
5897 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
5898 frag_align_code (2, 0);
5899
5900 saved_cond = inst.cond;
5901 reset_aarch64_instruction (&inst);
5902 inst.cond = saved_cond;
5903
5904 /* Iterate through all opcode entries with the same mnemonic name. */
5905 do
5906 {
5907 opcode = template->opcode;
5908
5909 DEBUG_TRACE ("opcode %s found", opcode->name);
5910 #ifdef DEBUG_AARCH64
5911 if (debug_dump)
5912 dump_opcode_operands (opcode);
5913 #endif /* DEBUG_AARCH64 */
5914
5915 mapping_state (MAP_INSN);
5916
5917 inst_base = &inst.base;
5918 inst_base->opcode = opcode;
5919
5920 /* Truly conditionally executed instructions, e.g. b.cond. */
5921 if (opcode->flags & F_COND)
5922 {
5923 gas_assert (inst.cond != COND_ALWAYS);
5924 inst_base->cond = get_cond_from_value (inst.cond);
5925 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5926 }
5927 else if (inst.cond != COND_ALWAYS)
5928 {
5929 /* We should not arrive here: the assembly looks like a conditional
5930 instruction, but the opcode found is unconditional. */
5931 gas_assert (0);
5932 continue;
5933 }
5934
5935 if (parse_operands (p, opcode)
5936 && programmer_friendly_fixup (&inst)
5937 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5938 {
5939 /* Check that this instruction is supported for this CPU. */
5940 if (!opcode->avariant
5941 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5942 {
5943 as_bad (_("selected processor does not support `%s'"), str);
5944 return;
5945 }
5946
5947 warn_unpredictable_ldst (&inst, str);
5948
5949 if (inst.reloc.type == BFD_RELOC_UNUSED
5950 || !inst.reloc.need_libopcodes_p)
5951 output_inst (NULL);
5952 else
5953 {
5954 /* If a relocation is generated for the instruction, store the
5955 instruction information for the future fix-up. */
5956 struct aarch64_inst *copy;
5957 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5958 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5959 abort ();
5960 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5961 output_inst (copy);
5962 }
5963 return;
5964 }
5965
5966 template = template->next;
5967 if (template != NULL)
5968 {
5969 reset_aarch64_instruction (&inst);
5970 inst.cond = saved_cond;
5971 }
5972 }
5973 while (template != NULL);
5974
5975 /* Issue the error messages if any. */
5976 output_operand_error_report (str);
5977 }
5978
5979 /* Various frobbings of labels and their addresses. */
5980
5981 void
5982 aarch64_start_line_hook (void)
5983 {
5984 last_label_seen = NULL;
5985 }
5986
5987 void
5988 aarch64_frob_label (symbolS * sym)
5989 {
5990 last_label_seen = sym;
5991
5992 dwarf2_emit_label (sym);
5993 }
5994
5995 int
5996 aarch64_data_in_code (void)
5997 {
5998 if (!strncmp (input_line_pointer + 1, "data:", 5))
5999 {
6000 *input_line_pointer = '/';
6001 input_line_pointer += 5;
6002 *input_line_pointer = 0;
6003 return 1;
6004 }
6005
6006 return 0;
6007 }
6008
6009 char *
6010 aarch64_canonicalize_symbol_name (char *name)
6011 {
6012 int len;
6013
6014 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
6015 *(name + len - 5) = 0;
6016
6017 return name;
6018 }
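/* For instance, a symbol named "bar/data" is canonicalized back to plain
   "bar" by the function above, which truncates the "/data" suffix.  */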
6019 \f
6020 /* Table of all register names defined by default. The user can
6021 define additional names with .req. Note that all register names
6022 should appear in both upper and lowercase variants. Some registers
6023 also have mixed-case names. */
6024
6025 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
6026 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
6027 #define REGSET31(p,t) \
6028 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
6029 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
6030 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
6031 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
6032 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
6033 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
6034 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
6035 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
6036 #define REGSET(p,t) \
6037 REGSET31(p,t), REGNUM(p,31,t)
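/* As an illustration of how the macros above expand, REGNUM (x, 0, R_64)
   becomes REGDEF (x0, 0, R_64), i.e. the entry { "x0", 0, REG_TYPE_R_64, TRUE },
   and REGSET31 (x, R_64) produces such entries for x0 through x30.  */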
6038
6039 /* These go into aarch64_reg_hsh hash-table. */
6040 static const reg_entry reg_names[] = {
6041 /* Integer registers. */
6042 REGSET31 (x, R_64), REGSET31 (X, R_64),
6043 REGSET31 (w, R_32), REGSET31 (W, R_32),
6044
6045 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
6046 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
6047
6048 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
6049 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
6050
6051 /* Coprocessor register numbers. */
6052 REGSET (c, CN), REGSET (C, CN),
6053
6054 /* Floating-point single precision registers. */
6055 REGSET (s, FP_S), REGSET (S, FP_S),
6056
6057 /* Floating-point double precision registers. */
6058 REGSET (d, FP_D), REGSET (D, FP_D),
6059
6060 /* Floating-point half precision registers. */
6061 REGSET (h, FP_H), REGSET (H, FP_H),
6062
6063 /* Floating-point byte precision registers. */
6064 REGSET (b, FP_B), REGSET (B, FP_B),
6065
6066 /* Floating-point quad precision registers. */
6067 REGSET (q, FP_Q), REGSET (Q, FP_Q),
6068
6069 /* FP/SIMD registers. */
6070 REGSET (v, VN), REGSET (V, VN),
6071 };
6072
6073 #undef REGDEF
6074 #undef REGNUM
6075 #undef REGSET
6076
6077 #define N 1
6078 #define n 0
6079 #define Z 1
6080 #define z 0
6081 #define C 1
6082 #define c 0
6083 #define V 1
6084 #define v 0
6085 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
6086 static const asm_nzcv nzcv_names[] = {
6087 {"nzcv", B (n, z, c, v)},
6088 {"nzcV", B (n, z, c, V)},
6089 {"nzCv", B (n, z, C, v)},
6090 {"nzCV", B (n, z, C, V)},
6091 {"nZcv", B (n, Z, c, v)},
6092 {"nZcV", B (n, Z, c, V)},
6093 {"nZCv", B (n, Z, C, v)},
6094 {"nZCV", B (n, Z, C, V)},
6095 {"Nzcv", B (N, z, c, v)},
6096 {"NzcV", B (N, z, c, V)},
6097 {"NzCv", B (N, z, C, v)},
6098 {"NzCV", B (N, z, C, V)},
6099 {"NZcv", B (N, Z, c, v)},
6100 {"NZcV", B (N, Z, c, V)},
6101 {"NZCv", B (N, Z, C, v)},
6102 {"NZCV", B (N, Z, C, V)}
6103 };
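/* For example, the "nZcV" entry above is B (n, Z, c, V)
   == (0 << 3) | (1 << 2) | (0 << 1) | 1 == 0x5, i.e. the Z and V flags set.  */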
6104
6105 #undef N
6106 #undef n
6107 #undef Z
6108 #undef z
6109 #undef C
6110 #undef c
6111 #undef V
6112 #undef v
6113 #undef B
6114 \f
6115 /* MD interface: bits in the object file. */
6116
6117 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6118 for use in the object file, and store them in the array pointed to by buf.
6119 This knows about the endianness of the target machine and does
6120 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
6121 2 (short) and 4 (long). Floating point numbers are put out as a series of
6122 LITTLENUMS (shorts, here at least). */
6123
6124 void
6125 md_number_to_chars (char *buf, valueT val, int n)
6126 {
6127 if (target_big_endian)
6128 number_to_chars_bigendian (buf, val, n);
6129 else
6130 number_to_chars_littleendian (buf, val, n);
6131 }
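/* For instance, md_number_to_chars (buf, 0xd503201f, 4) stores the bytes
   1f 20 03 d5 on a little-endian target and d5 03 20 1f on a big-endian one.  */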
6132
6133 /* MD interface: Sections. */
6134
6135 /* Estimate the size of a frag before relaxing. Assume everything fits in
6136 4 bytes. */
6137
6138 int
6139 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6140 {
6141 fragp->fr_var = 4;
6142 return 4;
6143 }
6144
6145 /* Round up a section size to the appropriate boundary. */
6146
6147 valueT
6148 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
6149 {
6150 return size;
6151 }
6152
6153 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6154 of an rs_align_code fragment.
6155
6156 Here we fill the frag with the appropriate info for padding the
6157 output stream. The resulting frag will consist of a fixed (fr_fix)
6158 and of a repeating (fr_var) part.
6159
6160 The fixed content is always emitted before the repeating content and
6161 these two parts are used as follows in constructing the output:
6162 - the fixed part will be used to align to a valid instruction word
6163 boundary, in case we start at a misaligned address; as no
6164 executable instruction can live at the misaligned location, we
6165 simply fill with zeros;
6166 - the variable part will be used to cover the remaining padding and
6167 we fill using the AArch64 NOP instruction.
6168
6169 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6170 enough storage space for up to 3 bytes of padding back to a valid
6171 instruction alignment and exactly 4 bytes to store the NOP pattern. */
6172
6173 void
6174 aarch64_handle_align (fragS * fragP)
6175 {
6176 /* NOP = d503201f */
6177 /* AArch64 instructions are always little-endian. */
6178 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6179
6180 int bytes, fix, noop_size;
6181 char *p;
6182
6183 if (fragP->fr_type != rs_align_code)
6184 return;
6185
6186 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6187 p = fragP->fr_literal + fragP->fr_fix;
6188
6189 #ifdef OBJ_ELF
6190 gas_assert (fragP->tc_frag_data.recorded);
6191 #endif
6192
6193 noop_size = sizeof (aarch64_noop);
6194
6195 fix = bytes & (noop_size - 1);
6196 if (fix)
6197 {
6198 #ifdef OBJ_ELF
6199 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6200 #endif
6201 memset (p, 0, fix);
6202 p += fix;
6203 fragP->fr_fix += fix;
6204 }
6205
6206 if (noop_size)
6207 memcpy (p, aarch64_noop, noop_size);
6208 fragP->fr_var = noop_size;
6209 }
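/* Worked example: if the alignment frag needs 11 bytes of padding, then
   fix = 11 & 3 = 3, so three zero bytes form the fixed part and the remaining
   8 bytes are covered by repeating the 4-byte NOP pattern held in the
   variable part (fr_var = 4).  */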
6210
6211 /* Perform target specific initialisation of a frag.
6212 Note - despite the name this initialisation is not done when the frag
6213 is created, but only when its type is assigned. A frag can be created
6214 and used a long time before its type is set, so beware of assuming that
6215 this initialisation is performed first. */
6216
6217 #ifndef OBJ_ELF
6218 void
6219 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6220 int max_chars ATTRIBUTE_UNUSED)
6221 {
6222 }
6223
6224 #else /* OBJ_ELF is defined. */
6225 void
6226 aarch64_init_frag (fragS * fragP, int max_chars)
6227 {
6228 /* Record a mapping symbol for alignment frags. We will delete this
6229 later if the alignment ends up empty. */
6230 if (!fragP->tc_frag_data.recorded)
6231 fragP->tc_frag_data.recorded = 1;
6232
6233 switch (fragP->fr_type)
6234 {
6235 case rs_align:
6236 case rs_align_test:
6237 case rs_fill:
6238 mapping_state_2 (MAP_DATA, max_chars);
6239 break;
6240 case rs_align_code:
6241 mapping_state_2 (MAP_INSN, max_chars);
6242 break;
6243 default:
6244 break;
6245 }
6246 }
6247 \f
6248 /* Initialize the DWARF-2 unwind information for this procedure. */
6249
6250 void
6251 tc_aarch64_frame_initial_instructions (void)
6252 {
6253 cfi_add_CFA_def_cfa (REG_SP, 0);
6254 }
6255 #endif /* OBJ_ELF */
6256
6257 /* Convert REGNAME to a DWARF-2 register number. */
6258
6259 int
6260 tc_aarch64_regname_to_dw2regnum (char *regname)
6261 {
6262 const reg_entry *reg = parse_reg (&regname);
6263 if (reg == NULL)
6264 return -1;
6265
6266 switch (reg->type)
6267 {
6268 case REG_TYPE_SP_32:
6269 case REG_TYPE_SP_64:
6270 case REG_TYPE_R_32:
6271 case REG_TYPE_R_64:
6272 return reg->number;
6273
6274 case REG_TYPE_FP_B:
6275 case REG_TYPE_FP_H:
6276 case REG_TYPE_FP_S:
6277 case REG_TYPE_FP_D:
6278 case REG_TYPE_FP_Q:
6279 return reg->number + 64;
6280
6281 default:
6282 break;
6283 }
6284 return -1;
6285 }
6286
6287 /* Implement DWARF2_ADDR_SIZE. */
6288
6289 int
6290 aarch64_dwarf2_addr_size (void)
6291 {
6292 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6293 if (ilp32_p)
6294 return 4;
6295 #endif
6296 return bfd_arch_bits_per_address (stdoutput) / 8;
6297 }
6298
6299 /* MD interface: Symbol and relocation handling. */
6300
6301 /* Return the address within the segment that a PC-relative fixup is
6302 relative to. For AArch64, PC-relative fixups applied to instructions
6303 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6304
6305 long
6306 md_pcrel_from_section (fixS * fixP, segT seg)
6307 {
6308 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6309
6310 /* If this is pc-relative and we are going to emit a relocation
6311 then we just want to put out any pipeline compensation that the linker
6312 will need. Otherwise we want to use the calculated base. */
6313 if (fixP->fx_pcrel
6314 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6315 || aarch64_force_relocation (fixP)))
6316 base = 0;
6317
6318 /* AArch64 should be consistent for all pc-relative relocations. */
6319 return base + AARCH64_PCREL_OFFSET;
6320 }
6321
6322 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
6323 Otherwise we have no need to default values of symbols. */
6324
6325 symbolS *
6326 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6327 {
6328 #ifdef OBJ_ELF
6329 if (name[0] == '_' && name[1] == 'G'
6330 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6331 {
6332 if (!GOT_symbol)
6333 {
6334 if (symbol_find (name))
6335 as_bad (_("GOT already in the symbol table"));
6336
6337 GOT_symbol = symbol_new (name, undefined_section,
6338 (valueT) 0, &zero_address_frag);
6339 }
6340
6341 return GOT_symbol;
6342 }
6343 #endif
6344
6345 return 0;
6346 }
6347
6348 /* Return non-zero if the indicated VALUE has overflowed the maximum
6349 range expressible by an unsigned number with the indicated number of
6350 BITS. */
6351
6352 static bfd_boolean
6353 unsigned_overflow (valueT value, unsigned bits)
6354 {
6355 valueT lim;
6356 if (bits >= sizeof (valueT) * 8)
6357 return FALSE;
6358 lim = (valueT) 1 << bits;
6359 return (value >= lim);
6360 }
6361
6362
6363 /* Return non-zero if the indicated VALUE has overflowed the maximum
6364 range expressible by a signed number with the indicated number of
6365 BITS. */
6366
6367 static bfd_boolean
6368 signed_overflow (offsetT value, unsigned bits)
6369 {
6370 offsetT lim;
6371 if (bits >= sizeof (offsetT) * 8)
6372 return FALSE;
6373 lim = (offsetT) 1 << (bits - 1);
6374 return (value < -lim || value >= lim);
6375 }
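/* For example, unsigned_overflow (0xffff, 16) is FALSE while
   unsigned_overflow (0x10000, 16) is TRUE; likewise signed_overflow (-0x8000, 16)
   is FALSE but signed_overflow (0x8000, 16) is TRUE.  */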
6376
6377 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6378 unsigned immediate offset load/store instruction, try to encode it as
6379 an unscaled, 9-bit, signed immediate offset load/store instruction.
6380 Return TRUE if it is successful; otherwise return FALSE.
6381
6382 As a programmer-friendly feature, the assembler generates LDUR/STUR
6383 instructions in response to the standard LDR/STR mnemonics when the
6384 immediate offset is unambiguous, i.e. when it is negative or unaligned. */
6385
6386 static bfd_boolean
6387 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6388 {
6389 int idx;
6390 enum aarch64_op new_op;
6391 const aarch64_opcode *new_opcode;
6392
6393 gas_assert (instr->opcode->iclass == ldst_pos);
6394
6395 switch (instr->opcode->op)
6396 {
6397 case OP_LDRB_POS: new_op = OP_LDURB; break;
6398 case OP_STRB_POS: new_op = OP_STURB; break;
6399 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6400 case OP_LDRH_POS: new_op = OP_LDURH; break;
6401 case OP_STRH_POS: new_op = OP_STURH; break;
6402 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6403 case OP_LDR_POS: new_op = OP_LDUR; break;
6404 case OP_STR_POS: new_op = OP_STUR; break;
6405 case OP_LDRF_POS: new_op = OP_LDURV; break;
6406 case OP_STRF_POS: new_op = OP_STURV; break;
6407 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6408 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6409 default: new_op = OP_NIL; break;
6410 }
6411
6412 if (new_op == OP_NIL)
6413 return FALSE;
6414
6415 new_opcode = aarch64_get_opcode (new_op);
6416 gas_assert (new_opcode != NULL);
6417
6418 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6419 instr->opcode->op, new_opcode->op);
6420
6421 aarch64_replace_opcode (instr, new_opcode);
6422
6423 /* Clear the ADDR_SIMM9 operand's qualifier; otherwise the
6424 qualifier matching may fail because the out-of-date qualifier will
6425 prevent the operand from being updated with a new and correct qualifier. */
6426 idx = aarch64_operand_index (instr->opcode->operands,
6427 AARCH64_OPND_ADDR_SIMM9);
6428 gas_assert (idx == 1);
6429 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6430
6431 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6432
6433 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6434 return FALSE;
6435
6436 return TRUE;
6437 }
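/* Illustration (assumed operands): "ldr x0, [x1, #-8]" cannot use the scaled,
   unsigned 12-bit offset form because the offset is negative, so the routine
   above switches the opcode to LDUR and encodes -8 in the unscaled, signed
   9-bit immediate instead.  */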
6438
6439 /* Called by fix_insn to fix a MOV immediate alias instruction.
6440
6441 Operand for a generic move immediate instruction, which is an alias
6442 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6443 a 32-bit/64-bit immediate value into a general register. An assembler error
6444 shall result if the immediate cannot be created by a single one of these
6445 instructions. If there is a choice, then to ensure reversibility an
6446 assembler must prefer MOVZ to MOVN, and MOVZ or MOVN to ORR. */
6447
6448 static void
6449 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6450 {
6451 const aarch64_opcode *opcode;
6452
6453 /* Need to check if the destination is SP/ZR. The check has to be done
6454 before any aarch64_replace_opcode. */
6455 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6456 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6457
6458 instr->operands[1].imm.value = value;
6459 instr->operands[1].skip = 0;
6460
6461 if (try_mov_wide_p)
6462 {
6463 /* Try the MOVZ alias. */
6464 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6465 aarch64_replace_opcode (instr, opcode);
6466 if (aarch64_opcode_encode (instr->opcode, instr,
6467 &instr->value, NULL, NULL))
6468 {
6469 put_aarch64_insn (buf, instr->value);
6470 return;
6471 }
6472 /* Try the MOVN alias. */
6473 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6474 aarch64_replace_opcode (instr, opcode);
6475 if (aarch64_opcode_encode (instr->opcode, instr,
6476 &instr->value, NULL, NULL))
6477 {
6478 put_aarch64_insn (buf, instr->value);
6479 return;
6480 }
6481 }
6482
6483 if (try_mov_bitmask_p)
6484 {
6485 /* Try the ORR alias. */
6486 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6487 aarch64_replace_opcode (instr, opcode);
6488 if (aarch64_opcode_encode (instr->opcode, instr,
6489 &instr->value, NULL, NULL))
6490 {
6491 put_aarch64_insn (buf, instr->value);
6492 return;
6493 }
6494 }
6495
6496 as_bad_where (fixP->fx_file, fixP->fx_line,
6497 _("immediate cannot be moved by a single instruction"));
6498 }
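/* Illustrative examples of the preference order above (encodings assumed,
   not taken verbatim from the opcode table): "mov x0, #0x10000" becomes
   MOVZ x0, #0x1, lsl #16; "mov x0, #-1" becomes MOVN x0, #0; and
   "mov x0, #0xffff0000ffff0000" falls back to the bitmask-immediate form
   ORR x0, xzr, #0xffff0000ffff0000.  */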
6499
6500 /* An instruction operand that is immediate-related may use a symbol
6501 in the assembly, e.g.
6502
6503 mov w0, u32
6504 .set u32, 0x00ffff00
6505
6506 At the time when the assembly instruction is parsed, a referenced symbol,
6507 like 'u32' in the above example, may not have been seen; a fixS is created
6508 in such a case and is handled here after symbols have been resolved.
6509 The instruction is fixed up with VALUE using the information in *FIXP plus
6510 extra information in FLAGS.
6511
6512 This function is called by md_apply_fix to fix up instructions that need
6513 a fix-up described above but does not involve any linker-time relocation. */
6514
6515 static void
6516 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6517 {
6518 int idx;
6519 uint32_t insn;
6520 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6521 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6522 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6523
6524 if (new_inst)
6525 {
6526 /* Now the instruction is about to be fixed-up, so the operand that
6527 was previously marked as 'ignored' needs to be unmarked in order
6528 to get the encoding done properly. */
6529 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6530 new_inst->operands[idx].skip = 0;
6531 }
6532
6533 gas_assert (opnd != AARCH64_OPND_NIL);
6534
6535 switch (opnd)
6536 {
6537 case AARCH64_OPND_EXCEPTION:
6538 if (unsigned_overflow (value, 16))
6539 as_bad_where (fixP->fx_file, fixP->fx_line,
6540 _("immediate out of range"));
6541 insn = get_aarch64_insn (buf);
6542 insn |= encode_svc_imm (value);
6543 put_aarch64_insn (buf, insn);
6544 break;
6545
6546 case AARCH64_OPND_AIMM:
6547 /* ADD or SUB with immediate.
6548 NOTE this assumes we come here with an add/sub shifted reg encoding
6549 3 322|2222|2 2 2 21111 111111
6550 1 098|7654|3 2 1 09876 543210 98765 43210
6551 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6552 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6553 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6554 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6555 ->
6556 3 322|2222|2 2 221111111111
6557 1 098|7654|3 2 109876543210 98765 43210
6558 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6559 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6560 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6561 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6562 Fields sf Rn Rd are already set. */
6563 insn = get_aarch64_insn (buf);
6564 if (value < 0)
6565 {
6566 /* Add <-> sub. */
6567 insn = reencode_addsub_switch_add_sub (insn);
6568 value = -value;
6569 }
6570
6571 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6572 && unsigned_overflow (value, 12))
6573 {
6574 /* Try to shift the value by 12 to make it fit. */
6575 if (((value >> 12) << 12) == value
6576 && ! unsigned_overflow (value, 12 + 12))
6577 {
6578 value >>= 12;
6579 insn |= encode_addsub_imm_shift_amount (1);
6580 }
6581 }
6582
6583 if (unsigned_overflow (value, 12))
6584 as_bad_where (fixP->fx_file, fixP->fx_line,
6585 _("immediate out of range"));
6586
6587 insn |= encode_addsub_imm (value);
6588
6589 put_aarch64_insn (buf, insn);
6590 break;
6591
6592 case AARCH64_OPND_SIMD_IMM:
6593 case AARCH64_OPND_SIMD_IMM_SFT:
6594 case AARCH64_OPND_LIMM:
6595 /* Bit mask immediate. */
6596 gas_assert (new_inst != NULL);
6597 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6598 new_inst->operands[idx].imm.value = value;
6599 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6600 &new_inst->value, NULL, NULL))
6601 put_aarch64_insn (buf, new_inst->value);
6602 else
6603 as_bad_where (fixP->fx_file, fixP->fx_line,
6604 _("invalid immediate"));
6605 break;
6606
6607 case AARCH64_OPND_HALF:
6608 /* 16-bit unsigned immediate. */
6609 if (unsigned_overflow (value, 16))
6610 as_bad_where (fixP->fx_file, fixP->fx_line,
6611 _("immediate out of range"));
6612 insn = get_aarch64_insn (buf);
6613 insn |= encode_movw_imm (value & 0xffff);
6614 put_aarch64_insn (buf, insn);
6615 break;
6616
6617 case AARCH64_OPND_IMM_MOV:
6618 /* Operand for a generic move immediate instruction, which is
6619 an alias instruction that generates a single MOVZ, MOVN or ORR
6620 instruction to load a 32-bit/64-bit immediate value into a general
6621 register. An assembler error shall result if the immediate cannot be
6622 created by a single one of these instructions. If there is a choice,
6623 then to ensure reversibility an assembler must prefer MOVZ to MOVN,
6624 and MOVZ or MOVN to ORR. */
6625 gas_assert (new_inst != NULL);
6626 fix_mov_imm_insn (fixP, buf, new_inst, value);
6627 break;
6628
6629 case AARCH64_OPND_ADDR_SIMM7:
6630 case AARCH64_OPND_ADDR_SIMM9:
6631 case AARCH64_OPND_ADDR_SIMM9_2:
6632 case AARCH64_OPND_ADDR_UIMM12:
6633 /* Immediate offset in an address. */
6634 insn = get_aarch64_insn (buf);
6635
6636 gas_assert (new_inst != NULL && new_inst->value == insn);
6637 gas_assert (new_inst->opcode->operands[1] == opnd
6638 || new_inst->opcode->operands[2] == opnd);
6639
6640 /* Get the index of the address operand. */
6641 if (new_inst->opcode->operands[1] == opnd)
6642 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6643 idx = 1;
6644 else
6645 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6646 idx = 2;
6647
6648 /* Update the resolved offset value. */
6649 new_inst->operands[idx].addr.offset.imm = value;
6650
6651 /* Encode/fix-up. */
6652 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6653 &new_inst->value, NULL, NULL))
6654 {
6655 put_aarch64_insn (buf, new_inst->value);
6656 break;
6657 }
6658 else if (new_inst->opcode->iclass == ldst_pos
6659 && try_to_encode_as_unscaled_ldst (new_inst))
6660 {
6661 put_aarch64_insn (buf, new_inst->value);
6662 break;
6663 }
6664
6665 as_bad_where (fixP->fx_file, fixP->fx_line,
6666 _("immediate offset out of range"));
6667 break;
6668
6669 default:
6670 gas_assert (0);
6671 as_fatal (_("unhandled operand code %d"), opnd);
6672 }
6673 }
6674
6675 /* Apply a fixup (fixP) to segment data, once it has been determined
6676 by our caller that we have all the info we need to fix it up.
6677
6678 Parameter valP is the pointer to the value of the bits. */
6679
6680 void
6681 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6682 {
6683 offsetT value = *valP;
6684 uint32_t insn;
6685 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6686 int scale;
6687 unsigned flags = fixP->fx_addnumber;
6688
6689 DEBUG_TRACE ("\n\n");
6690 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6691 DEBUG_TRACE ("Enter md_apply_fix");
6692
6693 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6694
6695 /* Note whether this will delete the relocation. */
6696
6697 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6698 fixP->fx_done = 1;
6699
6700 /* Process the relocations. */
6701 switch (fixP->fx_r_type)
6702 {
6703 case BFD_RELOC_NONE:
6704 /* This will need to go in the object file. */
6705 fixP->fx_done = 0;
6706 break;
6707
6708 case BFD_RELOC_8:
6709 case BFD_RELOC_8_PCREL:
6710 if (fixP->fx_done || !seg->use_rela_p)
6711 md_number_to_chars (buf, value, 1);
6712 break;
6713
6714 case BFD_RELOC_16:
6715 case BFD_RELOC_16_PCREL:
6716 if (fixP->fx_done || !seg->use_rela_p)
6717 md_number_to_chars (buf, value, 2);
6718 break;
6719
6720 case BFD_RELOC_32:
6721 case BFD_RELOC_32_PCREL:
6722 if (fixP->fx_done || !seg->use_rela_p)
6723 md_number_to_chars (buf, value, 4);
6724 break;
6725
6726 case BFD_RELOC_64:
6727 case BFD_RELOC_64_PCREL:
6728 if (fixP->fx_done || !seg->use_rela_p)
6729 md_number_to_chars (buf, value, 8);
6730 break;
6731
6732 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6733 /* We claim that these fixups have been processed here, even if
6734 in fact we generate an error because we do not have a reloc
6735 for them, so tc_gen_reloc() will reject them. */
6736 fixP->fx_done = 1;
6737 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6738 {
6739 as_bad_where (fixP->fx_file, fixP->fx_line,
6740 _("undefined symbol %s used as an immediate value"),
6741 S_GET_NAME (fixP->fx_addsy));
6742 goto apply_fix_return;
6743 }
6744 fix_insn (fixP, flags, value);
6745 break;
6746
6747 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6748 if (fixP->fx_done || !seg->use_rela_p)
6749 {
6750 if (value & 3)
6751 as_bad_where (fixP->fx_file, fixP->fx_line,
6752 _("pc-relative load offset not word aligned"));
6753 if (signed_overflow (value, 21))
6754 as_bad_where (fixP->fx_file, fixP->fx_line,
6755 _("pc-relative load offset out of range"));
6756 insn = get_aarch64_insn (buf);
6757 insn |= encode_ld_lit_ofs_19 (value >> 2);
6758 put_aarch64_insn (buf, insn);
6759 }
6760 break;
6761
6762 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6763 if (fixP->fx_done || !seg->use_rela_p)
6764 {
6765 if (signed_overflow (value, 21))
6766 as_bad_where (fixP->fx_file, fixP->fx_line,
6767 _("pc-relative address offset out of range"));
6768 insn = get_aarch64_insn (buf);
6769 insn |= encode_adr_imm (value);
6770 put_aarch64_insn (buf, insn);
6771 }
6772 break;
6773
6774 case BFD_RELOC_AARCH64_BRANCH19:
6775 if (fixP->fx_done || !seg->use_rela_p)
6776 {
6777 if (value & 3)
6778 as_bad_where (fixP->fx_file, fixP->fx_line,
6779 _("conditional branch target not word aligned"));
6780 if (signed_overflow (value, 21))
6781 as_bad_where (fixP->fx_file, fixP->fx_line,
6782 _("conditional branch out of range"));
6783 insn = get_aarch64_insn (buf);
6784 insn |= encode_cond_branch_ofs_19 (value >> 2);
6785 put_aarch64_insn (buf, insn);
6786 }
6787 break;
6788
6789 case BFD_RELOC_AARCH64_TSTBR14:
6790 if (fixP->fx_done || !seg->use_rela_p)
6791 {
6792 if (value & 3)
6793 as_bad_where (fixP->fx_file, fixP->fx_line,
6794 _("conditional branch target not word aligned"));
6795 if (signed_overflow (value, 16))
6796 as_bad_where (fixP->fx_file, fixP->fx_line,
6797 _("conditional branch out of range"));
6798 insn = get_aarch64_insn (buf);
6799 insn |= encode_tst_branch_ofs_14 (value >> 2);
6800 put_aarch64_insn (buf, insn);
6801 }
6802 break;
6803
6804 case BFD_RELOC_AARCH64_CALL26:
6805 case BFD_RELOC_AARCH64_JUMP26:
6806 if (fixP->fx_done || !seg->use_rela_p)
6807 {
6808 if (value & 3)
6809 as_bad_where (fixP->fx_file, fixP->fx_line,
6810 _("branch target not word aligned"));
6811 if (signed_overflow (value, 28))
6812 as_bad_where (fixP->fx_file, fixP->fx_line,
6813 _("branch out of range"));
6814 insn = get_aarch64_insn (buf);
6815 insn |= encode_branch_ofs_26 (value >> 2);
6816 put_aarch64_insn (buf, insn);
6817 }
6818 break;
6819
6820 case BFD_RELOC_AARCH64_MOVW_G0:
6821 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6822 case BFD_RELOC_AARCH64_MOVW_G0_S:
6823 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6824 scale = 0;
6825 goto movw_common;
6826 case BFD_RELOC_AARCH64_MOVW_G1:
6827 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6828 case BFD_RELOC_AARCH64_MOVW_G1_S:
6829 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6830 scale = 16;
6831 goto movw_common;
6832 case BFD_RELOC_AARCH64_MOVW_G2:
6833 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6834 case BFD_RELOC_AARCH64_MOVW_G2_S:
6835 scale = 32;
6836 goto movw_common;
6837 case BFD_RELOC_AARCH64_MOVW_G3:
6838 scale = 48;
6839 movw_common:
6840 if (fixP->fx_done || !seg->use_rela_p)
6841 {
6842 insn = get_aarch64_insn (buf);
6843
6844 if (!fixP->fx_done)
6845 {
6846 /* REL signed addend must fit in 16 bits */
6847 if (signed_overflow (value, 16))
6848 as_bad_where (fixP->fx_file, fixP->fx_line,
6849 _("offset out of range"));
6850 }
6851 else
6852 {
6853 /* Check for overflow and scale. */
6854 switch (fixP->fx_r_type)
6855 {
6856 case BFD_RELOC_AARCH64_MOVW_G0:
6857 case BFD_RELOC_AARCH64_MOVW_G1:
6858 case BFD_RELOC_AARCH64_MOVW_G2:
6859 case BFD_RELOC_AARCH64_MOVW_G3:
6860 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6861 if (unsigned_overflow (value, scale + 16))
6862 as_bad_where (fixP->fx_file, fixP->fx_line,
6863 _("unsigned value out of range"));
6864 break;
6865 case BFD_RELOC_AARCH64_MOVW_G0_S:
6866 case BFD_RELOC_AARCH64_MOVW_G1_S:
6867 case BFD_RELOC_AARCH64_MOVW_G2_S:
6868 /* NOTE: We can only come here with movz or movn. */
6869 if (signed_overflow (value, scale + 16))
6870 as_bad_where (fixP->fx_file, fixP->fx_line,
6871 _("signed value out of range"));
6872 if (value < 0)
6873 {
6874 /* Force use of MOVN. */
6875 value = ~value;
6876 insn = reencode_movzn_to_movn (insn);
6877 }
6878 else
6879 {
6880 /* Force use of MOVZ. */
6881 insn = reencode_movzn_to_movz (insn);
6882 }
6883 break;
6884 default:
6885 /* Unchecked relocations. */
6886 break;
6887 }
6888 value >>= scale;
6889 }
6890
6891 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6892 insn |= encode_movw_imm (value & 0xffff);
6893
6894 put_aarch64_insn (buf, insn);
6895 }
6896 break;
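	  /* In the fully resolved (fx_done) path above, e.g. a
	     BFD_RELOC_AARCH64_MOVW_G1 fixup with value 0x12345678 is checked
	     against the scale + 16 = 32-bit limit, shifted right by 16, and
	     0x1234 is inserted into the 16-bit MOVN/MOVZ/MOVK immediate field.  */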
6897
6898 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6899 fixP->fx_r_type = (ilp32_p
6900 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6901 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6902 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6903 /* Should always be exported to object file, see
6904 aarch64_force_relocation(). */
6905 gas_assert (!fixP->fx_done);
6906 gas_assert (seg->use_rela_p);
6907 break;
6908
6909 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6910 fixP->fx_r_type = (ilp32_p
6911 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6912 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6913 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6914 /* Should always be exported to object file, see
6915 aarch64_force_relocation(). */
6916 gas_assert (!fixP->fx_done);
6917 gas_assert (seg->use_rela_p);
6918 break;
6919
6920 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6921 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6922 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6923 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6924 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6925 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6926 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6927 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6928 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6929 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6930 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6931 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6932 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6933 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6934 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
6935 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
6936 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6937 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6938 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6939 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6940 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
6941 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
6942 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
6943 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
6944 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
6945 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
6946 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
6947 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
6948 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
6949 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6950 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
6951 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
6952 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
6953 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6954 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6955 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6956 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6957 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6958 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6959 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6960 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6961 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6962 /* Should always be exported to object file, see
6963 aarch64_force_relocation(). */
6964 gas_assert (!fixP->fx_done);
6965 gas_assert (seg->use_rela_p);
6966 break;
6967
6968 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6969 /* Should always be exported to object file, see
6970 aarch64_force_relocation(). */
6971 fixP->fx_r_type = (ilp32_p
6972 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6973 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6974 gas_assert (!fixP->fx_done);
6975 gas_assert (seg->use_rela_p);
6976 break;
6977
6978 case BFD_RELOC_AARCH64_ADD_LO12:
6979 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6980 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6981 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6982 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6983 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6984 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6985 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
6986 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6987 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6988 case BFD_RELOC_AARCH64_LDST128_LO12:
6989 case BFD_RELOC_AARCH64_LDST16_LO12:
6990 case BFD_RELOC_AARCH64_LDST32_LO12:
6991 case BFD_RELOC_AARCH64_LDST64_LO12:
6992 case BFD_RELOC_AARCH64_LDST8_LO12:
6993 /* Should always be exported to object file, see
6994 aarch64_force_relocation(). */
6995 gas_assert (!fixP->fx_done);
6996 gas_assert (seg->use_rela_p);
6997 break;
6998
6999 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7000 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7001 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7002 break;
7003
7004 case BFD_RELOC_UNUSED:
7005 /* An error will already have been reported. */
7006 break;
7007
7008 default:
7009 as_bad_where (fixP->fx_file, fixP->fx_line,
7010 _("unexpected %s fixup"),
7011 bfd_get_reloc_code_name (fixP->fx_r_type));
7012 break;
7013 }
7014
7015 apply_fix_return:
7016 /* Free the allocated struct aarch64_inst.
7017 N.B. currently only a very limited number of fix-up types actually use
7018 this field, so the impact on performance should be minimal. */
7019 if (fixP->tc_fix_data.inst != NULL)
7020 free (fixP->tc_fix_data.inst);
7021
7022 return;
7023 }
7024
7025 /* Translate internal representation of relocation info to BFD target
7026 format. */
7027
7028 arelent *
7029 tc_gen_reloc (asection * section, fixS * fixp)
7030 {
7031 arelent *reloc;
7032 bfd_reloc_code_real_type code;
7033
7034 reloc = xmalloc (sizeof (arelent));
7035
7036 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
7037 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7038 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7039
7040 if (fixp->fx_pcrel)
7041 {
7042 if (section->use_rela_p)
7043 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7044 else
7045 fixp->fx_offset = reloc->address;
7046 }
7047 reloc->addend = fixp->fx_offset;
7048
7049 code = fixp->fx_r_type;
7050 switch (code)
7051 {
7052 case BFD_RELOC_16:
7053 if (fixp->fx_pcrel)
7054 code = BFD_RELOC_16_PCREL;
7055 break;
7056
7057 case BFD_RELOC_32:
7058 if (fixp->fx_pcrel)
7059 code = BFD_RELOC_32_PCREL;
7060 break;
7061
7062 case BFD_RELOC_64:
7063 if (fixp->fx_pcrel)
7064 code = BFD_RELOC_64_PCREL;
7065 break;
7066
7067 default:
7068 break;
7069 }
7070
7071 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7072 if (reloc->howto == NULL)
7073 {
7074 as_bad_where (fixp->fx_file, fixp->fx_line,
7075 _
7076 ("cannot represent %s relocation in this object file format"),
7077 bfd_get_reloc_code_name (code));
7078 return NULL;
7079 }
7080
7081 return reloc;
7082 }
7083
7084 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7085
7086 void
7087 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7088 {
7089 bfd_reloc_code_real_type type;
7090 int pcrel = 0;
7091
7092 /* Pick a reloc.
7093 FIXME: @@ Should look at CPU word size. */
7094 switch (size)
7095 {
7096 case 1:
7097 type = BFD_RELOC_8;
7098 break;
7099 case 2:
7100 type = BFD_RELOC_16;
7101 break;
7102 case 4:
7103 type = BFD_RELOC_32;
7104 break;
7105 case 8:
7106 type = BFD_RELOC_64;
7107 break;
7108 default:
7109 as_bad (_("cannot do %u-byte relocation"), size);
7110 type = BFD_RELOC_UNUSED;
7111 break;
7112 }
7113
7114 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7115 }
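/* For example, a 4-byte constant emitted through this path gets a
   BFD_RELOC_32 fixup and an 8-byte one gets BFD_RELOC_64; unsupported sizes
   are diagnosed and fall back to BFD_RELOC_UNUSED.  */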
7116
7117 int
7118 aarch64_force_relocation (struct fix *fixp)
7119 {
7120 switch (fixp->fx_r_type)
7121 {
7122 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7123 /* Perform these "immediate" internal relocations
7124 even if the symbol is extern or weak. */
7125 return 0;
7126
7127 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7128 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7129 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7130 /* Pseudo relocs that need to be fixed up according to
7131 ilp32_p. */
7132 return 0;
7133
7134 case BFD_RELOC_AARCH64_ADD_LO12:
7135 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7136 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7137 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7138 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7139 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7140 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7141 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7142 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7143 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7144 case BFD_RELOC_AARCH64_LDST128_LO12:
7145 case BFD_RELOC_AARCH64_LDST16_LO12:
7146 case BFD_RELOC_AARCH64_LDST32_LO12:
7147 case BFD_RELOC_AARCH64_LDST64_LO12:
7148 case BFD_RELOC_AARCH64_LDST8_LO12:
7149 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7150 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7151 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7152 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7153 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7154 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7155 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7156 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7157 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7158 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7159 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7160 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7161 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7162 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7163 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7164 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7165 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7166 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7167 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7168 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7169 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7170 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7171 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7172 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7173 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7174 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7175 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7176 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7177 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7178 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7179 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7180 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7181 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7182 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7183 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7184 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7185 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7186 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7187 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7188 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7189 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7190 /* Always leave these relocations for the linker. */
7191 return 1;
7192
7193 default:
7194 break;
7195 }
7196
7197 return generic_force_reloc (fixp);
7198 }
7199
7200 #ifdef OBJ_ELF
7201
7202 const char *
7203 elf64_aarch64_target_format (void)
7204 {
7205 if (target_big_endian)
7206 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7207 else
7208 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7209 }
7210
7211 void
7212 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
7213 {
7214 elf_frob_symbol (symp, puntp);
7215 }
7216 #endif
7217
7218 /* MD interface: Finalization. */
7219
7220 /* A good place to do this, although this was probably not intended
7221 for this kind of use. We need to dump the literal pool before
7222 references are made to a null symbol pointer. */
7223
7224 void
7225 aarch64_cleanup (void)
7226 {
7227 literal_pool *pool;
7228
7229 for (pool = list_of_pools; pool; pool = pool->next)
7230 {
7231 /* Put it at the end of the relevant section. */
7232 subseg_set (pool->section, pool->sub_section);
7233 s_ltorg (0);
7234 }
7235 }
7236
7237 #ifdef OBJ_ELF
7238 /* Remove any excess mapping symbols generated for alignment frags in
7239 SEC. We may have created a mapping symbol before a zero byte
7240 alignment; remove it if there's a mapping symbol after the
7241 alignment. */
7242 static void
7243 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
7244 void *dummy ATTRIBUTE_UNUSED)
7245 {
7246 segment_info_type *seginfo = seg_info (sec);
7247 fragS *fragp;
7248
7249 if (seginfo == NULL || seginfo->frchainP == NULL)
7250 return;
7251
7252 for (fragp = seginfo->frchainP->frch_root;
7253 fragp != NULL; fragp = fragp->fr_next)
7254 {
7255 symbolS *sym = fragp->tc_frag_data.last_map;
7256 fragS *next = fragp->fr_next;
7257
7258 /* Variable-sized frags have been converted to fixed size by
7259 this point. But if this was variable-sized to start with,
7260 there will be a fixed-size frag after it. So don't handle
7261 next == NULL. */
7262 if (sym == NULL || next == NULL)
7263 continue;
7264
7265 if (S_GET_VALUE (sym) < next->fr_address)
7266 /* Not at the end of this frag. */
7267 continue;
7268 know (S_GET_VALUE (sym) == next->fr_address);
7269
7270 do
7271 {
7272 if (next->tc_frag_data.first_map != NULL)
7273 {
7274 /* Next frag starts with a mapping symbol. Discard this
7275 one. */
7276 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7277 break;
7278 }
7279
7280 if (next->fr_next == NULL)
7281 {
7282 /* This mapping symbol is at the end of the section. Discard
7283 it. */
7284 know (next->fr_fix == 0 && next->fr_var == 0);
7285 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7286 break;
7287 }
7288
7289 /* As long as we have empty frags without any mapping symbols,
7290 keep looking. */
7291 /* If the next frag is non-empty and does not start with a
7292 mapping symbol, then this mapping symbol is required. */
7293 if (next->fr_address != next->fr_next->fr_address)
7294 break;
7295
7296 next = next->fr_next;
7297 }
7298 while (next != NULL);
7299 }
7300 }
7301 #endif
7302
7303 /* Adjust the symbol table. */
7304
7305 void
7306 aarch64_adjust_symtab (void)
7307 {
7308 #ifdef OBJ_ELF
7309 /* Remove any overlapping mapping symbols generated by alignment frags. */
7310 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7311 /* Now do generic ELF adjustments. */
7312 elf_adjust_symtab ();
7313 #endif
7314 }
7315
7316 static void
7317 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7318 {
7319 const char *hash_err;
7320
7321 hash_err = hash_insert (table, key, value);
7322 if (hash_err)
7323 printf ("Internal Error: Can't hash %s\n", key);
7324 }
7325
7326 static void
7327 fill_instruction_hash_table (void)
7328 {
7329 aarch64_opcode *opcode = aarch64_opcode_table;
7330
7331 while (opcode->name != NULL)
7332 {
7333 templates *templ, *new_templ;
7334 templ = hash_find (aarch64_ops_hsh, opcode->name);
7335
7336 new_templ = (templates *) xmalloc (sizeof (templates));
7337 new_templ->opcode = opcode;
7338 new_templ->next = NULL;
7339
7340 if (!templ)
7341 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7342 else
7343 {
7344 new_templ->next = templ->next;
7345 templ->next = new_templ;
7346 }
7347 ++opcode;
7348 }
7349 }
7350
7351 static inline void
7352 convert_to_upper (char *dst, const char *src, size_t num)
7353 {
7354 unsigned int i;
7355 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7356 *dst = TOUPPER (*src);
7357 *dst = '\0';
7358 }
7359
7360 /* Assume STR points to a lower-case string; allocate, convert and return
7361 the corresponding upper-case string. */
7362 static inline const char*
7363 get_upper_str (const char *str)
7364 {
7365 char *ret;
7366 size_t len = strlen (str);
7367 if ((ret = xmalloc (len + 1)) == NULL)
7368 abort ();
7369 convert_to_upper (ret, str, len);
7370 return ret;
7371 }
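/* For example, get_upper_str ("lsl") returns a freshly allocated "LSL"; it is
   used below so that shift, condition, barrier and prefetch operand names are
   hashed in both lower and upper case.  */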
7372
7373 /* MD interface: Initialization. */
7374
7375 void
7376 md_begin (void)
7377 {
7378 unsigned mach;
7379 unsigned int i;
7380
7381 if ((aarch64_ops_hsh = hash_new ()) == NULL
7382 || (aarch64_cond_hsh = hash_new ()) == NULL
7383 || (aarch64_shift_hsh = hash_new ()) == NULL
7384 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7385 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7386 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7387 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7388 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7389 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7390 || (aarch64_reg_hsh = hash_new ()) == NULL
7391 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7392 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7393 || (aarch64_pldop_hsh = hash_new ()) == NULL)
7394 as_fatal (_("virtual memory exhausted"));
7395
7396 fill_instruction_hash_table ();
7397
7398 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7399 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7400 (void *) (aarch64_sys_regs + i));
7401
7402 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7403 checked_hash_insert (aarch64_pstatefield_hsh,
7404 aarch64_pstatefields[i].name,
7405 (void *) (aarch64_pstatefields + i));
7406
7407 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
7408 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7409 aarch64_sys_regs_ic[i].template,
7410 (void *) (aarch64_sys_regs_ic + i));
7411
7412 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
7413 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7414 aarch64_sys_regs_dc[i].template,
7415 (void *) (aarch64_sys_regs_dc + i));
7416
7417 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
7418 checked_hash_insert (aarch64_sys_regs_at_hsh,
7419 aarch64_sys_regs_at[i].template,
7420 (void *) (aarch64_sys_regs_at + i));
7421
7422 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
7423 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7424 aarch64_sys_regs_tlbi[i].template,
7425 (void *) (aarch64_sys_regs_tlbi + i));
7426
7427 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7428 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7429 (void *) (reg_names + i));
7430
7431 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7432 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7433 (void *) (nzcv_names + i));
7434
7435 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7436 {
7437 const char *name = aarch64_operand_modifiers[i].name;
7438 checked_hash_insert (aarch64_shift_hsh, name,
7439 (void *) (aarch64_operand_modifiers + i));
7440 /* Also hash the name in the upper case. */
7441 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7442 (void *) (aarch64_operand_modifiers + i));
7443 }
7444
7445 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7446 {
7447 unsigned int j;
7448 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7449 the same condition code. */
7450 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7451 {
7452 const char *name = aarch64_conds[i].names[j];
7453 if (name == NULL)
7454 break;
7455 checked_hash_insert (aarch64_cond_hsh, name,
7456 (void *) (aarch64_conds + i));
7457 /* Also hash the name in the upper case. */
7458 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7459 (void *) (aarch64_conds + i));
7460 }
7461 }
7462
7463 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7464 {
7465 const char *name = aarch64_barrier_options[i].name;
7466 /* Skip xx00 - the unallocated values of option. */
7467 if ((i & 0x3) == 0)
7468 continue;
7469 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7470 (void *) (aarch64_barrier_options + i));
7471 /* Also hash the name in the upper case. */
7472 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7473 (void *) (aarch64_barrier_options + i));
7474 }
7475
7476 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7477 {
7478 const char* name = aarch64_prfops[i].name;
7479 /* Skip the unallocated hint encodings. */
7480 if (name == NULL)
7481 continue;
7482 checked_hash_insert (aarch64_pldop_hsh, name,
7483 (void *) (aarch64_prfops + i));
7484 /* Also hash the name in the upper case. */
7485 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7486 (void *) (aarch64_prfops + i));
7487 }
7488
7489 /* Set the cpu variant based on the command-line options. */
7490 if (!mcpu_cpu_opt)
7491 mcpu_cpu_opt = march_cpu_opt;
7492
7493 if (!mcpu_cpu_opt)
7494 mcpu_cpu_opt = &cpu_default;
7495
7496 cpu_variant = *mcpu_cpu_opt;
7497
7498 /* Record the CPU type. */
7499 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7500
7501 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7502 }
7503
7504 /* Command line processing. */
7505
7506 const char *md_shortopts = "m:";
7507
7508 #ifdef AARCH64_BI_ENDIAN
7509 #define OPTION_EB (OPTION_MD_BASE + 0)
7510 #define OPTION_EL (OPTION_MD_BASE + 1)
7511 #else
7512 #if TARGET_BYTES_BIG_ENDIAN
7513 #define OPTION_EB (OPTION_MD_BASE + 0)
7514 #else
7515 #define OPTION_EL (OPTION_MD_BASE + 1)
7516 #endif
7517 #endif
7518
7519 struct option md_longopts[] = {
7520 #ifdef OPTION_EB
7521 {"EB", no_argument, NULL, OPTION_EB},
7522 #endif
7523 #ifdef OPTION_EL
7524 {"EL", no_argument, NULL, OPTION_EL},
7525 #endif
7526 {NULL, no_argument, NULL, 0}
7527 };
7528
7529 size_t md_longopts_size = sizeof (md_longopts);
7530
7531 struct aarch64_option_table
7532 {
7533 char *option; /* Option name to match. */
7534 char *help; /* Help information. */
7535 int *var; /* Variable to change. */
7536 int value; /* What to change it to. */
7537 char *deprecated; /* If non-null, print this message. */
7538 };
7539
7540 static struct aarch64_option_table aarch64_opts[] = {
7541 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7542 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7543 NULL},
7544 #ifdef DEBUG_AARCH64
7545 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7546 #endif /* DEBUG_AARCH64 */
7547 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7548 NULL},
7549 {"mno-verbose-error", N_("do not output verbose error messages"),
7550 &verbose_error_p, 0, NULL},
7551 {NULL, NULL, NULL, 0, NULL}
7552 };
7553
7554 struct aarch64_cpu_option_table
7555 {
7556 char *name;
7557 const aarch64_feature_set value;
7558 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7559 case. */
7560 const char *canonical_name;
7561 };
7562
7563 /* This list should, at a minimum, contain all the cpu names
7564 recognized by GCC. */
7565 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7566 {"all", AARCH64_ANY, NULL},
7567 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7568 AARCH64_FEATURE_CRC), "Cortex-A53"},
7569 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7570 AARCH64_FEATURE_CRC), "Cortex-A57"},
7571 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7572 AARCH64_FEATURE_CRC), "Cortex-A72"},
7573 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
7574 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7575 "Samsung Exynos M1"},
7576 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7577 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7578 "Cavium ThunderX"},
7579 /* The 'xgene-1' name is an older spelling of 'xgene1'; it was used in
7580 earlier releases and is superseded by 'xgene1' in all
7581 tools. */
7582 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7583 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7584 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7585 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7586 {"generic", AARCH64_ARCH_V8, NULL},
7587
7588 {NULL, AARCH64_ARCH_NONE, NULL}
7589 };
7590
7591 struct aarch64_arch_option_table
7592 {
7593 char *name;
7594 const aarch64_feature_set value;
7595 };
7596
7597 /* This list should, at a minimum, contain all the architecture names
7598 recognized by GCC. */
7599 static const struct aarch64_arch_option_table aarch64_archs[] = {
7600 {"all", AARCH64_ANY},
7601 {"armv8-a", AARCH64_ARCH_V8},
7602 {"armv8.1-a", AARCH64_ARCH_V8_1},
7603 {NULL, AARCH64_ARCH_NONE}
7604 };
7605
7606 /* ISA extensions. */
7607 struct aarch64_option_cpu_value_table
7608 {
7609 char *name;
7610 const aarch64_feature_set value;
7611 };
7612
7613 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7614 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7615 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7616 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7617 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
7618 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7619 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0)},
7620 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0)},
7621 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_SIMD
7622 | AARCH64_FEATURE_RDMA, 0)},
7623 {NULL, AARCH64_ARCH_NONE}
7624 };
7625
7626 struct aarch64_long_option_table
7627 {
7628 char *option; /* Substring to match. */
7629 char *help; /* Help information. */
7630 int (*func) (char *subopt); /* Function to decode sub-option. */
7631 char *deprecated; /* If non-null, print this message. */
7632 };
7633
7634 static int
7635 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p,
7636 bfd_boolean ext_only)
7637 {
7638 /* We insist on extensions being added before being removed. We achieve
7639 this by using the ADDING_VALUE variable to indicate whether we are
7640 adding an extension (1) or removing it (0) and only allowing it to
7641 change in the order -1 -> 1 -> 0. */
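  /* For example, "+crc+nosimd" first adds the CRC feature and then removes
     SIMD, whereas "+nosimd+crc" is rejected because it would add an extension
     after one has already been removed.  */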
7642 int adding_value = -1;
7643 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7644
7645 /* Copy the feature set, so that we can modify it. */
7646 *ext_set = **opt_p;
7647 *opt_p = ext_set;
7648
7649 while (str != NULL && *str != 0)
7650 {
7651 const struct aarch64_option_cpu_value_table *opt;
7652 char *ext = NULL;
7653 int optlen;
7654
7655 if (!ext_only)
7656 {
7657 if (*str != '+')
7658 {
7659 as_bad (_("invalid architectural extension"));
7660 return 0;
7661 }
7662
7663 ext = strchr (++str, '+');
7664 }
7665
7666 if (ext != NULL)
7667 optlen = ext - str;
7668 else
7669 optlen = strlen (str);
7670
7671 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7672 {
7673 if (adding_value != 0)
7674 adding_value = 0;
7675 optlen -= 2;
7676 str += 2;
7677 }
7678 else if (optlen > 0)
7679 {
7680 if (adding_value == -1)
7681 adding_value = 1;
7682 else if (adding_value != 1)
7683 {
7684 as_bad (_("must specify extensions to add before specifying "
7685 "those to remove"));
7686 return FALSE;
7687 }
7688 }
7689
7690 if (optlen == 0)
7691 {
7692 as_bad (_("missing architectural extension"));
7693 return 0;
7694 }
7695
7696 gas_assert (adding_value != -1);
7697
7698 for (opt = aarch64_features; opt->name != NULL; opt++)
7699 if (strncmp (opt->name, str, optlen) == 0)
7700 {
7701 /* Add or remove the extension. */
7702 if (adding_value)
7703 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7704 else
7705 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7706 break;
7707 }
7708
7709 if (opt->name == NULL)
7710 {
7711 as_bad (_("unknown architectural extension `%s'"), str);
7712 return 0;
7713 }
7714
7715 str = ext;
7716 };
7717
7718 return 1;
7719 }
7720
7721 static int
7722 aarch64_parse_cpu (char *str)
7723 {
7724 const struct aarch64_cpu_option_table *opt;
7725 char *ext = strchr (str, '+');
7726 size_t optlen;
7727
7728 if (ext != NULL)
7729 optlen = ext - str;
7730 else
7731 optlen = strlen (str);
7732
7733 if (optlen == 0)
7734 {
7735 as_bad (_("missing cpu name `%s'"), str);
7736 return 0;
7737 }
7738
7739 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7740 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7741 {
7742 mcpu_cpu_opt = &opt->value;
7743 if (ext != NULL)
7744 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7745
7746 return 1;
7747 }
7748
7749 as_bad (_("unknown cpu `%s'"), str);
7750 return 0;
7751 }
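
/* Example command-line values handled by aarch64_parse_cpu (an editorial
   sketch; the CPU names are assumed to be entries in the aarch64_cpus
   table defined earlier in this file):

     -mcpu=cortex-a57         select a CPU by name
     -mcpu=cortex-a53+crc     a CPU name followed by an extension list
     -mcpu=+crc               rejected: the CPU name before '+' is empty  */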
7752
7753 static int
7754 aarch64_parse_arch (char *str)
7755 {
7756 const struct aarch64_arch_option_table *opt;
7757 char *ext = strchr (str, '+');
7758 size_t optlen;
7759
7760 if (ext != NULL)
7761 optlen = ext - str;
7762 else
7763 optlen = strlen (str);
7764
7765 if (optlen == 0)
7766 {
7767 as_bad (_("missing architecture name `%s'"), str);
7768 return 0;
7769 }
7770
7771 for (opt = aarch64_archs; opt->name != NULL; opt++)
7772 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7773 {
7774 march_cpu_opt = &opt->value;
7775 if (ext != NULL)
7776 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
7777
7778 return 1;
7779 }
7780
7781 as_bad (_("unknown architecture `%s'"), str);
7782 return 0;
7783 }
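
/* Example command-line values handled by aarch64_parse_arch (an editorial
   sketch; the architecture and extension names appear in aarch64_archs
   and aarch64_features above):

     -march=armv8-a              base architecture only
     -march=armv8.1-a+lse        architecture plus the LSE atomics
     -march=armv8-a+crc+crypto   several extensions may be chained  */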
7784
7785 /* ABIs. */
7786 struct aarch64_option_abi_value_table
7787 {
7788 char *name;
7789 enum aarch64_abi_type value;
7790 };
7791
7792 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7793 {"ilp32", AARCH64_ABI_ILP32},
7794 {"lp64", AARCH64_ABI_LP64},
7795 {NULL, 0}
7796 };
7797
7798 static int
7799 aarch64_parse_abi (char *str)
7800 {
7801 const struct aarch64_option_abi_value_table *opt;
7802 size_t optlen = strlen (str);
7803
7804 if (optlen == 0)
7805 {
7806 as_bad (_("missing abi name `%s'"), str);
7807 return 0;
7808 }
7809
7810 for (opt = aarch64_abis; opt->name != NULL; opt++)
7811 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7812 {
7813 aarch64_abi = opt->value;
7814 return 1;
7815 }
7816
7817 as_bad (_("unknown abi `%s'"), str);
7818 return 0;
7819 }
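
/* Example uses of the ELF-only -mabi= option parsed above (an editorial
   sketch; the option spelling is registered in aarch64_long_opts below):

     -mabi=lp64    select the LP64 data model
     -mabi=ilp32   select the ILP32 data model  */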
7820
7821 static struct aarch64_long_option_table aarch64_long_opts[] = {
7822 #ifdef OBJ_ELF
7823 {"mabi=", N_("<abi name>\t specify ABI <abi name>"),
7824 aarch64_parse_abi, NULL},
7825 #endif /* OBJ_ELF */
7826 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7827 aarch64_parse_cpu, NULL},
7828 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7829 aarch64_parse_arch, NULL},
7830 {NULL, NULL, 0, NULL}
7831 };
7832
7833 int
7834 md_parse_option (int c, char *arg)
7835 {
7836 struct aarch64_option_table *opt;
7837 struct aarch64_long_option_table *lopt;
7838
7839 switch (c)
7840 {
7841 #ifdef OPTION_EB
7842 case OPTION_EB:
7843 target_big_endian = 1;
7844 break;
7845 #endif
7846
7847 #ifdef OPTION_EL
7848 case OPTION_EL:
7849 target_big_endian = 0;
7850 break;
7851 #endif
7852
7853 case 'a':
7854 /* Listing option. Just ignore these, we don't support additional
7855 ones. */
7856 return 0;
7857
7858 default:
7859 for (opt = aarch64_opts; opt->option != NULL; opt++)
7860 {
7861 if (c == opt->option[0]
7862 && ((arg == NULL && opt->option[1] == 0)
7863 || (arg != NULL && streq (arg, opt->option + 1))))
7864 {
7865 /* If the option is deprecated, tell the user. */
7866 if (opt->deprecated != NULL)
7867 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7868 arg ? arg : "", _(opt->deprecated));
7869
7870 if (opt->var != NULL)
7871 *opt->var = opt->value;
7872
7873 return 1;
7874 }
7875 }
7876
7877 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7878 {
7879 /* These options are expected to have an argument. */
7880 if (c == lopt->option[0]
7881 && arg != NULL
7882 && strncmp (arg, lopt->option + 1,
7883 strlen (lopt->option + 1)) == 0)
7884 {
7885 /* If the option is deprecated, tell the user. */
7886 if (lopt->deprecated != NULL)
7887 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7888 _(lopt->deprecated));
7889
7890 /* Call the sub-option parser. */
7891 return lopt->func (arg + strlen (lopt->option) - 1);
7892 }
7893 }
7894
7895 return 0;
7896 }
7897
7898 return 1;
7899 }
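
/* Worked example of the long-option dispatch above (an editorial sketch):
   for "-mcpu=cortex-a53" GAS calls md_parse_option with c = 'm' and
   arg = "cpu=cortex-a53".  The "mcpu=" entry matches because
   strncmp (arg, "cpu=", 4) == 0, and aarch64_parse_cpu is then invoked
   with arg + strlen ("mcpu=") - 1, i.e. the string "cortex-a53".  */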
7900
7901 void
7902 md_show_usage (FILE * fp)
7903 {
7904 struct aarch64_option_table *opt;
7905 struct aarch64_long_option_table *lopt;
7906
7907 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7908
7909 for (opt = aarch64_opts; opt->option != NULL; opt++)
7910 if (opt->help != NULL)
7911 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7912
7913 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7914 if (lopt->help != NULL)
7915 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7916
7917 #ifdef OPTION_EB
7918 fprintf (fp, _("\
7919 -EB assemble code for a big-endian cpu\n"));
7920 #endif
7921
7922 #ifdef OPTION_EL
7923 fprintf (fp, _("\
7924 -EL assemble code for a little-endian cpu\n"));
7925 #endif
7926 }
7927
7928 /* Parse a .cpu directive. */
7929
7930 static void
7931 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7932 {
7933 const struct aarch64_cpu_option_table *opt;
7934 char saved_char;
7935 char *name;
7936 char *ext;
7937 size_t optlen;
7938
7939 name = input_line_pointer;
7940 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7941 input_line_pointer++;
7942 saved_char = *input_line_pointer;
7943 *input_line_pointer = 0;
7944
7945 ext = strchr (name, '+');
7946
7947 if (ext != NULL)
7948 optlen = ext - name;
7949 else
7950 optlen = strlen (name);
7951
7952 /* Skip the first "all" entry. */
7953 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7954 if (strlen (opt->name) == optlen
7955 && strncmp (name, opt->name, optlen) == 0)
7956 {
7957 mcpu_cpu_opt = &opt->value;
7958 if (ext != NULL)
7959 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7960 {
*input_line_pointer = saved_char;
ignore_rest_of_line ();
return;
}
7961
7962 cpu_variant = *mcpu_cpu_opt;
7963
7964 *input_line_pointer = saved_char;
7965 demand_empty_rest_of_line ();
7966 return;
7967 }
7968 as_bad (_("unknown cpu `%s'"), name);
7969 *input_line_pointer = saved_char;
7970 ignore_rest_of_line ();
7971 }
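
/* Example .cpu directives accepted by the handler above (an editorial
   sketch; the CPU names are assumed to be entries in aarch64_cpus):

     .cpu cortex-a57
     .cpu cortex-a53+crc+crypto

   The generic "all" entry is deliberately not accepted here.  */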
7972
7973
7974 /* Parse a .arch directive. */
7975
7976 static void
7977 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7978 {
7979 const struct aarch64_arch_option_table *opt;
7980 char saved_char;
7981 char *name;
7982 char *ext;
7983 size_t optlen;
7984
7985 name = input_line_pointer;
7986 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7987 input_line_pointer++;
7988 saved_char = *input_line_pointer;
7989 *input_line_pointer = 0;
7990
7991 ext = strchr (name, '+');
7992
7993 if (ext != NULL)
7994 optlen = ext - name;
7995 else
7996 optlen = strlen (name);
7997
7998 /* Skip the first "all" entry. */
7999 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8000 if (strlen (opt->name) == optlen
8001 && strncmp (name, opt->name, optlen) == 0)
8002 {
8003 mcpu_cpu_opt = &opt->value;
8004 if (ext != NULL)
8005 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8006 {
*input_line_pointer = saved_char;
ignore_rest_of_line ();
return;
}
8007
8008 cpu_variant = *mcpu_cpu_opt;
8009
8010 *input_line_pointer = saved_char;
8011 demand_empty_rest_of_line ();
8012 return;
8013 }
8014
8015 as_bad (_("unknown architecture `%s'"), name);
8016 *input_line_pointer = saved_char;
8017 ignore_rest_of_line ();
8018 }
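
/* Example .arch directives accepted by the handler above (an editorial
   sketch; the names come from aarch64_archs and aarch64_features):

     .arch armv8-a
     .arch armv8.1-a+pan+lor  */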
8019
8020 /* Parse a .arch_extension directive. */
8021
8022 static void
8023 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8024 {
8025 char saved_char;
8026 char *ext = input_line_pointer;
8027
8028 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8029 input_line_pointer++;
8030 saved_char = *input_line_pointer;
8031 *input_line_pointer = 0;
8032
8033 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8034 {
*input_line_pointer = saved_char;
ignore_rest_of_line ();
return;
}
8035
8036 cpu_variant = *mcpu_cpu_opt;
8037
8038 *input_line_pointer = saved_char;
8039 demand_empty_rest_of_line ();
8040 }
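
/* Example .arch_extension directives accepted by the handler above (an
   editorial sketch).  Because aarch64_parse_features is called with
   EXT_ONLY set, exactly one extension is named per directive, optionally
   prefixed with "no" to remove it:

     .arch_extension crc
     .arch_extension nocrypto  */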
8041
8042 /* Copy symbol information. */
8043
8044 void
8045 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
8046 {
8047 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
8048 }