[Binutils][AARCH64] Add TLS IE large memory support.
gas/config/tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
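/* For example (illustrative only), the ABI is normally selected from the
   GAS command line:

     as -mabi=lp64  file.s    sets aarch64_abi = AARCH64_ABI_LP64 (default)
     as -mabi=ilp32 file.s    sets aarch64_abi = AARCH64_ABI_ILP32

   so the ilp32_p macro above simply reports which data model was requested. */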
78
79 enum neon_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q
87 };
88
89 /* Bits for DEFINED field in neon_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92
93 struct neon_type_el
94 {
95 enum neon_el_type type;
96 unsigned char defined;
97 unsigned width;
98 int64_t index;
99 };
100
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
102
103 struct reloc
104 {
105 bfd_reloc_code_real_type type;
106 expressionS exp;
107 int pc_rel;
108 enum aarch64_opnd opnd;
109 uint32_t flags;
110 unsigned need_libopcodes_p : 1;
111 };
112
113 struct aarch64_instruction
114 {
115 /* libopcodes structure for instruction intermediate representation. */
116 aarch64_inst base;
117 /* Record assembly errors found during the parsing. */
118 struct
119 {
120 enum aarch64_operand_error_kind kind;
121 const char *error;
122 } parsing_error;
123 /* The condition that appears in the assembly line. */
124 int cond;
125 /* Relocation information (including the GAS internal fixup). */
126 struct reloc reloc;
127 /* Need to generate an immediate in the literal pool. */
128 unsigned gen_lit_pool : 1;
129 };
130
131 typedef struct aarch64_instruction aarch64_instruction;
132
133 static aarch64_instruction inst;
134
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
138 /* Diagnostics inline function utilities.
139 
140 These are lightweight utilities which should only be called by parse_operands
141 and other parsers. GAS processes each assembly line by parsing it against
142 instruction template(s); in the case of multiple templates (for the same
143 mnemonic name), those templates are tried one by one until one succeeds or
144 all fail. An assembly line may fail a few templates before being
145 successfully parsed; an error saved here in most cases is not a user error
146 but an error indicating the current template is not the right template.
147 Therefore it is very important that errors can be saved at a low cost during
148 the parsing; we don't want to slow down the whole parsing by recording
149 non-user errors in detail.
150 
151 Remember that the objective is to help GAS pick the most appropriate
152 error message in the case of multiple templates, e.g. FMOV which has 8
153 templates. */
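/* A minimal sketch of the intended usage pattern inside an operand parser
   (illustrative only; the real parsers appear later in this file):

     if (reg == PARSE_FAIL)
       {
         set_default_error ();   // cheap: records only the error kind
         return FALSE;           // let the next template be tried
       }
     ...
     set_fatal_syntax_error (_("example: definitely a user error"));

   Only after every template has failed does GAS turn the saved record into
   a user-visible diagnostic via as_bad.  */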
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline enum aarch64_operand_error_kind
175 get_error_kind (void)
176 {
177 return inst.parsing_error.kind;
178 }
179
180 static inline void
181 set_error (enum aarch64_operand_error_kind kind, const char *error)
182 {
183 inst.parsing_error.kind = kind;
184 inst.parsing_error.error = error;
185 }
186
187 static inline void
188 set_recoverable_error (const char *error)
189 {
190 set_error (AARCH64_OPDE_RECOVERABLE, error);
191 }
192
193 /* Use the DESC field of the corresponding aarch64_operand entry to compose
194 the error message. */
195 static inline void
196 set_default_error (void)
197 {
198 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
199 }
200
201 static inline void
202 set_syntax_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
205 }
206
207 static inline void
208 set_first_syntax_error (const char *error)
209 {
210 if (! error_p ())
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
212 }
213
214 static inline void
215 set_fatal_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
218 }
219 \f
220 /* Number of littlenums required to hold an extended precision number. */
221 #define MAX_LITTLENUMS 6
222
223 /* Return value for certain parsers when the parsing fails; those parsers
224 return the information of the parsed result, e.g. register number, on
225 success. */
226 #define PARSE_FAIL -1
227
228 /* This is an invalid condition code that means no conditional field is
229 present. */
230 #define COND_ALWAYS 0x10
231
232 typedef struct
233 {
234 const char *template;
235 unsigned long value;
236 } asm_barrier_opt;
237
238 typedef struct
239 {
240 const char *template;
241 uint32_t value;
242 } asm_nzcv;
243
244 struct reloc_entry
245 {
246 char *name;
247 bfd_reloc_code_real_type reloc;
248 };
249
250 /* Structure for a hash table entry for a register. */
251 typedef struct
252 {
253 const char *name;
254 unsigned char number;
255 unsigned char type;
256 unsigned char builtin;
257 } reg_entry;
258
259 /* Macros to define the register types and masks for the purpose
260 of parsing. */
261
262 #undef AARCH64_REG_TYPES
263 #define AARCH64_REG_TYPES \
264 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
265 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
266 BASIC_REG_TYPE(SP_32) /* wsp */ \
267 BASIC_REG_TYPE(SP_64) /* sp */ \
268 BASIC_REG_TYPE(Z_32) /* wzr */ \
269 BASIC_REG_TYPE(Z_64) /* xzr */ \
270 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
271 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
272 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
273 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
274 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
275 BASIC_REG_TYPE(CN) /* c[0-7] */ \
276 BASIC_REG_TYPE(VN) /* v[0-31] */ \
277 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
278 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
279 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
280 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
281 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
282 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
283 /* Typecheck: any [BHSDQ]P FP. */ \
284 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
285 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
286 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
287 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
289 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
290 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
291 /* Any integer register; used for error messages only. */ \
292 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
293 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Pseudo type to mark the end of the enumerator sequence. */ \
296 BASIC_REG_TYPE(MAX)
297
298 #undef BASIC_REG_TYPE
299 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
300 #undef MULTI_REG_TYPE
301 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
302
303 /* Register type enumerators. */
304 typedef enum
305 {
306 /* A list of REG_TYPE_*. */
307 AARCH64_REG_TYPES
308 } aarch64_reg_type;
309
310 #undef BASIC_REG_TYPE
311 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
312 #undef REG_TYPE
313 #define REG_TYPE(T) (1 << REG_TYPE_##T)
314 #undef MULTI_REG_TYPE
315 #define MULTI_REG_TYPE(T,V) V,
316
317 /* Values indexed by aarch64_reg_type to assist the type checking. */
318 static const unsigned reg_type_masks[] =
319 {
320 AARCH64_REG_TYPES
321 };
322
323 #undef BASIC_REG_TYPE
324 #undef REG_TYPE
325 #undef MULTI_REG_TYPE
326 #undef AARCH64_REG_TYPES
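/* To make the X-macro above concrete, a sketch of what it expands to: the
   enumeration receives REG_TYPE_R_32, REG_TYPE_R_64, ..., REG_TYPE_MAX, and
   reg_type_masks[] receives one bit-mask per enumerator, e.g.

     reg_type_masks[REG_TYPE_R_64]   == 1 << REG_TYPE_R_64;
     reg_type_masks[REG_TYPE_R_Z_SP] == (1 << REG_TYPE_R_32)
                                        | (1 << REG_TYPE_R_64)
                                        | (1 << REG_TYPE_SP_32)
                                        | (1 << REG_TYPE_SP_64)
                                        | (1 << REG_TYPE_Z_32)
                                        | (1 << REG_TYPE_Z_64);

   so a multi-type check reduces to a mask intersection (see
   aarch64_check_reg_type below).  */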
327
328 /* Diagnostics used when we don't get a register of the expected type.
329 Note: this has to be kept synchronized with the aarch64_reg_type definitions
330 above. */
331 static const char *
332 get_reg_expected_msg (aarch64_reg_type reg_type)
333 {
334 const char *msg;
335
336 switch (reg_type)
337 {
338 case REG_TYPE_R_32:
339 msg = N_("integer 32-bit register expected");
340 break;
341 case REG_TYPE_R_64:
342 msg = N_("integer 64-bit register expected");
343 break;
344 case REG_TYPE_R_N:
345 msg = N_("integer register expected");
346 break;
347 case REG_TYPE_R_Z_SP:
348 msg = N_("integer, zero or SP register expected");
349 break;
350 case REG_TYPE_FP_B:
351 msg = N_("8-bit SIMD scalar register expected");
352 break;
353 case REG_TYPE_FP_H:
354 msg = N_("16-bit SIMD scalar or floating-point half precision "
355 "register expected");
356 break;
357 case REG_TYPE_FP_S:
358 msg = N_("32-bit SIMD scalar or floating-point single precision "
359 "register expected");
360 break;
361 case REG_TYPE_FP_D:
362 msg = N_("64-bit SIMD scalar or floating-point double precision "
363 "register expected");
364 break;
365 case REG_TYPE_FP_Q:
366 msg = N_("128-bit SIMD scalar or floating-point quad precision "
367 "register expected");
368 break;
369 case REG_TYPE_CN:
370 msg = N_("C0 - C15 expected");
371 break;
372 case REG_TYPE_R_Z_BHSDQ_V:
373 msg = N_("register expected");
374 break;
375 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
376 msg = N_("SIMD scalar or floating-point register expected");
377 break;
378 case REG_TYPE_VN: /* any V reg */
379 msg = N_("vector register expected");
380 break;
381 default:
382 as_fatal (_("invalid register type %d"), reg_type);
383 }
384 return msg;
385 }
386
387 /* Some well known registers that we refer to directly elsewhere. */
388 #define REG_SP 31
389
390 /* Instructions take 4 bytes in the object file. */
391 #define INSN_SIZE 4
392
393 /* Define some common error messages. */
394 #define BAD_SP _("SP not allowed here")
395
396 static struct hash_control *aarch64_ops_hsh;
397 static struct hash_control *aarch64_cond_hsh;
398 static struct hash_control *aarch64_shift_hsh;
399 static struct hash_control *aarch64_sys_regs_hsh;
400 static struct hash_control *aarch64_pstatefield_hsh;
401 static struct hash_control *aarch64_sys_regs_ic_hsh;
402 static struct hash_control *aarch64_sys_regs_dc_hsh;
403 static struct hash_control *aarch64_sys_regs_at_hsh;
404 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
405 static struct hash_control *aarch64_reg_hsh;
406 static struct hash_control *aarch64_barrier_opt_hsh;
407 static struct hash_control *aarch64_nzcv_hsh;
408 static struct hash_control *aarch64_pldop_hsh;
409
410 /* Stuff needed to resolve the label ambiguity
411 As:
412 ...
413 label: <insn>
414 may differ from:
415 ...
416 label:
417 <insn> */
418
419 static symbolS *last_label_seen;
420
421 /* Literal pool structure. Held on a per-section
422 and per-sub-section basis. */
423
424 #define MAX_LITERAL_POOL_SIZE 1024
425 typedef struct literal_expression
426 {
427 expressionS exp;
428 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
429 LITTLENUM_TYPE * bignum;
430 } literal_expression;
431
432 typedef struct literal_pool
433 {
434 literal_expression literals[MAX_LITERAL_POOL_SIZE];
435 unsigned int next_free_entry;
436 unsigned int id;
437 symbolS *symbol;
438 segT section;
439 subsegT sub_section;
440 int size;
441 struct literal_pool *next;
442 } literal_pool;
443
444 /* Pointer to a linked list of literal pools. */
445 static literal_pool *list_of_pools = NULL;
446 \f
447 /* Pure syntax. */
448
449 /* This array holds the chars that always start a comment. If the
450 pre-processor is disabled, these aren't very useful. */
451 const char comment_chars[] = "";
452
453 /* This array holds the chars that only start a comment at the beginning of
454 a line. If the line seems to have the form '# 123 filename'
455 .line and .file directives will appear in the pre-processed output. */
456 /* Note that input_file.c hand checks for '#' at the beginning of the
457 first line of the input file. This is because the compiler outputs
458 #NO_APP at the beginning of its output. */
459 /* Also note that comments like this one will always work. */
460 const char line_comment_chars[] = "#";
461
462 const char line_separator_chars[] = ";";
463
464 /* Chars that can be used to separate the mantissa
465 from the exponent in floating point numbers. */
466 const char EXP_CHARS[] = "eE";
467
468 /* Chars that mean this number is a floating point constant. */
469 /* As in 0f12.456 */
470 /* or 0d1.2345e12 */
471
472 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
473
474 /* Prefix character that indicates the start of an immediate value. */
475 #define is_immediate_prefix(C) ((C) == '#')
476
477 /* Separator character handling. */
478
479 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
480
481 static inline bfd_boolean
482 skip_past_char (char **str, char c)
483 {
484 if (**str == c)
485 {
486 (*str)++;
487 return TRUE;
488 }
489 else
490 return FALSE;
491 }
492
493 #define skip_past_comma(str) skip_past_char (str, ',')
494
495 /* Arithmetic expressions (possibly involving symbols). */
496
497 static bfd_boolean in_my_get_expression_p = FALSE;
498
499 /* Third argument to my_get_expression. */
500 #define GE_NO_PREFIX 0
501 #define GE_OPT_PREFIX 1
502
503 /* Return TRUE if the string pointed to by *STR is successfully parsed
504 as a valid expression; *EP will be filled with the information of
505 such an expression. Otherwise return FALSE. */
506
507 static bfd_boolean
508 my_get_expression (expressionS * ep, char **str, int prefix_mode,
509 int reject_absent)
510 {
511 char *save_in;
512 segT seg;
513 int prefix_present_p = 0;
514
515 switch (prefix_mode)
516 {
517 case GE_NO_PREFIX:
518 break;
519 case GE_OPT_PREFIX:
520 if (is_immediate_prefix (**str))
521 {
522 (*str)++;
523 prefix_present_p = 1;
524 }
525 break;
526 default:
527 abort ();
528 }
529
530 memset (ep, 0, sizeof (expressionS));
531
532 save_in = input_line_pointer;
533 input_line_pointer = *str;
534 in_my_get_expression_p = TRUE;
535 seg = expression (ep);
536 in_my_get_expression_p = FALSE;
537
538 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
539 {
540 /* We found a bad expression in md_operand(). */
541 *str = input_line_pointer;
542 input_line_pointer = save_in;
543 if (prefix_present_p && ! error_p ())
544 set_fatal_syntax_error (_("bad expression"));
545 else
546 set_first_syntax_error (_("bad expression"));
547 return FALSE;
548 }
549
550 #ifdef OBJ_AOUT
551 if (seg != absolute_section
552 && seg != text_section
553 && seg != data_section
554 && seg != bss_section && seg != undefined_section)
555 {
556 set_syntax_error (_("bad segment"));
557 *str = input_line_pointer;
558 input_line_pointer = save_in;
559 return FALSE;
560 }
561 #else
562 (void) seg;
563 #endif
564
565 *str = input_line_pointer;
566 input_line_pointer = save_in;
567 return TRUE;
568 }
569
570 /* Turn a string in input_line_pointer into a floating point constant
571 of type TYPE, and store the appropriate bytes in *LITP. The number
572 of LITTLENUMS emitted is stored in *SIZEP. An error message is
573 returned, or NULL on OK. */
574
575 char *
576 md_atof (int type, char *litP, int *sizeP)
577 {
578 return ieee_md_atof (type, litP, sizeP, target_big_endian);
579 }
580
581 /* We handle all bad expressions here, so that we can report the faulty
582 instruction in the error message. */
583 void
584 md_operand (expressionS * exp)
585 {
586 if (in_my_get_expression_p)
587 exp->X_op = O_illegal;
588 }
589
590 /* Immediate values. */
591
592 /* Errors may be set multiple times during parsing or bit encoding
593 (particularly in the Neon bits), but usually the earliest error which is set
594 will be the most meaningful. Avoid overwriting it with later (cascading)
595 errors by calling this function. */
596
597 static void
598 first_error (const char *error)
599 {
600 if (! error_p ())
601 set_syntax_error (error);
602 }
603
604 /* Similar to first_error, but this function accepts a formatted error
605 message. */
606 static void
607 first_error_fmt (const char *format, ...)
608 {
609 va_list args;
610 enum
611 { size = 100 };
612 /* N.B. this single buffer will not cause error messages for different
613 instructions to pollute each other; this is because at the end of
614 processing of each assembly line, the error message, if any, will be
615 collected by as_bad. */
616 static char buffer[size];
617
618 if (! error_p ())
619 {
620 int ret ATTRIBUTE_UNUSED;
621 va_start (args, format);
622 ret = vsnprintf (buffer, size, format, args);
623 know (ret <= size - 1 && ret >= 0);
624 va_end (args);
625 set_syntax_error (buffer);
626 }
627 }
628
629 /* Register parsing. */
630
631 /* Generic register parser which is called by other specialized
632 register parsers.
633 CCP points to what should be the beginning of a register name.
634 If it is indeed a valid register name, advance CCP over it and
635 return the reg_entry structure; otherwise return NULL.
636 It does not issue diagnostics. */
637
638 static reg_entry *
639 parse_reg (char **ccp)
640 {
641 char *start = *ccp;
642 char *p;
643 reg_entry *reg;
644
645 #ifdef REGISTER_PREFIX
646 if (*start != REGISTER_PREFIX)
647 return NULL;
648 start++;
649 #endif
650
651 p = start;
652 if (!ISALPHA (*p) || !is_name_beginner (*p))
653 return NULL;
654
655 do
656 p++;
657 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
658
659 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
660
661 if (!reg)
662 return NULL;
663
664 *ccp = p;
665 return reg;
666 }
667
668 /* Return TRUE if REG->TYPE is compatible with the requested TYPE; otherwise
669 return FALSE. */
670 static bfd_boolean
671 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
672 {
673 if (reg->type == type)
674 return TRUE;
675
676 switch (type)
677 {
678 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
679 case REG_TYPE_R_Z_SP: /* Integer reg (inc {W}SP inc [WX]ZR). */
680 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
681 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
682 case REG_TYPE_VN: /* Vector register. */
683 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
684 return ((reg_type_masks[reg->type] & reg_type_masks[type])
685 == reg_type_masks[reg->type]);
686 default:
687 as_fatal ("unhandled type %d", type);
688 abort ();
689 }
690 }
691
692 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
693 Return the register number otherwise. *ISREG32 is set to one if the
694 register is 32-bit wide; *ISREGZERO is set to one if the register is
695 of type Z_32 or Z_64.
696 Note that this function does not issue any diagnostics. */
697
698 static int
699 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
700 int *isreg32, int *isregzero)
701 {
702 char *str = *ccp;
703 const reg_entry *reg = parse_reg (&str);
704
705 if (reg == NULL)
706 return PARSE_FAIL;
707
708 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
709 return PARSE_FAIL;
710
711 switch (reg->type)
712 {
713 case REG_TYPE_SP_32:
714 case REG_TYPE_SP_64:
715 if (reject_sp)
716 return PARSE_FAIL;
717 *isreg32 = reg->type == REG_TYPE_SP_32;
718 *isregzero = 0;
719 break;
720 case REG_TYPE_R_32:
721 case REG_TYPE_R_64:
722 *isreg32 = reg->type == REG_TYPE_R_32;
723 *isregzero = 0;
724 break;
725 case REG_TYPE_Z_32:
726 case REG_TYPE_Z_64:
727 if (reject_rz)
728 return PARSE_FAIL;
729 *isreg32 = reg->type == REG_TYPE_Z_32;
730 *isregzero = 1;
731 break;
732 default:
733 return PARSE_FAIL;
734 }
735
736 *ccp = str;
737
738 return reg->number;
739 }
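/* For instance (a sketch of the outputs): parsing "w3" returns 3 with
   *ISREG32 = 1, *ISREGZERO = 0; "sp" returns 31 with *ISREG32 = 0 (and
   fails if REJECT_SP); "xzr" returns 31 with *ISREG32 = 0, *ISREGZERO = 1
   (and fails if REJECT_RZ).  */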
740
741 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
742 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
743 otherwise return FALSE.
744
745 Accept only one occurrence of:
746 8b 16b 4h 8h 2s 4s 1d 2d
747 b h s d q */
748 static bfd_boolean
749 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
750 {
751 char *ptr = *str;
752 unsigned width;
753 unsigned element_size;
754 enum neon_el_type type;
755
756 /* skip '.' */
757 ptr++;
758
759 if (!ISDIGIT (*ptr))
760 {
761 width = 0;
762 goto elt_size;
763 }
764 width = strtoul (ptr, &ptr, 10);
765 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
766 {
767 first_error_fmt (_("bad size %d in vector width specifier"), width);
768 return FALSE;
769 }
770
771 elt_size:
772 switch (TOLOWER (*ptr))
773 {
774 case 'b':
775 type = NT_b;
776 element_size = 8;
777 break;
778 case 'h':
779 type = NT_h;
780 element_size = 16;
781 break;
782 case 's':
783 type = NT_s;
784 element_size = 32;
785 break;
786 case 'd':
787 type = NT_d;
788 element_size = 64;
789 break;
790 case 'q':
791 if (width == 1)
792 {
793 type = NT_q;
794 element_size = 128;
795 break;
796 }
797 /* fall through. */
798 default:
799 if (*ptr != '\0')
800 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
801 else
802 first_error (_("missing element size"));
803 return FALSE;
804 }
805 if (width != 0 && width * element_size != 64 && width * element_size != 128)
806 {
807 first_error_fmt (_
808 ("invalid element size %d and vector size combination %c"),
809 width, *ptr);
810 return FALSE;
811 }
812 ptr++;
813
814 parsed_type->type = type;
815 parsed_type->width = width;
816
817 *str = ptr;
818
819 return TRUE;
820 }
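/* Examples (a sketch): ".4s" yields type = NT_s, width = 4 (4 x 32 = 128
   bits); ".8b" yields NT_b, width 8 (64 bits); a bare ".s" yields NT_s with
   width 0, which the caller treats as an element reference (an index is then
   expected) rather than a full vector shape.  */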
821
822 /* Parse a single type, e.g. ".8b", leading period included.
823 Only applicable to Vn registers.
824
825 Return TRUE on success; otherwise return FALSE. */
826 static bfd_boolean
827 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
828 {
829 char *str = *ccp;
830
831 if (*str == '.')
832 {
833 if (! parse_neon_type_for_operand (vectype, &str))
834 {
835 first_error (_("vector type expected"));
836 return FALSE;
837 }
838 }
839 else
840 return FALSE;
841
842 *ccp = str;
843
844 return TRUE;
845 }
846
847 /* Parse a register of the type TYPE.
848
849 Return PARSE_FAIL if the string pointed to by *CCP is not a valid register
850 name or the parsed register is not of TYPE.
851
852 Otherwise return the register number, and optionally fill in the actual
853 type of the register in *RTYPE when multiple alternatives were given, and
854 return the register shape and element index information in *TYPEINFO.
855
856 IN_REG_LIST should be set to TRUE if the caller is parsing a register
857 list. */
858
859 static int
860 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
861 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
862 {
863 char *str = *ccp;
864 const reg_entry *reg = parse_reg (&str);
865 struct neon_type_el atype;
866 struct neon_type_el parsetype;
867 bfd_boolean is_typed_vecreg = FALSE;
868
869 atype.defined = 0;
870 atype.type = NT_invtype;
871 atype.width = -1;
872 atype.index = 0;
873
874 if (reg == NULL)
875 {
876 if (typeinfo)
877 *typeinfo = atype;
878 set_default_error ();
879 return PARSE_FAIL;
880 }
881
882 if (! aarch64_check_reg_type (reg, type))
883 {
884 DEBUG_TRACE ("reg type check failed");
885 set_default_error ();
886 return PARSE_FAIL;
887 }
888 type = reg->type;
889
890 if (type == REG_TYPE_VN
891 && parse_neon_operand_type (&parsetype, &str))
892 {
893 /* Register is of the form Vn.[bhsdq]. */
894 is_typed_vecreg = TRUE;
895
896 if (parsetype.width == 0)
897 /* Expect index. In the new scheme we cannot have
898 Vn.[bhsdq] represent a scalar. Therefore any
899 Vn.[bhsdq] should have an index following it.
900 Except in register lists, of course. */
901 atype.defined |= NTA_HASINDEX;
902 else
903 atype.defined |= NTA_HASTYPE;
904
905 atype.type = parsetype.type;
906 atype.width = parsetype.width;
907 }
908
909 if (skip_past_char (&str, '['))
910 {
911 expressionS exp;
912
913 /* Reject Sn[index] syntax. */
914 if (!is_typed_vecreg)
915 {
916 first_error (_("this type of register can't be indexed"));
917 return PARSE_FAIL;
918 }
919
920 if (in_reg_list == TRUE)
921 {
922 first_error (_("index not allowed inside register list"));
923 return PARSE_FAIL;
924 }
925
926 atype.defined |= NTA_HASINDEX;
927
928 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
929
930 if (exp.X_op != O_constant)
931 {
932 first_error (_("constant expression required"));
933 return PARSE_FAIL;
934 }
935
936 if (! skip_past_char (&str, ']'))
937 return PARSE_FAIL;
938
939 atype.index = exp.X_add_number;
940 }
941 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
942 {
943 /* Indexed vector register expected. */
944 first_error (_("indexed vector register expected"));
945 return PARSE_FAIL;
946 }
947
948 /* A vector reg Vn should be typed or indexed. */
949 if (type == REG_TYPE_VN && atype.defined == 0)
950 {
951 first_error (_("invalid use of vector register"));
952 }
953
954 if (typeinfo)
955 *typeinfo = atype;
956
957 if (rtype)
958 *rtype = type;
959
960 *ccp = str;
961
962 return reg->number;
963 }
964
965 /* Parse register.
966
967 Return the register number on success; return PARSE_FAIL otherwise.
968
969 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
970 the register (e.g. NEON double or quad reg when either has been requested).
971
972 If this is a NEON vector register with additional type information, fill
973 in the struct pointed to by VECTYPE (if non-NULL).
974
975 This parser does not handle register lists. */
976
977 static int
978 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
979 aarch64_reg_type *rtype, struct neon_type_el *vectype)
980 {
981 struct neon_type_el atype;
982 char *str = *ccp;
983 int reg = parse_typed_reg (&str, type, rtype, &atype,
984 /*in_reg_list= */ FALSE);
985
986 if (reg == PARSE_FAIL)
987 return PARSE_FAIL;
988
989 if (vectype)
990 *vectype = atype;
991
992 *ccp = str;
993
994 return reg;
995 }
996
997 static inline bfd_boolean
998 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
999 {
1000 return
1001 e1.type == e2.type
1002 && e1.defined == e2.defined
1003 && e1.width == e2.width && e1.index == e2.index;
1004 }
1005
1006 /* This function parses the NEON register list. On success, it returns
1007 the parsed register list information in the following encoded format:
1008
1009 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1010 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1011
1012 The information of the register shape and/or index is returned in
1013 *VECTYPE.
1014
1015 It returns PARSE_FAIL if the register list is invalid.
1016
1017 The list contains one to four registers.
1018 Each register can be one of:
1019 <Vt>.<T>[<index>]
1020 <Vt>.<T>
1021 All <T> should be identical.
1022 All <index> should be identical.
1023 There are restrictions on <Vt> numbers which are checked later
1024 (by reg_list_valid_p). */
1025
1026 static int
1027 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1028 {
1029 char *str = *ccp;
1030 int nb_regs;
1031 struct neon_type_el typeinfo, typeinfo_first;
1032 int val, val_range;
1033 int in_range;
1034 int ret_val;
1035 int i;
1036 bfd_boolean error = FALSE;
1037 bfd_boolean expect_index = FALSE;
1038
1039 if (*str != '{')
1040 {
1041 set_syntax_error (_("expecting {"));
1042 return PARSE_FAIL;
1043 }
1044 str++;
1045
1046 nb_regs = 0;
1047 typeinfo_first.defined = 0;
1048 typeinfo_first.type = NT_invtype;
1049 typeinfo_first.width = -1;
1050 typeinfo_first.index = 0;
1051 ret_val = 0;
1052 val = -1;
1053 val_range = -1;
1054 in_range = 0;
1055 do
1056 {
1057 if (in_range)
1058 {
1059 str++; /* skip over '-' */
1060 val_range = val;
1061 }
1062 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1063 /*in_reg_list= */ TRUE);
1064 if (val == PARSE_FAIL)
1065 {
1066 set_first_syntax_error (_("invalid vector register in list"));
1067 error = TRUE;
1068 continue;
1069 }
1070 /* reject [bhsd]n */
1071 if (typeinfo.defined == 0)
1072 {
1073 set_first_syntax_error (_("invalid scalar register in list"));
1074 error = TRUE;
1075 continue;
1076 }
1077
1078 if (typeinfo.defined & NTA_HASINDEX)
1079 expect_index = TRUE;
1080
1081 if (in_range)
1082 {
1083 if (val < val_range)
1084 {
1085 set_first_syntax_error
1086 (_("invalid range in vector register list"));
1087 error = TRUE;
1088 }
1089 val_range++;
1090 }
1091 else
1092 {
1093 val_range = val;
1094 if (nb_regs == 0)
1095 typeinfo_first = typeinfo;
1096 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1097 {
1098 set_first_syntax_error
1099 (_("type mismatch in vector register list"));
1100 error = TRUE;
1101 }
1102 }
1103 if (! error)
1104 for (i = val_range; i <= val; i++)
1105 {
1106 ret_val |= i << (5 * nb_regs);
1107 nb_regs++;
1108 }
1109 in_range = 0;
1110 }
1111 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1112
1113 skip_whitespace (str);
1114 if (*str != '}')
1115 {
1116 set_first_syntax_error (_("end of vector register list not found"));
1117 error = TRUE;
1118 }
1119 str++;
1120
1121 skip_whitespace (str);
1122
1123 if (expect_index)
1124 {
1125 if (skip_past_char (&str, '['))
1126 {
1127 expressionS exp;
1128
1129 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1130 if (exp.X_op != O_constant)
1131 {
1132 set_first_syntax_error (_("constant expression required."));
1133 error = TRUE;
1134 }
1135 if (! skip_past_char (&str, ']'))
1136 error = TRUE;
1137 else
1138 typeinfo_first.index = exp.X_add_number;
1139 }
1140 else
1141 {
1142 set_first_syntax_error (_("expected index"));
1143 error = TRUE;
1144 }
1145 }
1146
1147 if (nb_regs > 4)
1148 {
1149 set_first_syntax_error (_("too many registers in vector register list"));
1150 error = TRUE;
1151 }
1152 else if (nb_regs == 0)
1153 {
1154 set_first_syntax_error (_("empty vector register list"));
1155 error = TRUE;
1156 }
1157
1158 *ccp = str;
1159 if (! error)
1160 *vectype = typeinfo_first;
1161
1162 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1163 }
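/* Worked example (a sketch): for "{v2.4s, v3.4s}" the loop collects regno 2
   in bits 2-6 and regno 3 in bits 7-11 of the shifted result, and
   nb_regs - 1 (= 1) lands in bits 0-1, so the function returns
   (3 << 7) | (2 << 2) | 1 == 0x189, with *VECTYPE describing the ".4s"
   shape.  */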
1164
1165 /* Directives: register aliases. */
1166
1167 static reg_entry *
1168 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1169 {
1170 reg_entry *new;
1171 const char *name;
1172
1173 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1174 {
1175 if (new->builtin)
1176 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1177 str);
1178
1179 /* Only warn about a redefinition if it's not defined as the
1180 same register. */
1181 else if (new->number != number || new->type != type)
1182 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1183
1184 return NULL;
1185 }
1186
1187 name = xstrdup (str);
1188 new = xmalloc (sizeof (reg_entry));
1189
1190 new->name = name;
1191 new->number = number;
1192 new->type = type;
1193 new->builtin = FALSE;
1194
1195 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1196 abort ();
1197
1198 return new;
1199 }
1200
1201 /* Look for the .req directive. This is of the form:
1202
1203 new_register_name .req existing_register_name
1204
1205 If we find one, or if it looks sufficiently like one that we want to
1206 handle any error here, return TRUE. Otherwise return FALSE. */
1207
1208 static bfd_boolean
1209 create_register_alias (char *newname, char *p)
1210 {
1211 const reg_entry *old;
1212 char *oldname, *nbuf;
1213 size_t nlen;
1214
1215 /* The input scrubber ensures that whitespace after the mnemonic is
1216 collapsed to single spaces. */
1217 oldname = p;
1218 if (strncmp (oldname, " .req ", 6) != 0)
1219 return FALSE;
1220
1221 oldname += 6;
1222 if (*oldname == '\0')
1223 return FALSE;
1224
1225 old = hash_find (aarch64_reg_hsh, oldname);
1226 if (!old)
1227 {
1228 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1229 return TRUE;
1230 }
1231
1232 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1233 the desired alias name, and p points to its end. If not, then
1234 the desired alias name is in the global original_case_string. */
1235 #ifdef TC_CASE_SENSITIVE
1236 nlen = p - newname;
1237 #else
1238 newname = original_case_string;
1239 nlen = strlen (newname);
1240 #endif
1241
1242 nbuf = alloca (nlen + 1);
1243 memcpy (nbuf, newname, nlen);
1244 nbuf[nlen] = '\0';
1245
1246 /* Create aliases under the new name as stated; an all-lowercase
1247 version of the new name; and an all-uppercase version of the new
1248 name. */
1249 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1250 {
1251 for (p = nbuf; *p; p++)
1252 *p = TOUPPER (*p);
1253
1254 if (strncmp (nbuf, newname, nlen))
1255 {
1256 /* If this attempt to create an additional alias fails, do not bother
1257 trying to create the all-lower case alias. We will fail and issue
1258 a second, duplicate error message. This situation arises when the
1259 programmer does something like:
1260 foo .req x0
1261 Foo .req x1
1262 The second .req creates the "Foo" alias but then fails to create
1263 the artificial FOO alias because it has already been created by the
1264 first .req. */
1265 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1266 return TRUE;
1267 }
1268
1269 for (p = nbuf; *p; p++)
1270 *p = TOLOWER (*p);
1271
1272 if (strncmp (nbuf, newname, nlen))
1273 insert_reg_alias (nbuf, old->number, old->type);
1274 }
1275
1276 return TRUE;
1277 }
1278
1279 /* Should never be called, as .req goes between the alias and the
1280 register name, not at the beginning of the line. */
1281 static void
1282 s_req (int a ATTRIBUTE_UNUSED)
1283 {
1284 as_bad (_("invalid syntax for .req directive"));
1285 }
1286
1287 /* The .unreq directive deletes an alias which was previously defined
1288 by .req. For example:
1289
1290 my_alias .req x11
1291 .unreq my_alias */
1292
1293 static void
1294 s_unreq (int a ATTRIBUTE_UNUSED)
1295 {
1296 char *name;
1297 char saved_char;
1298
1299 name = input_line_pointer;
1300
1301 while (*input_line_pointer != 0
1302 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1303 ++input_line_pointer;
1304
1305 saved_char = *input_line_pointer;
1306 *input_line_pointer = 0;
1307
1308 if (!*name)
1309 as_bad (_("invalid syntax for .unreq directive"));
1310 else
1311 {
1312 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1313
1314 if (!reg)
1315 as_bad (_("unknown register alias '%s'"), name);
1316 else if (reg->builtin)
1317 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1318 name);
1319 else
1320 {
1321 char *p;
1322 char *nbuf;
1323
1324 hash_delete (aarch64_reg_hsh, name, FALSE);
1325 free ((char *) reg->name);
1326 free (reg);
1327
1328 /* Also locate the all upper case and all lower case versions.
1329 Do not complain if we cannot find one or the other as it
1330 was probably deleted above. */
1331
1332 nbuf = strdup (name);
1333 for (p = nbuf; *p; p++)
1334 *p = TOUPPER (*p);
1335 reg = hash_find (aarch64_reg_hsh, nbuf);
1336 if (reg)
1337 {
1338 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1339 free ((char *) reg->name);
1340 free (reg);
1341 }
1342
1343 for (p = nbuf; *p; p++)
1344 *p = TOLOWER (*p);
1345 reg = hash_find (aarch64_reg_hsh, nbuf);
1346 if (reg)
1347 {
1348 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1349 free ((char *) reg->name);
1350 free (reg);
1351 }
1352
1353 free (nbuf);
1354 }
1355 }
1356
1357 *input_line_pointer = saved_char;
1358 demand_empty_rest_of_line ();
1359 }
1360
1361 /* Directives: Instruction set selection. */
1362
1363 #ifdef OBJ_ELF
1364 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1365 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1366 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1367 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
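/* As an example of the effect (a sketch): assembling

     .text
     add   x0, x0, #1      // "$x" mapping symbol emitted at this offset
     .word 0x12345678      // "$d" mapping symbol emitted here
     add   x1, x1, #1      // back to "$x"

   leaves untyped local $x/$d symbols in the symbol table, which tools such
   as objdump use to switch between disassembling code and dumping data.  */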
1368
1369 /* Create a new mapping symbol for the transition to STATE. */
1370
1371 static void
1372 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1373 {
1374 symbolS *symbolP;
1375 const char *symname;
1376 int type;
1377
1378 switch (state)
1379 {
1380 case MAP_DATA:
1381 symname = "$d";
1382 type = BSF_NO_FLAGS;
1383 break;
1384 case MAP_INSN:
1385 symname = "$x";
1386 type = BSF_NO_FLAGS;
1387 break;
1388 default:
1389 abort ();
1390 }
1391
1392 symbolP = symbol_new (symname, now_seg, value, frag);
1393 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1394
1395 /* Save the mapping symbols for future reference. Also check that
1396 we do not place two mapping symbols at the same offset within a
1397 frag. We'll handle overlap between frags in
1398 check_mapping_symbols.
1399
1400 If .fill or another data-filling directive generates zero-sized data,
1401 the mapping symbol for the following code will have the same value
1402 as the one generated for the data filling directive. In this case,
1403 we replace the old symbol with the new one at the same address. */
1404 if (value == 0)
1405 {
1406 if (frag->tc_frag_data.first_map != NULL)
1407 {
1408 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1409 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1410 &symbol_lastP);
1411 }
1412 frag->tc_frag_data.first_map = symbolP;
1413 }
1414 if (frag->tc_frag_data.last_map != NULL)
1415 {
1416 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1417 S_GET_VALUE (symbolP));
1418 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1419 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1420 &symbol_lastP);
1421 }
1422 frag->tc_frag_data.last_map = symbolP;
1423 }
1424
1425 /* We must sometimes convert a region marked as code to data during
1426 code alignment, if an odd number of bytes have to be padded. The
1427 code mapping symbol is pushed to an aligned address. */
1428
1429 static void
1430 insert_data_mapping_symbol (enum mstate state,
1431 valueT value, fragS * frag, offsetT bytes)
1432 {
1433 /* If there was already a mapping symbol, remove it. */
1434 if (frag->tc_frag_data.last_map != NULL
1435 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1436 frag->fr_address + value)
1437 {
1438 symbolS *symp = frag->tc_frag_data.last_map;
1439
1440 if (value == 0)
1441 {
1442 know (frag->tc_frag_data.first_map == symp);
1443 frag->tc_frag_data.first_map = NULL;
1444 }
1445 frag->tc_frag_data.last_map = NULL;
1446 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1447 }
1448
1449 make_mapping_symbol (MAP_DATA, value, frag);
1450 make_mapping_symbol (state, value + bytes, frag);
1451 }
1452
1453 static void mapping_state_2 (enum mstate state, int max_chars);
1454
1455 /* Set the mapping state to STATE. Only call this when about to
1456 emit some STATE bytes to the file. */
1457
1458 void
1459 mapping_state (enum mstate state)
1460 {
1461 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1462
1463 if (state == MAP_INSN)
1464 /* AArch64 instructions require 4-byte alignment. When emitting
1465 instructions into any section, record the appropriate section
1466 alignment. */
1467 record_alignment (now_seg, 2);
1468
1469 if (mapstate == state)
1470 /* The mapping symbol has already been emitted.
1471 There is nothing else to do. */
1472 return;
1473
1474 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1475 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1476 /* Emit MAP_DATA within an executable section in order; otherwise, it will be
1477 evaluated later in the next else-branch. */
1478 return;
1479 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1480 {
1481 /* Only add the symbol if the offset is > 0:
1482 if we're at the first frag, check its size > 0;
1483 if we're not at the first frag, then for sure
1484 the offset is > 0. */
1485 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1486 const int add_symbol = (frag_now != frag_first)
1487 || (frag_now_fix () > 0);
1488
1489 if (add_symbol)
1490 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1491 }
1492 #undef TRANSITION
1493
1494 mapping_state_2 (state, 0);
1495 }
1496
1497 /* Same as mapping_state, but MAX_CHARS bytes have already been
1498 allocated. Put the mapping symbol that far back. */
1499
1500 static void
1501 mapping_state_2 (enum mstate state, int max_chars)
1502 {
1503 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1504
1505 if (!SEG_NORMAL (now_seg))
1506 return;
1507
1508 if (mapstate == state)
1509 /* The mapping symbol has already been emitted.
1510 There is nothing else to do. */
1511 return;
1512
1513 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1514 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1515 }
1516 #else
1517 #define mapping_state(x) /* nothing */
1518 #define mapping_state_2(x, y) /* nothing */
1519 #endif
1520
1521 /* Directives: sectioning and alignment. */
1522
1523 static void
1524 s_bss (int ignore ATTRIBUTE_UNUSED)
1525 {
1526 /* We don't support putting frags in the BSS segment, we fake it by
1527 marking in_bss, then looking at s_skip for clues. */
1528 subseg_set (bss_section, 0);
1529 demand_empty_rest_of_line ();
1530 mapping_state (MAP_DATA);
1531 }
1532
1533 static void
1534 s_even (int ignore ATTRIBUTE_UNUSED)
1535 {
1536 /* Never make a frag if we expect an extra pass. */
1537 if (!need_pass_2)
1538 frag_align (1, 0, 0);
1539
1540 record_alignment (now_seg, 1);
1541
1542 demand_empty_rest_of_line ();
1543 }
1544
1545 /* Directives: Literal pools. */
1546
1547 static literal_pool *
1548 find_literal_pool (int size)
1549 {
1550 literal_pool *pool;
1551
1552 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1553 {
1554 if (pool->section == now_seg
1555 && pool->sub_section == now_subseg && pool->size == size)
1556 break;
1557 }
1558
1559 return pool;
1560 }
1561
1562 static literal_pool *
1563 find_or_make_literal_pool (int size)
1564 {
1565 /* Next literal pool ID number. */
1566 static unsigned int latest_pool_num = 1;
1567 literal_pool *pool;
1568
1569 pool = find_literal_pool (size);
1570
1571 if (pool == NULL)
1572 {
1573 /* Create a new pool. */
1574 pool = xmalloc (sizeof (*pool));
1575 if (!pool)
1576 return NULL;
1577
1578 /* Currently we always put the literal pool in the current text
1579 section. If we were generating "small" model code where we
1580 knew that all code and initialised data was within 1MB then
1581 we could output literals to mergeable, read-only data
1582 sections. */
1583
1584 pool->next_free_entry = 0;
1585 pool->section = now_seg;
1586 pool->sub_section = now_subseg;
1587 pool->size = size;
1588 pool->next = list_of_pools;
1589 pool->symbol = NULL;
1590
1591 /* Add it to the list. */
1592 list_of_pools = pool;
1593 }
1594
1595 /* New pools, and emptied pools, will have a NULL symbol. */
1596 if (pool->symbol == NULL)
1597 {
1598 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1599 (valueT) 0, &zero_address_frag);
1600 pool->id = latest_pool_num++;
1601 }
1602
1603 /* Done. */
1604 return pool;
1605 }
1606
1607 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1608 Return TRUE on success, otherwise return FALSE. */
1609 static bfd_boolean
1610 add_to_lit_pool (expressionS *exp, int size)
1611 {
1612 literal_pool *pool;
1613 unsigned int entry;
1614
1615 pool = find_or_make_literal_pool (size);
1616
1617 /* Check if this literal value is already in the pool. */
1618 for (entry = 0; entry < pool->next_free_entry; entry++)
1619 {
1620 expressionS * litexp = & pool->literals[entry].exp;
1621
1622 if ((litexp->X_op == exp->X_op)
1623 && (exp->X_op == O_constant)
1624 && (litexp->X_add_number == exp->X_add_number)
1625 && (litexp->X_unsigned == exp->X_unsigned))
1626 break;
1627
1628 if ((litexp->X_op == exp->X_op)
1629 && (exp->X_op == O_symbol)
1630 && (litexp->X_add_number == exp->X_add_number)
1631 && (litexp->X_add_symbol == exp->X_add_symbol)
1632 && (litexp->X_op_symbol == exp->X_op_symbol))
1633 break;
1634 }
1635
1636 /* Do we need to create a new entry? */
1637 if (entry == pool->next_free_entry)
1638 {
1639 if (entry >= MAX_LITERAL_POOL_SIZE)
1640 {
1641 set_syntax_error (_("literal pool overflow"));
1642 return FALSE;
1643 }
1644
1645 pool->literals[entry].exp = *exp;
1646 pool->next_free_entry += 1;
1647 if (exp->X_op == O_big)
1648 {
1649 /* PR 16688: Bignums are held in a single global array. We must
1650 copy and preserve that value now, before it is overwritten. */
1651 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1652 memcpy (pool->literals[entry].bignum, generic_bignum,
1653 CHARS_PER_LITTLENUM * exp->X_add_number);
1654 }
1655 else
1656 pool->literals[entry].bignum = NULL;
1657 }
1658
1659 exp->X_op = O_symbol;
1660 exp->X_add_number = ((int) entry) * size;
1661 exp->X_add_symbol = pool->symbol;
1662
1663 return TRUE;
1664 }
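/* Typical use (a sketch): a load of a constant that cannot be encoded as an
   immediate is rewritten into a PC-relative literal load, e.g.

     ldr  x0, =0x1122334455667788   // value parked in the 8-byte pool
     ...
     .ltorg                         // pool (and its label) emitted here

   add_to_lit_pool records the value and rewrites the expression to refer to
   the pool's symbol plus the entry's offset.  */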
1665
1666 /* Can't use symbol_new here, so have to create a symbol and then at
1667 a later date assign it a value. That's what these functions do. */
1668
1669 static void
1670 symbol_locate (symbolS * symbolP,
1671 const char *name,/* It is copied, the caller can modify. */
1672 segT segment, /* Segment identifier (SEG_<something>). */
1673 valueT valu, /* Symbol value. */
1674 fragS * frag) /* Associated fragment. */
1675 {
1676 size_t name_length;
1677 char *preserved_copy_of_name;
1678
1679 name_length = strlen (name) + 1; /* +1 for \0. */
1680 obstack_grow (&notes, name, name_length);
1681 preserved_copy_of_name = obstack_finish (&notes);
1682
1683 #ifdef tc_canonicalize_symbol_name
1684 preserved_copy_of_name =
1685 tc_canonicalize_symbol_name (preserved_copy_of_name);
1686 #endif
1687
1688 S_SET_NAME (symbolP, preserved_copy_of_name);
1689
1690 S_SET_SEGMENT (symbolP, segment);
1691 S_SET_VALUE (symbolP, valu);
1692 symbol_clear_list_pointers (symbolP);
1693
1694 symbol_set_frag (symbolP, frag);
1695
1696 /* Link to end of symbol chain. */
1697 {
1698 extern int symbol_table_frozen;
1699
1700 if (symbol_table_frozen)
1701 abort ();
1702 }
1703
1704 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1705
1706 obj_symbol_new_hook (symbolP);
1707
1708 #ifdef tc_symbol_new_hook
1709 tc_symbol_new_hook (symbolP);
1710 #endif
1711
1712 #ifdef DEBUG_SYMS
1713 verify_symbol_chain (symbol_rootP, symbol_lastP);
1714 #endif /* DEBUG_SYMS */
1715 }
1716
1717
1718 static void
1719 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1720 {
1721 unsigned int entry;
1722 literal_pool *pool;
1723 char sym_name[20];
1724 int align;
1725
1726 for (align = 2; align <= 4; align++)
1727 {
1728 int size = 1 << align;
1729
1730 pool = find_literal_pool (size);
1731 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1732 continue;
1733
1734 mapping_state (MAP_DATA);
1735
1736 /* Align the pool, as it will be accessed with word loads.
1737 Only make a frag if we have to. */
1738 if (!need_pass_2)
1739 frag_align (align, 0, 0);
1740
1741 record_alignment (now_seg, align);
1742
1743 sprintf (sym_name, "$$lit_\002%x", pool->id);
1744
1745 symbol_locate (pool->symbol, sym_name, now_seg,
1746 (valueT) frag_now_fix (), frag_now);
1747 symbol_table_insert (pool->symbol);
1748
1749 for (entry = 0; entry < pool->next_free_entry; entry++)
1750 {
1751 expressionS * exp = & pool->literals[entry].exp;
1752
1753 if (exp->X_op == O_big)
1754 {
1755 /* PR 16688: Restore the global bignum value. */
1756 gas_assert (pool->literals[entry].bignum != NULL);
1757 memcpy (generic_bignum, pool->literals[entry].bignum,
1758 CHARS_PER_LITTLENUM * exp->X_add_number);
1759 }
1760
1761 /* First output the expression in the instruction to the pool. */
1762 emit_expr (exp, size); /* .word|.xword */
1763
1764 if (exp->X_op == O_big)
1765 {
1766 free (pool->literals[entry].bignum);
1767 pool->literals[entry].bignum = NULL;
1768 }
1769 }
1770
1771 /* Mark the pool as empty. */
1772 pool->next_free_entry = 0;
1773 pool->symbol = NULL;
1774 }
1775 }
1776
1777 #ifdef OBJ_ELF
1778 /* Forward declarations for functions below, in the MD interface
1779 section. */
1780 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1781 static struct reloc_table_entry * find_reloc_table_entry (char **);
1782
1783 /* Directives: Data. */
1784 /* N.B. the support for relocation suffixes in this directive needs to be
1785 implemented properly. */
1786
1787 static void
1788 s_aarch64_elf_cons (int nbytes)
1789 {
1790 expressionS exp;
1791
1792 #ifdef md_flush_pending_output
1793 md_flush_pending_output ();
1794 #endif
1795
1796 if (is_it_end_of_statement ())
1797 {
1798 demand_empty_rest_of_line ();
1799 return;
1800 }
1801
1802 #ifdef md_cons_align
1803 md_cons_align (nbytes);
1804 #endif
1805
1806 mapping_state (MAP_DATA);
1807 do
1808 {
1809 struct reloc_table_entry *reloc;
1810
1811 expression (&exp);
1812
1813 if (exp.X_op != O_symbol)
1814 emit_expr (&exp, (unsigned int) nbytes);
1815 else
1816 {
1817 skip_past_char (&input_line_pointer, '#');
1818 if (skip_past_char (&input_line_pointer, ':'))
1819 {
1820 reloc = find_reloc_table_entry (&input_line_pointer);
1821 if (reloc == NULL)
1822 as_bad (_("unrecognized relocation suffix"));
1823 else
1824 as_bad (_("unimplemented relocation suffix"));
1825 ignore_rest_of_line ();
1826 return;
1827 }
1828 else
1829 emit_expr (&exp, (unsigned int) nbytes);
1830 }
1831 }
1832 while (*input_line_pointer++ == ',');
1833
1834 /* Put terminator back into stream. */
1835 input_line_pointer--;
1836 demand_empty_rest_of_line ();
1837 }
1838
1839 #endif /* OBJ_ELF */
1840
1841 /* Output a 32-bit word, but mark as an instruction. */
1842
1843 static void
1844 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1845 {
1846 expressionS exp;
1847
1848 #ifdef md_flush_pending_output
1849 md_flush_pending_output ();
1850 #endif
1851
1852 if (is_it_end_of_statement ())
1853 {
1854 demand_empty_rest_of_line ();
1855 return;
1856 }
1857
1858 /* Sections are assumed to start aligned. In an executable section, there is no
1859 MAP_DATA symbol pending. So we only align the address during
1860 MAP_DATA --> MAP_INSN transition.
1861 For other sections, this is not guaranteed. */
1862 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1863 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1864 frag_align_code (2, 0);
1865
1866 #ifdef OBJ_ELF
1867 mapping_state (MAP_INSN);
1868 #endif
1869
1870 do
1871 {
1872 expression (&exp);
1873 if (exp.X_op != O_constant)
1874 {
1875 as_bad (_("constant expression required"));
1876 ignore_rest_of_line ();
1877 return;
1878 }
1879
1880 if (target_big_endian)
1881 {
1882 unsigned int val = exp.X_add_number;
1883 exp.X_add_number = SWAP_32 (val);
1884 }
1885 emit_expr (&exp, 4);
1886 }
1887 while (*input_line_pointer++ == ',');
1888
1889 /* Put terminator back into stream. */
1890 input_line_pointer--;
1891 demand_empty_rest_of_line ();
1892 }
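/* Example (a sketch): ".inst 0xd503201f" emits the 4-byte NOP encoding but,
   unlike ".word", keeps the region marked as code (a "$x" mapping symbol),
   so disassemblers still decode it as an instruction.  */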
1893
1894 #ifdef OBJ_ELF
1895 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1896
1897 static void
1898 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1899 {
1900 expressionS exp;
1901
1902 /* Since we're just labelling the code, there's no need to define a
1903 mapping symbol. */
1904 expression (&exp);
1905 /* Make sure there is enough room in this frag for the following
1906 blr. This trick only works if the blr follows immediately after
1907 the .tlsdesccall directive. */
1908 frag_grow (4);
1909 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1910 BFD_RELOC_AARCH64_TLSDESC_CALL);
1911
1912 demand_empty_rest_of_line ();
1913 }
1914 #endif /* OBJ_ELF */
1915
1916 static void s_aarch64_arch (int);
1917 static void s_aarch64_cpu (int);
1918 static void s_aarch64_arch_extension (int);
1919
1920 /* This table describes all the machine specific pseudo-ops the assembler
1921 has to support. The fields are:
1922 pseudo-op name without dot
1923 function to call to execute this pseudo-op
1924 Integer arg to pass to the function. */
1925
1926 const pseudo_typeS md_pseudo_table[] = {
1927 /* Never called because '.req' does not start a line. */
1928 {"req", s_req, 0},
1929 {"unreq", s_unreq, 0},
1930 {"bss", s_bss, 0},
1931 {"even", s_even, 0},
1932 {"ltorg", s_ltorg, 0},
1933 {"pool", s_ltorg, 0},
1934 {"cpu", s_aarch64_cpu, 0},
1935 {"arch", s_aarch64_arch, 0},
1936 {"arch_extension", s_aarch64_arch_extension, 0},
1937 {"inst", s_aarch64_inst, 0},
1938 #ifdef OBJ_ELF
1939 {"tlsdesccall", s_tlsdesccall, 0},
1940 {"word", s_aarch64_elf_cons, 4},
1941 {"long", s_aarch64_elf_cons, 4},
1942 {"xword", s_aarch64_elf_cons, 8},
1943 {"dword", s_aarch64_elf_cons, 8},
1944 #endif
1945 {0, 0, 0}
1946 };
1947 \f
1948
1949 /* Check whether STR points to a register name followed by a comma or the
1950 end of line; REG_TYPE indicates which register types are checked
1951 against. Return TRUE if STR is such a register name; otherwise return
1952 FALSE. The function does not intend to produce any diagnostics, but since
1953 the register parser aarch64_reg_parse, which is called by this function,
1954 does produce diagnostics, we call clear_error to clear any diagnostics
1955 that may be generated by aarch64_reg_parse.
1956 Also, the function returns FALSE directly if there is any user error
1957 present at the function entry. This prevents the existing diagnostics
1958 state from being spoiled.
1959 The function currently serves parse_constant_immediate and
1960 parse_big_immediate only. */
1961 static bfd_boolean
1962 reg_name_p (char *str, aarch64_reg_type reg_type)
1963 {
1964 int reg;
1965
1966 /* Prevent the diagnostics state from being spoiled. */
1967 if (error_p ())
1968 return FALSE;
1969
1970 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1971
1972 /* Clear the parsing error that may be set by the reg parser. */
1973 clear_error ();
1974
1975 if (reg == PARSE_FAIL)
1976 return FALSE;
1977
1978 skip_whitespace (str);
1979 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1980 return TRUE;
1981
1982 return FALSE;
1983 }
1984
1985 /* Parser functions used exclusively in instruction operands. */
1986
1987 /* Parse an immediate expression which may not be constant.
1988
1989 To prevent the expression parser from pushing a register name
1990 into the symbol table as an undefined symbol, firstly a check is
1991 done to find out whether STR is a valid register name followed
1992 by a comma or the end of line. Return FALSE if STR is such a
1993 string. */
1994
1995 static bfd_boolean
1996 parse_immediate_expression (char **str, expressionS *exp)
1997 {
1998 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1999 {
2000 set_recoverable_error (_("immediate operand required"));
2001 return FALSE;
2002 }
2003
2004 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2005
2006 if (exp->X_op == O_absent)
2007 {
2008 set_fatal_syntax_error (_("missing immediate expression"));
2009 return FALSE;
2010 }
2011
2012 return TRUE;
2013 }
2014
2015 /* Constant immediate-value read function for use in insn parsing.
2016 STR points to the beginning of the immediate (with the optional
2017 leading #); *VAL receives the value.
2018
2019 Return TRUE on success; otherwise return FALSE. */
2020
2021 static bfd_boolean
2022 parse_constant_immediate (char **str, int64_t * val)
2023 {
2024 expressionS exp;
2025
2026 if (! parse_immediate_expression (str, &exp))
2027 return FALSE;
2028
2029 if (exp.X_op != O_constant)
2030 {
2031 set_syntax_error (_("constant expression required"));
2032 return FALSE;
2033 }
2034
2035 *val = exp.X_add_number;
2036 return TRUE;
2037 }
2038
2039 static uint32_t
2040 encode_imm_float_bits (uint32_t imm)
2041 {
2042 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2043 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2044 }
2045
2046 /* Return TRUE if the single-precision floating-point value encoded in IMM
2047 can be expressed in the AArch64 8-bit signed floating-point format with
2048 3-bit exponent and normalized 4 bits of precision; in other words, the
2049 floating-point value must be expressible as
2050 (+/-) n / 16 * power (2, r)
2051 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
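/* Worked example: 2.0 = 16/16 * 2^1; its single-precision encoding is
   0x40000000 (sign 0, exponent 0b10000000, zero fraction), which passes the
   check below and maps to the 8-bit immediate 0x00 via
   encode_imm_float_bits.  */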
2052
2053 static bfd_boolean
2054 aarch64_imm_float_p (uint32_t imm)
2055 {
2056 /* If a single-precision floating-point value has the following bit
2057 pattern, it can be expressed in the AArch64 8-bit floating-point
2058 format:
2059
2060 3 32222222 2221111111111
2061 1 09876543 21098765432109876543210
2062 n Eeeeeexx xxxx0000000000000000000
2063
2064 where n, e and each x are either 0 or 1 independently, with
2065 E == ~ e. */
2066
2067 uint32_t pattern;
2068
2069 /* Prepare the pattern for 'Eeeeee'. */
2070 if (((imm >> 30) & 0x1) == 0)
2071 pattern = 0x3e000000;
2072 else
2073 pattern = 0x40000000;
2074
2075 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2076 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2077 }
2078
2079 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2080
2081 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2082 8-bit signed floating-point format with 3-bit exponent and normalized 4
2083 bits of precision (i.e. can be used in an FMOV instruction); return the
2084 equivalent single-precision encoding in *FPWORD.
2085
2086 Otherwise return FALSE. */
2087
2088 static bfd_boolean
2089 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2090 {
2091 /* If a double-precision floating-point value has the following bit
2092 pattern, it can be expressed in the AArch64 8-bit floating-point
2093 format:
2094
2095 6 66655555555 554444444...21111111111
2096 3 21098765432 109876543...098765432109876543210
2097 n Eeeeeeeeexx xxxx00000...000000000000000000000
2098
2099 where n, e and each x are either 0 or 1 independently, with
2100 E == ~ e. */
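/* For example, the double-precision value 2.0 (0x4000000000000000) has all
   low 32 bits clear and HIGH32 == 0x40000000, so it passes the checks below
   and converts to the single-precision encoding 0x40000000.  */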
2101
2102 uint32_t pattern;
2103 uint32_t high32 = imm >> 32;
2104
2105 /* Lower 32 bits need to be 0s. */
2106 if ((imm & 0xffffffff) != 0)
2107 return FALSE;
2108
2109 /* Prepare the pattern for 'Eeeeeeeee'. */
2110 if (((high32 >> 30) & 0x1) == 0)
2111 pattern = 0x3fc00000;
2112 else
2113 pattern = 0x40000000;
2114
2115 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2116 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2117 {
2118 /* Convert to the single-precision encoding.
2119 i.e. convert
2120 n Eeeeeeeeexx xxxx00000...000000000000000000000
2121 to
2122 n Eeeeeexx xxxx0000000000000000000. */
2123 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2124 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2125 return TRUE;
2126 }
2127 else
2128 return FALSE;
2129 }
2130
2131 /* Parse a floating-point immediate. Return TRUE on success and return the
2132 value in *IMMED in the format of IEEE754 single-precision encoding.
2133 *CCP points to the start of the string; DP_P is TRUE when the immediate
2134 is expected to be in double-precision (N.B. this only matters when
2135 hexadecimal representation is involved).
2136
2137 N.B. 0.0 is accepted by this function. */
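/* Illustrative inputs: "#1.0" and "#0x3f800000" both yield the IEEE754
   single-precision encoding 0x3f800000; with DP_P true, "#0x4000000000000000"
   (double-precision 2.0) is folded to the single-precision word
   0x40000000.  */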
2138
2139 static bfd_boolean
2140 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2141 {
2142 char *str = *ccp;
2143 char *fpnum;
2144 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2145 int found_fpchar = 0;
2146 int64_t val = 0;
2147 unsigned fpword = 0;
2148 bfd_boolean hex_p = FALSE;
2149
2150 skip_past_char (&str, '#');
2151
2152 fpnum = str;
2153 skip_whitespace (fpnum);
2154
2155 if (strncmp (fpnum, "0x", 2) == 0)
2156 {
2157 /* Support the hexadecimal representation of the IEEE754 encoding.
2158 Double-precision is expected when DP_P is TRUE, otherwise the
2159 representation should be in single-precision. */
2160 if (! parse_constant_immediate (&str, &val))
2161 goto invalid_fp;
2162
2163 if (dp_p)
2164 {
2165 if (! aarch64_double_precision_fmovable (val, &fpword))
2166 goto invalid_fp;
2167 }
2168 else if ((uint64_t) val > 0xffffffff)
2169 goto invalid_fp;
2170 else
2171 fpword = val;
2172
2173 hex_p = TRUE;
2174 }
2175 else
2176 {
2177 /* We must not accidentally parse an integer as a floating-point number.
2178 Make sure that the value we parse is not an integer by checking for
2179 special characters '.' or 'e'. */
2180 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2181 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2182 {
2183 found_fpchar = 1;
2184 break;
2185 }
2186
2187 if (!found_fpchar)
2188 return FALSE;
2189 }
2190
2191 if (! hex_p)
2192 {
2193 int i;
2194
2195 if ((str = atof_ieee (str, 's', words)) == NULL)
2196 goto invalid_fp;
2197
2198 /* Our FP word must be 32 bits (single-precision FP). */
2199 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2200 {
2201 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2202 fpword |= words[i];
2203 }
2204 }
2205
2206 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2207 {
2208 *immed = fpword;
2209 *ccp = str;
2210 return TRUE;
2211 }
2212
2213 invalid_fp:
2214 set_fatal_syntax_error (_("invalid floating-point constant"));
2215 return FALSE;
2216 }
2217
2218 /* Less-generic immediate-value read function with the possibility of loading
2219 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2220 instructions.
2221
2222 To prevent the expression parser from pushing a register name into the
2223 symbol table as an undefined symbol, a check is first done to find
2224 out whether STR is a valid register name followed by a comma or the end
2225 of line. Return FALSE if STR is such a register. */
2226
2227 static bfd_boolean
2228 parse_big_immediate (char **str, int64_t *imm)
2229 {
2230 char *ptr = *str;
2231
2232 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2233 {
2234 set_syntax_error (_("immediate operand required"));
2235 return FALSE;
2236 }
2237
2238 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2239
2240 if (inst.reloc.exp.X_op == O_constant)
2241 *imm = inst.reloc.exp.X_add_number;
2242
2243 *str = ptr;
2244
2245 return TRUE;
2246 }
2247
2248 /* Record in *RELOC that OPERAND needs a GAS internal fixup.
2249 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2250 assistance from libopcodes. */
2251
2252 static inline void
2253 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2254 const aarch64_opnd_info *operand,
2255 int need_libopcodes_p)
2256 {
2257 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2258 reloc->opnd = operand->type;
2259 if (need_libopcodes_p)
2260 reloc->need_libopcodes_p = 1;
2261 }
2262
2263 /* Return TRUE if the instruction needs to be fixed up later internally by
2264 the GAS; otherwise return FALSE. */
2265
2266 static inline bfd_boolean
2267 aarch64_gas_internal_fixup_p (void)
2268 {
2269 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2270 }
2271
2272 /* Assign the immediate value to the relevant field in *OPERAND if
2273 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2274 needs an internal fixup in a later stage.
2275 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2276 IMM.VALUE that may get assigned with the constant. */
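/* For instance, after parsing "#3" RELOC->EXP is O_constant and the value 3
   is assigned directly; after parsing "#sym" (SYM being a hypothetical
   symbol) the expression is not constant, so the operand is flagged for an
   internal fixup to be resolved later in md_apply_fix.  */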
2277 static inline void
2278 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2279 aarch64_opnd_info *operand,
2280 int addr_off_p,
2281 int need_libopcodes_p,
2282 int skip_p)
2283 {
2284 if (reloc->exp.X_op == O_constant)
2285 {
2286 if (addr_off_p)
2287 operand->addr.offset.imm = reloc->exp.X_add_number;
2288 else
2289 operand->imm.value = reloc->exp.X_add_number;
2290 reloc->type = BFD_RELOC_UNUSED;
2291 }
2292 else
2293 {
2294 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2295 /* Tell libopcodes to ignore this operand or not. This is helpful
2296 when one of the operands needs to be fixed up later but we need
2297 libopcodes to check the other operands. */
2298 operand->skip = skip_p;
2299 }
2300 }
2301
2302 /* Relocation modifiers. Each entry in the table contains the textual
2303 name for the relocation which may be placed before a symbol used as
2304 a load/store offset, or add immediate. It must be surrounded by a
2305 leading and trailing colon, for example:
2306
2307 ldr x0, [x1, #:rello:varsym]
2308 add x0, x1, #:rello:varsym */
2309
2310 struct reloc_table_entry
2311 {
2312 const char *name;
2313 int pc_rel;
2314 bfd_reloc_code_real_type adr_type;
2315 bfd_reloc_code_real_type adrp_type;
2316 bfd_reloc_code_real_type movw_type;
2317 bfd_reloc_code_real_type add_type;
2318 bfd_reloc_code_real_type ldst_type;
2319 bfd_reloc_code_real_type ld_literal_type;
2320 };
2321
2322 static struct reloc_table_entry reloc_table[] = {
2323 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2324 {"lo12", 0,
2325 0, /* adr_type */
2326 0,
2327 0,
2328 BFD_RELOC_AARCH64_ADD_LO12,
2329 BFD_RELOC_AARCH64_LDST_LO12,
2330 0},
2331
2332 /* Higher 21 bits of pc-relative page offset: ADRP */
2333 {"pg_hi21", 1,
2334 0, /* adr_type */
2335 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2336 0,
2337 0,
2338 0,
2339 0},
2340
2341 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2342 {"pg_hi21_nc", 1,
2343 0, /* adr_type */
2344 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2345 0,
2346 0,
2347 0,
2348 0},
2349
2350 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2351 {"abs_g0", 0,
2352 0, /* adr_type */
2353 0,
2354 BFD_RELOC_AARCH64_MOVW_G0,
2355 0,
2356 0,
2357 0},
2358
2359 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2360 {"abs_g0_s", 0,
2361 0, /* adr_type */
2362 0,
2363 BFD_RELOC_AARCH64_MOVW_G0_S,
2364 0,
2365 0,
2366 0},
2367
2368 /* Less significant bits 0-15 of address/value: MOVK, no check */
2369 {"abs_g0_nc", 0,
2370 0, /* adr_type */
2371 0,
2372 BFD_RELOC_AARCH64_MOVW_G0_NC,
2373 0,
2374 0,
2375 0},
2376
2377 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2378 {"abs_g1", 0,
2379 0, /* adr_type */
2380 0,
2381 BFD_RELOC_AARCH64_MOVW_G1,
2382 0,
2383 0,
2384 0},
2385
2386 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2387 {"abs_g1_s", 0,
2388 0, /* adr_type */
2389 0,
2390 BFD_RELOC_AARCH64_MOVW_G1_S,
2391 0,
2392 0,
2393 0},
2394
2395 /* Less significant bits 16-31 of address/value: MOVK, no check */
2396 {"abs_g1_nc", 0,
2397 0, /* adr_type */
2398 0,
2399 BFD_RELOC_AARCH64_MOVW_G1_NC,
2400 0,
2401 0,
2402 0},
2403
2404 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2405 {"abs_g2", 0,
2406 0, /* adr_type */
2407 0,
2408 BFD_RELOC_AARCH64_MOVW_G2,
2409 0,
2410 0,
2411 0},
2412
2413 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2414 {"abs_g2_s", 0,
2415 0, /* adr_type */
2416 0,
2417 BFD_RELOC_AARCH64_MOVW_G2_S,
2418 0,
2419 0,
2420 0},
2421
2422 /* Less significant bits 32-47 of address/value: MOVK, no check */
2423 {"abs_g2_nc", 0,
2424 0, /* adr_type */
2425 0,
2426 BFD_RELOC_AARCH64_MOVW_G2_NC,
2427 0,
2428 0,
2429 0},
2430
2431 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2432 {"abs_g3", 0,
2433 0, /* adr_type */
2434 0,
2435 BFD_RELOC_AARCH64_MOVW_G3,
2436 0,
2437 0,
2438 0},
2439
2440 /* Get to the page containing GOT entry for a symbol. */
2441 {"got", 1,
2442 0, /* adr_type */
2443 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2444 0,
2445 0,
2446 0,
2447 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2448
2449 /* 12 bit offset into the page containing GOT entry for that symbol. */
2450 {"got_lo12", 0,
2451 0, /* adr_type */
2452 0,
2453 0,
2454 0,
2455 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2456 0},
2457
2458 /* Bits 0-15 of address/value: MOVK, no check. */
2459 {"gotoff_g0_nc", 0,
2460 0, /* adr_type */
2461 0,
2462 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2463 0,
2464 0,
2465 0},
2466
2467 /* Most significant bits 16-31 of address/value: MOVZ. */
2468 {"gotoff_g1", 0,
2469 0, /* adr_type */
2470 0,
2471 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2472 0,
2473 0,
2474 0},
2475
2476 /* 15 bit offset into the page containing GOT entry for that symbol. */
2477 {"gotoff_lo15", 0,
2478 0, /* adr_type */
2479 0,
2480 0,
2481 0,
2482 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2483 0},
2484
2485 /* Bits 0-15 of the GOT entry offset for a TLS IE symbol: MOVK, no check */
2486 {"gottprel_g0_nc", 0,
2487 0, /* adr_type */
2488 0,
2489 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2490 0,
2491 0,
2492 0},
2493
2494 /* Bits 16-31 of the GOT entry offset for a TLS IE symbol: MOVZ */
2495 {"gottprel_g1", 0,
2496 0, /* adr_type */
2497 0,
2498 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2499 0,
2500 0,
2501 0},
2502
2503 /* Get to the page containing GOT TLS entry for a symbol */
2504 {"tlsgd", 0,
2505 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2506 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2507 0,
2508 0,
2509 0,
2510 0},
2511
2512 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2513 {"tlsgd_lo12", 0,
2514 0, /* adr_type */
2515 0,
2516 0,
2517 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2518 0,
2519 0},
2520
2521 /* Lower 16 bits of address/value: MOVK. */
2522 {"tlsgd_g0_nc", 0,
2523 0, /* adr_type */
2524 0,
2525 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2526 0,
2527 0,
2528 0},
2529
2530 /* Most significant bits 16-31 of address/value: MOVZ. */
2531 {"tlsgd_g1", 0,
2532 0, /* adr_type */
2533 0,
2534 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2535 0,
2536 0,
2537 0},
2538
2539 /* Get to the page containing GOT TLS entry for a symbol */
2540 {"tlsdesc", 0,
2541 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2542 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2543 0,
2544 0,
2545 0,
2546 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2547
2548 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2549 {"tlsdesc_lo12", 0,
2550 0, /* adr_type */
2551 0,
2552 0,
2553 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2554 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2555 0},
2556
2557 /* Get to the page containing GOT TLS entry for a symbol.
2558 As with GD, we allocate two consecutive GOT slots
2559 for module index and module offset; the only difference
2560 from GD is that the module offset should be initialized to
2561 zero without any outstanding runtime relocation. */
2562 {"tlsldm", 0,
2563 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2564 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2565 0,
2566 0,
2567 0,
2568 0},
2569
2570 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2571 {"tlsldm_lo12_nc", 0,
2572 0, /* adr_type */
2573 0,
2574 0,
2575 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2576 0,
2577 0},
2578
2579 /* 12 bit offset into the module TLS base address. */
2580 {"dtprel_lo12", 0,
2581 0, /* adr_type */
2582 0,
2583 0,
2584 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2585 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2586 0},
2587
2588 /* Same as dtprel_lo12, no overflow check. */
2589 {"dtprel_lo12_nc", 0,
2590 0, /* adr_type */
2591 0,
2592 0,
2593 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2594 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2595 0},
2596
2597 /* bits[23:12] of offset to the module TLS base address. */
2598 {"dtprel_hi12", 0,
2599 0, /* adr_type */
2600 0,
2601 0,
2602 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2603 0,
2604 0},
2605
2606 /* bits[15:0] of offset to the module TLS base address. */
2607 {"dtprel_g0", 0,
2608 0, /* adr_type */
2609 0,
2610 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2611 0,
2612 0,
2613 0},
2614
2615 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2616 {"dtprel_g0_nc", 0,
2617 0, /* adr_type */
2618 0,
2619 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2620 0,
2621 0,
2622 0},
2623
2624 /* bits[31:16] of offset to the module TLS base address. */
2625 {"dtprel_g1", 0,
2626 0, /* adr_type */
2627 0,
2628 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2629 0,
2630 0,
2631 0},
2632
2633 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2634 {"dtprel_g1_nc", 0,
2635 0, /* adr_type */
2636 0,
2637 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2638 0,
2639 0,
2640 0},
2641
2642 /* bits[47:32] of offset to the module TLS base address. */
2643 {"dtprel_g2", 0,
2644 0, /* adr_type */
2645 0,
2646 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2647 0,
2648 0,
2649 0},
2650
2651 /* Get to the page containing GOT TLS entry for a symbol */
2652 {"gottprel", 0,
2653 0, /* adr_type */
2654 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2655 0,
2656 0,
2657 0,
2658 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2659
2660 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2661 {"gottprel_lo12", 0,
2662 0, /* adr_type */
2663 0,
2664 0,
2665 0,
2666 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2667 0},
2668
2669 /* Get tp offset for a symbol. */
2670 {"tprel", 0,
2671 0, /* adr_type */
2672 0,
2673 0,
2674 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2675 0,
2676 0},
2677
2678 /* Get tp offset for a symbol. */
2679 {"tprel_lo12", 0,
2680 0, /* adr_type */
2681 0,
2682 0,
2683 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2684 0,
2685 0},
2686
2687 /* Get tp offset for a symbol. */
2688 {"tprel_hi12", 0,
2689 0, /* adr_type */
2690 0,
2691 0,
2692 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2693 0,
2694 0},
2695
2696 /* Get tp offset for a symbol. */
2697 {"tprel_lo12_nc", 0,
2698 0, /* adr_type */
2699 0,
2700 0,
2701 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2702 0,
2703 0},
2704
2705 /* Most significant bits 32-47 of address/value: MOVZ. */
2706 {"tprel_g2", 0,
2707 0, /* adr_type */
2708 0,
2709 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2710 0,
2711 0,
2712 0},
2713
2714 /* Most significant bits 16-31 of address/value: MOVZ. */
2715 {"tprel_g1", 0,
2716 0, /* adr_type */
2717 0,
2718 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2719 0,
2720 0,
2721 0},
2722
2723 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2724 {"tprel_g1_nc", 0,
2725 0, /* adr_type */
2726 0,
2727 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2728 0,
2729 0,
2730 0},
2731
2732 /* Most significant bits 0-15 of address/value: MOVZ. */
2733 {"tprel_g0", 0,
2734 0, /* adr_type */
2735 0,
2736 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2737 0,
2738 0,
2739 0},
2740
2741 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2742 {"tprel_g0_nc", 0,
2743 0, /* adr_type */
2744 0,
2745 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2746 0,
2747 0,
2748 0},
2749
2750 /* 15-bit offset from GOT entry to base address of GOT table. */
2751 {"gotpage_lo15", 0,
2752 0,
2753 0,
2754 0,
2755 0,
2756 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2757 0},
2758
2759 /* 14-bit offset from GOT entry to base address of GOT table. */
2760 {"gotpage_lo14", 0,
2761 0,
2762 0,
2763 0,
2764 0,
2765 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2766 0},
2767 };
2768
2769 /* Given the address of a pointer pointing to the textual name of a
2770 relocation as may appear in assembler source, attempt to find its
2771 details in reloc_table. The pointer will be updated to the character
2772 after the trailing colon. On failure, NULL will be returned;
2773 otherwise return the reloc_table_entry. */
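/* E.g. with *STR pointing at "lo12:sym" (SYM being any symbol name), the
   "lo12" entry is returned and *STR is advanced to point at "sym".  */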
2774
2775 static struct reloc_table_entry *
2776 find_reloc_table_entry (char **str)
2777 {
2778 unsigned int i;
2779 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2780 {
2781 int length = strlen (reloc_table[i].name);
2782
2783 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2784 && (*str)[length] == ':')
2785 {
2786 *str += (length + 1);
2787 return &reloc_table[i];
2788 }
2789 }
2790
2791 return NULL;
2792 }
2793
2794 /* Mode argument to parse_shift and parse_shifter_operand. */
2795 enum parse_shift_mode
2796 {
2797 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2798 "#imm{,lsl #n}" */
2799 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2800 "#imm" */
2801 SHIFTED_LSL, /* bare "lsl #n" */
2802 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2803 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2804 };
2805
2806 /* Parse a <shift> operator on an AArch64 data processing instruction.
2807 Return TRUE on success; otherwise return FALSE. */
2808 static bfd_boolean
2809 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2810 {
2811 const struct aarch64_name_value_pair *shift_op;
2812 enum aarch64_modifier_kind kind;
2813 expressionS exp;
2814 int exp_has_prefix;
2815 char *s = *str;
2816 char *p = s;
2817
2818 for (p = *str; ISALPHA (*p); p++)
2819 ;
2820
2821 if (p == *str)
2822 {
2823 set_syntax_error (_("shift expression expected"));
2824 return FALSE;
2825 }
2826
2827 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2828
2829 if (shift_op == NULL)
2830 {
2831 set_syntax_error (_("shift operator expected"));
2832 return FALSE;
2833 }
2834
2835 kind = aarch64_get_operand_modifier (shift_op);
2836
2837 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2838 {
2839 set_syntax_error (_("invalid use of 'MSL'"));
2840 return FALSE;
2841 }
2842
2843 switch (mode)
2844 {
2845 case SHIFTED_LOGIC_IMM:
2846 if (aarch64_extend_operator_p (kind) == TRUE)
2847 {
2848 set_syntax_error (_("extending shift is not permitted"));
2849 return FALSE;
2850 }
2851 break;
2852
2853 case SHIFTED_ARITH_IMM:
2854 if (kind == AARCH64_MOD_ROR)
2855 {
2856 set_syntax_error (_("'ROR' shift is not permitted"));
2857 return FALSE;
2858 }
2859 break;
2860
2861 case SHIFTED_LSL:
2862 if (kind != AARCH64_MOD_LSL)
2863 {
2864 set_syntax_error (_("only 'LSL' shift is permitted"));
2865 return FALSE;
2866 }
2867 break;
2868
2869 case SHIFTED_REG_OFFSET:
2870 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2871 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2872 {
2873 set_fatal_syntax_error
2874 (_("invalid shift for the register offset addressing mode"));
2875 return FALSE;
2876 }
2877 break;
2878
2879 case SHIFTED_LSL_MSL:
2880 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2881 {
2882 set_syntax_error (_("invalid shift operator"));
2883 return FALSE;
2884 }
2885 break;
2886
2887 default:
2888 abort ();
2889 }
2890
2891 /* Whitespace can appear here if the next thing is a bare digit. */
2892 skip_whitespace (p);
2893
2894 /* Parse shift amount. */
2895 exp_has_prefix = 0;
2896 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2897 exp.X_op = O_absent;
2898 else
2899 {
2900 if (is_immediate_prefix (*p))
2901 {
2902 p++;
2903 exp_has_prefix = 1;
2904 }
2905 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2906 }
2907 if (exp.X_op == O_absent)
2908 {
2909 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2910 {
2911 set_syntax_error (_("missing shift amount"));
2912 return FALSE;
2913 }
2914 operand->shifter.amount = 0;
2915 }
2916 else if (exp.X_op != O_constant)
2917 {
2918 set_syntax_error (_("constant shift amount required"));
2919 return FALSE;
2920 }
2921 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2922 {
2923 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2924 return FALSE;
2925 }
2926 else
2927 {
2928 operand->shifter.amount = exp.X_add_number;
2929 operand->shifter.amount_present = 1;
2930 }
2931
2932 operand->shifter.operator_present = 1;
2933 operand->shifter.kind = kind;
2934
2935 *str = p;
2936 return TRUE;
2937 }
2938
2939 /* Parse a <shifter_operand> for a data processing instruction:
2940
2941 #<immediate>
2942 #<immediate>, LSL #imm
2943
2944 Validation of immediate operands is deferred to md_apply_fix.
2945
2946 Return TRUE on success; otherwise return FALSE. */
2947
2948 static bfd_boolean
2949 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2950 enum parse_shift_mode mode)
2951 {
2952 char *p;
2953
2954 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2955 return FALSE;
2956
2957 p = *str;
2958
2959 /* Accept an immediate expression. */
2960 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2961 return FALSE;
2962
2963 /* Accept optional LSL for arithmetic immediate values. */
2964 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2965 if (! parse_shift (&p, operand, SHIFTED_LSL))
2966 return FALSE;
2967
2968 /* Do not accept any shifter for logical immediate values. */
2969 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2970 && parse_shift (&p, operand, mode))
2971 {
2972 set_syntax_error (_("unexpected shift operator"));
2973 return FALSE;
2974 }
2975
2976 *str = p;
2977 return TRUE;
2978 }
2979
2980 /* Parse a <shifter_operand> for a data processing instruction:
2981
2982 <Rm>
2983 <Rm>, <shift>
2984 #<immediate>
2985 #<immediate>, LSL #imm
2986
2987 where <shift> is handled by parse_shift above, and the last two
2988 cases are handled by the function above.
2989
2990 Validation of immediate operands is deferred to md_apply_fix.
2991
2992 Return TRUE on success; otherwise return FALSE. */
2993
2994 static bfd_boolean
2995 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2996 enum parse_shift_mode mode)
2997 {
2998 int reg;
2999 int isreg32, isregzero;
3000 enum aarch64_operand_class opd_class
3001 = aarch64_get_operand_class (operand->type);
3002
3003 if ((reg =
3004 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
3005 {
3006 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3007 {
3008 set_syntax_error (_("unexpected register in the immediate operand"));
3009 return FALSE;
3010 }
3011
3012 if (!isregzero && reg == REG_SP)
3013 {
3014 set_syntax_error (BAD_SP);
3015 return FALSE;
3016 }
3017
3018 operand->reg.regno = reg;
3019 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
3020
3021 /* Accept optional shift operation on register. */
3022 if (! skip_past_comma (str))
3023 return TRUE;
3024
3025 if (! parse_shift (str, operand, mode))
3026 return FALSE;
3027
3028 return TRUE;
3029 }
3030 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3031 {
3032 set_syntax_error
3033 (_("integer register expected in the extended/shifted operand "
3034 "register"));
3035 return FALSE;
3036 }
3037
3038 /* We have a shifted immediate variable. */
3039 return parse_shifter_operand_imm (str, operand, mode);
3040 }
3041
3042 /* Return TRUE on success; return FALSE otherwise. */
3043
3044 static bfd_boolean
3045 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3046 enum parse_shift_mode mode)
3047 {
3048 char *p = *str;
3049
3050 /* Determine if we have the sequence of characters #: or just :
3051 coming next. If we do, then we check for a :rello: relocation
3052 modifier. If we don't, punt the whole lot to
3053 parse_shifter_operand. */
3054
3055 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3056 {
3057 struct reloc_table_entry *entry;
3058
3059 if (p[0] == '#')
3060 p += 2;
3061 else
3062 p++;
3063 *str = p;
3064
3065 /* Try to parse a relocation. Anything else is an error. */
3066 if (!(entry = find_reloc_table_entry (str)))
3067 {
3068 set_syntax_error (_("unknown relocation modifier"));
3069 return FALSE;
3070 }
3071
3072 if (entry->add_type == 0)
3073 {
3074 set_syntax_error
3075 (_("this relocation modifier is not allowed on this instruction"));
3076 return FALSE;
3077 }
3078
3079 /* Save str before we decompose it. */
3080 p = *str;
3081
3082 /* Next, we parse the expression. */
3083 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3084 return FALSE;
3085
3086 /* Record the relocation type (use the ADD variant here). */
3087 inst.reloc.type = entry->add_type;
3088 inst.reloc.pc_rel = entry->pc_rel;
3089
3090 /* If str is empty, we've reached the end, stop here. */
3091 if (**str == '\0')
3092 return TRUE;
3093
3094 /* Otherwise, we have a shifted reloc modifier, so rewind to
3095 recover the variable name and continue parsing for the shifter. */
3096 *str = p;
3097 return parse_shifter_operand_imm (str, operand, mode);
3098 }
3099
3100 return parse_shifter_operand (str, operand, mode);
3101 }
3102
3103 /* Parse all forms of an address expression. Information is written
3104 to *OPERAND and/or inst.reloc.
3105
3106 The A64 instruction set has the following addressing modes:
3107
3108 Offset
3109 [base] // in SIMD ld/st structure
3110 [base{,#0}] // in ld/st exclusive
3111 [base{,#imm}]
3112 [base,Xm{,LSL #imm}]
3113 [base,Xm,SXTX {#imm}]
3114 [base,Wm,(S|U)XTW {#imm}]
3115 Pre-indexed
3116 [base,#imm]!
3117 Post-indexed
3118 [base],#imm
3119 [base],Xm // in SIMD ld/st structure
3120 PC-relative (literal)
3121 label
3122 =immediate
3123
3124 (As a convenience, the notation "=immediate" is permitted in conjunction
3125 with the pc-relative literal load instructions to automatically place an
3126 immediate value or symbolic address in a nearby literal pool and generate
3127 a hidden label which references it.)
3128
3129 Upon a successful parsing, the address structure in *OPERAND will be
3130 filled in the following way:
3131
3132 .base_regno = <base>
3133 .offset.is_reg // 1 if the offset is a register
3134 .offset.imm = <imm>
3135 .offset.regno = <Rm>
3136
3137 For different addressing modes defined in the A64 ISA:
3138
3139 Offset
3140 .pcrel=0; .preind=1; .postind=0; .writeback=0
3141 Pre-indexed
3142 .pcrel=0; .preind=1; .postind=0; .writeback=1
3143 Post-indexed
3144 .pcrel=0; .preind=0; .postind=1; .writeback=1
3145 PC-relative (literal)
3146 .pcrel=1; .preind=1; .postind=0; .writeback=0
3147
3148 The shift/extension information, if any, will be stored in .shifter.
3149
3150 It is the caller's responsibility to check for addressing modes not
3151 supported by the instruction, and to set inst.reloc.type. */
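/* As an illustration, parsing "[x1, w2, sxtw #2]" sets .base_regno = 1,
   .offset.regno = 2, .offset.is_reg = 1, .preind = 1, .shifter.kind = SXTW
   and .shifter.amount = 2; no relocation is recorded for this form.  */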
3152
3153 static bfd_boolean
3154 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
3155 int accept_reg_post_index)
3156 {
3157 char *p = *str;
3158 int reg;
3159 int isreg32, isregzero;
3160 expressionS *exp = &inst.reloc.exp;
3161
3162 if (! skip_past_char (&p, '['))
3163 {
3164 /* =immediate or label. */
3165 operand->addr.pcrel = 1;
3166 operand->addr.preind = 1;
3167
3168 /* #:<reloc_op>:<symbol> */
3169 skip_past_char (&p, '#');
3170 if (reloc && skip_past_char (&p, ':'))
3171 {
3172 bfd_reloc_code_real_type ty;
3173 struct reloc_table_entry *entry;
3174
3175 /* Try to parse a relocation modifier. Anything else is
3176 an error. */
3177 entry = find_reloc_table_entry (&p);
3178 if (! entry)
3179 {
3180 set_syntax_error (_("unknown relocation modifier"));
3181 return FALSE;
3182 }
3183
3184 switch (operand->type)
3185 {
3186 case AARCH64_OPND_ADDR_PCREL21:
3187 /* adr */
3188 ty = entry->adr_type;
3189 break;
3190
3191 default:
3192 ty = entry->ld_literal_type;
3193 break;
3194 }
3195
3196 if (ty == 0)
3197 {
3198 set_syntax_error
3199 (_("this relocation modifier is not allowed on this "
3200 "instruction"));
3201 return FALSE;
3202 }
3203
3204 /* #:<reloc_op>: */
3205 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3206 {
3207 set_syntax_error (_("invalid relocation expression"));
3208 return FALSE;
3209 }
3210
3211 /* #:<reloc_op>:<expr> */
3212 /* Record the relocation type. */
3213 inst.reloc.type = ty;
3214 inst.reloc.pc_rel = entry->pc_rel;
3215 }
3216 else
3217 {
3218
3219 if (skip_past_char (&p, '='))
3220 /* =immediate; need to generate the literal in the literal pool. */
3221 inst.gen_lit_pool = 1;
3222
3223 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3224 {
3225 set_syntax_error (_("invalid address"));
3226 return FALSE;
3227 }
3228 }
3229
3230 *str = p;
3231 return TRUE;
3232 }
3233
3234 /* [ */
3235
3236 /* Accept SP and reject ZR */
3237 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
3238 if (reg == PARSE_FAIL || isreg32)
3239 {
3240 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3241 return FALSE;
3242 }
3243 operand->addr.base_regno = reg;
3244
3245 /* [Xn */
3246 if (skip_past_comma (&p))
3247 {
3248 /* [Xn, */
3249 operand->addr.preind = 1;
3250
3251 /* Reject SP and accept ZR */
3252 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3253 if (reg != PARSE_FAIL)
3254 {
3255 /* [Xn,Rm */
3256 operand->addr.offset.regno = reg;
3257 operand->addr.offset.is_reg = 1;
3258 /* Shifted index. */
3259 if (skip_past_comma (&p))
3260 {
3261 /* [Xn,Rm, */
3262 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3263 /* Use the diagnostics set in parse_shift, so do not set a new
3264 error message here. */
3265 return FALSE;
3266 }
3267 /* We only accept:
3268 [base,Xm{,LSL #imm}]
3269 [base,Xm,SXTX {#imm}]
3270 [base,Wm,(S|U)XTW {#imm}] */
3271 if (operand->shifter.kind == AARCH64_MOD_NONE
3272 || operand->shifter.kind == AARCH64_MOD_LSL
3273 || operand->shifter.kind == AARCH64_MOD_SXTX)
3274 {
3275 if (isreg32)
3276 {
3277 set_syntax_error (_("invalid use of 32-bit register offset"));
3278 return FALSE;
3279 }
3280 }
3281 else if (!isreg32)
3282 {
3283 set_syntax_error (_("invalid use of 64-bit register offset"));
3284 return FALSE;
3285 }
3286 }
3287 else
3288 {
3289 /* [Xn,#:<reloc_op>:<symbol> */
3290 skip_past_char (&p, '#');
3291 if (reloc && skip_past_char (&p, ':'))
3292 {
3293 struct reloc_table_entry *entry;
3294
3295 /* Try to parse a relocation modifier. Anything else is
3296 an error. */
3297 if (!(entry = find_reloc_table_entry (&p)))
3298 {
3299 set_syntax_error (_("unknown relocation modifier"));
3300 return FALSE;
3301 }
3302
3303 if (entry->ldst_type == 0)
3304 {
3305 set_syntax_error
3306 (_("this relocation modifier is not allowed on this "
3307 "instruction"));
3308 return FALSE;
3309 }
3310
3311 /* [Xn,#:<reloc_op>: */
3312 /* We now have the group relocation table entry corresponding to
3313 the name in the assembler source. Next, we parse the
3314 expression. */
3315 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3316 {
3317 set_syntax_error (_("invalid relocation expression"));
3318 return FALSE;
3319 }
3320
3321 /* [Xn,#:<reloc_op>:<expr> */
3322 /* Record the load/store relocation type. */
3323 inst.reloc.type = entry->ldst_type;
3324 inst.reloc.pc_rel = entry->pc_rel;
3325 }
3326 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3327 {
3328 set_syntax_error (_("invalid expression in the address"));
3329 return FALSE;
3330 }
3331 /* [Xn,<expr> */
3332 }
3333 }
3334
3335 if (! skip_past_char (&p, ']'))
3336 {
3337 set_syntax_error (_("']' expected"));
3338 return FALSE;
3339 }
3340
3341 if (skip_past_char (&p, '!'))
3342 {
3343 if (operand->addr.preind && operand->addr.offset.is_reg)
3344 {
3345 set_syntax_error (_("register offset not allowed in pre-indexed "
3346 "addressing mode"));
3347 return FALSE;
3348 }
3349 /* [Xn]! */
3350 operand->addr.writeback = 1;
3351 }
3352 else if (skip_past_comma (&p))
3353 {
3354 /* [Xn], */
3355 operand->addr.postind = 1;
3356 operand->addr.writeback = 1;
3357
3358 if (operand->addr.preind)
3359 {
3360 set_syntax_error (_("cannot combine pre- and post-indexing"));
3361 return FALSE;
3362 }
3363
3364 if (accept_reg_post_index
3365 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3366 &isregzero)) != PARSE_FAIL)
3367 {
3368 /* [Xn],Xm */
3369 if (isreg32)
3370 {
3371 set_syntax_error (_("invalid 32-bit register offset"));
3372 return FALSE;
3373 }
3374 operand->addr.offset.regno = reg;
3375 operand->addr.offset.is_reg = 1;
3376 }
3377 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3378 {
3379 /* [Xn],#expr */
3380 set_syntax_error (_("invalid expression in the address"));
3381 return FALSE;
3382 }
3383 }
3384
3385 /* If at this point neither .preind nor .postind is set, we have a
3386 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3387 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3388 {
3389 if (operand->addr.writeback)
3390 {
3391 /* Reject [Rn]! */
3392 set_syntax_error (_("missing offset in the pre-indexed address"));
3393 return FALSE;
3394 }
3395 operand->addr.preind = 1;
3396 inst.reloc.exp.X_op = O_constant;
3397 inst.reloc.exp.X_add_number = 0;
3398 }
3399
3400 *str = p;
3401 return TRUE;
3402 }
3403
3404 /* Return TRUE on success; otherwise return FALSE. */
3405 static bfd_boolean
3406 parse_address (char **str, aarch64_opnd_info *operand,
3407 int accept_reg_post_index)
3408 {
3409 return parse_address_main (str, operand, 0, accept_reg_post_index);
3410 }
3411
3412 /* Return TRUE on success; otherwise return FALSE. */
3413 static bfd_boolean
3414 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3415 {
3416 return parse_address_main (str, operand, 1, 0);
3417 }
3418
3419 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3420 Return TRUE on success; otherwise return FALSE. */
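/* For example, "#:abs_g1:sym" (SYM being a placeholder symbol) selects the
   entry's movw_type relocation, whereas a plain "#1234" leaves the
   relocation type alone and sets *INTERNAL_FIXUP_P instead.  */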
3421 static bfd_boolean
3422 parse_half (char **str, int *internal_fixup_p)
3423 {
3424 char *p, *saved;
3425 int dummy;
3426
3427 p = *str;
3428 skip_past_char (&p, '#');
3429
3430 gas_assert (internal_fixup_p);
3431 *internal_fixup_p = 0;
3432
3433 if (*p == ':')
3434 {
3435 struct reloc_table_entry *entry;
3436
3437 /* Try to parse a relocation. Anything else is an error. */
3438 ++p;
3439 if (!(entry = find_reloc_table_entry (&p)))
3440 {
3441 set_syntax_error (_("unknown relocation modifier"));
3442 return FALSE;
3443 }
3444
3445 if (entry->movw_type == 0)
3446 {
3447 set_syntax_error
3448 (_("this relocation modifier is not allowed on this instruction"));
3449 return FALSE;
3450 }
3451
3452 inst.reloc.type = entry->movw_type;
3453 }
3454 else
3455 *internal_fixup_p = 1;
3456
3457 /* Avoid parsing a register as a general symbol. */
3458 saved = p;
3459 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3460 return FALSE;
3461 p = saved;
3462
3463 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3464 return FALSE;
3465
3466 *str = p;
3467 return TRUE;
3468 }
3469
3470 /* Parse an operand for an ADRP instruction:
3471 ADRP <Xd>, <label>
3472 Return TRUE on success; otherwise return FALSE. */
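/* For example, "adrp x0, :got:sym" (SYM being a placeholder symbol) uses the
   entry's adrp_type, BFD_RELOC_AARCH64_ADR_GOT_PAGE, whereas a bare
   "adrp x0, sym" falls back to BFD_RELOC_AARCH64_ADR_HI21_PCREL.  */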
3473
3474 static bfd_boolean
3475 parse_adrp (char **str)
3476 {
3477 char *p;
3478
3479 p = *str;
3480 if (*p == ':')
3481 {
3482 struct reloc_table_entry *entry;
3483
3484 /* Try to parse a relocation. Anything else is an error. */
3485 ++p;
3486 if (!(entry = find_reloc_table_entry (&p)))
3487 {
3488 set_syntax_error (_("unknown relocation modifier"));
3489 return FALSE;
3490 }
3491
3492 if (entry->adrp_type == 0)
3493 {
3494 set_syntax_error
3495 (_("this relocation modifier is not allowed on this instruction"));
3496 return FALSE;
3497 }
3498
3499 inst.reloc.type = entry->adrp_type;
3500 }
3501 else
3502 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3503
3504 inst.reloc.pc_rel = 1;
3505
3506 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3507 return FALSE;
3508
3509 *str = p;
3510 return TRUE;
3511 }
3512
3513 /* Miscellaneous. */
3514
3515 /* Parse an option for a preload instruction. Returns the encoding for the
3516 option, or PARSE_FAIL. */
3517
3518 static int
3519 parse_pldop (char **str)
3520 {
3521 char *p, *q;
3522 const struct aarch64_name_value_pair *o;
3523
3524 p = q = *str;
3525 while (ISALNUM (*q))
3526 q++;
3527
3528 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3529 if (!o)
3530 return PARSE_FAIL;
3531
3532 *str = q;
3533 return o->value;
3534 }
3535
3536 /* Parse an option for a barrier instruction. Returns the encoding for the
3537 option, or PARSE_FAIL. */
3538
3539 static int
3540 parse_barrier (char **str)
3541 {
3542 char *p, *q;
3543 const asm_barrier_opt *o;
3544
3545 p = q = *str;
3546 while (ISALPHA (*q))
3547 q++;
3548
3549 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3550 if (!o)
3551 return PARSE_FAIL;
3552
3553 *str = q;
3554 return o->value;
3555 }
3556
3557 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3558 Returns the encoding for the option, or PARSE_FAIL.
3559
3560 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3561 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3562
3563 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3564 field, otherwise as a system register.
3565 */
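/* E.g. the implementation-defined name "s3_0_c13_c0_2" (chosen only for
   illustration) parses as op0=3, op1=0, Cn=13, Cm=0, op2=2, giving
   (3 << 14) | (0 << 11) | (13 << 7) | (0 << 3) | 2 == 0xc682.  */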
3566
3567 static int
3568 parse_sys_reg (char **str, struct hash_control *sys_regs,
3569 int imple_defined_p, int pstatefield_p)
3570 {
3571 char *p, *q;
3572 char buf[32];
3573 const aarch64_sys_reg *o;
3574 int value;
3575
3576 p = buf;
3577 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3578 if (p < buf + 31)
3579 *p++ = TOLOWER (*q);
3580 *p = '\0';
3581 /* Assert that BUF is large enough. */
3582 gas_assert (p - buf == q - *str);
3583
3584 o = hash_find (sys_regs, buf);
3585 if (!o)
3586 {
3587 if (!imple_defined_p)
3588 return PARSE_FAIL;
3589 else
3590 {
3591 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3592 unsigned int op0, op1, cn, cm, op2;
3593
3594 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3595 != 5)
3596 return PARSE_FAIL;
3597 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3598 return PARSE_FAIL;
3599 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3600 }
3601 }
3602 else
3603 {
3604 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3605 as_bad (_("selected processor does not support PSTATE field "
3606 "name '%s'"), buf);
3607 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3608 as_bad (_("selected processor does not support system register "
3609 "name '%s'"), buf);
3610 if (aarch64_sys_reg_deprecated_p (o))
3611 as_warn (_("system register name '%s' is deprecated and may be "
3612 "removed in a future release"), buf);
3613 value = o->value;
3614 }
3615
3616 *str = q;
3617 return value;
3618 }
3619
3620 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3621 for the option, or NULL. */
3622
3623 static const aarch64_sys_ins_reg *
3624 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3625 {
3626 char *p, *q;
3627 char buf[32];
3628 const aarch64_sys_ins_reg *o;
3629
3630 p = buf;
3631 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3632 if (p < buf + 31)
3633 *p++ = TOLOWER (*q);
3634 *p = '\0';
3635
3636 o = hash_find (sys_ins_regs, buf);
3637 if (!o)
3638 return NULL;
3639
3640 *str = q;
3641 return o;
3642 }
3643 \f
3644 #define po_char_or_fail(chr) do { \
3645 if (! skip_past_char (&str, chr)) \
3646 goto failure; \
3647 } while (0)
3648
3649 #define po_reg_or_fail(regtype) do { \
3650 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3651 if (val == PARSE_FAIL) \
3652 { \
3653 set_default_error (); \
3654 goto failure; \
3655 } \
3656 } while (0)
3657
3658 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3659 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3660 &isreg32, &isregzero); \
3661 if (val == PARSE_FAIL) \
3662 { \
3663 set_default_error (); \
3664 goto failure; \
3665 } \
3666 info->reg.regno = val; \
3667 if (isreg32) \
3668 info->qualifier = AARCH64_OPND_QLF_W; \
3669 else \
3670 info->qualifier = AARCH64_OPND_QLF_X; \
3671 } while (0)
3672
3673 #define po_imm_nc_or_fail() do { \
3674 if (! parse_constant_immediate (&str, &val)) \
3675 goto failure; \
3676 } while (0)
3677
3678 #define po_imm_or_fail(min, max) do { \
3679 if (! parse_constant_immediate (&str, &val)) \
3680 goto failure; \
3681 if (val < min || val > max) \
3682 { \
3683 set_fatal_syntax_error (_("immediate value out of range "\
3684 #min " to "#max)); \
3685 goto failure; \
3686 } \
3687 } while (0)
3688
3689 #define po_misc_or_fail(expr) do { \
3690 if (!expr) \
3691 goto failure; \
3692 } while (0)
3693 \f
3694 /* encode the 12-bit imm field of Add/sub immediate */
3695 static inline uint32_t
3696 encode_addsub_imm (uint32_t imm)
3697 {
3698 return imm << 10;
3699 }
3700
3701 /* encode the shift amount field of Add/sub immediate */
3702 static inline uint32_t
3703 encode_addsub_imm_shift_amount (uint32_t cnt)
3704 {
3705 return cnt << 22;
3706 }
3707
3708
3709 /* encode the imm field of Adr instruction */
3710 static inline uint32_t
3711 encode_adr_imm (uint32_t imm)
3712 {
3713 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3714 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3715 }
3716
3717 /* encode the immediate field of Move wide immediate */
3718 static inline uint32_t
3719 encode_movw_imm (uint32_t imm)
3720 {
3721 return imm << 5;
3722 }
3723
3724 /* encode the 26-bit offset of unconditional branch */
3725 static inline uint32_t
3726 encode_branch_ofs_26 (uint32_t ofs)
3727 {
3728 return ofs & ((1 << 26) - 1);
3729 }
3730
3731 /* encode the 19-bit offset of conditional branch and compare & branch */
3732 static inline uint32_t
3733 encode_cond_branch_ofs_19 (uint32_t ofs)
3734 {
3735 return (ofs & ((1 << 19) - 1)) << 5;
3736 }
3737
3738 /* encode the 19-bit offset of ld literal */
3739 static inline uint32_t
3740 encode_ld_lit_ofs_19 (uint32_t ofs)
3741 {
3742 return (ofs & ((1 << 19) - 1)) << 5;
3743 }
3744
3745 /* Encode the 14-bit offset of test & branch. */
3746 static inline uint32_t
3747 encode_tst_branch_ofs_14 (uint32_t ofs)
3748 {
3749 return (ofs & ((1 << 14) - 1)) << 5;
3750 }
3751
3752 /* Encode the 16-bit imm field of svc/hvc/smc. */
3753 static inline uint32_t
3754 encode_svc_imm (uint32_t imm)
3755 {
3756 return imm << 5;
3757 }
3758
3759 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3760 static inline uint32_t
3761 reencode_addsub_switch_add_sub (uint32_t opcode)
3762 {
3763 return opcode ^ (1 << 30);
3764 }
3765
3766 static inline uint32_t
3767 reencode_movzn_to_movz (uint32_t opcode)
3768 {
3769 return opcode | (1 << 30);
3770 }
3771
3772 static inline uint32_t
3773 reencode_movzn_to_movn (uint32_t opcode)
3774 {
3775 return opcode & ~(1 << 30);
3776 }
3777
3778 /* Overall per-instruction processing. */
3779
3780 /* We need to be able to fix up arbitrary expressions in some statements.
3781 This is so that we can handle symbols that are an arbitrary distance from
3782 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3783 which returns part of an address in a form which will be valid for
3784 a data instruction. We do this by pushing the expression into a symbol
3785 in the expr_section, and creating a fix for that. */
3786
3787 static fixS *
3788 fix_new_aarch64 (fragS * frag,
3789 int where,
3790 short int size, expressionS * exp, int pc_rel, int reloc)
3791 {
3792 fixS *new_fix;
3793
3794 switch (exp->X_op)
3795 {
3796 case O_constant:
3797 case O_symbol:
3798 case O_add:
3799 case O_subtract:
3800 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3801 break;
3802
3803 default:
3804 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3805 pc_rel, reloc);
3806 break;
3807 }
3808 return new_fix;
3809 }
3810 \f
3811 /* Diagnostics on operands errors. */
3812
3813 /* By default, output a verbose error message.
3814 Disable the verbose error message with -mno-verbose-error. */
3815 static int verbose_error_p = 1;
3816
3817 #ifdef DEBUG_AARCH64
3818 /* N.B. this is only for the purpose of debugging. */
3819 const char* operand_mismatch_kind_names[] =
3820 {
3821 "AARCH64_OPDE_NIL",
3822 "AARCH64_OPDE_RECOVERABLE",
3823 "AARCH64_OPDE_SYNTAX_ERROR",
3824 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3825 "AARCH64_OPDE_INVALID_VARIANT",
3826 "AARCH64_OPDE_OUT_OF_RANGE",
3827 "AARCH64_OPDE_UNALIGNED",
3828 "AARCH64_OPDE_REG_LIST",
3829 "AARCH64_OPDE_OTHER_ERROR",
3830 };
3831 #endif /* DEBUG_AARCH64 */
3832
3833 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3834
3835 When multiple errors of different kinds are found in the same assembly
3836 line, only the error of the highest severity will be picked up for
3837 issuing the diagnostics. */
3838
3839 static inline bfd_boolean
3840 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3841 enum aarch64_operand_error_kind rhs)
3842 {
3843 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3844 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3845 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3846 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3847 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3848 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3849 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3850 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3851 return lhs > rhs;
3852 }
3853
3854 /* Helper routine to get the mnemonic name from the assembly instruction
3855 line; should only be called for diagnostic purposes, as there is a
3856 string copy operation involved, which may affect runtime
3857 performance if used elsewhere. */
3858
3859 static const char*
3860 get_mnemonic_name (const char *str)
3861 {
3862 static char mnemonic[32];
3863 char *ptr;
3864
3865 /* Get the first 31 bytes and assume that the full name is included. */
3866 strncpy (mnemonic, str, 31);
3867 mnemonic[31] = '\0';
3868
3869 /* Scan up to the end of the mnemonic, which must end in white space,
3870 '.', or end of string. */
3871 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3872 ;
3873
3874 *ptr = '\0';
3875
3876 /* Append '...' to the truncated long name. */
3877 if (ptr - mnemonic == 31)
3878 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3879
3880 return mnemonic;
3881 }
3882
3883 static void
3884 reset_aarch64_instruction (aarch64_instruction *instruction)
3885 {
3886 memset (instruction, '\0', sizeof (aarch64_instruction));
3887 instruction->reloc.type = BFD_RELOC_UNUSED;
3888 }
3889
3890 /* Data structures storing one user error in the assembly code related to
3891 operands. */
3892
3893 struct operand_error_record
3894 {
3895 const aarch64_opcode *opcode;
3896 aarch64_operand_error detail;
3897 struct operand_error_record *next;
3898 };
3899
3900 typedef struct operand_error_record operand_error_record;
3901
3902 struct operand_errors
3903 {
3904 operand_error_record *head;
3905 operand_error_record *tail;
3906 };
3907
3908 typedef struct operand_errors operand_errors;
3909
3910 /* Top-level data structure reporting user errors for the current line of
3911 the assembly code.
3912 The way md_assemble works is that all opcodes sharing the same mnemonic
3913 name are iterated to find a match to the assembly line. In this data
3914 structure, each such opcode will have one operand_error_record
3915 allocated and inserted. In other words, excessive errors related to
3916 a single opcode are disregarded. */
3917 operand_errors operand_error_report;
3918
3919 /* Free record nodes. */
3920 static operand_error_record *free_opnd_error_record_nodes = NULL;
3921
3922 /* Initialize the data structure that stores the operand mismatch
3923 information on assembling one line of the assembly code. */
3924 static void
3925 init_operand_error_report (void)
3926 {
3927 if (operand_error_report.head != NULL)
3928 {
3929 gas_assert (operand_error_report.tail != NULL);
3930 operand_error_report.tail->next = free_opnd_error_record_nodes;
3931 free_opnd_error_record_nodes = operand_error_report.head;
3932 operand_error_report.head = NULL;
3933 operand_error_report.tail = NULL;
3934 return;
3935 }
3936 gas_assert (operand_error_report.tail == NULL);
3937 }
3938
3939 /* Return TRUE if some operand error has been recorded during the
3940 parsing of the current assembly line using the opcode *OPCODE;
3941 otherwise return FALSE. */
3942 static inline bfd_boolean
3943 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3944 {
3945 operand_error_record *record = operand_error_report.head;
3946 return record && record->opcode == opcode;
3947 }
3948
3949 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3950 OPCODE field is initialized with OPCODE.
3951 N.B. only one record for each opcode, i.e. at most one error is
3952 recorded for each instruction template. */
3953
3954 static void
3955 add_operand_error_record (const operand_error_record* new_record)
3956 {
3957 const aarch64_opcode *opcode = new_record->opcode;
3958 operand_error_record* record = operand_error_report.head;
3959
3960 /* The record may have been created for this opcode. If not, we need
3961 to prepare one. */
3962 if (! opcode_has_operand_error_p (opcode))
3963 {
3964 /* Get one empty record. */
3965 if (free_opnd_error_record_nodes == NULL)
3966 {
3967 record = xmalloc (sizeof (operand_error_record));
3968 if (record == NULL)
3969 abort ();
3970 }
3971 else
3972 {
3973 record = free_opnd_error_record_nodes;
3974 free_opnd_error_record_nodes = record->next;
3975 }
3976 record->opcode = opcode;
3977 /* Insert at the head. */
3978 record->next = operand_error_report.head;
3979 operand_error_report.head = record;
3980 if (operand_error_report.tail == NULL)
3981 operand_error_report.tail = record;
3982 }
3983 else if (record->detail.kind != AARCH64_OPDE_NIL
3984 && record->detail.index <= new_record->detail.index
3985 && operand_error_higher_severity_p (record->detail.kind,
3986 new_record->detail.kind))
3987 {
3988 /* In the case of multiple errors found on operands related to a
3989 single opcode, only record the error of the leftmost operand and
3990 only if the error is of higher severity. */
3991 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3992 " the existing error %s on operand %d",
3993 operand_mismatch_kind_names[new_record->detail.kind],
3994 new_record->detail.index,
3995 operand_mismatch_kind_names[record->detail.kind],
3996 record->detail.index);
3997 return;
3998 }
3999
4000 record->detail = new_record->detail;
4001 }
4002
4003 static inline void
4004 record_operand_error_info (const aarch64_opcode *opcode,
4005 aarch64_operand_error *error_info)
4006 {
4007 operand_error_record record;
4008 record.opcode = opcode;
4009 record.detail = *error_info;
4010 add_operand_error_record (&record);
4011 }
4012
4013 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4014 error message *ERROR, for operand IDX (counting from 0). */
4015
4016 static void
4017 record_operand_error (const aarch64_opcode *opcode, int idx,
4018 enum aarch64_operand_error_kind kind,
4019 const char* error)
4020 {
4021 aarch64_operand_error info;
4022 memset(&info, 0, sizeof (info));
4023 info.index = idx;
4024 info.kind = kind;
4025 info.error = error;
4026 record_operand_error_info (opcode, &info);
4027 }
4028
4029 static void
4030 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4031 enum aarch64_operand_error_kind kind,
4032 const char* error, const int *extra_data)
4033 {
4034 aarch64_operand_error info;
4035 info.index = idx;
4036 info.kind = kind;
4037 info.error = error;
4038 info.data[0] = extra_data[0];
4039 info.data[1] = extra_data[1];
4040 info.data[2] = extra_data[2];
4041 record_operand_error_info (opcode, &info);
4042 }
4043
4044 static void
4045 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4046 const char* error, int lower_bound,
4047 int upper_bound)
4048 {
4049 int data[3] = {lower_bound, upper_bound, 0};
4050 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4051 error, data);
4052 }
4053
4054 /* Remove the operand error record for *OPCODE. */
4055 static void ATTRIBUTE_UNUSED
4056 remove_operand_error_record (const aarch64_opcode *opcode)
4057 {
4058 if (opcode_has_operand_error_p (opcode))
4059 {
4060 operand_error_record* record = operand_error_report.head;
4061 gas_assert (record != NULL && operand_error_report.tail != NULL);
4062 operand_error_report.head = record->next;
4063 record->next = free_opnd_error_record_nodes;
4064 free_opnd_error_record_nodes = record;
4065 if (operand_error_report.head == NULL)
4066 {
4067 gas_assert (operand_error_report.tail == record);
4068 operand_error_report.tail = NULL;
4069 }
4070 }
4071 }
4072
4073 /* Given the instruction in *INSTR, return the index of the best matched
4074 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4075
4076 Return -1 if there is no qualifier sequence; return the first match
4077 if multiple matches are found. */
4078
4079 static int
4080 find_best_match (const aarch64_inst *instr,
4081 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4082 {
4083 int i, num_opnds, max_num_matched, idx;
4084
4085 num_opnds = aarch64_num_of_operands (instr->opcode);
4086 if (num_opnds == 0)
4087 {
4088 DEBUG_TRACE ("no operand");
4089 return -1;
4090 }
4091
4092 max_num_matched = 0;
4093 idx = -1;
4094
4095 /* For each pattern. */
4096 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4097 {
4098 int j, num_matched;
4099 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4100
4101 /* Most opcodes have far fewer patterns in the list. */
4102 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
4103 {
4104 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4105 if (i != 0 && idx == -1)
4106 /* If nothing has been matched, return the 1st sequence. */
4107 idx = 0;
4108 break;
4109 }
4110
4111 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4112 if (*qualifiers == instr->operands[j].qualifier)
4113 ++num_matched;
4114
4115 if (num_matched > max_num_matched)
4116 {
4117 max_num_matched = num_matched;
4118 idx = i;
4119 }
4120 }
4121
4122 DEBUG_TRACE ("return with %d", idx);
4123 return idx;
4124 }
4125
4126 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4127 corresponding operands in *INSTR. */
4128
4129 static inline void
4130 assign_qualifier_sequence (aarch64_inst *instr,
4131 const aarch64_opnd_qualifier_t *qualifiers)
4132 {
4133 int i = 0;
4134 int num_opnds = aarch64_num_of_operands (instr->opcode);
4135 gas_assert (num_opnds);
4136 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4137 instr->operands[i].qualifier = *qualifiers;
4138 }
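
/* Usage sketch (illustrative): the two helpers above are typically used
   together when building the "did you mean" hint, e.g.

     int qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
     if (qlf_idx >= 0)
       assign_qualifier_sequence (inst_base,
                                  opcode->qualifiers_list[qlf_idx]);

   so that the IR carries the closest valid qualifier variant before it is
   printed back to the user.  */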
4139
4140 /* Print operands for diagnostic purposes. */
4141
4142 static void
4143 print_operands (char *buf, const aarch64_opcode *opcode,
4144 const aarch64_opnd_info *opnds)
4145 {
4146 int i;
4147
4148 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4149 {
4150 const size_t size = 128;
4151 char str[size];
4152
4153 /* We give priority to the opcode's operand info, but we also look at
4154 inst->operands so that an optional operand omitted from the assembly
4155 line can still be printed.
4156 The two operand codes should be the same in all cases, apart from
4157 when the operand can be optional. */
4158 if (opcode->operands[i] == AARCH64_OPND_NIL
4159 || opnds[i].type == AARCH64_OPND_NIL)
4160 break;
4161
4162 /* Generate the operand string in STR. */
4163 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
4164
4165 /* Delimiter. */
4166 if (str[0] != '\0')
4167 strcat (buf, i == 0 ? " " : ",");
4168
4169 /* Append the operand string. */
4170 strcat (buf, str);
4171 }
4172 }
4173
4174 /* Output an informational message to stderr. */
4175
4176 static void
4177 output_info (const char *format, ...)
4178 {
4179 char *file;
4180 unsigned int line;
4181 va_list args;
4182
4183 as_where (&file, &line);
4184 if (file)
4185 {
4186 if (line != 0)
4187 fprintf (stderr, "%s:%u: ", file, line);
4188 else
4189 fprintf (stderr, "%s: ", file);
4190 }
4191 fprintf (stderr, _("Info: "));
4192 va_start (args, format);
4193 vfprintf (stderr, format, args);
4194 va_end (args);
4195 (void) putc ('\n', stderr);
4196 }
4197
4198 /* Output one operand error record. */
4199
4200 static void
4201 output_operand_error_record (const operand_error_record *record, char *str)
4202 {
4203 const aarch64_operand_error *detail = &record->detail;
4204 int idx = detail->index;
4205 const aarch64_opcode *opcode = record->opcode;
4206 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4207 : AARCH64_OPND_NIL);
4208
4209 switch (detail->kind)
4210 {
4211 case AARCH64_OPDE_NIL:
4212 gas_assert (0);
4213 break;
4214
4215 case AARCH64_OPDE_SYNTAX_ERROR:
4216 case AARCH64_OPDE_RECOVERABLE:
4217 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4218 case AARCH64_OPDE_OTHER_ERROR:
4219 /* Use the prepared error message if there is one, otherwise use the
4220 operand description string to describe the error. */
4221 if (detail->error != NULL)
4222 {
4223 if (idx < 0)
4224 as_bad (_("%s -- `%s'"), detail->error, str);
4225 else
4226 as_bad (_("%s at operand %d -- `%s'"),
4227 detail->error, idx + 1, str);
4228 }
4229 else
4230 {
4231 gas_assert (idx >= 0);
4232 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4233 aarch64_get_operand_desc (opd_code), str);
4234 }
4235 break;
4236
4237 case AARCH64_OPDE_INVALID_VARIANT:
4238 as_bad (_("operand mismatch -- `%s'"), str);
4239 if (verbose_error_p)
4240 {
4241 /* We will try to correct the erroneous instruction and also provide
4242 more information e.g. all other valid variants.
4243
4244 The string representation of the corrected instruction and other
4245 valid variants are generated by
4246
4247 1) obtaining the intermediate representation of the erroneous
4248 instruction;
4249 2) manipulating the IR, e.g. replacing the operand qualifier;
4250 3) printing out the instruction by calling the printer functions
4251 shared with the disassembler.
4252
4253 The limitation of this method is that the exact input assembly
4254 line cannot be accurately reproduced in some cases, for example an
4255 optional operand present in the actual assembly line will be
4256 omitted in the output; likewise for the optional syntax rules,
4257 e.g. the # before the immediate. Another limitation is that the
4258 assembly symbols and relocation operations in the assembly line
4259 currently cannot be printed out in the error report. Last but not
4260 least, when other errors co-exist with this error, the 'corrected'
4261 instruction may still be incorrect, e.g. given
4262 'ldnp h0,h1,[x0,#6]!'
4263 this diagnosis will provide the version:
4264 'ldnp s0,s1,[x0,#6]!'
4265 which is still not right. */
4266 size_t len = strlen (get_mnemonic_name (str));
4267 int i, qlf_idx;
4268 bfd_boolean result;
4269 const size_t size = 2048;
4270 char buf[size];
4271 aarch64_inst *inst_base = &inst.base;
4272 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4273
4274 /* Init inst. */
4275 reset_aarch64_instruction (&inst);
4276 inst_base->opcode = opcode;
4277
4278 /* Reset the error report so that there is no side effect on the
4279 following operand parsing. */
4280 init_operand_error_report ();
4281
4282 /* Fill inst. */
4283 result = parse_operands (str + len, opcode)
4284 && programmer_friendly_fixup (&inst);
4285 gas_assert (result);
4286 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4287 NULL, NULL);
4288 gas_assert (!result);
4289
4290 /* Find the most matched qualifier sequence. */
4291 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4292 gas_assert (qlf_idx > -1);
4293
4294 /* Assign the qualifiers. */
4295 assign_qualifier_sequence (inst_base,
4296 opcode->qualifiers_list[qlf_idx]);
4297
4298 /* Print the hint. */
4299 output_info (_(" did you mean this?"));
4300 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4301 print_operands (buf, opcode, inst_base->operands);
4302 output_info (_(" %s"), buf);
4303
4304 /* Print out other variant(s) if there are any. */
4305 if (qlf_idx != 0
4306 || !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4307 output_info (_(" other valid variant(s):"));
4308
4309 /* For each pattern. */
4310 qualifiers_list = opcode->qualifiers_list;
4311 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4312 {
4313 /* Most opcodes have far fewer patterns in the list.
4314 The first NIL qualifier indicates the end of the list. */
4315 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4316 break;
4317
4318 if (i != qlf_idx)
4319 {
4320 /* Mnemonics name. */
4321 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4322
4323 /* Assign the qualifiers. */
4324 assign_qualifier_sequence (inst_base, *qualifiers_list);
4325
4326 /* Print instruction. */
4327 print_operands (buf, opcode, inst_base->operands);
4328
4329 output_info (_(" %s"), buf);
4330 }
4331 }
4332 }
4333 break;
4334
4335 case AARCH64_OPDE_OUT_OF_RANGE:
4336 if (detail->data[0] != detail->data[1])
4337 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4338 detail->error ? detail->error : _("immediate value"),
4339 detail->data[0], detail->data[1], idx + 1, str);
4340 else
4341 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4342 detail->error ? detail->error : _("immediate value"),
4343 detail->data[0], idx + 1, str);
4344 break;
4345
4346 case AARCH64_OPDE_REG_LIST:
4347 if (detail->data[0] == 1)
4348 as_bad (_("invalid number of registers in the list; "
4349 "only 1 register is expected at operand %d -- `%s'"),
4350 idx + 1, str);
4351 else
4352 as_bad (_("invalid number of registers in the list; "
4353 "%d registers are expected at operand %d -- `%s'"),
4354 detail->data[0], idx + 1, str);
4355 break;
4356
4357 case AARCH64_OPDE_UNALIGNED:
4358 as_bad (_("immediate value should be a multiple of "
4359 "%d at operand %d -- `%s'"),
4360 detail->data[0], idx + 1, str);
4361 break;
4362
4363 default:
4364 gas_assert (0);
4365 break;
4366 }
4367 }
4368
4369 /* Process and output the error message about the operand mismatch.
4370
4371 When this function is called, the operand error information has
4372 been collected for an assembly line and there may be multiple
4373 errors in the case of multiple instruction templates; output the
4374 error message that most closely describes the problem. */
4375
4376 static void
4377 output_operand_error_report (char *str)
4378 {
4379 int largest_error_pos;
4380 const char *msg = NULL;
4381 enum aarch64_operand_error_kind kind;
4382 operand_error_record *curr;
4383 operand_error_record *head = operand_error_report.head;
4384 operand_error_record *record = NULL;
4385
4386 /* No error to report. */
4387 if (head == NULL)
4388 return;
4389
4390 gas_assert (head != NULL && operand_error_report.tail != NULL);
4391
4392 /* Only one error. */
4393 if (head == operand_error_report.tail)
4394 {
4395 DEBUG_TRACE ("single opcode entry with error kind: %s",
4396 operand_mismatch_kind_names[head->detail.kind]);
4397 output_operand_error_record (head, str);
4398 return;
4399 }
4400
4401 /* Find the error kind of the highest severity. */
4402 DEBUG_TRACE ("multiple opcode entries with error kind");
4403 kind = AARCH64_OPDE_NIL;
4404 for (curr = head; curr != NULL; curr = curr->next)
4405 {
4406 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4407 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4408 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4409 kind = curr->detail.kind;
4410 }
4411 gas_assert (kind != AARCH64_OPDE_NIL);
4412
4413 /* Pick one of the errors of KIND to report. */
4414 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4415 for (curr = head; curr != NULL; curr = curr->next)
4416 {
4417 if (curr->detail.kind != kind)
4418 continue;
4419 /* If there are multiple errors, pick the one with the highest
4420 mismatching operand index. In the case of multiple errors with
4421 the equally highest operand index, pick the first one, preferring
4422 one with a non-NULL error message. */
4423 if (curr->detail.index > largest_error_pos
4424 || (curr->detail.index == largest_error_pos && msg == NULL
4425 && curr->detail.error != NULL))
4426 {
4427 largest_error_pos = curr->detail.index;
4428 record = curr;
4429 msg = record->detail.error;
4430 }
4431 }
4432
4433 gas_assert (largest_error_pos != -2 && record != NULL);
4434 DEBUG_TRACE ("Pick up error kind %s to report",
4435 operand_mismatch_kind_names[record->detail.kind]);
4436
4437 /* Output. */
4438 output_operand_error_record (record, str);
4439 }
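
/* Selection sketch (illustrative): if one template failed with a syntax
   error on operand 1 and another failed with a different kind of error on
   operand 3, the code above first narrows the records to the most severe
   kind and then, among those, reports the record with the highest operand
   index, i.e. the attempt that got furthest through the parse.  */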
4440 \f
4441 /* Write an AARCH64 instruction to buf - always little-endian. */
4442 static void
4443 put_aarch64_insn (char *buf, uint32_t insn)
4444 {
4445 unsigned char *where = (unsigned char *) buf;
4446 where[0] = insn;
4447 where[1] = insn >> 8;
4448 where[2] = insn >> 16;
4449 where[3] = insn >> 24;
4450 }
4451
4452 static uint32_t
4453 get_aarch64_insn (char *buf)
4454 {
4455 unsigned char *where = (unsigned char *) buf;
4456 uint32_t result;
4457 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4458 return result;
4459 }
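
/* Byte-order sketch (illustrative): for the NOP encoding 0xd503201f,
   put_aarch64_insn stores

     buf[0] = 0x1f;  buf[1] = 0x20;  buf[2] = 0x03;  buf[3] = 0xd5;

   and get_aarch64_insn reassembles the same 32-bit value, so the pair
   round-trips independently of the host's byte order.  */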
4460
4461 static void
4462 output_inst (struct aarch64_inst *new_inst)
4463 {
4464 char *to = NULL;
4465
4466 to = frag_more (INSN_SIZE);
4467
4468 frag_now->tc_frag_data.recorded = 1;
4469
4470 put_aarch64_insn (to, inst.base.value);
4471
4472 if (inst.reloc.type != BFD_RELOC_UNUSED)
4473 {
4474 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4475 INSN_SIZE, &inst.reloc.exp,
4476 inst.reloc.pc_rel,
4477 inst.reloc.type);
4478 DEBUG_TRACE ("Prepared relocation fix up");
4479 /* Don't check the addend value against the instruction size,
4480 that's the job of our code in md_apply_fix(). */
4481 fixp->fx_no_overflow = 1;
4482 if (new_inst != NULL)
4483 fixp->tc_fix_data.inst = new_inst;
4484 if (aarch64_gas_internal_fixup_p ())
4485 {
4486 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4487 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4488 fixp->fx_addnumber = inst.reloc.flags;
4489 }
4490 }
4491
4492 dwarf2_emit_insn (INSN_SIZE);
4493 }
4494
4495 /* Link together opcodes of the same name. */
4496
4497 struct templates
4498 {
4499 aarch64_opcode *opcode;
4500 struct templates *next;
4501 };
4502
4503 typedef struct templates templates;
4504
4505 static templates *
4506 lookup_mnemonic (const char *start, int len)
4507 {
4508 templates *templ = NULL;
4509
4510 templ = hash_find_n (aarch64_ops_hsh, start, len);
4511 return templ;
4512 }
4513
4514 /* Subroutine of md_assemble, responsible for looking up the primary
4515 opcode from the mnemonic the user wrote. STR points to the
4516 beginning of the mnemonic. */
4517
4518 static templates *
4519 opcode_lookup (char **str)
4520 {
4521 char *end, *base;
4522 const aarch64_cond *cond;
4523 char condname[16];
4524 int len;
4525
4526 /* Scan up to the end of the mnemonic, which must end in white space,
4527 '.', or end of string. */
4528 for (base = end = *str; is_part_of_name(*end); end++)
4529 if (*end == '.')
4530 break;
4531
4532 if (end == base)
4533 return 0;
4534
4535 inst.cond = COND_ALWAYS;
4536
4537 /* Handle a possible condition. */
4538 if (end[0] == '.')
4539 {
4540 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4541 if (cond)
4542 {
4543 inst.cond = cond->value;
4544 *str = end + 3;
4545 }
4546 else
4547 {
4548 *str = end;
4549 return 0;
4550 }
4551 }
4552 else
4553 *str = end;
4554
4555 len = end - base;
4556
4557 if (inst.cond == COND_ALWAYS)
4558 {
4559 /* Look for unaffixed mnemonic. */
4560 return lookup_mnemonic (base, len);
4561 }
4562 else if (len <= 13)
4563 {
4564 /* Append ".c" to the mnemonic if conditional. */
4565 memcpy (condname, base, len);
4566 memcpy (condname + len, ".c", 2);
4567 base = condname;
4568 len += 2;
4569 return lookup_mnemonic (base, len);
4570 }
4571
4572 return NULL;
4573 }
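
/* Lookup sketch (illustrative): for "b.eq label" the scan above stops at
   the '.', "eq" is found in the condition table, and the mnemonic is looked
   up again under the key "b.c"; a plain "add x0, x1, x2" is simply looked
   up as "add".  */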
4574
4575 /* Internal helper routine converting a vector neon_type_el structure
4576 *VECTYPE to a corresponding operand qualifier. */
4577
4578 static inline aarch64_opnd_qualifier_t
4579 vectype_to_qualifier (const struct neon_type_el *vectype)
4580 {
4581 /* Element size in bytes indexed by neon_el_type. */
4582 const unsigned char ele_size[5]
4583 = {1, 2, 4, 8, 16};
4584
4585 if (!vectype->defined || vectype->type == NT_invtype)
4586 goto vectype_conversion_fail;
4587
4588 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4589
4590 if (vectype->defined & NTA_HASINDEX)
4591 /* Vector element register. */
4592 return AARCH64_OPND_QLF_S_B + vectype->type;
4593 else
4594 {
4595 /* Vector register. */
4596 int reg_size = ele_size[vectype->type] * vectype->width;
4597 unsigned offset;
4598 if (reg_size != 16 && reg_size != 8)
4599 goto vectype_conversion_fail;
4600 /* The conversion is calculated based on the relation of the order of
4601 qualifiers to the vector element size and vector register size. */
4602 offset = (vectype->type == NT_q)
4603 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4604 gas_assert (offset <= 8);
4605 return AARCH64_OPND_QLF_V_8B + offset;
4606 }
4607
4608 vectype_conversion_fail:
4609 first_error (_("bad vector arrangement type"));
4610 return AARCH64_OPND_QLF_NIL;
4611 }
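
/* Worked example (illustrative): for the arrangement ".4s" we have
   type == NT_s (2) and width == 4, so reg_size == 4 * 4 == 16 and

     offset = (2 << 1) + (16 >> 4) == 5,

   i.e. AARCH64_OPND_QLF_V_8B + 5, which is the 4S vector qualifier under
   the assumed ordering 8B, 16B, 4H, 8H, 2S, 4S, 1D, 2D, 1Q.  */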
4612
4613 /* Process an optional operand that has been omitted from the assembly line.
4614 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4615 instruction's opcode entry while IDX is the index of this omitted operand.
4616 */
4617
4618 static void
4619 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4620 int idx, aarch64_opnd_info *operand)
4621 {
4622 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4623 gas_assert (optional_operand_p (opcode, idx));
4624 gas_assert (!operand->present);
4625
4626 switch (type)
4627 {
4628 case AARCH64_OPND_Rd:
4629 case AARCH64_OPND_Rn:
4630 case AARCH64_OPND_Rm:
4631 case AARCH64_OPND_Rt:
4632 case AARCH64_OPND_Rt2:
4633 case AARCH64_OPND_Rs:
4634 case AARCH64_OPND_Ra:
4635 case AARCH64_OPND_Rt_SYS:
4636 case AARCH64_OPND_Rd_SP:
4637 case AARCH64_OPND_Rn_SP:
4638 case AARCH64_OPND_Fd:
4639 case AARCH64_OPND_Fn:
4640 case AARCH64_OPND_Fm:
4641 case AARCH64_OPND_Fa:
4642 case AARCH64_OPND_Ft:
4643 case AARCH64_OPND_Ft2:
4644 case AARCH64_OPND_Sd:
4645 case AARCH64_OPND_Sn:
4646 case AARCH64_OPND_Sm:
4647 case AARCH64_OPND_Vd:
4648 case AARCH64_OPND_Vn:
4649 case AARCH64_OPND_Vm:
4650 case AARCH64_OPND_VdD1:
4651 case AARCH64_OPND_VnD1:
4652 operand->reg.regno = default_value;
4653 break;
4654
4655 case AARCH64_OPND_Ed:
4656 case AARCH64_OPND_En:
4657 case AARCH64_OPND_Em:
4658 operand->reglane.regno = default_value;
4659 break;
4660
4661 case AARCH64_OPND_IDX:
4662 case AARCH64_OPND_BIT_NUM:
4663 case AARCH64_OPND_IMMR:
4664 case AARCH64_OPND_IMMS:
4665 case AARCH64_OPND_SHLL_IMM:
4666 case AARCH64_OPND_IMM_VLSL:
4667 case AARCH64_OPND_IMM_VLSR:
4668 case AARCH64_OPND_CCMP_IMM:
4669 case AARCH64_OPND_FBITS:
4670 case AARCH64_OPND_UIMM4:
4671 case AARCH64_OPND_UIMM3_OP1:
4672 case AARCH64_OPND_UIMM3_OP2:
4673 case AARCH64_OPND_IMM:
4674 case AARCH64_OPND_WIDTH:
4675 case AARCH64_OPND_UIMM7:
4676 case AARCH64_OPND_NZCV:
4677 operand->imm.value = default_value;
4678 break;
4679
4680 case AARCH64_OPND_EXCEPTION:
4681 inst.reloc.type = BFD_RELOC_UNUSED;
4682 break;
4683
4684 case AARCH64_OPND_BARRIER_ISB:
4685 operand->barrier = aarch64_barrier_options + default_value;
4686 break;
4687 default:
4688 break;
4689 }
4690 }
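
/* Example (illustrative): a bare "ret" omits its optional Rn operand; the
   default value recorded in the opcode table (typically 30, i.e. x30) is
   filled in by the AARCH64_OPND_Rn case above.  */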
4691
4692 /* Process the relocation type for move wide instructions.
4693 Return TRUE on success; otherwise return FALSE. */
4694
4695 static bfd_boolean
4696 process_movw_reloc_info (void)
4697 {
4698 int is32;
4699 unsigned shift;
4700
4701 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4702
4703 if (inst.base.opcode->op == OP_MOVK)
4704 switch (inst.reloc.type)
4705 {
4706 case BFD_RELOC_AARCH64_MOVW_G0_S:
4707 case BFD_RELOC_AARCH64_MOVW_G1_S:
4708 case BFD_RELOC_AARCH64_MOVW_G2_S:
4709 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4710 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4711 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4712 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4713 set_syntax_error
4714 (_("the specified relocation type is not allowed for MOVK"));
4715 return FALSE;
4716 default:
4717 break;
4718 }
4719
4720 switch (inst.reloc.type)
4721 {
4722 case BFD_RELOC_AARCH64_MOVW_G0:
4723 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4724 case BFD_RELOC_AARCH64_MOVW_G0_S:
4725 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
4726 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
4727 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
4728 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
4729 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
4730 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4731 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4732 shift = 0;
4733 break;
4734 case BFD_RELOC_AARCH64_MOVW_G1:
4735 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4736 case BFD_RELOC_AARCH64_MOVW_G1_S:
4737 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
4738 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4739 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
4740 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
4741 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
4742 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4743 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4744 shift = 16;
4745 break;
4746 case BFD_RELOC_AARCH64_MOVW_G2:
4747 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4748 case BFD_RELOC_AARCH64_MOVW_G2_S:
4749 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
4750 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4751 if (is32)
4752 {
4753 set_fatal_syntax_error
4754 (_("the specified relocation type is not allowed for 32-bit "
4755 "register"));
4756 return FALSE;
4757 }
4758 shift = 32;
4759 break;
4760 case BFD_RELOC_AARCH64_MOVW_G3:
4761 if (is32)
4762 {
4763 set_fatal_syntax_error
4764 (_("the specified relocation type is not allowed for 32-bit "
4765 "register"));
4766 return FALSE;
4767 }
4768 shift = 48;
4769 break;
4770 default:
4771 /* More cases should be added when more MOVW-related relocation types
4772 are supported in GAS. */
4773 gas_assert (aarch64_gas_internal_fixup_p ());
4774 /* The shift amount should have already been set by the parser. */
4775 return TRUE;
4776 }
4777 inst.base.operands[1].shifter.amount = shift;
4778 return TRUE;
4779 }
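
/* Example (illustrative): for

     movz x0, #:abs_g1:sym

   the parser records BFD_RELOC_AARCH64_MOVW_G1 and the switch above sets
   the implicit shift amount to 16; G0-class relocations select 0, and the
   G2/G3 groups select 32/48 (64-bit registers only).  */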
4780
4781 /* A primitive log calculator. */
4782
4783 static inline unsigned int
4784 get_logsz (unsigned int size)
4785 {
4786 const unsigned char ls[16] =
4787 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4788 if (size > 16)
4789 {
4790 gas_assert (0);
4791 return -1;
4792 }
4793 gas_assert (ls[size - 1] != (unsigned char)-1);
4794 return ls[size - 1];
4795 }
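
/* Example (illustrative): get_logsz (1) == 0, get_logsz (4) == 2 and
   get_logsz (16) == 4; a size that is not a power of two in [1, 16] hits
   one of the -1 entries and trips the assertion.  */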
4796
4797 /* Determine and return the real reloc type code for an instruction
4798 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4799
4800 static inline bfd_reloc_code_real_type
4801 ldst_lo12_determine_real_reloc_type (void)
4802 {
4803 unsigned logsz;
4804 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4805 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4806
4807 const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
4808 {
4809 BFD_RELOC_AARCH64_LDST8_LO12,
4810 BFD_RELOC_AARCH64_LDST16_LO12,
4811 BFD_RELOC_AARCH64_LDST32_LO12,
4812 BFD_RELOC_AARCH64_LDST64_LO12,
4813 BFD_RELOC_AARCH64_LDST128_LO12
4814 },
4815 {
4816 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
4817 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
4818 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
4819 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
4820 BFD_RELOC_AARCH64_NONE
4821 },
4822 {
4823 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
4824 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
4825 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
4826 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
4827 BFD_RELOC_AARCH64_NONE
4828 }
4829 };
4830
4831 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
4832 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
4833 || (inst.reloc.type
4834 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
4835 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4836
4837 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4838 opd1_qlf =
4839 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4840 1, opd0_qlf, 0);
4841 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4842
4843 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4844 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
4845 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
4846 gas_assert (logsz <= 3);
4847 else
4848 gas_assert (logsz <= 4);
4849
4850 /* In reloc.c, these pseudo relocation types should be defined in the same
4851 order as the above reloc_ldst_lo12 array, because the array index
4852 calculation below relies on this. */
4853 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
4854 }
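
/* Example (illustrative): for

     ldr x0, [x1, #:lo12:sym]

   the address operand's qualifier is deduced from the 64-bit destination,
   giving an element size of 8 and logsz == 3, so the pseudo reloc
   BFD_RELOC_AARCH64_LDST_LO12 (row 0) is narrowed to
   BFD_RELOC_AARCH64_LDST64_LO12.  */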
4855
4856 /* Check whether a register list REGINFO is valid. The registers must be
4857 numbered in increasing order (modulo 32), in increments of one or two.
4858
4859 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4860 increments of two.
4861
4862 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4863
4864 static bfd_boolean
4865 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4866 {
4867 uint32_t i, nb_regs, prev_regno, incr;
4868
4869 nb_regs = 1 + (reginfo & 0x3);
4870 reginfo >>= 2;
4871 prev_regno = reginfo & 0x1f;
4872 incr = accept_alternate ? 2 : 1;
4873
4874 for (i = 1; i < nb_regs; ++i)
4875 {
4876 uint32_t curr_regno;
4877 reginfo >>= 5;
4878 curr_regno = reginfo & 0x1f;
4879 if (curr_regno != ((prev_regno + incr) & 0x1f))
4880 return FALSE;
4881 prev_regno = curr_regno;
4882 }
4883
4884 return TRUE;
4885 }
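
/* Encoding sketch (illustrative): REGINFO packs the element count minus one
   in its two low bits and then one 5-bit register number per element, first
   register in the lowest field.  A three-register list starting at register
   1 could therefore be written

     uint32_t reginfo = (3 << 12) | (2 << 7) | (1 << 2) | 2;

   and reg_list_valid_p (reginfo, 0) returns TRUE because the numbers ascend
   in steps of one (modulo 32).  */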
4886
4887 /* Generic instruction operand parser. This does no encoding and no
4888 semantic validation; it merely squirrels values away in the inst
4889 structure. Returns TRUE or FALSE depending on whether the
4890 specified grammar matched. */
4891
4892 static bfd_boolean
4893 parse_operands (char *str, const aarch64_opcode *opcode)
4894 {
4895 int i;
4896 char *backtrack_pos = 0;
4897 const enum aarch64_opnd *operands = opcode->operands;
4898
4899 clear_error ();
4900 skip_whitespace (str);
4901
4902 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4903 {
4904 int64_t val;
4905 int isreg32, isregzero;
4906 int comma_skipped_p = 0;
4907 aarch64_reg_type rtype;
4908 struct neon_type_el vectype;
4909 aarch64_opnd_info *info = &inst.base.operands[i];
4910
4911 DEBUG_TRACE ("parse operand %d", i);
4912
4913 /* Assign the operand code. */
4914 info->type = operands[i];
4915
4916 if (optional_operand_p (opcode, i))
4917 {
4918 /* Remember where we are in case we need to backtrack. */
4919 gas_assert (!backtrack_pos);
4920 backtrack_pos = str;
4921 }
4922
4923 /* Expect a comma between operands; the backtrack mechanism will take
4924 care of an omitted optional operand. */
4925 if (i > 0 && ! skip_past_char (&str, ','))
4926 {
4927 set_syntax_error (_("comma expected between operands"));
4928 goto failure;
4929 }
4930 else
4931 comma_skipped_p = 1;
4932
4933 switch (operands[i])
4934 {
4935 case AARCH64_OPND_Rd:
4936 case AARCH64_OPND_Rn:
4937 case AARCH64_OPND_Rm:
4938 case AARCH64_OPND_Rt:
4939 case AARCH64_OPND_Rt2:
4940 case AARCH64_OPND_Rs:
4941 case AARCH64_OPND_Ra:
4942 case AARCH64_OPND_Rt_SYS:
4943 case AARCH64_OPND_PAIRREG:
4944 po_int_reg_or_fail (1, 0);
4945 break;
4946
4947 case AARCH64_OPND_Rd_SP:
4948 case AARCH64_OPND_Rn_SP:
4949 po_int_reg_or_fail (0, 1);
4950 break;
4951
4952 case AARCH64_OPND_Rm_EXT:
4953 case AARCH64_OPND_Rm_SFT:
4954 po_misc_or_fail (parse_shifter_operand
4955 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4956 ? SHIFTED_ARITH_IMM
4957 : SHIFTED_LOGIC_IMM)));
4958 if (!info->shifter.operator_present)
4959 {
4960 /* Default to LSL if not present. Libopcodes prefers shifter
4961 kind to be explicit. */
4962 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4963 info->shifter.kind = AARCH64_MOD_LSL;
4964 /* For Rm_EXT, libopcodes will carry out a further check on whether
4965 or not the stack pointer is used in the instruction (recall that
4966 "the extend operator is not optional unless at least one of
4967 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4968 }
4969 break;
4970
4971 case AARCH64_OPND_Fd:
4972 case AARCH64_OPND_Fn:
4973 case AARCH64_OPND_Fm:
4974 case AARCH64_OPND_Fa:
4975 case AARCH64_OPND_Ft:
4976 case AARCH64_OPND_Ft2:
4977 case AARCH64_OPND_Sd:
4978 case AARCH64_OPND_Sn:
4979 case AARCH64_OPND_Sm:
4980 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4981 if (val == PARSE_FAIL)
4982 {
4983 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4984 goto failure;
4985 }
4986 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4987
4988 info->reg.regno = val;
4989 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4990 break;
4991
4992 case AARCH64_OPND_Vd:
4993 case AARCH64_OPND_Vn:
4994 case AARCH64_OPND_Vm:
4995 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4996 if (val == PARSE_FAIL)
4997 {
4998 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4999 goto failure;
5000 }
5001 if (vectype.defined & NTA_HASINDEX)
5002 goto failure;
5003
5004 info->reg.regno = val;
5005 info->qualifier = vectype_to_qualifier (&vectype);
5006 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5007 goto failure;
5008 break;
5009
5010 case AARCH64_OPND_VdD1:
5011 case AARCH64_OPND_VnD1:
5012 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5013 if (val == PARSE_FAIL)
5014 {
5015 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5016 goto failure;
5017 }
5018 if (vectype.type != NT_d || vectype.index != 1)
5019 {
5020 set_fatal_syntax_error
5021 (_("the top half of a 128-bit FP/SIMD register is expected"));
5022 goto failure;
5023 }
5024 info->reg.regno = val;
5025 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
5026 here; it is correct for the purpose of encoding/decoding since
5027 only the register number is explicitly encoded in the related
5028 instructions, although this appears a bit hacky. */
5029 info->qualifier = AARCH64_OPND_QLF_S_D;
5030 break;
5031
5032 case AARCH64_OPND_Ed:
5033 case AARCH64_OPND_En:
5034 case AARCH64_OPND_Em:
5035 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5036 if (val == PARSE_FAIL)
5037 {
5038 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5039 goto failure;
5040 }
5041 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5042 goto failure;
5043
5044 info->reglane.regno = val;
5045 info->reglane.index = vectype.index;
5046 info->qualifier = vectype_to_qualifier (&vectype);
5047 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5048 goto failure;
5049 break;
5050
5051 case AARCH64_OPND_LVn:
5052 case AARCH64_OPND_LVt:
5053 case AARCH64_OPND_LVt_AL:
5054 case AARCH64_OPND_LEt:
5055 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
5056 goto failure;
5057 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5058 {
5059 set_fatal_syntax_error (_("invalid register list"));
5060 goto failure;
5061 }
5062 info->reglist.first_regno = (val >> 2) & 0x1f;
5063 info->reglist.num_regs = (val & 0x3) + 1;
5064 if (operands[i] == AARCH64_OPND_LEt)
5065 {
5066 if (!(vectype.defined & NTA_HASINDEX))
5067 goto failure;
5068 info->reglist.has_index = 1;
5069 info->reglist.index = vectype.index;
5070 }
5071 else if (!(vectype.defined & NTA_HASTYPE))
5072 goto failure;
5073 info->qualifier = vectype_to_qualifier (&vectype);
5074 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5075 goto failure;
5076 break;
5077
5078 case AARCH64_OPND_Cn:
5079 case AARCH64_OPND_Cm:
5080 po_reg_or_fail (REG_TYPE_CN);
5081 if (val > 15)
5082 {
5083 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
5084 goto failure;
5085 }
5086 inst.base.operands[i].reg.regno = val;
5087 break;
5088
5089 case AARCH64_OPND_SHLL_IMM:
5090 case AARCH64_OPND_IMM_VLSR:
5091 po_imm_or_fail (1, 64);
5092 info->imm.value = val;
5093 break;
5094
5095 case AARCH64_OPND_CCMP_IMM:
5096 case AARCH64_OPND_FBITS:
5097 case AARCH64_OPND_UIMM4:
5098 case AARCH64_OPND_UIMM3_OP1:
5099 case AARCH64_OPND_UIMM3_OP2:
5100 case AARCH64_OPND_IMM_VLSL:
5101 case AARCH64_OPND_IMM:
5102 case AARCH64_OPND_WIDTH:
5103 po_imm_nc_or_fail ();
5104 info->imm.value = val;
5105 break;
5106
5107 case AARCH64_OPND_UIMM7:
5108 po_imm_or_fail (0, 127);
5109 info->imm.value = val;
5110 break;
5111
5112 case AARCH64_OPND_IDX:
5113 case AARCH64_OPND_BIT_NUM:
5114 case AARCH64_OPND_IMMR:
5115 case AARCH64_OPND_IMMS:
5116 po_imm_or_fail (0, 63);
5117 info->imm.value = val;
5118 break;
5119
5120 case AARCH64_OPND_IMM0:
5121 po_imm_nc_or_fail ();
5122 if (val != 0)
5123 {
5124 set_fatal_syntax_error (_("immediate zero expected"));
5125 goto failure;
5126 }
5127 info->imm.value = 0;
5128 break;
5129
5130 case AARCH64_OPND_FPIMM0:
5131 {
5132 int qfloat;
5133 bfd_boolean res1 = FALSE, res2 = FALSE;
5134 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5135 it is probably not worth the effort to support it. */
5136 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
5137 && !(res2 = parse_constant_immediate (&str, &val)))
5138 goto failure;
5139 if ((res1 && qfloat == 0) || (res2 && val == 0))
5140 {
5141 info->imm.value = 0;
5142 info->imm.is_fp = 1;
5143 break;
5144 }
5145 set_fatal_syntax_error (_("immediate zero expected"));
5146 goto failure;
5147 }
5148
5149 case AARCH64_OPND_IMM_MOV:
5150 {
5151 char *saved = str;
5152 if (reg_name_p (str, REG_TYPE_R_Z_SP)
5153 || reg_name_p (str, REG_TYPE_VN))
5154 goto failure;
5155 str = saved;
5156 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5157 GE_OPT_PREFIX, 1));
5158 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5159 later. fix_mov_imm_insn will try to determine a machine
5160 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5161 message if the immediate cannot be moved by a single
5162 instruction. */
5163 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5164 inst.base.operands[i].skip = 1;
5165 }
5166 break;
5167
5168 case AARCH64_OPND_SIMD_IMM:
5169 case AARCH64_OPND_SIMD_IMM_SFT:
5170 if (! parse_big_immediate (&str, &val))
5171 goto failure;
5172 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5173 /* addr_off_p */ 0,
5174 /* need_libopcodes_p */ 1,
5175 /* skip_p */ 1);
5176 /* Parse shift.
5177 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5178 shift, we don't check it here; we leave the checking to
5179 the libopcodes (operand_general_constraint_met_p). By
5180 doing this, we achieve better diagnostics. */
5181 if (skip_past_comma (&str)
5182 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5183 goto failure;
5184 if (!info->shifter.operator_present
5185 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5186 {
5187 /* Default to LSL if not present. Libopcodes prefers shifter
5188 kind to be explicit. */
5189 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5190 info->shifter.kind = AARCH64_MOD_LSL;
5191 }
5192 break;
5193
5194 case AARCH64_OPND_FPIMM:
5195 case AARCH64_OPND_SIMD_FPIMM:
5196 {
5197 int qfloat;
5198 bfd_boolean dp_p
5199 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5200 == 8);
5201 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
5202 goto failure;
5203 if (qfloat == 0)
5204 {
5205 set_fatal_syntax_error (_("invalid floating-point constant"));
5206 goto failure;
5207 }
5208 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5209 inst.base.operands[i].imm.is_fp = 1;
5210 }
5211 break;
5212
5213 case AARCH64_OPND_LIMM:
5214 po_misc_or_fail (parse_shifter_operand (&str, info,
5215 SHIFTED_LOGIC_IMM));
5216 if (info->shifter.operator_present)
5217 {
5218 set_fatal_syntax_error
5219 (_("shift not allowed for bitmask immediate"));
5220 goto failure;
5221 }
5222 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5223 /* addr_off_p */ 0,
5224 /* need_libopcodes_p */ 1,
5225 /* skip_p */ 1);
5226 break;
5227
5228 case AARCH64_OPND_AIMM:
5229 if (opcode->op == OP_ADD)
5230 /* ADD may have relocation types. */
5231 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5232 SHIFTED_ARITH_IMM));
5233 else
5234 po_misc_or_fail (parse_shifter_operand (&str, info,
5235 SHIFTED_ARITH_IMM));
5236 switch (inst.reloc.type)
5237 {
5238 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5239 info->shifter.amount = 12;
5240 break;
5241 case BFD_RELOC_UNUSED:
5242 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5243 if (info->shifter.kind != AARCH64_MOD_NONE)
5244 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5245 inst.reloc.pc_rel = 0;
5246 break;
5247 default:
5248 break;
5249 }
5250 info->imm.value = 0;
5251 if (!info->shifter.operator_present)
5252 {
5253 /* Default to LSL if not present. Libopcodes prefers shifter
5254 kind to be explicit. */
5255 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5256 info->shifter.kind = AARCH64_MOD_LSL;
5257 }
5258 break;
5259
5260 case AARCH64_OPND_HALF:
5261 {
5262 /* #<imm16> or relocation. */
5263 int internal_fixup_p;
5264 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5265 if (internal_fixup_p)
5266 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5267 skip_whitespace (str);
5268 if (skip_past_comma (&str))
5269 {
5270 /* {, LSL #<shift>} */
5271 if (! aarch64_gas_internal_fixup_p ())
5272 {
5273 set_fatal_syntax_error (_("can't mix relocation modifier "
5274 "with explicit shift"));
5275 goto failure;
5276 }
5277 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5278 }
5279 else
5280 inst.base.operands[i].shifter.amount = 0;
5281 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5282 inst.base.operands[i].imm.value = 0;
5283 if (! process_movw_reloc_info ())
5284 goto failure;
5285 }
5286 break;
5287
5288 case AARCH64_OPND_EXCEPTION:
5289 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
5290 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5291 /* addr_off_p */ 0,
5292 /* need_libopcodes_p */ 0,
5293 /* skip_p */ 1);
5294 break;
5295
5296 case AARCH64_OPND_NZCV:
5297 {
5298 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5299 if (nzcv != NULL)
5300 {
5301 str += 4;
5302 info->imm.value = nzcv->value;
5303 break;
5304 }
5305 po_imm_or_fail (0, 15);
5306 info->imm.value = val;
5307 }
5308 break;
5309
5310 case AARCH64_OPND_COND:
5311 case AARCH64_OPND_COND1:
5312 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5313 str += 2;
5314 if (info->cond == NULL)
5315 {
5316 set_syntax_error (_("invalid condition"));
5317 goto failure;
5318 }
5319 else if (operands[i] == AARCH64_OPND_COND1
5320 && (info->cond->value & 0xe) == 0xe)
5321 {
5322 /* Do not allow AL or NV. */
5323 set_default_error ();
5324 goto failure;
5325 }
5326 break;
5327
5328 case AARCH64_OPND_ADDR_ADRP:
5329 po_misc_or_fail (parse_adrp (&str));
5330 /* Clear the value as operand needs to be relocated. */
5331 info->imm.value = 0;
5332 break;
5333
5334 case AARCH64_OPND_ADDR_PCREL14:
5335 case AARCH64_OPND_ADDR_PCREL19:
5336 case AARCH64_OPND_ADDR_PCREL21:
5337 case AARCH64_OPND_ADDR_PCREL26:
5338 po_misc_or_fail (parse_address_reloc (&str, info));
5339 if (!info->addr.pcrel)
5340 {
5341 set_syntax_error (_("invalid pc-relative address"));
5342 goto failure;
5343 }
5344 if (inst.gen_lit_pool
5345 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5346 {
5347 /* Only permit "=value" in the literal load instructions.
5348 The literal will be generated by programmer_friendly_fixup. */
5349 set_syntax_error (_("invalid use of \"=immediate\""));
5350 goto failure;
5351 }
5352 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5353 {
5354 set_syntax_error (_("unrecognized relocation suffix"));
5355 goto failure;
5356 }
5357 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5358 {
5359 info->imm.value = inst.reloc.exp.X_add_number;
5360 inst.reloc.type = BFD_RELOC_UNUSED;
5361 }
5362 else
5363 {
5364 info->imm.value = 0;
5365 if (inst.reloc.type == BFD_RELOC_UNUSED)
5366 switch (opcode->iclass)
5367 {
5368 case compbranch:
5369 case condbranch:
5370 /* e.g. CBZ or B.COND */
5371 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5372 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5373 break;
5374 case testbranch:
5375 /* e.g. TBZ */
5376 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5377 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5378 break;
5379 case branch_imm:
5380 /* e.g. B or BL */
5381 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5382 inst.reloc.type =
5383 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5384 : BFD_RELOC_AARCH64_JUMP26;
5385 break;
5386 case loadlit:
5387 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5388 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5389 break;
5390 case pcreladdr:
5391 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5392 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5393 break;
5394 default:
5395 gas_assert (0);
5396 abort ();
5397 }
5398 inst.reloc.pc_rel = 1;
5399 }
5400 break;
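
/* Reloc-selection sketch (illustrative): for "b target" the branch_imm
   case above picks BFD_RELOC_AARCH64_JUMP26, "bl target" picks
   BFD_RELOC_AARCH64_CALL26, and "cbz x0, target" (compbranch) picks
   BFD_RELOC_AARCH64_BRANCH19; the resulting fix-up is handled later in
   md_apply_fix.  */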
5401
5402 case AARCH64_OPND_ADDR_SIMPLE:
5403 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5404 /* [<Xn|SP>{, #<simm>}] */
5405 po_char_or_fail ('[');
5406 po_reg_or_fail (REG_TYPE_R64_SP);
5407 /* Accept optional ", #0". */
5408 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5409 && skip_past_char (&str, ','))
5410 {
5411 skip_past_char (&str, '#');
5412 if (! skip_past_char (&str, '0'))
5413 {
5414 set_fatal_syntax_error
5415 (_("the optional immediate offset can only be 0"));
5416 goto failure;
5417 }
5418 }
5419 po_char_or_fail (']');
5420 info->addr.base_regno = val;
5421 break;
5422
5423 case AARCH64_OPND_ADDR_REGOFF:
5424 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5425 po_misc_or_fail (parse_address (&str, info, 0));
5426 if (info->addr.pcrel || !info->addr.offset.is_reg
5427 || !info->addr.preind || info->addr.postind
5428 || info->addr.writeback)
5429 {
5430 set_syntax_error (_("invalid addressing mode"));
5431 goto failure;
5432 }
5433 if (!info->shifter.operator_present)
5434 {
5435 /* Default to LSL if not present. Libopcodes prefers shifter
5436 kind to be explicit. */
5437 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5438 info->shifter.kind = AARCH64_MOD_LSL;
5439 }
5440 /* Qualifier to be deduced by libopcodes. */
5441 break;
5442
5443 case AARCH64_OPND_ADDR_SIMM7:
5444 po_misc_or_fail (parse_address (&str, info, 0));
5445 if (info->addr.pcrel || info->addr.offset.is_reg
5446 || (!info->addr.preind && !info->addr.postind))
5447 {
5448 set_syntax_error (_("invalid addressing mode"));
5449 goto failure;
5450 }
5451 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5452 /* addr_off_p */ 1,
5453 /* need_libopcodes_p */ 1,
5454 /* skip_p */ 0);
5455 break;
5456
5457 case AARCH64_OPND_ADDR_SIMM9:
5458 case AARCH64_OPND_ADDR_SIMM9_2:
5459 po_misc_or_fail (parse_address_reloc (&str, info));
5460 if (info->addr.pcrel || info->addr.offset.is_reg
5461 || (!info->addr.preind && !info->addr.postind)
5462 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5463 && info->addr.writeback))
5464 {
5465 set_syntax_error (_("invalid addressing mode"));
5466 goto failure;
5467 }
5468 if (inst.reloc.type != BFD_RELOC_UNUSED)
5469 {
5470 set_syntax_error (_("relocation not allowed"));
5471 goto failure;
5472 }
5473 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5474 /* addr_off_p */ 1,
5475 /* need_libopcodes_p */ 1,
5476 /* skip_p */ 0);
5477 break;
5478
5479 case AARCH64_OPND_ADDR_UIMM12:
5480 po_misc_or_fail (parse_address_reloc (&str, info));
5481 if (info->addr.pcrel || info->addr.offset.is_reg
5482 || !info->addr.preind || info->addr.writeback)
5483 {
5484 set_syntax_error (_("invalid addressing mode"));
5485 goto failure;
5486 }
5487 if (inst.reloc.type == BFD_RELOC_UNUSED)
5488 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5489 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5490 || (inst.reloc.type
5491 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
5492 || (inst.reloc.type
5493 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
5494 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5495 /* Leave qualifier to be determined by libopcodes. */
5496 break;
5497
5498 case AARCH64_OPND_SIMD_ADDR_POST:
5499 /* [<Xn|SP>], <Xm|#<amount>> */
5500 po_misc_or_fail (parse_address (&str, info, 1));
5501 if (!info->addr.postind || !info->addr.writeback)
5502 {
5503 set_syntax_error (_("invalid addressing mode"));
5504 goto failure;
5505 }
5506 if (!info->addr.offset.is_reg)
5507 {
5508 if (inst.reloc.exp.X_op == O_constant)
5509 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5510 else
5511 {
5512 set_fatal_syntax_error
5513 (_("writeback value should be an immediate constant"));
5514 goto failure;
5515 }
5516 }
5517 /* No qualifier. */
5518 break;
5519
5520 case AARCH64_OPND_SYSREG:
5521 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
5522 == PARSE_FAIL)
5523 {
5524 set_syntax_error (_("unknown or missing system register name"));
5525 goto failure;
5526 }
5527 inst.base.operands[i].sysreg = val;
5528 break;
5529
5530 case AARCH64_OPND_PSTATEFIELD:
5531 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
5532 == PARSE_FAIL)
5533 {
5534 set_syntax_error (_("unknown or missing PSTATE field name"));
5535 goto failure;
5536 }
5537 inst.base.operands[i].pstatefield = val;
5538 break;
5539
5540 case AARCH64_OPND_SYSREG_IC:
5541 inst.base.operands[i].sysins_op =
5542 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5543 goto sys_reg_ins;
5544 case AARCH64_OPND_SYSREG_DC:
5545 inst.base.operands[i].sysins_op =
5546 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5547 goto sys_reg_ins;
5548 case AARCH64_OPND_SYSREG_AT:
5549 inst.base.operands[i].sysins_op =
5550 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5551 goto sys_reg_ins;
5552 case AARCH64_OPND_SYSREG_TLBI:
5553 inst.base.operands[i].sysins_op =
5554 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5555 sys_reg_ins:
5556 if (inst.base.operands[i].sysins_op == NULL)
5557 {
5558 set_fatal_syntax_error ( _("unknown or missing operation name"));
5559 goto failure;
5560 }
5561 break;
5562
5563 case AARCH64_OPND_BARRIER:
5564 case AARCH64_OPND_BARRIER_ISB:
5565 val = parse_barrier (&str);
5566 if (val != PARSE_FAIL
5567 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5568 {
5569 /* ISB only accepts the option name 'sy'. */
5570 set_syntax_error
5571 (_("the specified option is not accepted in ISB"));
5572 /* Turn off backtrack as this optional operand is present. */
5573 backtrack_pos = 0;
5574 goto failure;
5575 }
5576 /* This is an extension to accept a 0..15 immediate. */
5577 if (val == PARSE_FAIL)
5578 po_imm_or_fail (0, 15);
5579 info->barrier = aarch64_barrier_options + val;
5580 break;
5581
5582 case AARCH64_OPND_PRFOP:
5583 val = parse_pldop (&str);
5584 /* This is an extension to accept a 0..31 immediate. */
5585 if (val == PARSE_FAIL)
5586 po_imm_or_fail (0, 31);
5587 inst.base.operands[i].prfop = aarch64_prfops + val;
5588 break;
5589
5590 default:
5591 as_fatal (_("unhandled operand code %d"), operands[i]);
5592 }
5593
5594 /* If we get here, this operand was successfully parsed. */
5595 inst.base.operands[i].present = 1;
5596 continue;
5597
5598 failure:
5599 /* The parse routine should already have set the error, but in case
5600 not, set a default one here. */
5601 if (! error_p ())
5602 set_default_error ();
5603
5604 if (! backtrack_pos)
5605 goto parse_operands_return;
5606
5607 {
5608 /* We reach here because this operand is marked as optional, and
5609 either no operand was supplied or the operand was supplied but it
5610 was syntactically incorrect. In the latter case we report an
5611 error. In the former case we perform a few more checks before
5612 dropping through to the code to insert the default operand. */
5613
5614 char *tmp = backtrack_pos;
5615 char endchar = END_OF_INSN;
5616
5617 if (i != (aarch64_num_of_operands (opcode) - 1))
5618 endchar = ',';
5619 skip_past_char (&tmp, ',');
5620
5621 if (*tmp != endchar)
5622 /* The user has supplied an operand in the wrong format. */
5623 goto parse_operands_return;
5624
5625 /* Make sure there is not a comma before the optional operand.
5626 For example the fifth operand of 'sys' is optional:
5627
5628 sys #0,c0,c0,#0, <--- wrong
5629 sys #0,c0,c0,#0 <--- correct. */
5630 if (comma_skipped_p && i && endchar == END_OF_INSN)
5631 {
5632 set_fatal_syntax_error
5633 (_("unexpected comma before the omitted optional operand"));
5634 goto parse_operands_return;
5635 }
5636 }
5637
5638 /* Reaching here means we are dealing with an optional operand that is
5639 omitted from the assembly line. */
5640 gas_assert (optional_operand_p (opcode, i));
5641 info->present = 0;
5642 process_omitted_operand (operands[i], opcode, i, info);
5643
5644 /* Try again, skipping the optional operand at backtrack_pos. */
5645 str = backtrack_pos;
5646 backtrack_pos = 0;
5647
5648 /* Clear any error record after the omitted optional operand has been
5649 successfully handled. */
5650 clear_error ();
5651 }
5652
5653 /* Check if we have parsed all the operands. */
5654 if (*str != '\0' && ! error_p ())
5655 {
5656 /* Set I to the index of the last present operand; this is
5657 for the purpose of diagnostics. */
5658 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5659 ;
5660 set_fatal_syntax_error
5661 (_("unexpected characters following instruction"));
5662 }
5663
5664 parse_operands_return:
5665
5666 if (error_p ())
5667 {
5668 DEBUG_TRACE ("parsing FAIL: %s - %s",
5669 operand_mismatch_kind_names[get_error_kind ()],
5670 get_error_message ());
5671 /* Record the operand error properly; this is useful when there
5672 are multiple instruction templates for a mnemonic name, so that
5673 later on, we can select the error that most closely describes
5674 the problem. */
5675 record_operand_error (opcode, i, get_error_kind (),
5676 get_error_message ());
5677 return FALSE;
5678 }
5679 else
5680 {
5681 DEBUG_TRACE ("parsing SUCCESS");
5682 return TRUE;
5683 }
5684 }
5685
5686 /* Perform some fix-ups to provide programmer-friendly features while
5687 keeping libopcodes happy, i.e. libopcodes only accepts
5688 the preferred architectural syntax.
5689 Return FALSE if there is any failure; otherwise return TRUE. */
5690
5691 static bfd_boolean
5692 programmer_friendly_fixup (aarch64_instruction *instr)
5693 {
5694 aarch64_inst *base = &instr->base;
5695 const aarch64_opcode *opcode = base->opcode;
5696 enum aarch64_op op = opcode->op;
5697 aarch64_opnd_info *operands = base->operands;
5698
5699 DEBUG_TRACE ("enter");
5700
5701 switch (opcode->iclass)
5702 {
5703 case testbranch:
5704 /* TBNZ Xn|Wn, #uimm6, label
5705 Test and Branch Not Zero: conditionally jumps to label if bit number
5706 uimm6 in register Xn is not zero. The bit number implies the width of
5707 the register, which may be written and should be disassembled as Wn if
5708 uimm is less than 32. */
5709 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5710 {
5711 if (operands[1].imm.value >= 32)
5712 {
5713 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5714 0, 31);
5715 return FALSE;
5716 }
5717 operands[0].qualifier = AARCH64_OPND_QLF_X;
5718 }
5719 break;
5720 case loadlit:
5721 /* LDR Wt, label | =value
5722 As a convenience assemblers will typically permit the notation
5723 "=value" in conjunction with the pc-relative literal load instructions
5724 to automatically place an immediate value or symbolic address in a
5725 nearby literal pool and generate a hidden label which references it.
5726 ISREG has been set to 0 in the case of =value. */
5727 if (instr->gen_lit_pool
5728 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5729 {
5730 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5731 if (op == OP_LDRSW_LIT)
5732 size = 4;
5733 if (instr->reloc.exp.X_op != O_constant
5734 && instr->reloc.exp.X_op != O_big
5735 && instr->reloc.exp.X_op != O_symbol)
5736 {
5737 record_operand_error (opcode, 1,
5738 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5739 _("constant expression expected"));
5740 return FALSE;
5741 }
5742 if (! add_to_lit_pool (&instr->reloc.exp, size))
5743 {
5744 record_operand_error (opcode, 1,
5745 AARCH64_OPDE_OTHER_ERROR,
5746 _("literal pool insertion failed"));
5747 return FALSE;
5748 }
5749 }
5750 break;
5751 case log_shift:
5752 case bitfield:
5753 /* UXT[BHW] Wd, Wn
5754 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5755 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5756 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5757 A programmer-friendly assembler should accept a destination Xd in
5758 place of Wd, however that is not the preferred form for disassembly.
5759 */
5760 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5761 && operands[1].qualifier == AARCH64_OPND_QLF_W
5762 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5763 operands[0].qualifier = AARCH64_OPND_QLF_W;
5764 break;
5765
5766 case addsub_ext:
5767 {
5768 /* In the 64-bit form, the final register operand is written as Wm
5769 for all but the (possibly omitted) UXTX/LSL and SXTX
5770 operators.
5771 As a programmer-friendly assembler, we accept e.g.
5772 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5773 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5774 int idx = aarch64_operand_index (opcode->operands,
5775 AARCH64_OPND_Rm_EXT);
5776 gas_assert (idx == 1 || idx == 2);
5777 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5778 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5779 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5780 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5781 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5782 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5783 }
5784 break;
5785
5786 default:
5787 break;
5788 }
5789
5790 DEBUG_TRACE ("exit with SUCCESS");
5791 return TRUE;
5792 }
5793
5794 /* Check for loads and stores that will cause unpredictable behavior. */
5795
5796 static void
5797 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5798 {
5799 aarch64_inst *base = &instr->base;
5800 const aarch64_opcode *opcode = base->opcode;
5801 const aarch64_opnd_info *opnds = base->operands;
5802 switch (opcode->iclass)
5803 {
5804 case ldst_pos:
5805 case ldst_imm9:
5806 case ldst_unscaled:
5807 case ldst_unpriv:
5808 /* Loading/storing the base register is unpredictable if writeback. */
5809 if ((aarch64_get_operand_class (opnds[0].type)
5810 == AARCH64_OPND_CLASS_INT_REG)
5811 && opnds[0].reg.regno == opnds[1].addr.base_regno
5812 && opnds[1].addr.base_regno != REG_SP
5813 && opnds[1].addr.writeback)
5814 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5815 break;
5816 case ldstpair_off:
5817 case ldstnapair_offs:
5818 case ldstpair_indexed:
5819 /* Loading/storing the base register is unpredictable if writeback. */
5820 if ((aarch64_get_operand_class (opnds[0].type)
5821 == AARCH64_OPND_CLASS_INT_REG)
5822 && (opnds[0].reg.regno == opnds[2].addr.base_regno
5823 || opnds[1].reg.regno == opnds[2].addr.base_regno)
5824 && opnds[2].addr.base_regno != REG_SP
5825 && opnds[2].addr.writeback)
5826 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5827 /* Load operations must load different registers. */
5828 if ((opcode->opcode & (1 << 22))
5829 && opnds[0].reg.regno == opnds[1].reg.regno)
5830 as_warn (_("unpredictable load of register pair -- `%s'"), str);
5831 break;
5832 default:
5833 break;
5834 }
5835 }
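
/* Examples (illustrative): "ldr x0, [x0, #8]!" writes back into the
   transfer register and triggers the first warning above, while
   "ldp x2, x2, [x1]" loads the same register twice and triggers the
   second.  */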
5836
5837 /* A wrapper function to interface with libopcodes on encoding and
5838 record the error message if there is any.
5839
5840 Return TRUE on success; otherwise return FALSE. */
5841
5842 static bfd_boolean
5843 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5844 aarch64_insn *code)
5845 {
5846 aarch64_operand_error error_info;
5847 error_info.kind = AARCH64_OPDE_NIL;
5848 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5849 return TRUE;
5850 else
5851 {
5852 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5853 record_operand_error_info (opcode, &error_info);
5854 return FALSE;
5855 }
5856 }
5857
5858 #ifdef DEBUG_AARCH64
5859 static inline void
5860 dump_opcode_operands (const aarch64_opcode *opcode)
5861 {
5862 int i = 0;
5863 while (opcode->operands[i] != AARCH64_OPND_NIL)
5864 {
5865 aarch64_verbose ("\t\t opnd%d: %s", i,
5866 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5867 ? aarch64_get_operand_name (opcode->operands[i])
5868 : aarch64_get_operand_desc (opcode->operands[i]));
5869 ++i;
5870 }
5871 }
5872 #endif /* DEBUG_AARCH64 */
5873
5874 /* This is the guts of the machine-dependent assembler. STR points to a
5875 machine dependent instruction. This function is supposed to emit
5876 the frags/bytes it assembles to. */
5877
5878 void
5879 md_assemble (char *str)
5880 {
5881 char *p = str;
5882 templates *template;
5883 aarch64_opcode *opcode;
5884 aarch64_inst *inst_base;
5885 unsigned saved_cond;
5886
5887 /* Align the previous label if needed. */
5888 if (last_label_seen != NULL)
5889 {
5890 symbol_set_frag (last_label_seen, frag_now);
5891 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5892 S_SET_SEGMENT (last_label_seen, now_seg);
5893 }
5894
5895 inst.reloc.type = BFD_RELOC_UNUSED;
5896
5897 DEBUG_TRACE ("\n\n");
5898 DEBUG_TRACE ("==============================");
5899 DEBUG_TRACE ("Enter md_assemble with %s", str);
5900
5901 template = opcode_lookup (&p);
5902 if (!template)
5903 {
5904 /* It wasn't an instruction, but it might be a register alias created
5905 by an "alias .req reg" directive. */
5906 if (!create_register_alias (str, p))
5907 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5908 str);
5909 return;
5910 }
5911
5912 skip_whitespace (p);
5913 if (*p == ',')
5914 {
5915 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5916 get_mnemonic_name (str), str);
5917 return;
5918 }
5919
5920 init_operand_error_report ();
5921
5922 /* Sections are assumed to start aligned. In an executable section, there
5923 is no MAP_DATA symbol pending, so we only align the address during the
5924 MAP_DATA --> MAP_INSN transition.
5925 For other sections, this is not guaranteed. */
5926 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
5927 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
5928 frag_align_code (2, 0);
5929
5930 saved_cond = inst.cond;
5931 reset_aarch64_instruction (&inst);
5932 inst.cond = saved_cond;
5933
5934 /* Iterate through all opcode entries with the same mnemonic name. */
5935 do
5936 {
5937 opcode = template->opcode;
5938
5939 DEBUG_TRACE ("opcode %s found", opcode->name);
5940 #ifdef DEBUG_AARCH64
5941 if (debug_dump)
5942 dump_opcode_operands (opcode);
5943 #endif /* DEBUG_AARCH64 */
5944
5945 mapping_state (MAP_INSN);
5946
5947 inst_base = &inst.base;
5948 inst_base->opcode = opcode;
5949
5950 /* Truly conditionally executed instructions, e.g. b.cond. */
5951 if (opcode->flags & F_COND)
5952 {
5953 gas_assert (inst.cond != COND_ALWAYS);
5954 inst_base->cond = get_cond_from_value (inst.cond);
5955 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5956 }
5957 else if (inst.cond != COND_ALWAYS)
5958 {
5959 /* Control should not reach here: the assembly looks like a
5960 conditional instruction but the opcode found is unconditional. */
5961 gas_assert (0);
5962 continue;
5963 }
5964
5965 if (parse_operands (p, opcode)
5966 && programmer_friendly_fixup (&inst)
5967 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5968 {
5969 /* Check that this instruction is supported for this CPU. */
5970 if (!opcode->avariant
5971 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5972 {
5973 as_bad (_("selected processor does not support `%s'"), str);
5974 return;
5975 }
5976
5977 warn_unpredictable_ldst (&inst, str);
5978
5979 if (inst.reloc.type == BFD_RELOC_UNUSED
5980 || !inst.reloc.need_libopcodes_p)
5981 output_inst (NULL);
5982 else
5983 {
5984 /* If there is relocation generated for the instruction,
5985 store the instruction information for the future fix-up. */
5986 struct aarch64_inst *copy;
5987 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5988 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5989 abort ();
5990 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5991 output_inst (copy);
5992 }
5993 return;
5994 }
5995
5996 template = template->next;
5997 if (template != NULL)
5998 {
5999 reset_aarch64_instruction (&inst);
6000 inst.cond = saved_cond;
6001 }
6002 }
6003 while (template != NULL);
6004
6005 /* Issue the error messages if any. */
6006 output_operand_error_report (str);
6007 }
6008
6009 /* Various frobbings of labels and their addresses. */
6010
6011 void
6012 aarch64_start_line_hook (void)
6013 {
6014 last_label_seen = NULL;
6015 }
6016
6017 void
6018 aarch64_frob_label (symbolS * sym)
6019 {
6020 last_label_seen = sym;
6021
6022 dwarf2_emit_label (sym);
6023 }
6024
6025 int
6026 aarch64_data_in_code (void)
6027 {
6028 if (!strncmp (input_line_pointer + 1, "data:", 5))
6029 {
6030 *input_line_pointer = '/';
6031 input_line_pointer += 5;
6032 *input_line_pointer = 0;
6033 return 1;
6034 }
6035
6036 return 0;
6037 }
6038
6039 char *
6040 aarch64_canonicalize_symbol_name (char *name)
6041 {
6042 int len;
6043
6044 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
6045 *(name + len - 5) = 0;
6046
6047 return name;
6048 }
6049 \f
6050 /* Table of all register names defined by default. The user can
6051 define additional names with .req. Note that all register names
6052 should appear in both upper and lowercase variants. Some registers
6053 also have mixed-case names. */
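/* For example, after the directive `foo .req x0', the name "foo" can be
   used wherever an X register is expected, e.g. `mov foo, x1'.  */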
6054
6055 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
6056 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
6057 #define REGSET31(p,t) \
6058 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
6059 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
6060 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
6061 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
6062 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
6063 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
6064 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
6065 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
6066 #define REGSET(p,t) \
6067 REGSET31(p,t), REGNUM(p,31,t)
6068
6069 /* These go into aarch64_reg_hsh hash-table. */
6070 static const reg_entry reg_names[] = {
6071 /* Integer registers. */
6072 REGSET31 (x, R_64), REGSET31 (X, R_64),
6073 REGSET31 (w, R_32), REGSET31 (W, R_32),
6074
6075 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
6076 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
6077
6078 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
6079 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
6080
6081 /* Coprocessor register numbers. */
6082 REGSET (c, CN), REGSET (C, CN),
6083
6084 /* Floating-point single precision registers. */
6085 REGSET (s, FP_S), REGSET (S, FP_S),
6086
6087 /* Floating-point double precision registers. */
6088 REGSET (d, FP_D), REGSET (D, FP_D),
6089
6090 /* Floating-point half precision registers. */
6091 REGSET (h, FP_H), REGSET (H, FP_H),
6092
6093 /* Floating-point byte precision registers. */
6094 REGSET (b, FP_B), REGSET (B, FP_B),
6095
6096 /* Floating-point quad precision registers. */
6097 REGSET (q, FP_Q), REGSET (Q, FP_Q),
6098
6099 /* FP/SIMD registers. */
6100 REGSET (v, VN), REGSET (V, VN),
6101 };
6102
6103 #undef REGDEF
6104 #undef REGNUM
6105 #undef REGSET
6106
6107 #define N 1
6108 #define n 0
6109 #define Z 1
6110 #define z 0
6111 #define C 1
6112 #define c 0
6113 #define V 1
6114 #define v 0
6115 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
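/* For example, B (N, z, C, v) is 0b1010, the immediate used by the "NzCv"
   entry below, which sets only the N and C flags.  */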
6116 static const asm_nzcv nzcv_names[] = {
6117 {"nzcv", B (n, z, c, v)},
6118 {"nzcV", B (n, z, c, V)},
6119 {"nzCv", B (n, z, C, v)},
6120 {"nzCV", B (n, z, C, V)},
6121 {"nZcv", B (n, Z, c, v)},
6122 {"nZcV", B (n, Z, c, V)},
6123 {"nZCv", B (n, Z, C, v)},
6124 {"nZCV", B (n, Z, C, V)},
6125 {"Nzcv", B (N, z, c, v)},
6126 {"NzcV", B (N, z, c, V)},
6127 {"NzCv", B (N, z, C, v)},
6128 {"NzCV", B (N, z, C, V)},
6129 {"NZcv", B (N, Z, c, v)},
6130 {"NZcV", B (N, Z, c, V)},
6131 {"NZCv", B (N, Z, C, v)},
6132 {"NZCV", B (N, Z, C, V)}
6133 };
6134
6135 #undef N
6136 #undef n
6137 #undef Z
6138 #undef z
6139 #undef C
6140 #undef c
6141 #undef V
6142 #undef v
6143 #undef B
6144 \f
6145 /* MD interface: bits in the object file. */
6146
6147 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6148 for use in the output file, and store them in the array pointed to by buf.
6149 This knows about the endian-ness of the target machine and does
6150 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
6151 2 (short) and 4 (long). Floating-point numbers are put out as a series of
6152 LITTLENUMS (shorts, here at least). */
6153
6154 void
6155 md_number_to_chars (char *buf, valueT val, int n)
6156 {
6157 if (target_big_endian)
6158 number_to_chars_bigendian (buf, val, n);
6159 else
6160 number_to_chars_littleendian (buf, val, n);
6161 }
6162
6163 /* MD interface: Sections. */
6164
6165 /* Estimate the size of a frag before relaxing. Assume everything fits in
6166 4 bytes. */
6167
6168 int
6169 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6170 {
6171 fragp->fr_var = 4;
6172 return 4;
6173 }
6174
6175 /* Round up a section size to the appropriate boundary. */
6176
6177 valueT
6178 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
6179 {
6180 return size;
6181 }
6182
6183 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6184 of an rs_align_code fragment.
6185
6186 Here we fill the frag with the appropriate info for padding the
6187 output stream. The resulting frag will consist of a fixed (fr_fix)
6188 and of a repeating (fr_var) part.
6189
6190 The fixed content is always emitted before the repeating content and
6191 these two parts are used as follows in constructing the output:
6192 - the fixed part will be used to align to a valid instruction word
6193 boundary, in case we start at a misaligned address; as no
6194 executable instruction can live at the misaligned location, we
6195 simply fill with zeros;
6196 - the variable part will be used to cover the remaining padding and
6197 we fill using the AArch64 NOP instruction.
6198
6199 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6200 enough storage space for up to 3 bytes of padding back to a valid
6201 instruction alignment and exactly 4 bytes to store the NOP pattern. */
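/* For example, when aligning code that currently ends 6 bytes into a
   16-byte boundary, the fixed part is 2 zero bytes (restoring 4-byte
   instruction alignment) and the 4-byte NOP pattern is then repeated
   twice to fill the remaining 8 bytes.  */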
6202
6203 void
6204 aarch64_handle_align (fragS * fragP)
6205 {
6206 /* NOP = d503201f */
6207 /* AArch64 instructions are always little-endian. */
6208 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6209
6210 int bytes, fix, noop_size;
6211 char *p;
6212
6213 if (fragP->fr_type != rs_align_code)
6214 return;
6215
6216 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6217 p = fragP->fr_literal + fragP->fr_fix;
6218
6219 #ifdef OBJ_ELF
6220 gas_assert (fragP->tc_frag_data.recorded);
6221 #endif
6222
6223 noop_size = sizeof (aarch64_noop);
6224
6225 fix = bytes & (noop_size - 1);
6226 if (fix)
6227 {
6228 #ifdef OBJ_ELF
6229 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6230 #endif
6231 memset (p, 0, fix);
6232 p += fix;
6233 fragP->fr_fix += fix;
6234 }
6235
6236 if (noop_size)
6237 memcpy (p, aarch64_noop, noop_size);
6238 fragP->fr_var = noop_size;
6239 }
6240
6241 /* Perform target specific initialisation of a frag.
6242 Note - despite the name this initialisation is not done when the frag
6243 is created, but only when its type is assigned. A frag can be created
6244 and used a long time before its type is set, so beware of assuming that
6245 this initialisation is performed first. */
6246
6247 #ifndef OBJ_ELF
6248 void
6249 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6250 int max_chars ATTRIBUTE_UNUSED)
6251 {
6252 }
6253
6254 #else /* OBJ_ELF is defined. */
6255 void
6256 aarch64_init_frag (fragS * fragP, int max_chars)
6257 {
6258 /* Record a mapping symbol for alignment frags. We will delete this
6259 later if the alignment ends up empty. */
6260 if (!fragP->tc_frag_data.recorded)
6261 fragP->tc_frag_data.recorded = 1;
6262
6263 switch (fragP->fr_type)
6264 {
6265 case rs_align:
6266 case rs_align_test:
6267 case rs_fill:
6268 mapping_state_2 (MAP_DATA, max_chars);
6269 break;
6270 case rs_align_code:
6271 mapping_state_2 (MAP_INSN, max_chars);
6272 break;
6273 default:
6274 break;
6275 }
6276 }
6277 \f
6278 /* Initialize the DWARF-2 unwind information for this procedure. */
6279
6280 void
6281 tc_aarch64_frame_initial_instructions (void)
6282 {
6283 cfi_add_CFA_def_cfa (REG_SP, 0);
6284 }
6285 #endif /* OBJ_ELF */
6286
6287 /* Convert REGNAME to a DWARF-2 register number. */
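/* For example, "x5" and "w5" map to DWARF register number 5, while "b5",
   "h5", "s5", "d5" and "q5" all map to 69 (5 + 64).  */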
6288
6289 int
6290 tc_aarch64_regname_to_dw2regnum (char *regname)
6291 {
6292 const reg_entry *reg = parse_reg (&regname);
6293 if (reg == NULL)
6294 return -1;
6295
6296 switch (reg->type)
6297 {
6298 case REG_TYPE_SP_32:
6299 case REG_TYPE_SP_64:
6300 case REG_TYPE_R_32:
6301 case REG_TYPE_R_64:
6302 return reg->number;
6303
6304 case REG_TYPE_FP_B:
6305 case REG_TYPE_FP_H:
6306 case REG_TYPE_FP_S:
6307 case REG_TYPE_FP_D:
6308 case REG_TYPE_FP_Q:
6309 return reg->number + 64;
6310
6311 default:
6312 break;
6313 }
6314 return -1;
6315 }
6316
6317 /* Implement DWARF2_ADDR_SIZE. */
6318
6319 int
6320 aarch64_dwarf2_addr_size (void)
6321 {
6322 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6323 if (ilp32_p)
6324 return 4;
6325 #endif
6326 return bfd_arch_bits_per_address (stdoutput) / 8;
6327 }
6328
6329 /* MD interface: Symbol and relocation handling. */
6330
6331 /* Return the address within the segment that a PC-relative fixup is
6332 relative to. For AArch64, PC-relative fixups applied to instructions
6333 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6334
6335 long
6336 md_pcrel_from_section (fixS * fixP, segT seg)
6337 {
6338 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6339
6340 /* If this is pc-relative and we are going to emit a relocation
6341 then we just want to put out any pipeline compensation that the linker
6342 will need. Otherwise we want to use the calculated base. */
6343 if (fixP->fx_pcrel
6344 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6345 || aarch64_force_relocation (fixP)))
6346 base = 0;
6347
6348 /* AArch64 should be consistent for all pc-relative relocations. */
6349 return base + AARCH64_PCREL_OFFSET;
6350 }
6351
6352 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
6353 Otherwise we have no need to provide default values for symbols. */
6354
6355 symbolS *
6356 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6357 {
6358 #ifdef OBJ_ELF
6359 if (name[0] == '_' && name[1] == 'G'
6360 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6361 {
6362 if (!GOT_symbol)
6363 {
6364 if (symbol_find (name))
6365 as_bad (_("GOT already in the symbol table"));
6366
6367 GOT_symbol = symbol_new (name, undefined_section,
6368 (valueT) 0, &zero_address_frag);
6369 }
6370
6371 return GOT_symbol;
6372 }
6373 #endif
6374
6375 return 0;
6376 }
6377
6378 /* Return non-zero if the indicated VALUE has overflowed the maximum
6379 range expressible by an unsigned number with the indicated number of
6380 BITS. */
6381
6382 static bfd_boolean
6383 unsigned_overflow (valueT value, unsigned bits)
6384 {
6385 valueT lim;
6386 if (bits >= sizeof (valueT) * 8)
6387 return FALSE;
6388 lim = (valueT) 1 << bits;
6389 return (value >= lim);
6390 }
6391
6392
6393 /* Return non-zero if the indicated VALUE has overflowed the maximum
6394 range expressible by a signed number with the indicated number of
6395 BITS. */
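/* For example, with BITS == 16, unsigned_overflow accepts 0..0xffff and
   signed_overflow accepts -0x8000..0x7fff; any value outside those ranges
   makes the corresponding function return TRUE.  */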
6396
6397 static bfd_boolean
6398 signed_overflow (offsetT value, unsigned bits)
6399 {
6400 offsetT lim;
6401 if (bits >= sizeof (offsetT) * 8)
6402 return FALSE;
6403 lim = (offsetT) 1 << (bits - 1);
6404 return (value < -lim || value >= lim);
6405 }
6406
6407 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6408 unsigned immediate offset load/store instruction, try to encode it as
6409 an unscaled, 9-bit, signed immediate offset load/store instruction.
6410 Return TRUE if it is successful; otherwise return FALSE.
6411
6412 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
6413 in response to the standard LDR/STR mnemonics when the immediate offset is
6414 unambiguous, i.e. when it is negative or unaligned. */
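/* For example, `ldr x0, [x1, #-8]' and `ldr x0, [x1, #1]' cannot use the
   scaled, unsigned 12-bit offset form (the offset is negative in the first
   case and unaligned in the second), so they are assembled as
   `ldur x0, [x1, #-8]' and `ldur x0, [x1, #1]' respectively.  */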
6415
6416 static bfd_boolean
6417 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6418 {
6419 int idx;
6420 enum aarch64_op new_op;
6421 const aarch64_opcode *new_opcode;
6422
6423 gas_assert (instr->opcode->iclass == ldst_pos);
6424
6425 switch (instr->opcode->op)
6426 {
6427 case OP_LDRB_POS:new_op = OP_LDURB; break;
6428 case OP_STRB_POS: new_op = OP_STURB; break;
6429 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6430 case OP_LDRH_POS: new_op = OP_LDURH; break;
6431 case OP_STRH_POS: new_op = OP_STURH; break;
6432 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6433 case OP_LDR_POS: new_op = OP_LDUR; break;
6434 case OP_STR_POS: new_op = OP_STUR; break;
6435 case OP_LDRF_POS: new_op = OP_LDURV; break;
6436 case OP_STRF_POS: new_op = OP_STURV; break;
6437 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6438 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6439 default: new_op = OP_NIL; break;
6440 }
6441
6442 if (new_op == OP_NIL)
6443 return FALSE;
6444
6445 new_opcode = aarch64_get_opcode (new_op);
6446 gas_assert (new_opcode != NULL);
6447
6448 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6449 instr->opcode->op, new_opcode->op);
6450
6451 aarch64_replace_opcode (instr, new_opcode);
6452
6453 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6454 qualifier matching may fail because the out-of-date qualifier will
6455 prevent the operand from being updated with a new and correct qualifier. */
6456 idx = aarch64_operand_index (instr->opcode->operands,
6457 AARCH64_OPND_ADDR_SIMM9);
6458 gas_assert (idx == 1);
6459 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6460
6461 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6462
6463 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6464 return FALSE;
6465
6466 return TRUE;
6467 }
6468
6469 /* Called by fix_insn to fix a MOV immediate alias instruction.
6470
6471 Operand for a generic move immediate instruction, which is an alias
6472 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6473 a 32-bit/64-bit immediate value into a general register. An assembler error
6474 shall result if the immediate cannot be created by a single one of these
6475 instructions. If there is a choice, then to ensure reversibility an
6476 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
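/* For example, `mov x0, #0x12340000' becomes `movz x0, #0x1234, lsl #16',
   `mov x0, #-2' becomes `movn x0, #0x1', and `mov x0, #0x5555555555555555'
   becomes an ORR of XZR with that bitmask immediate.  */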
6477
6478 static void
6479 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6480 {
6481 const aarch64_opcode *opcode;
6482
6483 /* Need to check if the destination is SP/ZR. The check has to be done
6484 before any aarch64_replace_opcode. */
6485 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6486 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6487
6488 instr->operands[1].imm.value = value;
6489 instr->operands[1].skip = 0;
6490
6491 if (try_mov_wide_p)
6492 {
6493 /* Try the MOVZ alias. */
6494 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6495 aarch64_replace_opcode (instr, opcode);
6496 if (aarch64_opcode_encode (instr->opcode, instr,
6497 &instr->value, NULL, NULL))
6498 {
6499 put_aarch64_insn (buf, instr->value);
6500 return;
6501 }
6502 /* Try the MOVN alias. */
6503 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6504 aarch64_replace_opcode (instr, opcode);
6505 if (aarch64_opcode_encode (instr->opcode, instr,
6506 &instr->value, NULL, NULL))
6507 {
6508 put_aarch64_insn (buf, instr->value);
6509 return;
6510 }
6511 }
6512
6513 if (try_mov_bitmask_p)
6514 {
6515 /* Try the ORR alias. */
6516 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6517 aarch64_replace_opcode (instr, opcode);
6518 if (aarch64_opcode_encode (instr->opcode, instr,
6519 &instr->value, NULL, NULL))
6520 {
6521 put_aarch64_insn (buf, instr->value);
6522 return;
6523 }
6524 }
6525
6526 as_bad_where (fixP->fx_file, fixP->fx_line,
6527 _("immediate cannot be moved by a single instruction"));
6528 }
6529
6530 /* An immediate-related instruction operand may have a symbol used
6531 in the assembly, e.g.
6532
6533 mov w0, u32
6534 .set u32, 0x00ffff00
6535
6536 At the time when the assembly instruction is parsed, a referenced symbol,
6537 like 'u32' in the above example, may not have been seen; a fixS is created
6538 in such a case and is handled here after symbols have been resolved.
6539 Instruction is fixed up with VALUE using the information in *FIXP plus
6540 extra information in FLAGS.
6541
6542 This function is called by md_apply_fix to fix up instructions that need
6543 a fix-up described above but does not involve any linker-time relocation. */
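/* In the example above, once `u32' has been resolved to 0x00ffff00 the MOV
   is fixed up via the AARCH64_OPND_IMM_MOV case below: the value is not a
   MOVZ/MOVN pattern but is a valid bitmask immediate, so the instruction is
   encoded as an ORR of WZR with #0xffff00.  */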
6544
6545 static void
6546 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6547 {
6548 int idx;
6549 uint32_t insn;
6550 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6551 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6552 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6553
6554 if (new_inst)
6555 {
6556 /* Now the instruction is about to be fixed-up, so the operand that
6557 was previously marked as 'ignored' needs to be unmarked in order
6558 to get the encoding done properly. */
6559 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6560 new_inst->operands[idx].skip = 0;
6561 }
6562
6563 gas_assert (opnd != AARCH64_OPND_NIL);
6564
6565 switch (opnd)
6566 {
6567 case AARCH64_OPND_EXCEPTION:
6568 if (unsigned_overflow (value, 16))
6569 as_bad_where (fixP->fx_file, fixP->fx_line,
6570 _("immediate out of range"));
6571 insn = get_aarch64_insn (buf);
6572 insn |= encode_svc_imm (value);
6573 put_aarch64_insn (buf, insn);
6574 break;
6575
6576 case AARCH64_OPND_AIMM:
6577 /* ADD or SUB with immediate.
6578 NOTE this assumes we come here with a add/sub shifted reg encoding
6579 3 322|2222|2 2 2 21111 111111
6580 1 098|7654|3 2 1 09876 543210 98765 43210
6581 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6582 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6583 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6584 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6585 ->
6586 3 322|2222|2 2 221111111111
6587 1 098|7654|3 2 109876543210 98765 43210
6588 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6589 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6590 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6591 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6592 Fields sf Rn Rd are already set. */
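/* For example, if the symbol resolves to -4 the ADD is re-encoded as the
   corresponding SUB with immediate 4; if it resolves to 0x5000, which does
   not fit in 12 bits but is a multiple of 0x1000, the value is shifted down
   to 5 and LSL #12 is encoded (assuming no explicit shift was written in
   the source).  */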
6593 insn = get_aarch64_insn (buf);
6594 if (value < 0)
6595 {
6596 /* Add <-> sub. */
6597 insn = reencode_addsub_switch_add_sub (insn);
6598 value = -value;
6599 }
6600
6601 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6602 && unsigned_overflow (value, 12))
6603 {
6604 /* Try to shift the value by 12 to make it fit. */
6605 if (((value >> 12) << 12) == value
6606 && ! unsigned_overflow (value, 12 + 12))
6607 {
6608 value >>= 12;
6609 insn |= encode_addsub_imm_shift_amount (1);
6610 }
6611 }
6612
6613 if (unsigned_overflow (value, 12))
6614 as_bad_where (fixP->fx_file, fixP->fx_line,
6615 _("immediate out of range"));
6616
6617 insn |= encode_addsub_imm (value);
6618
6619 put_aarch64_insn (buf, insn);
6620 break;
6621
6622 case AARCH64_OPND_SIMD_IMM:
6623 case AARCH64_OPND_SIMD_IMM_SFT:
6624 case AARCH64_OPND_LIMM:
6625 /* Bit mask immediate. */
6626 gas_assert (new_inst != NULL);
6627 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6628 new_inst->operands[idx].imm.value = value;
6629 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6630 &new_inst->value, NULL, NULL))
6631 put_aarch64_insn (buf, new_inst->value);
6632 else
6633 as_bad_where (fixP->fx_file, fixP->fx_line,
6634 _("invalid immediate"));
6635 break;
6636
6637 case AARCH64_OPND_HALF:
6638 /* 16-bit unsigned immediate. */
6639 if (unsigned_overflow (value, 16))
6640 as_bad_where (fixP->fx_file, fixP->fx_line,
6641 _("immediate out of range"));
6642 insn = get_aarch64_insn (buf);
6643 insn |= encode_movw_imm (value & 0xffff);
6644 put_aarch64_insn (buf, insn);
6645 break;
6646
6647 case AARCH64_OPND_IMM_MOV:
6648 /* Operand for a generic move immediate instruction, which is
6649 an alias instruction that generates a single MOVZ, MOVN or ORR
6650 instruction to load a 32-bit/64-bit immediate value into a general
6651 register. An assembler error shall result if the immediate cannot be
6652 created by a single one of these instructions. If there is a choice,
6653 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6654 and MOVZ or MOVN to ORR. */
6655 gas_assert (new_inst != NULL);
6656 fix_mov_imm_insn (fixP, buf, new_inst, value);
6657 break;
6658
6659 case AARCH64_OPND_ADDR_SIMM7:
6660 case AARCH64_OPND_ADDR_SIMM9:
6661 case AARCH64_OPND_ADDR_SIMM9_2:
6662 case AARCH64_OPND_ADDR_UIMM12:
6663 /* Immediate offset in an address. */
6664 insn = get_aarch64_insn (buf);
6665
6666 gas_assert (new_inst != NULL && new_inst->value == insn);
6667 gas_assert (new_inst->opcode->operands[1] == opnd
6668 || new_inst->opcode->operands[2] == opnd);
6669
6670 /* Get the index of the address operand. */
6671 if (new_inst->opcode->operands[1] == opnd)
6672 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6673 idx = 1;
6674 else
6675 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6676 idx = 2;
6677
6678 /* Update the resolved offset value. */
6679 new_inst->operands[idx].addr.offset.imm = value;
6680
6681 /* Encode/fix-up. */
6682 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6683 &new_inst->value, NULL, NULL))
6684 {
6685 put_aarch64_insn (buf, new_inst->value);
6686 break;
6687 }
6688 else if (new_inst->opcode->iclass == ldst_pos
6689 && try_to_encode_as_unscaled_ldst (new_inst))
6690 {
6691 put_aarch64_insn (buf, new_inst->value);
6692 break;
6693 }
6694
6695 as_bad_where (fixP->fx_file, fixP->fx_line,
6696 _("immediate offset out of range"));
6697 break;
6698
6699 default:
6700 gas_assert (0);
6701 as_fatal (_("unhandled operand code %d"), opnd);
6702 }
6703 }
6704
6705 /* Apply a fixup (fixP) to segment data, once it has been determined
6706 by our caller that we have all the info we need to fix it up.
6707
6708 Parameter valP is the pointer to the value of the bits. */
6709
6710 void
6711 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6712 {
6713 offsetT value = *valP;
6714 uint32_t insn;
6715 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6716 int scale;
6717 unsigned flags = fixP->fx_addnumber;
6718
6719 DEBUG_TRACE ("\n\n");
6720 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6721 DEBUG_TRACE ("Enter md_apply_fix");
6722
6723 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6724
6725 /* Note whether this will delete the relocation. */
6726
6727 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6728 fixP->fx_done = 1;
6729
6730 /* Process the relocations. */
6731 switch (fixP->fx_r_type)
6732 {
6733 case BFD_RELOC_NONE:
6734 /* This will need to go in the object file. */
6735 fixP->fx_done = 0;
6736 break;
6737
6738 case BFD_RELOC_8:
6739 case BFD_RELOC_8_PCREL:
6740 if (fixP->fx_done || !seg->use_rela_p)
6741 md_number_to_chars (buf, value, 1);
6742 break;
6743
6744 case BFD_RELOC_16:
6745 case BFD_RELOC_16_PCREL:
6746 if (fixP->fx_done || !seg->use_rela_p)
6747 md_number_to_chars (buf, value, 2);
6748 break;
6749
6750 case BFD_RELOC_32:
6751 case BFD_RELOC_32_PCREL:
6752 if (fixP->fx_done || !seg->use_rela_p)
6753 md_number_to_chars (buf, value, 4);
6754 break;
6755
6756 case BFD_RELOC_64:
6757 case BFD_RELOC_64_PCREL:
6758 if (fixP->fx_done || !seg->use_rela_p)
6759 md_number_to_chars (buf, value, 8);
6760 break;
6761
6762 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6763 /* We claim that these fixups have been processed here, even if
6764 in fact we generate an error because we do not have a reloc
6765 for them, so tc_gen_reloc() will reject them. */
6766 fixP->fx_done = 1;
6767 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6768 {
6769 as_bad_where (fixP->fx_file, fixP->fx_line,
6770 _("undefined symbol %s used as an immediate value"),
6771 S_GET_NAME (fixP->fx_addsy));
6772 goto apply_fix_return;
6773 }
6774 fix_insn (fixP, flags, value);
6775 break;
6776
6777 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6778 if (fixP->fx_done || !seg->use_rela_p)
6779 {
6780 if (value & 3)
6781 as_bad_where (fixP->fx_file, fixP->fx_line,
6782 _("pc-relative load offset not word aligned"));
6783 if (signed_overflow (value, 21))
6784 as_bad_where (fixP->fx_file, fixP->fx_line,
6785 _("pc-relative load offset out of range"));
6786 insn = get_aarch64_insn (buf);
6787 insn |= encode_ld_lit_ofs_19 (value >> 2);
6788 put_aarch64_insn (buf, insn);
6789 }
6790 break;
6791
6792 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6793 if (fixP->fx_done || !seg->use_rela_p)
6794 {
6795 if (signed_overflow (value, 21))
6796 as_bad_where (fixP->fx_file, fixP->fx_line,
6797 _("pc-relative address offset out of range"));
6798 insn = get_aarch64_insn (buf);
6799 insn |= encode_adr_imm (value);
6800 put_aarch64_insn (buf, insn);
6801 }
6802 break;
6803
6804 case BFD_RELOC_AARCH64_BRANCH19:
6805 if (fixP->fx_done || !seg->use_rela_p)
6806 {
6807 if (value & 3)
6808 as_bad_where (fixP->fx_file, fixP->fx_line,
6809 _("conditional branch target not word aligned"));
6810 if (signed_overflow (value, 21))
6811 as_bad_where (fixP->fx_file, fixP->fx_line,
6812 _("conditional branch out of range"));
6813 insn = get_aarch64_insn (buf);
6814 insn |= encode_cond_branch_ofs_19 (value >> 2);
6815 put_aarch64_insn (buf, insn);
6816 }
6817 break;
6818
6819 case BFD_RELOC_AARCH64_TSTBR14:
6820 if (fixP->fx_done || !seg->use_rela_p)
6821 {
6822 if (value & 3)
6823 as_bad_where (fixP->fx_file, fixP->fx_line,
6824 _("conditional branch target not word aligned"));
6825 if (signed_overflow (value, 16))
6826 as_bad_where (fixP->fx_file, fixP->fx_line,
6827 _("conditional branch out of range"));
6828 insn = get_aarch64_insn (buf);
6829 insn |= encode_tst_branch_ofs_14 (value >> 2);
6830 put_aarch64_insn (buf, insn);
6831 }
6832 break;
6833
6834 case BFD_RELOC_AARCH64_CALL26:
6835 case BFD_RELOC_AARCH64_JUMP26:
6836 if (fixP->fx_done || !seg->use_rela_p)
6837 {
6838 if (value & 3)
6839 as_bad_where (fixP->fx_file, fixP->fx_line,
6840 _("branch target not word aligned"));
6841 if (signed_overflow (value, 28))
6842 as_bad_where (fixP->fx_file, fixP->fx_line,
6843 _("branch out of range"));
6844 insn = get_aarch64_insn (buf);
6845 insn |= encode_branch_ofs_26 (value >> 2);
6846 put_aarch64_insn (buf, insn);
6847 }
6848 break;
6849
6850 case BFD_RELOC_AARCH64_MOVW_G0:
6851 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6852 case BFD_RELOC_AARCH64_MOVW_G0_S:
6853 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6854 scale = 0;
6855 goto movw_common;
6856 case BFD_RELOC_AARCH64_MOVW_G1:
6857 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6858 case BFD_RELOC_AARCH64_MOVW_G1_S:
6859 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6860 scale = 16;
6861 goto movw_common;
6862 case BFD_RELOC_AARCH64_MOVW_G2:
6863 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6864 case BFD_RELOC_AARCH64_MOVW_G2_S:
6865 scale = 32;
6866 goto movw_common;
6867 case BFD_RELOC_AARCH64_MOVW_G3:
6868 scale = 48;
6869 movw_common:
6870 if (fixP->fx_done || !seg->use_rela_p)
6871 {
6872 insn = get_aarch64_insn (buf);
6873
6874 if (!fixP->fx_done)
6875 {
6876 /* REL signed addend must fit in 16 bits */
6877 if (signed_overflow (value, 16))
6878 as_bad_where (fixP->fx_file, fixP->fx_line,
6879 _("offset out of range"));
6880 }
6881 else
6882 {
6883 /* Check for overflow and scale. */
6884 switch (fixP->fx_r_type)
6885 {
6886 case BFD_RELOC_AARCH64_MOVW_G0:
6887 case BFD_RELOC_AARCH64_MOVW_G1:
6888 case BFD_RELOC_AARCH64_MOVW_G2:
6889 case BFD_RELOC_AARCH64_MOVW_G3:
6890 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6891 if (unsigned_overflow (value, scale + 16))
6892 as_bad_where (fixP->fx_file, fixP->fx_line,
6893 _("unsigned value out of range"));
6894 break;
6895 case BFD_RELOC_AARCH64_MOVW_G0_S:
6896 case BFD_RELOC_AARCH64_MOVW_G1_S:
6897 case BFD_RELOC_AARCH64_MOVW_G2_S:
6898 /* NOTE: We can only come here with movz or movn. */
6899 if (signed_overflow (value, scale + 16))
6900 as_bad_where (fixP->fx_file, fixP->fx_line,
6901 _("signed value out of range"));
6902 if (value < 0)
6903 {
6904 /* Force use of MOVN. */
6905 value = ~value;
6906 insn = reencode_movzn_to_movn (insn);
6907 }
6908 else
6909 {
6910 /* Force use of MOVZ. */
6911 insn = reencode_movzn_to_movz (insn);
6912 }
6913 break;
6914 default:
6915 /* Unchecked relocations. */
6916 break;
6917 }
6918 value >>= scale;
6919 }
6920
6921 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6922 insn |= encode_movw_imm (value & 0xffff);
6923
6924 put_aarch64_insn (buf, insn);
6925 }
6926 break;
6927
6928 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6929 fixP->fx_r_type = (ilp32_p
6930 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6931 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6932 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6933 /* Should always be exported to object file, see
6934 aarch64_force_relocation(). */
6935 gas_assert (!fixP->fx_done);
6936 gas_assert (seg->use_rela_p);
6937 break;
6938
6939 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6940 fixP->fx_r_type = (ilp32_p
6941 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6942 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6943 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6944 /* Should always be exported to object file, see
6945 aarch64_force_relocation(). */
6946 gas_assert (!fixP->fx_done);
6947 gas_assert (seg->use_rela_p);
6948 break;
6949
6950 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6951 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6952 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6953 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6954 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6955 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6956 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6957 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6958 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6959 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6960 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6961 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6962 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6963 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6964 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6965 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6966 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6967 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
6968 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
6969 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6970 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6971 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6972 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6973 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
6974 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
6975 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
6976 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
6977 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
6978 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
6979 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
6980 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
6981 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
6982 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6983 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
6984 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
6985 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
6986 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6987 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6988 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6989 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6990 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6991 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6992 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6993 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6994 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6995 /* Should always be exported to object file, see
6996 aarch64_force_relocation(). */
6997 gas_assert (!fixP->fx_done);
6998 gas_assert (seg->use_rela_p);
6999 break;
7000
7001 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7002 /* Should always be exported to object file, see
7003 aarch64_force_relocation(). */
7004 fixP->fx_r_type = (ilp32_p
7005 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7006 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7007 gas_assert (!fixP->fx_done);
7008 gas_assert (seg->use_rela_p);
7009 break;
7010
7011 case BFD_RELOC_AARCH64_ADD_LO12:
7012 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7013 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7014 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7015 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7016 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7017 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7018 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7019 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7020 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7021 case BFD_RELOC_AARCH64_LDST128_LO12:
7022 case BFD_RELOC_AARCH64_LDST16_LO12:
7023 case BFD_RELOC_AARCH64_LDST32_LO12:
7024 case BFD_RELOC_AARCH64_LDST64_LO12:
7025 case BFD_RELOC_AARCH64_LDST8_LO12:
7026 /* Should always be exported to object file, see
7027 aarch64_force_relocation(). */
7028 gas_assert (!fixP->fx_done);
7029 gas_assert (seg->use_rela_p);
7030 break;
7031
7032 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7033 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7034 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7035 break;
7036
7037 case BFD_RELOC_UNUSED:
7038 /* An error will already have been reported. */
7039 break;
7040
7041 default:
7042 as_bad_where (fixP->fx_file, fixP->fx_line,
7043 _("unexpected %s fixup"),
7044 bfd_get_reloc_code_name (fixP->fx_r_type));
7045 break;
7046 }
7047
7048 apply_fix_return:
7049 /* Free the allocated struct aarch64_inst.
7050 N.B. currently only a very limited number of fix-up types actually use
7051 this field, so the impact on performance should be minimal. */
7052 if (fixP->tc_fix_data.inst != NULL)
7053 free (fixP->tc_fix_data.inst);
7054
7055 return;
7056 }
7057
7058 /* Translate internal representation of relocation info to BFD target
7059 format. */
7060
7061 arelent *
7062 tc_gen_reloc (asection * section, fixS * fixp)
7063 {
7064 arelent *reloc;
7065 bfd_reloc_code_real_type code;
7066
7067 reloc = xmalloc (sizeof (arelent));
7068
7069 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
7070 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7071 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7072
7073 if (fixp->fx_pcrel)
7074 {
7075 if (section->use_rela_p)
7076 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7077 else
7078 fixp->fx_offset = reloc->address;
7079 }
7080 reloc->addend = fixp->fx_offset;
7081
7082 code = fixp->fx_r_type;
7083 switch (code)
7084 {
7085 case BFD_RELOC_16:
7086 if (fixp->fx_pcrel)
7087 code = BFD_RELOC_16_PCREL;
7088 break;
7089
7090 case BFD_RELOC_32:
7091 if (fixp->fx_pcrel)
7092 code = BFD_RELOC_32_PCREL;
7093 break;
7094
7095 case BFD_RELOC_64:
7096 if (fixp->fx_pcrel)
7097 code = BFD_RELOC_64_PCREL;
7098 break;
7099
7100 default:
7101 break;
7102 }
7103
7104 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7105 if (reloc->howto == NULL)
7106 {
7107 as_bad_where (fixp->fx_file, fixp->fx_line,
7108 _
7109 ("cannot represent %s relocation in this object file format"),
7110 bfd_get_reloc_code_name (code));
7111 return NULL;
7112 }
7113
7114 return reloc;
7115 }
7116
7117 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7118
7119 void
7120 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7121 {
7122 bfd_reloc_code_real_type type;
7123 int pcrel = 0;
7124
7125 /* Pick a reloc.
7126 FIXME: @@ Should look at CPU word size. */
7127 switch (size)
7128 {
7129 case 1:
7130 type = BFD_RELOC_8;
7131 break;
7132 case 2:
7133 type = BFD_RELOC_16;
7134 break;
7135 case 4:
7136 type = BFD_RELOC_32;
7137 break;
7138 case 8:
7139 type = BFD_RELOC_64;
7140 break;
7141 default:
7142 as_bad (_("cannot do %u-byte relocation"), size);
7143 type = BFD_RELOC_UNUSED;
7144 break;
7145 }
7146
7147 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7148 }
7149
7150 int
7151 aarch64_force_relocation (struct fix *fixp)
7152 {
7153 switch (fixp->fx_r_type)
7154 {
7155 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7156 /* Perform these "immediate" internal relocations
7157 even if the symbol is extern or weak. */
7158 return 0;
7159
7160 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7161 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7162 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7163 /* Pseudo relocs that need to be fixed up according to
7164 ilp32_p. */
7165 return 0;
7166
7167 case BFD_RELOC_AARCH64_ADD_LO12:
7168 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7169 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7170 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7171 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7172 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7173 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7174 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7175 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7176 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7177 case BFD_RELOC_AARCH64_LDST128_LO12:
7178 case BFD_RELOC_AARCH64_LDST16_LO12:
7179 case BFD_RELOC_AARCH64_LDST32_LO12:
7180 case BFD_RELOC_AARCH64_LDST64_LO12:
7181 case BFD_RELOC_AARCH64_LDST8_LO12:
7182 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7183 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7184 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7185 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7186 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7187 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7188 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7189 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7190 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7191 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7192 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7193 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7194 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7195 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7196 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7197 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7198 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7199 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7200 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7201 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7202 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7203 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7204 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7205 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7206 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7207 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7208 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7209 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7210 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7211 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7212 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7213 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7214 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7215 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7216 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7217 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7218 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7219 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7220 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7221 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7222 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7223 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7224 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7225 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7226 /* Always leave these relocations for the linker. */
7227 return 1;
7228
7229 default:
7230 break;
7231 }
7232
7233 return generic_force_reloc (fixp);
7234 }
7235
7236 #ifdef OBJ_ELF
7237
7238 const char *
7239 elf64_aarch64_target_format (void)
7240 {
7241 if (target_big_endian)
7242 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7243 else
7244 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7245 }
7246
7247 void
7248 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
7249 {
7250 elf_frob_symbol (symp, puntp);
7251 }
7252 #endif
7253
7254 /* MD interface: Finalization. */
7255
7256 /* A good place to do this, although this was probably not intended
7257 for this kind of use. We need to dump the literal pool before
7258 references are made to a null symbol pointer. */
7259
7260 void
7261 aarch64_cleanup (void)
7262 {
7263 literal_pool *pool;
7264
7265 for (pool = list_of_pools; pool; pool = pool->next)
7266 {
7267 /* Put it at the end of the relevant section. */
7268 subseg_set (pool->section, pool->sub_section);
7269 s_ltorg (0);
7270 }
7271 }
7272
7273 #ifdef OBJ_ELF
7274 /* Remove any excess mapping symbols generated for alignment frags in
7275 SEC. We may have created a mapping symbol before a zero byte
7276 alignment; remove it if there's a mapping symbol after the
7277 alignment. */
7278 static void
7279 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
7280 void *dummy ATTRIBUTE_UNUSED)
7281 {
7282 segment_info_type *seginfo = seg_info (sec);
7283 fragS *fragp;
7284
7285 if (seginfo == NULL || seginfo->frchainP == NULL)
7286 return;
7287
7288 for (fragp = seginfo->frchainP->frch_root;
7289 fragp != NULL; fragp = fragp->fr_next)
7290 {
7291 symbolS *sym = fragp->tc_frag_data.last_map;
7292 fragS *next = fragp->fr_next;
7293
7294 /* Variable-sized frags have been converted to fixed size by
7295 this point. But if this was variable-sized to start with,
7296 there will be a fixed-size frag after it. So don't handle
7297 next == NULL. */
7298 if (sym == NULL || next == NULL)
7299 continue;
7300
7301 if (S_GET_VALUE (sym) < next->fr_address)
7302 /* Not at the end of this frag. */
7303 continue;
7304 know (S_GET_VALUE (sym) == next->fr_address);
7305
7306 do
7307 {
7308 if (next->tc_frag_data.first_map != NULL)
7309 {
7310 /* Next frag starts with a mapping symbol. Discard this
7311 one. */
7312 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7313 break;
7314 }
7315
7316 if (next->fr_next == NULL)
7317 {
7318 /* This mapping symbol is at the end of the section. Discard
7319 it. */
7320 know (next->fr_fix == 0 && next->fr_var == 0);
7321 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7322 break;
7323 }
7324
7325 /* As long as we have empty frags without any mapping symbols,
7326 keep looking. */
7327 /* If the next frag is non-empty and does not start with a
7328 mapping symbol, then this mapping symbol is required. */
7329 if (next->fr_address != next->fr_next->fr_address)
7330 break;
7331
7332 next = next->fr_next;
7333 }
7334 while (next != NULL);
7335 }
7336 }
7337 #endif
7338
7339 /* Adjust the symbol table. */
7340
7341 void
7342 aarch64_adjust_symtab (void)
7343 {
7344 #ifdef OBJ_ELF
7345 /* Remove any overlapping mapping symbols generated by alignment frags. */
7346 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7347 /* Now do generic ELF adjustments. */
7348 elf_adjust_symtab ();
7349 #endif
7350 }
7351
7352 static void
7353 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7354 {
7355 const char *hash_err;
7356
7357 hash_err = hash_insert (table, key, value);
7358 if (hash_err)
7359 printf ("Internal Error: Can't hash %s\n", key);
7360 }
7361
7362 static void
7363 fill_instruction_hash_table (void)
7364 {
7365 aarch64_opcode *opcode = aarch64_opcode_table;
7366
7367 while (opcode->name != NULL)
7368 {
7369 templates *templ, *new_templ;
7370 templ = hash_find (aarch64_ops_hsh, opcode->name);
7371
7372 new_templ = (templates *) xmalloc (sizeof (templates));
7373 new_templ->opcode = opcode;
7374 new_templ->next = NULL;
7375
7376 if (!templ)
7377 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7378 else
7379 {
7380 new_templ->next = templ->next;
7381 templ->next = new_templ;
7382 }
7383 ++opcode;
7384 }
7385 }
7386
7387 static inline void
7388 convert_to_upper (char *dst, const char *src, size_t num)
7389 {
7390 unsigned int i;
7391 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7392 *dst = TOUPPER (*src);
7393 *dst = '\0';
7394 }
7395
7396 /* Assume STR points to a lower-case string; allocate, convert and return
7397 the corresponding upper-case string. */
7398 static inline const char*
7399 get_upper_str (const char *str)
7400 {
7401 char *ret;
7402 size_t len = strlen (str);
7403 if ((ret = xmalloc (len + 1)) == NULL)
7404 abort ();
7405 convert_to_upper (ret, str, len);
7406 return ret;
7407 }
7408
7409 /* MD interface: Initialization. */
7410
7411 void
7412 md_begin (void)
7413 {
7414 unsigned mach;
7415 unsigned int i;
7416
7417 if ((aarch64_ops_hsh = hash_new ()) == NULL
7418 || (aarch64_cond_hsh = hash_new ()) == NULL
7419 || (aarch64_shift_hsh = hash_new ()) == NULL
7420 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7421 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7422 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7423 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7424 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7425 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7426 || (aarch64_reg_hsh = hash_new ()) == NULL
7427 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7428 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7429 || (aarch64_pldop_hsh = hash_new ()) == NULL)
7430 as_fatal (_("virtual memory exhausted"));
7431
7432 fill_instruction_hash_table ();
7433
7434 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7435 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7436 (void *) (aarch64_sys_regs + i));
7437
7438 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7439 checked_hash_insert (aarch64_pstatefield_hsh,
7440 aarch64_pstatefields[i].name,
7441 (void *) (aarch64_pstatefields + i));
7442
7443 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
7444 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7445 aarch64_sys_regs_ic[i].template,
7446 (void *) (aarch64_sys_regs_ic + i));
7447
7448 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
7449 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7450 aarch64_sys_regs_dc[i].template,
7451 (void *) (aarch64_sys_regs_dc + i));
7452
7453 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
7454 checked_hash_insert (aarch64_sys_regs_at_hsh,
7455 aarch64_sys_regs_at[i].template,
7456 (void *) (aarch64_sys_regs_at + i));
7457
7458 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
7459 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7460 aarch64_sys_regs_tlbi[i].template,
7461 (void *) (aarch64_sys_regs_tlbi + i));
7462
7463 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7464 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7465 (void *) (reg_names + i));
7466
7467 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7468 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7469 (void *) (nzcv_names + i));
7470
7471 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7472 {
7473 const char *name = aarch64_operand_modifiers[i].name;
7474 checked_hash_insert (aarch64_shift_hsh, name,
7475 (void *) (aarch64_operand_modifiers + i));
7476 /* Also hash the name in the upper case. */
7477 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7478 (void *) (aarch64_operand_modifiers + i));
7479 }
7480
7481 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7482 {
7483 unsigned int j;
7484 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7485 the same condition code. */
7486 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7487 {
7488 const char *name = aarch64_conds[i].names[j];
7489 if (name == NULL)
7490 break;
7491 checked_hash_insert (aarch64_cond_hsh, name,
7492 (void *) (aarch64_conds + i));
7493 /* Also hash the name in the upper case. */
7494 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7495 (void *) (aarch64_conds + i));
7496 }
7497 }
7498
7499 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7500 {
7501 const char *name = aarch64_barrier_options[i].name;
7502 /* Skip xx00 - the unallocated values of the option field. */
7503 if ((i & 0x3) == 0)
7504 continue;
7505 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7506 (void *) (aarch64_barrier_options + i));
7507 /* Also hash the name in the upper case. */
7508 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7509 (void *) (aarch64_barrier_options + i));
7510 }
7511
7512 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7513 {
7514 const char* name = aarch64_prfops[i].name;
7515 /* Skip the unallocated hint encodings. */
7516 if (name == NULL)
7517 continue;
7518 checked_hash_insert (aarch64_pldop_hsh, name,
7519 (void *) (aarch64_prfops + i));
7520 /* Also hash the name in the upper case. */
7521 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7522 (void *) (aarch64_prfops + i));
7523 }
7524
7525 /* Set the cpu variant based on the command-line options. */
7526 if (!mcpu_cpu_opt)
7527 mcpu_cpu_opt = march_cpu_opt;
7528
7529 if (!mcpu_cpu_opt)
7530 mcpu_cpu_opt = &cpu_default;
7531
7532 cpu_variant = *mcpu_cpu_opt;
7533
7534 /* Record the CPU type. */
7535 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7536
7537 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7538 }
7539
7540 /* Command line processing. */
7541
7542 const char *md_shortopts = "m:";
7543
7544 #ifdef AARCH64_BI_ENDIAN
7545 #define OPTION_EB (OPTION_MD_BASE + 0)
7546 #define OPTION_EL (OPTION_MD_BASE + 1)
7547 #else
7548 #if TARGET_BYTES_BIG_ENDIAN
7549 #define OPTION_EB (OPTION_MD_BASE + 0)
7550 #else
7551 #define OPTION_EL (OPTION_MD_BASE + 1)
7552 #endif
7553 #endif
7554
7555 struct option md_longopts[] = {
7556 #ifdef OPTION_EB
7557 {"EB", no_argument, NULL, OPTION_EB},
7558 #endif
7559 #ifdef OPTION_EL
7560 {"EL", no_argument, NULL, OPTION_EL},
7561 #endif
7562 {NULL, no_argument, NULL, 0}
7563 };
7564
7565 size_t md_longopts_size = sizeof (md_longopts);
7566
7567 struct aarch64_option_table
7568 {
7569 char *option; /* Option name to match. */
7570 char *help; /* Help information. */
7571 int *var; /* Variable to change. */
7572 int value; /* What to change it to. */
7573 char *deprecated; /* If non-null, print this message. */
7574 };
7575
7576 static struct aarch64_option_table aarch64_opts[] = {
7577 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7578 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7579 NULL},
7580 #ifdef DEBUG_AARCH64
7581 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7582 #endif /* DEBUG_AARCH64 */
7583 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7584 NULL},
7585 {"mno-verbose-error", N_("do not output verbose error messages"),
7586 &verbose_error_p, 0, NULL},
7587 {NULL, NULL, NULL, 0, NULL}
7588 };
7589
7590 struct aarch64_cpu_option_table
7591 {
7592 char *name;
7593 const aarch64_feature_set value;
7594 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7595 case. */
7596 const char *canonical_name;
7597 };
7598
7599 /* This list should, at a minimum, contain all the cpu names
7600 recognized by GCC. */
7601 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7602 {"all", AARCH64_ANY, NULL},
7603 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7604 AARCH64_FEATURE_CRC), "Cortex-A53"},
7605 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7606 AARCH64_FEATURE_CRC), "Cortex-A57"},
7607 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7608 AARCH64_FEATURE_CRC), "Cortex-A72"},
7609 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
7610 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7611 "Samsung Exynos M1"},
7612 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7613 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7614 "Cavium ThunderX"},
7615 /* The 'xgene-1' name was used in earlier releases and has been
7616 superseded by 'xgene1' in all tools. */
7618 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7619 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7620 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7621 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7622 {"generic", AARCH64_ARCH_V8, NULL},
7623
7624 {NULL, AARCH64_ARCH_NONE, NULL}
7625 };
7626
7627 struct aarch64_arch_option_table
7628 {
7629 char *name;
7630 const aarch64_feature_set value;
7631 };
7632
7633 /* This list should, at a minimum, contain all the architecture names
7634 recognized by GCC. */
7635 static const struct aarch64_arch_option_table aarch64_archs[] = {
7636 {"all", AARCH64_ANY},
7637 {"armv8-a", AARCH64_ARCH_V8},
7638 {"armv8.1-a", AARCH64_ARCH_V8_1},
7639 {NULL, AARCH64_ARCH_NONE}
7640 };
7641
7642 /* ISA extensions. */
7643 struct aarch64_option_cpu_value_table
7644 {
7645 char *name;
7646 const aarch64_feature_set value;
7647 };
7648
7649 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7650 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7651 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7652 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7653 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
7654 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7655 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0)},
7656 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0)},
7657 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_SIMD
7658 | AARCH64_FEATURE_RDMA, 0)},
7659 {NULL, AARCH64_ARCH_NONE}
7660 };
7661
7662 struct aarch64_long_option_table
7663 {
7664 char *option; /* Substring to match. */
7665 char *help; /* Help information. */
7666 int (*func) (char *subopt); /* Function to decode sub-option. */
7667 char *deprecated; /* If non-null, print this message. */
7668 };
7669
7670 static int
7671 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p,
7672 bfd_boolean ext_only)
7673 {
7674 /* We insist on extensions being added before being removed. We achieve
7675 this by using the ADDING_VALUE variable to indicate whether we are
7676 adding an extension (1) or removing it (0) and only allowing it to
7677 change in the order -1 -> 1 -> 0. */
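/* For example, "+crc+nofp" is accepted, whereas "+nofp+crc" is rejected
   because an extension is added after one has already been removed.  */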
7678 int adding_value = -1;
7679 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7680
7681 /* Copy the feature set, so that we can modify it. */
7682 *ext_set = **opt_p;
7683 *opt_p = ext_set;
7684
7685 while (str != NULL && *str != 0)
7686 {
7687 const struct aarch64_option_cpu_value_table *opt;
7688 char *ext = NULL;
7689 int optlen;
7690
7691 if (!ext_only)
7692 {
7693 if (*str != '+')
7694 {
7695 as_bad (_("invalid architectural extension"));
7696 return 0;
7697 }
7698
7699 ext = strchr (++str, '+');
7700 }
7701
7702 if (ext != NULL)
7703 optlen = ext - str;
7704 else
7705 optlen = strlen (str);
7706
7707 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7708 {
7709 if (adding_value != 0)
7710 adding_value = 0;
7711 optlen -= 2;
7712 str += 2;
7713 }
7714 else if (optlen > 0)
7715 {
7716 if (adding_value == -1)
7717 adding_value = 1;
7718 else if (adding_value != 1)
7719 {
7720 as_bad (_("must specify extensions to add before specifying "
7721 "those to remove"));
7722 return 0;
7723 }
7724 }
7725
7726 if (optlen == 0)
7727 {
7728 as_bad (_("missing architectural extension"));
7729 return 0;
7730 }
7731
7732 gas_assert (adding_value != -1);
7733
7734 for (opt = aarch64_features; opt->name != NULL; opt++)
7735 if (strncmp (opt->name, str, optlen) == 0)
7736 {
7737 /* Add or remove the extension. */
7738 if (adding_value)
7739 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7740 else
7741 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7742 break;
7743 }
7744
7745 if (opt->name == NULL)
7746 {
7747 as_bad (_("unknown architectural extension `%s'"), str);
7748 return 0;
7749 }
7750
7751 str = ext;
7752 }
7753
7754 return 1;
7755 }
7756
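/* Handle the -mcpu=<name>[+ext...] command-line value in STR, e.g.
   "cortex-a53+crc" (illustrative).  Returns 1 on success, 0 on error.  */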
7757 static int
7758 aarch64_parse_cpu (char *str)
7759 {
7760 const struct aarch64_cpu_option_table *opt;
7761 char *ext = strchr (str, '+');
7762 size_t optlen;
7763
7764 if (ext != NULL)
7765 optlen = ext - str;
7766 else
7767 optlen = strlen (str);
7768
7769 if (optlen == 0)
7770 {
7771 as_bad (_("missing cpu name `%s'"), str);
7772 return 0;
7773 }
7774
7775 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7776 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7777 {
7778 mcpu_cpu_opt = &opt->value;
7779 if (ext != NULL)
7780 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7781
7782 return 1;
7783 }
7784
7785 as_bad (_("unknown cpu `%s'"), str);
7786 return 0;
7787 }
7788
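/* Handle the -march=<arch>[+ext...] command-line value in STR, e.g.
   "armv8-a+crypto" (illustrative).  Returns 1 on success, 0 on error.  */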
7789 static int
7790 aarch64_parse_arch (char *str)
7791 {
7792 const struct aarch64_arch_option_table *opt;
7793 char *ext = strchr (str, '+');
7794 size_t optlen;
7795
7796 if (ext != NULL)
7797 optlen = ext - str;
7798 else
7799 optlen = strlen (str);
7800
7801 if (optlen == 0)
7802 {
7803 as_bad (_("missing architecture name `%s'"), str);
7804 return 0;
7805 }
7806
7807 for (opt = aarch64_archs; opt->name != NULL; opt++)
7808 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7809 {
7810 march_cpu_opt = &opt->value;
7811 if (ext != NULL)
7812 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
7813
7814 return 1;
7815 }
7816
7817 as_bad (_("unknown architecture `%s'"), str);
7818 return 0;
7819 }
7820
7821 /* ABIs. */
7822 struct aarch64_option_abi_value_table
7823 {
7824 char *name;
7825 enum aarch64_abi_type value;
7826 };
7827
7828 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7829 {"ilp32", AARCH64_ABI_ILP32},
7830 {"lp64", AARCH64_ABI_LP64},
7831 {NULL, 0}
7832 };
7833
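/* Handle the -mabi=<abi> command-line value in STR; STR must be exactly
   "lp64" or "ilp32".  Returns 1 on success, 0 on error.  */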
7834 static int
7835 aarch64_parse_abi (char *str)
7836 {
7837 const struct aarch64_option_abi_value_table *opt;
7838 size_t optlen = strlen (str);
7839
7840 if (optlen == 0)
7841 {
7842 as_bad (_("missing abi name `%s'"), str);
7843 return 0;
7844 }
7845
7846 for (opt = aarch64_abis; opt->name != NULL; opt++)
7847 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7848 {
7849 aarch64_abi = opt->value;
7850 return 1;
7851 }
7852
7853 as_bad (_("unknown abi `%s'"), str);
7854 return 0;
7855 }
7856
7857 static struct aarch64_long_option_table aarch64_long_opts[] = {
7858 #ifdef OBJ_ELF
7859 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
7860 aarch64_parse_abi, NULL},
7861 #endif /* OBJ_ELF */
7862 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7863 aarch64_parse_cpu, NULL},
7864 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7865 aarch64_parse_arch, NULL},
7866 {NULL, NULL, 0, NULL}
7867 };
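
/* Example (illustrative): a typical ILP32 invocation might combine these
   options as "as -mabi=ilp32 -mcpu=cortex-a53+crc ..."; the value after
   each '=' is handed to the corresponding parser above.  */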
7868
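/* Implement the GAS md_parse_option hook: handle target-specific option C
   (with argument ARG) and return non-zero if it was recognized, zero
   otherwise so that the generic option handling can report it.  */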
7869 int
7870 md_parse_option (int c, char *arg)
7871 {
7872 struct aarch64_option_table *opt;
7873 struct aarch64_long_option_table *lopt;
7874
7875 switch (c)
7876 {
7877 #ifdef OPTION_EB
7878 case OPTION_EB:
7879 target_big_endian = 1;
7880 break;
7881 #endif
7882
7883 #ifdef OPTION_EL
7884 case OPTION_EL:
7885 target_big_endian = 0;
7886 break;
7887 #endif
7888
7889 case 'a':
7890 /* Listing option. Just ignore these; we don't support additional
7891 ones. */
7892 return 0;
7893
7894 default:
7895 for (opt = aarch64_opts; opt->option != NULL; opt++)
7896 {
7897 if (c == opt->option[0]
7898 && ((arg == NULL && opt->option[1] == 0)
7899 || streq (arg, opt->option + 1)))
7900 {
7901 /* If the option is deprecated, tell the user. */
7902 if (opt->deprecated != NULL)
7903 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7904 arg ? arg : "", _(opt->deprecated));
7905
7906 if (opt->var != NULL)
7907 *opt->var = opt->value;
7908
7909 return 1;
7910 }
7911 }
7912
7913 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7914 {
7915 /* These options are expected to have an argument. */
7916 if (c == lopt->option[0]
7917 && arg != NULL
7918 && strncmp (arg, lopt->option + 1,
7919 strlen (lopt->option + 1)) == 0)
7920 {
7921 /* If the option is deprecated, tell the user. */
7922 if (lopt->deprecated != NULL)
7923 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7924 _(lopt->deprecated));
7925
7926 /* Call the sub-option parser. */
7927 return lopt->func (arg + strlen (lopt->option) - 1);
7928 }
7929 }
7930
7931 return 0;
7932 }
7933
7934 return 1;
7935 }
7936
7937 void
7938 md_show_usage (FILE * fp)
7939 {
7940 struct aarch64_option_table *opt;
7941 struct aarch64_long_option_table *lopt;
7942
7943 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7944
7945 for (opt = aarch64_opts; opt->option != NULL; opt++)
7946 if (opt->help != NULL)
7947 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7948
7949 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7950 if (lopt->help != NULL)
7951 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7952
7953 #ifdef OPTION_EB
7954 fprintf (fp, _("\
7955 -EB assemble code for a big-endian cpu\n"));
7956 #endif
7957
7958 #ifdef OPTION_EL
7959 fprintf (fp, _("\
7960 -EL assemble code for a little-endian cpu\n"));
7961 #endif
7962 }
7963
7964 /* Parse a .cpu directive. */
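/* Example (illustrative): ".cpu cortex-a72+crypto".  */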
7965
7966 static void
7967 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7968 {
7969 const struct aarch64_cpu_option_table *opt;
7970 char saved_char;
7971 char *name;
7972 char *ext;
7973 size_t optlen;
7974
7975 name = input_line_pointer;
7976 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7977 input_line_pointer++;
7978 saved_char = *input_line_pointer;
7979 *input_line_pointer = 0;
7980
7981 ext = strchr (name, '+');
7982
7983 if (ext != NULL)
7984 optlen = ext - name;
7985 else
7986 optlen = strlen (name);
7987
7988 /* Skip the first "all" entry. */
7989 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7990 if (strlen (opt->name) == optlen
7991 && strncmp (name, opt->name, optlen) == 0)
7992 {
7993 mcpu_cpu_opt = &opt->value;
7994 if (ext != NULL)
7995 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7996 return;
7997
7998 cpu_variant = *mcpu_cpu_opt;
7999
8000 *input_line_pointer = saved_char;
8001 demand_empty_rest_of_line ();
8002 return;
8003 }
8004 as_bad (_("unknown cpu `%s'"), name);
8005 *input_line_pointer = saved_char;
8006 ignore_rest_of_line ();
8007 }
8008
8009
8010 /* Parse a .arch directive. */
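/* Example (illustrative): ".arch armv8.1-a+crypto".  */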
8011
8012 static void
8013 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8014 {
8015 const struct aarch64_arch_option_table *opt;
8016 char saved_char;
8017 char *name;
8018 char *ext;
8019 size_t optlen;
8020
8021 name = input_line_pointer;
8022 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8023 input_line_pointer++;
8024 saved_char = *input_line_pointer;
8025 *input_line_pointer = 0;
8026
8027 ext = strchr (name, '+');
8028
8029 if (ext != NULL)
8030 optlen = ext - name;
8031 else
8032 optlen = strlen (name);
8033
8034 /* Skip the first "all" entry. */
8035 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8036 if (strlen (opt->name) == optlen
8037 && strncmp (name, opt->name, optlen) == 0)
8038 {
8039 mcpu_cpu_opt = &opt->value;
8040 if (ext != NULL)
8041 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8042 return;
8043
8044 cpu_variant = *mcpu_cpu_opt;
8045
8046 *input_line_pointer = saved_char;
8047 demand_empty_rest_of_line ();
8048 return;
8049 }
8050
8051 as_bad (_("unknown architecture `%s'"), name);
8052 *input_line_pointer = saved_char;
8053 ignore_rest_of_line ();
8054 }
8055
8056 /* Parse a .arch_extension directive. */
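/* Example (illustrative): ".arch_extension crypto" to add a feature, or
   ".arch_extension nocrc" to remove one.  */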
8057
8058 static void
8059 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8060 {
8061 char saved_char;
8062 char *ext = input_line_pointer;
8063
8064 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8065 input_line_pointer++;
8066 saved_char = *input_line_pointer;
8067 *input_line_pointer = 0;
8068
8069 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8070 return;
8071
8072 cpu_variant = *mcpu_cpu_opt;
8073
8074 *input_line_pointer = saved_char;
8075 demand_empty_rest_of_line ();
8076 }
8077
8078 /* Copy symbol information. */
8079
8080 void
8081 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
8082 {
8083 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
8084 }