1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum neon_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q
87 };
88
89 /* Bits for DEFINED field in neon_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92
93 struct neon_type_el
94 {
95 enum neon_el_type type;
96 unsigned char defined;
97 unsigned width;
98 int64_t index;
99 };
100
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
102
103 struct reloc
104 {
105 bfd_reloc_code_real_type type;
106 expressionS exp;
107 int pc_rel;
108 enum aarch64_opnd opnd;
109 uint32_t flags;
110 unsigned need_libopcodes_p : 1;
111 };
112
113 struct aarch64_instruction
114 {
115 /* libopcodes structure for instruction intermediate representation. */
116 aarch64_inst base;
117 /* Record assembly errors found during the parsing. */
118 struct
119 {
120 enum aarch64_operand_error_kind kind;
121 const char *error;
122 } parsing_error;
123 /* The condition that appears in the assembly line. */
124 int cond;
125 /* Relocation information (including the GAS internal fixup). */
126 struct reloc reloc;
127 /* Need to generate an immediate in the literal pool. */
128 unsigned gen_lit_pool : 1;
129 };
130
131 typedef struct aarch64_instruction aarch64_instruction;
132
133 static aarch64_instruction inst;
134
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
138 /* Diagnostics inline function utilities.
139
140 These are lightweight utilities which should only be called by parse_operands
141 and other parsers. GAS processes each assembly line by parsing it against
142 instruction template(s); in the case of multiple templates (for the same
143 mnemonic name), those templates are tried one by one until one succeeds or
144 all fail. An assembly line may fail a few templates before being
145 successfully parsed; an error saved here in most cases is not a user error
146 but an error indicating the current template is not the right template.
147 Therefore it is very important that errors can be saved at a low cost during
148 the parsing; we don't want to slow down the whole parsing by recording
149 non-user errors in detail.
150
151 Remember that the objective is to help GAS pick up the most appropriate
152 error message in the case of multiple templates, e.g. FMOV which has 8
153 templates. */
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline enum aarch64_operand_error_kind
175 get_error_kind (void)
176 {
177 return inst.parsing_error.kind;
178 }
179
180 static inline void
181 set_error (enum aarch64_operand_error_kind kind, const char *error)
182 {
183 inst.parsing_error.kind = kind;
184 inst.parsing_error.error = error;
185 }
186
187 static inline void
188 set_recoverable_error (const char *error)
189 {
190 set_error (AARCH64_OPDE_RECOVERABLE, error);
191 }
192
193 /* Use the DESC field of the corresponding aarch64_operand entry to compose
194 the error message. */
195 static inline void
196 set_default_error (void)
197 {
198 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
199 }
200
201 static inline void
202 set_syntax_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
205 }
206
207 static inline void
208 set_first_syntax_error (const char *error)
209 {
210 if (! error_p ())
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
212 }
213
214 static inline void
215 set_fatal_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
218 }
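/* A minimal usage sketch (illustrative only; REG is a hypothetical
   parse result).  Operand parsers record a cheap error and bail out,
   and the caller later decides which saved message, if any, to report
   via as_bad once every candidate template has been tried:

     if (reg == PARSE_FAIL)
       {
         set_default_error ();
         return FALSE;
       }
*/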
219 \f
220 /* Number of littlenums required to hold an extended precision number. */
221 #define MAX_LITTLENUMS 6
222
223 /* Return value for certain parsers when the parsing fails; those parsers
224 return the information of the parsed result, e.g. register number, on
225 success. */
226 #define PARSE_FAIL -1
227
228 /* This is an invalid condition code that means no conditional field is
229 present. */
230 #define COND_ALWAYS 0x10
231
232 typedef struct
233 {
234 const char *template;
235 unsigned long value;
236 } asm_barrier_opt;
237
238 typedef struct
239 {
240 const char *template;
241 uint32_t value;
242 } asm_nzcv;
243
244 struct reloc_entry
245 {
246 char *name;
247 bfd_reloc_code_real_type reloc;
248 };
249
250 /* Structure for a hash table entry for a register. */
251 typedef struct
252 {
253 const char *name;
254 unsigned char number;
255 unsigned char type;
256 unsigned char builtin;
257 } reg_entry;
258
259 /* Macros to define the register types and masks for the purpose
260 of parsing. */
261
262 #undef AARCH64_REG_TYPES
263 #define AARCH64_REG_TYPES \
264 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
265 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
266 BASIC_REG_TYPE(SP_32) /* wsp */ \
267 BASIC_REG_TYPE(SP_64) /* sp */ \
268 BASIC_REG_TYPE(Z_32) /* wzr */ \
269 BASIC_REG_TYPE(Z_64) /* xzr */ \
270 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
271 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
272 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
273 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
274 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
275 BASIC_REG_TYPE(CN) /* c[0-7] */ \
276 BASIC_REG_TYPE(VN) /* v[0-31] */ \
277 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
278 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
279 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
280 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
281 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
282 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
283 /* Typecheck: any [BHSDQ]P FP. */ \
284 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
285 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
286 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
287 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
289 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
290 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
291 /* Any integer register; used for error messages only. */ \
292 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
293 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Pseudo type to mark the end of the enumerator sequence. */ \
296 BASIC_REG_TYPE(MAX)
297
298 #undef BASIC_REG_TYPE
299 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
300 #undef MULTI_REG_TYPE
301 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
302
303 /* Register type enumerators. */
304 typedef enum
305 {
306 /* A list of REG_TYPE_*. */
307 AARCH64_REG_TYPES
308 } aarch64_reg_type;
309
310 #undef BASIC_REG_TYPE
311 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
312 #undef REG_TYPE
313 #define REG_TYPE(T) (1 << REG_TYPE_##T)
314 #undef MULTI_REG_TYPE
315 #define MULTI_REG_TYPE(T,V) V,
316
317 /* Values indexed by aarch64_reg_type to assist the type checking. */
318 static const unsigned reg_type_masks[] =
319 {
320 AARCH64_REG_TYPES
321 };
322
323 #undef BASIC_REG_TYPE
324 #undef REG_TYPE
325 #undef MULTI_REG_TYPE
326 #undef AARCH64_REG_TYPES
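/* For illustration, the two expansions above are equivalent to writing
   (only a few entries shown):

     typedef enum { REG_TYPE_R_32, REG_TYPE_R_64, ..., REG_TYPE_MAX }
       aarch64_reg_type;

     reg_type_masks[REG_TYPE_R_64]   == (1 << REG_TYPE_R_64)
     reg_type_masks[REG_TYPE_R64_SP] == (1 << REG_TYPE_R_64)
                                        | (1 << REG_TYPE_SP_64)

   i.e. each basic type maps to its own bit and each multi type to the
   union of the basic-type bits it accepts.  */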
327
328 /* Diagnostics used when we don't get a register of the expected type.
329 Note: this has to be synchronized with the aarch64_reg_type definitions
330 above. */
331 static const char *
332 get_reg_expected_msg (aarch64_reg_type reg_type)
333 {
334 const char *msg;
335
336 switch (reg_type)
337 {
338 case REG_TYPE_R_32:
339 msg = N_("integer 32-bit register expected");
340 break;
341 case REG_TYPE_R_64:
342 msg = N_("integer 64-bit register expected");
343 break;
344 case REG_TYPE_R_N:
345 msg = N_("integer register expected");
346 break;
347 case REG_TYPE_R_Z_SP:
348 msg = N_("integer, zero or SP register expected");
349 break;
350 case REG_TYPE_FP_B:
351 msg = N_("8-bit SIMD scalar register expected");
352 break;
353 case REG_TYPE_FP_H:
354 msg = N_("16-bit SIMD scalar or floating-point half precision "
355 "register expected");
356 break;
357 case REG_TYPE_FP_S:
358 msg = N_("32-bit SIMD scalar or floating-point single precision "
359 "register expected");
360 break;
361 case REG_TYPE_FP_D:
362 msg = N_("64-bit SIMD scalar or floating-point double precision "
363 "register expected");
364 break;
365 case REG_TYPE_FP_Q:
366 msg = N_("128-bit SIMD scalar or floating-point quad precision "
367 "register expected");
368 break;
369 case REG_TYPE_CN:
370 msg = N_("C0 - C15 expected");
371 break;
372 case REG_TYPE_R_Z_BHSDQ_V:
373 msg = N_("register expected");
374 break;
375 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
376 msg = N_("SIMD scalar or floating-point register expected");
377 break;
378 case REG_TYPE_VN: /* any V reg */
379 msg = N_("vector register expected");
380 break;
381 default:
382 as_fatal (_("invalid register type %d"), reg_type);
383 }
384 return msg;
385 }
386
387 /* Some well known registers that we refer to directly elsewhere. */
388 #define REG_SP 31
389
390 /* Instructions take 4 bytes in the object file. */
391 #define INSN_SIZE 4
392
393 /* Define some common error messages. */
394 #define BAD_SP _("SP not allowed here")
395
396 static struct hash_control *aarch64_ops_hsh;
397 static struct hash_control *aarch64_cond_hsh;
398 static struct hash_control *aarch64_shift_hsh;
399 static struct hash_control *aarch64_sys_regs_hsh;
400 static struct hash_control *aarch64_pstatefield_hsh;
401 static struct hash_control *aarch64_sys_regs_ic_hsh;
402 static struct hash_control *aarch64_sys_regs_dc_hsh;
403 static struct hash_control *aarch64_sys_regs_at_hsh;
404 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
405 static struct hash_control *aarch64_reg_hsh;
406 static struct hash_control *aarch64_barrier_opt_hsh;
407 static struct hash_control *aarch64_nzcv_hsh;
408 static struct hash_control *aarch64_pldop_hsh;
409 static struct hash_control *aarch64_hint_opt_hsh;
410
411 /* Stuff needed to resolve the label ambiguity
412 As:
413 ...
414 label: <insn>
415 may differ from:
416 ...
417 label:
418 <insn> */
419
420 static symbolS *last_label_seen;
421
422 /* Literal pool structure. Held on a per-section
423 and per-sub-section basis. */
424
425 #define MAX_LITERAL_POOL_SIZE 1024
426 typedef struct literal_expression
427 {
428 expressionS exp;
429 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
430 LITTLENUM_TYPE * bignum;
431 } literal_expression;
432
433 typedef struct literal_pool
434 {
435 literal_expression literals[MAX_LITERAL_POOL_SIZE];
436 unsigned int next_free_entry;
437 unsigned int id;
438 symbolS *symbol;
439 segT section;
440 subsegT sub_section;
441 int size;
442 struct literal_pool *next;
443 } literal_pool;
444
445 /* Pointer to a linked list of literal pools. */
446 static literal_pool *list_of_pools = NULL;
447 \f
448 /* Pure syntax. */
449
450 /* This array holds the chars that always start a comment. If the
451 pre-processor is disabled, these aren't very useful. */
452 const char comment_chars[] = "";
453
454 /* This array holds the chars that only start a comment at the beginning of
455 a line. If the line seems to have the form '# 123 filename'
456 .line and .file directives will appear in the pre-processed output. */
457 /* Note that input_file.c hand checks for '#' at the beginning of the
458 first line of the input file. This is because the compiler outputs
459 #NO_APP at the beginning of its output. */
460 /* Also note that comments like this one will always work. */
461 const char line_comment_chars[] = "#";
462
463 const char line_separator_chars[] = ";";
464
465 /* Chars that can be used to separate the mantissa
466 from the exponent in floating point numbers. */
467 const char EXP_CHARS[] = "eE";
468
469 /* Chars that mean this number is a floating point constant. */
470 /* As in 0f12.456 */
471 /* or 0d1.2345e12 */
472
473 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
474
475 /* Prefix character that indicates the start of an immediate value. */
476 #define is_immediate_prefix(C) ((C) == '#')
477
478 /* Separator character handling. */
479
480 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
481
482 static inline bfd_boolean
483 skip_past_char (char **str, char c)
484 {
485 if (**str == c)
486 {
487 (*str)++;
488 return TRUE;
489 }
490 else
491 return FALSE;
492 }
493
494 #define skip_past_comma(str) skip_past_char (str, ',')
495
496 /* Arithmetic expressions (possibly involving symbols). */
497
498 static bfd_boolean in_my_get_expression_p = FALSE;
499
500 /* Third argument to my_get_expression. */
501 #define GE_NO_PREFIX 0
502 #define GE_OPT_PREFIX 1
503
504 /* Return TRUE if the string pointed to by *STR is successfully parsed
505 as a valid expression; *EP will be filled with the information of
506 such an expression. Otherwise return FALSE. */
507
508 static bfd_boolean
509 my_get_expression (expressionS * ep, char **str, int prefix_mode,
510 int reject_absent)
511 {
512 char *save_in;
513 segT seg;
514 int prefix_present_p = 0;
515
516 switch (prefix_mode)
517 {
518 case GE_NO_PREFIX:
519 break;
520 case GE_OPT_PREFIX:
521 if (is_immediate_prefix (**str))
522 {
523 (*str)++;
524 prefix_present_p = 1;
525 }
526 break;
527 default:
528 abort ();
529 }
530
531 memset (ep, 0, sizeof (expressionS));
532
533 save_in = input_line_pointer;
534 input_line_pointer = *str;
535 in_my_get_expression_p = TRUE;
536 seg = expression (ep);
537 in_my_get_expression_p = FALSE;
538
539 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
540 {
541 /* We found a bad expression in md_operand(). */
542 *str = input_line_pointer;
543 input_line_pointer = save_in;
544 if (prefix_present_p && ! error_p ())
545 set_fatal_syntax_error (_("bad expression"));
546 else
547 set_first_syntax_error (_("bad expression"));
548 return FALSE;
549 }
550
551 #ifdef OBJ_AOUT
552 if (seg != absolute_section
553 && seg != text_section
554 && seg != data_section
555 && seg != bss_section && seg != undefined_section)
556 {
557 set_syntax_error (_("bad segment"));
558 *str = input_line_pointer;
559 input_line_pointer = save_in;
560 return FALSE;
561 }
562 #else
563 (void) seg;
564 #endif
565
566 *str = input_line_pointer;
567 input_line_pointer = save_in;
568 return TRUE;
569 }
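/* For example (illustrative): with GE_OPT_PREFIX an optional leading
   '#' is skipped, so both "#16" and "16" yield an O_constant expression
   with X_add_number == 16; GE_NO_PREFIX performs no such skipping and
   is used where the caller has already dealt with any prefix.  */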
570
571 /* Turn a string in input_line_pointer into a floating point constant
572 of type TYPE, and store the appropriate bytes in *LITP. The number
573 of LITTLENUMS emitted is stored in *SIZEP. An error message is
574 returned, or NULL on OK. */
575
576 char *
577 md_atof (int type, char *litP, int *sizeP)
578 {
579 return ieee_md_atof (type, litP, sizeP, target_big_endian);
580 }
581
582 /* We handle all bad expressions here, so that we can report the faulty
583 instruction in the error message. */
584 void
585 md_operand (expressionS * exp)
586 {
587 if (in_my_get_expression_p)
588 exp->X_op = O_illegal;
589 }
590
591 /* Immediate values. */
592
593 /* Errors may be set multiple times during parsing or bit encoding
594 (particularly in the Neon bits), but usually the earliest error which is set
595 will be the most meaningful. Avoid overwriting it with later (cascading)
596 errors by calling this function. */
597
598 static void
599 first_error (const char *error)
600 {
601 if (! error_p ())
602 set_syntax_error (error);
603 }
604
605 /* Similar to first_error, but this function accepts a formatted error
606 message. */
607 static void
608 first_error_fmt (const char *format, ...)
609 {
610 va_list args;
611 enum
612 { size = 100 };
613 /* N.B. this single buffer will not cause error messages for different
614 instructions to pollute each other; this is because at the end of
615 processing of each assembly line, the error message, if any, will be
616 collected by as_bad. */
617 static char buffer[size];
618
619 if (! error_p ())
620 {
621 int ret ATTRIBUTE_UNUSED;
622 va_start (args, format);
623 ret = vsnprintf (buffer, size, format, args);
624 know (ret <= size - 1 && ret >= 0);
625 va_end (args);
626 set_syntax_error (buffer);
627 }
628 }
629
630 /* Register parsing. */
631
632 /* Generic register parser which is called by other specialized
633 register parsers.
634 CCP points to what should be the beginning of a register name.
635 If it is indeed a valid register name, advance CCP over it and
636 return the reg_entry structure; otherwise return NULL.
637 It does not issue diagnostics. */
638
639 static reg_entry *
640 parse_reg (char **ccp)
641 {
642 char *start = *ccp;
643 char *p;
644 reg_entry *reg;
645
646 #ifdef REGISTER_PREFIX
647 if (*start != REGISTER_PREFIX)
648 return NULL;
649 start++;
650 #endif
651
652 p = start;
653 if (!ISALPHA (*p) || !is_name_beginner (*p))
654 return NULL;
655
656 do
657 p++;
658 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
659
660 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
661
662 if (!reg)
663 return NULL;
664
665 *ccp = p;
666 return reg;
667 }
668
669 /* Return TRUE if REG->TYPE is a valid type for TYPE; otherwise
670 return FALSE. */
671 static bfd_boolean
672 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
673 {
674 if (reg->type == type)
675 return TRUE;
676
677 switch (type)
678 {
679 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
680 case REG_TYPE_R_Z_SP: /* Integer reg (inc {W}SP inc [WX]ZR). */
681 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
682 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
683 case REG_TYPE_VN: /* Vector register. */
684 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
685 return ((reg_type_masks[reg->type] & reg_type_masks[type])
686 == reg_type_masks[reg->type]);
687 default:
688 as_fatal ("unhandled type %d", type);
689 abort ();
690 }
691 }
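/* Worked example (illustrative): a register parsed as REG_TYPE_R_64 is
   accepted when REG_TYPE_R_Z_SP is requested because

     reg_type_masks[REG_TYPE_R_64] & reg_type_masks[REG_TYPE_R_Z_SP]
       == reg_type_masks[REG_TYPE_R_64]

   i.e. the single-bit mask of the parsed type is a subset of the
   requested multi-type mask, whereas e.g. REG_TYPE_FP_S fails the same
   test because its bit is not part of the R_Z_SP mask.  */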
692
693 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
694 Return the register number otherwise. *ISREG32 is set to one if the
695 register is 32-bit wide; *ISREGZERO is set to one if the register is
696 of type Z_32 or Z_64.
697 Note that this function does not issue any diagnostics. */
698
699 static int
700 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
701 int *isreg32, int *isregzero)
702 {
703 char *str = *ccp;
704 const reg_entry *reg = parse_reg (&str);
705
706 if (reg == NULL)
707 return PARSE_FAIL;
708
709 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
710 return PARSE_FAIL;
711
712 switch (reg->type)
713 {
714 case REG_TYPE_SP_32:
715 case REG_TYPE_SP_64:
716 if (reject_sp)
717 return PARSE_FAIL;
718 *isreg32 = reg->type == REG_TYPE_SP_32;
719 *isregzero = 0;
720 break;
721 case REG_TYPE_R_32:
722 case REG_TYPE_R_64:
723 *isreg32 = reg->type == REG_TYPE_R_32;
724 *isregzero = 0;
725 break;
726 case REG_TYPE_Z_32:
727 case REG_TYPE_Z_64:
728 if (reject_rz)
729 return PARSE_FAIL;
730 *isreg32 = reg->type == REG_TYPE_Z_32;
731 *isregzero = 1;
732 break;
733 default:
734 return PARSE_FAIL;
735 }
736
737 *ccp = str;
738
739 return reg->number;
740 }
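/* Examples (illustrative): "w3" returns 3 with *ISREG32 = 1 and
   *ISREGZERO = 0; "xzr" returns 31 with *ISREG32 = 0 and *ISREGZERO = 1
   (or PARSE_FAIL when REJECT_RZ is set); "sp" returns 31 with
   *ISREG32 = 0 (or PARSE_FAIL when REJECT_SP is set).  */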
741
742 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
743 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
744 otherwise return FALSE.
745
746 Accept only one occurrence of:
747 8b 16b 2h 4h 8h 2s 4s 1d 2d
748 b h s d q */
749 static bfd_boolean
750 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
751 {
752 char *ptr = *str;
753 unsigned width;
754 unsigned element_size;
755 enum neon_el_type type;
756
757 /* skip '.' */
758 ptr++;
759
760 if (!ISDIGIT (*ptr))
761 {
762 width = 0;
763 goto elt_size;
764 }
765 width = strtoul (ptr, &ptr, 10);
766 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
767 {
768 first_error_fmt (_("bad size %d in vector width specifier"), width);
769 return FALSE;
770 }
771
772 elt_size:
773 switch (TOLOWER (*ptr))
774 {
775 case 'b':
776 type = NT_b;
777 element_size = 8;
778 break;
779 case 'h':
780 type = NT_h;
781 element_size = 16;
782 break;
783 case 's':
784 type = NT_s;
785 element_size = 32;
786 break;
787 case 'd':
788 type = NT_d;
789 element_size = 64;
790 break;
791 case 'q':
792 if (width == 1)
793 {
794 type = NT_q;
795 element_size = 128;
796 break;
797 }
798 /* fall through. */
799 default:
800 if (*ptr != '\0')
801 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
802 else
803 first_error (_("missing element size"));
804 return FALSE;
805 }
806 if (width != 0 && width * element_size != 64 && width * element_size != 128
807 && !(width == 2 && element_size == 16))
808 {
809 first_error_fmt (_
810 ("invalid element size %d and vector size combination %c"),
811 width, *ptr);
812 return FALSE;
813 }
814 ptr++;
815
816 parsed_type->type = type;
817 parsed_type->width = width;
818
819 *str = ptr;
820
821 return TRUE;
822 }
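/* Examples (illustrative): ".4s" yields type NT_s, width 4 (4 x 32 =
   128 bits); ".8b" yields NT_b, width 8 (64 bits); ".2h" is accepted as
   the special case above; ".s" (no digit) yields width 0, which callers
   use for an element qualifier such as "v1.s[2]"; ".3s" is rejected
   because 3 is not a valid vector width.  */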
823
824 /* Parse a single type, e.g. ".8b", leading period included.
825 Only applicable to Vn registers.
826
827 Return TRUE on success; otherwise return FALSE. */
828 static bfd_boolean
829 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
830 {
831 char *str = *ccp;
832
833 if (*str == '.')
834 {
835 if (! parse_neon_type_for_operand (vectype, &str))
836 {
837 first_error (_("vector type expected"));
838 return FALSE;
839 }
840 }
841 else
842 return FALSE;
843
844 *ccp = str;
845
846 return TRUE;
847 }
848
849 /* Parse a register of the type TYPE.
850
851 Return PARSE_FAIL if the string pointed to by *CCP is not a valid register
852 name or the parsed register is not of TYPE.
853
854 Otherwise return the register number, and optionally fill in the actual
855 type of the register in *RTYPE when multiple alternatives were given, and
856 return the register shape and element index information in *TYPEINFO.
857
858 IN_REG_LIST should be set to TRUE if the caller is parsing a register
859 list. */
860
861 static int
862 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
863 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
864 {
865 char *str = *ccp;
866 const reg_entry *reg = parse_reg (&str);
867 struct neon_type_el atype;
868 struct neon_type_el parsetype;
869 bfd_boolean is_typed_vecreg = FALSE;
870
871 atype.defined = 0;
872 atype.type = NT_invtype;
873 atype.width = -1;
874 atype.index = 0;
875
876 if (reg == NULL)
877 {
878 if (typeinfo)
879 *typeinfo = atype;
880 set_default_error ();
881 return PARSE_FAIL;
882 }
883
884 if (! aarch64_check_reg_type (reg, type))
885 {
886 DEBUG_TRACE ("reg type check failed");
887 set_default_error ();
888 return PARSE_FAIL;
889 }
890 type = reg->type;
891
892 if (type == REG_TYPE_VN
893 && parse_neon_operand_type (&parsetype, &str))
894 {
895 /* Register is of the form Vn.[bhsdq]. */
896 is_typed_vecreg = TRUE;
897
898 if (parsetype.width == 0)
899 /* Expect index. In the new scheme we cannot have
900 Vn.[bhsdq] represent a scalar. Therefore any
901 Vn.[bhsdq] should have an index following it.
902 Except in register lists, of course. */
903 atype.defined |= NTA_HASINDEX;
904 else
905 atype.defined |= NTA_HASTYPE;
906
907 atype.type = parsetype.type;
908 atype.width = parsetype.width;
909 }
910
911 if (skip_past_char (&str, '['))
912 {
913 expressionS exp;
914
915 /* Reject Sn[index] syntax. */
916 if (!is_typed_vecreg)
917 {
918 first_error (_("this type of register can't be indexed"));
919 return PARSE_FAIL;
920 }
921
922 if (in_reg_list == TRUE)
923 {
924 first_error (_("index not allowed inside register list"));
925 return PARSE_FAIL;
926 }
927
928 atype.defined |= NTA_HASINDEX;
929
930 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
931
932 if (exp.X_op != O_constant)
933 {
934 first_error (_("constant expression required"));
935 return PARSE_FAIL;
936 }
937
938 if (! skip_past_char (&str, ']'))
939 return PARSE_FAIL;
940
941 atype.index = exp.X_add_number;
942 }
943 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
944 {
945 /* Indexed vector register expected. */
946 first_error (_("indexed vector register expected"));
947 return PARSE_FAIL;
948 }
949
950 /* A vector reg Vn should be typed or indexed. */
951 if (type == REG_TYPE_VN && atype.defined == 0)
952 {
953 first_error (_("invalid use of vector register"));
954 }
955
956 if (typeinfo)
957 *typeinfo = atype;
958
959 if (rtype)
960 *rtype = type;
961
962 *ccp = str;
963
964 return reg->number;
965 }
966
967 /* Parse register.
968
969 Return the register number on success; return PARSE_FAIL otherwise.
970
971 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
972 the register (e.g. NEON double or quad reg when either has been requested).
973
974 If this is a NEON vector register with additional type information, fill
975 in the struct pointed to by VECTYPE (if non-NULL).
976
977 This parser does not handle register lists. */
978
979 static int
980 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
981 aarch64_reg_type *rtype, struct neon_type_el *vectype)
982 {
983 struct neon_type_el atype;
984 char *str = *ccp;
985 int reg = parse_typed_reg (&str, type, rtype, &atype,
986 /*in_reg_list= */ FALSE);
987
988 if (reg == PARSE_FAIL)
989 return PARSE_FAIL;
990
991 if (vectype)
992 *vectype = atype;
993
994 *ccp = str;
995
996 return reg;
997 }
998
999 static inline bfd_boolean
1000 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
1001 {
1002 return
1003 e1.type == e2.type
1004 && e1.defined == e2.defined
1005 && e1.width == e2.width && e1.index == e2.index;
1006 }
1007
1008 /* This function parses the NEON register list. On success, it returns
1009 the parsed register list information in the following encoded format:
1010
1011 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1012 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1013
1014 The information of the register shape and/or index is returned in
1015 *VECTYPE.
1016
1017 It returns PARSE_FAIL if the register list is invalid.
1018
1019 The list contains one to four registers.
1020 Each register can be one of:
1021 <Vt>.<T>[<index>]
1022 <Vt>.<T>
1023 All <T> should be identical.
1024 All <index> should be identical.
1025 There are restrictions on <Vt> numbers which are checked later
1026 (by reg_list_valid_p). */
1027
1028 static int
1029 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1030 {
1031 char *str = *ccp;
1032 int nb_regs;
1033 struct neon_type_el typeinfo, typeinfo_first;
1034 int val, val_range;
1035 int in_range;
1036 int ret_val;
1037 int i;
1038 bfd_boolean error = FALSE;
1039 bfd_boolean expect_index = FALSE;
1040
1041 if (*str != '{')
1042 {
1043 set_syntax_error (_("expecting {"));
1044 return PARSE_FAIL;
1045 }
1046 str++;
1047
1048 nb_regs = 0;
1049 typeinfo_first.defined = 0;
1050 typeinfo_first.type = NT_invtype;
1051 typeinfo_first.width = -1;
1052 typeinfo_first.index = 0;
1053 ret_val = 0;
1054 val = -1;
1055 val_range = -1;
1056 in_range = 0;
1057 do
1058 {
1059 if (in_range)
1060 {
1061 str++; /* skip over '-' */
1062 val_range = val;
1063 }
1064 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1065 /*in_reg_list= */ TRUE);
1066 if (val == PARSE_FAIL)
1067 {
1068 set_first_syntax_error (_("invalid vector register in list"));
1069 error = TRUE;
1070 continue;
1071 }
1072 /* reject [bhsd]n */
1073 if (typeinfo.defined == 0)
1074 {
1075 set_first_syntax_error (_("invalid scalar register in list"));
1076 error = TRUE;
1077 continue;
1078 }
1079
1080 if (typeinfo.defined & NTA_HASINDEX)
1081 expect_index = TRUE;
1082
1083 if (in_range)
1084 {
1085 if (val < val_range)
1086 {
1087 set_first_syntax_error
1088 (_("invalid range in vector register list"));
1089 error = TRUE;
1090 }
1091 val_range++;
1092 }
1093 else
1094 {
1095 val_range = val;
1096 if (nb_regs == 0)
1097 typeinfo_first = typeinfo;
1098 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1099 {
1100 set_first_syntax_error
1101 (_("type mismatch in vector register list"));
1102 error = TRUE;
1103 }
1104 }
1105 if (! error)
1106 for (i = val_range; i <= val; i++)
1107 {
1108 ret_val |= i << (5 * nb_regs);
1109 nb_regs++;
1110 }
1111 in_range = 0;
1112 }
1113 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1114
1115 skip_whitespace (str);
1116 if (*str != '}')
1117 {
1118 set_first_syntax_error (_("end of vector register list not found"));
1119 error = TRUE;
1120 }
1121 str++;
1122
1123 skip_whitespace (str);
1124
1125 if (expect_index)
1126 {
1127 if (skip_past_char (&str, '['))
1128 {
1129 expressionS exp;
1130
1131 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1132 if (exp.X_op != O_constant)
1133 {
1134 set_first_syntax_error (_("constant expression required."));
1135 error = TRUE;
1136 }
1137 if (! skip_past_char (&str, ']'))
1138 error = TRUE;
1139 else
1140 typeinfo_first.index = exp.X_add_number;
1141 }
1142 else
1143 {
1144 set_first_syntax_error (_("expected index"));
1145 error = TRUE;
1146 }
1147 }
1148
1149 if (nb_regs > 4)
1150 {
1151 set_first_syntax_error (_("too many registers in vector register list"));
1152 error = TRUE;
1153 }
1154 else if (nb_regs == 0)
1155 {
1156 set_first_syntax_error (_("empty vector register list"));
1157 error = TRUE;
1158 }
1159
1160 *ccp = str;
1161 if (! error)
1162 *vectype = typeinfo_first;
1163
1164 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1165 }
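/* Worked example (illustrative): "{v4.4s, v5.4s}" contains two
   registers, so the return value is (4 << 2) | (5 << 7) | 1: the
   num_of_reg field in bits 0-1 holds 1 (meaning two registers), the
   first register number sits in bits 2-6 and the second in bits 7-11,
   while *VECTYPE records the shared .4s shape.  */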
1166
1167 /* Directives: register aliases. */
1168
1169 static reg_entry *
1170 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1171 {
1172 reg_entry *new;
1173 const char *name;
1174
1175 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1176 {
1177 if (new->builtin)
1178 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1179 str);
1180
1181 /* Only warn about a redefinition if it's not defined as the
1182 same register. */
1183 else if (new->number != number || new->type != type)
1184 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1185
1186 return NULL;
1187 }
1188
1189 name = xstrdup (str);
1190 new = xmalloc (sizeof (reg_entry));
1191
1192 new->name = name;
1193 new->number = number;
1194 new->type = type;
1195 new->builtin = FALSE;
1196
1197 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1198 abort ();
1199
1200 return new;
1201 }
1202
1203 /* Look for the .req directive. This is of the form:
1204
1205 new_register_name .req existing_register_name
1206
1207 If we find one, or if it looks sufficiently like one that we want to
1208 handle any error here, return TRUE. Otherwise return FALSE. */
1209
1210 static bfd_boolean
1211 create_register_alias (char *newname, char *p)
1212 {
1213 const reg_entry *old;
1214 char *oldname, *nbuf;
1215 size_t nlen;
1216
1217 /* The input scrubber ensures that whitespace after the mnemonic is
1218 collapsed to single spaces. */
1219 oldname = p;
1220 if (strncmp (oldname, " .req ", 6) != 0)
1221 return FALSE;
1222
1223 oldname += 6;
1224 if (*oldname == '\0')
1225 return FALSE;
1226
1227 old = hash_find (aarch64_reg_hsh, oldname);
1228 if (!old)
1229 {
1230 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1231 return TRUE;
1232 }
1233
1234 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1235 the desired alias name, and p points to its end. If not, then
1236 the desired alias name is in the global original_case_string. */
1237 #ifdef TC_CASE_SENSITIVE
1238 nlen = p - newname;
1239 #else
1240 newname = original_case_string;
1241 nlen = strlen (newname);
1242 #endif
1243
1244 nbuf = alloca (nlen + 1);
1245 memcpy (nbuf, newname, nlen);
1246 nbuf[nlen] = '\0';
1247
1248 /* Create aliases under the new name as stated; an all-lowercase
1249 version of the new name; and an all-uppercase version of the new
1250 name. */
1251 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1252 {
1253 for (p = nbuf; *p; p++)
1254 *p = TOUPPER (*p);
1255
1256 if (strncmp (nbuf, newname, nlen))
1257 {
1258 /* If this attempt to create an additional alias fails, do not bother
1259 trying to create the all-lower case alias. We will fail and issue
1260 a second, duplicate error message. This situation arises when the
1261 programmer does something like:
1262 foo .req r0
1263 Foo .req r1
1264 The second .req creates the "Foo" alias but then fails to create
1265 the artificial FOO alias because it has already been created by the
1266 first .req. */
1267 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1268 return TRUE;
1269 }
1270
1271 for (p = nbuf; *p; p++)
1272 *p = TOLOWER (*p);
1273
1274 if (strncmp (nbuf, newname, nlen))
1275 insert_reg_alias (nbuf, old->number, old->type);
1276 }
1277
1278 return TRUE;
1279 }
1280
1281 /* Should never be called, as .req goes between the alias and the
1282 register name, not at the beginning of the line. */
1283 static void
1284 s_req (int a ATTRIBUTE_UNUSED)
1285 {
1286 as_bad (_("invalid syntax for .req directive"));
1287 }
1288
1289 /* The .unreq directive deletes an alias which was previously defined
1290 by .req. For example:
1291
1292 my_alias .req r11
1293 .unreq my_alias */
1294
1295 static void
1296 s_unreq (int a ATTRIBUTE_UNUSED)
1297 {
1298 char *name;
1299 char saved_char;
1300
1301 name = input_line_pointer;
1302
1303 while (*input_line_pointer != 0
1304 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1305 ++input_line_pointer;
1306
1307 saved_char = *input_line_pointer;
1308 *input_line_pointer = 0;
1309
1310 if (!*name)
1311 as_bad (_("invalid syntax for .unreq directive"));
1312 else
1313 {
1314 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1315
1316 if (!reg)
1317 as_bad (_("unknown register alias '%s'"), name);
1318 else if (reg->builtin)
1319 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1320 name);
1321 else
1322 {
1323 char *p;
1324 char *nbuf;
1325
1326 hash_delete (aarch64_reg_hsh, name, FALSE);
1327 free ((char *) reg->name);
1328 free (reg);
1329
1330 /* Also locate the all upper case and all lower case versions.
1331 Do not complain if we cannot find one or the other as it
1332 was probably deleted above. */
1333
1334 nbuf = strdup (name);
1335 for (p = nbuf; *p; p++)
1336 *p = TOUPPER (*p);
1337 reg = hash_find (aarch64_reg_hsh, nbuf);
1338 if (reg)
1339 {
1340 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1341 free ((char *) reg->name);
1342 free (reg);
1343 }
1344
1345 for (p = nbuf; *p; p++)
1346 *p = TOLOWER (*p);
1347 reg = hash_find (aarch64_reg_hsh, nbuf);
1348 if (reg)
1349 {
1350 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1351 free ((char *) reg->name);
1352 free (reg);
1353 }
1354
1355 free (nbuf);
1356 }
1357 }
1358
1359 *input_line_pointer = saved_char;
1360 demand_empty_rest_of_line ();
1361 }
1362
1363 /* Directives: Instruction set selection. */
1364
1365 #ifdef OBJ_ELF
1366 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1367 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1368 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1369 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1370
1371 /* Create a new mapping symbol for the transition to STATE. */
1372
1373 static void
1374 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1375 {
1376 symbolS *symbolP;
1377 const char *symname;
1378 int type;
1379
1380 switch (state)
1381 {
1382 case MAP_DATA:
1383 symname = "$d";
1384 type = BSF_NO_FLAGS;
1385 break;
1386 case MAP_INSN:
1387 symname = "$x";
1388 type = BSF_NO_FLAGS;
1389 break;
1390 default:
1391 abort ();
1392 }
1393
1394 symbolP = symbol_new (symname, now_seg, value, frag);
1395 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1396
1397 /* Save the mapping symbols for future reference. Also check that
1398 we do not place two mapping symbols at the same offset within a
1399 frag. We'll handle overlap between frags in
1400 check_mapping_symbols.
1401
1402 If .fill or another data filling directive generates zero sized data,
1403 the mapping symbol for the following code will have the same value
1404 as the one generated for the data filling directive. In this case,
1405 we replace the old symbol with the new one at the same address. */
1406 if (value == 0)
1407 {
1408 if (frag->tc_frag_data.first_map != NULL)
1409 {
1410 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1411 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1412 &symbol_lastP);
1413 }
1414 frag->tc_frag_data.first_map = symbolP;
1415 }
1416 if (frag->tc_frag_data.last_map != NULL)
1417 {
1418 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1419 S_GET_VALUE (symbolP));
1420 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1421 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1422 &symbol_lastP);
1423 }
1424 frag->tc_frag_data.last_map = symbolP;
1425 }
1426
1427 /* We must sometimes convert a region marked as code to data during
1428 code alignment, if an odd number of bytes have to be padded. The
1429 code mapping symbol is pushed to an aligned address. */
1430
1431 static void
1432 insert_data_mapping_symbol (enum mstate state,
1433 valueT value, fragS * frag, offsetT bytes)
1434 {
1435 /* If there was already a mapping symbol, remove it. */
1436 if (frag->tc_frag_data.last_map != NULL
1437 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1438 frag->fr_address + value)
1439 {
1440 symbolS *symp = frag->tc_frag_data.last_map;
1441
1442 if (value == 0)
1443 {
1444 know (frag->tc_frag_data.first_map == symp);
1445 frag->tc_frag_data.first_map = NULL;
1446 }
1447 frag->tc_frag_data.last_map = NULL;
1448 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1449 }
1450
1451 make_mapping_symbol (MAP_DATA, value, frag);
1452 make_mapping_symbol (state, value + bytes, frag);
1453 }
1454
1455 static void mapping_state_2 (enum mstate state, int max_chars);
1456
1457 /* Set the mapping state to STATE. Only call this when about to
1458 emit some STATE bytes to the file. */
1459
1460 void
1461 mapping_state (enum mstate state)
1462 {
1463 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1464
1465 if (state == MAP_INSN)
1466 /* AArch64 instructions require 4-byte alignment. When emitting
1467 instructions into any section, record the appropriate section
1468 alignment. */
1469 record_alignment (now_seg, 2);
1470
1471 if (mapstate == state)
1472 /* The mapping symbol has already been emitted.
1473 There is nothing else to do. */
1474 return;
1475
1476 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1477 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1478 /* Only emit MAP_DATA immediately within an executable section; in other
1479 sections the transition will be evaluated later by the next else branch. */
1480 return;
1481 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1482 {
1483 /* Only add the symbol if the offset is > 0:
1484 if we're at the first frag, check its size > 0;
1485 if we're not at the first frag, then for sure
1486 the offset is > 0. */
1487 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1488 const int add_symbol = (frag_now != frag_first)
1489 || (frag_now_fix () > 0);
1490
1491 if (add_symbol)
1492 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1493 }
1494 #undef TRANSITION
1495
1496 mapping_state_2 (state, 0);
1497 }
1498
1499 /* Same as mapping_state, but MAX_CHARS bytes have already been
1500 allocated. Put the mapping symbol that far back. */
1501
1502 static void
1503 mapping_state_2 (enum mstate state, int max_chars)
1504 {
1505 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1506
1507 if (!SEG_NORMAL (now_seg))
1508 return;
1509
1510 if (mapstate == state)
1511 /* The mapping symbol has already been emitted.
1512 There is nothing else to do. */
1513 return;
1514
1515 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1516 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1517 }
1518 #else
1519 #define mapping_state(x) /* nothing */
1520 #define mapping_state_2(x, y) /* nothing */
1521 #endif
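/* For illustration, assembling a fragment such as

       .text
       add    x0, x0, #1
       .word  0x12345678
       add    x1, x1, #1

   emits a "$x" mapping symbol at the first ADD, a "$d" symbol at the
   .word data and another "$x" where code resumes; disassemblers rely on
   these symbols to tell instructions from embedded data.  */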
1522
1523 /* Directives: sectioning and alignment. */
1524
1525 static void
1526 s_bss (int ignore ATTRIBUTE_UNUSED)
1527 {
1528 /* We don't support putting frags in the BSS segment; we fake it by
1529 marking in_bss, then looking at s_skip for clues. */
1530 subseg_set (bss_section, 0);
1531 demand_empty_rest_of_line ();
1532 mapping_state (MAP_DATA);
1533 }
1534
1535 static void
1536 s_even (int ignore ATTRIBUTE_UNUSED)
1537 {
1538 /* Never make frag if expect extra pass. */
1539 if (!need_pass_2)
1540 frag_align (1, 0, 0);
1541
1542 record_alignment (now_seg, 1);
1543
1544 demand_empty_rest_of_line ();
1545 }
1546
1547 /* Directives: Literal pools. */
1548
1549 static literal_pool *
1550 find_literal_pool (int size)
1551 {
1552 literal_pool *pool;
1553
1554 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1555 {
1556 if (pool->section == now_seg
1557 && pool->sub_section == now_subseg && pool->size == size)
1558 break;
1559 }
1560
1561 return pool;
1562 }
1563
1564 static literal_pool *
1565 find_or_make_literal_pool (int size)
1566 {
1567 /* Next literal pool ID number. */
1568 static unsigned int latest_pool_num = 1;
1569 literal_pool *pool;
1570
1571 pool = find_literal_pool (size);
1572
1573 if (pool == NULL)
1574 {
1575 /* Create a new pool. */
1576 pool = xmalloc (sizeof (*pool));
1577 if (!pool)
1578 return NULL;
1579
1580 /* Currently we always put the literal pool in the current text
1581 section. If we were generating "small" model code where we
1582 knew that all code and initialised data was within 1MB then
1583 we could output literals to mergeable, read-only data
1584 sections. */
1585
1586 pool->next_free_entry = 0;
1587 pool->section = now_seg;
1588 pool->sub_section = now_subseg;
1589 pool->size = size;
1590 pool->next = list_of_pools;
1591 pool->symbol = NULL;
1592
1593 /* Add it to the list. */
1594 list_of_pools = pool;
1595 }
1596
1597 /* New pools, and emptied pools, will have a NULL symbol. */
1598 if (pool->symbol == NULL)
1599 {
1600 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1601 (valueT) 0, &zero_address_frag);
1602 pool->id = latest_pool_num++;
1603 }
1604
1605 /* Done. */
1606 return pool;
1607 }
1608
1609 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1610 Return TRUE on success, otherwise return FALSE. */
1611 static bfd_boolean
1612 add_to_lit_pool (expressionS *exp, int size)
1613 {
1614 literal_pool *pool;
1615 unsigned int entry;
1616
1617 pool = find_or_make_literal_pool (size);
1618
1619 /* Check if this literal value is already in the pool. */
1620 for (entry = 0; entry < pool->next_free_entry; entry++)
1621 {
1622 expressionS * litexp = & pool->literals[entry].exp;
1623
1624 if ((litexp->X_op == exp->X_op)
1625 && (exp->X_op == O_constant)
1626 && (litexp->X_add_number == exp->X_add_number)
1627 && (litexp->X_unsigned == exp->X_unsigned))
1628 break;
1629
1630 if ((litexp->X_op == exp->X_op)
1631 && (exp->X_op == O_symbol)
1632 && (litexp->X_add_number == exp->X_add_number)
1633 && (litexp->X_add_symbol == exp->X_add_symbol)
1634 && (litexp->X_op_symbol == exp->X_op_symbol))
1635 break;
1636 }
1637
1638 /* Do we need to create a new entry? */
1639 if (entry == pool->next_free_entry)
1640 {
1641 if (entry >= MAX_LITERAL_POOL_SIZE)
1642 {
1643 set_syntax_error (_("literal pool overflow"));
1644 return FALSE;
1645 }
1646
1647 pool->literals[entry].exp = *exp;
1648 pool->next_free_entry += 1;
1649 if (exp->X_op == O_big)
1650 {
1651 /* PR 16688: Bignums are held in a single global array. We must
1652 copy and preserve that value now, before it is overwritten. */
1653 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1654 memcpy (pool->literals[entry].bignum, generic_bignum,
1655 CHARS_PER_LITTLENUM * exp->X_add_number);
1656 }
1657 else
1658 pool->literals[entry].bignum = NULL;
1659 }
1660
1661 exp->X_op = O_symbol;
1662 exp->X_add_number = ((int) entry) * size;
1663 exp->X_add_symbol = pool->symbol;
1664
1665 return TRUE;
1666 }
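/* For illustration, this pool machinery backs the "ldr Rt, =immediate"
   pseudo-instruction: a sequence such as

       ldr    x0, =0x1122334455667788
       ...
       .ltorg

   records the 64-bit constant once in the pool emitted at the .ltorg
   (or .pool) directive and turns the LDR operand into a PC-relative
   literal load of that pool entry.  */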
1667
1668 /* Can't use symbol_new here, so have to create a symbol and then at
1669 a later date assign it a value. That's what these functions do. */
1670
1671 static void
1672 symbol_locate (symbolS * symbolP,
1673 const char *name,/* It is copied, the caller can modify. */
1674 segT segment, /* Segment identifier (SEG_<something>). */
1675 valueT valu, /* Symbol value. */
1676 fragS * frag) /* Associated fragment. */
1677 {
1678 size_t name_length;
1679 char *preserved_copy_of_name;
1680
1681 name_length = strlen (name) + 1; /* +1 for \0. */
1682 obstack_grow (&notes, name, name_length);
1683 preserved_copy_of_name = obstack_finish (&notes);
1684
1685 #ifdef tc_canonicalize_symbol_name
1686 preserved_copy_of_name =
1687 tc_canonicalize_symbol_name (preserved_copy_of_name);
1688 #endif
1689
1690 S_SET_NAME (symbolP, preserved_copy_of_name);
1691
1692 S_SET_SEGMENT (symbolP, segment);
1693 S_SET_VALUE (symbolP, valu);
1694 symbol_clear_list_pointers (symbolP);
1695
1696 symbol_set_frag (symbolP, frag);
1697
1698 /* Link to end of symbol chain. */
1699 {
1700 extern int symbol_table_frozen;
1701
1702 if (symbol_table_frozen)
1703 abort ();
1704 }
1705
1706 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1707
1708 obj_symbol_new_hook (symbolP);
1709
1710 #ifdef tc_symbol_new_hook
1711 tc_symbol_new_hook (symbolP);
1712 #endif
1713
1714 #ifdef DEBUG_SYMS
1715 verify_symbol_chain (symbol_rootP, symbol_lastP);
1716 #endif /* DEBUG_SYMS */
1717 }
1718
1719
1720 static void
1721 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1722 {
1723 unsigned int entry;
1724 literal_pool *pool;
1725 char sym_name[20];
1726 int align;
1727
1728 for (align = 2; align <= 4; align++)
1729 {
1730 int size = 1 << align;
1731
1732 pool = find_literal_pool (size);
1733 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1734 continue;
1735
1736 mapping_state (MAP_DATA);
1737
1738 /* Align the pool to the size of its entries.
1739 Only make a frag if we have to. */
1740 if (!need_pass_2)
1741 frag_align (align, 0, 0);
1742
1743 record_alignment (now_seg, align);
1744
1745 sprintf (sym_name, "$$lit_\002%x", pool->id);
1746
1747 symbol_locate (pool->symbol, sym_name, now_seg,
1748 (valueT) frag_now_fix (), frag_now);
1749 symbol_table_insert (pool->symbol);
1750
1751 for (entry = 0; entry < pool->next_free_entry; entry++)
1752 {
1753 expressionS * exp = & pool->literals[entry].exp;
1754
1755 if (exp->X_op == O_big)
1756 {
1757 /* PR 16688: Restore the global bignum value. */
1758 gas_assert (pool->literals[entry].bignum != NULL);
1759 memcpy (generic_bignum, pool->literals[entry].bignum,
1760 CHARS_PER_LITTLENUM * exp->X_add_number);
1761 }
1762
1763 /* First output the expression in the instruction to the pool. */
1764 emit_expr (exp, size); /* .word|.xword */
1765
1766 if (exp->X_op == O_big)
1767 {
1768 free (pool->literals[entry].bignum);
1769 pool->literals[entry].bignum = NULL;
1770 }
1771 }
1772
1773 /* Mark the pool as empty. */
1774 pool->next_free_entry = 0;
1775 pool->symbol = NULL;
1776 }
1777 }
1778
1779 #ifdef OBJ_ELF
1780 /* Forward declarations for functions below, in the MD interface
1781 section. */
1782 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1783 static struct reloc_table_entry * find_reloc_table_entry (char **);
1784
1785 /* Directives: Data. */
1786 /* N.B. the support for relocation suffix in this directive needs to be
1787 implemented properly. */
1788
1789 static void
1790 s_aarch64_elf_cons (int nbytes)
1791 {
1792 expressionS exp;
1793
1794 #ifdef md_flush_pending_output
1795 md_flush_pending_output ();
1796 #endif
1797
1798 if (is_it_end_of_statement ())
1799 {
1800 demand_empty_rest_of_line ();
1801 return;
1802 }
1803
1804 #ifdef md_cons_align
1805 md_cons_align (nbytes);
1806 #endif
1807
1808 mapping_state (MAP_DATA);
1809 do
1810 {
1811 struct reloc_table_entry *reloc;
1812
1813 expression (&exp);
1814
1815 if (exp.X_op != O_symbol)
1816 emit_expr (&exp, (unsigned int) nbytes);
1817 else
1818 {
1819 skip_past_char (&input_line_pointer, '#');
1820 if (skip_past_char (&input_line_pointer, ':'))
1821 {
1822 reloc = find_reloc_table_entry (&input_line_pointer);
1823 if (reloc == NULL)
1824 as_bad (_("unrecognized relocation suffix"));
1825 else
1826 as_bad (_("unimplemented relocation suffix"));
1827 ignore_rest_of_line ();
1828 return;
1829 }
1830 else
1831 emit_expr (&exp, (unsigned int) nbytes);
1832 }
1833 }
1834 while (*input_line_pointer++ == ',');
1835
1836 /* Put terminator back into stream. */
1837 input_line_pointer--;
1838 demand_empty_rest_of_line ();
1839 }
1840
1841 #endif /* OBJ_ELF */
1842
1843 /* Output a 32-bit word, but mark as an instruction. */
1844
1845 static void
1846 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1847 {
1848 expressionS exp;
1849
1850 #ifdef md_flush_pending_output
1851 md_flush_pending_output ();
1852 #endif
1853
1854 if (is_it_end_of_statement ())
1855 {
1856 demand_empty_rest_of_line ();
1857 return;
1858 }
1859
1860 /* Sections are assumed to start aligned. In an executable section, there is no
1861 MAP_DATA symbol pending. So we only align the address during
1862 MAP_DATA --> MAP_INSN transition.
1863 For other sections, this is not guaranteed. */
1864 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1865 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1866 frag_align_code (2, 0);
1867
1868 #ifdef OBJ_ELF
1869 mapping_state (MAP_INSN);
1870 #endif
1871
1872 do
1873 {
1874 expression (&exp);
1875 if (exp.X_op != O_constant)
1876 {
1877 as_bad (_("constant expression required"));
1878 ignore_rest_of_line ();
1879 return;
1880 }
1881
1882 if (target_big_endian)
1883 {
1884 unsigned int val = exp.X_add_number;
1885 exp.X_add_number = SWAP_32 (val);
1886 }
1887 emit_expr (&exp, 4);
1888 }
1889 while (*input_line_pointer++ == ',');
1890
1891 /* Put terminator back into stream. */
1892 input_line_pointer--;
1893 demand_empty_rest_of_line ();
1894 }
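/* For illustration, ".inst 0xd503201f" emits the 4-byte encoding of a
   NOP but, unlike ".word", marks it with an "$x" mapping symbol (and
   byte-swaps the value on big-endian targets) so that it is treated as
   code rather than data.  */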
1895
1896 #ifdef OBJ_ELF
1897 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1898
1899 static void
1900 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
1901 {
1902 expressionS exp;
1903
1904 expression (&exp);
1905 frag_grow (4);
1906 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1907 BFD_RELOC_AARCH64_TLSDESC_ADD);
1908
1909 demand_empty_rest_of_line ();
1910 }
1911
1912 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1913
1914 static void
1915 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1916 {
1917 expressionS exp;
1918
1919 /* Since we're just labelling the code, there's no need to define a
1920 mapping symbol. */
1921 expression (&exp);
1922 /* Make sure there is enough room in this frag for the following
1923 blr. This trick only works if the blr follows immediately after
1924 the .tlsdesccall directive. */
1925 frag_grow (4);
1926 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1927 BFD_RELOC_AARCH64_TLSDESC_CALL);
1928
1929 demand_empty_rest_of_line ();
1930 }
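/* For illustration, .tlsdesccall appears in the standard TLS descriptor
   sequence (the symbol name "var" is only an example):

       adrp   x0, :tlsdesc:var
       ldr    x1, [x0, #:tlsdesc_lo12:var]
       add    x0, x0, :tlsdesc_lo12:var
       .tlsdesccall var
       blr    x1

   The directive attaches BFD_RELOC_AARCH64_TLSDESC_CALL to the
   following BLR so that the linker can locate (and possibly relax) the
   descriptor call.  */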
1931
1932 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
1933
1934 static void
1935 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
1936 {
1937 expressionS exp;
1938
1939 expression (&exp);
1940 frag_grow (4);
1941 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1942 BFD_RELOC_AARCH64_TLSDESC_LDR);
1943
1944 demand_empty_rest_of_line ();
1945 }
1946 #endif /* OBJ_ELF */
1947
1948 static void s_aarch64_arch (int);
1949 static void s_aarch64_cpu (int);
1950 static void s_aarch64_arch_extension (int);
1951
1952 /* This table describes all the machine specific pseudo-ops the assembler
1953 has to support. The fields are:
1954 pseudo-op name without dot
1955 function to call to execute this pseudo-op
1956 Integer arg to pass to the function. */
1957
1958 const pseudo_typeS md_pseudo_table[] = {
1959 /* Never called because '.req' does not start a line. */
1960 {"req", s_req, 0},
1961 {"unreq", s_unreq, 0},
1962 {"bss", s_bss, 0},
1963 {"even", s_even, 0},
1964 {"ltorg", s_ltorg, 0},
1965 {"pool", s_ltorg, 0},
1966 {"cpu", s_aarch64_cpu, 0},
1967 {"arch", s_aarch64_arch, 0},
1968 {"arch_extension", s_aarch64_arch_extension, 0},
1969 {"inst", s_aarch64_inst, 0},
1970 #ifdef OBJ_ELF
1971 {"tlsdescadd", s_tlsdescadd, 0},
1972 {"tlsdesccall", s_tlsdesccall, 0},
1973 {"tlsdescldr", s_tlsdescldr, 0},
1974 {"word", s_aarch64_elf_cons, 4},
1975 {"long", s_aarch64_elf_cons, 4},
1976 {"xword", s_aarch64_elf_cons, 8},
1977 {"dword", s_aarch64_elf_cons, 8},
1978 #endif
1979 {0, 0, 0}
1980 };
1981 \f
1982
1983 /* Check whether STR points to a register name followed by a comma or the
1984 end of line; REG_TYPE indicates which register types are checked
1985 against. Return TRUE if STR is such a register name; otherwise return
1986 FALSE. The function does not intend to produce any diagnostics, but since
1987 the register parser aarch64_reg_parse, which is called by this function,
1988 does produce diagnostics, we call clear_error to clear any diagnostics
1989 that may be generated by aarch64_reg_parse.
1990 Also, the function returns FALSE directly if there is any user error
1991 present at the function entry. This prevents the existing diagnostics
1992 state from being spoiled.
1993 The function currently serves parse_constant_immediate and
1994 parse_big_immediate only. */
1995 static bfd_boolean
1996 reg_name_p (char *str, aarch64_reg_type reg_type)
1997 {
1998 int reg;
1999
2000 /* Prevent the diagnostics state from being spoiled. */
2001 if (error_p ())
2002 return FALSE;
2003
2004 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2005
2006 /* Clear the parsing error that may be set by the reg parser. */
2007 clear_error ();
2008
2009 if (reg == PARSE_FAIL)
2010 return FALSE;
2011
2012 skip_whitespace (str);
2013 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2014 return TRUE;
2015
2016 return FALSE;
2017 }
2018
2019 /* Parser functions used exclusively in instruction operands. */
2020
2021 /* Parse an immediate expression which may not be constant.
2022
2023 To prevent the expression parser from pushing a register name
2024 into the symbol table as an undefined symbol, firstly a check is
2025 done to find out whether STR is a valid register name followed
2026 by a comma or the end of line. Return FALSE if STR is such a
2027 string. */
2028
2029 static bfd_boolean
2030 parse_immediate_expression (char **str, expressionS *exp)
2031 {
2032 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
2033 {
2034 set_recoverable_error (_("immediate operand required"));
2035 return FALSE;
2036 }
2037
2038 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2039
2040 if (exp->X_op == O_absent)
2041 {
2042 set_fatal_syntax_error (_("missing immediate expression"));
2043 return FALSE;
2044 }
2045
2046 return TRUE;
2047 }
2048
2049 /* Constant immediate-value read function for use in insn parsing.
2050 STR points to the beginning of the immediate (with the optional
2051 leading #); *VAL receives the value.
2052
2053 Return TRUE on success; otherwise return FALSE. */
2054
2055 static bfd_boolean
2056 parse_constant_immediate (char **str, int64_t * val)
2057 {
2058 expressionS exp;
2059
2060 if (! parse_immediate_expression (str, &exp))
2061 return FALSE;
2062
2063 if (exp.X_op != O_constant)
2064 {
2065 set_syntax_error (_("constant expression required"));
2066 return FALSE;
2067 }
2068
2069 *val = exp.X_add_number;
2070 return TRUE;
2071 }
2072
2073 static uint32_t
2074 encode_imm_float_bits (uint32_t imm)
2075 {
2076 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2077 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2078 }
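
/* For illustration (worked examples, not used by the assembler itself):
   the IEEE754 single-precision encodings of 1.0f and -2.0f are 0x3f800000
   and 0xc0000000, so
     encode_imm_float_bits (0x3f800000) == 0x70
     encode_imm_float_bits (0xc0000000) == 0x80
   which are the corresponding FMOV (immediate) imm8 field values.  */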
2079
2080 /* Return TRUE if the single-precision floating-point value encoded in IMM
2081 can be expressed in the AArch64 8-bit signed floating-point format with
2082 3-bit exponent and normalized 4 bits of precision; in other words, the
2083 floating-point value must be expressible as
2084 (+/-) n / 16 * power (2, r)
2085 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2086
2087 static bfd_boolean
2088 aarch64_imm_float_p (uint32_t imm)
2089 {
2090 /* If a single-precision floating-point value has the following bit
2091 pattern, it can be expressed in the AArch64 8-bit floating-point
2092 format:
2093
2094 3 32222222 2221111111111
2095 1 09876543 21098765432109876543210
2096 n Eeeeeexx xxxx0000000000000000000
2097
2098 where n, e and each x are either 0 or 1 independently, with
2099 E == ~ e. */
2100
2101 uint32_t pattern;
2102
2103 /* Prepare the pattern for 'Eeeeee'. */
2104 if (((imm >> 30) & 0x1) == 0)
2105 pattern = 0x3e000000;
2106 else
2107 pattern = 0x40000000;
2108
2109 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2110 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2111 }
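
/* For example, 2.0f is encoded as 0x40000000: its lower 19 bits are zero
   and bits 25-29 (all zero) are the complement of bit 30 (one), so
   aarch64_imm_float_p returns TRUE.  In contrast, 0x40490fdb (roughly pi)
   has non-zero low bits and is rejected.  */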
2112
2113 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2114
2115 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2116 8-bit signed floating-point format with 3-bit exponent and normalized 4
2117 bits of precision (i.e. can be used in an FMOV instruction); return the
2118 equivalent single-precision encoding in *FPWORD.
2119
2120 Otherwise return FALSE. */
2121
2122 static bfd_boolean
2123 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2124 {
2125 /* If a double-precision floating-point value has the following bit
2126 pattern, it can be expressed in the AArch64 8-bit floating-point
2127 format:
2128
2129 6 66655555555 554444444...21111111111
2130 3 21098765432 109876543...098765432109876543210
2131 n Eeeeeeeeexx xxxx00000...000000000000000000000
2132
2133 where n, e and each x are either 0 or 1 independently, with
2134 E == ~ e. */
2135
2136 uint32_t pattern;
2137 uint32_t high32 = imm >> 32;
2138
2139 /* Lower 32 bits need to be 0s. */
2140 if ((imm & 0xffffffff) != 0)
2141 return FALSE;
2142
2143 /* Prepare the pattern for 'Eeeeeeeee'. */
2144 if (((high32 >> 30) & 0x1) == 0)
2145 pattern = 0x3fc00000;
2146 else
2147 pattern = 0x40000000;
2148
2149 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2150 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2151 {
2152 /* Convert to the single-precision encoding.
2153 i.e. convert
2154 n Eeeeeeeeexx xxxx00000...000000000000000000000
2155 to
2156 n Eeeeeexx xxxx0000000000000000000. */
2157 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2158 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2159 return TRUE;
2160 }
2161 else
2162 return FALSE;
2163 }
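
/* For example, the double-precision encoding of 1.0 is 0x3ff0000000000000;
   its low 32 bits are zero and the high word 0x3ff00000 matches the pattern
   above, so the function returns TRUE and stores the single-precision
   equivalent 0x3f800000 (1.0f) in *FPWORD.  */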
2164
2165 /* Parse a floating-point immediate. Return TRUE on success and return the
2166 value in *IMMED in the format of IEEE754 single-precision encoding.
2167 *CCP points to the start of the string; DP_P is TRUE when the immediate
2168 is expected to be in double-precision (N.B. this only matters when
2169 hexadecimal representation is involved).
2170
2171 N.B. 0.0 is accepted by this function. */
2172
2173 static bfd_boolean
2174 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2175 {
2176 char *str = *ccp;
2177 char *fpnum;
2178 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2179 int found_fpchar = 0;
2180 int64_t val = 0;
2181 unsigned fpword = 0;
2182 bfd_boolean hex_p = FALSE;
2183
2184 skip_past_char (&str, '#');
2185
2186 fpnum = str;
2187 skip_whitespace (fpnum);
2188
2189 if (strncmp (fpnum, "0x", 2) == 0)
2190 {
2191 /* Support the hexadecimal representation of the IEEE754 encoding.
2192 Double-precision is expected when DP_P is TRUE, otherwise the
2193 representation should be in single-precision. */
2194 if (! parse_constant_immediate (&str, &val))
2195 goto invalid_fp;
2196
2197 if (dp_p)
2198 {
2199 if (! aarch64_double_precision_fmovable (val, &fpword))
2200 goto invalid_fp;
2201 }
2202 else if ((uint64_t) val > 0xffffffff)
2203 goto invalid_fp;
2204 else
2205 fpword = val;
2206
2207 hex_p = TRUE;
2208 }
2209 else
2210 {
2211 /* We must not accidentally parse an integer as a floating-point number.
2212 Make sure that the value we parse is not an integer by checking for
2213 special characters '.' or 'e'. */
2214 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2215 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2216 {
2217 found_fpchar = 1;
2218 break;
2219 }
2220
2221 if (!found_fpchar)
2222 return FALSE;
2223 }
2224
2225 if (! hex_p)
2226 {
2227 int i;
2228
2229 if ((str = atof_ieee (str, 's', words)) == NULL)
2230 goto invalid_fp;
2231
2232 /* Our FP word must be 32 bits (single-precision FP). */
2233 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2234 {
2235 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2236 fpword |= words[i];
2237 }
2238 }
2239
2240 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2241 {
2242 *immed = fpword;
2243 *ccp = str;
2244 return TRUE;
2245 }
2246
2247 invalid_fp:
2248 set_fatal_syntax_error (_("invalid floating-point constant"));
2249 return FALSE;
2250 }
2251
2252 /* Less-generic immediate-value read function with the possibility of loading
2253 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2254 instructions.
2255
2256 To prevent the expression parser from pushing a register name into the
2257 symbol table as an undefined symbol, a check is first done to find
2258 out whether STR is a valid register name followed by a comma or the end
2259 of line. Return FALSE if STR is such a register. */
2260
2261 static bfd_boolean
2262 parse_big_immediate (char **str, int64_t *imm)
2263 {
2264 char *ptr = *str;
2265
2266 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2267 {
2268 set_syntax_error (_("immediate operand required"));
2269 return FALSE;
2270 }
2271
2272 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2273
2274 if (inst.reloc.exp.X_op == O_constant)
2275 *imm = inst.reloc.exp.X_add_number;
2276
2277 *str = ptr;
2278
2279 return TRUE;
2280 }
2281
2282 /* Set up *RELOC to mark the operand *OPERAND as needing a GAS internal
2283 fixup. If NEED_LIBOPCODES_P is non-zero, the fixup will need
2284 assistance from libopcodes. */
2285
2286 static inline void
2287 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2288 const aarch64_opnd_info *operand,
2289 int need_libopcodes_p)
2290 {
2291 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2292 reloc->opnd = operand->type;
2293 if (need_libopcodes_p)
2294 reloc->need_libopcodes_p = 1;
2295 }
2296
2297 /* Return TRUE if the instruction needs to be fixed up later internally by
2298 GAS; otherwise return FALSE. */
2299
2300 static inline bfd_boolean
2301 aarch64_gas_internal_fixup_p (void)
2302 {
2303 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2304 }
2305
2306 /* Assign the immediate value to the relevant field in *OPERAND if
2307 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2308 needs an internal fixup in a later stage.
2309 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2310 IMM.VALUE that may get assigned with the constant. */
2311 static inline void
2312 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2313 aarch64_opnd_info *operand,
2314 int addr_off_p,
2315 int need_libopcodes_p,
2316 int skip_p)
2317 {
2318 if (reloc->exp.X_op == O_constant)
2319 {
2320 if (addr_off_p)
2321 operand->addr.offset.imm = reloc->exp.X_add_number;
2322 else
2323 operand->imm.value = reloc->exp.X_add_number;
2324 reloc->type = BFD_RELOC_UNUSED;
2325 }
2326 else
2327 {
2328 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2329 /* Tell libopcodes to ignore this operand or not. This is helpful
2330 when one of the operands needs to be fixed up later but we need
2331 libopcodes to check the other operands. */
2332 operand->skip = skip_p;
2333 }
2334 }
2335
2336 /* Relocation modifiers. Each entry in the table contains the textual
2337 name for the relocation which may be placed before a symbol used as
2338 a load/store offset, or add immediate. It must be surrounded by a
2339 leading and trailing colon, for example:
2340
2341 ldr x0, [x1, #:rello:varsym]
2342 add x0, x1, #:rello:varsym */
2343
2344 struct reloc_table_entry
2345 {
2346 const char *name;
2347 int pc_rel;
2348 bfd_reloc_code_real_type adr_type;
2349 bfd_reloc_code_real_type adrp_type;
2350 bfd_reloc_code_real_type movw_type;
2351 bfd_reloc_code_real_type add_type;
2352 bfd_reloc_code_real_type ldst_type;
2353 bfd_reloc_code_real_type ld_literal_type;
2354 };
2355
2356 static struct reloc_table_entry reloc_table[] = {
2357 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2358 {"lo12", 0,
2359 0, /* adr_type */
2360 0,
2361 0,
2362 BFD_RELOC_AARCH64_ADD_LO12,
2363 BFD_RELOC_AARCH64_LDST_LO12,
2364 0},
2365
2366 /* Higher 21 bits of pc-relative page offset: ADRP */
2367 {"pg_hi21", 1,
2368 0, /* adr_type */
2369 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2370 0,
2371 0,
2372 0,
2373 0},
2374
2375 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2376 {"pg_hi21_nc", 1,
2377 0, /* adr_type */
2378 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2379 0,
2380 0,
2381 0,
2382 0},
2383
2384 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2385 {"abs_g0", 0,
2386 0, /* adr_type */
2387 0,
2388 BFD_RELOC_AARCH64_MOVW_G0,
2389 0,
2390 0,
2391 0},
2392
2393 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2394 {"abs_g0_s", 0,
2395 0, /* adr_type */
2396 0,
2397 BFD_RELOC_AARCH64_MOVW_G0_S,
2398 0,
2399 0,
2400 0},
2401
2402 /* Less significant bits 0-15 of address/value: MOVK, no check */
2403 {"abs_g0_nc", 0,
2404 0, /* adr_type */
2405 0,
2406 BFD_RELOC_AARCH64_MOVW_G0_NC,
2407 0,
2408 0,
2409 0},
2410
2411 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2412 {"abs_g1", 0,
2413 0, /* adr_type */
2414 0,
2415 BFD_RELOC_AARCH64_MOVW_G1,
2416 0,
2417 0,
2418 0},
2419
2420 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2421 {"abs_g1_s", 0,
2422 0, /* adr_type */
2423 0,
2424 BFD_RELOC_AARCH64_MOVW_G1_S,
2425 0,
2426 0,
2427 0},
2428
2429 /* Less significant bits 16-31 of address/value: MOVK, no check */
2430 {"abs_g1_nc", 0,
2431 0, /* adr_type */
2432 0,
2433 BFD_RELOC_AARCH64_MOVW_G1_NC,
2434 0,
2435 0,
2436 0},
2437
2438 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2439 {"abs_g2", 0,
2440 0, /* adr_type */
2441 0,
2442 BFD_RELOC_AARCH64_MOVW_G2,
2443 0,
2444 0,
2445 0},
2446
2447 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2448 {"abs_g2_s", 0,
2449 0, /* adr_type */
2450 0,
2451 BFD_RELOC_AARCH64_MOVW_G2_S,
2452 0,
2453 0,
2454 0},
2455
2456 /* Less significant bits 32-47 of address/value: MOVK, no check */
2457 {"abs_g2_nc", 0,
2458 0, /* adr_type */
2459 0,
2460 BFD_RELOC_AARCH64_MOVW_G2_NC,
2461 0,
2462 0,
2463 0},
2464
2465 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2466 {"abs_g3", 0,
2467 0, /* adr_type */
2468 0,
2469 BFD_RELOC_AARCH64_MOVW_G3,
2470 0,
2471 0,
2472 0},
2473
2474 /* Get to the page containing GOT entry for a symbol. */
2475 {"got", 1,
2476 0, /* adr_type */
2477 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2478 0,
2479 0,
2480 0,
2481 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2482
2483 /* 12 bit offset into the page containing GOT entry for that symbol. */
2484 {"got_lo12", 0,
2485 0, /* adr_type */
2486 0,
2487 0,
2488 0,
2489 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2490 0},
2491
2492 /* Bits 0-15 of address/value: MOVK, no check. */
2493 {"gotoff_g0_nc", 0,
2494 0, /* adr_type */
2495 0,
2496 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2497 0,
2498 0,
2499 0},
2500
2501 /* Most significant bits 16-31 of address/value: MOVZ. */
2502 {"gotoff_g1", 0,
2503 0, /* adr_type */
2504 0,
2505 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2506 0,
2507 0,
2508 0},
2509
2510 /* 15 bit offset into the page containing GOT entry for that symbol. */
2511 {"gotoff_lo15", 0,
2512 0, /* adr_type */
2513 0,
2514 0,
2515 0,
2516 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2517 0},
2518
2519 /* Get to the page containing GOT TLS entry for a symbol */
2520 {"gottprel_g0_nc", 0,
2521 0, /* adr_type */
2522 0,
2523 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2524 0,
2525 0,
2526 0},
2527
2528 /* Get to the page containing GOT TLS entry for a symbol */
2529 {"gottprel_g1", 0,
2530 0, /* adr_type */
2531 0,
2532 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2533 0,
2534 0,
2535 0},
2536
2537 /* Get to the page containing GOT TLS entry for a symbol */
2538 {"tlsgd", 0,
2539 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2540 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2541 0,
2542 0,
2543 0,
2544 0},
2545
2546 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2547 {"tlsgd_lo12", 0,
2548 0, /* adr_type */
2549 0,
2550 0,
2551 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2552 0,
2553 0},
2554
2555 /* Lower 16 bits of address/value: MOVK. */
2556 {"tlsgd_g0_nc", 0,
2557 0, /* adr_type */
2558 0,
2559 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2560 0,
2561 0,
2562 0},
2563
2564 /* Most significant bits 16-31 of address/value: MOVZ. */
2565 {"tlsgd_g1", 0,
2566 0, /* adr_type */
2567 0,
2568 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2569 0,
2570 0,
2571 0},
2572
2573 /* Get to the page containing GOT TLS entry for a symbol */
2574 {"tlsdesc", 0,
2575 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2576 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2577 0,
2578 0,
2579 0,
2580 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2581
2582 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2583 {"tlsdesc_lo12", 0,
2584 0, /* adr_type */
2585 0,
2586 0,
2587 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2588 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2589 0},
2590
2591 /* Get to the page containing GOT TLS entry for a symbol.
2592 As with GD, we allocate two consecutive GOT slots
2593 for module index and module offset; the only difference
2594 from GD is that the module offset should be initialized to
2595 zero without any outstanding runtime relocation. */
2596 {"tlsldm", 0,
2597 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2598 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2599 0,
2600 0,
2601 0,
2602 0},
2603
2604 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2605 {"tlsldm_lo12_nc", 0,
2606 0, /* adr_type */
2607 0,
2608 0,
2609 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2610 0,
2611 0},
2612
2613 /* 12 bit offset into the module TLS base address. */
2614 {"dtprel_lo12", 0,
2615 0, /* adr_type */
2616 0,
2617 0,
2618 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2619 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2620 0},
2621
2622 /* Same as dtprel_lo12, no overflow check. */
2623 {"dtprel_lo12_nc", 0,
2624 0, /* adr_type */
2625 0,
2626 0,
2627 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2628 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2629 0},
2630
2631 /* bits[23:12] of offset to the module TLS base address. */
2632 {"dtprel_hi12", 0,
2633 0, /* adr_type */
2634 0,
2635 0,
2636 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2637 0,
2638 0},
2639
2640 /* bits[15:0] of offset to the module TLS base address. */
2641 {"dtprel_g0", 0,
2642 0, /* adr_type */
2643 0,
2644 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2645 0,
2646 0,
2647 0},
2648
2649 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2650 {"dtprel_g0_nc", 0,
2651 0, /* adr_type */
2652 0,
2653 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2654 0,
2655 0,
2656 0},
2657
2658 /* bits[31:16] of offset to the module TLS base address. */
2659 {"dtprel_g1", 0,
2660 0, /* adr_type */
2661 0,
2662 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2663 0,
2664 0,
2665 0},
2666
2667 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2668 {"dtprel_g1_nc", 0,
2669 0, /* adr_type */
2670 0,
2671 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2672 0,
2673 0,
2674 0},
2675
2676 /* bits[47:32] of offset to the module TLS base address. */
2677 {"dtprel_g2", 0,
2678 0, /* adr_type */
2679 0,
2680 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2681 0,
2682 0,
2683 0},
2684
2685 /* Lower 16 bit offset into GOT entry for a symbol */
2686 {"tlsdesc_off_g0_nc", 0,
2687 0, /* adr_type */
2688 0,
2689 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2690 0,
2691 0,
2692 0},
2693
2694 /* Higher 16 bit offset into GOT entry for a symbol */
2695 {"tlsdesc_off_g1", 0,
2696 0, /* adr_type */
2697 0,
2698 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2699 0,
2700 0,
2701 0},
2702
2703 /* Get to the page containing GOT TLS entry for a symbol */
2704 {"gottprel", 0,
2705 0, /* adr_type */
2706 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2707 0,
2708 0,
2709 0,
2710 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2711
2712 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2713 {"gottprel_lo12", 0,
2714 0, /* adr_type */
2715 0,
2716 0,
2717 0,
2718 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2719 0},
2720
2721 /* Get tp offset for a symbol. */
2722 {"tprel", 0,
2723 0, /* adr_type */
2724 0,
2725 0,
2726 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2727 0,
2728 0},
2729
2730 /* Get tp offset for a symbol. */
2731 {"tprel_lo12", 0,
2732 0, /* adr_type */
2733 0,
2734 0,
2735 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2736 0,
2737 0},
2738
2739 /* Get tp offset for a symbol. */
2740 {"tprel_hi12", 0,
2741 0, /* adr_type */
2742 0,
2743 0,
2744 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2745 0,
2746 0},
2747
2748 /* Get tp offset for a symbol. */
2749 {"tprel_lo12_nc", 0,
2750 0, /* adr_type */
2751 0,
2752 0,
2753 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2754 0,
2755 0},
2756
2757 /* Most significant bits 32-47 of address/value: MOVZ. */
2758 {"tprel_g2", 0,
2759 0, /* adr_type */
2760 0,
2761 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2762 0,
2763 0,
2764 0},
2765
2766 /* Most significant bits 16-31 of address/value: MOVZ. */
2767 {"tprel_g1", 0,
2768 0, /* adr_type */
2769 0,
2770 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2771 0,
2772 0,
2773 0},
2774
2775 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2776 {"tprel_g1_nc", 0,
2777 0, /* adr_type */
2778 0,
2779 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2780 0,
2781 0,
2782 0},
2783
2784 /* Most significant bits 0-15 of address/value: MOVZ. */
2785 {"tprel_g0", 0,
2786 0, /* adr_type */
2787 0,
2788 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2789 0,
2790 0,
2791 0},
2792
2793 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2794 {"tprel_g0_nc", 0,
2795 0, /* adr_type */
2796 0,
2797 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2798 0,
2799 0,
2800 0},
2801
2802 /* 15-bit offset from GOT entry to base address of GOT table. */
2803 {"gotpage_lo15", 0,
2804 0,
2805 0,
2806 0,
2807 0,
2808 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2809 0},
2810
2811 /* 14-bit offset from GOT entry to base address of GOT table. */
2812 {"gotpage_lo14", 0,
2813 0,
2814 0,
2815 0,
2816 0,
2817 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2818 0},
2819 };
2820
2821 /* Given the address of a pointer pointing to the textual name of a
2822 relocation as may appear in assembler source, attempt to find its
2823 details in reloc_table. The pointer will be updated to the character
2824 after the trailing colon. On failure, NULL will be returned;
2825 otherwise return the reloc_table_entry. */
2826
2827 static struct reloc_table_entry *
2828 find_reloc_table_entry (char **str)
2829 {
2830 unsigned int i;
2831 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2832 {
2833 int length = strlen (reloc_table[i].name);
2834
2835 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2836 && (*str)[length] == ':')
2837 {
2838 *str += (length + 1);
2839 return &reloc_table[i];
2840 }
2841 }
2842
2843 return NULL;
2844 }
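
/* For example, given a pointer to the string "lo12:sym", the function above
   matches the "lo12" entry, advances the pointer past the trailing colon so
   that it points at "sym", and returns the entry whose add_type and
   ldst_type are BFD_RELOC_AARCH64_ADD_LO12 and BFD_RELOC_AARCH64_LDST_LO12
   respectively.  */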
2845
2846 /* Mode argument to parse_shift and parse_shifter_operand. */
2847 enum parse_shift_mode
2848 {
2849 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2850 "#imm{,lsl #n}" */
2851 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2852 "#imm" */
2853 SHIFTED_LSL, /* bare "lsl #n" */
2854 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2855 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2856 };
2857
2858 /* Parse a <shift> operator on an AArch64 data processing instruction.
2859 Return TRUE on success; otherwise return FALSE. */
2860 static bfd_boolean
2861 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2862 {
2863 const struct aarch64_name_value_pair *shift_op;
2864 enum aarch64_modifier_kind kind;
2865 expressionS exp;
2866 int exp_has_prefix;
2867 char *s = *str;
2868 char *p = s;
2869
2870 for (p = *str; ISALPHA (*p); p++)
2871 ;
2872
2873 if (p == *str)
2874 {
2875 set_syntax_error (_("shift expression expected"));
2876 return FALSE;
2877 }
2878
2879 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2880
2881 if (shift_op == NULL)
2882 {
2883 set_syntax_error (_("shift operator expected"));
2884 return FALSE;
2885 }
2886
2887 kind = aarch64_get_operand_modifier (shift_op);
2888
2889 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2890 {
2891 set_syntax_error (_("invalid use of 'MSL'"));
2892 return FALSE;
2893 }
2894
2895 switch (mode)
2896 {
2897 case SHIFTED_LOGIC_IMM:
2898 if (aarch64_extend_operator_p (kind) == TRUE)
2899 {
2900 set_syntax_error (_("extending shift is not permitted"));
2901 return FALSE;
2902 }
2903 break;
2904
2905 case SHIFTED_ARITH_IMM:
2906 if (kind == AARCH64_MOD_ROR)
2907 {
2908 set_syntax_error (_("'ROR' shift is not permitted"));
2909 return FALSE;
2910 }
2911 break;
2912
2913 case SHIFTED_LSL:
2914 if (kind != AARCH64_MOD_LSL)
2915 {
2916 set_syntax_error (_("only 'LSL' shift is permitted"));
2917 return FALSE;
2918 }
2919 break;
2920
2921 case SHIFTED_REG_OFFSET:
2922 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2923 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2924 {
2925 set_fatal_syntax_error
2926 (_("invalid shift for the register offset addressing mode"));
2927 return FALSE;
2928 }
2929 break;
2930
2931 case SHIFTED_LSL_MSL:
2932 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2933 {
2934 set_syntax_error (_("invalid shift operator"));
2935 return FALSE;
2936 }
2937 break;
2938
2939 default:
2940 abort ();
2941 }
2942
2943 /* Whitespace can appear here if the next thing is a bare digit. */
2944 skip_whitespace (p);
2945
2946 /* Parse shift amount. */
2947 exp_has_prefix = 0;
2948 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2949 exp.X_op = O_absent;
2950 else
2951 {
2952 if (is_immediate_prefix (*p))
2953 {
2954 p++;
2955 exp_has_prefix = 1;
2956 }
2957 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2958 }
2959 if (exp.X_op == O_absent)
2960 {
2961 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2962 {
2963 set_syntax_error (_("missing shift amount"));
2964 return FALSE;
2965 }
2966 operand->shifter.amount = 0;
2967 }
2968 else if (exp.X_op != O_constant)
2969 {
2970 set_syntax_error (_("constant shift amount required"));
2971 return FALSE;
2972 }
2973 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2974 {
2975 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2976 return FALSE;
2977 }
2978 else
2979 {
2980 operand->shifter.amount = exp.X_add_number;
2981 operand->shifter.amount_present = 1;
2982 }
2983
2984 operand->shifter.operator_present = 1;
2985 operand->shifter.kind = kind;
2986
2987 *str = p;
2988 return TRUE;
2989 }
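
/* Examples of inputs accepted by parse_shift, depending on MODE:
     SHIFTED_LSL         "lsl #12"
     SHIFTED_LSL_MSL     "msl #8"
     SHIFTED_REG_OFFSET  "sxtw #2", "lsl #3", or a bare "sxtw"
   The shift amount, when present, must be a constant in the range 0 to 63.  */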
2990
2991 /* Parse a <shifter_operand> for a data processing instruction:
2992
2993 #<immediate>
2994 #<immediate>, LSL #imm
2995
2996 Validation of immediate operands is deferred to md_apply_fix.
2997
2998 Return TRUE on success; otherwise return FALSE. */
2999
3000 static bfd_boolean
3001 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3002 enum parse_shift_mode mode)
3003 {
3004 char *p;
3005
3006 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3007 return FALSE;
3008
3009 p = *str;
3010
3011 /* Accept an immediate expression. */
3012 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3013 return FALSE;
3014
3015 /* Accept optional LSL for arithmetic immediate values. */
3016 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3017 if (! parse_shift (&p, operand, SHIFTED_LSL))
3018 return FALSE;
3019
3020 /* Do not accept any shifter for logical immediate values. */
3021 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3022 && parse_shift (&p, operand, mode))
3023 {
3024 set_syntax_error (_("unexpected shift operator"));
3025 return FALSE;
3026 }
3027
3028 *str = p;
3029 return TRUE;
3030 }
3031
3032 /* Parse a <shifter_operand> for a data processing instruction:
3033
3034 <Rm>
3035 <Rm>, <shift>
3036 #<immediate>
3037 #<immediate>, LSL #imm
3038
3039 where <shift> is handled by parse_shift above, and the last two
3040 cases are handled by the function above.
3041
3042 Validation of immediate operands is deferred to md_apply_fix.
3043
3044 Return TRUE on success; otherwise return FALSE. */
3045
3046 static bfd_boolean
3047 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3048 enum parse_shift_mode mode)
3049 {
3050 int reg;
3051 int isreg32, isregzero;
3052 enum aarch64_operand_class opd_class
3053 = aarch64_get_operand_class (operand->type);
3054
3055 if ((reg =
3056 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
3057 {
3058 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3059 {
3060 set_syntax_error (_("unexpected register in the immediate operand"));
3061 return FALSE;
3062 }
3063
3064 if (!isregzero && reg == REG_SP)
3065 {
3066 set_syntax_error (BAD_SP);
3067 return FALSE;
3068 }
3069
3070 operand->reg.regno = reg;
3071 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
3072
3073 /* Accept optional shift operation on register. */
3074 if (! skip_past_comma (str))
3075 return TRUE;
3076
3077 if (! parse_shift (str, operand, mode))
3078 return FALSE;
3079
3080 return TRUE;
3081 }
3082 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3083 {
3084 set_syntax_error
3085 (_("integer register expected in the extended/shifted operand "
3086 "register"));
3087 return FALSE;
3088 }
3089
3090 /* We have a shifted immediate variable. */
3091 return parse_shifter_operand_imm (str, operand, mode);
3092 }
3093
3094 /* Return TRUE on success; return FALSE otherwise. */
3095
3096 static bfd_boolean
3097 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3098 enum parse_shift_mode mode)
3099 {
3100 char *p = *str;
3101
3102 /* Determine if we have the sequence of characters #: or just :
3103 coming next. If we do, then we check for a :rello: relocation
3104 modifier. If we don't, punt the whole lot to
3105 parse_shifter_operand. */
3106
3107 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3108 {
3109 struct reloc_table_entry *entry;
3110
3111 if (p[0] == '#')
3112 p += 2;
3113 else
3114 p++;
3115 *str = p;
3116
3117 /* Try to parse a relocation. Anything else is an error. */
3118 if (!(entry = find_reloc_table_entry (str)))
3119 {
3120 set_syntax_error (_("unknown relocation modifier"));
3121 return FALSE;
3122 }
3123
3124 if (entry->add_type == 0)
3125 {
3126 set_syntax_error
3127 (_("this relocation modifier is not allowed on this instruction"));
3128 return FALSE;
3129 }
3130
3131 /* Save str before we decompose it. */
3132 p = *str;
3133
3134 /* Next, we parse the expression. */
3135 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3136 return FALSE;
3137
3138 /* Record the relocation type (use the ADD variant here). */
3139 inst.reloc.type = entry->add_type;
3140 inst.reloc.pc_rel = entry->pc_rel;
3141
3142 /* If str is empty, we've reached the end; stop here. */
3143 if (**str == '\0')
3144 return TRUE;
3145
3146 /* Otherwise, we have a shifted reloc modifier, so rewind to
3147 recover the variable name and continue parsing for the shifter. */
3148 *str = p;
3149 return parse_shifter_operand_imm (str, operand, mode);
3150 }
3151
3152 return parse_shifter_operand (str, operand, mode);
3153 }
3154
3155 /* Parse all forms of an address expression. Information is written
3156 to *OPERAND and/or inst.reloc.
3157
3158 The A64 instruction set has the following addressing modes:
3159
3160 Offset
3161 [base] // in SIMD ld/st structure
3162 [base{,#0}] // in ld/st exclusive
3163 [base{,#imm}]
3164 [base,Xm{,LSL #imm}]
3165 [base,Xm,SXTX {#imm}]
3166 [base,Wm,(S|U)XTW {#imm}]
3167 Pre-indexed
3168 [base,#imm]!
3169 Post-indexed
3170 [base],#imm
3171 [base],Xm // in SIMD ld/st structure
3172 PC-relative (literal)
3173 label
3174 =immediate
3175
3176 (As a convenience, the notation "=immediate" is permitted in conjunction
3177 with the pc-relative literal load instructions to automatically place an
3178 immediate value or symbolic address in a nearby literal pool and generate
3179 a hidden label which references it.)
3180
3181 Upon a successful parsing, the address structure in *OPERAND will be
3182 filled in the following way:
3183
3184 .base_regno = <base>
3185 .offset.is_reg // 1 if the offset is a register
3186 .offset.imm = <imm>
3187 .offset.regno = <Rm>
3188
3189 For different addressing modes defined in the A64 ISA:
3190
3191 Offset
3192 .pcrel=0; .preind=1; .postind=0; .writeback=0
3193 Pre-indexed
3194 .pcrel=0; .preind=1; .postind=0; .writeback=1
3195 Post-indexed
3196 .pcrel=0; .preind=0; .postind=1; .writeback=1
3197 PC-relative (literal)
3198 .pcrel=1; .preind=1; .postind=0; .writeback=0
3199
3200 The shift/extension information, if any, will be stored in .shifter.
3201
3202 It is the caller's responsibility to check for addressing modes not
3203 supported by the instruction, and to set inst.reloc.type. */
3204
3205 static bfd_boolean
3206 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
3207 int accept_reg_post_index)
3208 {
3209 char *p = *str;
3210 int reg;
3211 int isreg32, isregzero;
3212 expressionS *exp = &inst.reloc.exp;
3213
3214 if (! skip_past_char (&p, '['))
3215 {
3216 /* =immediate or label. */
3217 operand->addr.pcrel = 1;
3218 operand->addr.preind = 1;
3219
3220 /* #:<reloc_op>:<symbol> */
3221 skip_past_char (&p, '#');
3222 if (reloc && skip_past_char (&p, ':'))
3223 {
3224 bfd_reloc_code_real_type ty;
3225 struct reloc_table_entry *entry;
3226
3227 /* Try to parse a relocation modifier. Anything else is
3228 an error. */
3229 entry = find_reloc_table_entry (&p);
3230 if (! entry)
3231 {
3232 set_syntax_error (_("unknown relocation modifier"));
3233 return FALSE;
3234 }
3235
3236 switch (operand->type)
3237 {
3238 case AARCH64_OPND_ADDR_PCREL21:
3239 /* adr */
3240 ty = entry->adr_type;
3241 break;
3242
3243 default:
3244 ty = entry->ld_literal_type;
3245 break;
3246 }
3247
3248 if (ty == 0)
3249 {
3250 set_syntax_error
3251 (_("this relocation modifier is not allowed on this "
3252 "instruction"));
3253 return FALSE;
3254 }
3255
3256 /* #:<reloc_op>: */
3257 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3258 {
3259 set_syntax_error (_("invalid relocation expression"));
3260 return FALSE;
3261 }
3262
3263 /* #:<reloc_op>:<expr> */
3264 /* Record the relocation type. */
3265 inst.reloc.type = ty;
3266 inst.reloc.pc_rel = entry->pc_rel;
3267 }
3268 else
3269 {
3270
3271 if (skip_past_char (&p, '='))
3272 /* =immediate; need to generate the literal in the literal pool. */
3273 inst.gen_lit_pool = 1;
3274
3275 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3276 {
3277 set_syntax_error (_("invalid address"));
3278 return FALSE;
3279 }
3280 }
3281
3282 *str = p;
3283 return TRUE;
3284 }
3285
3286 /* [ */
3287
3288 /* Accept SP and reject ZR */
3289 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
3290 if (reg == PARSE_FAIL || isreg32)
3291 {
3292 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3293 return FALSE;
3294 }
3295 operand->addr.base_regno = reg;
3296
3297 /* [Xn */
3298 if (skip_past_comma (&p))
3299 {
3300 /* [Xn, */
3301 operand->addr.preind = 1;
3302
3303 /* Reject SP and accept ZR */
3304 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3305 if (reg != PARSE_FAIL)
3306 {
3307 /* [Xn,Rm */
3308 operand->addr.offset.regno = reg;
3309 operand->addr.offset.is_reg = 1;
3310 /* Shifted index. */
3311 if (skip_past_comma (&p))
3312 {
3313 /* [Xn,Rm, */
3314 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3315 /* Use the diagnostics set in parse_shift, so do not set a new
3316 error message here. */
3317 return FALSE;
3318 }
3319 /* We only accept:
3320 [base,Xm{,LSL #imm}]
3321 [base,Xm,SXTX {#imm}]
3322 [base,Wm,(S|U)XTW {#imm}] */
3323 if (operand->shifter.kind == AARCH64_MOD_NONE
3324 || operand->shifter.kind == AARCH64_MOD_LSL
3325 || operand->shifter.kind == AARCH64_MOD_SXTX)
3326 {
3327 if (isreg32)
3328 {
3329 set_syntax_error (_("invalid use of 32-bit register offset"));
3330 return FALSE;
3331 }
3332 }
3333 else if (!isreg32)
3334 {
3335 set_syntax_error (_("invalid use of 64-bit register offset"));
3336 return FALSE;
3337 }
3338 }
3339 else
3340 {
3341 /* [Xn,#:<reloc_op>:<symbol> */
3342 skip_past_char (&p, '#');
3343 if (reloc && skip_past_char (&p, ':'))
3344 {
3345 struct reloc_table_entry *entry;
3346
3347 /* Try to parse a relocation modifier. Anything else is
3348 an error. */
3349 if (!(entry = find_reloc_table_entry (&p)))
3350 {
3351 set_syntax_error (_("unknown relocation modifier"));
3352 return FALSE;
3353 }
3354
3355 if (entry->ldst_type == 0)
3356 {
3357 set_syntax_error
3358 (_("this relocation modifier is not allowed on this "
3359 "instruction"));
3360 return FALSE;
3361 }
3362
3363 /* [Xn,#:<reloc_op>: */
3364 /* We now have the group relocation table entry corresponding to
3365 the name in the assembler source. Next, we parse the
3366 expression. */
3367 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3368 {
3369 set_syntax_error (_("invalid relocation expression"));
3370 return FALSE;
3371 }
3372
3373 /* [Xn,#:<reloc_op>:<expr> */
3374 /* Record the load/store relocation type. */
3375 inst.reloc.type = entry->ldst_type;
3376 inst.reloc.pc_rel = entry->pc_rel;
3377 }
3378 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3379 {
3380 set_syntax_error (_("invalid expression in the address"));
3381 return FALSE;
3382 }
3383 /* [Xn,<expr> */
3384 }
3385 }
3386
3387 if (! skip_past_char (&p, ']'))
3388 {
3389 set_syntax_error (_("']' expected"));
3390 return FALSE;
3391 }
3392
3393 if (skip_past_char (&p, '!'))
3394 {
3395 if (operand->addr.preind && operand->addr.offset.is_reg)
3396 {
3397 set_syntax_error (_("register offset not allowed in pre-indexed "
3398 "addressing mode"));
3399 return FALSE;
3400 }
3401 /* [Xn]! */
3402 operand->addr.writeback = 1;
3403 }
3404 else if (skip_past_comma (&p))
3405 {
3406 /* [Xn], */
3407 operand->addr.postind = 1;
3408 operand->addr.writeback = 1;
3409
3410 if (operand->addr.preind)
3411 {
3412 set_syntax_error (_("cannot combine pre- and post-indexing"));
3413 return FALSE;
3414 }
3415
3416 if (accept_reg_post_index
3417 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3418 &isregzero)) != PARSE_FAIL)
3419 {
3420 /* [Xn],Xm */
3421 if (isreg32)
3422 {
3423 set_syntax_error (_("invalid 32-bit register offset"));
3424 return FALSE;
3425 }
3426 operand->addr.offset.regno = reg;
3427 operand->addr.offset.is_reg = 1;
3428 }
3429 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3430 {
3431 /* [Xn],#expr */
3432 set_syntax_error (_("invalid expression in the address"));
3433 return FALSE;
3434 }
3435 }
3436
3437 /* If at this point neither .preind nor .postind is set, we have a
3438 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3439 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3440 {
3441 if (operand->addr.writeback)
3442 {
3443 /* Reject [Rn]! */
3444 set_syntax_error (_("missing offset in the pre-indexed address"));
3445 return FALSE;
3446 }
3447 operand->addr.preind = 1;
3448 inst.reloc.exp.X_op = O_constant;
3449 inst.reloc.exp.X_add_number = 0;
3450 }
3451
3452 *str = p;
3453 return TRUE;
3454 }
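
/* For example, parsing "[x1, w2, sxtw #2]" with the function above yields
     .base_regno = 1, .offset.regno = 2, .offset.is_reg = 1,
     .preind = 1, .shifter.kind = AARCH64_MOD_SXTW, .shifter.amount = 2
   while "[x3], #16" yields
     .base_regno = 3, .postind = 1, .writeback = 1
   with the offset recorded in inst.reloc.exp as the constant 16.  */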
3455
3456 /* Return TRUE on success; otherwise return FALSE. */
3457 static bfd_boolean
3458 parse_address (char **str, aarch64_opnd_info *operand,
3459 int accept_reg_post_index)
3460 {
3461 return parse_address_main (str, operand, 0, accept_reg_post_index);
3462 }
3463
3464 /* Return TRUE on success; otherwise return FALSE. */
3465 static bfd_boolean
3466 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3467 {
3468 return parse_address_main (str, operand, 1, 0);
3469 }
3470
3471 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3472 Return TRUE on success; otherwise return FALSE. */
3473 static bfd_boolean
3474 parse_half (char **str, int *internal_fixup_p)
3475 {
3476 char *p = *str;
3477
3478 skip_past_char (&p, '#');
3479
3480 gas_assert (internal_fixup_p);
3481 *internal_fixup_p = 0;
3482
3483 if (*p == ':')
3484 {
3485 struct reloc_table_entry *entry;
3486
3487 /* Try to parse a relocation. Anything else is an error. */
3488 ++p;
3489 if (!(entry = find_reloc_table_entry (&p)))
3490 {
3491 set_syntax_error (_("unknown relocation modifier"));
3492 return FALSE;
3493 }
3494
3495 if (entry->movw_type == 0)
3496 {
3497 set_syntax_error
3498 (_("this relocation modifier is not allowed on this instruction"));
3499 return FALSE;
3500 }
3501
3502 inst.reloc.type = entry->movw_type;
3503 }
3504 else
3505 *internal_fixup_p = 1;
3506
3507 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3508 return FALSE;
3509
3510 *str = p;
3511 return TRUE;
3512 }
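
/* For example, for the operand "#:abs_g1:sym" of a MOVZ instruction,
   parse_half sets inst.reloc.type to BFD_RELOC_AARCH64_MOVW_G1 (the
   movw_type of the "abs_g1" entry) and leaves *INTERNAL_FIXUP_P as 0,
   whereas a plain "#1234" sets *INTERNAL_FIXUP_P to 1 and leaves the
   relocation type alone.  */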
3513
3514 /* Parse an operand for an ADRP instruction:
3515 ADRP <Xd>, <label>
3516 Return TRUE on success; otherwise return FALSE. */
3517
3518 static bfd_boolean
3519 parse_adrp (char **str)
3520 {
3521 char *p;
3522
3523 p = *str;
3524 if (*p == ':')
3525 {
3526 struct reloc_table_entry *entry;
3527
3528 /* Try to parse a relocation. Anything else is an error. */
3529 ++p;
3530 if (!(entry = find_reloc_table_entry (&p)))
3531 {
3532 set_syntax_error (_("unknown relocation modifier"));
3533 return FALSE;
3534 }
3535
3536 if (entry->adrp_type == 0)
3537 {
3538 set_syntax_error
3539 (_("this relocation modifier is not allowed on this instruction"));
3540 return FALSE;
3541 }
3542
3543 inst.reloc.type = entry->adrp_type;
3544 }
3545 else
3546 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3547
3548 inst.reloc.pc_rel = 1;
3549
3550 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3551 return FALSE;
3552
3553 *str = p;
3554 return TRUE;
3555 }
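
/* For example, "adrp x0, :got:sym" makes parse_adrp record
   BFD_RELOC_AARCH64_ADR_GOT_PAGE, while a plain "adrp x0, sym" falls back
   to BFD_RELOC_AARCH64_ADR_HI21_PCREL; in both cases pc_rel is set.  */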
3556
3557 /* Miscellaneous. */
3558
3559 /* Parse an option for a preload instruction. Returns the encoding for the
3560 option, or PARSE_FAIL. */
3561
3562 static int
3563 parse_pldop (char **str)
3564 {
3565 char *p, *q;
3566 const struct aarch64_name_value_pair *o;
3567
3568 p = q = *str;
3569 while (ISALNUM (*q))
3570 q++;
3571
3572 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3573 if (!o)
3574 return PARSE_FAIL;
3575
3576 *str = q;
3577 return o->value;
3578 }
3579
3580 /* Parse an option for a barrier instruction. Returns the encoding for the
3581 option, or PARSE_FAIL. */
3582
3583 static int
3584 parse_barrier (char **str)
3585 {
3586 char *p, *q;
3587 const asm_barrier_opt *o;
3588
3589 p = q = *str;
3590 while (ISALPHA (*q))
3591 q++;
3592
3593 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3594 if (!o)
3595 return PARSE_FAIL;
3596
3597 *str = q;
3598 return o->value;
3599 }
3600
3601 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3602 and return 0 if successful. Otherwise return PARSE_FAIL. */
3603
3604 static int
3605 parse_barrier_psb (char **str,
3606 const struct aarch64_name_value_pair ** hint_opt)
3607 {
3608 char *p, *q;
3609 const struct aarch64_name_value_pair *o;
3610
3611 p = q = *str;
3612 while (ISALPHA (*q))
3613 q++;
3614
3615 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3616 if (!o)
3617 {
3618 set_fatal_syntax_error
3619 ( _("unknown or missing option to PSB"));
3620 return PARSE_FAIL;
3621 }
3622
3623 if (o->value != 0x11)
3624 {
3625 /* PSB only accepts option name 'CSYNC'. */
3626 set_syntax_error
3627 (_("the specified option is not accepted for PSB"));
3628 return PARSE_FAIL;
3629 }
3630
3631 *str = q;
3632 *hint_opt = o;
3633 return 0;
3634 }
3635
3636 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3637 Returns the encoding for the option, or PARSE_FAIL.
3638
3639 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3640 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3641
3642 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3643 field, otherwise as a system register.
3644 */
3645
3646 static int
3647 parse_sys_reg (char **str, struct hash_control *sys_regs,
3648 int imple_defined_p, int pstatefield_p)
3649 {
3650 char *p, *q;
3651 char buf[32];
3652 const aarch64_sys_reg *o;
3653 int value;
3654
3655 p = buf;
3656 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3657 if (p < buf + 31)
3658 *p++ = TOLOWER (*q);
3659 *p = '\0';
3660 /* Assert that BUF is large enough. */
3661 gas_assert (p - buf == q - *str);
3662
3663 o = hash_find (sys_regs, buf);
3664 if (!o)
3665 {
3666 if (!imple_defined_p)
3667 return PARSE_FAIL;
3668 else
3669 {
3670 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3671 unsigned int op0, op1, cn, cm, op2;
3672
3673 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3674 != 5)
3675 return PARSE_FAIL;
3676 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3677 return PARSE_FAIL;
3678 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3679 }
3680 }
3681 else
3682 {
3683 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3684 as_bad (_("selected processor does not support PSTATE field "
3685 "name '%s'"), buf);
3686 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3687 as_bad (_("selected processor does not support system register "
3688 "name '%s'"), buf);
3689 if (aarch64_sys_reg_deprecated_p (o))
3690 as_warn (_("system register name '%s' is deprecated and may be "
3691 "removed in a future release"), buf);
3692 value = o->value;
3693 }
3694
3695 *str = q;
3696 return value;
3697 }
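
/* For example, when IMPLE_DEFINED_P is non-zero, a name such as
   "s3_0_c15_c2_0" that is not found in SYS_REGS is decoded by the sscanf
   above into op0=3, op1=0, Cn=15, Cm=2, op2=0, yielding the value
   (3 << 14) | (0 << 11) | (15 << 7) | (2 << 3) | 0 == 0xc790.  */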
3698
3699 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3700 for the option, or NULL. */
3701
3702 static const aarch64_sys_ins_reg *
3703 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3704 {
3705 char *p, *q;
3706 char buf[32];
3707 const aarch64_sys_ins_reg *o;
3708
3709 p = buf;
3710 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3711 if (p < buf + 31)
3712 *p++ = TOLOWER (*q);
3713 *p = '\0';
3714
3715 o = hash_find (sys_ins_regs, buf);
3716 if (!o)
3717 return NULL;
3718
3719 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
3720 as_bad (_("selected processor does not support system register "
3721 "name '%s'"), buf);
3722
3723 *str = q;
3724 return o;
3725 }
3726 \f
3727 #define po_char_or_fail(chr) do { \
3728 if (! skip_past_char (&str, chr)) \
3729 goto failure; \
3730 } while (0)
3731
3732 #define po_reg_or_fail(regtype) do { \
3733 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3734 if (val == PARSE_FAIL) \
3735 { \
3736 set_default_error (); \
3737 goto failure; \
3738 } \
3739 } while (0)
3740
3741 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3742 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3743 &isreg32, &isregzero); \
3744 if (val == PARSE_FAIL) \
3745 { \
3746 set_default_error (); \
3747 goto failure; \
3748 } \
3749 info->reg.regno = val; \
3750 if (isreg32) \
3751 info->qualifier = AARCH64_OPND_QLF_W; \
3752 else \
3753 info->qualifier = AARCH64_OPND_QLF_X; \
3754 } while (0)
3755
3756 #define po_imm_nc_or_fail() do { \
3757 if (! parse_constant_immediate (&str, &val)) \
3758 goto failure; \
3759 } while (0)
3760
3761 #define po_imm_or_fail(min, max) do { \
3762 if (! parse_constant_immediate (&str, &val)) \
3763 goto failure; \
3764 if (val < min || val > max) \
3765 { \
3766 set_fatal_syntax_error (_("immediate value out of range "\
3767 #min " to "#max)); \
3768 goto failure; \
3769 } \
3770 } while (0)
3771
3772 #define po_misc_or_fail(expr) do { \
3773 if (!expr) \
3774 goto failure; \
3775 } while (0)
3776 \f
3777 /* encode the 12-bit imm field of Add/sub immediate */
3778 static inline uint32_t
3779 encode_addsub_imm (uint32_t imm)
3780 {
3781 return imm << 10;
3782 }
3783
3784 /* encode the shift amount field of Add/sub immediate */
3785 static inline uint32_t
3786 encode_addsub_imm_shift_amount (uint32_t cnt)
3787 {
3788 return cnt << 22;
3789 }
3790
3791
3792 /* encode the imm field of Adr instruction */
3793 static inline uint32_t
3794 encode_adr_imm (uint32_t imm)
3795 {
3796 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3797 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3798 }
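
/* For example, encode_adr_imm (0x5) places imm[1:0] (0x1) at bits [30:29]
   and imm[20:2] (0x1) at bits [23:5], giving 0x20000020.  */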
3799
3800 /* encode the immediate field of Move wide immediate */
3801 static inline uint32_t
3802 encode_movw_imm (uint32_t imm)
3803 {
3804 return imm << 5;
3805 }
3806
3807 /* encode the 26-bit offset of unconditional branch */
3808 static inline uint32_t
3809 encode_branch_ofs_26 (uint32_t ofs)
3810 {
3811 return ofs & ((1 << 26) - 1);
3812 }
3813
3814 /* encode the 19-bit offset of conditional branch and compare & branch */
3815 static inline uint32_t
3816 encode_cond_branch_ofs_19 (uint32_t ofs)
3817 {
3818 return (ofs & ((1 << 19) - 1)) << 5;
3819 }
3820
3821 /* encode the 19-bit offset of ld literal */
3822 static inline uint32_t
3823 encode_ld_lit_ofs_19 (uint32_t ofs)
3824 {
3825 return (ofs & ((1 << 19) - 1)) << 5;
3826 }
3827
3828 /* Encode the 14-bit offset of test & branch. */
3829 static inline uint32_t
3830 encode_tst_branch_ofs_14 (uint32_t ofs)
3831 {
3832 return (ofs & ((1 << 14) - 1)) << 5;
3833 }
3834
3835 /* Encode the 16-bit imm field of svc/hvc/smc. */
3836 static inline uint32_t
3837 encode_svc_imm (uint32_t imm)
3838 {
3839 return imm << 5;
3840 }
3841
3842 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3843 static inline uint32_t
3844 reencode_addsub_switch_add_sub (uint32_t opcode)
3845 {
3846 return opcode ^ (1 << 30);
3847 }
3848
3849 static inline uint32_t
3850 reencode_movzn_to_movz (uint32_t opcode)
3851 {
3852 return opcode | (1 << 30);
3853 }
3854
3855 static inline uint32_t
3856 reencode_movzn_to_movn (uint32_t opcode)
3857 {
3858 return opcode & ~(1 << 30);
3859 }
3860
3861 /* Overall per-instruction processing. */
3862
3863 /* We need to be able to fix up arbitrary expressions in some statements.
3864 This is so that we can handle symbols that are an arbitrary distance from
3865 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3866 which returns part of an address in a form which will be valid for
3867 a data instruction. We do this by pushing the expression into a symbol
3868 in the expr_section, and creating a fix for that. */
3869
3870 static fixS *
3871 fix_new_aarch64 (fragS * frag,
3872 int where,
3873 short int size, expressionS * exp, int pc_rel, int reloc)
3874 {
3875 fixS *new_fix;
3876
3877 switch (exp->X_op)
3878 {
3879 case O_constant:
3880 case O_symbol:
3881 case O_add:
3882 case O_subtract:
3883 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3884 break;
3885
3886 default:
3887 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3888 pc_rel, reloc);
3889 break;
3890 }
3891 return new_fix;
3892 }
3893 \f
3894 /* Diagnostics on operand errors. */
3895
3896 /* By default, output a verbose error message.
3897 Disable the verbose error message with -mno-verbose-error. */
3898 static int verbose_error_p = 1;
3899
3900 #ifdef DEBUG_AARCH64
3901 /* N.B. this is only for the purpose of debugging. */
3902 const char* operand_mismatch_kind_names[] =
3903 {
3904 "AARCH64_OPDE_NIL",
3905 "AARCH64_OPDE_RECOVERABLE",
3906 "AARCH64_OPDE_SYNTAX_ERROR",
3907 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3908 "AARCH64_OPDE_INVALID_VARIANT",
3909 "AARCH64_OPDE_OUT_OF_RANGE",
3910 "AARCH64_OPDE_UNALIGNED",
3911 "AARCH64_OPDE_REG_LIST",
3912 "AARCH64_OPDE_OTHER_ERROR",
3913 };
3914 #endif /* DEBUG_AARCH64 */
3915
3916 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3917
3918 When multiple errors of different kinds are found in the same assembly
3919 line, only the error of the highest severity will be picked up for
3920 issuing the diagnostics. */
3921
3922 static inline bfd_boolean
3923 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3924 enum aarch64_operand_error_kind rhs)
3925 {
3926 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3927 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3928 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3929 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3930 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3931 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3932 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3933 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3934 return lhs > rhs;
3935 }
3936
3937 /* Helper routine to get the mnemonic name from the assembly instruction
3938 line; should only be called for diagnostic purposes, as a string copy
3939 operation is involved, which may affect the runtime
3940 performance if used elsewhere. */
3941
3942 static const char*
3943 get_mnemonic_name (const char *str)
3944 {
3945 static char mnemonic[32];
3946 char *ptr;
3947
3948 /* Get the first 31 bytes and assume that the full name is included. */
3949 strncpy (mnemonic, str, 31);
3950 mnemonic[31] = '\0';
3951
3952 /* Scan up to the end of the mnemonic, which must end in white space,
3953 '.', or end of string. */
3954 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3955 ;
3956
3957 *ptr = '\0';
3958
3959 /* Append '...' to the truncated long name. */
3960 if (ptr - mnemonic == 31)
3961 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3962
3963 return mnemonic;
3964 }
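
/* For example, get_mnemonic_name ("ldp x0, x1, [sp]") returns "ldp", and a
   mnemonic longer than 31 characters comes back truncated with a trailing
   "...".  */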
3965
3966 static void
3967 reset_aarch64_instruction (aarch64_instruction *instruction)
3968 {
3969 memset (instruction, '\0', sizeof (aarch64_instruction));
3970 instruction->reloc.type = BFD_RELOC_UNUSED;
3971 }
3972
3973 /* Data structures storing one user error in the assembly code related to
3974 operands. */
3975
3976 struct operand_error_record
3977 {
3978 const aarch64_opcode *opcode;
3979 aarch64_operand_error detail;
3980 struct operand_error_record *next;
3981 };
3982
3983 typedef struct operand_error_record operand_error_record;
3984
3985 struct operand_errors
3986 {
3987 operand_error_record *head;
3988 operand_error_record *tail;
3989 };
3990
3991 typedef struct operand_errors operand_errors;
3992
3993 /* Top-level data structure reporting user errors for the current line of
3994 the assembly code.
3995 The way md_assemble works is that all opcodes sharing the same mnemonic
3996 name are iterated to find a match to the assembly line. In this data
3997 structure, each of the such opcodes will have one operand_error_record
3998 allocated and inserted. In other words, excessive errors related with
3999 a single opcode are disregarded. */
4000 operand_errors operand_error_report;
4001
4002 /* Free record nodes. */
4003 static operand_error_record *free_opnd_error_record_nodes = NULL;
4004
4005 /* Initialize the data structure that stores the operand mismatch
4006 information on assembling one line of the assembly code. */
4007 static void
4008 init_operand_error_report (void)
4009 {
4010 if (operand_error_report.head != NULL)
4011 {
4012 gas_assert (operand_error_report.tail != NULL);
4013 operand_error_report.tail->next = free_opnd_error_record_nodes;
4014 free_opnd_error_record_nodes = operand_error_report.head;
4015 operand_error_report.head = NULL;
4016 operand_error_report.tail = NULL;
4017 return;
4018 }
4019 gas_assert (operand_error_report.tail == NULL);
4020 }
4021
4022 /* Return TRUE if some operand error has been recorded during the
4023 parsing of the current assembly line using the opcode *OPCODE;
4024 otherwise return FALSE. */
4025 static inline bfd_boolean
4026 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4027 {
4028 operand_error_record *record = operand_error_report.head;
4029 return record && record->opcode == opcode;
4030 }
4031
4032 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4033 OPCODE field is initialized with OPCODE.
4034 N.B. only one record for each opcode, i.e. at most one error is
4035 recorded for each instruction template. */
4036
4037 static void
4038 add_operand_error_record (const operand_error_record* new_record)
4039 {
4040 const aarch64_opcode *opcode = new_record->opcode;
4041 operand_error_record* record = operand_error_report.head;
4042
4043 /* The record may have been created for this opcode. If not, we need
4044 to prepare one. */
4045 if (! opcode_has_operand_error_p (opcode))
4046 {
4047 /* Get one empty record. */
4048 if (free_opnd_error_record_nodes == NULL)
4049 {
4050 record = xmalloc (sizeof (operand_error_record));
4051 if (record == NULL)
4052 abort ();
4053 }
4054 else
4055 {
4056 record = free_opnd_error_record_nodes;
4057 free_opnd_error_record_nodes = record->next;
4058 }
4059 record->opcode = opcode;
4060 /* Insert at the head. */
4061 record->next = operand_error_report.head;
4062 operand_error_report.head = record;
4063 if (operand_error_report.tail == NULL)
4064 operand_error_report.tail = record;
4065 }
4066 else if (record->detail.kind != AARCH64_OPDE_NIL
4067 && record->detail.index <= new_record->detail.index
4068 && operand_error_higher_severity_p (record->detail.kind,
4069 new_record->detail.kind))
4070 {
4071 /* In the case of multiple errors found on operands related to a
4072 single opcode, only record the error of the leftmost operand and
4073 only if the error is of higher severity. */
4074 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
4075 " the existing error %s on operand %d",
4076 operand_mismatch_kind_names[new_record->detail.kind],
4077 new_record->detail.index,
4078 operand_mismatch_kind_names[record->detail.kind],
4079 record->detail.index);
4080 return;
4081 }
4082
4083 record->detail = new_record->detail;
4084 }
4085
4086 static inline void
4087 record_operand_error_info (const aarch64_opcode *opcode,
4088 aarch64_operand_error *error_info)
4089 {
4090 operand_error_record record;
4091 record.opcode = opcode;
4092 record.detail = *error_info;
4093 add_operand_error_record (&record);
4094 }
4095
4096 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4097 error message *ERROR, for operand IDX (count from 0). */
4098
4099 static void
4100 record_operand_error (const aarch64_opcode *opcode, int idx,
4101 enum aarch64_operand_error_kind kind,
4102 const char* error)
4103 {
4104 aarch64_operand_error info;
4105 memset(&info, 0, sizeof (info));
4106 info.index = idx;
4107 info.kind = kind;
4108 info.error = error;
4109 record_operand_error_info (opcode, &info);
4110 }
4111
4112 static void
4113 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4114 enum aarch64_operand_error_kind kind,
4115 const char* error, const int *extra_data)
4116 {
4117 aarch64_operand_error info;
4118 info.index = idx;
4119 info.kind = kind;
4120 info.error = error;
4121 info.data[0] = extra_data[0];
4122 info.data[1] = extra_data[1];
4123 info.data[2] = extra_data[2];
4124 record_operand_error_info (opcode, &info);
4125 }
4126
4127 static void
4128 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4129 const char* error, int lower_bound,
4130 int upper_bound)
4131 {
4132 int data[3] = {lower_bound, upper_bound, 0};
4133 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4134 error, data);
4135 }
4136
4137 /* Remove the operand error record for *OPCODE. */
4138 static void ATTRIBUTE_UNUSED
4139 remove_operand_error_record (const aarch64_opcode *opcode)
4140 {
4141 if (opcode_has_operand_error_p (opcode))
4142 {
4143 operand_error_record* record = operand_error_report.head;
4144 gas_assert (record != NULL && operand_error_report.tail != NULL);
4145 operand_error_report.head = record->next;
4146 record->next = free_opnd_error_record_nodes;
4147 free_opnd_error_record_nodes = record;
4148 if (operand_error_report.head == NULL)
4149 {
4150 gas_assert (operand_error_report.tail == record);
4151 operand_error_report.tail = NULL;
4152 }
4153 }
4154 }
4155
4156 /* Given the instruction in *INSTR, return the index of the best matched
4157 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4158
4159 Return -1 if there is no qualifier sequence; return the first match
4160 if multiple matches are found. */
4161
4162 static int
4163 find_best_match (const aarch64_inst *instr,
4164 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4165 {
4166 int i, num_opnds, max_num_matched, idx;
4167
4168 num_opnds = aarch64_num_of_operands (instr->opcode);
4169 if (num_opnds == 0)
4170 {
4171 DEBUG_TRACE ("no operand");
4172 return -1;
4173 }
4174
4175 max_num_matched = 0;
4176 idx = -1;
4177
4178 /* For each pattern. */
4179 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4180 {
4181 int j, num_matched;
4182 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4183
4184 /* Most opcodes have far fewer patterns in the list. */
4185 if (empty_qualifier_sequence_p (qualifiers))
4186 {
4187 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4188 if (i != 0 && idx == -1)
4189 /* If nothing has been matched, return the 1st sequence. */
4190 idx = 0;
4191 break;
4192 }
4193
4194 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4195 if (*qualifiers == instr->operands[j].qualifier)
4196 ++num_matched;
4197
4198 if (num_matched > max_num_matched)
4199 {
4200 max_num_matched = num_matched;
4201 idx = i;
4202 }
4203 }
4204
4205 DEBUG_TRACE ("return with %d", idx);
4206 return idx;
4207 }
4208
4209 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4210 corresponding operands in *INSTR. */
4211
4212 static inline void
4213 assign_qualifier_sequence (aarch64_inst *instr,
4214 const aarch64_opnd_qualifier_t *qualifiers)
4215 {
4216 int i = 0;
4217 int num_opnds = aarch64_num_of_operands (instr->opcode);
4218 gas_assert (num_opnds);
4219 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4220 instr->operands[i].qualifier = *qualifiers;
4221 }
4222
4223 /* Print operands for diagnostic purposes. */
4224
4225 static void
4226 print_operands (char *buf, const aarch64_opcode *opcode,
4227 const aarch64_opnd_info *opnds)
4228 {
4229 int i;
4230
4231 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4232 {
4233 const size_t size = 128;
4234 char str[size];
4235
4236 /* We rely primarily on the opcode operand info; however, we also look
4237 into inst->operands to support the printing of the optional
4238 operand.
4239 The two operand codes should be the same in all cases, apart from
4240 when the operand can be optional. */
4241 if (opcode->operands[i] == AARCH64_OPND_NIL
4242 || opnds[i].type == AARCH64_OPND_NIL)
4243 break;
4244
4245 /* Generate the operand string in STR. */
4246 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
4247
4248 /* Delimiter. */
4249 if (str[0] != '\0')
4250 strcat (buf, i == 0 ? " " : ",");
4251
4252 /* Append the operand string. */
4253 strcat (buf, str);
4254 }
4255 }
4256
4257 /* Print an informational message to stderr. */
4258
4259 static void
4260 output_info (const char *format, ...)
4261 {
4262 const char *file;
4263 unsigned int line;
4264 va_list args;
4265
4266 file = as_where (&line);
4267 if (file)
4268 {
4269 if (line != 0)
4270 fprintf (stderr, "%s:%u: ", file, line);
4271 else
4272 fprintf (stderr, "%s: ", file);
4273 }
4274 fprintf (stderr, _("Info: "));
4275 va_start (args, format);
4276 vfprintf (stderr, format, args);
4277 va_end (args);
4278 (void) putc ('\n', stderr);
4279 }
4280
4281 /* Output one operand error record. */
4282
4283 static void
4284 output_operand_error_record (const operand_error_record *record, char *str)
4285 {
4286 const aarch64_operand_error *detail = &record->detail;
4287 int idx = detail->index;
4288 const aarch64_opcode *opcode = record->opcode;
4289 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4290 : AARCH64_OPND_NIL);
4291
4292 switch (detail->kind)
4293 {
4294 case AARCH64_OPDE_NIL:
4295 gas_assert (0);
4296 break;
4297
4298 case AARCH64_OPDE_SYNTAX_ERROR:
4299 case AARCH64_OPDE_RECOVERABLE:
4300 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4301 case AARCH64_OPDE_OTHER_ERROR:
4302 /* Use the prepared error message if there is one; otherwise use the
4303 operand description string to describe the error. */
4304 if (detail->error != NULL)
4305 {
4306 if (idx < 0)
4307 as_bad (_("%s -- `%s'"), detail->error, str);
4308 else
4309 as_bad (_("%s at operand %d -- `%s'"),
4310 detail->error, idx + 1, str);
4311 }
4312 else
4313 {
4314 gas_assert (idx >= 0);
4315 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4316 aarch64_get_operand_desc (opd_code), str);
4317 }
4318 break;
4319
4320 case AARCH64_OPDE_INVALID_VARIANT:
4321 as_bad (_("operand mismatch -- `%s'"), str);
4322 if (verbose_error_p)
4323 {
4324 /* We will try to correct the erroneous instruction and also provide
4325 more information e.g. all other valid variants.
4326
4327 The string representation of the corrected instruction and other
4328 valid variants are generated by
4329
4330 1) obtaining the intermediate representation of the erroneous
4331 instruction;
4332 2) manipulating the IR, e.g. replacing the operand qualifier;
4333 3) printing out the instruction by calling the printer functions
4334 shared with the disassembler.
4335
4336 The limitation of this method is that the exact input assembly
4337 line cannot be accurately reproduced in some cases, for example an
4338 optional operand present in the actual assembly line will be
4339 omitted in the output; likewise for the optional syntax rules,
4340 e.g. the # before the immediate. Another limitation is that the
4341 assembly symbols and relocation operations in the assembly line
4342 currently cannot be printed out in the error report. Last but not
4343 least, when other errors co-exist with this error, the
4344 'corrected' instruction may still be incorrect, e.g. given
4345 'ldnp h0,h1,[x0,#6]!'
4346 this diagnosis will provide the version:
4347 'ldnp s0,s1,[x0,#6]!'
4348 which is still not right. */
4349 size_t len = strlen (get_mnemonic_name (str));
4350 int i, qlf_idx;
4351 bfd_boolean result;
4352 const size_t size = 2048;
4353 char buf[size];
4354 aarch64_inst *inst_base = &inst.base;
4355 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4356
4357 /* Init inst. */
4358 reset_aarch64_instruction (&inst);
4359 inst_base->opcode = opcode;
4360
4361 /* Reset the error report so that there is no side effect on the
4362 following operand parsing. */
4363 init_operand_error_report ();
4364
4365 /* Fill inst. */
4366 result = parse_operands (str + len, opcode)
4367 && programmer_friendly_fixup (&inst);
4368 gas_assert (result);
4369 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4370 NULL, NULL);
4371 gas_assert (!result);
4372
4373 /* Find the most matched qualifier sequence. */
4374 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4375 gas_assert (qlf_idx > -1);
4376
4377 /* Assign the qualifiers. */
4378 assign_qualifier_sequence (inst_base,
4379 opcode->qualifiers_list[qlf_idx]);
4380
4381 /* Print the hint. */
4382 output_info (_(" did you mean this?"));
4383 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4384 print_operands (buf, opcode, inst_base->operands);
4385 output_info (_(" %s"), buf);
4386
4387 /* Print out other variant(s) if there is any. */
4388 if (qlf_idx != 0 ||
4389 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4390 output_info (_(" other valid variant(s):"));
4391
4392 /* For each pattern. */
4393 qualifiers_list = opcode->qualifiers_list;
4394 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4395 {
4396 /* Most opcodes have far fewer patterns in the list.
4397 The first NIL qualifier indicates the end of the list. */
4398 if (empty_qualifier_sequence_p (*qualifiers_list))
4399 break;
4400
4401 if (i != qlf_idx)
4402 {
4403 /* Mnemonic name. */
4404 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4405
4406 /* Assign the qualifiers. */
4407 assign_qualifier_sequence (inst_base, *qualifiers_list);
4408
4409 /* Print instruction. */
4410 print_operands (buf, opcode, inst_base->operands);
4411
4412 output_info (_(" %s"), buf);
4413 }
4414 }
4415 }
4416 break;
4417
4418 case AARCH64_OPDE_OUT_OF_RANGE:
4419 if (detail->data[0] != detail->data[1])
4420 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4421 detail->error ? detail->error : _("immediate value"),
4422 detail->data[0], detail->data[1], idx + 1, str);
4423 else
4424 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4425 detail->error ? detail->error : _("immediate value"),
4426 detail->data[0], idx + 1, str);
4427 break;
4428
4429 case AARCH64_OPDE_REG_LIST:
4430 if (detail->data[0] == 1)
4431 as_bad (_("invalid number of registers in the list; "
4432 "only 1 register is expected at operand %d -- `%s'"),
4433 idx + 1, str);
4434 else
4435 as_bad (_("invalid number of registers in the list; "
4436 "%d registers are expected at operand %d -- `%s'"),
4437 detail->data[0], idx + 1, str);
4438 break;
4439
4440 case AARCH64_OPDE_UNALIGNED:
4441 as_bad (_("immediate value should be a multiple of "
4442 "%d at operand %d -- `%s'"),
4443 detail->data[0], idx + 1, str);
4444 break;
4445
4446 default:
4447 gas_assert (0);
4448 break;
4449 }
4450 }
4451
4452 /* Process and output the error message about the operand mismatch.
4453
4454 When this function is called, the operand error information has
4455 been collected for an assembly line and there may be multiple
4456 errors in the case of multiple instruction templates; output the
4457 error message that most closely describes the problem. */
4458
4459 static void
4460 output_operand_error_report (char *str)
4461 {
4462 int largest_error_pos;
4463 const char *msg = NULL;
4464 enum aarch64_operand_error_kind kind;
4465 operand_error_record *curr;
4466 operand_error_record *head = operand_error_report.head;
4467 operand_error_record *record = NULL;
4468
4469 /* No error to report. */
4470 if (head == NULL)
4471 return;
4472
4473 gas_assert (head != NULL && operand_error_report.tail != NULL);
4474
4475 /* Only one error. */
4476 if (head == operand_error_report.tail)
4477 {
4478 DEBUG_TRACE ("single opcode entry with error kind: %s",
4479 operand_mismatch_kind_names[head->detail.kind]);
4480 output_operand_error_record (head, str);
4481 return;
4482 }
4483
4484 /* Find the error kind of the highest severity. */
4485 DEBUG_TRACE ("multiple opcode entries with error kind");
4486 kind = AARCH64_OPDE_NIL;
4487 for (curr = head; curr != NULL; curr = curr->next)
4488 {
4489 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4490 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4491 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4492 kind = curr->detail.kind;
4493 }
4494 gas_assert (kind != AARCH64_OPDE_NIL);
4495
4496 /* Pick one of the errors of KIND to report. */
4497 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4498 for (curr = head; curr != NULL; curr = curr->next)
4499 {
4500 if (curr->detail.kind != kind)
4501 continue;
4502 /* If there are multiple errors, pick the one with the highest
4503 mismatching operand index. In the case of multiple errors with
4504 the same highest operand index, pick the first one, preferring
4505 one that carries a non-NULL error message. */
4506 if (curr->detail.index > largest_error_pos
4507 || (curr->detail.index == largest_error_pos && msg == NULL
4508 && curr->detail.error != NULL))
4509 {
4510 largest_error_pos = curr->detail.index;
4511 record = curr;
4512 msg = record->detail.error;
4513 }
4514 }
4515
4516 gas_assert (largest_error_pos != -2 && record != NULL);
4517 DEBUG_TRACE ("Pick up error kind %s to report",
4518 operand_mismatch_kind_names[record->detail.kind]);
4519
4520 /* Output. */
4521 output_operand_error_record (record, str);
4522 }
4523 \f
4524 /* Write an AARCH64 instruction to buf - always little-endian. */
4525 static void
4526 put_aarch64_insn (char *buf, uint32_t insn)
4527 {
4528 unsigned char *where = (unsigned char *) buf;
4529 where[0] = insn;
4530 where[1] = insn >> 8;
4531 where[2] = insn >> 16;
4532 where[3] = insn >> 24;
4533 }
4534
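/* Read a 32-bit AArch64 instruction from BUF, which is stored little-endian. */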
4535 static uint32_t
4536 get_aarch64_insn (char *buf)
4537 {
4538 unsigned char *where = (unsigned char *) buf;
4539 uint32_t result;
4540 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4541 return result;
4542 }
4543
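/* Emit the instruction currently held in inst.base.value to the output frag,
creating a fixup when a relocation is pending; NEW_INST, if non-NULL, is
recorded on the fixup for use by the later fixup-processing stages. */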
4544 static void
4545 output_inst (struct aarch64_inst *new_inst)
4546 {
4547 char *to = NULL;
4548
4549 to = frag_more (INSN_SIZE);
4550
4551 frag_now->tc_frag_data.recorded = 1;
4552
4553 put_aarch64_insn (to, inst.base.value);
4554
4555 if (inst.reloc.type != BFD_RELOC_UNUSED)
4556 {
4557 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4558 INSN_SIZE, &inst.reloc.exp,
4559 inst.reloc.pc_rel,
4560 inst.reloc.type);
4561 DEBUG_TRACE ("Prepared relocation fix up");
4562 /* Don't check the addend value against the instruction size,
4563 that's the job of our code in md_apply_fix(). */
4564 fixp->fx_no_overflow = 1;
4565 if (new_inst != NULL)
4566 fixp->tc_fix_data.inst = new_inst;
4567 if (aarch64_gas_internal_fixup_p ())
4568 {
4569 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4570 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4571 fixp->fx_addnumber = inst.reloc.flags;
4572 }
4573 }
4574
4575 dwarf2_emit_insn (INSN_SIZE);
4576 }
4577
4578 /* Link together opcodes of the same name. */
4579
4580 struct templates
4581 {
4582 aarch64_opcode *opcode;
4583 struct templates *next;
4584 };
4585
4586 typedef struct templates templates;
4587
4588 static templates *
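/* Look up the list of opcode templates for the LEN-character mnemonic
starting at START; return NULL if the mnemonic is not known. */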
4589 lookup_mnemonic (const char *start, int len)
4590 {
4591 templates *templ = NULL;
4592
4593 templ = hash_find_n (aarch64_ops_hsh, start, len);
4594 return templ;
4595 }
4596
4597 /* Subroutine of md_assemble, responsible for looking up the primary
4598 opcode from the mnemonic the user wrote. STR points to the
4599 beginning of the mnemonic. */
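/* For example, for "b.eq lbl" the condition "eq" is recorded in inst.cond
and the mnemonic is looked up under the suffixed name "b.c". */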
4600
4601 static templates *
4602 opcode_lookup (char **str)
4603 {
4604 char *end, *base;
4605 const aarch64_cond *cond;
4606 char condname[16];
4607 int len;
4608
4609 /* Scan up to the end of the mnemonic, which must end in white space,
4610 '.', or end of string. */
4611 for (base = end = *str; is_part_of_name (*end); end++)
4612 if (*end == '.')
4613 break;
4614
4615 if (end == base)
4616 return 0;
4617
4618 inst.cond = COND_ALWAYS;
4619
4620 /* Handle a possible condition. */
4621 if (end[0] == '.')
4622 {
4623 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4624 if (cond)
4625 {
4626 inst.cond = cond->value;
4627 *str = end + 3;
4628 }
4629 else
4630 {
4631 *str = end;
4632 return 0;
4633 }
4634 }
4635 else
4636 *str = end;
4637
4638 len = end - base;
4639
4640 if (inst.cond == COND_ALWAYS)
4641 {
4642 /* Look for unaffixed mnemonic. */
4643 return lookup_mnemonic (base, len);
4644 }
4645 else if (len <= 13)
4646 {
4647 /* Append ".c" to the mnemonic if conditional. */
4648 memcpy (condname, base, len);
4649 memcpy (condname + len, ".c", 2);
4650 base = condname;
4651 len += 2;
4652 return lookup_mnemonic (base, len);
4653 }
4654
4655 return NULL;
4656 }
4657
4658 /* Internal helper routine converting a vector neon_type_el structure
4659 *VECTYPE to a corresponding operand qualifier. */
4660
4661 static inline aarch64_opnd_qualifier_t
4662 vectype_to_qualifier (const struct neon_type_el *vectype)
4663 {
4664 /* Element size in bytes indexed by neon_el_type. */
4665 const unsigned char ele_size[5]
4666 = {1, 2, 4, 8, 16};
4667 const unsigned int ele_base [5] =
4668 {
4669 AARCH64_OPND_QLF_V_8B,
4670 AARCH64_OPND_QLF_V_2H,
4671 AARCH64_OPND_QLF_V_2S,
4672 AARCH64_OPND_QLF_V_1D,
4673 AARCH64_OPND_QLF_V_1Q
4674 };
4675
4676 if (!vectype->defined || vectype->type == NT_invtype)
4677 goto vectype_conversion_fail;
4678
4679 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4680
4681 if (vectype->defined & NTA_HASINDEX)
4682 /* Vector element register. */
4683 return AARCH64_OPND_QLF_S_B + vectype->type;
4684 else
4685 {
4686 /* Vector register. */
4687 int reg_size = ele_size[vectype->type] * vectype->width;
4688 unsigned offset;
4689 unsigned shift;
4690 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
4691 goto vectype_conversion_fail;
4692
4693 /* The conversion is done by calculating the offset from the base operand
4694 qualifier for the vector type. The operand qualifiers are regular
4695 enough that the offset can be established by shifting the vector width by
4696 a vector-type dependent amount. */
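/* For example, ".4s" (type NT_s, width 4) uses a shift of 2, giving
AARCH64_OPND_QLF_V_2S + (4 >> 2); this relies on the vector qualifiers
being declared consecutively in the enumeration (e.g. V_2S then V_4S). */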
4697 shift = 0;
4698 if (vectype->type == NT_b)
4699 shift = 4;
4700 else if (vectype->type == NT_h || vectype->type == NT_s)
4701 shift = 2;
4702 else if (vectype->type >= NT_d)
4703 shift = 1;
4704 else
4705 gas_assert (0);
4706
4707 offset = ele_base [vectype->type] + (vectype->width >> shift);
4708 gas_assert (AARCH64_OPND_QLF_V_8B <= offset
4709 && offset <= AARCH64_OPND_QLF_V_1Q);
4710 return offset;
4711 }
4712
4713 vectype_conversion_fail:
4714 first_error (_("bad vector arrangement type"));
4715 return AARCH64_OPND_QLF_NIL;
4716 }
4717
4718 /* Process an optional operand that has been omitted from the assembly line.
4719 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4720 instruction's opcode entry while IDX is the index of this omitted operand.
4721 */
4722
4723 static void
4724 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4725 int idx, aarch64_opnd_info *operand)
4726 {
4727 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4728 gas_assert (optional_operand_p (opcode, idx));
4729 gas_assert (!operand->present);
4730
4731 switch (type)
4732 {
4733 case AARCH64_OPND_Rd:
4734 case AARCH64_OPND_Rn:
4735 case AARCH64_OPND_Rm:
4736 case AARCH64_OPND_Rt:
4737 case AARCH64_OPND_Rt2:
4738 case AARCH64_OPND_Rs:
4739 case AARCH64_OPND_Ra:
4740 case AARCH64_OPND_Rt_SYS:
4741 case AARCH64_OPND_Rd_SP:
4742 case AARCH64_OPND_Rn_SP:
4743 case AARCH64_OPND_Fd:
4744 case AARCH64_OPND_Fn:
4745 case AARCH64_OPND_Fm:
4746 case AARCH64_OPND_Fa:
4747 case AARCH64_OPND_Ft:
4748 case AARCH64_OPND_Ft2:
4749 case AARCH64_OPND_Sd:
4750 case AARCH64_OPND_Sn:
4751 case AARCH64_OPND_Sm:
4752 case AARCH64_OPND_Vd:
4753 case AARCH64_OPND_Vn:
4754 case AARCH64_OPND_Vm:
4755 case AARCH64_OPND_VdD1:
4756 case AARCH64_OPND_VnD1:
4757 operand->reg.regno = default_value;
4758 break;
4759
4760 case AARCH64_OPND_Ed:
4761 case AARCH64_OPND_En:
4762 case AARCH64_OPND_Em:
4763 operand->reglane.regno = default_value;
4764 break;
4765
4766 case AARCH64_OPND_IDX:
4767 case AARCH64_OPND_BIT_NUM:
4768 case AARCH64_OPND_IMMR:
4769 case AARCH64_OPND_IMMS:
4770 case AARCH64_OPND_SHLL_IMM:
4771 case AARCH64_OPND_IMM_VLSL:
4772 case AARCH64_OPND_IMM_VLSR:
4773 case AARCH64_OPND_CCMP_IMM:
4774 case AARCH64_OPND_FBITS:
4775 case AARCH64_OPND_UIMM4:
4776 case AARCH64_OPND_UIMM3_OP1:
4777 case AARCH64_OPND_UIMM3_OP2:
4778 case AARCH64_OPND_IMM:
4779 case AARCH64_OPND_WIDTH:
4780 case AARCH64_OPND_UIMM7:
4781 case AARCH64_OPND_NZCV:
4782 operand->imm.value = default_value;
4783 break;
4784
4785 case AARCH64_OPND_EXCEPTION:
4786 inst.reloc.type = BFD_RELOC_UNUSED;
4787 break;
4788
4789 case AARCH64_OPND_BARRIER_ISB:
4790 operand->barrier = aarch64_barrier_options + default_value;
break;
4791
4792 default:
4793 break;
4794 }
4795 }
4796
4797 /* Process the relocation type for move wide instructions.
4798 Return TRUE on success; otherwise return FALSE. */
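/* For example, "movz x0, #:abs_g1:sym" carries BFD_RELOC_AARCH64_MOVW_G1,
so a shift amount of 16 is applied below. */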
4799
4800 static bfd_boolean
4801 process_movw_reloc_info (void)
4802 {
4803 int is32;
4804 unsigned shift;
4805
4806 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4807
4808 if (inst.base.opcode->op == OP_MOVK)
4809 switch (inst.reloc.type)
4810 {
4811 case BFD_RELOC_AARCH64_MOVW_G0_S:
4812 case BFD_RELOC_AARCH64_MOVW_G1_S:
4813 case BFD_RELOC_AARCH64_MOVW_G2_S:
4814 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4815 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4816 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4817 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4818 set_syntax_error
4819 (_("the specified relocation type is not allowed for MOVK"));
4820 return FALSE;
4821 default:
4822 break;
4823 }
4824
4825 switch (inst.reloc.type)
4826 {
4827 case BFD_RELOC_AARCH64_MOVW_G0:
4828 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4829 case BFD_RELOC_AARCH64_MOVW_G0_S:
4830 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
4831 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
4832 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
4833 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
4834 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
4835 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
4836 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4837 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4838 shift = 0;
4839 break;
4840 case BFD_RELOC_AARCH64_MOVW_G1:
4841 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4842 case BFD_RELOC_AARCH64_MOVW_G1_S:
4843 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
4844 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
4845 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4846 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
4847 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
4848 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
4849 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4850 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4851 shift = 16;
4852 break;
4853 case BFD_RELOC_AARCH64_MOVW_G2:
4854 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4855 case BFD_RELOC_AARCH64_MOVW_G2_S:
4856 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
4857 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4858 if (is32)
4859 {
4860 set_fatal_syntax_error
4861 (_("the specified relocation type is not allowed for 32-bit "
4862 "register"));
4863 return FALSE;
4864 }
4865 shift = 32;
4866 break;
4867 case BFD_RELOC_AARCH64_MOVW_G3:
4868 if (is32)
4869 {
4870 set_fatal_syntax_error
4871 (_("the specified relocation type is not allowed for 32-bit "
4872 "register"));
4873 return FALSE;
4874 }
4875 shift = 48;
4876 break;
4877 default:
4878 /* More cases should be added when more MOVW-related relocation types
4879 are supported in GAS. */
4880 gas_assert (aarch64_gas_internal_fixup_p ());
4881 /* The shift amount should have already been set by the parser. */
4882 return TRUE;
4883 }
4884 inst.base.operands[1].shifter.amount = shift;
4885 return TRUE;
4886 }
4887
4888 /* A primitive log calculator. */
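/* For example, get_logsz (1) == 0, get_logsz (4) == 2 and get_logsz (16) == 4;
a size that is not a power of two trips an assertion below. */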
4889
4890 static inline unsigned int
4891 get_logsz (unsigned int size)
4892 {
4893 const unsigned char ls[16] =
4894 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4895 if (size > 16)
4896 {
4897 gas_assert (0);
4898 return -1;
4899 }
4900 gas_assert (ls[size - 1] != (unsigned char)-1);
4901 return ls[size - 1];
4902 }
4903
4904 /* Determine and return the real reloc type code for an instruction
4905 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
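/* For example, an access such as "ldr x0, [x1, #:lo12:sym]" reaches here with
the pseudo type BFD_RELOC_AARCH64_LDST_LO12; a 64-bit transfer (logsz 3)
then selects BFD_RELOC_AARCH64_LDST64_LO12. */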
4906
4907 static inline bfd_reloc_code_real_type
4908 ldst_lo12_determine_real_reloc_type (void)
4909 {
4910 unsigned logsz;
4911 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4912 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4913
4914 const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
4915 {
4916 BFD_RELOC_AARCH64_LDST8_LO12,
4917 BFD_RELOC_AARCH64_LDST16_LO12,
4918 BFD_RELOC_AARCH64_LDST32_LO12,
4919 BFD_RELOC_AARCH64_LDST64_LO12,
4920 BFD_RELOC_AARCH64_LDST128_LO12
4921 },
4922 {
4923 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
4924 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
4925 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
4926 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
4927 BFD_RELOC_AARCH64_NONE
4928 },
4929 {
4930 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
4931 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
4932 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
4933 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
4934 BFD_RELOC_AARCH64_NONE
4935 }
4936 };
4937
4938 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
4939 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
4940 || (inst.reloc.type
4941 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
4942 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4943
4944 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4945 opd1_qlf =
4946 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4947 1, opd0_qlf, 0);
4948 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4949
4950 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4951 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
4952 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
4953 gas_assert (logsz <= 3);
4954 else
4955 gas_assert (logsz <= 4);
4956
4957 /* In reloc.c, these pseudo relocation types should be defined in the same
4958 order as the above reloc_ldst_lo12 array, because the array index
4959 calculation below relies on this. */
4960 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
4961 }
4962
4963 /* Check whether a register list REGINFO is valid. The registers must be
4964 numbered in increasing order (modulo 32), in increments of one or two.
4965
4966 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4967 increments of two.
4968
4969 Return FALSE if such a register list is invalid, otherwise return TRUE. */
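/* As decoded below, REGINFO packs the register count minus one into bits
[1:0], the first register number into bits [6:2], and each subsequent
register number into the next 5-bit field. */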
4970
4971 static bfd_boolean
4972 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4973 {
4974 uint32_t i, nb_regs, prev_regno, incr;
4975
4976 nb_regs = 1 + (reginfo & 0x3);
4977 reginfo >>= 2;
4978 prev_regno = reginfo & 0x1f;
4979 incr = accept_alternate ? 2 : 1;
4980
4981 for (i = 1; i < nb_regs; ++i)
4982 {
4983 uint32_t curr_regno;
4984 reginfo >>= 5;
4985 curr_regno = reginfo & 0x1f;
4986 if (curr_regno != ((prev_regno + incr) & 0x1f))
4987 return FALSE;
4988 prev_regno = curr_regno;
4989 }
4990
4991 return TRUE;
4992 }
4993
4994 /* Generic instruction operand parser. This does no encoding and no
4995 semantic validation; it merely squirrels values away in the inst
4996 structure. Returns TRUE or FALSE depending on whether the
4997 specified grammar matched. */
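/* For example, "ret" omits its optional Rn operand; parsing of that operand
fails, the code backtracks, and process_omitted_operand fills in the
default register number (30, i.e. x30). */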
4998
4999 static bfd_boolean
5000 parse_operands (char *str, const aarch64_opcode *opcode)
5001 {
5002 int i;
5003 char *backtrack_pos = 0;
5004 const enum aarch64_opnd *operands = opcode->operands;
5005
5006 clear_error ();
5007 skip_whitespace (str);
5008
5009 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5010 {
5011 int64_t val;
5012 int isreg32, isregzero;
5013 int comma_skipped_p = 0;
5014 aarch64_reg_type rtype;
5015 struct neon_type_el vectype;
5016 aarch64_opnd_info *info = &inst.base.operands[i];
5017
5018 DEBUG_TRACE ("parse operand %d", i);
5019
5020 /* Assign the operand code. */
5021 info->type = operands[i];
5022
5023 if (optional_operand_p (opcode, i))
5024 {
5025 /* Remember where we are in case we need to backtrack. */
5026 gas_assert (!backtrack_pos);
5027 backtrack_pos = str;
5028 }
5029
5030 /* Expect a comma between operands; the backtrack mechanism will take
5031 care of cases where an optional operand is omitted. */
5032 if (i > 0 && ! skip_past_char (&str, ','))
5033 {
5034 set_syntax_error (_("comma expected between operands"));
5035 goto failure;
5036 }
5037 else
5038 comma_skipped_p = 1;
5039
5040 switch (operands[i])
5041 {
5042 case AARCH64_OPND_Rd:
5043 case AARCH64_OPND_Rn:
5044 case AARCH64_OPND_Rm:
5045 case AARCH64_OPND_Rt:
5046 case AARCH64_OPND_Rt2:
5047 case AARCH64_OPND_Rs:
5048 case AARCH64_OPND_Ra:
5049 case AARCH64_OPND_Rt_SYS:
5050 case AARCH64_OPND_PAIRREG:
5051 po_int_reg_or_fail (1, 0);
5052 break;
5053
5054 case AARCH64_OPND_Rd_SP:
5055 case AARCH64_OPND_Rn_SP:
5056 po_int_reg_or_fail (0, 1);
5057 break;
5058
5059 case AARCH64_OPND_Rm_EXT:
5060 case AARCH64_OPND_Rm_SFT:
5061 po_misc_or_fail (parse_shifter_operand
5062 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5063 ? SHIFTED_ARITH_IMM
5064 : SHIFTED_LOGIC_IMM)));
5065 if (!info->shifter.operator_present)
5066 {
5067 /* Default to LSL if not present. Libopcodes prefers shifter
5068 kind to be explicit. */
5069 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5070 info->shifter.kind = AARCH64_MOD_LSL;
5071 /* For Rm_EXT, libopcodes will carry out a further check on whether
5072 or not the stack pointer is used in the instruction (recall that
5073 "the extend operator is not optional unless at least one of
5074 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5075 }
5076 break;
5077
5078 case AARCH64_OPND_Fd:
5079 case AARCH64_OPND_Fn:
5080 case AARCH64_OPND_Fm:
5081 case AARCH64_OPND_Fa:
5082 case AARCH64_OPND_Ft:
5083 case AARCH64_OPND_Ft2:
5084 case AARCH64_OPND_Sd:
5085 case AARCH64_OPND_Sn:
5086 case AARCH64_OPND_Sm:
5087 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5088 if (val == PARSE_FAIL)
5089 {
5090 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5091 goto failure;
5092 }
5093 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5094
5095 info->reg.regno = val;
5096 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5097 break;
5098
5099 case AARCH64_OPND_Vd:
5100 case AARCH64_OPND_Vn:
5101 case AARCH64_OPND_Vm:
5102 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5103 if (val == PARSE_FAIL)
5104 {
5105 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5106 goto failure;
5107 }
5108 if (vectype.defined & NTA_HASINDEX)
5109 goto failure;
5110
5111 info->reg.regno = val;
5112 info->qualifier = vectype_to_qualifier (&vectype);
5113 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5114 goto failure;
5115 break;
5116
5117 case AARCH64_OPND_VdD1:
5118 case AARCH64_OPND_VnD1:
5119 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5120 if (val == PARSE_FAIL)
5121 {
5122 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5123 goto failure;
5124 }
5125 if (vectype.type != NT_d || vectype.index != 1)
5126 {
5127 set_fatal_syntax_error
5128 (_("the top half of a 128-bit FP/SIMD register is expected"));
5129 goto failure;
5130 }
5131 info->reg.regno = val;
5132 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5133 here; it is correct for the purpose of encoding/decoding since
5134 only the register number is explicitly encoded in the related
5135 instructions, although this appears a bit hacky. */
5136 info->qualifier = AARCH64_OPND_QLF_S_D;
5137 break;
5138
5139 case AARCH64_OPND_Ed:
5140 case AARCH64_OPND_En:
5141 case AARCH64_OPND_Em:
5142 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5143 if (val == PARSE_FAIL)
5144 {
5145 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5146 goto failure;
5147 }
5148 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5149 goto failure;
5150
5151 info->reglane.regno = val;
5152 info->reglane.index = vectype.index;
5153 info->qualifier = vectype_to_qualifier (&vectype);
5154 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5155 goto failure;
5156 break;
5157
5158 case AARCH64_OPND_LVn:
5159 case AARCH64_OPND_LVt:
5160 case AARCH64_OPND_LVt_AL:
5161 case AARCH64_OPND_LEt:
5162 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
5163 goto failure;
5164 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5165 {
5166 set_fatal_syntax_error (_("invalid register list"));
5167 goto failure;
5168 }
5169 info->reglist.first_regno = (val >> 2) & 0x1f;
5170 info->reglist.num_regs = (val & 0x3) + 1;
5171 if (operands[i] == AARCH64_OPND_LEt)
5172 {
5173 if (!(vectype.defined & NTA_HASINDEX))
5174 goto failure;
5175 info->reglist.has_index = 1;
5176 info->reglist.index = vectype.index;
5177 }
5178 else if (!(vectype.defined & NTA_HASTYPE))
5179 goto failure;
5180 info->qualifier = vectype_to_qualifier (&vectype);
5181 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5182 goto failure;
5183 break;
5184
5185 case AARCH64_OPND_Cn:
5186 case AARCH64_OPND_Cm:
5187 po_reg_or_fail (REG_TYPE_CN);
5188 if (val > 15)
5189 {
5190 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
5191 goto failure;
5192 }
5193 inst.base.operands[i].reg.regno = val;
5194 break;
5195
5196 case AARCH64_OPND_SHLL_IMM:
5197 case AARCH64_OPND_IMM_VLSR:
5198 po_imm_or_fail (1, 64);
5199 info->imm.value = val;
5200 break;
5201
5202 case AARCH64_OPND_CCMP_IMM:
5203 case AARCH64_OPND_FBITS:
5204 case AARCH64_OPND_UIMM4:
5205 case AARCH64_OPND_UIMM3_OP1:
5206 case AARCH64_OPND_UIMM3_OP2:
5207 case AARCH64_OPND_IMM_VLSL:
5208 case AARCH64_OPND_IMM:
5209 case AARCH64_OPND_WIDTH:
5210 po_imm_nc_or_fail ();
5211 info->imm.value = val;
5212 break;
5213
5214 case AARCH64_OPND_UIMM7:
5215 po_imm_or_fail (0, 127);
5216 info->imm.value = val;
5217 break;
5218
5219 case AARCH64_OPND_IDX:
5220 case AARCH64_OPND_BIT_NUM:
5221 case AARCH64_OPND_IMMR:
5222 case AARCH64_OPND_IMMS:
5223 po_imm_or_fail (0, 63);
5224 info->imm.value = val;
5225 break;
5226
5227 case AARCH64_OPND_IMM0:
5228 po_imm_nc_or_fail ();
5229 if (val != 0)
5230 {
5231 set_fatal_syntax_error (_("immediate zero expected"));
5232 goto failure;
5233 }
5234 info->imm.value = 0;
5235 break;
5236
5237 case AARCH64_OPND_FPIMM0:
5238 {
5239 int qfloat;
5240 bfd_boolean res1 = FALSE, res2 = FALSE;
5241 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5242 it is probably not worth the effort to support it. */
5243 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
5244 && !(res2 = parse_constant_immediate (&str, &val)))
5245 goto failure;
5246 if ((res1 && qfloat == 0) || (res2 && val == 0))
5247 {
5248 info->imm.value = 0;
5249 info->imm.is_fp = 1;
5250 break;
5251 }
5252 set_fatal_syntax_error (_("immediate zero expected"));
5253 goto failure;
5254 }
5255
5256 case AARCH64_OPND_IMM_MOV:
5257 {
5258 char *saved = str;
5259 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5260 reg_name_p (str, REG_TYPE_VN))
5261 goto failure;
5262 str = saved;
5263 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5264 GE_OPT_PREFIX, 1));
5265 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5266 later. fix_mov_imm_insn will try to determine a machine
5267 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5268 message if the immediate cannot be moved by a single
5269 instruction. */
5270 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5271 inst.base.operands[i].skip = 1;
5272 }
5273 break;
5274
5275 case AARCH64_OPND_SIMD_IMM:
5276 case AARCH64_OPND_SIMD_IMM_SFT:
5277 if (! parse_big_immediate (&str, &val))
5278 goto failure;
5279 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5280 /* addr_off_p */ 0,
5281 /* need_libopcodes_p */ 1,
5282 /* skip_p */ 1);
5283 /* Parse shift.
5284 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5285 shift, we don't check it here; we leave the checking to
5286 the libopcodes (operand_general_constraint_met_p). By
5287 doing this, we achieve better diagnostics. */
5288 if (skip_past_comma (&str)
5289 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5290 goto failure;
5291 if (!info->shifter.operator_present
5292 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5293 {
5294 /* Default to LSL if not present. Libopcodes prefers shifter
5295 kind to be explicit. */
5296 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5297 info->shifter.kind = AARCH64_MOD_LSL;
5298 }
5299 break;
5300
5301 case AARCH64_OPND_FPIMM:
5302 case AARCH64_OPND_SIMD_FPIMM:
5303 {
5304 int qfloat;
5305 bfd_boolean dp_p
5306 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5307 == 8);
5308 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
5309 goto failure;
5310 if (qfloat == 0)
5311 {
5312 set_fatal_syntax_error (_("invalid floating-point constant"));
5313 goto failure;
5314 }
5315 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5316 inst.base.operands[i].imm.is_fp = 1;
5317 }
5318 break;
5319
5320 case AARCH64_OPND_LIMM:
5321 po_misc_or_fail (parse_shifter_operand (&str, info,
5322 SHIFTED_LOGIC_IMM));
5323 if (info->shifter.operator_present)
5324 {
5325 set_fatal_syntax_error
5326 (_("shift not allowed for bitmask immediate"));
5327 goto failure;
5328 }
5329 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5330 /* addr_off_p */ 0,
5331 /* need_libopcodes_p */ 1,
5332 /* skip_p */ 1);
5333 break;
5334
5335 case AARCH64_OPND_AIMM:
5336 if (opcode->op == OP_ADD)
5337 /* ADD may have relocation types. */
5338 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5339 SHIFTED_ARITH_IMM));
5340 else
5341 po_misc_or_fail (parse_shifter_operand (&str, info,
5342 SHIFTED_ARITH_IMM));
5343 switch (inst.reloc.type)
5344 {
5345 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5346 info->shifter.amount = 12;
5347 break;
5348 case BFD_RELOC_UNUSED:
5349 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5350 if (info->shifter.kind != AARCH64_MOD_NONE)
5351 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5352 inst.reloc.pc_rel = 0;
5353 break;
5354 default:
5355 break;
5356 }
5357 info->imm.value = 0;
5358 if (!info->shifter.operator_present)
5359 {
5360 /* Default to LSL if not present. Libopcodes prefers shifter
5361 kind to be explicit. */
5362 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5363 info->shifter.kind = AARCH64_MOD_LSL;
5364 }
5365 break;
5366
5367 case AARCH64_OPND_HALF:
5368 {
5369 /* #<imm16> or relocation. */
5370 int internal_fixup_p;
5371 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5372 if (internal_fixup_p)
5373 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5374 skip_whitespace (str);
5375 if (skip_past_comma (&str))
5376 {
5377 /* {, LSL #<shift>} */
5378 if (! aarch64_gas_internal_fixup_p ())
5379 {
5380 set_fatal_syntax_error (_("can't mix relocation modifier "
5381 "with explicit shift"));
5382 goto failure;
5383 }
5384 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5385 }
5386 else
5387 inst.base.operands[i].shifter.amount = 0;
5388 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5389 inst.base.operands[i].imm.value = 0;
5390 if (! process_movw_reloc_info ())
5391 goto failure;
5392 }
5393 break;
5394
5395 case AARCH64_OPND_EXCEPTION:
5396 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
5397 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5398 /* addr_off_p */ 0,
5399 /* need_libopcodes_p */ 0,
5400 /* skip_p */ 1);
5401 break;
5402
5403 case AARCH64_OPND_NZCV:
5404 {
5405 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5406 if (nzcv != NULL)
5407 {
5408 str += 4;
5409 info->imm.value = nzcv->value;
5410 break;
5411 }
5412 po_imm_or_fail (0, 15);
5413 info->imm.value = val;
5414 }
5415 break;
5416
5417 case AARCH64_OPND_COND:
5418 case AARCH64_OPND_COND1:
5419 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5420 str += 2;
5421 if (info->cond == NULL)
5422 {
5423 set_syntax_error (_("invalid condition"));
5424 goto failure;
5425 }
5426 else if (operands[i] == AARCH64_OPND_COND1
5427 && (info->cond->value & 0xe) == 0xe)
5428 {
5429 /* Do not allow AL or NV. */
5430 set_default_error ();
5431 goto failure;
5432 }
5433 break;
5434
5435 case AARCH64_OPND_ADDR_ADRP:
5436 po_misc_or_fail (parse_adrp (&str));
5437 /* Clear the value, as the operand needs to be relocated. */
5438 info->imm.value = 0;
5439 break;
5440
5441 case AARCH64_OPND_ADDR_PCREL14:
5442 case AARCH64_OPND_ADDR_PCREL19:
5443 case AARCH64_OPND_ADDR_PCREL21:
5444 case AARCH64_OPND_ADDR_PCREL26:
5445 po_misc_or_fail (parse_address_reloc (&str, info));
5446 if (!info->addr.pcrel)
5447 {
5448 set_syntax_error (_("invalid pc-relative address"));
5449 goto failure;
5450 }
5451 if (inst.gen_lit_pool
5452 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5453 {
5454 /* Only permit "=value" in the literal load instructions.
5455 The literal will be generated by programmer_friendly_fixup. */
5456 set_syntax_error (_("invalid use of \"=immediate\""));
5457 goto failure;
5458 }
5459 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5460 {
5461 set_syntax_error (_("unrecognized relocation suffix"));
5462 goto failure;
5463 }
5464 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5465 {
5466 info->imm.value = inst.reloc.exp.X_add_number;
5467 inst.reloc.type = BFD_RELOC_UNUSED;
5468 }
5469 else
5470 {
5471 info->imm.value = 0;
5472 if (inst.reloc.type == BFD_RELOC_UNUSED)
5473 switch (opcode->iclass)
5474 {
5475 case compbranch:
5476 case condbranch:
5477 /* e.g. CBZ or B.COND */
5478 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5479 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5480 break;
5481 case testbranch:
5482 /* e.g. TBZ */
5483 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5484 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5485 break;
5486 case branch_imm:
5487 /* e.g. B or BL */
5488 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5489 inst.reloc.type =
5490 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5491 : BFD_RELOC_AARCH64_JUMP26;
5492 break;
5493 case loadlit:
5494 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5495 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5496 break;
5497 case pcreladdr:
5498 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5499 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5500 break;
5501 default:
5502 gas_assert (0);
5503 abort ();
5504 }
5505 inst.reloc.pc_rel = 1;
5506 }
5507 break;
5508
5509 case AARCH64_OPND_ADDR_SIMPLE:
5510 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5511 /* [<Xn|SP>{, #<simm>}] */
5512 po_char_or_fail ('[');
5513 po_reg_or_fail (REG_TYPE_R64_SP);
5514 /* Accept optional ", #0". */
5515 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5516 && skip_past_char (&str, ','))
5517 {
5518 skip_past_char (&str, '#');
5519 if (! skip_past_char (&str, '0'))
5520 {
5521 set_fatal_syntax_error
5522 (_("the optional immediate offset can only be 0"));
5523 goto failure;
5524 }
5525 }
5526 po_char_or_fail (']');
5527 info->addr.base_regno = val;
5528 break;
5529
5530 case AARCH64_OPND_ADDR_REGOFF:
5531 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5532 po_misc_or_fail (parse_address (&str, info, 0));
5533 if (info->addr.pcrel || !info->addr.offset.is_reg
5534 || !info->addr.preind || info->addr.postind
5535 || info->addr.writeback)
5536 {
5537 set_syntax_error (_("invalid addressing mode"));
5538 goto failure;
5539 }
5540 if (!info->shifter.operator_present)
5541 {
5542 /* Default to LSL if not present. Libopcodes prefers shifter
5543 kind to be explicit. */
5544 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5545 info->shifter.kind = AARCH64_MOD_LSL;
5546 }
5547 /* Qualifier to be deduced by libopcodes. */
5548 break;
5549
5550 case AARCH64_OPND_ADDR_SIMM7:
5551 po_misc_or_fail (parse_address (&str, info, 0));
5552 if (info->addr.pcrel || info->addr.offset.is_reg
5553 || (!info->addr.preind && !info->addr.postind))
5554 {
5555 set_syntax_error (_("invalid addressing mode"));
5556 goto failure;
5557 }
5558 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5559 /* addr_off_p */ 1,
5560 /* need_libopcodes_p */ 1,
5561 /* skip_p */ 0);
5562 break;
5563
5564 case AARCH64_OPND_ADDR_SIMM9:
5565 case AARCH64_OPND_ADDR_SIMM9_2:
5566 po_misc_or_fail (parse_address_reloc (&str, info));
5567 if (info->addr.pcrel || info->addr.offset.is_reg
5568 || (!info->addr.preind && !info->addr.postind)
5569 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5570 && info->addr.writeback))
5571 {
5572 set_syntax_error (_("invalid addressing mode"));
5573 goto failure;
5574 }
5575 if (inst.reloc.type != BFD_RELOC_UNUSED)
5576 {
5577 set_syntax_error (_("relocation not allowed"));
5578 goto failure;
5579 }
5580 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5581 /* addr_off_p */ 1,
5582 /* need_libopcodes_p */ 1,
5583 /* skip_p */ 0);
5584 break;
5585
5586 case AARCH64_OPND_ADDR_UIMM12:
5587 po_misc_or_fail (parse_address_reloc (&str, info));
5588 if (info->addr.pcrel || info->addr.offset.is_reg
5589 || !info->addr.preind || info->addr.writeback)
5590 {
5591 set_syntax_error (_("invalid addressing mode"));
5592 goto failure;
5593 }
5594 if (inst.reloc.type == BFD_RELOC_UNUSED)
5595 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5596 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5597 || (inst.reloc.type
5598 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
5599 || (inst.reloc.type
5600 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
5601 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5602 /* Leave qualifier to be determined by libopcodes. */
5603 break;
5604
5605 case AARCH64_OPND_SIMD_ADDR_POST:
5606 /* [<Xn|SP>], <Xm|#<amount>> */
5607 po_misc_or_fail (parse_address (&str, info, 1));
5608 if (!info->addr.postind || !info->addr.writeback)
5609 {
5610 set_syntax_error (_("invalid addressing mode"));
5611 goto failure;
5612 }
5613 if (!info->addr.offset.is_reg)
5614 {
5615 if (inst.reloc.exp.X_op == O_constant)
5616 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5617 else
5618 {
5619 set_fatal_syntax_error
5620 (_("writeback value should be an immediate constant"));
5621 goto failure;
5622 }
5623 }
5624 /* No qualifier. */
5625 break;
5626
5627 case AARCH64_OPND_SYSREG:
5628 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
5629 == PARSE_FAIL)
5630 {
5631 set_syntax_error (_("unknown or missing system register name"));
5632 goto failure;
5633 }
5634 inst.base.operands[i].sysreg = val;
5635 break;
5636
5637 case AARCH64_OPND_PSTATEFIELD:
5638 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
5639 == PARSE_FAIL)
5640 {
5641 set_syntax_error (_("unknown or missing PSTATE field name"));
5642 goto failure;
5643 }
5644 inst.base.operands[i].pstatefield = val;
5645 break;
5646
5647 case AARCH64_OPND_SYSREG_IC:
5648 inst.base.operands[i].sysins_op =
5649 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5650 goto sys_reg_ins;
5651 case AARCH64_OPND_SYSREG_DC:
5652 inst.base.operands[i].sysins_op =
5653 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5654 goto sys_reg_ins;
5655 case AARCH64_OPND_SYSREG_AT:
5656 inst.base.operands[i].sysins_op =
5657 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5658 goto sys_reg_ins;
5659 case AARCH64_OPND_SYSREG_TLBI:
5660 inst.base.operands[i].sysins_op =
5661 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5662 sys_reg_ins:
5663 if (inst.base.operands[i].sysins_op == NULL)
5664 {
5665 set_fatal_syntax_error ( _("unknown or missing operation name"));
5666 goto failure;
5667 }
5668 break;
5669
5670 case AARCH64_OPND_BARRIER:
5671 case AARCH64_OPND_BARRIER_ISB:
5672 val = parse_barrier (&str);
5673 if (val != PARSE_FAIL
5674 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5675 {
5676 /* ISB only accepts the option name 'sy'. */
5677 set_syntax_error
5678 (_("the specified option is not accepted in ISB"));
5679 /* Turn off backtrack as this optional operand is present. */
5680 backtrack_pos = 0;
5681 goto failure;
5682 }
5683 /* This is an extension to accept a 0..15 immediate. */
5684 if (val == PARSE_FAIL)
5685 po_imm_or_fail (0, 15);
5686 info->barrier = aarch64_barrier_options + val;
5687 break;
5688
5689 case AARCH64_OPND_PRFOP:
5690 val = parse_pldop (&str);
5691 /* This is an extension to accept a 0..31 immediate. */
5692 if (val == PARSE_FAIL)
5693 po_imm_or_fail (0, 31);
5694 inst.base.operands[i].prfop = aarch64_prfops + val;
5695 break;
5696
5697 case AARCH64_OPND_BARRIER_PSB:
5698 val = parse_barrier_psb (&str, &(info->hint_option));
5699 if (val == PARSE_FAIL)
5700 goto failure;
5701 break;
5702
5703 default:
5704 as_fatal (_("unhandled operand code %d"), operands[i]);
5705 }
5706
5707 /* If we get here, this operand was successfully parsed. */
5708 inst.base.operands[i].present = 1;
5709 continue;
5710
5711 failure:
5712 /* The parse routine should already have set the error, but in case
5713 not, set a default one here. */
5714 if (! error_p ())
5715 set_default_error ();
5716
5717 if (! backtrack_pos)
5718 goto parse_operands_return;
5719
5720 {
5721 /* We reach here because this operand is marked as optional, and
5722 either no operand was supplied or the operand was supplied but it
5723 was syntactically incorrect. In the latter case we report an
5724 error. In the former case we perform a few more checks before
5725 dropping through to the code to insert the default operand. */
5726
5727 char *tmp = backtrack_pos;
5728 char endchar = END_OF_INSN;
5729
5730 if (i != (aarch64_num_of_operands (opcode) - 1))
5731 endchar = ',';
5732 skip_past_char (&tmp, ',');
5733
5734 if (*tmp != endchar)
5735 /* The user has supplied an operand in the wrong format. */
5736 goto parse_operands_return;
5737
5738 /* Make sure there is not a comma before the optional operand.
5739 For example the fifth operand of 'sys' is optional:
5740
5741 sys #0,c0,c0,#0, <--- wrong
5742 sys #0,c0,c0,#0 <--- correct. */
5743 if (comma_skipped_p && i && endchar == END_OF_INSN)
5744 {
5745 set_fatal_syntax_error
5746 (_("unexpected comma before the omitted optional operand"));
5747 goto parse_operands_return;
5748 }
5749 }
5750
5751 /* Reaching here means we are dealing with an optional operand that is
5752 omitted from the assembly line. */
5753 gas_assert (optional_operand_p (opcode, i));
5754 info->present = 0;
5755 process_omitted_operand (operands[i], opcode, i, info);
5756
5757 /* Try again, skipping the optional operand at backtrack_pos. */
5758 str = backtrack_pos;
5759 backtrack_pos = 0;
5760
5761 /* Clear any error record after the omitted optional operand has been
5762 successfully handled. */
5763 clear_error ();
5764 }
5765
5766 /* Check if we have parsed all the operands. */
5767 if (*str != '\0' && ! error_p ())
5768 {
5769 /* Set I to the index of the last present operand; this is
5770 for the purpose of diagnostics. */
5771 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5772 ;
5773 set_fatal_syntax_error
5774 (_("unexpected characters following instruction"));
5775 }
5776
5777 parse_operands_return:
5778
5779 if (error_p ())
5780 {
5781 DEBUG_TRACE ("parsing FAIL: %s - %s",
5782 operand_mismatch_kind_names[get_error_kind ()],
5783 get_error_message ());
5784 /* Record the operand error properly; this is useful when there
5785 are multiple instruction templates for a mnemonic name, so that
5786 later on, we can select the error that most closely describes
5787 the problem. */
5788 record_operand_error (opcode, i, get_error_kind (),
5789 get_error_message ());
5790 return FALSE;
5791 }
5792 else
5793 {
5794 DEBUG_TRACE ("parsing SUCCESS");
5795 return TRUE;
5796 }
5797 }
5798
5799 /* Perform some fix-ups to provide programmer-friendly features while
5800 keeping libopcodes happy, i.e. libopcodes only accepts
5801 the preferred architectural syntax.
5802 Return FALSE if there is any failure; otherwise return TRUE. */
5803
5804 static bfd_boolean
5805 programmer_friendly_fixup (aarch64_instruction *instr)
5806 {
5807 aarch64_inst *base = &instr->base;
5808 const aarch64_opcode *opcode = base->opcode;
5809 enum aarch64_op op = opcode->op;
5810 aarch64_opnd_info *operands = base->operands;
5811
5812 DEBUG_TRACE ("enter");
5813
5814 switch (opcode->iclass)
5815 {
5816 case testbranch:
5817 /* TBNZ Xn|Wn, #uimm6, label
5818 Test and Branch Not Zero: conditionally jumps to label if bit number
5819 uimm6 in register Xn is not zero. The bit number implies the width of
5820 the register, which may be written and should be disassembled as Wn if
5821 uimm is less than 32. */
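/* For example, "tbz w0, #3, lbl" is accepted here by promoting the register
qualifier from W to X; the bit number alone implies the register width. */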
5822 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5823 {
5824 if (operands[1].imm.value >= 32)
5825 {
5826 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5827 0, 31);
5828 return FALSE;
5829 }
5830 operands[0].qualifier = AARCH64_OPND_QLF_X;
5831 }
5832 break;
5833 case loadlit:
5834 /* LDR Wt, label | =value
5835 As a convenience assemblers will typically permit the notation
5836 "=value" in conjunction with the pc-relative literal load instructions
5837 to automatically place an immediate value or symbolic address in a
5838 nearby literal pool and generate a hidden label which references it.
5839 ISREG has been set to 0 in the case of =value. */
5840 if (instr->gen_lit_pool
5841 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5842 {
5843 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5844 if (op == OP_LDRSW_LIT)
5845 size = 4;
5846 if (instr->reloc.exp.X_op != O_constant
5847 && instr->reloc.exp.X_op != O_big
5848 && instr->reloc.exp.X_op != O_symbol)
5849 {
5850 record_operand_error (opcode, 1,
5851 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5852 _("constant expression expected"));
5853 return FALSE;
5854 }
5855 if (! add_to_lit_pool (&instr->reloc.exp, size))
5856 {
5857 record_operand_error (opcode, 1,
5858 AARCH64_OPDE_OTHER_ERROR,
5859 _("literal pool insertion failed"));
5860 return FALSE;
5861 }
5862 }
5863 break;
5864 case log_shift:
5865 case bitfield:
5866 /* UXT[BHW] Wd, Wn
5867 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5868 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5869 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5870 A programmer-friendly assembler should accept a destination Xd in
5871 place of Wd, however that is not the preferred form for disassembly.
5872 */
5873 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5874 && operands[1].qualifier == AARCH64_OPND_QLF_W
5875 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5876 operands[0].qualifier = AARCH64_OPND_QLF_W;
5877 break;
5878
5879 case addsub_ext:
5880 {
5881 /* In the 64-bit form, the final register operand is written as Wm
5882 for all but the (possibly omitted) UXTX/LSL and SXTX
5883 operators.
5884 As a programmer-friendly assembler, we accept e.g.
5885 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5886 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5887 int idx = aarch64_operand_index (opcode->operands,
5888 AARCH64_OPND_Rm_EXT);
5889 gas_assert (idx == 1 || idx == 2);
5890 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5891 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5892 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5893 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5894 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5895 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5896 }
5897 break;
5898
5899 default:
5900 break;
5901 }
5902
5903 DEBUG_TRACE ("exit with SUCCESS");
5904 return TRUE;
5905 }
5906
5907 /* Check for loads and stores that will cause unpredictable behavior. */
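/* For example, "ldr x0, [x0], #8" (writeback to the transfer register) and
"ldp x0, x0, [x1]" (load pair into the same register) both draw a warning
here. */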
5908
5909 static void
5910 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5911 {
5912 aarch64_inst *base = &instr->base;
5913 const aarch64_opcode *opcode = base->opcode;
5914 const aarch64_opnd_info *opnds = base->operands;
5915 switch (opcode->iclass)
5916 {
5917 case ldst_pos:
5918 case ldst_imm9:
5919 case ldst_unscaled:
5920 case ldst_unpriv:
5921 /* Loading/storing the base register is unpredictable if writeback. */
5922 if ((aarch64_get_operand_class (opnds[0].type)
5923 == AARCH64_OPND_CLASS_INT_REG)
5924 && opnds[0].reg.regno == opnds[1].addr.base_regno
5925 && opnds[1].addr.base_regno != REG_SP
5926 && opnds[1].addr.writeback)
5927 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5928 break;
5929 case ldstpair_off:
5930 case ldstnapair_offs:
5931 case ldstpair_indexed:
5932 /* Loading/storing the base register is unpredictable if writeback. */
5933 if ((aarch64_get_operand_class (opnds[0].type)
5934 == AARCH64_OPND_CLASS_INT_REG)
5935 && (opnds[0].reg.regno == opnds[2].addr.base_regno
5936 || opnds[1].reg.regno == opnds[2].addr.base_regno)
5937 && opnds[2].addr.base_regno != REG_SP
5938 && opnds[2].addr.writeback)
5939 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5940 /* Load operations must load different registers. */
5941 if ((opcode->opcode & (1 << 22))
5942 && opnds[0].reg.regno == opnds[1].reg.regno)
5943 as_warn (_("unpredictable load of register pair -- `%s'"), str);
5944 break;
5945 default:
5946 break;
5947 }
5948 }
5949
5950 /* A wrapper function to interface with libopcodes for encoding and to
5951 record the error message, if there is any.
5952
5953 Return TRUE on success; otherwise return FALSE. */
5954
5955 static bfd_boolean
5956 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5957 aarch64_insn *code)
5958 {
5959 aarch64_operand_error error_info;
5960 error_info.kind = AARCH64_OPDE_NIL;
5961 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5962 return TRUE;
5963 else
5964 {
5965 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5966 record_operand_error_info (opcode, &error_info);
5967 return FALSE;
5968 }
5969 }
5970
5971 #ifdef DEBUG_AARCH64
5972 static inline void
5973 dump_opcode_operands (const aarch64_opcode *opcode)
5974 {
5975 int i = 0;
5976 while (opcode->operands[i] != AARCH64_OPND_NIL)
5977 {
5978 aarch64_verbose ("\t\t opnd%d: %s", i,
5979 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5980 ? aarch64_get_operand_name (opcode->operands[i])
5981 : aarch64_get_operand_desc (opcode->operands[i]));
5982 ++i;
5983 }
5984 }
5985 #endif /* DEBUG_AARCH64 */
5986
5987 /* This is the guts of the machine-dependent assembler. STR points to a
5988 machine-dependent instruction. This function is supposed to emit
5989 the frags/bytes it assembles to. */
5990
5991 void
5992 md_assemble (char *str)
5993 {
5994 char *p = str;
5995 templates *template;
5996 aarch64_opcode *opcode;
5997 aarch64_inst *inst_base;
5998 unsigned saved_cond;
5999
6000 /* Align the previous label if needed. */
6001 if (last_label_seen != NULL)
6002 {
6003 symbol_set_frag (last_label_seen, frag_now);
6004 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
6005 S_SET_SEGMENT (last_label_seen, now_seg);
6006 }
6007
6008 inst.reloc.type = BFD_RELOC_UNUSED;
6009
6010 DEBUG_TRACE ("\n\n");
6011 DEBUG_TRACE ("==============================");
6012 DEBUG_TRACE ("Enter md_assemble with %s", str);
6013
6014 template = opcode_lookup (&p);
6015 if (!template)
6016 {
6017 /* It wasn't an instruction, but it might be a register alias created
6018 by a directive of the form "alias .req reg". */
6019 if (!create_register_alias (str, p))
6020 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
6021 str);
6022 return;
6023 }
6024
6025 skip_whitespace (p);
6026 if (*p == ',')
6027 {
6028 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
6029 get_mnemonic_name (str), str);
6030 return;
6031 }
6032
6033 init_operand_error_report ();
6034
6035 /* Sections are assumed to start aligned. In an executable section, there is
6036 no MAP_DATA symbol pending, so we only align the address during the
6037 MAP_DATA --> MAP_INSN transition.
6038 For other sections, this is not guaranteed. */
6039 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
6040 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
6041 frag_align_code (2, 0);
6042
6043 saved_cond = inst.cond;
6044 reset_aarch64_instruction (&inst);
6045 inst.cond = saved_cond;
6046
6047 /* Iterate through all opcode entries with the same mnemonic name. */
6048 do
6049 {
6050 opcode = template->opcode;
6051
6052 DEBUG_TRACE ("opcode %s found", opcode->name);
6053 #ifdef DEBUG_AARCH64
6054 if (debug_dump)
6055 dump_opcode_operands (opcode);
6056 #endif /* DEBUG_AARCH64 */
6057
6058 mapping_state (MAP_INSN);
6059
6060 inst_base = &inst.base;
6061 inst_base->opcode = opcode;
6062
6063 /* Truly conditionally executed instructions, e.g. b.cond. */
6064 if (opcode->flags & F_COND)
6065 {
6066 gas_assert (inst.cond != COND_ALWAYS);
6067 inst_base->cond = get_cond_from_value (inst.cond);
6068 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
6069 }
6070 else if (inst.cond != COND_ALWAYS)
6071 {
6072 /* We should not arrive here: the assembly looks like a conditional
6073 instruction but the opcode that was found is unconditional. */
6074 gas_assert (0);
6075 continue;
6076 }
6077
6078 if (parse_operands (p, opcode)
6079 && programmer_friendly_fixup (&inst)
6080 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
6081 {
6082 /* Check that this instruction is supported for this CPU. */
6083 if (!opcode->avariant
6084 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
6085 {
6086 as_bad (_("selected processor does not support `%s'"), str);
6087 return;
6088 }
6089
6090 warn_unpredictable_ldst (&inst, str);
6091
6092 if (inst.reloc.type == BFD_RELOC_UNUSED
6093 || !inst.reloc.need_libopcodes_p)
6094 output_inst (NULL);
6095 else
6096 {
6097 /* If a relocation is generated for the instruction,
6098 store the instruction information for the future fix-up. */
6099 struct aarch64_inst *copy;
6100 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
6101 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
6102 abort ();
6103 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
6104 output_inst (copy);
6105 }
6106 return;
6107 }
6108
6109 template = template->next;
6110 if (template != NULL)
6111 {
6112 reset_aarch64_instruction (&inst);
6113 inst.cond = saved_cond;
6114 }
6115 }
6116 while (template != NULL);
6117
6118 /* Issue the error messages if any. */
6119 output_operand_error_report (str);
6120 }
6121
6122 /* Various frobbings of labels and their addresses. */
6123
6124 void
6125 aarch64_start_line_hook (void)
6126 {
6127 last_label_seen = NULL;
6128 }
6129
6130 void
6131 aarch64_frob_label (symbolS * sym)
6132 {
6133 last_label_seen = sym;
6134
6135 dwarf2_emit_label (sym);
6136 }
6137
6138 int
6139 aarch64_data_in_code (void)
6140 {
6141 if (!strncmp (input_line_pointer + 1, "data:", 5))
6142 {
6143 *input_line_pointer = '/';
6144 input_line_pointer += 5;
6145 *input_line_pointer = 0;
6146 return 1;
6147 }
6148
6149 return 0;
6150 }
6151
6152 char *
6153 aarch64_canonicalize_symbol_name (char *name)
6154 {
6155 int len;
6156
6157 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
6158 *(name + len - 5) = 0;
6159
6160 return name;
6161 }
6162 \f
6163 /* Table of all register names defined by default. The user can
6164 define additional names with .req. Note that all register names
6165 should appear in both upper and lowercase variants. Some registers
6166 also have mixed-case names. */
6167
6168 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
6169 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
6170 #define REGSET31(p,t) \
6171 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
6172 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
6173 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
6174 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
6175 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
6176 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
6177 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
6178 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
6179 #define REGSET(p,t) \
6180 REGSET31(p,t), REGNUM(p,31,t)
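/* For example, REGSET31 (x, R_64) expands to the thirty-one entries
   x0..x30 with type REG_TYPE_R_64, and REGSET (v, VN) additionally
   includes v31.  */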
6181
6182 /* These go into aarch64_reg_hsh hash-table. */
6183 static const reg_entry reg_names[] = {
6184 /* Integer registers. */
6185 REGSET31 (x, R_64), REGSET31 (X, R_64),
6186 REGSET31 (w, R_32), REGSET31 (W, R_32),
6187
6188 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
6189 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
6190
6191 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
6192 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
6193
6194 /* Coprocessor register numbers. */
6195 REGSET (c, CN), REGSET (C, CN),
6196
6197 /* Floating-point single precision registers. */
6198 REGSET (s, FP_S), REGSET (S, FP_S),
6199
6200 /* Floating-point double precision registers. */
6201 REGSET (d, FP_D), REGSET (D, FP_D),
6202
6203 /* Floating-point half precision registers. */
6204 REGSET (h, FP_H), REGSET (H, FP_H),
6205
6206 /* Floating-point byte precision registers. */
6207 REGSET (b, FP_B), REGSET (B, FP_B),
6208
6209 /* Floating-point quad precision registers. */
6210 REGSET (q, FP_Q), REGSET (Q, FP_Q),
6211
6212 /* FP/SIMD registers. */
6213 REGSET (v, VN), REGSET (V, VN),
6214 };
6215
6216 #undef REGDEF
6217 #undef REGNUM
6218 #undef REGSET
6219
6220 #define N 1
6221 #define n 0
6222 #define Z 1
6223 #define z 0
6224 #define C 1
6225 #define c 0
6226 #define V 1
6227 #define v 0
6228 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
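/* For example, "NzCV" encodes as B (N, z, C, V) == 0b1011: the N, C and
   V flags set and the Z flag clear.  */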
6229 static const asm_nzcv nzcv_names[] = {
6230 {"nzcv", B (n, z, c, v)},
6231 {"nzcV", B (n, z, c, V)},
6232 {"nzCv", B (n, z, C, v)},
6233 {"nzCV", B (n, z, C, V)},
6234 {"nZcv", B (n, Z, c, v)},
6235 {"nZcV", B (n, Z, c, V)},
6236 {"nZCv", B (n, Z, C, v)},
6237 {"nZCV", B (n, Z, C, V)},
6238 {"Nzcv", B (N, z, c, v)},
6239 {"NzcV", B (N, z, c, V)},
6240 {"NzCv", B (N, z, C, v)},
6241 {"NzCV", B (N, z, C, V)},
6242 {"NZcv", B (N, Z, c, v)},
6243 {"NZcV", B (N, Z, c, V)},
6244 {"NZCv", B (N, Z, C, v)},
6245 {"NZCV", B (N, Z, C, V)}
6246 };
6247
6248 #undef N
6249 #undef n
6250 #undef Z
6251 #undef z
6252 #undef C
6253 #undef c
6254 #undef V
6255 #undef v
6256 #undef B
6257 \f
6258 /* MD interface: bits in the object file. */
6259
6260 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6261 for use in the a.out file, and store them in the array pointed to by buf.
6262 This knows about the endian-ness of the target machine and does
6263 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
6264 2 (short) and 4 (long). Floating-point numbers are put out as a series of
6265 LITTLENUMS (shorts, here at least). */
6266
6267 void
6268 md_number_to_chars (char *buf, valueT val, int n)
6269 {
6270 if (target_big_endian)
6271 number_to_chars_bigendian (buf, val, n);
6272 else
6273 number_to_chars_littleendian (buf, val, n);
6274 }
6275
6276 /* MD interface: Sections. */
6277
6278 /* Estimate the size of a frag before relaxing. Assume everything fits in
6279 4 bytes. */
6280
6281 int
6282 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6283 {
6284 fragp->fr_var = 4;
6285 return 4;
6286 }
6287
6288 /* Round up a section size to the appropriate boundary. */
6289
6290 valueT
6291 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
6292 {
6293 return size;
6294 }
6295
6296 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6297 of an rs_align_code fragment.
6298
6299 Here we fill the frag with the appropriate info for padding the
6300 output stream. The resulting frag will consist of a fixed (fr_fix)
6301 and of a repeating (fr_var) part.
6302
6303 The fixed content is always emitted before the repeating content and
6304 these two parts are used as follows in constructing the output:
6305 - the fixed part will be used to align to a valid instruction word
6306 boundary, in case we start at a misaligned address; as no
6307 executable instruction can live at the misaligned location, we
6308 simply fill with zeros;
6309 - the variable part will be used to cover the remaining padding and
6310 we fill using the AArch64 NOP instruction.
6311
6312 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6313 enough storage space for up to 3 bytes of padding back to a valid
6314 instruction alignment and exactly 4 bytes to store the NOP pattern. */
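/* For example, if the padding starts one byte past a word boundary and
   7 bytes remain, the first 3 bytes are zero-filled (the fixed part)
   and the remaining 4 bytes are covered by a single NOP (the variable
   part).  */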
6315
6316 void
6317 aarch64_handle_align (fragS * fragP)
6318 {
6319 /* NOP = d503201f */
6320 /* AArch64 instructions are always little-endian. */
6321 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6322
6323 int bytes, fix, noop_size;
6324 char *p;
6325
6326 if (fragP->fr_type != rs_align_code)
6327 return;
6328
6329 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6330 p = fragP->fr_literal + fragP->fr_fix;
6331
6332 #ifdef OBJ_ELF
6333 gas_assert (fragP->tc_frag_data.recorded);
6334 #endif
6335
6336 noop_size = sizeof (aarch64_noop);
6337
6338 fix = bytes & (noop_size - 1);
6339 if (fix)
6340 {
6341 #ifdef OBJ_ELF
6342 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6343 #endif
6344 memset (p, 0, fix);
6345 p += fix;
6346 fragP->fr_fix += fix;
6347 }
6348
6349 if (noop_size)
6350 memcpy (p, aarch64_noop, noop_size);
6351 fragP->fr_var = noop_size;
6352 }
6353
6354 /* Perform target-specific initialisation of a frag.
6355 Note - despite the name this initialisation is not done when the frag
6356 is created, but only when its type is assigned. A frag can be created
6357 and used a long time before its type is set, so beware of assuming that
6358 this initialisation is performed first. */
6359
6360 #ifndef OBJ_ELF
6361 void
6362 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6363 int max_chars ATTRIBUTE_UNUSED)
6364 {
6365 }
6366
6367 #else /* OBJ_ELF is defined. */
6368 void
6369 aarch64_init_frag (fragS * fragP, int max_chars)
6370 {
6371 /* Record a mapping symbol for alignment frags. We will delete this
6372 later if the alignment ends up empty. */
6373 if (!fragP->tc_frag_data.recorded)
6374 fragP->tc_frag_data.recorded = 1;
6375
6376 switch (fragP->fr_type)
6377 {
6378 case rs_align:
6379 case rs_align_test:
6380 case rs_fill:
6381 mapping_state_2 (MAP_DATA, max_chars);
6382 break;
6383 case rs_align_code:
6384 mapping_state_2 (MAP_INSN, max_chars);
6385 break;
6386 default:
6387 break;
6388 }
6389 }
6390 \f
6391 /* Initialize the DWARF-2 unwind information for this procedure. */
6392
6393 void
6394 tc_aarch64_frame_initial_instructions (void)
6395 {
6396 cfi_add_CFA_def_cfa (REG_SP, 0);
6397 }
6398 #endif /* OBJ_ELF */
6399
6400 /* Convert REGNAME to a DWARF-2 register number. */
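/* For example, "x29" maps to DWARF register number 29, while the
   FP/SIMD register "d8" maps to 72 (64 + 8).  */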
6401
6402 int
6403 tc_aarch64_regname_to_dw2regnum (char *regname)
6404 {
6405 const reg_entry *reg = parse_reg (&regname);
6406 if (reg == NULL)
6407 return -1;
6408
6409 switch (reg->type)
6410 {
6411 case REG_TYPE_SP_32:
6412 case REG_TYPE_SP_64:
6413 case REG_TYPE_R_32:
6414 case REG_TYPE_R_64:
6415 return reg->number;
6416
6417 case REG_TYPE_FP_B:
6418 case REG_TYPE_FP_H:
6419 case REG_TYPE_FP_S:
6420 case REG_TYPE_FP_D:
6421 case REG_TYPE_FP_Q:
6422 return reg->number + 64;
6423
6424 default:
6425 break;
6426 }
6427 return -1;
6428 }
6429
6430 /* Implement DWARF2_ADDR_SIZE. */
6431
6432 int
6433 aarch64_dwarf2_addr_size (void)
6434 {
6435 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6436 if (ilp32_p)
6437 return 4;
6438 #endif
6439 return bfd_arch_bits_per_address (stdoutput) / 8;
6440 }
6441
6442 /* MD interface: Symbol and relocation handling. */
6443
6444 /* Return the address within the segment that a PC-relative fixup is
6445 relative to. For AArch64, PC-relative fixups applied to instructions
6446 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6447
6448 long
6449 md_pcrel_from_section (fixS * fixP, segT seg)
6450 {
6451 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6452
6453 /* If this is pc-relative and we are going to emit a relocation
6454 then we just want to put out any pipeline compensation that the linker
6455 will need. Otherwise we want to use the calculated base. */
6456 if (fixP->fx_pcrel
6457 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6458 || aarch64_force_relocation (fixP)))
6459 base = 0;
6460
6461 /* AArch64 should be consistent for all pc-relative relocations. */
6462 return base + AARCH64_PCREL_OFFSET;
6463 }
6464
6465 /* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
6466 Otherwise we have no need to provide default values for symbols. */
6467
6468 symbolS *
6469 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6470 {
6471 #ifdef OBJ_ELF
6472 if (name[0] == '_' && name[1] == 'G'
6473 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6474 {
6475 if (!GOT_symbol)
6476 {
6477 if (symbol_find (name))
6478 as_bad (_("GOT already in the symbol table"));
6479
6480 GOT_symbol = symbol_new (name, undefined_section,
6481 (valueT) 0, &zero_address_frag);
6482 }
6483
6484 return GOT_symbol;
6485 }
6486 #endif
6487
6488 return 0;
6489 }
6490
6491 /* Return non-zero if the indicated VALUE has overflowed the maximum
6492 range expressible by an unsigned number with the indicated number of
6493 BITS. */
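/* For instance, with BITS == 12 the representable range is 0..4095, so
   a VALUE of 4096 counts as an overflow.  */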
6494
6495 static bfd_boolean
6496 unsigned_overflow (valueT value, unsigned bits)
6497 {
6498 valueT lim;
6499 if (bits >= sizeof (valueT) * 8)
6500 return FALSE;
6501 lim = (valueT) 1 << bits;
6502 return (value >= lim);
6503 }
6504
6505
6506 /* Return non-zero if the indicated VALUE has overflowed the maximum
6507 range expressible by a signed number with the indicated number of
6508 BITS. */
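/* For instance, with BITS == 9 the representable range is -256..255,
   matching the unscaled load/store offsets handled further below.  */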
6509
6510 static bfd_boolean
6511 signed_overflow (offsetT value, unsigned bits)
6512 {
6513 offsetT lim;
6514 if (bits >= sizeof (offsetT) * 8)
6515 return FALSE;
6516 lim = (offsetT) 1 << (bits - 1);
6517 return (value < -lim || value >= lim);
6518 }
6519
6520 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6521 unsigned immediate offset load/store instruction, try to encode it as
6522 an unscaled, 9-bit, signed immediate offset load/store instruction.
6523 Return TRUE if it is successful; otherwise return FALSE.
6524
6525 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
6526 in response to the standard LDR/STR mnemonics when the immediate offset is
6527 unambiguous, i.e. when it is negative or unaligned. */
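/* For example, "ldr x0, [x1, #-8]" cannot use the scaled, unsigned
   12-bit offset form, so it is assembled as "ldur x0, [x1, #-8]";
   likewise an unaligned offset such as #1 on a 64-bit load.  */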
6528
6529 static bfd_boolean
6530 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6531 {
6532 int idx;
6533 enum aarch64_op new_op;
6534 const aarch64_opcode *new_opcode;
6535
6536 gas_assert (instr->opcode->iclass == ldst_pos);
6537
6538 switch (instr->opcode->op)
6539 {
6540 case OP_LDRB_POS:new_op = OP_LDURB; break;
6541 case OP_STRB_POS: new_op = OP_STURB; break;
6542 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6543 case OP_LDRH_POS: new_op = OP_LDURH; break;
6544 case OP_STRH_POS: new_op = OP_STURH; break;
6545 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6546 case OP_LDR_POS: new_op = OP_LDUR; break;
6547 case OP_STR_POS: new_op = OP_STUR; break;
6548 case OP_LDRF_POS: new_op = OP_LDURV; break;
6549 case OP_STRF_POS: new_op = OP_STURV; break;
6550 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6551 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6552 default: new_op = OP_NIL; break;
6553 }
6554
6555 if (new_op == OP_NIL)
6556 return FALSE;
6557
6558 new_opcode = aarch64_get_opcode (new_op);
6559 gas_assert (new_opcode != NULL);
6560
6561 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6562 instr->opcode->op, new_opcode->op);
6563
6564 aarch64_replace_opcode (instr, new_opcode);
6565
6566 /* Clear the ADDR_SIMM9 operand's qualifier; otherwise the
6567 qualifier matching may fail because the out-of-date qualifier will
6568 prevent the operand from being updated with a new and correct qualifier. */
6569 idx = aarch64_operand_index (instr->opcode->operands,
6570 AARCH64_OPND_ADDR_SIMM9);
6571 gas_assert (idx == 1);
6572 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6573
6574 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6575
6576 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6577 return FALSE;
6578
6579 return TRUE;
6580 }
6581
6582 /* Called by fix_insn to fix a MOV immediate alias instruction.
6583
6584 Operand for a generic move immediate instruction, which is an alias
6585 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6586 a 32-bit/64-bit immediate value into a general register. An assembler error
6587 shall result if the immediate cannot be created by a single one of these
6588 instructions. If there is a choice, then to ensure reversibility an
6589 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
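/* For instance, a resolved value of 0x10000 can be encoded as a single
   MOVZ (#0x1, LSL #16), -1 as a single MOVN (#0), and
   0x5555555555555555 as a single ORR from XZR, since that value is a
   valid bitmask immediate.  */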
6590
6591 static void
6592 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6593 {
6594 const aarch64_opcode *opcode;
6595
6596 /* Need to check if the destination is SP/ZR. The check has to be done
6597 before any aarch64_replace_opcode. */
6598 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6599 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6600
6601 instr->operands[1].imm.value = value;
6602 instr->operands[1].skip = 0;
6603
6604 if (try_mov_wide_p)
6605 {
6606 /* Try the MOVZ alias. */
6607 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6608 aarch64_replace_opcode (instr, opcode);
6609 if (aarch64_opcode_encode (instr->opcode, instr,
6610 &instr->value, NULL, NULL))
6611 {
6612 put_aarch64_insn (buf, instr->value);
6613 return;
6614 }
6615 /* Try the MOVN alias. */
6616 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6617 aarch64_replace_opcode (instr, opcode);
6618 if (aarch64_opcode_encode (instr->opcode, instr,
6619 &instr->value, NULL, NULL))
6620 {
6621 put_aarch64_insn (buf, instr->value);
6622 return;
6623 }
6624 }
6625
6626 if (try_mov_bitmask_p)
6627 {
6628 /* Try the ORR alias. */
6629 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6630 aarch64_replace_opcode (instr, opcode);
6631 if (aarch64_opcode_encode (instr->opcode, instr,
6632 &instr->value, NULL, NULL))
6633 {
6634 put_aarch64_insn (buf, instr->value);
6635 return;
6636 }
6637 }
6638
6639 as_bad_where (fixP->fx_file, fixP->fx_line,
6640 _("immediate cannot be moved by a single instruction"));
6641 }
6642
6643 /* An immediate-related instruction operand may refer to a symbol in
6644 the assembly, e.g.
6645
6646 mov w0, u32
6647 .set u32, 0x00ffff00
6648
6649 At the time when the assembly instruction is parsed, a referenced symbol,
6650 like 'u32' in the above example, may not have been seen; a fixS is created
6651 in such a case and is handled here after symbols have been resolved.
6652 The instruction is fixed up with VALUE using the information in *FIXP plus
6653 extra information in FLAGS.
6654
6655 This function is called by md_apply_fix to fix up instructions that need
6656 a fix-up described above but do not involve any linker-time relocation. */
6657
6658 static void
6659 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6660 {
6661 int idx;
6662 uint32_t insn;
6663 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6664 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6665 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6666
6667 if (new_inst)
6668 {
6669 /* Now the instruction is about to be fixed-up, so the operand that
6670 was previously marked as 'ignored' needs to be unmarked in order
6671 to get the encoding done properly. */
6672 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6673 new_inst->operands[idx].skip = 0;
6674 }
6675
6676 gas_assert (opnd != AARCH64_OPND_NIL);
6677
6678 switch (opnd)
6679 {
6680 case AARCH64_OPND_EXCEPTION:
6681 if (unsigned_overflow (value, 16))
6682 as_bad_where (fixP->fx_file, fixP->fx_line,
6683 _("immediate out of range"));
6684 insn = get_aarch64_insn (buf);
6685 insn |= encode_svc_imm (value);
6686 put_aarch64_insn (buf, insn);
6687 break;
6688
6689 case AARCH64_OPND_AIMM:
6690 /* ADD or SUB with immediate.
6691 NOTE this assumes we come here with an add/sub shifted reg encoding
6692          3  322|2222|2  2  2 21111 111111
6693          1  098|7654|3  2  1 09876 543210 98765 43210
6694 0b000000 sf 000|1011|shift 0  Rm    imm6   Rn    Rd    ADD
6695 2b000000 sf 010|1011|shift 0  Rm    imm6   Rn    Rd    ADDS
6696 4b000000 sf 100|1011|shift 0  Rm    imm6   Rn    Rd    SUB
6697 6b000000 sf 110|1011|shift 0  Rm    imm6   Rn    Rd    SUBS
6698 ->
6699          3  322|2222|2 2   221111111111
6700          1  098|7654|3 2   109876543210 98765 43210
6701 11000000 sf 001|0001|shift imm12  Rn    Rd    ADD
6702 31000000 sf 011|0001|shift imm12  Rn    Rd    ADDS
6703 51000000 sf 101|0001|shift imm12  Rn    Rd    SUB
6704 71000000 sf 111|0001|shift imm12  Rn    Rd    SUBS
6705 Fields sf Rn Rd are already set. */
6706 insn = get_aarch64_insn (buf);
6707 if (value < 0)
6708 {
6709 /* Add <-> sub. */
6710 insn = reencode_addsub_switch_add_sub (insn);
6711 value = -value;
6712 }
6713
6714 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6715 && unsigned_overflow (value, 12))
6716 {
6717 /* Try to shift the value by 12 to make it fit. */
6718 if (((value >> 12) << 12) == value
6719 && ! unsigned_overflow (value, 12 + 12))
6720 {
6721 value >>= 12;
6722 insn |= encode_addsub_imm_shift_amount (1);
6723 }
6724 }
6725
6726 if (unsigned_overflow (value, 12))
6727 as_bad_where (fixP->fx_file, fixP->fx_line,
6728 _("immediate out of range"));
6729
6730 insn |= encode_addsub_imm (value);
6731
6732 put_aarch64_insn (buf, insn);
6733 break;
6734
6735 case AARCH64_OPND_SIMD_IMM:
6736 case AARCH64_OPND_SIMD_IMM_SFT:
6737 case AARCH64_OPND_LIMM:
6738 /* Bit mask immediate. */
6739 gas_assert (new_inst != NULL);
6740 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6741 new_inst->operands[idx].imm.value = value;
6742 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6743 &new_inst->value, NULL, NULL))
6744 put_aarch64_insn (buf, new_inst->value);
6745 else
6746 as_bad_where (fixP->fx_file, fixP->fx_line,
6747 _("invalid immediate"));
6748 break;
6749
6750 case AARCH64_OPND_HALF:
6751 /* 16-bit unsigned immediate. */
6752 if (unsigned_overflow (value, 16))
6753 as_bad_where (fixP->fx_file, fixP->fx_line,
6754 _("immediate out of range"));
6755 insn = get_aarch64_insn (buf);
6756 insn |= encode_movw_imm (value & 0xffff);
6757 put_aarch64_insn (buf, insn);
6758 break;
6759
6760 case AARCH64_OPND_IMM_MOV:
6761 /* Operand for a generic move immediate instruction, which is
6762 an alias instruction that generates a single MOVZ, MOVN or ORR
6763 instruction to load a 32-bit/64-bit immediate value into a general
6764 register. An assembler error shall result if the immediate cannot be
6765 created by a single one of these instructions. If there is a choice,
6766 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6767 and MOVZ or MOVN to ORR. */
6768 gas_assert (new_inst != NULL);
6769 fix_mov_imm_insn (fixP, buf, new_inst, value);
6770 break;
6771
6772 case AARCH64_OPND_ADDR_SIMM7:
6773 case AARCH64_OPND_ADDR_SIMM9:
6774 case AARCH64_OPND_ADDR_SIMM9_2:
6775 case AARCH64_OPND_ADDR_UIMM12:
6776 /* Immediate offset in an address. */
6777 insn = get_aarch64_insn (buf);
6778
6779 gas_assert (new_inst != NULL && new_inst->value == insn);
6780 gas_assert (new_inst->opcode->operands[1] == opnd
6781 || new_inst->opcode->operands[2] == opnd);
6782
6783 /* Get the index of the address operand. */
6784 if (new_inst->opcode->operands[1] == opnd)
6785 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6786 idx = 1;
6787 else
6788 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6789 idx = 2;
6790
6791 /* Update the resolved offset value. */
6792 new_inst->operands[idx].addr.offset.imm = value;
6793
6794 /* Encode/fix-up. */
6795 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6796 &new_inst->value, NULL, NULL))
6797 {
6798 put_aarch64_insn (buf, new_inst->value);
6799 break;
6800 }
6801 else if (new_inst->opcode->iclass == ldst_pos
6802 && try_to_encode_as_unscaled_ldst (new_inst))
6803 {
6804 put_aarch64_insn (buf, new_inst->value);
6805 break;
6806 }
6807
6808 as_bad_where (fixP->fx_file, fixP->fx_line,
6809 _("immediate offset out of range"));
6810 break;
6811
6812 default:
6813 gas_assert (0);
6814 as_fatal (_("unhandled operand code %d"), opnd);
6815 }
6816 }
6817
6818 /* Apply a fixup (fixP) to segment data, once it has been determined
6819 by our caller that we have all the info we need to fix it up.
6820
6821 Parameter valP is the pointer to the value of the bits. */
6822
6823 void
6824 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6825 {
6826 offsetT value = *valP;
6827 uint32_t insn;
6828 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6829 int scale;
6830 unsigned flags = fixP->fx_addnumber;
6831
6832 DEBUG_TRACE ("\n\n");
6833 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6834 DEBUG_TRACE ("Enter md_apply_fix");
6835
6836 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6837
6838 /* Note whether this will delete the relocation. */
6839
6840 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6841 fixP->fx_done = 1;
6842
6843 /* Process the relocations. */
6844 switch (fixP->fx_r_type)
6845 {
6846 case BFD_RELOC_NONE:
6847 /* This will need to go in the object file. */
6848 fixP->fx_done = 0;
6849 break;
6850
6851 case BFD_RELOC_8:
6852 case BFD_RELOC_8_PCREL:
6853 if (fixP->fx_done || !seg->use_rela_p)
6854 md_number_to_chars (buf, value, 1);
6855 break;
6856
6857 case BFD_RELOC_16:
6858 case BFD_RELOC_16_PCREL:
6859 if (fixP->fx_done || !seg->use_rela_p)
6860 md_number_to_chars (buf, value, 2);
6861 break;
6862
6863 case BFD_RELOC_32:
6864 case BFD_RELOC_32_PCREL:
6865 if (fixP->fx_done || !seg->use_rela_p)
6866 md_number_to_chars (buf, value, 4);
6867 break;
6868
6869 case BFD_RELOC_64:
6870 case BFD_RELOC_64_PCREL:
6871 if (fixP->fx_done || !seg->use_rela_p)
6872 md_number_to_chars (buf, value, 8);
6873 break;
6874
6875 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6876 /* We claim that these fixups have been processed here, even if
6877 in fact we generate an error because we do not have a reloc
6878 for them, so tc_gen_reloc() will reject them. */
6879 fixP->fx_done = 1;
6880 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6881 {
6882 as_bad_where (fixP->fx_file, fixP->fx_line,
6883 _("undefined symbol %s used as an immediate value"),
6884 S_GET_NAME (fixP->fx_addsy));
6885 goto apply_fix_return;
6886 }
6887 fix_insn (fixP, flags, value);
6888 break;
6889
6890 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6891 if (fixP->fx_done || !seg->use_rela_p)
6892 {
6893 if (value & 3)
6894 as_bad_where (fixP->fx_file, fixP->fx_line,
6895 _("pc-relative load offset not word aligned"));
6896 if (signed_overflow (value, 21))
6897 as_bad_where (fixP->fx_file, fixP->fx_line,
6898 _("pc-relative load offset out of range"));
6899 insn = get_aarch64_insn (buf);
6900 insn |= encode_ld_lit_ofs_19 (value >> 2);
6901 put_aarch64_insn (buf, insn);
6902 }
6903 break;
6904
6905 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6906 if (fixP->fx_done || !seg->use_rela_p)
6907 {
6908 if (signed_overflow (value, 21))
6909 as_bad_where (fixP->fx_file, fixP->fx_line,
6910 _("pc-relative address offset out of range"));
6911 insn = get_aarch64_insn (buf);
6912 insn |= encode_adr_imm (value);
6913 put_aarch64_insn (buf, insn);
6914 }
6915 break;
6916
6917 case BFD_RELOC_AARCH64_BRANCH19:
6918 if (fixP->fx_done || !seg->use_rela_p)
6919 {
6920 if (value & 3)
6921 as_bad_where (fixP->fx_file, fixP->fx_line,
6922 _("conditional branch target not word aligned"));
6923 if (signed_overflow (value, 21))
6924 as_bad_where (fixP->fx_file, fixP->fx_line,
6925 _("conditional branch out of range"));
6926 insn = get_aarch64_insn (buf);
6927 insn |= encode_cond_branch_ofs_19 (value >> 2);
6928 put_aarch64_insn (buf, insn);
6929 }
6930 break;
6931
6932 case BFD_RELOC_AARCH64_TSTBR14:
6933 if (fixP->fx_done || !seg->use_rela_p)
6934 {
6935 if (value & 3)
6936 as_bad_where (fixP->fx_file, fixP->fx_line,
6937 _("conditional branch target not word aligned"));
6938 if (signed_overflow (value, 16))
6939 as_bad_where (fixP->fx_file, fixP->fx_line,
6940 _("conditional branch out of range"));
6941 insn = get_aarch64_insn (buf);
6942 insn |= encode_tst_branch_ofs_14 (value >> 2);
6943 put_aarch64_insn (buf, insn);
6944 }
6945 break;
6946
6947 case BFD_RELOC_AARCH64_CALL26:
6948 case BFD_RELOC_AARCH64_JUMP26:
6949 if (fixP->fx_done || !seg->use_rela_p)
6950 {
6951 if (value & 3)
6952 as_bad_where (fixP->fx_file, fixP->fx_line,
6953 _("branch target not word aligned"));
6954 if (signed_overflow (value, 28))
6955 as_bad_where (fixP->fx_file, fixP->fx_line,
6956 _("branch out of range"));
6957 insn = get_aarch64_insn (buf);
6958 insn |= encode_branch_ofs_26 (value >> 2);
6959 put_aarch64_insn (buf, insn);
6960 }
6961 break;
6962
6963 case BFD_RELOC_AARCH64_MOVW_G0:
6964 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6965 case BFD_RELOC_AARCH64_MOVW_G0_S:
6966 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6967 scale = 0;
6968 goto movw_common;
6969 case BFD_RELOC_AARCH64_MOVW_G1:
6970 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6971 case BFD_RELOC_AARCH64_MOVW_G1_S:
6972 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6973 scale = 16;
6974 goto movw_common;
6975 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6976 scale = 0;
6977 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6978 /* Should always be exported to object file, see
6979 aarch64_force_relocation(). */
6980 gas_assert (!fixP->fx_done);
6981 gas_assert (seg->use_rela_p);
6982 goto movw_common;
6983 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6984 scale = 16;
6985 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6986 /* Should always be exported to object file, see
6987 aarch64_force_relocation(). */
6988 gas_assert (!fixP->fx_done);
6989 gas_assert (seg->use_rela_p);
6990 goto movw_common;
6991 case BFD_RELOC_AARCH64_MOVW_G2:
6992 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6993 case BFD_RELOC_AARCH64_MOVW_G2_S:
6994 scale = 32;
6995 goto movw_common;
6996 case BFD_RELOC_AARCH64_MOVW_G3:
6997 scale = 48;
6998 movw_common:
6999 if (fixP->fx_done || !seg->use_rela_p)
7000 {
7001 insn = get_aarch64_insn (buf);
7002
7003 if (!fixP->fx_done)
7004 {
7005 /* REL signed addend must fit in 16 bits */
7006 if (signed_overflow (value, 16))
7007 as_bad_where (fixP->fx_file, fixP->fx_line,
7008 _("offset out of range"));
7009 }
7010 else
7011 {
7012 /* Check for overflow and scale. */
7013 switch (fixP->fx_r_type)
7014 {
7015 case BFD_RELOC_AARCH64_MOVW_G0:
7016 case BFD_RELOC_AARCH64_MOVW_G1:
7017 case BFD_RELOC_AARCH64_MOVW_G2:
7018 case BFD_RELOC_AARCH64_MOVW_G3:
7019 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7020 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7021 if (unsigned_overflow (value, scale + 16))
7022 as_bad_where (fixP->fx_file, fixP->fx_line,
7023 _("unsigned value out of range"));
7024 break;
7025 case BFD_RELOC_AARCH64_MOVW_G0_S:
7026 case BFD_RELOC_AARCH64_MOVW_G1_S:
7027 case BFD_RELOC_AARCH64_MOVW_G2_S:
7028 /* NOTE: We can only come here with movz or movn. */
7029 if (signed_overflow (value, scale + 16))
7030 as_bad_where (fixP->fx_file, fixP->fx_line,
7031 _("signed value out of range"));
7032 if (value < 0)
7033 {
7034 /* Force use of MOVN. */
7035 value = ~value;
7036 insn = reencode_movzn_to_movn (insn);
7037 }
7038 else
7039 {
7040 /* Force use of MOVZ. */
7041 insn = reencode_movzn_to_movz (insn);
7042 }
7043 break;
7044 default:
7045 /* Unchecked relocations. */
7046 break;
7047 }
7048 value >>= scale;
7049 }
7050
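/* For example, for a locally resolved BFD_RELOC_AARCH64_MOVW_G1 a value
   of 0x12340000 passes the overflow check and is shifted right by 16
   above, leaving 0x1234 for the imm16 field inserted below.  */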
7051 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7052 insn |= encode_movw_imm (value & 0xffff);
7053
7054 put_aarch64_insn (buf, insn);
7055 }
7056 break;
7057
7058 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7059 fixP->fx_r_type = (ilp32_p
7060 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7061 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7062 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7063 /* Should always be exported to object file, see
7064 aarch64_force_relocation(). */
7065 gas_assert (!fixP->fx_done);
7066 gas_assert (seg->use_rela_p);
7067 break;
7068
7069 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7070 fixP->fx_r_type = (ilp32_p
7071 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7072 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
7073 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7074 /* Should always be exported to object file, see
7075 aarch64_force_relocation(). */
7076 gas_assert (!fixP->fx_done);
7077 gas_assert (seg->use_rela_p);
7078 break;
7079
7080 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7081 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7082 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7083 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7084 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7085 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7086 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7087 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7088 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7089 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7090 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7091 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7092 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7093 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7094 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7095 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7096 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7097 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7098 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7099 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7100 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7101 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7102 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7103 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7104 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7105 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7106 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7107 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7108 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7109 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7110 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7111 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7112 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7113 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7114 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7115 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7116 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7117 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7118 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7119 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7120 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7121 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7122 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7123 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7124 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7125 /* Should always be exported to object file, see
7126 aarch64_force_relocation(). */
7127 gas_assert (!fixP->fx_done);
7128 gas_assert (seg->use_rela_p);
7129 break;
7130
7131 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7132 /* Should always be exported to object file, see
7133 aarch64_force_relocation(). */
7134 fixP->fx_r_type = (ilp32_p
7135 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7136 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7137 gas_assert (!fixP->fx_done);
7138 gas_assert (seg->use_rela_p);
7139 break;
7140
7141 case BFD_RELOC_AARCH64_ADD_LO12:
7142 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7143 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7144 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7145 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7146 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7147 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7148 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7149 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7150 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7151 case BFD_RELOC_AARCH64_LDST128_LO12:
7152 case BFD_RELOC_AARCH64_LDST16_LO12:
7153 case BFD_RELOC_AARCH64_LDST32_LO12:
7154 case BFD_RELOC_AARCH64_LDST64_LO12:
7155 case BFD_RELOC_AARCH64_LDST8_LO12:
7156 /* Should always be exported to object file, see
7157 aarch64_force_relocation(). */
7158 gas_assert (!fixP->fx_done);
7159 gas_assert (seg->use_rela_p);
7160 break;
7161
7162 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7163 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7164 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7165 break;
7166
7167 case BFD_RELOC_UNUSED:
7168 /* An error will already have been reported. */
7169 break;
7170
7171 default:
7172 as_bad_where (fixP->fx_file, fixP->fx_line,
7173 _("unexpected %s fixup"),
7174 bfd_get_reloc_code_name (fixP->fx_r_type));
7175 break;
7176 }
7177
7178 apply_fix_return:
7179 /* Free the allocated struct aarch64_inst.
7180 N.B. currently only a very limited number of fix-up types actually use
7181 this field, so the impact on performance should be minimal. */
7182 if (fixP->tc_fix_data.inst != NULL)
7183 free (fixP->tc_fix_data.inst);
7184
7185 return;
7186 }
7187
7188 /* Translate internal representation of relocation info to BFD target
7189 format. */
7190
7191 arelent *
7192 tc_gen_reloc (asection * section, fixS * fixp)
7193 {
7194 arelent *reloc;
7195 bfd_reloc_code_real_type code;
7196
7197 reloc = xmalloc (sizeof (arelent));
7198
7199 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
7200 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7201 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7202
7203 if (fixp->fx_pcrel)
7204 {
7205 if (section->use_rela_p)
7206 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7207 else
7208 fixp->fx_offset = reloc->address;
7209 }
7210 reloc->addend = fixp->fx_offset;
7211
7212 code = fixp->fx_r_type;
7213 switch (code)
7214 {
7215 case BFD_RELOC_16:
7216 if (fixp->fx_pcrel)
7217 code = BFD_RELOC_16_PCREL;
7218 break;
7219
7220 case BFD_RELOC_32:
7221 if (fixp->fx_pcrel)
7222 code = BFD_RELOC_32_PCREL;
7223 break;
7224
7225 case BFD_RELOC_64:
7226 if (fixp->fx_pcrel)
7227 code = BFD_RELOC_64_PCREL;
7228 break;
7229
7230 default:
7231 break;
7232 }
7233
7234 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7235 if (reloc->howto == NULL)
7236 {
7237 as_bad_where (fixp->fx_file, fixp->fx_line,
7238 _
7239 ("cannot represent %s relocation in this object file format"),
7240 bfd_get_reloc_code_name (code));
7241 return NULL;
7242 }
7243
7244 return reloc;
7245 }
7246
7247 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7248
7249 void
7250 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7251 {
7252 bfd_reloc_code_real_type type;
7253 int pcrel = 0;
7254
7255 /* Pick a reloc.
7256 FIXME: @@ Should look at CPU word size. */
7257 switch (size)
7258 {
7259 case 1:
7260 type = BFD_RELOC_8;
7261 break;
7262 case 2:
7263 type = BFD_RELOC_16;
7264 break;
7265 case 4:
7266 type = BFD_RELOC_32;
7267 break;
7268 case 8:
7269 type = BFD_RELOC_64;
7270 break;
7271 default:
7272 as_bad (_("cannot do %u-byte relocation"), size);
7273 type = BFD_RELOC_UNUSED;
7274 break;
7275 }
7276
7277 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7278 }
7279
7280 int
7281 aarch64_force_relocation (struct fix *fixp)
7282 {
7283 switch (fixp->fx_r_type)
7284 {
7285 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7286 /* Perform these "immediate" internal relocations
7287 even if the symbol is extern or weak. */
7288 return 0;
7289
7290 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7291 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7292 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7293 /* Pseudo relocs that need to be fixed up according to
7294 ilp32_p. */
7295 return 0;
7296
7297 case BFD_RELOC_AARCH64_ADD_LO12:
7298 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7299 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7300 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7301 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7302 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7303 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7304 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7305 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7306 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7307 case BFD_RELOC_AARCH64_LDST128_LO12:
7308 case BFD_RELOC_AARCH64_LDST16_LO12:
7309 case BFD_RELOC_AARCH64_LDST32_LO12:
7310 case BFD_RELOC_AARCH64_LDST64_LO12:
7311 case BFD_RELOC_AARCH64_LDST8_LO12:
7312 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7313 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7314 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7315 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7316 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7317 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7318 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7319 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7320 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7321 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7322 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7323 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7324 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7325 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7326 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7327 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7328 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7329 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7330 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7331 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7332 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7333 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7334 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7335 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7336 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7337 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7338 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7339 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7340 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7341 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7342 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7343 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7344 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7345 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7346 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7347 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7348 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7349 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7350 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7351 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7352 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7353 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7354 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7355 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7356 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7357 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7358 /* Always leave these relocations for the linker. */
7359 return 1;
7360
7361 default:
7362 break;
7363 }
7364
7365 return generic_force_reloc (fixp);
7366 }
7367
7368 #ifdef OBJ_ELF
7369
7370 const char *
7371 elf64_aarch64_target_format (void)
7372 {
7373 if (strcmp (TARGET_OS, "cloudabi") == 0)
7374 {
7375 /* FIXME: What to do for ilp32_p ? */
7376 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
7377 }
7378 if (target_big_endian)
7379 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7380 else
7381 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7382 }
7383
7384 void
7385 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
7386 {
7387 elf_frob_symbol (symp, puntp);
7388 }
7389 #endif
7390
7391 /* MD interface: Finalization. */
7392
7393 /* A good place to do this, although this was probably not intended
7394 for this kind of use. We need to dump the literal pool before
7395 references are made to a null symbol pointer. */
7396
7397 void
7398 aarch64_cleanup (void)
7399 {
7400 literal_pool *pool;
7401
7402 for (pool = list_of_pools; pool; pool = pool->next)
7403 {
7404 /* Put it at the end of the relevant section. */
7405 subseg_set (pool->section, pool->sub_section);
7406 s_ltorg (0);
7407 }
7408 }
7409
7410 #ifdef OBJ_ELF
7411 /* Remove any excess mapping symbols generated for alignment frags in
7412 SEC. We may have created a mapping symbol before a zero byte
7413 alignment; remove it if there's a mapping symbol after the
7414 alignment. */
7415 static void
7416 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
7417 void *dummy ATTRIBUTE_UNUSED)
7418 {
7419 segment_info_type *seginfo = seg_info (sec);
7420 fragS *fragp;
7421
7422 if (seginfo == NULL || seginfo->frchainP == NULL)
7423 return;
7424
7425 for (fragp = seginfo->frchainP->frch_root;
7426 fragp != NULL; fragp = fragp->fr_next)
7427 {
7428 symbolS *sym = fragp->tc_frag_data.last_map;
7429 fragS *next = fragp->fr_next;
7430
7431 /* Variable-sized frags have been converted to fixed size by
7432 this point. But if this was variable-sized to start with,
7433 there will be a fixed-size frag after it. So don't handle
7434 next == NULL. */
7435 if (sym == NULL || next == NULL)
7436 continue;
7437
7438 if (S_GET_VALUE (sym) < next->fr_address)
7439 /* Not at the end of this frag. */
7440 continue;
7441 know (S_GET_VALUE (sym) == next->fr_address);
7442
7443 do
7444 {
7445 if (next->tc_frag_data.first_map != NULL)
7446 {
7447 /* Next frag starts with a mapping symbol. Discard this
7448 one. */
7449 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7450 break;
7451 }
7452
7453 if (next->fr_next == NULL)
7454 {
7455 /* This mapping symbol is at the end of the section. Discard
7456 it. */
7457 know (next->fr_fix == 0 && next->fr_var == 0);
7458 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7459 break;
7460 }
7461
7462 /* As long as we have empty frags without any mapping symbols,
7463 keep looking. */
7464 /* If the next frag is non-empty and does not start with a
7465 mapping symbol, then this mapping symbol is required. */
7466 if (next->fr_address != next->fr_next->fr_address)
7467 break;
7468
7469 next = next->fr_next;
7470 }
7471 while (next != NULL);
7472 }
7473 }
7474 #endif
7475
7476 /* Adjust the symbol table. */
7477
7478 void
7479 aarch64_adjust_symtab (void)
7480 {
7481 #ifdef OBJ_ELF
7482 /* Remove any overlapping mapping symbols generated by alignment frags. */
7483 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7484 /* Now do generic ELF adjustments. */
7485 elf_adjust_symtab ();
7486 #endif
7487 }
7488
7489 static void
7490 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7491 {
7492 const char *hash_err;
7493
7494 hash_err = hash_insert (table, key, value);
7495 if (hash_err)
7496 printf ("Internal Error: Can't hash %s\n", key);
7497 }
7498
7499 static void
7500 fill_instruction_hash_table (void)
7501 {
7502 aarch64_opcode *opcode = aarch64_opcode_table;
7503
7504 while (opcode->name != NULL)
7505 {
7506 templates *templ, *new_templ;
7507 templ = hash_find (aarch64_ops_hsh, opcode->name);
7508
7509 new_templ = (templates *) xmalloc (sizeof (templates));
7510 new_templ->opcode = opcode;
7511 new_templ->next = NULL;
7512
7513 if (!templ)
7514 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7515 else
7516 {
7517 new_templ->next = templ->next;
7518 templ->next = new_templ;
7519 }
7520 ++opcode;
7521 }
7522 }
7523
7524 static inline void
7525 convert_to_upper (char *dst, const char *src, size_t num)
7526 {
7527 unsigned int i;
7528 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7529 *dst = TOUPPER (*src);
7530 *dst = '\0';
7531 }
7532
7533 /* Assume STR points to a lower-case string; allocate, convert and return
7534 the corresponding upper-case string. */
7535 static inline const char*
7536 get_upper_str (const char *str)
7537 {
7538 char *ret;
7539 size_t len = strlen (str);
7540 if ((ret = xmalloc (len + 1)) == NULL)
7541 abort ();
7542 convert_to_upper (ret, str, len);
7543 return ret;
7544 }
7545
7546 /* MD interface: Initialization. */
7547
7548 void
7549 md_begin (void)
7550 {
7551 unsigned mach;
7552 unsigned int i;
7553
7554 if ((aarch64_ops_hsh = hash_new ()) == NULL
7555 || (aarch64_cond_hsh = hash_new ()) == NULL
7556 || (aarch64_shift_hsh = hash_new ()) == NULL
7557 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7558 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7559 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7560 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7561 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7562 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7563 || (aarch64_reg_hsh = hash_new ()) == NULL
7564 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7565 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7566 || (aarch64_pldop_hsh = hash_new ()) == NULL
7567 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
7568 as_fatal (_("virtual memory exhausted"));
7569
7570 fill_instruction_hash_table ();
7571
7572 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7573 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7574 (void *) (aarch64_sys_regs + i));
7575
7576 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7577 checked_hash_insert (aarch64_pstatefield_hsh,
7578 aarch64_pstatefields[i].name,
7579 (void *) (aarch64_pstatefields + i));
7580
7581 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
7582 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7583 aarch64_sys_regs_ic[i].name,
7584 (void *) (aarch64_sys_regs_ic + i));
7585
7586 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
7587 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7588 aarch64_sys_regs_dc[i].name,
7589 (void *) (aarch64_sys_regs_dc + i));
7590
7591 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
7592 checked_hash_insert (aarch64_sys_regs_at_hsh,
7593 aarch64_sys_regs_at[i].name,
7594 (void *) (aarch64_sys_regs_at + i));
7595
7596 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
7597 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7598 aarch64_sys_regs_tlbi[i].name,
7599 (void *) (aarch64_sys_regs_tlbi + i));
7600
7601 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7602 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7603 (void *) (reg_names + i));
7604
7605 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7606 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7607 (void *) (nzcv_names + i));
7608
7609 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7610 {
7611 const char *name = aarch64_operand_modifiers[i].name;
7612 checked_hash_insert (aarch64_shift_hsh, name,
7613 (void *) (aarch64_operand_modifiers + i));
7614 /* Also hash the name in the upper case. */
7615 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7616 (void *) (aarch64_operand_modifiers + i));
7617 }
7618
7619 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7620 {
7621 unsigned int j;
7622 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7623 the same condition code. */
7624 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7625 {
7626 const char *name = aarch64_conds[i].names[j];
7627 if (name == NULL)
7628 break;
7629 checked_hash_insert (aarch64_cond_hsh, name,
7630 (void *) (aarch64_conds + i));
7631 /* Also hash the name in the upper case. */
7632 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7633 (void *) (aarch64_conds + i));
7634 }
7635 }
7636
7637 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7638 {
7639 const char *name = aarch64_barrier_options[i].name;
7640 /* Skip xx00 - the unallocated values of the option field. */
7641 if ((i & 0x3) == 0)
7642 continue;
7643 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7644 (void *) (aarch64_barrier_options + i));
7645 /* Also hash the name in the upper case. */
7646 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7647 (void *) (aarch64_barrier_options + i));
7648 }
7649
7650 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7651 {
7652 const char* name = aarch64_prfops[i].name;
7653 /* Skip the unallocated hint encodings. */
7654 if (name == NULL)
7655 continue;
7656 checked_hash_insert (aarch64_pldop_hsh, name,
7657 (void *) (aarch64_prfops + i));
7658 /* Also hash the name in the upper case. */
7659 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7660 (void *) (aarch64_prfops + i));
7661 }
7662
7663 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
7664 {
7665 const char* name = aarch64_hint_options[i].name;
7666
7667 checked_hash_insert (aarch64_hint_opt_hsh, name,
7668 (void *) (aarch64_hint_options + i));
7669 /* Also hash the name in the upper case. */
7670 checked_hash_insert (aarch64_hint_opt_hsh, get_upper_str (name),
7671 (void *) (aarch64_hint_options + i));
7672 }
7673
7674 /* Set the cpu variant based on the command-line options. */
7675 if (!mcpu_cpu_opt)
7676 mcpu_cpu_opt = march_cpu_opt;
7677
7678 if (!mcpu_cpu_opt)
7679 mcpu_cpu_opt = &cpu_default;
7680
7681 cpu_variant = *mcpu_cpu_opt;
7682
7683 /* Record the CPU type. */
7684 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7685
7686 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7687 }
7688
7689 /* Command line processing. */
7690
7691 const char *md_shortopts = "m:";
7692
7693 #ifdef AARCH64_BI_ENDIAN
7694 #define OPTION_EB (OPTION_MD_BASE + 0)
7695 #define OPTION_EL (OPTION_MD_BASE + 1)
7696 #else
7697 #if TARGET_BYTES_BIG_ENDIAN
7698 #define OPTION_EB (OPTION_MD_BASE + 0)
7699 #else
7700 #define OPTION_EL (OPTION_MD_BASE + 1)
7701 #endif
7702 #endif
7703
7704 struct option md_longopts[] = {
7705 #ifdef OPTION_EB
7706 {"EB", no_argument, NULL, OPTION_EB},
7707 #endif
7708 #ifdef OPTION_EL
7709 {"EL", no_argument, NULL, OPTION_EL},
7710 #endif
7711 {NULL, no_argument, NULL, 0}
7712 };
7713
7714 size_t md_longopts_size = sizeof (md_longopts);
7715
7716 struct aarch64_option_table
7717 {
7718 const char *option; /* Option name to match. */
7719 const char *help; /* Help information. */
7720 int *var; /* Variable to change. */
7721 int value; /* What to change it to. */
7722 char *deprecated; /* If non-null, print this message. */
7723 };
7724
7725 static struct aarch64_option_table aarch64_opts[] = {
7726 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7727 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7728 NULL},
7729 #ifdef DEBUG_AARCH64
7730 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7731 #endif /* DEBUG_AARCH64 */
7732 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7733 NULL},
7734 {"mno-verbose-error", N_("do not output verbose error messages"),
7735 &verbose_error_p, 0, NULL},
7736 {NULL, NULL, NULL, 0, NULL}
7737 };
7738
7739 struct aarch64_cpu_option_table
7740 {
7741 const char *name;
7742 const aarch64_feature_set value;
7743 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7744 case. */
7745 const char *canonical_name;
7746 };
7747
7748 /* This list should, at a minimum, contain all the cpu names
7749 recognized by GCC. */
7750 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7751 {"all", AARCH64_ANY, NULL},
7752 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
7753 AARCH64_FEATURE_CRC), "Cortex-A35"},
7754 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7755 AARCH64_FEATURE_CRC), "Cortex-A53"},
7756 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7757 AARCH64_FEATURE_CRC), "Cortex-A57"},
7758 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7759 AARCH64_FEATURE_CRC), "Cortex-A72"},
7760 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
7761 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7762 "Samsung Exynos M1"},
7763 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7764 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7765 "Qualcomm QDF24XX"},
7766 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7767 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7768 "Cavium ThunderX"},
7769 /* The 'xgene-1' name is an older spelling of 'xgene1'; it was used
7770 in earlier releases and has been superseded by 'xgene1' in all
7771 tools.  */
7772 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7773 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7774 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7775 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7776 {"generic", AARCH64_ARCH_V8, NULL},
7777
7778 {NULL, AARCH64_ARCH_NONE, NULL}
7779 };
7780
7781 struct aarch64_arch_option_table
7782 {
7783 const char *name;
7784 const aarch64_feature_set value;
7785 };
7786
7787 /* This list should, at a minimum, contain all the architecture names
7788 recognized by GCC. */
7789 static const struct aarch64_arch_option_table aarch64_archs[] = {
7790 {"all", AARCH64_ANY},
7791 {"armv8-a", AARCH64_ARCH_V8},
7792 {"armv8.1-a", AARCH64_ARCH_V8_1},
7793 {"armv8.2-a", AARCH64_ARCH_V8_2},
7794 {NULL, AARCH64_ARCH_NONE}
7795 };
7796
7797 /* ISA extensions. */
7798 struct aarch64_option_cpu_value_table
7799 {
7800 const char *name;
7801 const aarch64_feature_set value;
7802 };
7803
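/* Architectural extension names as they appear after '+' in -march/-mcpu
   (and bare in the .arch_extension directive); e.g. "+crc" adds the CRC
   instructions and "+nofp" removes floating-point support (the "no"
   prefix is handled by aarch64_parse_features below).  */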
7804 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7805 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7806 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7807 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7808 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
7809 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7810 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0)},
7811 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0)},
7812 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_SIMD
7813 | AARCH64_FEATURE_RDMA, 0)},
7814 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16
7815 | AARCH64_FEATURE_FP, 0)},
7816 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0)},
7817 {NULL, AARCH64_ARCH_NONE}
7818 };
7819
7820 struct aarch64_long_option_table
7821 {
7822 const char *option; /* Substring to match. */
7823 const char *help; /* Help information. */
7824 int (*func) (char *subopt); /* Function to decode sub-option. */
7825 char *deprecated; /* If non-null, print this message. */
7826 };
7827
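/* Parse the architectural extension suffix of an option, e.g. the
   "+crc+nocrypto" part of -mcpu=cortex-a57+crc+nocrypto.  Replace *OPT_P
   with a freshly allocated copy of the current feature set, updated with
   the requested extensions added or removed.  If EXT_ONLY is set, STR is
   a single bare extension name without a leading '+' (as used by the
   .arch_extension directive).  Return 1 on success, 0 on error.  */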
7828 static int
7829 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p,
7830 bfd_boolean ext_only)
7831 {
7832 /* We insist on extensions being added before being removed. We achieve
7833 this by using the ADDING_VALUE variable to indicate whether we are
7834 adding an extension (1) or removing it (0) and only allowing it to
7835 change in the order -1 -> 1 -> 0. */
7836 int adding_value = -1;
7837 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7838
7839 /* Copy the feature set, so that we can modify it. */
7840 *ext_set = **opt_p;
7841 *opt_p = ext_set;
7842
7843 while (str != NULL && *str != 0)
7844 {
7845 const struct aarch64_option_cpu_value_table *opt;
7846 char *ext = NULL;
7847 int optlen;
7848
7849 if (!ext_only)
7850 {
7851 if (*str != '+')
7852 {
7853 as_bad (_("invalid architectural extension"));
7854 return 0;
7855 }
7856
7857 ext = strchr (++str, '+');
7858 }
7859
7860 if (ext != NULL)
7861 optlen = ext - str;
7862 else
7863 optlen = strlen (str);
7864
7865 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7866 {
7867 if (adding_value != 0)
7868 adding_value = 0;
7869 optlen -= 2;
7870 str += 2;
7871 }
7872 else if (optlen > 0)
7873 {
7874 if (adding_value == -1)
7875 adding_value = 1;
7876 else if (adding_value != 1)
7877 {
7878 as_bad (_("must specify extensions to add before specifying "
7879 "those to remove"));
7880 return 0;
7881 }
7882 }
7883
7884 if (optlen == 0)
7885 {
7886 as_bad (_("missing architectural extension"));
7887 return 0;
7888 }
7889
7890 gas_assert (adding_value != -1);
7891
7892 for (opt = aarch64_features; opt->name != NULL; opt++)
7893 if (strncmp (opt->name, str, optlen) == 0)
7894 {
7895 /* Add or remove the extension. */
7896 if (adding_value)
7897 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7898 else
7899 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7900 break;
7901 }
7902
7903 if (opt->name == NULL)
7904 {
7905 as_bad (_("unknown architectural extension `%s'"), str);
7906 return 0;
7907 }
7908
7909 str = ext;
7910 }
7911
7912 return 1;
7913 }
7914
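/* Handle -mcpu=<name>[+extension...]: look <name> up in aarch64_cpus and
   record its feature set (with any extensions applied) in mcpu_cpu_opt.
   Return 1 on success, 0 on error.  */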
7915 static int
7916 aarch64_parse_cpu (char *str)
7917 {
7918 const struct aarch64_cpu_option_table *opt;
7919 char *ext = strchr (str, '+');
7920 size_t optlen;
7921
7922 if (ext != NULL)
7923 optlen = ext - str;
7924 else
7925 optlen = strlen (str);
7926
7927 if (optlen == 0)
7928 {
7929 as_bad (_("missing cpu name `%s'"), str);
7930 return 0;
7931 }
7932
7933 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7934 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7935 {
7936 mcpu_cpu_opt = &opt->value;
7937 if (ext != NULL)
7938 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7939
7940 return 1;
7941 }
7942
7943 as_bad (_("unknown cpu `%s'"), str);
7944 return 0;
7945 }
7946
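/* Handle -march=<name>[+extension...]: look <name> up in aarch64_archs and
   record the result in march_cpu_opt.  Return 1 on success, 0 on error.  */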
7947 static int
7948 aarch64_parse_arch (char *str)
7949 {
7950 const struct aarch64_arch_option_table *opt;
7951 char *ext = strchr (str, '+');
7952 size_t optlen;
7953
7954 if (ext != NULL)
7955 optlen = ext - str;
7956 else
7957 optlen = strlen (str);
7958
7959 if (optlen == 0)
7960 {
7961 as_bad (_("missing architecture name `%s'"), str);
7962 return 0;
7963 }
7964
7965 for (opt = aarch64_archs; opt->name != NULL; opt++)
7966 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7967 {
7968 march_cpu_opt = &opt->value;
7969 if (ext != NULL)
7970 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
7971
7972 return 1;
7973 }
7974
7975 as_bad (_("unknown architecture `%s'"), str);
7976 return 0;
7977 }
7978
7979 /* ABIs. */
7980 struct aarch64_option_abi_value_table
7981 {
7982 const char *name;
7983 enum aarch64_abi_type value;
7984 };
7985
7986 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7987 {"ilp32", AARCH64_ABI_ILP32},
7988 {"lp64", AARCH64_ABI_LP64},
7989 {NULL, 0}
7990 };
7991
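/* Handle -mabi=<name>; only the names listed in aarch64_abis above
   ("lp64" and "ilp32") are accepted.  Return 1 on success, 0 on error.  */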
7992 static int
7993 aarch64_parse_abi (char *str)
7994 {
7995 const struct aarch64_option_abi_value_table *opt;
7996 size_t optlen = strlen (str);
7997
7998 if (optlen == 0)
7999 {
8000 as_bad (_("missing abi name `%s'"), str);
8001 return 0;
8002 }
8003
8004 for (opt = aarch64_abis; opt->name != NULL; opt++)
8005 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8006 {
8007 aarch64_abi = opt->value;
8008 return 1;
8009 }
8010
8011 as_bad (_("unknown abi `%s'"), str);
8012 return 0;
8013 }
8014
8015 static struct aarch64_long_option_table aarch64_long_opts[] = {
8016 #ifdef OBJ_ELF
8017 {"mabi=", N_("<abi name>\t specify ABI <abi name>"),
8018 aarch64_parse_abi, NULL},
8019 #endif /* OBJ_ELF */
8020 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
8021 aarch64_parse_cpu, NULL},
8022 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
8023 aarch64_parse_arch, NULL},
8024 {NULL, NULL, 0, NULL}
8025 };
8026
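/* Handle a target-specific command-line option.  C is the option character
   and ARG its argument, if any.  Return 1 if the option was handled here,
   0 to let the generic option code report it as unknown.  */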
8027 int
8028 md_parse_option (int c, char *arg)
8029 {
8030 struct aarch64_option_table *opt;
8031 struct aarch64_long_option_table *lopt;
8032
8033 switch (c)
8034 {
8035 #ifdef OPTION_EB
8036 case OPTION_EB:
8037 target_big_endian = 1;
8038 break;
8039 #endif
8040
8041 #ifdef OPTION_EL
8042 case OPTION_EL:
8043 target_big_endian = 0;
8044 break;
8045 #endif
8046
8047 case 'a':
8048 /* Listing option.  Just ignore these; we don't support additional
8049 ones.  */
8050 return 0;
8051
8052 default:
8053 for (opt = aarch64_opts; opt->option != NULL; opt++)
8054 {
8055 if (c == opt->option[0]
8056 && ((arg == NULL && opt->option[1] == 0)
8057 || streq (arg, opt->option + 1)))
8058 {
8059 /* If the option is deprecated, tell the user. */
8060 if (opt->deprecated != NULL)
8061 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
8062 arg ? arg : "", _(opt->deprecated));
8063
8064 if (opt->var != NULL)
8065 *opt->var = opt->value;
8066
8067 return 1;
8068 }
8069 }
8070
8071 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8072 {
8073 /* These options are expected to have an argument. */
8074 if (c == lopt->option[0]
8075 && arg != NULL
8076 && strncmp (arg, lopt->option + 1,
8077 strlen (lopt->option + 1)) == 0)
8078 {
8079 /* If the option is deprecated, tell the user. */
8080 if (lopt->deprecated != NULL)
8081 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
8082 _(lopt->deprecated));
8083
8084 /* Call the sub-option parser.  */
8085 return lopt->func (arg + strlen (lopt->option) - 1);
8086 }
8087 }
8088
8089 return 0;
8090 }
8091
8092 return 1;
8093 }
8094
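/* Print the target-specific options recognized above as part of the
   assembler's --help output.  */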
8095 void
8096 md_show_usage (FILE * fp)
8097 {
8098 struct aarch64_option_table *opt;
8099 struct aarch64_long_option_table *lopt;
8100
8101 fprintf (fp, _(" AArch64-specific assembler options:\n"));
8102
8103 for (opt = aarch64_opts; opt->option != NULL; opt++)
8104 if (opt->help != NULL)
8105 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
8106
8107 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8108 if (lopt->help != NULL)
8109 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
8110
8111 #ifdef OPTION_EB
8112 fprintf (fp, _("\
8113 -EB assemble code for a big-endian cpu\n"));
8114 #endif
8115
8116 #ifdef OPTION_EL
8117 fprintf (fp, _("\
8118 -EL assemble code for a little-endian cpu\n"));
8119 #endif
8120 }
8121
8122 /* Parse a .cpu directive. */
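/* e.g. ".cpu cortex-a53+crc" selects Cortex-A53 with the CRC extension.  */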
8123
8124 static void
8125 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
8126 {
8127 const struct aarch64_cpu_option_table *opt;
8128 char saved_char;
8129 char *name;
8130 char *ext;
8131 size_t optlen;
8132
8133 name = input_line_pointer;
8134 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8135 input_line_pointer++;
8136 saved_char = *input_line_pointer;
8137 *input_line_pointer = 0;
8138
8139 ext = strchr (name, '+');
8140
8141 if (ext != NULL)
8142 optlen = ext - name;
8143 else
8144 optlen = strlen (name);
8145
8146 /* Skip the first "all" entry. */
8147 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
8148 if (strlen (opt->name) == optlen
8149 && strncmp (name, opt->name, optlen) == 0)
8150 {
8151 mcpu_cpu_opt = &opt->value;
8152 if (ext != NULL)
8153 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8154 return;
8155
8156 cpu_variant = *mcpu_cpu_opt;
8157
8158 *input_line_pointer = saved_char;
8159 demand_empty_rest_of_line ();
8160 return;
8161 }
8162 as_bad (_("unknown cpu `%s'"), name);
8163 *input_line_pointer = saved_char;
8164 ignore_rest_of_line ();
8165 }
8166
8167
8168 /* Parse a .arch directive. */
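/* e.g. ".arch armv8-a+crc" selects ARMv8-A with the CRC extension.  */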
8169
8170 static void
8171 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8172 {
8173 const struct aarch64_arch_option_table *opt;
8174 char saved_char;
8175 char *name;
8176 char *ext;
8177 size_t optlen;
8178
8179 name = input_line_pointer;
8180 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8181 input_line_pointer++;
8182 saved_char = *input_line_pointer;
8183 *input_line_pointer = 0;
8184
8185 ext = strchr (name, '+');
8186
8187 if (ext != NULL)
8188 optlen = ext - name;
8189 else
8190 optlen = strlen (name);
8191
8192 /* Skip the first "all" entry. */
8193 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8194 if (strlen (opt->name) == optlen
8195 && strncmp (name, opt->name, optlen) == 0)
8196 {
8197 mcpu_cpu_opt = &opt->value;
8198 if (ext != NULL)
8199 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8200 return;
8201
8202 cpu_variant = *mcpu_cpu_opt;
8203
8204 *input_line_pointer = saved_char;
8205 demand_empty_rest_of_line ();
8206 return;
8207 }
8208
8209 as_bad (_("unknown architecture `%s'"), name);
8210 *input_line_pointer = saved_char;
8211 ignore_rest_of_line ();
8212 }
8213
8214 /* Parse a .arch_extension directive. */
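/* e.g. ".arch_extension crc" enables and ".arch_extension nocrc" disables
   the CRC instructions on top of the current cpu/arch selection.  */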
8215
8216 static void
8217 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8218 {
8219 char saved_char;
8220 char *ext = input_line_pointer;
8221
8222 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8223 input_line_pointer++;
8224 saved_char = *input_line_pointer;
8225 *input_line_pointer = 0;
8226
8227 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8228 return;
8229
8230 cpu_variant = *mcpu_cpu_opt;
8231
8232 *input_line_pointer = saved_char;
8233 demand_empty_rest_of_line ();
8234 }
8235
8236 /* Copy symbol information. */
8237
8238 void
8239 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
8240 {
8241 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
8242 }