[AArch64][SVE 08/32] Generalise aarch64_double_precision_fmovable
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
/* Element type of a SIMD vector register or vector element, as parsed
   from a ".<T>" qualifier by parse_vector_type_for_operand.
   NT_invtype means "no type parsed".  */
enum vector_el_type
{
  NT_invtype = -1,
  NT_b,		/* Byte (8-bit) elements.  */
  NT_h,		/* Halfword (16-bit) elements.  */
  NT_s,		/* Single-word (32-bit) elements.  */
  NT_d,		/* Doubleword (64-bit) elements.  */
  NT_q		/* Quadword (128-bit) element.  */
};
88
89 /* Bits for DEFINED field in vector_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92
/* A parsed SIMD vector shape and/or element index (e.g. the ".4s" or
   "[2]" part of an operand).  DEFINED holds NTA_* bits saying which of
   the fields below carry meaningful values.  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type (NT_*).  */
  unsigned char defined;	/* NTA_HASTYPE / NTA_HASINDEX bits.  */
  unsigned width;		/* Number of elements; 0 if unspecified.  */
  int64_t index;		/* Element index, valid if NTA_HASINDEX.  */
};
100
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
102
/* Relocation information attached to the instruction being assembled,
   including the GAS internal fixup.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression the fixup applies to.  */
  int pc_rel;				/* Presumably non-zero for PC-relative
					   fixups -- confirm against md_apply_fix.  */
  enum aarch64_opnd opnd;		/* Operand the relocation belongs to.  */
  uint32_t flags;			/* FIXUP_F_* bits (e.g.
					   FIXUP_F_HAS_EXPLICIT_SHIFT).  */
  unsigned need_libopcodes_p : 1;	/* NOTE(review): presumably set when
					   libopcodes must re-encode the insn
					   at fixup time -- confirm.  */
};
112
/* Everything GAS records about the instruction currently being
   assembled, including any parsing error noted so far (see the
   clear_error/set_error helpers below).  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation. */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  Kind is
     AARCH64_OPDE_NIL when no error is pending; ERROR may be NULL even
     when an error is recorded (a default message is composed later). */
  struct
  {
    enum aarch64_operand_error_kind kind;
    const char *error;
  } parsing_error;
  /* The condition that appears in the assembly line. */
  int cond;
  /* Relocation information (including the GAS internal fixup). */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool. */
  unsigned gen_lit_pool : 1;
};
130
131 typedef struct aarch64_instruction aarch64_instruction;
132
133 static aarch64_instruction inst;
134
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
/* Diagnostics inline function utilities.

   These are lightweight utilities which should only be called by parse_operands
   and other parsers.  GAS processes each assembly line by parsing it against
   instruction template(s), in the case of multiple templates (for the same
   mnemonic name), those templates are tried one by one until one succeeds or
   all fail.  An assembly line may fail a few templates before being
   successfully parsed; an error saved here in most cases is not a user error
   but an error indicating the current template is not the right template.
   Therefore it is very important that errors can be saved at a low cost during
   the parsing; we don't want to slow down the whole parsing by recording
   non-user errors in detail.

   Remember that the objective is to help GAS pick up the most appropriate
   error message in the case of multiple templates, e.g. FMOV which has 8
   templates. */
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline enum aarch64_operand_error_kind
175 get_error_kind (void)
176 {
177 return inst.parsing_error.kind;
178 }
179
180 static inline void
181 set_error (enum aarch64_operand_error_kind kind, const char *error)
182 {
183 inst.parsing_error.kind = kind;
184 inst.parsing_error.error = error;
185 }
186
187 static inline void
188 set_recoverable_error (const char *error)
189 {
190 set_error (AARCH64_OPDE_RECOVERABLE, error);
191 }
192
193 /* Use the DESC field of the corresponding aarch64_operand entry to compose
194 the error message. */
195 static inline void
196 set_default_error (void)
197 {
198 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
199 }
200
201 static inline void
202 set_syntax_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
205 }
206
207 static inline void
208 set_first_syntax_error (const char *error)
209 {
210 if (! error_p ())
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
212 }
213
214 static inline void
215 set_fatal_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
218 }
219 \f
220 /* Number of littlenums required to hold an extended precision number. */
221 #define MAX_LITTLENUMS 6
222
223 /* Return value for certain parsers when the parsing fails; those parsers
224 return the information of the parsed result, e.g. register number, on
225 success. */
226 #define PARSE_FAIL -1
227
228 /* This is an invalid condition code that means no conditional field is
229 present. */
230 #define COND_ALWAYS 0x10
231
/* Association of a barrier option name (TEMPLATE) with its encoding
   (VALUE).  */
typedef struct
{
  const char *template;
  unsigned long value;
} asm_barrier_opt;
237
/* Association of an NZCV flags name (TEMPLATE) with its encoding
   (VALUE).  */
typedef struct
{
  const char *template;
  uint32_t value;
} asm_nzcv;
243
/* Association of a relocation-modifier NAME with its BFD relocation
   code RELOC.  */
struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};
249
250 /* Macros to define the register types and masks for the purpose
251 of parsing. */
252
253 #undef AARCH64_REG_TYPES
254 #define AARCH64_REG_TYPES \
255 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
256 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
257 BASIC_REG_TYPE(SP_32) /* wsp */ \
258 BASIC_REG_TYPE(SP_64) /* sp */ \
259 BASIC_REG_TYPE(Z_32) /* wzr */ \
260 BASIC_REG_TYPE(Z_64) /* xzr */ \
261 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
262 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
263 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
264 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
265 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
266 BASIC_REG_TYPE(CN) /* c[0-7] */ \
267 BASIC_REG_TYPE(VN) /* v[0-31] */ \
268 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
269 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
270 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
271 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
272 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
273 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
274 /* Typecheck: any [BHSDQ]P FP. */ \
275 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
276 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
277 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
278 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
279 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
280 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
281 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
282 /* Any integer register; used for error messages only. */ \
283 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
284 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
285 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
286 /* Pseudo type to mark the end of the enumerator sequence. */ \
287 BASIC_REG_TYPE(MAX)
288
289 #undef BASIC_REG_TYPE
290 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
291 #undef MULTI_REG_TYPE
292 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
293
294 /* Register type enumerators. */
295 typedef enum aarch64_reg_type_
296 {
297 /* A list of REG_TYPE_*. */
298 AARCH64_REG_TYPES
299 } aarch64_reg_type;
300
301 #undef BASIC_REG_TYPE
302 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
303 #undef REG_TYPE
304 #define REG_TYPE(T) (1 << REG_TYPE_##T)
305 #undef MULTI_REG_TYPE
306 #define MULTI_REG_TYPE(T,V) V,
307
/* Structure for a hash table entry for a register.  Entries are looked
   up by NAME in aarch64_reg_hsh; BUILTIN distinguishes predefined
   registers from user aliases created via .req (see insert_reg_alias,
   which creates aliases with BUILTIN clear).  */
typedef struct
{
  const char *name;
  unsigned char number;
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;	/* REG_TYPE_* class.  */
  unsigned char builtin;
} reg_entry;
316
317 /* Values indexed by aarch64_reg_type to assist the type checking. */
318 static const unsigned reg_type_masks[] =
319 {
320 AARCH64_REG_TYPES
321 };
322
323 #undef BASIC_REG_TYPE
324 #undef REG_TYPE
325 #undef MULTI_REG_TYPE
326 #undef AARCH64_REG_TYPES
327
328 /* Diagnostics used when we don't get a register of the expected type.
329 Note: this has to synchronized with aarch64_reg_type definitions
330 above. */
331 static const char *
332 get_reg_expected_msg (aarch64_reg_type reg_type)
333 {
334 const char *msg;
335
336 switch (reg_type)
337 {
338 case REG_TYPE_R_32:
339 msg = N_("integer 32-bit register expected");
340 break;
341 case REG_TYPE_R_64:
342 msg = N_("integer 64-bit register expected");
343 break;
344 case REG_TYPE_R_N:
345 msg = N_("integer register expected");
346 break;
347 case REG_TYPE_R_Z_SP:
348 msg = N_("integer, zero or SP register expected");
349 break;
350 case REG_TYPE_FP_B:
351 msg = N_("8-bit SIMD scalar register expected");
352 break;
353 case REG_TYPE_FP_H:
354 msg = N_("16-bit SIMD scalar or floating-point half precision "
355 "register expected");
356 break;
357 case REG_TYPE_FP_S:
358 msg = N_("32-bit SIMD scalar or floating-point single precision "
359 "register expected");
360 break;
361 case REG_TYPE_FP_D:
362 msg = N_("64-bit SIMD scalar or floating-point double precision "
363 "register expected");
364 break;
365 case REG_TYPE_FP_Q:
366 msg = N_("128-bit SIMD scalar or floating-point quad precision "
367 "register expected");
368 break;
369 case REG_TYPE_CN:
370 msg = N_("C0 - C15 expected");
371 break;
372 case REG_TYPE_R_Z_BHSDQ_V:
373 msg = N_("register expected");
374 break;
375 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
376 msg = N_("SIMD scalar or floating-point register expected");
377 break;
378 case REG_TYPE_VN: /* any V reg */
379 msg = N_("vector register expected");
380 break;
381 default:
382 as_fatal (_("invalid register type %d"), reg_type);
383 }
384 return msg;
385 }
386
387 /* Some well known registers that we refer to directly elsewhere. */
388 #define REG_SP 31
389
390 /* Instructions take 4 bytes in the object file. */
391 #define INSN_SIZE 4
392
393 /* Define some common error messages. */
394 #define BAD_SP _("SP not allowed here")
395
396 static struct hash_control *aarch64_ops_hsh;
397 static struct hash_control *aarch64_cond_hsh;
398 static struct hash_control *aarch64_shift_hsh;
399 static struct hash_control *aarch64_sys_regs_hsh;
400 static struct hash_control *aarch64_pstatefield_hsh;
401 static struct hash_control *aarch64_sys_regs_ic_hsh;
402 static struct hash_control *aarch64_sys_regs_dc_hsh;
403 static struct hash_control *aarch64_sys_regs_at_hsh;
404 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
405 static struct hash_control *aarch64_reg_hsh;
406 static struct hash_control *aarch64_barrier_opt_hsh;
407 static struct hash_control *aarch64_nzcv_hsh;
408 static struct hash_control *aarch64_pldop_hsh;
409 static struct hash_control *aarch64_hint_opt_hsh;
410
411 /* Stuff needed to resolve the label ambiguity
412 As:
413 ...
414 label: <insn>
415 may differ from:
416 ...
417 label:
418 <insn> */
419
420 static symbolS *last_label_seen;
421
422 /* Literal pool structure. Held on a per-section
423 and per-sub-section basis. */
424
425 #define MAX_LITERAL_POOL_SIZE 1024
/* One literal-pool entry: the expression itself plus, for O_big
   values, a private copy of the bignum littlenums.  */
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
  LITTLENUM_TYPE * bignum;
} literal_expression;
432
/* A pool of literal values, held on a per-section and per-sub-section
   basis and linked through NEXT into list_of_pools.  */
typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  unsigned int next_free_entry;	/* Index of the first unused entry.  */
  unsigned int id;
  symbolS *symbol;		/* Symbol associated with the pool.  */
  segT section;			/* Section this pool belongs to.  */
  subsegT sub_section;		/* Sub-section this pool belongs to.  */
  int size;			/* NOTE(review): presumably the literal
				   size in bytes -- confirm at use sites.  */
  struct literal_pool *next;	/* Next pool in list_of_pools.  */
} literal_pool;
444
445 /* Pointer to a linked list of literal pools. */
446 static literal_pool *list_of_pools = NULL;
447 \f
448 /* Pure syntax. */
449
450 /* This array holds the chars that always start a comment. If the
451 pre-processor is disabled, these aren't very useful. */
452 const char comment_chars[] = "";
453
454 /* This array holds the chars that only start a comment at the beginning of
455 a line. If the line seems to have the form '# 123 filename'
456 .line and .file directives will appear in the pre-processed output. */
457 /* Note that input_file.c hand checks for '#' at the beginning of the
458 first line of the input file. This is because the compiler outputs
459 #NO_APP at the beginning of its output. */
460 /* Also note that comments like this one will always work. */
461 const char line_comment_chars[] = "#";
462
463 const char line_separator_chars[] = ";";
464
465 /* Chars that can be used to separate mant
466 from exp in floating point numbers. */
467 const char EXP_CHARS[] = "eE";
468
469 /* Chars that mean this number is a floating point constant. */
470 /* As in 0f12.456 */
471 /* or 0d1.2345e12 */
472
473 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
474
475 /* Prefix character that indicates the start of an immediate value. */
476 #define is_immediate_prefix(C) ((C) == '#')
477
478 /* Separator character handling. */
479
480 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
481
482 static inline bfd_boolean
483 skip_past_char (char **str, char c)
484 {
485 if (**str == c)
486 {
487 (*str)++;
488 return TRUE;
489 }
490 else
491 return FALSE;
492 }
493
494 #define skip_past_comma(str) skip_past_char (str, ',')
495
496 /* Arithmetic expressions (possibly involving symbols). */
497
498 static bfd_boolean in_my_get_expression_p = FALSE;
499
500 /* Third argument to my_get_expression. */
501 #define GE_NO_PREFIX 0
502 #define GE_OPT_PREFIX 1
503
/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP is filled in with the parsed expression
   and *STR is advanced past it.  Otherwise return FALSE and record a
   syntax error.

   PREFIX_MODE is GE_NO_PREFIX or GE_OPT_PREFIX; with the latter an
   optional leading '#' immediate prefix is accepted and skipped.
   If REJECT_ABSENT is non-zero, an absent expression (O_absent) is
   treated as a parse failure.  */

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  /* Remember the '#': it selects which error setter to use on
	     failure below.  */
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () works on input_line_pointer; temporarily point it at
     *STR, and let md_operand know we are inside my_get_expression so it
     can flag bad expressions as O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  /* Success: advance *STR past the expression and restore GAS's input
     pointer.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
570
571 /* Turn a string in input_line_pointer into a floating point constant
572 of type TYPE, and store the appropriate bytes in *LITP. The number
573 of LITTLENUMS emitted is stored in *SIZEP. An error message is
574 returned, or NULL on OK. */
575
576 const char *
577 md_atof (int type, char *litP, int *sizeP)
578 {
579 return ieee_md_atof (type, litP, sizeP, target_big_endian);
580 }
581
582 /* We handle all bad expressions here, so that we can report the faulty
583 instruction in the error message. */
584 void
585 md_operand (expressionS * exp)
586 {
587 if (in_my_get_expression_p)
588 exp->X_op = O_illegal;
589 }
590
591 /* Immediate values. */
592
/* Record ERROR as the current diagnostic unless one is already pending.
   Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error is
   the most meaningful; this avoids overwriting it with later
   (cascading) errors.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
604
/* Similar to first_error, but this function accepts a printf-style
   formatted error message.  Does nothing when an error is already
   pending.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad. */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message must fit within the fixed-size buffer.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
629
630 /* Register parsing. */
631
632 /* Generic register parser which is called by other specialized
633 register parsers.
634 CCP points to what should be the beginning of a register name.
635 If it is indeed a valid register name, advance CCP over it and
636 return the reg_entry structure; otherwise return NULL.
637 It does not issue diagnostics. */
638
639 static reg_entry *
640 parse_reg (char **ccp)
641 {
642 char *start = *ccp;
643 char *p;
644 reg_entry *reg;
645
646 #ifdef REGISTER_PREFIX
647 if (*start != REGISTER_PREFIX)
648 return NULL;
649 start++;
650 #endif
651
652 p = start;
653 if (!ISALPHA (*p) || !is_name_beginner (*p))
654 return NULL;
655
656 do
657 p++;
658 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
659
660 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
661
662 if (!reg)
663 return NULL;
664
665 *ccp = p;
666 return reg;
667 }
668
669 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
670 return FALSE. */
671 static bfd_boolean
672 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
673 {
674 if (reg->type == type)
675 return TRUE;
676
677 switch (type)
678 {
679 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
680 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
681 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
682 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
683 case REG_TYPE_VN: /* Vector register. */
684 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
685 return ((reg_type_masks[reg->type] & reg_type_masks[type])
686 == reg_type_masks[reg->type]);
687 default:
688 as_fatal ("unhandled type %d", type);
689 abort ();
690 }
691 }
692
/* Parse a register and return PARSE_FAIL if the register is not of type
   R_Z_SP (i.e. an integer register, {W}SP, or [WX]ZR).  Return the
   register number otherwise.  On success, *ISREG32 is set to one if the
   register is 32-bit wide; *ISREGZERO is set to one if the register is
   of type Z_32 or Z_64.  When REJECT_SP / REJECT_RZ is non-zero, SP /
   zero registers respectively are rejected.  *CCP is advanced past the
   register name only on success.
   Note that this function does not issue any diagnostics.  */

static int
aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
			 int *isreg32, int *isregzero)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return PARSE_FAIL;

  /* Only integer, SP and zero registers are acceptable here.  */
  if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
    return PARSE_FAIL;

  switch (reg->type)
    {
    case REG_TYPE_SP_32:
    case REG_TYPE_SP_64:
      if (reject_sp)
	return PARSE_FAIL;
      *isreg32 = reg->type == REG_TYPE_SP_32;
      *isregzero = 0;
      break;
    case REG_TYPE_R_32:
    case REG_TYPE_R_64:
      *isreg32 = reg->type == REG_TYPE_R_32;
      *isregzero = 0;
      break;
    case REG_TYPE_Z_32:
    case REG_TYPE_Z_64:
      if (reject_rz)
	return PARSE_FAIL;
      *isreg32 = reg->type == REG_TYPE_Z_32;
      *isregzero = 1;
      break;
    default:
      return PARSE_FAIL;
    }

  /* Commit the consumed input only on success.  */
  *ccp = str;

  return reg->number;
}
741
742 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
743 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
744 otherwise return FALSE.
745
746 Accept only one occurrence of:
747 8b 16b 2h 4h 8h 2s 4s 1d 2d
748 b h s d q */
749 static bfd_boolean
750 parse_vector_type_for_operand (struct vector_type_el *parsed_type, char **str)
751 {
752 char *ptr = *str;
753 unsigned width;
754 unsigned element_size;
755 enum vector_el_type type;
756
757 /* skip '.' */
758 ptr++;
759
760 if (!ISDIGIT (*ptr))
761 {
762 width = 0;
763 goto elt_size;
764 }
765 width = strtoul (ptr, &ptr, 10);
766 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
767 {
768 first_error_fmt (_("bad size %d in vector width specifier"), width);
769 return FALSE;
770 }
771
772 elt_size:
773 switch (TOLOWER (*ptr))
774 {
775 case 'b':
776 type = NT_b;
777 element_size = 8;
778 break;
779 case 'h':
780 type = NT_h;
781 element_size = 16;
782 break;
783 case 's':
784 type = NT_s;
785 element_size = 32;
786 break;
787 case 'd':
788 type = NT_d;
789 element_size = 64;
790 break;
791 case 'q':
792 if (width == 1)
793 {
794 type = NT_q;
795 element_size = 128;
796 break;
797 }
798 /* fall through. */
799 default:
800 if (*ptr != '\0')
801 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
802 else
803 first_error (_("missing element size"));
804 return FALSE;
805 }
806 if (width != 0 && width * element_size != 64 && width * element_size != 128
807 && !(width == 2 && element_size == 16))
808 {
809 first_error_fmt (_
810 ("invalid element size %d and vector size combination %c"),
811 width, *ptr);
812 return FALSE;
813 }
814 ptr++;
815
816 parsed_type->type = type;
817 parsed_type->width = width;
818
819 *str = ptr;
820
821 return TRUE;
822 }
823
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed to by *CCP is not a valid
   register name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the
   actual type of the register in *RTYPE when multiple alternatives were
   given, and return the register shape and element index information in
   *TYPEINFO.

   IN_REG_LIST should be set to TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start from "no shape, no index" and accumulate what we parse.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the register's actual class.  */
  type = reg->type;

  if (type == REG_TYPE_VN && *str == '.')
    {
      if (!parse_vector_type_for_operand (&parsetype, &str))
	return PARSE_FAIL;

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list == TRUE)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  Note that the error is
     recorded but the register number is still returned; the caller is
     expected to check error_p ().  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
943
944 /* Parse register.
945
946 Return the register number on success; return PARSE_FAIL otherwise.
947
948 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
949 the register (e.g. NEON double or quad reg when either has been requested).
950
951 If this is a NEON vector register with additional type information, fill
952 in the struct pointed to by VECTYPE (if non-NULL).
953
954 This parser does not handle register list. */
955
956 static int
957 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
958 aarch64_reg_type *rtype, struct vector_type_el *vectype)
959 {
960 struct vector_type_el atype;
961 char *str = *ccp;
962 int reg = parse_typed_reg (&str, type, rtype, &atype,
963 /*in_reg_list= */ FALSE);
964
965 if (reg == PARSE_FAIL)
966 return PARSE_FAIL;
967
968 if (vectype)
969 *vectype = atype;
970
971 *ccp = str;
972
973 return reg;
974 }
975
976 static inline bfd_boolean
977 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
978 {
979 return
980 e1.type == e2.type
981 && e1.defined == e2.defined
982 && e1.width == e2.width && e1.index == e2.index;
983 }
984
985 /* This function parses a list of vector registers of type TYPE.
986 On success, it returns the parsed register list information in the
987 following encoded format:
988
989 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
990 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
991
992 The information of the register shape and/or index is returned in
993 *VECTYPE.
994
995 It returns PARSE_FAIL if the register list is invalid.
996
997 The list contains one to four registers.
998 Each register can be one of:
999 <Vt>.<T>[<index>]
1000 <Vt>.<T>
1001 All <T> should be identical.
1002 All <index> should be identical.
1003 There are restrictions on <Vt> numbers which are checked later
1004 (by reg_list_valid_p). */
1005
static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  /* Parse comma-separated registers, each either a single register or
     the right-hand end of a "Vn - Vm" range (IN_RANGE set by the loop
     condition below).  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* The range end must not precede the range start.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All registers in the list must have the same shape/index
	     type as the first one.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Pack each register number into 5 bits, lowest register first.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* A ',' continues the list; otherwise a '-' marks a register range
     (note the comma-operator side effect setting IN_RANGE).  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* A list of typed-but-unindexed Vn.[bhsdq] registers must be followed
     by a shared element index, e.g. "{v0.s, v1.s}[2]".  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Encode: bits [1:0] hold the register count minus one; each register
     number occupies 5 bits starting at bit 2 (see the block comment
     above this function).  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1145
1146 /* Directives: register aliases. */
1147
1148 static reg_entry *
1149 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1150 {
1151 reg_entry *new;
1152 const char *name;
1153
1154 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1155 {
1156 if (new->builtin)
1157 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1158 str);
1159
1160 /* Only warn about a redefinition if it's not defined as the
1161 same register. */
1162 else if (new->number != number || new->type != type)
1163 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1164
1165 return NULL;
1166 }
1167
1168 name = xstrdup (str);
1169 new = XNEW (reg_entry);
1170
1171 new->name = name;
1172 new->number = number;
1173 new->type = type;
1174 new->builtin = FALSE;
1175
1176 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1177 abort ();
1178
1179 return new;
1180 }
1181
1182 /* Look for the .req directive. This is of the form:
1183
1184 new_register_name .req existing_register_name
1185
1186 If we find one, or if it looks sufficiently like one that we want to
1187 handle any error here, return TRUE. Otherwise return FALSE. */
1188
static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The target of the alias must already be a known register or
     alias; otherwise warn and swallow the directive.  */
  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the upper-case variant if it differs from the name
	 as stated.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	     foo .req r0
	     Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1261
1262 /* Should never be called, as .req goes between the alias and the
1263 register name, not at the beginning of the line. */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* .req legitimately appears only between an alias name and a
     register name, where create_register_alias handles it; if the
     directive dispatcher gets here, .req started a statement, which
     is a syntax error.  */
  as_bad (_("invalid syntax for .req directive"));
}
1269
1270 /* The .unreq directive deletes an alias which was previously defined
1271 by .req. For example:
1272
1273 my_alias .req r11
1274 .unreq my_alias */
1275
1276 static void
1277 s_unreq (int a ATTRIBUTE_UNUSED)
1278 {
1279 char *name;
1280 char saved_char;
1281
1282 name = input_line_pointer;
1283
1284 while (*input_line_pointer != 0
1285 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1286 ++input_line_pointer;
1287
1288 saved_char = *input_line_pointer;
1289 *input_line_pointer = 0;
1290
1291 if (!*name)
1292 as_bad (_("invalid syntax for .unreq directive"));
1293 else
1294 {
1295 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1296
1297 if (!reg)
1298 as_bad (_("unknown register alias '%s'"), name);
1299 else if (reg->builtin)
1300 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1301 name);
1302 else
1303 {
1304 char *p;
1305 char *nbuf;
1306
1307 hash_delete (aarch64_reg_hsh, name, FALSE);
1308 free ((char *) reg->name);
1309 free (reg);
1310
1311 /* Also locate the all upper case and all lower case versions.
1312 Do not complain if we cannot find one or the other as it
1313 was probably deleted above. */
1314
1315 nbuf = strdup (name);
1316 for (p = nbuf; *p; p++)
1317 *p = TOUPPER (*p);
1318 reg = hash_find (aarch64_reg_hsh, nbuf);
1319 if (reg)
1320 {
1321 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1322 free ((char *) reg->name);
1323 free (reg);
1324 }
1325
1326 for (p = nbuf; *p; p++)
1327 *p = TOLOWER (*p);
1328 reg = hash_find (aarch64_reg_hsh, nbuf);
1329 if (reg)
1330 {
1331 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1332 free ((char *) reg->name);
1333 free (reg);
1334 }
1335
1336 free (nbuf);
1337 }
1338 }
1339
1340 *input_line_pointer = saved_char;
1341 demand_empty_rest_of_line ();
1342 }
1343
1344 /* Directives: Instruction set selection. */
1345
1346 #ifdef OBJ_ELF
1347 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1348 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1349 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1350 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1351
1352 /* Create a new mapping symbol for the transition to STATE. */
1353
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the AArch64 ELF mapping-symbol name for this state: "$d" for
     data, "$x" for instructions.  Both are untyped.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Remove the superseded symbol from the global symbol chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag must be emitted in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1407
1408 /* We must sometimes convert a region marked as code to data during
1409 code alignment, if an odd number of bytes have to be padded. The
1410 code mapping symbol is pushed to an aligned address. */
1411
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed was also the first in this frag;
	     clear both cached references.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then resume STATE after the BYTES of
     padding.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1435
1436 static void mapping_state_2 (enum mstate state, int max_chars);
1437
1438 /* Set the mapping state to STATE. Only call this when about to
1439 emit some STATE bytes to the file. */
1440
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
         if we're at the first frag, check it's size > 0;
         if we're not at the first frag, then for sure
         the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* Anything before the first instruction in the section is data.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1479
1480 /* Same as mapping_state, but MAX_CHARS bytes have already been
1481 allocated. Put the mapping symbol that far back. */
1482
1483 static void
1484 mapping_state_2 (enum mstate state, int max_chars)
1485 {
1486 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1487
1488 if (!SEG_NORMAL (now_seg))
1489 return;
1490
1491 if (mapstate == state)
1492 /* The mapping symbol has already been emitted.
1493 There is nothing else to do. */
1494 return;
1495
1496 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1497 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1498 }
1499 #else
1500 #define mapping_state(x) /* nothing */
1501 #define mapping_state_2(x, y) /* nothing */
1502 #endif
1503
1504 /* Directives: sectioning and alignment. */
1505
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  /* Contents of .bss are data, not instructions.  */
  mapping_state (MAP_DATA);
}
1515
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Align the current output location to a 2-byte boundary.
     Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1527
1528 /* Directives: Literal pools. */
1529
1530 static literal_pool *
1531 find_literal_pool (int size)
1532 {
1533 literal_pool *pool;
1534
1535 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1536 {
1537 if (pool->section == now_seg
1538 && pool->sub_section == now_subseg && pool->size == size)
1539 break;
1540 }
1541
1542 return pool;
1543 }
1544
1545 static literal_pool *
1546 find_or_make_literal_pool (int size)
1547 {
1548 /* Next literal pool ID number. */
1549 static unsigned int latest_pool_num = 1;
1550 literal_pool *pool;
1551
1552 pool = find_literal_pool (size);
1553
1554 if (pool == NULL)
1555 {
1556 /* Create a new pool. */
1557 pool = XNEW (literal_pool);
1558 if (!pool)
1559 return NULL;
1560
1561 /* Currently we always put the literal pool in the current text
1562 section. If we were generating "small" model code where we
1563 knew that all code and initialised data was within 1MB then
1564 we could output literals to mergeable, read-only data
1565 sections. */
1566
1567 pool->next_free_entry = 0;
1568 pool->section = now_seg;
1569 pool->sub_section = now_subseg;
1570 pool->size = size;
1571 pool->next = list_of_pools;
1572 pool->symbol = NULL;
1573
1574 /* Add it to the list. */
1575 list_of_pools = pool;
1576 }
1577
1578 /* New pools, and emptied pools, will have a NULL symbol. */
1579 if (pool->symbol == NULL)
1580 {
1581 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1582 (valueT) 0, &zero_address_frag);
1583 pool->id = latest_pool_num++;
1584 }
1585
1586 /* Done. */
1587 return pool;
1588 }
1589
1590 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1591 Return TRUE on success, otherwise return FALSE. */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on symbol and addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite EXP in place as a reference into the pool: the pool's
     label plus the byte offset of this entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1649
1650 /* Can't use symbol_new here, so have to create a symbol and then at
1651 a later date assign it a value. Thats what these functions do. */
1652
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its own
     storage.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1700
1701
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Dump the 4-byte and 8-byte literal pools (alignments 2..4 cover
     sizes 4, 8 and 16).  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      /* Nothing to emit for a missing or empty pool.  */
      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* Literal pool contents are data, not instructions.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* Name the pool's label after its ID; \002 keeps the name out of
	 the user's namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      /* The saved bignum copy is no longer needed.  */
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1760
1761 #ifdef OBJ_ELF
1762 /* Forward declarations for functions below, in the MD interface
1763 section. */
1764 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1765 static struct reloc_table_entry * find_reloc_table_entry (char **);
1766
1767 /* Directives: Data. */
1768 /* N.B. the support for relocation suffix in this directive needs to be
1769 implemented properly. */
1770
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* .word/.xword contents are data, not instructions.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc_op:" prefix; relocation
	     suffixes on data directives are recognized here but not
	     yet implemented.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1822
1823 #endif /* OBJ_ELF */
1824
1825 /* Output a 32-bit word, but mark as an instruction. */
1826
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instruction encodings are always little-endian; byte-swap the
	 word when assembling for a big-endian target.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1877
1878 #ifdef OBJ_ELF
1879 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1880
1881 static void
1882 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
1883 {
1884 expressionS exp;
1885
1886 expression (&exp);
1887 frag_grow (4);
1888 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1889 BFD_RELOC_AARCH64_TLSDESC_ADD);
1890
1891 demand_empty_rest_of_line ();
1892 }
1893
1894 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1895
1896 static void
1897 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1898 {
1899 expressionS exp;
1900
1901 /* Since we're just labelling the code, there's no need to define a
1902 mapping symbol. */
1903 expression (&exp);
1904 /* Make sure there is enough room in this frag for the following
1905 blr. This trick only works if the blr follows immediately after
1906 the .tlsdesc directive. */
1907 frag_grow (4);
1908 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1909 BFD_RELOC_AARCH64_TLSDESC_CALL);
1910
1911 demand_empty_rest_of_line ();
1912 }
1913
1914 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
1915
1916 static void
1917 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
1918 {
1919 expressionS exp;
1920
1921 expression (&exp);
1922 frag_grow (4);
1923 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1924 BFD_RELOC_AARCH64_TLSDESC_LDR);
1925
1926 demand_empty_rest_of_line ();
1927 }
1928 #endif /* OBJ_ELF */
1929
1930 static void s_aarch64_arch (int);
1931 static void s_aarch64_cpu (int);
1932 static void s_aarch64_arch_extension (int);
1933
1934 /* This table describes all the machine specific pseudo-ops the assembler
1935 has to support. The fields are:
1936 pseudo-op name without dot
1937 function to call to execute this pseudo-op
1938 Integer arg to pass to the function. */
1939
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg": both flush the pending literal
     pools.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target selection directives.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  /* Emit a raw 32-bit word marked as an instruction.  */
  {"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the data size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
1963 \f
1964
1965 /* Check whether STR points to a register name followed by a comma or the
1966 end of line; REG_TYPE indicates which register types are checked
1967 against. Return TRUE if STR is such a register name; otherwise return
1968 FALSE. The function does not intend to produce any diagnostics, but since
1969 the register parser aarch64_reg_parse, which is called by this function,
1970 does produce diagnostics, we call clear_error to clear any diagnostics
1971 that may be generated by aarch64_reg_parse.
1972 Also, the function returns FALSE directly if there is any user error
1973 present at the function entry. This prevents the existing diagnostics
1974 state from being spoiled.
1975 The function currently serves parse_constant_immediate and
1976 parse_big_immediate only. */
1977 static bfd_boolean
1978 reg_name_p (char *str, aarch64_reg_type reg_type)
1979 {
1980 int reg;
1981
1982 /* Prevent the diagnostics state from being spoiled. */
1983 if (error_p ())
1984 return FALSE;
1985
1986 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1987
1988 /* Clear the parsing error that may be set by the reg parser. */
1989 clear_error ();
1990
1991 if (reg == PARSE_FAIL)
1992 return FALSE;
1993
1994 skip_whitespace (str);
1995 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1996 return TRUE;
1997
1998 return FALSE;
1999 }
2000
2001 /* Parser functions used exclusively in instruction operands. */
2002
2003 /* Parse an immediate expression which may not be constant.
2004
2005 To prevent the expression parser from pushing a register name
2006 into the symbol table as an undefined symbol, firstly a check is
2007 done to find out whether STR is a register of type REG_TYPE followed
2008 by a comma or the end of line. Return FALSE if STR is such a string. */
2009
2010 static bfd_boolean
2011 parse_immediate_expression (char **str, expressionS *exp,
2012 aarch64_reg_type reg_type)
2013 {
2014 if (reg_name_p (*str, reg_type))
2015 {
2016 set_recoverable_error (_("immediate operand required"));
2017 return FALSE;
2018 }
2019
2020 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2021
2022 if (exp->X_op == O_absent)
2023 {
2024 set_fatal_syntax_error (_("missing immediate expression"));
2025 return FALSE;
2026 }
2027
2028 return TRUE;
2029 }
2030
2031 /* Constant immediate-value read function for use in insn parsing.
2032 STR points to the beginning of the immediate (with the optional
2033 leading #); *VAL receives the value. REG_TYPE says which register
2034 names should be treated as registers rather than as symbolic immediates.
2035
2036 Return TRUE on success; otherwise return FALSE. */
2037
2038 static bfd_boolean
2039 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2040 {
2041 expressionS exp;
2042
2043 if (! parse_immediate_expression (str, &exp, reg_type))
2044 return FALSE;
2045
2046 if (exp.X_op != O_constant)
2047 {
2048 set_syntax_error (_("constant expression required"));
2049 return FALSE;
2050 }
2051
2052 *val = exp.X_add_number;
2053 return TRUE;
2054 }
2055
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  /* Pack an IEEE single-precision bit pattern into the AArch64 8-bit
     floating-point immediate form.  */
  uint32_t mantissa_exp = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;		/* b[31] -> b[7].  */

  return sign | mantissa_exp;
}
2062
2063 /* Return TRUE if the single-precision floating-point value encoded in IMM
2064 can be expressed in the AArch64 8-bit signed floating-point format with
2065 3-bit exponent and normalized 4 bits of precision; in other words, the
2066 floating-point value must be expressable as
2067 (+/-) n / 16 * power (2, r)
2068 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2069
2070 static bfd_boolean
2071 aarch64_imm_float_p (uint32_t imm)
2072 {
2073 /* If a single-precision floating-point value has the following bit
2074 pattern, it can be expressed in the AArch64 8-bit floating-point
2075 format:
2076
2077 3 32222222 2221111111111
2078 1 09876543 21098765432109876543210
2079 n Eeeeeexx xxxx0000000000000000000
2080
2081 where n, e and each x are either 0 or 1 independently, with
2082 E == ~ e. */
2083
2084 uint32_t pattern;
2085
2086 /* Prepare the pattern for 'Eeeeee'. */
2087 if (((imm >> 30) & 0x1) == 0)
2088 pattern = 0x3e000000;
2089 else
2090 pattern = 0x40000000;
2091
2092 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2093 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2094 }
2095
2096 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2097 as an IEEE float without any loss of precision. Store the value in
2098 *FPWORD if so. */
2099
static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~: the three bits after E must all be the inverse of E,
     i.e. the exponent fits in the narrower float range.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111: exclude infinities and NaNs, whose
     exponent cannot be re-biased.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Repack sign, narrowed exponent and mantissa into a binary32 word.  */
  *fpword = ((high32 & 0xc0000000)	/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));	/* 3 S bits.  */
  return TRUE;
}
2143
2144 /* Parse a floating-point immediate. Return TRUE on success and return the
2145 value in *IMMED in the format of IEEE754 single-precision encoding.
2146 *CCP points to the start of the string; DP_P is TRUE when the immediate
2147 is expected to be in double-precision (N.B. this only matters when
2148 hexadecimal representation is involved). REG_TYPE says which register
2149 names should be treated as registers rather than as symbolic immediates.
2150
2151 N.B. 0.0 is accepted by this function. */
2152
static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Double-precision encodings must convert losslessly to a
	     single-precision word.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision encoding cannot exceed 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else
    {
      /* We must not accidentally parse an integer as a floating-point number.
	 Make sure that the value we parse is not an integer by checking for
	 special characters '.' or 'e'.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Parse the decimal representation into little-endian littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  /* 0.0 is accepted even though it is not representable in the 8-bit
     immediate format.  */
  if (aarch64_imm_float_p (fpword) || fpword == 0)
    {
      *immed = fpword;
      *ccp = str;
      return TRUE;
    }

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2232
2233 /* Less-generic immediate-value read function with the possibility of loading
2234 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2235 instructions.
2236
2237 To prevent the expression parser from pushing a register name into the
2238 symbol table as an undefined symbol, a check is firstly done to find
2239 out whether STR is a register of type REG_TYPE followed by a comma or
2240 the end of line. Return FALSE if STR is such a register. */
2241
2242 static bfd_boolean
2243 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2244 {
2245 char *ptr = *str;
2246
2247 if (reg_name_p (ptr, reg_type))
2248 {
2249 set_syntax_error (_("immediate operand required"));
2250 return FALSE;
2251 }
2252
2253 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2254
2255 if (inst.reloc.exp.X_op == O_constant)
2256 *imm = inst.reloc.exp.X_add_number;
2257
2258 *str = ptr;
2259
2260 return TRUE;
2261 }
2262
2263 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2264 if NEED_LIBOPCODES is non-zero, the fixup will need
2265 assistance from the libopcodes. */
2266
2267 static inline void
2268 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2269 const aarch64_opnd_info *operand,
2270 int need_libopcodes_p)
2271 {
2272 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2273 reloc->opnd = operand->type;
2274 if (need_libopcodes_p)
2275 reloc->need_libopcodes_p = 1;
2276 };
2277
2278 /* Return TRUE if the instruction needs to be fixed up later internally by
2279 the GAS; otherwise return FALSE. */
2280
2281 static inline bfd_boolean
2282 aarch64_gas_internal_fixup_p (void)
2283 {
2284 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2285 }
2286
/* Assign the immediate value to the relevant field in *OPERAND if
2288 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2289 needs an internal fixup in a later stage.
2290 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2291 IMM.VALUE that may get assigned with the constant. */
2292 static inline void
2293 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2294 aarch64_opnd_info *operand,
2295 int addr_off_p,
2296 int need_libopcodes_p,
2297 int skip_p)
2298 {
2299 if (reloc->exp.X_op == O_constant)
2300 {
2301 if (addr_off_p)
2302 operand->addr.offset.imm = reloc->exp.X_add_number;
2303 else
2304 operand->imm.value = reloc->exp.X_add_number;
2305 reloc->type = BFD_RELOC_UNUSED;
2306 }
2307 else
2308 {
2309 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2310 /* Tell libopcodes to ignore this operand or not. This is helpful
2311 when one of the operands needs to be fixed up later but we need
2312 libopcodes to check the other operands. */
2313 operand->skip = skip_p;
2314 }
2315 }
2316
2317 /* Relocation modifiers. Each entry in the table contains the textual
2318 name for the relocation which may be placed before a symbol used as
2319 a load/store offset, or add immediate. It must be surrounded by a
2320 leading and trailing colon, for example:
2321
2322 ldr x0, [x1, #:rello:varsym]
2323 add x0, x1, #:rello:varsym */
2324
struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero if the relocation is PC-relative.  */
  bfd_reloc_code_real_type adr_type;	  /* Reloc used on ADR.  */
  bfd_reloc_code_real_type adrp_type;	  /* Reloc used on ADRP.  */
  bfd_reloc_code_real_type movw_type;	  /* Reloc used on MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	  /* Reloc used on ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	  /* Reloc used on load/store.  */
  bfd_reloc_code_real_type ld_literal_type;  /* Reloc used on LD literal.  */
};
2336
static struct reloc_table_entry reloc_table[] = {
  /* A zero in a context field means the modifier is not allowed in that
     context.  */
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0, /* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0, /* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0, /* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVK, no check.  */
  {"gotoff_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits of address/value: MOVK.  */
  {"tlsgd_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0, /* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   0,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
2801
2802 /* Given the address of a pointer pointing to the textual name of a
2803 relocation as may appear in assembler source, attempt to find its
2804 details in reloc_table. The pointer will be updated to the character
2805 after the trailing colon. On failure, NULL will be returned;
2806 otherwise return the reloc_table_entry. */
2807
2808 static struct reloc_table_entry *
2809 find_reloc_table_entry (char **str)
2810 {
2811 unsigned int i;
2812 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2813 {
2814 int length = strlen (reloc_table[i].name);
2815
2816 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2817 && (*str)[length] == ':')
2818 {
2819 *str += (length + 1);
2820 return &reloc_table[i];
2821 }
2822 }
2823
2824 return NULL;
2825 }
2826
/* Mode argument to parse_shift and parse_shifter_operand.  */
/* Each mode restricts which shift/extend operators parse_shift accepts
   and which operand forms are legal.  */
enum parse_shift_mode
{
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}" */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm" */
  SHIFTED_LSL,			/* bare "lsl #n" */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n" */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n */
};
2838
2839 /* Parse a <shift> operator on an AArch64 data processing instruction.
2840 Return TRUE on success; otherwise return FALSE. */
2841 static bfd_boolean
2842 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2843 {
2844 const struct aarch64_name_value_pair *shift_op;
2845 enum aarch64_modifier_kind kind;
2846 expressionS exp;
2847 int exp_has_prefix;
2848 char *s = *str;
2849 char *p = s;
2850
2851 for (p = *str; ISALPHA (*p); p++)
2852 ;
2853
2854 if (p == *str)
2855 {
2856 set_syntax_error (_("shift expression expected"));
2857 return FALSE;
2858 }
2859
2860 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2861
2862 if (shift_op == NULL)
2863 {
2864 set_syntax_error (_("shift operator expected"));
2865 return FALSE;
2866 }
2867
2868 kind = aarch64_get_operand_modifier (shift_op);
2869
2870 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2871 {
2872 set_syntax_error (_("invalid use of 'MSL'"));
2873 return FALSE;
2874 }
2875
2876 switch (mode)
2877 {
2878 case SHIFTED_LOGIC_IMM:
2879 if (aarch64_extend_operator_p (kind) == TRUE)
2880 {
2881 set_syntax_error (_("extending shift is not permitted"));
2882 return FALSE;
2883 }
2884 break;
2885
2886 case SHIFTED_ARITH_IMM:
2887 if (kind == AARCH64_MOD_ROR)
2888 {
2889 set_syntax_error (_("'ROR' shift is not permitted"));
2890 return FALSE;
2891 }
2892 break;
2893
2894 case SHIFTED_LSL:
2895 if (kind != AARCH64_MOD_LSL)
2896 {
2897 set_syntax_error (_("only 'LSL' shift is permitted"));
2898 return FALSE;
2899 }
2900 break;
2901
2902 case SHIFTED_REG_OFFSET:
2903 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2904 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2905 {
2906 set_fatal_syntax_error
2907 (_("invalid shift for the register offset addressing mode"));
2908 return FALSE;
2909 }
2910 break;
2911
2912 case SHIFTED_LSL_MSL:
2913 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2914 {
2915 set_syntax_error (_("invalid shift operator"));
2916 return FALSE;
2917 }
2918 break;
2919
2920 default:
2921 abort ();
2922 }
2923
2924 /* Whitespace can appear here if the next thing is a bare digit. */
2925 skip_whitespace (p);
2926
2927 /* Parse shift amount. */
2928 exp_has_prefix = 0;
2929 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2930 exp.X_op = O_absent;
2931 else
2932 {
2933 if (is_immediate_prefix (*p))
2934 {
2935 p++;
2936 exp_has_prefix = 1;
2937 }
2938 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2939 }
2940 if (exp.X_op == O_absent)
2941 {
2942 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2943 {
2944 set_syntax_error (_("missing shift amount"));
2945 return FALSE;
2946 }
2947 operand->shifter.amount = 0;
2948 }
2949 else if (exp.X_op != O_constant)
2950 {
2951 set_syntax_error (_("constant shift amount required"));
2952 return FALSE;
2953 }
2954 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2955 {
2956 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2957 return FALSE;
2958 }
2959 else
2960 {
2961 operand->shifter.amount = exp.X_add_number;
2962 operand->shifter.amount_present = 1;
2963 }
2964
2965 operand->shifter.operator_present = 1;
2966 operand->shifter.kind = kind;
2967
2968 *str = p;
2969 return TRUE;
2970 }
2971
2972 /* Parse a <shifter_operand> for a data processing instruction:
2973
2974 #<immediate>
2975 #<immediate>, LSL #imm
2976
2977 Validation of immediate operands is deferred to md_apply_fix.
2978
2979 Return TRUE on success; otherwise return FALSE. */
2980
static bfd_boolean
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the two immediate-capable modes are handled here.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return FALSE;

  p = *str;

  /* Accept an immediate expression.  */
  if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
    return FALSE;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return FALSE;

  /* Do not accept any shifter for logical immediate values: if a comma
     followed by a valid shift is present, that is a syntax error.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return FALSE;
    }

  *str = p;
  return TRUE;
}
3012
3013 /* Parse a <shifter_operand> for a data processing instruction:
3014
3015 <Rm>
3016 <Rm>, <shift>
3017 #<immediate>
3018 #<immediate>, LSL #imm
3019
3020 where <shift> is handled by parse_shift above, and the last two
3021 cases are handled by the function above.
3022
3023 Validation of immediate operands is deferred to md_apply_fix.
3024
3025 Return TRUE on success; otherwise return FALSE. */
3026
static bfd_boolean
parse_shifter_operand (char **str, aarch64_opnd_info *operand,
		       enum parse_shift_mode mode)
{
  int reg;
  int isreg32, isregzero;
  enum aarch64_operand_class opd_class
    = aarch64_get_operand_class (operand->type);

  /* First try the register form <Rm>{, <shift>}.  */
  if ((reg =
       aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
    {
      if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
	{
	  set_syntax_error (_("unexpected register in the immediate operand"));
	  return FALSE;
	}

      /* SP is not a valid shifted-register operand.  */
      if (!isregzero && reg == REG_SP)
	{
	  set_syntax_error (BAD_SP);
	  return FALSE;
	}

      operand->reg.regno = reg;
      operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;

      /* Accept optional shift operation on register.  */
      if (! skip_past_comma (str))
	return TRUE;

      if (! parse_shift (str, operand, mode))
	return FALSE;

      return TRUE;
    }
  else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
    {
      set_syntax_error
	(_("integer register expected in the extended/shifted operand "
	   "register"));
      return FALSE;
    }

  /* We have a shifted immediate variable.  */
  return parse_shifter_operand_imm (str, operand, mode);
}
3074
3075 /* Return TRUE on success; return FALSE otherwise. */
3076
static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip the '#:' or ':' prefix before the modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3135
3136 /* Parse all forms of an address expression. Information is written
3137 to *OPERAND and/or inst.reloc.
3138
3139 The A64 instruction set has the following addressing modes:
3140
3141 Offset
3142 [base] // in SIMD ld/st structure
3143 [base{,#0}] // in ld/st exclusive
3144 [base{,#imm}]
3145 [base,Xm{,LSL #imm}]
3146 [base,Xm,SXTX {#imm}]
3147 [base,Wm,(S|U)XTW {#imm}]
3148 Pre-indexed
3149 [base,#imm]!
3150 Post-indexed
3151 [base],#imm
3152 [base],Xm // in SIMD ld/st structure
3153 PC-relative (literal)
3154 label
3155 =immediate
3156
3157 (As a convenience, the notation "=immediate" is permitted in conjunction
3158 with the pc-relative literal load instructions to automatically place an
3159 immediate value or symbolic address in a nearby literal pool and generate
3160 a hidden label which references it.)
3161
3162 Upon a successful parsing, the address structure in *OPERAND will be
3163 filled in the following way:
3164
3165 .base_regno = <base>
3166 .offset.is_reg // 1 if the offset is a register
3167 .offset.imm = <imm>
3168 .offset.regno = <Rm>
3169
3170 For different addressing modes defined in the A64 ISA:
3171
3172 Offset
3173 .pcrel=0; .preind=1; .postind=0; .writeback=0
3174 Pre-indexed
3175 .pcrel=0; .preind=1; .postind=0; .writeback=1
3176 Post-indexed
3177 .pcrel=0; .preind=0; .postind=1; .writeback=1
3178 PC-relative (literal)
3179 .pcrel=1; .preind=1; .postind=0; .writeback=0
3180
3181 The shift/extension information, if any, will be stored in .shifter.
3182
3183 It is the caller's responsibility to check for addressing modes not
3184 supported by the instruction, and to set inst.reloc.type. */
3185
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
		    int accept_reg_post_index)
{
  char *p = *str;
  int reg;
  int isreg32, isregzero;
  /* Expressions are parsed straight into the instruction's reloc slot.  */
  expressionS *exp = &inst.reloc.exp;

  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (reloc && skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* Pick the relocation variant appropriate for the operand.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  /* Accept SP and reject ZR */
  reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
  if (reg == PARSE_FAIL || isreg32)
    {
      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
      return FALSE;
    }
  operand->addr.base_regno = reg;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      /* Reject SP and accept ZR */
      reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
      if (reg != PARSE_FAIL)
	{
	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}] */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These extensions require a 64-bit offset register.  */
	      if (isreg32)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	    }
	  else if (!isreg32)
	    {
	      /* (S|U)XTW requires a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (reloc && skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid expression in the address"));
	      return FALSE;
	    }
	  /* [Xn,<expr> */
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      if (accept_reg_post_index
	  && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
					     &isregzero)) != PARSE_FAIL)
	{
	  /* [Xn],Xm */
	  if (isreg32)
	    {
	      set_syntax_error (_("invalid 32-bit register offset"));
	      return FALSE;
	    }
	  operand->addr.offset.regno = reg;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  /* Reject [Rn]!  */
	  set_syntax_error (_("missing offset in the pre-indexed address"));
	  return FALSE;
	}
      operand->addr.preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }

  *str = p;
  return TRUE;
}
3436
3437 /* Return TRUE on success; otherwise return FALSE. */
static bfd_boolean
parse_address (char **str, aarch64_opnd_info *operand,
	       int accept_reg_post_index)
{
  /* Plain address: relocation modifiers are not accepted (RELOC == 0).  */
  return parse_address_main (str, operand, 0, accept_reg_post_index);
}
3444
3445 /* Return TRUE on success; otherwise return FALSE. */
static bfd_boolean
parse_address_reloc (char **str, aarch64_opnd_info *operand)
{
  /* Accept relocation modifiers; register post-indexing is not allowed.  */
  return parse_address_main (str, operand, 1, 0);
}
3451
3452 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3453 Return TRUE on success; otherwise return FALSE. */
3454 static bfd_boolean
3455 parse_half (char **str, int *internal_fixup_p)
3456 {
3457 char *p = *str;
3458
3459 skip_past_char (&p, '#');
3460
3461 gas_assert (internal_fixup_p);
3462 *internal_fixup_p = 0;
3463
3464 if (*p == ':')
3465 {
3466 struct reloc_table_entry *entry;
3467
3468 /* Try to parse a relocation. Anything else is an error. */
3469 ++p;
3470 if (!(entry = find_reloc_table_entry (&p)))
3471 {
3472 set_syntax_error (_("unknown relocation modifier"));
3473 return FALSE;
3474 }
3475
3476 if (entry->movw_type == 0)
3477 {
3478 set_syntax_error
3479 (_("this relocation modifier is not allowed on this instruction"));
3480 return FALSE;
3481 }
3482
3483 inst.reloc.type = entry->movw_type;
3484 }
3485 else
3486 *internal_fixup_p = 1;
3487
3488 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3489 return FALSE;
3490
3491 *str = p;
3492 return TRUE;
3493 }
3494
3495 /* Parse an operand for an ADRP instruction:
3496 ADRP <Xd>, <label>
3497 Return TRUE on success; otherwise return FALSE. */
3498
3499 static bfd_boolean
3500 parse_adrp (char **str)
3501 {
3502 char *p;
3503
3504 p = *str;
3505 if (*p == ':')
3506 {
3507 struct reloc_table_entry *entry;
3508
3509 /* Try to parse a relocation. Anything else is an error. */
3510 ++p;
3511 if (!(entry = find_reloc_table_entry (&p)))
3512 {
3513 set_syntax_error (_("unknown relocation modifier"));
3514 return FALSE;
3515 }
3516
3517 if (entry->adrp_type == 0)
3518 {
3519 set_syntax_error
3520 (_("this relocation modifier is not allowed on this instruction"));
3521 return FALSE;
3522 }
3523
3524 inst.reloc.type = entry->adrp_type;
3525 }
3526 else
3527 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3528
3529 inst.reloc.pc_rel = 1;
3530
3531 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3532 return FALSE;
3533
3534 *str = p;
3535 return TRUE;
3536 }
3537
3538 /* Miscellaneous. */
3539
3540 /* Parse an option for a preload instruction. Returns the encoding for the
3541 option, or PARSE_FAIL. */
3542
3543 static int
3544 parse_pldop (char **str)
3545 {
3546 char *p, *q;
3547 const struct aarch64_name_value_pair *o;
3548
3549 p = q = *str;
3550 while (ISALNUM (*q))
3551 q++;
3552
3553 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3554 if (!o)
3555 return PARSE_FAIL;
3556
3557 *str = q;
3558 return o->value;
3559 }
3560
3561 /* Parse an option for a barrier instruction. Returns the encoding for the
3562 option, or PARSE_FAIL. */
3563
3564 static int
3565 parse_barrier (char **str)
3566 {
3567 char *p, *q;
3568 const asm_barrier_opt *o;
3569
3570 p = q = *str;
3571 while (ISALPHA (*q))
3572 q++;
3573
3574 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3575 if (!o)
3576 return PARSE_FAIL;
3577
3578 *str = q;
3579 return o->value;
3580 }
3581
3582 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3583 return 0 if successful. Otherwise return PARSE_FAIL. */
3584
3585 static int
3586 parse_barrier_psb (char **str,
3587 const struct aarch64_name_value_pair ** hint_opt)
3588 {
3589 char *p, *q;
3590 const struct aarch64_name_value_pair *o;
3591
3592 p = q = *str;
3593 while (ISALPHA (*q))
3594 q++;
3595
3596 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3597 if (!o)
3598 {
3599 set_fatal_syntax_error
3600 ( _("unknown or missing option to PSB"));
3601 return PARSE_FAIL;
3602 }
3603
3604 if (o->value != 0x11)
3605 {
3606 /* PSB only accepts option name 'CSYNC'. */
3607 set_syntax_error
3608 (_("the specified option is not accepted for PSB"));
3609 return PARSE_FAIL;
3610 }
3611
3612 *str = q;
3613 *hint_opt = o;
3614 return 0;
3615 }
3616
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.
*/

static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
	       int imple_defined_p, int pstatefield_p)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy the register name into BUF, lower-cased, stopping at the first
     character that cannot be part of a name; at most 31 characters are
     copied.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF be large enough.
     NOTE(review): a name of 32 or more characters trips this assert
     (aborting) rather than producing a syntax error — presumably no
     valid name is that long; confirm before relying on it for
     arbitrary input.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields: op0 at bit 14, op1 at 11, CRn at 7, CRm at 3,
	     op2 at 0.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	}
    }
  else
    {
      /* Known name: diagnose uses not supported by the selected processor
	 or deprecated names, but still return the encoding.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
    }

  *str = q;
  return value;
}
3679
3680 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3681 for the option, or NULL. */
3682
3683 static const aarch64_sys_ins_reg *
3684 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3685 {
3686 char *p, *q;
3687 char buf[32];
3688 const aarch64_sys_ins_reg *o;
3689
3690 p = buf;
3691 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3692 if (p < buf + 31)
3693 *p++ = TOLOWER (*q);
3694 *p = '\0';
3695
3696 o = hash_find (sys_ins_regs, buf);
3697 if (!o)
3698 return NULL;
3699
3700 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
3701 as_bad (_("selected processor does not support system register "
3702 "name '%s'"), buf);
3703
3704 *str = q;
3705 return o;
3706 }
3707 \f
/* Operand-parsing helper macros.  Each one either consumes the expected
   input or transfers control to a `failure' label.  They rely on local
   variables (STR, VAL, RTYPE, INFO, IMM_REG_TYPE, ISREG32, ISREGZERO)
   and the `failure' label being in scope at the point of expansion.  */

/* Consume the single character CHR from STR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of kind REGTYPE into VAL, or fail with the default
   error.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register into INFO, deriving the W/X
   qualifier from the parsed register size, or fail.  */
#define po_int_reg_or_fail(reject_sp, reject_rz) do {		\
    val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz,	\
				   &isreg32, &isregzero);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = val;					\
    if (isreg32)						\
      info->qualifier = AARCH64_OPND_QLF_W;			\
    else							\
      info->qualifier = AARCH64_OPND_QLF_X;			\
  } while (0)

/* Parse a constant immediate into VAL without any range check, or
   fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or fail; the bounds are stringized into the error message.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Evaluate EXPR; fail if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
3757 \f
/* Encode the 12-bit immediate field of an add/sub-immediate instruction:
   the value is placed at bit position 10.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  uint32_t field = imm << 10;
  return field;
}
3764
/* Encode the shift-amount field of an add/sub-immediate instruction:
   the count is placed at bit position 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  uint32_t field = cnt << 22;
  return field;
}
3771
3772
/* Encode the immediate field of an ADR instruction: the low two bits of
   IMM go to bits [30:29] and bits [20:2] go to bits [23:5].  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t lo2 = (imm & 0x3) << 29;		 /* [1:0]  -> [30:29] */
  uint32_t hi19 = (imm & (0x7ffff << 2)) << 3;	 /* [20:2] -> [23:5]  */
  return lo2 | hi19;
}
3780
/* Encode the immediate field of a move-wide-immediate instruction:
   the value is placed at bit position 5.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  uint32_t field = imm << 5;
  return field;
}
3787
/* Encode the 26-bit offset of an unconditional branch: the offset is
   masked into bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask26 = (1u << 26) - 1;
  return ofs & mask26;
}
3794
/* Encode the 19-bit offset of a conditional branch or compare-and-branch:
   the offset is masked to 19 bits and placed at bit position 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask19 = (1u << 19) - 1;
  return (ofs & mask19) << 5;
}
3801
/* Encode the 19-bit offset of a load-literal instruction: the offset is
   masked to 19 bits and placed at bit position 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t mask19 = (1u << 19) - 1;
  return (ofs & mask19) << 5;
}
3808
/* Encode the 14-bit offset of a test-and-branch instruction: the offset
   is masked to 14 bits and placed at bit position 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask14 = (1u << 14) - 1;
  return (ofs & mask14) << 5;
}
3815
/* Encode the 16-bit immediate field of SVC/HVC/SMC: the value is placed
   at bit position 5.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  uint32_t field = imm << 5;
  return field;
}
3822
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling bit 30 of
   the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t bit30 = 1u << 30;
  return opcode ^ bit30;
}
3829
/* Turn a MOVN-family opcode into the corresponding MOVZ encoding by
   setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t bit30 = 1u << 30;
  return opcode | bit30;
}
3835
/* Turn a MOVZ-family opcode into the corresponding MOVN encoding by
   clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t bit30 = 1u << 30;
  return opcode & ~bit30;
}
3841
3842 /* Overall per-instruction processing. */
3843
3844 /* We need to be able to fix up arbitrary expressions in some statements.
3845 This is so that we can handle symbols that are an arbitrary distance from
3846 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3847 which returns part of an address in a form which will be valid for
3848 a data instruction. We do this by pushing the expression into a symbol
3849 in the expr_section, and creating a fix for that. */
3850
3851 static fixS *
3852 fix_new_aarch64 (fragS * frag,
3853 int where,
3854 short int size, expressionS * exp, int pc_rel, int reloc)
3855 {
3856 fixS *new_fix;
3857
3858 switch (exp->X_op)
3859 {
3860 case O_constant:
3861 case O_symbol:
3862 case O_add:
3863 case O_subtract:
3864 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3865 break;
3866
3867 default:
3868 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3869 pc_rel, reloc);
3870 break;
3871 }
3872 return new_fix;
3873 }
3874 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Indexed by
   enum aarch64_operand_error_kind, so it must be kept in sync with
   that enum's ordering.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
3896
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The asserts spell out the severity ordering the enum values must
     follow; given that ordering, a plain integer comparison decides.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
3917
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.
   N.B. the result points into a static buffer; each call overwrites
   the previous result.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
3946
3947 static void
3948 reset_aarch64_instruction (aarch64_instruction *instruction)
3949 {
3950 memset (instruction, '\0', sizeof (aarch64_instruction));
3951 instruction->reloc.type = BFD_RELOC_UNUSED;
3952 }
3953
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error: the opcode template it was found against,
   the error detail, and a link to the next record.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;
  aarch64_operand_error detail;
  struct operand_error_record *next;
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of operand error records; the head and tail
   pointers allow O(1) insertion and whole-list splicing.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled by init_operand_error_report to avoid
   repeated allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
3985
3986 /* Initialize the data structure that stores the operand mismatch
3987 information on assembling one line of the assembly code. */
3988 static void
3989 init_operand_error_report (void)
3990 {
3991 if (operand_error_report.head != NULL)
3992 {
3993 gas_assert (operand_error_report.tail != NULL);
3994 operand_error_report.tail->next = free_opnd_error_record_nodes;
3995 free_opnd_error_record_nodes = operand_error_report.head;
3996 operand_error_report.head = NULL;
3997 operand_error_report.tail = NULL;
3998 return;
3999 }
4000 gas_assert (operand_error_report.tail == NULL);
4001 }
4002
4003 /* Return TRUE if some operand error has been recorded during the
4004 parsing of the current assembly line using the opcode *OPCODE;
4005 otherwise return FALSE. */
4006 static inline bfd_boolean
4007 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4008 {
4009 operand_error_record *record = operand_error_report.head;
4010 return record && record->opcode == opcode;
4011 }
4012
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* If a record for OPCODE already exists it is the list head (records
     are always inserted at the head), so RECORD aliases it here.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Either a freshly prepared record, or an existing one whose error is
     being replaced by the more relevant one.  */
  record->detail = new_record->detail;
}
4064
4065 static inline void
4066 record_operand_error_info (const aarch64_opcode *opcode,
4067 aarch64_operand_error *error_info)
4068 {
4069 operand_error_record record;
4070 record.opcode = opcode;
4071 record.detail = *error_info;
4072 add_operand_error_record (&record);
4073 }
4074
4075 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4076 error message *ERROR, for operand IDX (count from 0). */
4077
4078 static void
4079 record_operand_error (const aarch64_opcode *opcode, int idx,
4080 enum aarch64_operand_error_kind kind,
4081 const char* error)
4082 {
4083 aarch64_operand_error info;
4084 memset(&info, 0, sizeof (info));
4085 info.index = idx;
4086 info.kind = kind;
4087 info.error = error;
4088 record_operand_error_info (opcode, &info);
4089 }
4090
4091 static void
4092 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4093 enum aarch64_operand_error_kind kind,
4094 const char* error, const int *extra_data)
4095 {
4096 aarch64_operand_error info;
4097 info.index = idx;
4098 info.kind = kind;
4099 info.error = error;
4100 info.data[0] = extra_data[0];
4101 info.data[1] = extra_data[1];
4102 info.data[2] = extra_data[2];
4103 record_operand_error_info (opcode, &info);
4104 }
4105
4106 static void
4107 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4108 const char* error, int lower_bound,
4109 int upper_bound)
4110 {
4111 int data[3] = {lower_bound, upper_bound, 0};
4112 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4113 error, data);
4114 }
4115
/* Remove the operand error record for *OPCODE.  */
static void ATTRIBUTE_UNUSED
remove_operand_error_record (const aarch64_opcode *opcode)
{
  if (opcode_has_operand_error_p (opcode))
    {
      /* A record for OPCODE, when present, is always the list head
	 (see add_operand_error_record), so unlink the head and return
	 it to the free list.  */
      operand_error_record* record = operand_error_report.head;
      gas_assert (record != NULL && operand_error_report.tail != NULL);
      operand_error_report.head = record->next;
      record->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = record;
      /* Keep the tail pointer consistent when the list becomes empty.  */
      if (operand_error_report.head == NULL)
	{
	  gas_assert (operand_error_report.tail == record);
	  operand_error_report.tail = NULL;
	}
    }
}
4134
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = -1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list; an empty
	 (all-NIL) sequence terminates the list early.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  if (i != 0 && idx == -1)
	    /* If nothing has been matched, return the 1st sequence.  */
	    idx = 0;
	  break;
	}

      /* Count how many operand qualifiers agree with this sequence.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Strictly greater, so the first of equally good matches wins.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
4187
4188 /* Assign qualifiers in the qualifier seqence (headed by QUALIFIERS) to the
4189 corresponding operands in *INSTR. */
4190
4191 static inline void
4192 assign_qualifier_sequence (aarch64_inst *instr,
4193 const aarch64_opnd_qualifier_t *qualifiers)
4194 {
4195 int i = 0;
4196 int num_opnds = aarch64_num_of_operands (instr->opcode);
4197 gas_assert (num_opnds);
4198 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4199 instr->operands[i].qualifier = *qualifiers;
4200 }
4201
4202 /* Print operands for the diagnosis purpose. */
4203
4204 static void
4205 print_operands (char *buf, const aarch64_opcode *opcode,
4206 const aarch64_opnd_info *opnds)
4207 {
4208 int i;
4209
4210 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4211 {
4212 char str[128];
4213
4214 /* We regard the opcode operand info more, however we also look into
4215 the inst->operands to support the disassembling of the optional
4216 operand.
4217 The two operand code should be the same in all cases, apart from
4218 when the operand can be optional. */
4219 if (opcode->operands[i] == AARCH64_OPND_NIL
4220 || opnds[i].type == AARCH64_OPND_NIL)
4221 break;
4222
4223 /* Generate the operand string in STR. */
4224 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL);
4225
4226 /* Delimiter. */
4227 if (str[0] != '\0')
4228 strcat (buf, i == 0 ? " " : ",");
4229
4230 /* Append the operand string. */
4231 strcat (buf, str);
4232 }
4233 }
4234
/* Send to stderr a string as information.  The message is prefixed with
   "Info: " and, when known, the current input file name and (if
   non-zero) line number.  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4258
/* Output one operand error record.  STR is the full assembly line being
   diagnosed and is echoed in each message.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* Operand code of the erroneous operand, or NIL if the index is
     unknown (IDX < 0).  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    as_bad (_("%s -- `%s'"), detail->error, str);
	  else
	    as_bad (_("%s at operand %d -- `%s'"),
		    detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
		  aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      as_bad (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g.  given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parsing must succeed; only the encoding is
	     expected to fail again for this invalid variant.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] carry the valid lower/upper bounds.  */
      if (detail->data[0] != detail->data[1])
	as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], detail->data[1], idx + 1, str);
      else
	as_bad (_("%s expected to be %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] carries the expected number of registers.  */
      if (detail->data[0] == 1)
	as_bad (_("invalid number of registers in the list; "
		  "only 1 register is expected at operand %d -- `%s'"),
		idx + 1, str);
      else
	as_bad (_("invalid number of registers in the list; "
		  "%d registers are expected at operand %d -- `%s'"),
		detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] carries the required alignment.  */
      as_bad (_("immediate value should be a multiple of "
		"%d at operand %d -- `%s'"),
	      detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4428
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.  */

static void
output_operand_error_report (char *str)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      DEBUG_TRACE ("single opcode entry with error kind: %s",
		   operand_mismatch_kind_names[head->detail.kind]);
      output_operand_error_record (head, str);
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entres with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind))
	kind = curr->detail.kind;
    }
  gas_assert (kind != AARCH64_OPDE_NIL);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      if (curr->detail.kind != kind)
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The loop above must have selected something.  */
  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
4500 \f
/* Write an AARCH64 instruction to buf - always little-endian, regardless
   of the target data endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *out = (unsigned char *) buf;
  int i;

  for (i = 0; i < 4; i++)
    out[i] = (insn >> (8 * i)) & 0xff;
}
4511
/* Read back an AARCH64 instruction from buf, assembling the four bytes
   little-endian (the inverse of put_aarch64_insn).  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *in = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | in[i];
  return insn;
}
4520
/* Emit the encoded instruction in inst.base.value into the current frag.
   If a relocation was recorded in inst.reloc, create the corresponding
   fix-up, attaching NEW_INST (which may be NULL) for later use by the
   fix-up code.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  to = frag_more (INSN_SIZE);

  /* Mark this frag as containing instructions.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand and flags through to
	     md_apply_fix.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
4554
/* Link together opcodes of the same name.  Each node points at one
   opcode-table entry; the chain of nodes sharing a mnemonic is what
   lookup_mnemonic retrieves from the aarch64_ops_hsh hash table.  */

struct templates
{
  aarch64_opcode *opcode;	/* One candidate opcode for the mnemonic.  */
  struct templates *next;	/* Next opcode with the same mnemonic.  */
};

typedef struct templates templates;
4564
4565 static templates *
4566 lookup_mnemonic (const char *start, int len)
4567 {
4568 templates *templ = NULL;
4569
4570 templ = hash_find_n (aarch64_ops_hsh, start, len);
4571 return templ;
4572 }
4573
4574 /* Subroutine of md_assemble, responsible for looking up the primary
4575 opcode from the mnemonic the user wrote. STR points to the
4576 beginning of the mnemonic. */
4577
4578 static templates *
4579 opcode_lookup (char **str)
4580 {
4581 char *end, *base;
4582 const aarch64_cond *cond;
4583 char condname[16];
4584 int len;
4585
4586 /* Scan up to the end of the mnemonic, which must end in white space,
4587 '.', or end of string. */
4588 for (base = end = *str; is_part_of_name(*end); end++)
4589 if (*end == '.')
4590 break;
4591
4592 if (end == base)
4593 return 0;
4594
4595 inst.cond = COND_ALWAYS;
4596
4597 /* Handle a possible condition. */
4598 if (end[0] == '.')
4599 {
4600 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4601 if (cond)
4602 {
4603 inst.cond = cond->value;
4604 *str = end + 3;
4605 }
4606 else
4607 {
4608 *str = end;
4609 return 0;
4610 }
4611 }
4612 else
4613 *str = end;
4614
4615 len = end - base;
4616
4617 if (inst.cond == COND_ALWAYS)
4618 {
4619 /* Look for unaffixed mnemonic. */
4620 return lookup_mnemonic (base, len);
4621 }
4622 else if (len <= 13)
4623 {
4624 /* append ".c" to mnemonic if conditional */
4625 memcpy (condname, base, len);
4626 memcpy (condname + len, ".c", 2);
4627 base = condname;
4628 len += 2;
4629 return lookup_mnemonic (base, len);
4630 }
4631
4632 return NULL;
4633 }
4634
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  An indexed vector type maps to a
   scalar-element qualifier; a plain vector type maps to one of the
   "<lanes>x<size>" vector qualifiers.  Return AARCH64_OPND_QLF_NIL (after
   recording an error) if the arrangement is not representable.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier (the fewest-lane arrangement) for each element type;
     the final qualifier is an offset from this base.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_8B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & NTA_HASINDEX)
    /* Vector element register.  The scalar qualifiers are laid out in the
       same order as the element types, so a simple offset suffices.  */
    return AARCH64_OPND_QLF_S_B + vectype->type;
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only full (16-byte), half (8-byte) and quarter (4-byte) register
	 arrangements are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 4;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      /* Sanity-check that the result is within the vector qualifier range.  */
      gas_assert (AARCH64_OPND_QLF_V_8B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
4694
4695 /* Process an optional operand that is found omitted from the assembly line.
4696 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4697 instruction's opcode entry while IDX is the index of this omitted operand.
4698 */
4699
4700 static void
4701 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4702 int idx, aarch64_opnd_info *operand)
4703 {
4704 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4705 gas_assert (optional_operand_p (opcode, idx));
4706 gas_assert (!operand->present);
4707
4708 switch (type)
4709 {
4710 case AARCH64_OPND_Rd:
4711 case AARCH64_OPND_Rn:
4712 case AARCH64_OPND_Rm:
4713 case AARCH64_OPND_Rt:
4714 case AARCH64_OPND_Rt2:
4715 case AARCH64_OPND_Rs:
4716 case AARCH64_OPND_Ra:
4717 case AARCH64_OPND_Rt_SYS:
4718 case AARCH64_OPND_Rd_SP:
4719 case AARCH64_OPND_Rn_SP:
4720 case AARCH64_OPND_Fd:
4721 case AARCH64_OPND_Fn:
4722 case AARCH64_OPND_Fm:
4723 case AARCH64_OPND_Fa:
4724 case AARCH64_OPND_Ft:
4725 case AARCH64_OPND_Ft2:
4726 case AARCH64_OPND_Sd:
4727 case AARCH64_OPND_Sn:
4728 case AARCH64_OPND_Sm:
4729 case AARCH64_OPND_Vd:
4730 case AARCH64_OPND_Vn:
4731 case AARCH64_OPND_Vm:
4732 case AARCH64_OPND_VdD1:
4733 case AARCH64_OPND_VnD1:
4734 operand->reg.regno = default_value;
4735 break;
4736
4737 case AARCH64_OPND_Ed:
4738 case AARCH64_OPND_En:
4739 case AARCH64_OPND_Em:
4740 operand->reglane.regno = default_value;
4741 break;
4742
4743 case AARCH64_OPND_IDX:
4744 case AARCH64_OPND_BIT_NUM:
4745 case AARCH64_OPND_IMMR:
4746 case AARCH64_OPND_IMMS:
4747 case AARCH64_OPND_SHLL_IMM:
4748 case AARCH64_OPND_IMM_VLSL:
4749 case AARCH64_OPND_IMM_VLSR:
4750 case AARCH64_OPND_CCMP_IMM:
4751 case AARCH64_OPND_FBITS:
4752 case AARCH64_OPND_UIMM4:
4753 case AARCH64_OPND_UIMM3_OP1:
4754 case AARCH64_OPND_UIMM3_OP2:
4755 case AARCH64_OPND_IMM:
4756 case AARCH64_OPND_WIDTH:
4757 case AARCH64_OPND_UIMM7:
4758 case AARCH64_OPND_NZCV:
4759 operand->imm.value = default_value;
4760 break;
4761
4762 case AARCH64_OPND_EXCEPTION:
4763 inst.reloc.type = BFD_RELOC_UNUSED;
4764 break;
4765
4766 case AARCH64_OPND_BARRIER_ISB:
4767 operand->barrier = aarch64_barrier_options + default_value;
4768
4769 default:
4770 break;
4771 }
4772 }
4773
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   The MOVW relocations select one 16-bit "group" of the target value
   (G0 = bits 0-15, G1 = bits 16-31, G2 = bits 32-47, G3 = bits 48-63);
   this routine sets the immediate operand's shift amount accordingly and
   rejects combinations that are invalid for the opcode (MOVK) or for a
   32-bit destination register.  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* Whether the destination is a 32-bit (W) register.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* The signed / TLS-initial-group relocations are meaningless for MOVK,
     which only ever inserts a 16-bit chunk without sign information.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  switch (inst.reloc.type)
    {
    /* Group 0: bits 0-15, no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* Group 1: bits 16-31, LSL #16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* Group 2: bits 32-47, LSL #32; only valid for 64-bit registers.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    /* Group 3: bits 48-63, LSL #48; only valid for 64-bit registers.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
4864
/* A primitive base-2 log calculator for exact powers of two in the
   range 1..16.  Assert and return (unsigned int) -1 for any other
   input.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* log2 of SIZE, indexed by SIZE - 1; (unsigned char) -1 marks entries
     that are not powers of two.  */
  static const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16: the previous check only caught
     values above 16, so ls[size - 1] would have read far out of bounds
     for a zero size.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
4880
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12 (or its
   TLS DTPREL variants).  The real type is selected by the transfer
   size implied by operand 1's qualifier: LDST8 for bytes up to LDST128
   for quadwords.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: pseudo reloc type (plain / DTPREL / DTPREL_NC).
     Columns: log2 of the transfer size in bytes (0..4).
     The TLS rows have no 128-bit variant, hence BFD_RELOC_AARCH64_NONE.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand's qualifier was not given explicitly, deduce
     it from operand 0's qualifier.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* The TLS rows have no 128-bit column (see table above).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index
     calculation below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
4939
4940 /* Check whether a register list REGINFO is valid. The registers must be
4941 numbered in increasing order (modulo 32), in increments of one or two.
4942
4943 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4944 increments of two.
4945
4946 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4947
4948 static bfd_boolean
4949 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4950 {
4951 uint32_t i, nb_regs, prev_regno, incr;
4952
4953 nb_regs = 1 + (reginfo & 0x3);
4954 reginfo >>= 2;
4955 prev_regno = reginfo & 0x1f;
4956 incr = accept_alternate ? 2 : 1;
4957
4958 for (i = 1; i < nb_regs; ++i)
4959 {
4960 uint32_t curr_regno;
4961 reginfo >>= 5;
4962 curr_regno = reginfo & 0x1f;
4963 if (curr_regno != ((prev_regno + incr) & 0x1f))
4964 return FALSE;
4965 prev_regno = curr_regno;
4966 }
4967
4968 return TRUE;
4969 }
4970
4971 /* Generic instruction operand parser. This does no encoding and no
4972 semantic validation; it merely squirrels values away in the inst
4973 structure. Returns TRUE or FALSE depending on whether the
4974 specified grammar matched. */
4975
4976 static bfd_boolean
4977 parse_operands (char *str, const aarch64_opcode *opcode)
4978 {
4979 int i;
4980 char *backtrack_pos = 0;
4981 const enum aarch64_opnd *operands = opcode->operands;
4982 aarch64_reg_type imm_reg_type;
4983
4984 clear_error ();
4985 skip_whitespace (str);
4986
4987 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
4988
4989 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4990 {
4991 int64_t val;
4992 int isreg32, isregzero;
4993 int comma_skipped_p = 0;
4994 aarch64_reg_type rtype;
4995 struct vector_type_el vectype;
4996 aarch64_opnd_info *info = &inst.base.operands[i];
4997
4998 DEBUG_TRACE ("parse operand %d", i);
4999
5000 /* Assign the operand code. */
5001 info->type = operands[i];
5002
5003 if (optional_operand_p (opcode, i))
5004 {
5005 /* Remember where we are in case we need to backtrack. */
5006 gas_assert (!backtrack_pos);
5007 backtrack_pos = str;
5008 }
5009
5010 /* Expect comma between operands; the backtrack mechanizm will take
5011 care of cases of omitted optional operand. */
5012 if (i > 0 && ! skip_past_char (&str, ','))
5013 {
5014 set_syntax_error (_("comma expected between operands"));
5015 goto failure;
5016 }
5017 else
5018 comma_skipped_p = 1;
5019
5020 switch (operands[i])
5021 {
5022 case AARCH64_OPND_Rd:
5023 case AARCH64_OPND_Rn:
5024 case AARCH64_OPND_Rm:
5025 case AARCH64_OPND_Rt:
5026 case AARCH64_OPND_Rt2:
5027 case AARCH64_OPND_Rs:
5028 case AARCH64_OPND_Ra:
5029 case AARCH64_OPND_Rt_SYS:
5030 case AARCH64_OPND_PAIRREG:
5031 po_int_reg_or_fail (1, 0);
5032 break;
5033
5034 case AARCH64_OPND_Rd_SP:
5035 case AARCH64_OPND_Rn_SP:
5036 po_int_reg_or_fail (0, 1);
5037 break;
5038
5039 case AARCH64_OPND_Rm_EXT:
5040 case AARCH64_OPND_Rm_SFT:
5041 po_misc_or_fail (parse_shifter_operand
5042 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5043 ? SHIFTED_ARITH_IMM
5044 : SHIFTED_LOGIC_IMM)));
5045 if (!info->shifter.operator_present)
5046 {
5047 /* Default to LSL if not present. Libopcodes prefers shifter
5048 kind to be explicit. */
5049 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5050 info->shifter.kind = AARCH64_MOD_LSL;
5051 /* For Rm_EXT, libopcodes will carry out further check on whether
5052 or not stack pointer is used in the instruction (Recall that
5053 "the extend operator is not optional unless at least one of
5054 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5055 }
5056 break;
5057
5058 case AARCH64_OPND_Fd:
5059 case AARCH64_OPND_Fn:
5060 case AARCH64_OPND_Fm:
5061 case AARCH64_OPND_Fa:
5062 case AARCH64_OPND_Ft:
5063 case AARCH64_OPND_Ft2:
5064 case AARCH64_OPND_Sd:
5065 case AARCH64_OPND_Sn:
5066 case AARCH64_OPND_Sm:
5067 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5068 if (val == PARSE_FAIL)
5069 {
5070 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5071 goto failure;
5072 }
5073 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5074
5075 info->reg.regno = val;
5076 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5077 break;
5078
5079 case AARCH64_OPND_Vd:
5080 case AARCH64_OPND_Vn:
5081 case AARCH64_OPND_Vm:
5082 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5083 if (val == PARSE_FAIL)
5084 {
5085 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5086 goto failure;
5087 }
5088 if (vectype.defined & NTA_HASINDEX)
5089 goto failure;
5090
5091 info->reg.regno = val;
5092 info->qualifier = vectype_to_qualifier (&vectype);
5093 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5094 goto failure;
5095 break;
5096
5097 case AARCH64_OPND_VdD1:
5098 case AARCH64_OPND_VnD1:
5099 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5100 if (val == PARSE_FAIL)
5101 {
5102 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5103 goto failure;
5104 }
5105 if (vectype.type != NT_d || vectype.index != 1)
5106 {
5107 set_fatal_syntax_error
5108 (_("the top half of a 128-bit FP/SIMD register is expected"));
5109 goto failure;
5110 }
5111 info->reg.regno = val;
5112 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5113 here; it is correct for the purpose of encoding/decoding since
5114 only the register number is explicitly encoded in the related
5115 instructions, although this appears a bit hacky. */
5116 info->qualifier = AARCH64_OPND_QLF_S_D;
5117 break;
5118
5119 case AARCH64_OPND_Ed:
5120 case AARCH64_OPND_En:
5121 case AARCH64_OPND_Em:
5122 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5123 if (val == PARSE_FAIL)
5124 {
5125 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5126 goto failure;
5127 }
5128 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5129 goto failure;
5130
5131 info->reglane.regno = val;
5132 info->reglane.index = vectype.index;
5133 info->qualifier = vectype_to_qualifier (&vectype);
5134 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5135 goto failure;
5136 break;
5137
5138 case AARCH64_OPND_LVn:
5139 case AARCH64_OPND_LVt:
5140 case AARCH64_OPND_LVt_AL:
5141 case AARCH64_OPND_LEt:
5142 if ((val = parse_vector_reg_list (&str, REG_TYPE_VN,
5143 &vectype)) == PARSE_FAIL)
5144 goto failure;
5145 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5146 {
5147 set_fatal_syntax_error (_("invalid register list"));
5148 goto failure;
5149 }
5150 info->reglist.first_regno = (val >> 2) & 0x1f;
5151 info->reglist.num_regs = (val & 0x3) + 1;
5152 if (operands[i] == AARCH64_OPND_LEt)
5153 {
5154 if (!(vectype.defined & NTA_HASINDEX))
5155 goto failure;
5156 info->reglist.has_index = 1;
5157 info->reglist.index = vectype.index;
5158 }
5159 else if (!(vectype.defined & NTA_HASTYPE))
5160 goto failure;
5161 info->qualifier = vectype_to_qualifier (&vectype);
5162 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5163 goto failure;
5164 break;
5165
5166 case AARCH64_OPND_Cn:
5167 case AARCH64_OPND_Cm:
5168 po_reg_or_fail (REG_TYPE_CN);
5169 if (val > 15)
5170 {
5171 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
5172 goto failure;
5173 }
5174 inst.base.operands[i].reg.regno = val;
5175 break;
5176
5177 case AARCH64_OPND_SHLL_IMM:
5178 case AARCH64_OPND_IMM_VLSR:
5179 po_imm_or_fail (1, 64);
5180 info->imm.value = val;
5181 break;
5182
5183 case AARCH64_OPND_CCMP_IMM:
5184 case AARCH64_OPND_FBITS:
5185 case AARCH64_OPND_UIMM4:
5186 case AARCH64_OPND_UIMM3_OP1:
5187 case AARCH64_OPND_UIMM3_OP2:
5188 case AARCH64_OPND_IMM_VLSL:
5189 case AARCH64_OPND_IMM:
5190 case AARCH64_OPND_WIDTH:
5191 po_imm_nc_or_fail ();
5192 info->imm.value = val;
5193 break;
5194
5195 case AARCH64_OPND_UIMM7:
5196 po_imm_or_fail (0, 127);
5197 info->imm.value = val;
5198 break;
5199
5200 case AARCH64_OPND_IDX:
5201 case AARCH64_OPND_BIT_NUM:
5202 case AARCH64_OPND_IMMR:
5203 case AARCH64_OPND_IMMS:
5204 po_imm_or_fail (0, 63);
5205 info->imm.value = val;
5206 break;
5207
5208 case AARCH64_OPND_IMM0:
5209 po_imm_nc_or_fail ();
5210 if (val != 0)
5211 {
5212 set_fatal_syntax_error (_("immediate zero expected"));
5213 goto failure;
5214 }
5215 info->imm.value = 0;
5216 break;
5217
5218 case AARCH64_OPND_FPIMM0:
5219 {
5220 int qfloat;
5221 bfd_boolean res1 = FALSE, res2 = FALSE;
5222 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5223 it is probably not worth the effort to support it. */
5224 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5225 imm_reg_type))
5226 && !(res2 = parse_constant_immediate (&str, &val,
5227 imm_reg_type)))
5228 goto failure;
5229 if ((res1 && qfloat == 0) || (res2 && val == 0))
5230 {
5231 info->imm.value = 0;
5232 info->imm.is_fp = 1;
5233 break;
5234 }
5235 set_fatal_syntax_error (_("immediate zero expected"));
5236 goto failure;
5237 }
5238
5239 case AARCH64_OPND_IMM_MOV:
5240 {
5241 char *saved = str;
5242 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5243 reg_name_p (str, REG_TYPE_VN))
5244 goto failure;
5245 str = saved;
5246 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5247 GE_OPT_PREFIX, 1));
5248 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5249 later. fix_mov_imm_insn will try to determine a machine
5250 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5251 message if the immediate cannot be moved by a single
5252 instruction. */
5253 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5254 inst.base.operands[i].skip = 1;
5255 }
5256 break;
5257
5258 case AARCH64_OPND_SIMD_IMM:
5259 case AARCH64_OPND_SIMD_IMM_SFT:
5260 if (! parse_big_immediate (&str, &val, imm_reg_type))
5261 goto failure;
5262 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5263 /* addr_off_p */ 0,
5264 /* need_libopcodes_p */ 1,
5265 /* skip_p */ 1);
5266 /* Parse shift.
5267 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5268 shift, we don't check it here; we leave the checking to
5269 the libopcodes (operand_general_constraint_met_p). By
5270 doing this, we achieve better diagnostics. */
5271 if (skip_past_comma (&str)
5272 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5273 goto failure;
5274 if (!info->shifter.operator_present
5275 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5276 {
5277 /* Default to LSL if not present. Libopcodes prefers shifter
5278 kind to be explicit. */
5279 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5280 info->shifter.kind = AARCH64_MOD_LSL;
5281 }
5282 break;
5283
5284 case AARCH64_OPND_FPIMM:
5285 case AARCH64_OPND_SIMD_FPIMM:
5286 {
5287 int qfloat;
5288 bfd_boolean dp_p
5289 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5290 == 8);
5291 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5292 goto failure;
5293 if (qfloat == 0)
5294 {
5295 set_fatal_syntax_error (_("invalid floating-point constant"));
5296 goto failure;
5297 }
5298 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5299 inst.base.operands[i].imm.is_fp = 1;
5300 }
5301 break;
5302
5303 case AARCH64_OPND_LIMM:
5304 po_misc_or_fail (parse_shifter_operand (&str, info,
5305 SHIFTED_LOGIC_IMM));
5306 if (info->shifter.operator_present)
5307 {
5308 set_fatal_syntax_error
5309 (_("shift not allowed for bitmask immediate"));
5310 goto failure;
5311 }
5312 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5313 /* addr_off_p */ 0,
5314 /* need_libopcodes_p */ 1,
5315 /* skip_p */ 1);
5316 break;
5317
5318 case AARCH64_OPND_AIMM:
5319 if (opcode->op == OP_ADD)
5320 /* ADD may have relocation types. */
5321 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5322 SHIFTED_ARITH_IMM));
5323 else
5324 po_misc_or_fail (parse_shifter_operand (&str, info,
5325 SHIFTED_ARITH_IMM));
5326 switch (inst.reloc.type)
5327 {
5328 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5329 info->shifter.amount = 12;
5330 break;
5331 case BFD_RELOC_UNUSED:
5332 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5333 if (info->shifter.kind != AARCH64_MOD_NONE)
5334 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5335 inst.reloc.pc_rel = 0;
5336 break;
5337 default:
5338 break;
5339 }
5340 info->imm.value = 0;
5341 if (!info->shifter.operator_present)
5342 {
5343 /* Default to LSL if not present. Libopcodes prefers shifter
5344 kind to be explicit. */
5345 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5346 info->shifter.kind = AARCH64_MOD_LSL;
5347 }
5348 break;
5349
5350 case AARCH64_OPND_HALF:
5351 {
5352 /* #<imm16> or relocation. */
5353 int internal_fixup_p;
5354 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5355 if (internal_fixup_p)
5356 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5357 skip_whitespace (str);
5358 if (skip_past_comma (&str))
5359 {
5360 /* {, LSL #<shift>} */
5361 if (! aarch64_gas_internal_fixup_p ())
5362 {
5363 set_fatal_syntax_error (_("can't mix relocation modifier "
5364 "with explicit shift"));
5365 goto failure;
5366 }
5367 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5368 }
5369 else
5370 inst.base.operands[i].shifter.amount = 0;
5371 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5372 inst.base.operands[i].imm.value = 0;
5373 if (! process_movw_reloc_info ())
5374 goto failure;
5375 }
5376 break;
5377
5378 case AARCH64_OPND_EXCEPTION:
5379 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5380 imm_reg_type));
5381 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5382 /* addr_off_p */ 0,
5383 /* need_libopcodes_p */ 0,
5384 /* skip_p */ 1);
5385 break;
5386
5387 case AARCH64_OPND_NZCV:
5388 {
5389 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5390 if (nzcv != NULL)
5391 {
5392 str += 4;
5393 info->imm.value = nzcv->value;
5394 break;
5395 }
5396 po_imm_or_fail (0, 15);
5397 info->imm.value = val;
5398 }
5399 break;
5400
5401 case AARCH64_OPND_COND:
5402 case AARCH64_OPND_COND1:
5403 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5404 str += 2;
5405 if (info->cond == NULL)
5406 {
5407 set_syntax_error (_("invalid condition"));
5408 goto failure;
5409 }
5410 else if (operands[i] == AARCH64_OPND_COND1
5411 && (info->cond->value & 0xe) == 0xe)
5412 {
5413 /* Not allow AL or NV. */
5414 set_default_error ();
5415 goto failure;
5416 }
5417 break;
5418
5419 case AARCH64_OPND_ADDR_ADRP:
5420 po_misc_or_fail (parse_adrp (&str));
5421 /* Clear the value as operand needs to be relocated. */
5422 info->imm.value = 0;
5423 break;
5424
5425 case AARCH64_OPND_ADDR_PCREL14:
5426 case AARCH64_OPND_ADDR_PCREL19:
5427 case AARCH64_OPND_ADDR_PCREL21:
5428 case AARCH64_OPND_ADDR_PCREL26:
5429 po_misc_or_fail (parse_address_reloc (&str, info));
5430 if (!info->addr.pcrel)
5431 {
5432 set_syntax_error (_("invalid pc-relative address"));
5433 goto failure;
5434 }
5435 if (inst.gen_lit_pool
5436 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5437 {
5438 /* Only permit "=value" in the literal load instructions.
5439 The literal will be generated by programmer_friendly_fixup. */
5440 set_syntax_error (_("invalid use of \"=immediate\""));
5441 goto failure;
5442 }
5443 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5444 {
5445 set_syntax_error (_("unrecognized relocation suffix"));
5446 goto failure;
5447 }
5448 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5449 {
5450 info->imm.value = inst.reloc.exp.X_add_number;
5451 inst.reloc.type = BFD_RELOC_UNUSED;
5452 }
5453 else
5454 {
5455 info->imm.value = 0;
5456 if (inst.reloc.type == BFD_RELOC_UNUSED)
5457 switch (opcode->iclass)
5458 {
5459 case compbranch:
5460 case condbranch:
5461 /* e.g. CBZ or B.COND */
5462 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5463 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5464 break;
5465 case testbranch:
5466 /* e.g. TBZ */
5467 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5468 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5469 break;
5470 case branch_imm:
5471 /* e.g. B or BL */
5472 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5473 inst.reloc.type =
5474 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5475 : BFD_RELOC_AARCH64_JUMP26;
5476 break;
5477 case loadlit:
5478 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5479 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5480 break;
5481 case pcreladdr:
5482 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5483 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5484 break;
5485 default:
5486 gas_assert (0);
5487 abort ();
5488 }
5489 inst.reloc.pc_rel = 1;
5490 }
5491 break;
5492
5493 case AARCH64_OPND_ADDR_SIMPLE:
5494 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5495 /* [<Xn|SP>{, #<simm>}] */
5496 po_char_or_fail ('[');
5497 po_reg_or_fail (REG_TYPE_R64_SP);
5498 /* Accept optional ", #0". */
5499 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5500 && skip_past_char (&str, ','))
5501 {
5502 skip_past_char (&str, '#');
5503 if (! skip_past_char (&str, '0'))
5504 {
5505 set_fatal_syntax_error
5506 (_("the optional immediate offset can only be 0"));
5507 goto failure;
5508 }
5509 }
5510 po_char_or_fail (']');
5511 info->addr.base_regno = val;
5512 break;
5513
5514 case AARCH64_OPND_ADDR_REGOFF:
5515 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5516 po_misc_or_fail (parse_address (&str, info, 0));
5517 if (info->addr.pcrel || !info->addr.offset.is_reg
5518 || !info->addr.preind || info->addr.postind
5519 || info->addr.writeback)
5520 {
5521 set_syntax_error (_("invalid addressing mode"));
5522 goto failure;
5523 }
5524 if (!info->shifter.operator_present)
5525 {
5526 /* Default to LSL if not present. Libopcodes prefers shifter
5527 kind to be explicit. */
5528 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5529 info->shifter.kind = AARCH64_MOD_LSL;
5530 }
5531 /* Qualifier to be deduced by libopcodes. */
5532 break;
5533
5534 case AARCH64_OPND_ADDR_SIMM7:
5535 po_misc_or_fail (parse_address (&str, info, 0));
5536 if (info->addr.pcrel || info->addr.offset.is_reg
5537 || (!info->addr.preind && !info->addr.postind))
5538 {
5539 set_syntax_error (_("invalid addressing mode"));
5540 goto failure;
5541 }
5542 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5543 /* addr_off_p */ 1,
5544 /* need_libopcodes_p */ 1,
5545 /* skip_p */ 0);
5546 break;
5547
5548 case AARCH64_OPND_ADDR_SIMM9:
5549 case AARCH64_OPND_ADDR_SIMM9_2:
5550 po_misc_or_fail (parse_address_reloc (&str, info));
5551 if (info->addr.pcrel || info->addr.offset.is_reg
5552 || (!info->addr.preind && !info->addr.postind)
5553 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5554 && info->addr.writeback))
5555 {
5556 set_syntax_error (_("invalid addressing mode"));
5557 goto failure;
5558 }
5559 if (inst.reloc.type != BFD_RELOC_UNUSED)
5560 {
5561 set_syntax_error (_("relocation not allowed"));
5562 goto failure;
5563 }
5564 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5565 /* addr_off_p */ 1,
5566 /* need_libopcodes_p */ 1,
5567 /* skip_p */ 0);
5568 break;
5569
5570 case AARCH64_OPND_ADDR_UIMM12:
5571 po_misc_or_fail (parse_address_reloc (&str, info));
5572 if (info->addr.pcrel || info->addr.offset.is_reg
5573 || !info->addr.preind || info->addr.writeback)
5574 {
5575 set_syntax_error (_("invalid addressing mode"));
5576 goto failure;
5577 }
5578 if (inst.reloc.type == BFD_RELOC_UNUSED)
5579 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5580 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5581 || (inst.reloc.type
5582 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
5583 || (inst.reloc.type
5584 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
5585 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5586 /* Leave qualifier to be determined by libopcodes. */
5587 break;
5588
5589 case AARCH64_OPND_SIMD_ADDR_POST:
5590 /* [<Xn|SP>], <Xm|#<amount>> */
5591 po_misc_or_fail (parse_address (&str, info, 1));
5592 if (!info->addr.postind || !info->addr.writeback)
5593 {
5594 set_syntax_error (_("invalid addressing mode"));
5595 goto failure;
5596 }
5597 if (!info->addr.offset.is_reg)
5598 {
5599 if (inst.reloc.exp.X_op == O_constant)
5600 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5601 else
5602 {
5603 set_fatal_syntax_error
5604 (_("writeback value should be an immediate constant"));
5605 goto failure;
5606 }
5607 }
5608 /* No qualifier. */
5609 break;
5610
5611 case AARCH64_OPND_SYSREG:
5612 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
5613 == PARSE_FAIL)
5614 {
5615 set_syntax_error (_("unknown or missing system register name"));
5616 goto failure;
5617 }
5618 inst.base.operands[i].sysreg = val;
5619 break;
5620
5621 case AARCH64_OPND_PSTATEFIELD:
5622 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
5623 == PARSE_FAIL)
5624 {
5625 set_syntax_error (_("unknown or missing PSTATE field name"));
5626 goto failure;
5627 }
5628 inst.base.operands[i].pstatefield = val;
5629 break;
5630
5631 case AARCH64_OPND_SYSREG_IC:
5632 inst.base.operands[i].sysins_op =
5633 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5634 goto sys_reg_ins;
5635 case AARCH64_OPND_SYSREG_DC:
5636 inst.base.operands[i].sysins_op =
5637 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5638 goto sys_reg_ins;
5639 case AARCH64_OPND_SYSREG_AT:
5640 inst.base.operands[i].sysins_op =
5641 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5642 goto sys_reg_ins;
5643 case AARCH64_OPND_SYSREG_TLBI:
5644 inst.base.operands[i].sysins_op =
5645 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5646 sys_reg_ins:
5647 if (inst.base.operands[i].sysins_op == NULL)
5648 {
5649 set_fatal_syntax_error ( _("unknown or missing operation name"));
5650 goto failure;
5651 }
5652 break;
5653
5654 case AARCH64_OPND_BARRIER:
5655 case AARCH64_OPND_BARRIER_ISB:
5656 val = parse_barrier (&str);
5657 if (val != PARSE_FAIL
5658 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5659 {
5660 /* ISB only accepts options name 'sy'. */
5661 set_syntax_error
5662 (_("the specified option is not accepted in ISB"));
5663 /* Turn off backtrack as this optional operand is present. */
5664 backtrack_pos = 0;
5665 goto failure;
5666 }
5667 /* This is an extension to accept a 0..15 immediate. */
5668 if (val == PARSE_FAIL)
5669 po_imm_or_fail (0, 15);
5670 info->barrier = aarch64_barrier_options + val;
5671 break;
5672
5673 case AARCH64_OPND_PRFOP:
5674 val = parse_pldop (&str);
5675 /* This is an extension to accept a 0..31 immediate. */
5676 if (val == PARSE_FAIL)
5677 po_imm_or_fail (0, 31);
5678 inst.base.operands[i].prfop = aarch64_prfops + val;
5679 break;
5680
5681 case AARCH64_OPND_BARRIER_PSB:
5682 val = parse_barrier_psb (&str, &(info->hint_option));
5683 if (val == PARSE_FAIL)
5684 goto failure;
5685 break;
5686
5687 default:
5688 as_fatal (_("unhandled operand code %d"), operands[i]);
5689 }
5690
5691 /* If we get here, this operand was successfully parsed. */
5692 inst.base.operands[i].present = 1;
5693 continue;
5694
5695 failure:
5696 /* The parse routine should already have set the error, but in case
5697 not, set a default one here. */
5698 if (! error_p ())
5699 set_default_error ();
5700
5701 if (! backtrack_pos)
5702 goto parse_operands_return;
5703
5704 {
5705 /* We reach here because this operand is marked as optional, and
5706 either no operand was supplied or the operand was supplied but it
5707 was syntactically incorrect. In the latter case we report an
5708 error. In the former case we perform a few more checks before
5709 dropping through to the code to insert the default operand. */
5710
5711 char *tmp = backtrack_pos;
5712 char endchar = END_OF_INSN;
5713
5714 if (i != (aarch64_num_of_operands (opcode) - 1))
5715 endchar = ',';
5716 skip_past_char (&tmp, ',');
5717
5718 if (*tmp != endchar)
5719 /* The user has supplied an operand in the wrong format. */
5720 goto parse_operands_return;
5721
5722 /* Make sure there is not a comma before the optional operand.
5723 For example the fifth operand of 'sys' is optional:
5724
5725 sys #0,c0,c0,#0, <--- wrong
5726 sys #0,c0,c0,#0 <--- correct. */
5727 if (comma_skipped_p && i && endchar == END_OF_INSN)
5728 {
5729 set_fatal_syntax_error
5730 (_("unexpected comma before the omitted optional operand"));
5731 goto parse_operands_return;
5732 }
5733 }
5734
5735 /* Reaching here means we are dealing with an optional operand that is
5736 omitted from the assembly line. */
5737 gas_assert (optional_operand_p (opcode, i));
5738 info->present = 0;
5739 process_omitted_operand (operands[i], opcode, i, info);
5740
5741 /* Try again, skipping the optional operand at backtrack_pos. */
5742 str = backtrack_pos;
5743 backtrack_pos = 0;
5744
5745 /* Clear any error record after the omitted optional operand has been
5746 successfully handled. */
5747 clear_error ();
5748 }
5749
5750 /* Check if we have parsed all the operands. */
5751 if (*str != '\0' && ! error_p ())
5752 {
5753 /* Set I to the index of the last present operand; this is
5754 for the purpose of diagnostics. */
5755 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5756 ;
5757 set_fatal_syntax_error
5758 (_("unexpected characters following instruction"));
5759 }
5760
5761 parse_operands_return:
5762
5763 if (error_p ())
5764 {
5765 DEBUG_TRACE ("parsing FAIL: %s - %s",
5766 operand_mismatch_kind_names[get_error_kind ()],
5767 get_error_message ());
5768 /* Record the operand error properly; this is useful when there
5769 are multiple instruction templates for a mnemonic name, so that
5770 later on, we can select the error that most closely describes
5771 the problem. */
5772 record_operand_error (opcode, i, get_error_kind (),
5773 get_error_message ());
5774 return FALSE;
5775 }
5776 else
5777 {
5778 DEBUG_TRACE ("parsing SUCCESS");
5779 return TRUE;
5780 }
5781 }
5782
/* Perform fix-ups on the just-parsed instruction *INSTR to provide
   programmer-friendly features while keeping libopcodes happy,
   i.e. libopcodes only accepts the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  Here the W form is accepted: range-check the
	 bit number against the 32-bit register width and then rewrite the
	 qualifier to the X form that libopcodes expects.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW reads a 32-bit value from the pool even though the
	     destination qualifier is X.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      /* Most instruction classes need no fixing up.  */
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
5890
/* Check for loads and stores that will cause unpredictable behavior.

   INSTR is the fully-parsed instruction and STR the original assembly
   line, quoted in the diagnostic.  Only warnings (not errors) are
   issued: the encodings are valid, merely architecturally
   UNPREDICTABLE.  Two cases are checked:
   - a load/store with writeback whose transfer register is also the
     (non-SP) base register;
   - a load pair whose two transfer registers are the same.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;
    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pair instructions the address operand is the third operand,
	 and either transfer register may clash with the base.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.  (Bit 22 appears
	 to distinguish loads from stores in these pair encodings --
	 NOTE(review): confirm against the encoding tables.)  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;
    default:
      break;
    }
}
5933
5934 /* A wrapper function to interface with libopcodes on encoding and
5935 record the error message if there is any.
5936
5937 Return TRUE on success; otherwise return FALSE. */
5938
5939 static bfd_boolean
5940 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5941 aarch64_insn *code)
5942 {
5943 aarch64_operand_error error_info;
5944 error_info.kind = AARCH64_OPDE_NIL;
5945 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5946 return TRUE;
5947 else
5948 {
5949 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5950 record_operand_error_info (opcode, &error_info);
5951 return FALSE;
5952 }
5953 }
5954
#ifdef DEBUG_AARCH64
/* Emit the operand list of OPCODE to the debug log, one line per
   operand, preferring the operand's name and falling back to its
   description when the name is empty.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int i;

  for (i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *name = aarch64_get_operand_name (opcode->operands[i]);

      aarch64_verbose ("\t\t opnd%d: %s", i,
		       name[0] != '\0'
		       ? name
		       : aarch64_get_operand_desc (opcode->operands[i]));
    }
}
#endif /* DEBUG_AARCH64 */
5970
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.

   Every opcode entry sharing the looked-up mnemonic is tried in turn
   until one both parses and encodes successfully; if none does, the
   accumulated operand diagnostics are reported at the end.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* P is advanced past the mnemonic on a successful lookup.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the parsed condition code across the per-template reset of
     the global instruction record.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; any of the three stages may fail and
	 send us on to the next template.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }
	  return;
	}

      /* This template failed; reset and try the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str);
}
6104
/* Various frobbings of labels and their addresses.  */

/* Start-of-line hook: forget any label remembered from the previous
   line, so md_assemble only re-anchors labels defined on the current
   line.  */

void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
6112
/* Remember SYM as the most recently defined label (md_assemble may
   later re-anchor it to the instruction's frag) and emit DWARF
   line-number information for it.  */

void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
6120
6121 int
6122 aarch64_data_in_code (void)
6123 {
6124 if (!strncmp (input_line_pointer + 1, "data:", 5))
6125 {
6126 *input_line_pointer = '/';
6127 input_line_pointer += 5;
6128 *input_line_pointer = 0;
6129 return 1;
6130 }
6131
6132 return 0;
6133 }
6134
/* Canonicalize NAME in place by stripping a trailing "/data" suffix
   (see aarch64_data_in_code).  Return NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
6145 \f
6146 /* Table of all register names defined by default. The user can
6147 define additional names with .req. Note that all register names
6148 should appear in both upper and lowercase variants. Some registers
6149 also have mixed-case names. */
6150
6151 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
6152 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
6153 #define REGSET31(p,t) \
6154 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
6155 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
6156 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
6157 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
6158 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
6159 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
6160 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
6161 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
6162 #define REGSET(p,t) \
6163 REGSET31(p,t), REGNUM(p,31,t)
6164
6165 /* These go into aarch64_reg_hsh hash-table. */
6166 static const reg_entry reg_names[] = {
6167 /* Integer registers. */
6168 REGSET31 (x, R_64), REGSET31 (X, R_64),
6169 REGSET31 (w, R_32), REGSET31 (W, R_32),
6170
6171 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
6172 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
6173
6174 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
6175 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
6176
6177 /* Coprocessor register numbers. */
6178 REGSET (c, CN), REGSET (C, CN),
6179
6180 /* Floating-point single precision registers. */
6181 REGSET (s, FP_S), REGSET (S, FP_S),
6182
6183 /* Floating-point double precision registers. */
6184 REGSET (d, FP_D), REGSET (D, FP_D),
6185
6186 /* Floating-point half precision registers. */
6187 REGSET (h, FP_H), REGSET (H, FP_H),
6188
6189 /* Floating-point byte precision registers. */
6190 REGSET (b, FP_B), REGSET (B, FP_B),
6191
6192 /* Floating-point quad precision registers. */
6193 REGSET (q, FP_Q), REGSET (Q, FP_Q),
6194
6195 /* FP/SIMD registers. */
6196 REGSET (v, VN), REGSET (V, VN),
6197 };
6198
6199 #undef REGDEF
6200 #undef REGNUM
6201 #undef REGSET
6202
/* Single-letter helpers used to spell out the NZCV flag immediates
   below: an uppercase letter means that flag bit is set, lowercase
   means it is clear.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four flag bits into a 4-bit immediate, N being the most
   significant bit and V the least.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All sixteen spellings of the NZCV condition-flag operand, each
   mapped to its 4-bit immediate value.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
6240 \f
6241 /* MD interface: bits in the object file. */
6242
6243 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6244 for use in the a.out file, and stores them in the array pointed to by buf.
6245 This knows about the endian-ness of the target machine and does
6246 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
6247 2 (short) and 4 (long) Floating numbers are put out as a series of
6248 LITTLENUMS (shorts, here at least). */
6249
6250 void
6251 md_number_to_chars (char *buf, valueT val, int n)
6252 {
6253 if (target_big_endian)
6254 number_to_chars_bigendian (buf, val, n);
6255 else
6256 number_to_chars_littleendian (buf, val, n);
6257 }
6258
6259 /* MD interface: Sections. */
6260
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes -- AArch64 instructions are a fixed 32 bits wide.  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  fragp->fr_var = 4;
  return 4;
}
6270
/* Round up a section size to the appropriate boundary.  No extra
   rounding is needed here, so return SIZE unchanged.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  return size;
}
6278
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed between the end of this frag's fixed part and
     the start of the next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes of zero-fill needed to reach a 4-byte instruction boundary.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* Store one NOP as the repeating (fr_var) pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
6336
6337 /* Perform target specific initialisation of a frag.
6338 Note - despite the name this initialisation is not done when the frag
6339 is created, but only when its type is assigned. A frag can be created
6340 and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */
6342
6343 #ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping-symbol state, so there is
   nothing to initialise.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}
6349
6350 #else /* OBJ_ELF is defined. */
/* ELF version: record per-frag mapping-symbol state and update the
   mapping state ($d/$x) according to the frag's type, emitting a
   mapping symbol covering up to MAX_CHARS bytes if needed.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
6377 \f
/* Initialize the DWARF-2 unwind information for this procedure:
   the CFA starts as SP + 0.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
6385 #endif /* OBJ_ELF */
6386
6387 /* Convert REGNAME to a DWARF-2 register number. */
6388
6389 int
6390 tc_aarch64_regname_to_dw2regnum (char *regname)
6391 {
6392 const reg_entry *reg = parse_reg (&regname);
6393 if (reg == NULL)
6394 return -1;
6395
6396 switch (reg->type)
6397 {
6398 case REG_TYPE_SP_32:
6399 case REG_TYPE_SP_64:
6400 case REG_TYPE_R_32:
6401 case REG_TYPE_R_64:
6402 return reg->number;
6403
6404 case REG_TYPE_FP_B:
6405 case REG_TYPE_FP_H:
6406 case REG_TYPE_FP_S:
6407 case REG_TYPE_FP_D:
6408 case REG_TYPE_FP_Q:
6409 return reg->number + 64;
6410
6411 default:
6412 break;
6413 }
6414 return -1;
6415 }
6416
/* Implement DWARF2_ADDR_SIZE: return the size in bytes of a DWARF
   address -- 4 under the ILP32 ABI, otherwise the target's native
   address size.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
6428
6429 /* MD interface: Symbol and relocation handling. */
6430
/* Return the address within the segment that a PC-relative fixup FIXP
   in segment SEG is relative to.  For AArch64, PC-relative fixups
   applied to instructions are generally relative to the location plus
   AARCH64_PCREL_OFFSET bytes.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
6451
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE_, creating the GOT
   symbol lazily on first reference.  Otherwise we have no need to
   default values of symbols, so return 0 (no symbol).  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character pre-filter before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
6477
6478 /* Return non-zero if the indicated VALUE has overflowed the maximum
6479 range expressible by a unsigned number with the indicated number of
6480 BITS. */
6481
6482 static bfd_boolean
6483 unsigned_overflow (valueT value, unsigned bits)
6484 {
6485 valueT lim;
6486 if (bits >= sizeof (valueT) * 8)
6487 return FALSE;
6488 lim = (valueT) 1 << bits;
6489 return (value >= lim);
6490 }
6491
6492
6493 /* Return non-zero if the indicated VALUE has overflowed the maximum
6494 range expressible by an signed number with the indicated number of
6495 BITS. */
6496
6497 static bfd_boolean
6498 signed_overflow (offsetT value, unsigned bits)
6499 {
6500 offsetT lim;
6501 if (bits >= sizeof (offsetT) * 8)
6502 return FALSE;
6503 lim = (offsetT) 1 << (bits - 1);
6504 return (value < -lim || value >= lim);
6505 }
6506
/* Given an instruction in *INSTR, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode onto its unscaled counterpart;
     OP_NIL marks opcodes that have none.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS: new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure here simply means
     the unscaled form does not fit either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
    return FALSE;

  return TRUE;
}
6568
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to load
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversibility an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN; previously mislabelled
	 as MOVK, which is never a MOV alias).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
6629
6630 /* An instruction operand which is immediate related may have symbol used
6631 in the assembly, e.g.
6632
6633 mov w0, u32
6634 .set u32, 0x00ffff00
6635
6636 At the time when the assembly instruction is parsed, a referenced symbol,
6637 like 'u32' in the above example may not have been seen; a fixS is created
6638 in such a case and is handled here after symbols have been resolved.
6639 Instruction is fixed up with VALUE using the information in *FIXP plus
6640 extra information in FLAGS.
6641
6642 This function is called by md_apply_fix to fix up instructions that need
6643 a fix-up described above but does not involve any linker-time relocation. */
6644
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  /* Dispatch on the kind of operand the now-resolved VALUE supplies.  */
  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* Immediate of an exception-generating instruction (e.g. SVC);
	 must fit in 16 unsigned bits.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
	 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
	 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
	 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  A negative immediate is folded by switching the
	     instruction between ADD and SUB and negating the value.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only possible
	     when the low 12 bits are clear and the value fits in 24 bits;
	     only attempted when the user wrote no explicit shift.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction via the
	 opcode-level encoder since bitmask legality is non-trivial.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate (MOVZ/MOVN/MOVK payload).  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to load a 32-bit/64-bit immediate value into a general
	 register.  An assembler error shall result if the immediate cannot
	 be created by a single one of these instructions.  If there is a
	 choice, then to ensure reversibility an assembler must prefer a
	 MOVZ to MOVN, and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_UIMM12:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Scaled-offset form did not fit; fall back to the unscaled
	     (LDUR/STUR-style) encoding when the opcode class allows.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
6804
6805 /* Apply a fixup (fixP) to segment data, once it has been determined
6806 by our caller that we have all the info we need to fix it up.
6807
6808 Parameter valP is the pointer to the value of the bits. */
6809
6810 void
6811 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6812 {
6813 offsetT value = *valP;
6814 uint32_t insn;
6815 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6816 int scale;
6817 unsigned flags = fixP->fx_addnumber;
6818
6819 DEBUG_TRACE ("\n\n");
6820 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6821 DEBUG_TRACE ("Enter md_apply_fix");
6822
6823 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6824
6825 /* Note whether this will delete the relocation. */
6826
6827 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6828 fixP->fx_done = 1;
6829
6830 /* Process the relocations. */
6831 switch (fixP->fx_r_type)
6832 {
6833 case BFD_RELOC_NONE:
6834 /* This will need to go in the object file. */
6835 fixP->fx_done = 0;
6836 break;
6837
6838 case BFD_RELOC_8:
6839 case BFD_RELOC_8_PCREL:
6840 if (fixP->fx_done || !seg->use_rela_p)
6841 md_number_to_chars (buf, value, 1);
6842 break;
6843
6844 case BFD_RELOC_16:
6845 case BFD_RELOC_16_PCREL:
6846 if (fixP->fx_done || !seg->use_rela_p)
6847 md_number_to_chars (buf, value, 2);
6848 break;
6849
6850 case BFD_RELOC_32:
6851 case BFD_RELOC_32_PCREL:
6852 if (fixP->fx_done || !seg->use_rela_p)
6853 md_number_to_chars (buf, value, 4);
6854 break;
6855
6856 case BFD_RELOC_64:
6857 case BFD_RELOC_64_PCREL:
6858 if (fixP->fx_done || !seg->use_rela_p)
6859 md_number_to_chars (buf, value, 8);
6860 break;
6861
6862 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6863 /* We claim that these fixups have been processed here, even if
6864 in fact we generate an error because we do not have a reloc
6865 for them, so tc_gen_reloc() will reject them. */
6866 fixP->fx_done = 1;
6867 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6868 {
6869 as_bad_where (fixP->fx_file, fixP->fx_line,
6870 _("undefined symbol %s used as an immediate value"),
6871 S_GET_NAME (fixP->fx_addsy));
6872 goto apply_fix_return;
6873 }
6874 fix_insn (fixP, flags, value);
6875 break;
6876
6877 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6878 if (fixP->fx_done || !seg->use_rela_p)
6879 {
6880 if (value & 3)
6881 as_bad_where (fixP->fx_file, fixP->fx_line,
6882 _("pc-relative load offset not word aligned"));
6883 if (signed_overflow (value, 21))
6884 as_bad_where (fixP->fx_file, fixP->fx_line,
6885 _("pc-relative load offset out of range"));
6886 insn = get_aarch64_insn (buf);
6887 insn |= encode_ld_lit_ofs_19 (value >> 2);
6888 put_aarch64_insn (buf, insn);
6889 }
6890 break;
6891
6892 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6893 if (fixP->fx_done || !seg->use_rela_p)
6894 {
6895 if (signed_overflow (value, 21))
6896 as_bad_where (fixP->fx_file, fixP->fx_line,
6897 _("pc-relative address offset out of range"));
6898 insn = get_aarch64_insn (buf);
6899 insn |= encode_adr_imm (value);
6900 put_aarch64_insn (buf, insn);
6901 }
6902 break;
6903
6904 case BFD_RELOC_AARCH64_BRANCH19:
6905 if (fixP->fx_done || !seg->use_rela_p)
6906 {
6907 if (value & 3)
6908 as_bad_where (fixP->fx_file, fixP->fx_line,
6909 _("conditional branch target not word aligned"));
6910 if (signed_overflow (value, 21))
6911 as_bad_where (fixP->fx_file, fixP->fx_line,
6912 _("conditional branch out of range"));
6913 insn = get_aarch64_insn (buf);
6914 insn |= encode_cond_branch_ofs_19 (value >> 2);
6915 put_aarch64_insn (buf, insn);
6916 }
6917 break;
6918
6919 case BFD_RELOC_AARCH64_TSTBR14:
6920 if (fixP->fx_done || !seg->use_rela_p)
6921 {
6922 if (value & 3)
6923 as_bad_where (fixP->fx_file, fixP->fx_line,
6924 _("conditional branch target not word aligned"));
6925 if (signed_overflow (value, 16))
6926 as_bad_where (fixP->fx_file, fixP->fx_line,
6927 _("conditional branch out of range"));
6928 insn = get_aarch64_insn (buf);
6929 insn |= encode_tst_branch_ofs_14 (value >> 2);
6930 put_aarch64_insn (buf, insn);
6931 }
6932 break;
6933
6934 case BFD_RELOC_AARCH64_CALL26:
6935 case BFD_RELOC_AARCH64_JUMP26:
6936 if (fixP->fx_done || !seg->use_rela_p)
6937 {
6938 if (value & 3)
6939 as_bad_where (fixP->fx_file, fixP->fx_line,
6940 _("branch target not word aligned"));
6941 if (signed_overflow (value, 28))
6942 as_bad_where (fixP->fx_file, fixP->fx_line,
6943 _("branch out of range"));
6944 insn = get_aarch64_insn (buf);
6945 insn |= encode_branch_ofs_26 (value >> 2);
6946 put_aarch64_insn (buf, insn);
6947 }
6948 break;
6949
6950 case BFD_RELOC_AARCH64_MOVW_G0:
6951 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6952 case BFD_RELOC_AARCH64_MOVW_G0_S:
6953 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6954 scale = 0;
6955 goto movw_common;
6956 case BFD_RELOC_AARCH64_MOVW_G1:
6957 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6958 case BFD_RELOC_AARCH64_MOVW_G1_S:
6959 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6960 scale = 16;
6961 goto movw_common;
6962 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6963 scale = 0;
6964 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6965 /* Should always be exported to object file, see
6966 aarch64_force_relocation(). */
6967 gas_assert (!fixP->fx_done);
6968 gas_assert (seg->use_rela_p);
6969 goto movw_common;
6970 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6971 scale = 16;
6972 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6973 /* Should always be exported to object file, see
6974 aarch64_force_relocation(). */
6975 gas_assert (!fixP->fx_done);
6976 gas_assert (seg->use_rela_p);
6977 goto movw_common;
6978 case BFD_RELOC_AARCH64_MOVW_G2:
6979 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6980 case BFD_RELOC_AARCH64_MOVW_G2_S:
6981 scale = 32;
6982 goto movw_common;
6983 case BFD_RELOC_AARCH64_MOVW_G3:
6984 scale = 48;
6985 movw_common:
6986 if (fixP->fx_done || !seg->use_rela_p)
6987 {
6988 insn = get_aarch64_insn (buf);
6989
6990 if (!fixP->fx_done)
6991 {
6992 /* REL signed addend must fit in 16 bits */
6993 if (signed_overflow (value, 16))
6994 as_bad_where (fixP->fx_file, fixP->fx_line,
6995 _("offset out of range"));
6996 }
6997 else
6998 {
6999 /* Check for overflow and scale. */
7000 switch (fixP->fx_r_type)
7001 {
7002 case BFD_RELOC_AARCH64_MOVW_G0:
7003 case BFD_RELOC_AARCH64_MOVW_G1:
7004 case BFD_RELOC_AARCH64_MOVW_G2:
7005 case BFD_RELOC_AARCH64_MOVW_G3:
7006 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7007 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7008 if (unsigned_overflow (value, scale + 16))
7009 as_bad_where (fixP->fx_file, fixP->fx_line,
7010 _("unsigned value out of range"));
7011 break;
7012 case BFD_RELOC_AARCH64_MOVW_G0_S:
7013 case BFD_RELOC_AARCH64_MOVW_G1_S:
7014 case BFD_RELOC_AARCH64_MOVW_G2_S:
7015 /* NOTE: We can only come here with movz or movn. */
7016 if (signed_overflow (value, scale + 16))
7017 as_bad_where (fixP->fx_file, fixP->fx_line,
7018 _("signed value out of range"));
7019 if (value < 0)
7020 {
7021 /* Force use of MOVN. */
7022 value = ~value;
7023 insn = reencode_movzn_to_movn (insn);
7024 }
7025 else
7026 {
7027 /* Force use of MOVZ. */
7028 insn = reencode_movzn_to_movz (insn);
7029 }
7030 break;
7031 default:
7032 /* Unchecked relocations. */
7033 break;
7034 }
7035 value >>= scale;
7036 }
7037
7038 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7039 insn |= encode_movw_imm (value & 0xffff);
7040
7041 put_aarch64_insn (buf, insn);
7042 }
7043 break;
7044
7045 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7046 fixP->fx_r_type = (ilp32_p
7047 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7048 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7049 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7050 /* Should always be exported to object file, see
7051 aarch64_force_relocation(). */
7052 gas_assert (!fixP->fx_done);
7053 gas_assert (seg->use_rela_p);
7054 break;
7055
7056 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7057 fixP->fx_r_type = (ilp32_p
7058 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7059 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
7060 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7061 /* Should always be exported to object file, see
7062 aarch64_force_relocation(). */
7063 gas_assert (!fixP->fx_done);
7064 gas_assert (seg->use_rela_p);
7065 break;
7066
7067 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7068 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7069 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7070 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7071 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7072 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7073 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7074 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7075 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7076 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7077 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7078 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7079 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7080 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7081 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7082 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7083 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7084 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7085 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7086 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7087 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7088 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7089 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7090 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7091 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7092 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7093 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7094 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7095 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7096 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7097 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7098 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7099 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7100 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7101 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7102 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7103 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7104 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7105 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7106 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7107 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7108 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7109 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7110 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7111 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7112 /* Should always be exported to object file, see
7113 aarch64_force_relocation(). */
7114 gas_assert (!fixP->fx_done);
7115 gas_assert (seg->use_rela_p);
7116 break;
7117
7118 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7119 /* Should always be exported to object file, see
7120 aarch64_force_relocation(). */
7121 fixP->fx_r_type = (ilp32_p
7122 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7123 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7124 gas_assert (!fixP->fx_done);
7125 gas_assert (seg->use_rela_p);
7126 break;
7127
7128 case BFD_RELOC_AARCH64_ADD_LO12:
7129 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7130 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7131 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7132 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7133 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7134 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7135 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7136 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7137 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7138 case BFD_RELOC_AARCH64_LDST128_LO12:
7139 case BFD_RELOC_AARCH64_LDST16_LO12:
7140 case BFD_RELOC_AARCH64_LDST32_LO12:
7141 case BFD_RELOC_AARCH64_LDST64_LO12:
7142 case BFD_RELOC_AARCH64_LDST8_LO12:
7143 /* Should always be exported to object file, see
7144 aarch64_force_relocation(). */
7145 gas_assert (!fixP->fx_done);
7146 gas_assert (seg->use_rela_p);
7147 break;
7148
7149 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7150 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7151 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7152 break;
7153
7154 case BFD_RELOC_UNUSED:
7155 /* An error will already have been reported. */
7156 break;
7157
7158 default:
7159 as_bad_where (fixP->fx_file, fixP->fx_line,
7160 _("unexpected %s fixup"),
7161 bfd_get_reloc_code_name (fixP->fx_r_type));
7162 break;
7163 }
7164
7165 apply_fix_return:
7166 /* Free the allocated the struct aarch64_inst.
7167 N.B. currently there are very limited number of fix-up types actually use
7168 this field, so the impact on the performance should be minimal . */
7169 if (fixP->tc_fix_data.inst != NULL)
7170 free (fixP->tc_fix_data.inst);
7171
7172 return;
7173 }
7174
7175 /* Translate internal representation of relocation info to BFD target
7176 format. */
7177
7178 arelent *
7179 tc_gen_reloc (asection * section, fixS * fixp)
7180 {
7181 arelent *reloc;
7182 bfd_reloc_code_real_type code;
7183
7184 reloc = XNEW (arelent);
7185
7186 reloc->sym_ptr_ptr = XNEW (asymbol *);
7187 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7188 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7189
7190 if (fixp->fx_pcrel)
7191 {
7192 if (section->use_rela_p)
7193 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7194 else
7195 fixp->fx_offset = reloc->address;
7196 }
7197 reloc->addend = fixp->fx_offset;
7198
7199 code = fixp->fx_r_type;
7200 switch (code)
7201 {
7202 case BFD_RELOC_16:
7203 if (fixp->fx_pcrel)
7204 code = BFD_RELOC_16_PCREL;
7205 break;
7206
7207 case BFD_RELOC_32:
7208 if (fixp->fx_pcrel)
7209 code = BFD_RELOC_32_PCREL;
7210 break;
7211
7212 case BFD_RELOC_64:
7213 if (fixp->fx_pcrel)
7214 code = BFD_RELOC_64_PCREL;
7215 break;
7216
7217 default:
7218 break;
7219 }
7220
7221 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7222 if (reloc->howto == NULL)
7223 {
7224 as_bad_where (fixp->fx_file, fixp->fx_line,
7225 _
7226 ("cannot represent %s relocation in this object file format"),
7227 bfd_get_reloc_code_name (code));
7228 return NULL;
7229 }
7230
7231 return reloc;
7232 }
7233
7234 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7235
7236 void
7237 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7238 {
7239 bfd_reloc_code_real_type type;
7240 int pcrel = 0;
7241
7242 /* Pick a reloc.
7243 FIXME: @@ Should look at CPU word size. */
7244 switch (size)
7245 {
7246 case 1:
7247 type = BFD_RELOC_8;
7248 break;
7249 case 2:
7250 type = BFD_RELOC_16;
7251 break;
7252 case 4:
7253 type = BFD_RELOC_32;
7254 break;
7255 case 8:
7256 type = BFD_RELOC_64;
7257 break;
7258 default:
7259 as_bad (_("cannot do %u-byte relocation"), size);
7260 type = BFD_RELOC_UNUSED;
7261 break;
7262 }
7263
7264 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7265 }
7266
/* Decide whether a fix-up must be kept as a relocation for the linker.
   Returns non-zero to force a relocation, zero to allow the assembler
   to resolve it (subject to generic_force_reloc).  */

int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  return generic_force_reloc (fixp);
}
7354
7355 #ifdef OBJ_ELF
7356
7357 const char *
7358 elf64_aarch64_target_format (void)
7359 {
7360 if (strcmp (TARGET_OS, "cloudabi") == 0)
7361 {
7362 /* FIXME: What to do for ilp32_p ? */
7363 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
7364 }
7365 if (target_big_endian)
7366 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7367 else
7368 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7369 }
7370
/* Per-symbol hook run before the symbol table is written out; simply
   defers to the generic ELF symbol frobbing.  */

void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
7376 #endif
7377
7378 /* MD interface: Finalization. */
7379
7380 /* A good place to do this, although this was probably not intended
7381 for this kind of use. We need to dump the literal pool before
7382 references are made to a null symbol pointer. */
7383
7384 void
7385 aarch64_cleanup (void)
7386 {
7387 literal_pool *pool;
7388
7389 for (pool = list_of_pools; pool; pool = pool->next)
7390 {
7391 /* Put it at the end of the relevant section. */
7392 subseg_set (pool->section, pool->sub_section);
7393 s_ltorg (0);
7394 }
7395 }
7396
7397 #ifdef OBJ_ELF
7398 /* Remove any excess mapping symbols generated for alignment frags in
7399 SEC. We may have created a mapping symbol before a zero byte
7400 alignment; remove it if there's a mapping symbol after the
7401 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section, looking at the last mapping symbol
     recorded for each frag.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary to the next frag; scan forward
	 over empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
7461 #endif
7462
7463 /* Adjust the symbol table. */
7464
/* Final symbol-table adjustments: drop redundant mapping symbols, then
   perform the generic ELF adjustments.  Only meaningful for ELF output.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
7475
/* Insert KEY/VALUE into TABLE.  A failure here means a duplicate key or
   an out-of-memory condition inside the hash machinery -- both are
   assembler bugs, so abort via as_fatal (which also reports to stderr)
   rather than printing to stdout and silently continuing.  */

static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err;

  hash_err = hash_insert (table, key, value);
  if (hash_err)
    as_fatal (_("Internal Error: Can't hash %s: %s"), key, hash_err);
}
7485
7486 static void
7487 fill_instruction_hash_table (void)
7488 {
7489 aarch64_opcode *opcode = aarch64_opcode_table;
7490
7491 while (opcode->name != NULL)
7492 {
7493 templates *templ, *new_templ;
7494 templ = hash_find (aarch64_ops_hsh, opcode->name);
7495
7496 new_templ = XNEW (templates);
7497 new_templ->opcode = opcode;
7498 new_templ->next = NULL;
7499
7500 if (!templ)
7501 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7502 else
7503 {
7504 new_templ->next = templ->next;
7505 templ->next = new_templ;
7506 }
7507 ++opcode;
7508 }
7509 }
7510
/* Copy at most NUM characters from SRC to DST, upper-casing each one,
   and NUL-terminate DST.  Copying stops early at the end of SRC.  DST
   must have room for NUM + 1 bytes.  The index is size_t (not unsigned
   int as before) so the bound comparison cannot truncate for very large
   NUM values.  */

static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t i;
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
7519
7520 /* Assume STR point to a lower-case string, allocate, convert and return
7521 the corresponding upper-case string. */
7522 static inline const char*
7523 get_upper_str (const char *str)
7524 {
7525 char *ret;
7526 size_t len = strlen (str);
7527 ret = XNEWVEC (char, len + 1);
7528 convert_to_upper (ret, str, len);
7529 return ret;
7530 }
7531
7532 /* MD interface: Initialization. */
7533
7534 void
7535 md_begin (void)
7536 {
7537 unsigned mach;
7538 unsigned int i;
7539
7540 if ((aarch64_ops_hsh = hash_new ()) == NULL
7541 || (aarch64_cond_hsh = hash_new ()) == NULL
7542 || (aarch64_shift_hsh = hash_new ()) == NULL
7543 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7544 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7545 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7546 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7547 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7548 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7549 || (aarch64_reg_hsh = hash_new ()) == NULL
7550 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7551 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7552 || (aarch64_pldop_hsh = hash_new ()) == NULL
7553 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
7554 as_fatal (_("virtual memory exhausted"));
7555
7556 fill_instruction_hash_table ();
7557
7558 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7559 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7560 (void *) (aarch64_sys_regs + i));
7561
7562 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7563 checked_hash_insert (aarch64_pstatefield_hsh,
7564 aarch64_pstatefields[i].name,
7565 (void *) (aarch64_pstatefields + i));
7566
7567 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
7568 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7569 aarch64_sys_regs_ic[i].name,
7570 (void *) (aarch64_sys_regs_ic + i));
7571
7572 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
7573 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7574 aarch64_sys_regs_dc[i].name,
7575 (void *) (aarch64_sys_regs_dc + i));
7576
7577 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
7578 checked_hash_insert (aarch64_sys_regs_at_hsh,
7579 aarch64_sys_regs_at[i].name,
7580 (void *) (aarch64_sys_regs_at + i));
7581
7582 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
7583 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7584 aarch64_sys_regs_tlbi[i].name,
7585 (void *) (aarch64_sys_regs_tlbi + i));
7586
7587 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7588 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7589 (void *) (reg_names + i));
7590
7591 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7592 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7593 (void *) (nzcv_names + i));
7594
7595 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7596 {
7597 const char *name = aarch64_operand_modifiers[i].name;
7598 checked_hash_insert (aarch64_shift_hsh, name,
7599 (void *) (aarch64_operand_modifiers + i));
7600 /* Also hash the name in the upper case. */
7601 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7602 (void *) (aarch64_operand_modifiers + i));
7603 }
7604
7605 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7606 {
7607 unsigned int j;
7608 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7609 the same condition code. */
7610 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7611 {
7612 const char *name = aarch64_conds[i].names[j];
7613 if (name == NULL)
7614 break;
7615 checked_hash_insert (aarch64_cond_hsh, name,
7616 (void *) (aarch64_conds + i));
7617 /* Also hash the name in the upper case. */
7618 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7619 (void *) (aarch64_conds + i));
7620 }
7621 }
7622
7623 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7624 {
7625 const char *name = aarch64_barrier_options[i].name;
7626 /* Skip xx00 - the unallocated values of option. */
7627 if ((i & 0x3) == 0)
7628 continue;
7629 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7630 (void *) (aarch64_barrier_options + i));
7631 /* Also hash the name in the upper case. */
7632 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7633 (void *) (aarch64_barrier_options + i));
7634 }
7635
7636 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7637 {
7638 const char* name = aarch64_prfops[i].name;
7639 /* Skip the unallocated hint encodings. */
7640 if (name == NULL)
7641 continue;
7642 checked_hash_insert (aarch64_pldop_hsh, name,
7643 (void *) (aarch64_prfops + i));
7644 /* Also hash the name in the upper case. */
7645 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7646 (void *) (aarch64_prfops + i));
7647 }
7648
7649 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
7650 {
7651 const char* name = aarch64_hint_options[i].name;
7652
7653 checked_hash_insert (aarch64_hint_opt_hsh, name,
7654 (void *) (aarch64_hint_options + i));
7655 /* Also hash the name in the upper case. */
7656 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7657 (void *) (aarch64_hint_options + i));
7658 }
7659
7660 /* Set the cpu variant based on the command-line options. */
7661 if (!mcpu_cpu_opt)
7662 mcpu_cpu_opt = march_cpu_opt;
7663
7664 if (!mcpu_cpu_opt)
7665 mcpu_cpu_opt = &cpu_default;
7666
7667 cpu_variant = *mcpu_cpu_opt;
7668
7669 /* Record the CPU type. */
7670 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7671
7672 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7673 }
7674
/* Command line processing.  */

/* Short options: only "-m<suffix>" is recognized here; the suffix is
   matched against aarch64_opts / aarch64_long_opts in md_parse_option.  */
const char *md_shortopts = "m:";
7678
/* Long-option codes for -EB/-EL.  Only the endiannesses the target
   supports get a code; availability is tested with #ifdef OPTION_EB /
   #ifdef OPTION_EL below.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
7689
/* Long options handed to getopt; terminated by a null entry.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
7701
/* Table entry describing a simple on/off backend option: matching the
   option stores VALUE into *VAR.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
7710
/* Simple flag options recognized by md_parse_option; terminated by a
   null entry.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
7724
/* Table entry mapping a -mcpu= name to the feature set it implies.  */
struct aarch64_cpu_option_table
{
  const char *name;		/* Name accepted on the command line.  */
  const aarch64_feature_set value;	/* Features implied by this CPU.  */
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
7733
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Consulted by aarch64_parse_cpu and the .cpu
   directive; terminated by a null entry.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Qualcomm QDF24XX"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
7771
/* Table entry mapping a -march= name to the feature set it implies.  */
struct aarch64_arch_option_table
{
  const char *name;		/* Architecture name.  */
  const aarch64_feature_set value;	/* Features implied by it.  */
};
7777
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Consulted by aarch64_parse_arch and the .arch
   directive; terminated by a null entry.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {NULL, AARCH64_ARCH_NONE}
};
7787
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;		/* Extension name ("crc", "fp16", ...).  */
  const aarch64_feature_set value;	/* Feature bits the extension adds.  */
  const aarch64_feature_set require;	/* Feature dependencies.  */
};
7795
/* Architectural extensions accepted after '+' in -mcpu=/-march= and in
   .arch_extension; terminated by a null entry.  REQUIRE lists the
   features an extension depends on (e.g. rdma needs simd, fp16 needs
   fp); the enable/disable closures below honour these links.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
   AARCH64_ARCH_NONE},
  {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
   AARCH64_ARCH_NONE},
  {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
   AARCH64_ARCH_NONE},
  {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
   AARCH64_ARCH_NONE},
  {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
   AARCH64_ARCH_NONE},
  {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
   AARCH64_ARCH_NONE},
  {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
   AARCH64_ARCH_NONE},
  {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
   AARCH64_ARCH_NONE},
  {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
   AARCH64_ARCH_NONE},
  {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
7821
/* Table entry describing an option that carries a sub-argument, e.g.
   -mcpu=cortex-a53; FUNC parses the text after the '='.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
7829
7830 /* Transitive closure of features depending on set. */
7831 static aarch64_feature_set
7832 aarch64_feature_disable_set (aarch64_feature_set set)
7833 {
7834 const struct aarch64_option_cpu_value_table *opt;
7835 aarch64_feature_set prev = 0;
7836
7837 while (prev != set) {
7838 prev = set;
7839 for (opt = aarch64_features; opt->name != NULL; opt++)
7840 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
7841 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
7842 }
7843 return set;
7844 }
7845
7846 /* Transitive closure of dependencies of set. */
7847 static aarch64_feature_set
7848 aarch64_feature_enable_set (aarch64_feature_set set)
7849 {
7850 const struct aarch64_option_cpu_value_table *opt;
7851 aarch64_feature_set prev = 0;
7852
7853 while (prev != set) {
7854 prev = set;
7855 for (opt = aarch64_features; opt->name != NULL; opt++)
7856 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
7857 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
7858 }
7859 return set;
7860 }
7861
7862 static int
7863 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
7864 bfd_boolean ext_only)
7865 {
7866 /* We insist on extensions being added before being removed. We achieve
7867 this by using the ADDING_VALUE variable to indicate whether we are
7868 adding an extension (1) or removing it (0) and only allowing it to
7869 change in the order -1 -> 1 -> 0. */
7870 int adding_value = -1;
7871 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
7872
7873 /* Copy the feature set, so that we can modify it. */
7874 *ext_set = **opt_p;
7875 *opt_p = ext_set;
7876
7877 while (str != NULL && *str != 0)
7878 {
7879 const struct aarch64_option_cpu_value_table *opt;
7880 const char *ext = NULL;
7881 int optlen;
7882
7883 if (!ext_only)
7884 {
7885 if (*str != '+')
7886 {
7887 as_bad (_("invalid architectural extension"));
7888 return 0;
7889 }
7890
7891 ext = strchr (++str, '+');
7892 }
7893
7894 if (ext != NULL)
7895 optlen = ext - str;
7896 else
7897 optlen = strlen (str);
7898
7899 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7900 {
7901 if (adding_value != 0)
7902 adding_value = 0;
7903 optlen -= 2;
7904 str += 2;
7905 }
7906 else if (optlen > 0)
7907 {
7908 if (adding_value == -1)
7909 adding_value = 1;
7910 else if (adding_value != 1)
7911 {
7912 as_bad (_("must specify extensions to add before specifying "
7913 "those to remove"));
7914 return FALSE;
7915 }
7916 }
7917
7918 if (optlen == 0)
7919 {
7920 as_bad (_("missing architectural extension"));
7921 return 0;
7922 }
7923
7924 gas_assert (adding_value != -1);
7925
7926 for (opt = aarch64_features; opt->name != NULL; opt++)
7927 if (strncmp (opt->name, str, optlen) == 0)
7928 {
7929 aarch64_feature_set set;
7930
7931 /* Add or remove the extension. */
7932 if (adding_value)
7933 {
7934 set = aarch64_feature_enable_set (opt->value);
7935 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
7936 }
7937 else
7938 {
7939 set = aarch64_feature_disable_set (opt->value);
7940 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
7941 }
7942 break;
7943 }
7944
7945 if (opt->name == NULL)
7946 {
7947 as_bad (_("unknown architectural extension `%s'"), str);
7948 return 0;
7949 }
7950
7951 str = ext;
7952 };
7953
7954 return 1;
7955 }
7956
7957 static int
7958 aarch64_parse_cpu (const char *str)
7959 {
7960 const struct aarch64_cpu_option_table *opt;
7961 const char *ext = strchr (str, '+');
7962 size_t optlen;
7963
7964 if (ext != NULL)
7965 optlen = ext - str;
7966 else
7967 optlen = strlen (str);
7968
7969 if (optlen == 0)
7970 {
7971 as_bad (_("missing cpu name `%s'"), str);
7972 return 0;
7973 }
7974
7975 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7976 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7977 {
7978 mcpu_cpu_opt = &opt->value;
7979 if (ext != NULL)
7980 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7981
7982 return 1;
7983 }
7984
7985 as_bad (_("unknown cpu `%s'"), str);
7986 return 0;
7987 }
7988
7989 static int
7990 aarch64_parse_arch (const char *str)
7991 {
7992 const struct aarch64_arch_option_table *opt;
7993 const char *ext = strchr (str, '+');
7994 size_t optlen;
7995
7996 if (ext != NULL)
7997 optlen = ext - str;
7998 else
7999 optlen = strlen (str);
8000
8001 if (optlen == 0)
8002 {
8003 as_bad (_("missing architecture name `%s'"), str);
8004 return 0;
8005 }
8006
8007 for (opt = aarch64_archs; opt->name != NULL; opt++)
8008 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8009 {
8010 march_cpu_opt = &opt->value;
8011 if (ext != NULL)
8012 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8013
8014 return 1;
8015 }
8016
8017 as_bad (_("unknown architecture `%s'\n"), str);
8018 return 0;
8019 }
8020
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name as given to -mabi=.  */
  enum aarch64_abi_type value;	/* Corresponding internal ABI code.  */
};
8027
/* Recognized -mabi= values.  Unlike the other tables this one has no
   null terminator; it is walked with ARRAY_SIZE.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
8032
8033 static int
8034 aarch64_parse_abi (const char *str)
8035 {
8036 unsigned int i;
8037
8038 if (str[0] == '\0')
8039 {
8040 as_bad (_("missing abi name `%s'"), str);
8041 return 0;
8042 }
8043
8044 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8045 if (strcmp (str, aarch64_abis[i].name) == 0)
8046 {
8047 aarch64_abi = aarch64_abis[i].value;
8048 return 1;
8049 }
8050
8051 as_bad (_("unknown abi `%s'\n"), str);
8052 return 0;
8053 }
8054
/* Options that carry a sub-argument after '='; each is decoded by its
   FUNC callback.  Terminated by a null entry.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
8066
8067 int
8068 md_parse_option (int c, const char *arg)
8069 {
8070 struct aarch64_option_table *opt;
8071 struct aarch64_long_option_table *lopt;
8072
8073 switch (c)
8074 {
8075 #ifdef OPTION_EB
8076 case OPTION_EB:
8077 target_big_endian = 1;
8078 break;
8079 #endif
8080
8081 #ifdef OPTION_EL
8082 case OPTION_EL:
8083 target_big_endian = 0;
8084 break;
8085 #endif
8086
8087 case 'a':
8088 /* Listing option. Just ignore these, we don't support additional
8089 ones. */
8090 return 0;
8091
8092 default:
8093 for (opt = aarch64_opts; opt->option != NULL; opt++)
8094 {
8095 if (c == opt->option[0]
8096 && ((arg == NULL && opt->option[1] == 0)
8097 || streq (arg, opt->option + 1)))
8098 {
8099 /* If the option is deprecated, tell the user. */
8100 if (opt->deprecated != NULL)
8101 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
8102 arg ? arg : "", _(opt->deprecated));
8103
8104 if (opt->var != NULL)
8105 *opt->var = opt->value;
8106
8107 return 1;
8108 }
8109 }
8110
8111 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8112 {
8113 /* These options are expected to have an argument. */
8114 if (c == lopt->option[0]
8115 && arg != NULL
8116 && strncmp (arg, lopt->option + 1,
8117 strlen (lopt->option + 1)) == 0)
8118 {
8119 /* If the option is deprecated, tell the user. */
8120 if (lopt->deprecated != NULL)
8121 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
8122 _(lopt->deprecated));
8123
8124 /* Call the sup-option parser. */
8125 return lopt->func (arg + strlen (lopt->option) - 1);
8126 }
8127 }
8128
8129 return 0;
8130 }
8131
8132 return 1;
8133 }
8134
/* Print the AArch64-specific assembler options to FP; called for
   --help.  */
void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  /* Simple flag options with help text.  */
  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  /* Options that take a sub-argument (-mabi=, -mcpu=, -march=).  */
  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
 -EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
 -EL assemble code for a little-endian cpu\n"));
#endif
}
8161
8162 /* Parse a .cpu directive. */
8163
8164 static void
8165 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
8166 {
8167 const struct aarch64_cpu_option_table *opt;
8168 char saved_char;
8169 char *name;
8170 char *ext;
8171 size_t optlen;
8172
8173 name = input_line_pointer;
8174 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8175 input_line_pointer++;
8176 saved_char = *input_line_pointer;
8177 *input_line_pointer = 0;
8178
8179 ext = strchr (name, '+');
8180
8181 if (ext != NULL)
8182 optlen = ext - name;
8183 else
8184 optlen = strlen (name);
8185
8186 /* Skip the first "all" entry. */
8187 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
8188 if (strlen (opt->name) == optlen
8189 && strncmp (name, opt->name, optlen) == 0)
8190 {
8191 mcpu_cpu_opt = &opt->value;
8192 if (ext != NULL)
8193 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8194 return;
8195
8196 cpu_variant = *mcpu_cpu_opt;
8197
8198 *input_line_pointer = saved_char;
8199 demand_empty_rest_of_line ();
8200 return;
8201 }
8202 as_bad (_("unknown cpu `%s'"), name);
8203 *input_line_pointer = saved_char;
8204 ignore_rest_of_line ();
8205 }
8206
8207
8208 /* Parse a .arch directive. */
8209
8210 static void
8211 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8212 {
8213 const struct aarch64_arch_option_table *opt;
8214 char saved_char;
8215 char *name;
8216 char *ext;
8217 size_t optlen;
8218
8219 name = input_line_pointer;
8220 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8221 input_line_pointer++;
8222 saved_char = *input_line_pointer;
8223 *input_line_pointer = 0;
8224
8225 ext = strchr (name, '+');
8226
8227 if (ext != NULL)
8228 optlen = ext - name;
8229 else
8230 optlen = strlen (name);
8231
8232 /* Skip the first "all" entry. */
8233 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8234 if (strlen (opt->name) == optlen
8235 && strncmp (name, opt->name, optlen) == 0)
8236 {
8237 mcpu_cpu_opt = &opt->value;
8238 if (ext != NULL)
8239 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8240 return;
8241
8242 cpu_variant = *mcpu_cpu_opt;
8243
8244 *input_line_pointer = saved_char;
8245 demand_empty_rest_of_line ();
8246 return;
8247 }
8248
8249 as_bad (_("unknown architecture `%s'\n"), name);
8250 *input_line_pointer = saved_char;
8251 ignore_rest_of_line ();
8252 }
8253
8254 /* Parse a .arch_extension directive. */
8255
8256 static void
8257 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8258 {
8259 char saved_char;
8260 char *ext = input_line_pointer;;
8261
8262 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8263 input_line_pointer++;
8264 saved_char = *input_line_pointer;
8265 *input_line_pointer = 0;
8266
8267 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8268 return;
8269
8270 cpu_variant = *mcpu_cpu_opt;
8271
8272 *input_line_pointer = saved_char;
8273 demand_empty_rest_of_line ();
8274 }
8275
/* Copy symbol information.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  /* Propagate the AArch64 backend symbol flags from SRC to DEST.  */
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
This page took 0.204371 seconds and 5 git commands to generate.