[AArch64] GAS Support BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
deliverable/binutils-gdb.git: gas/config/tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum neon_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q
87 };
88
89 /* Bits for DEFINED field in neon_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92
93 struct neon_type_el
94 {
95 enum neon_el_type type;
96 unsigned char defined;
97 unsigned width;
98 int64_t index;
99 };
100
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
102
103 struct reloc
104 {
105 bfd_reloc_code_real_type type;
106 expressionS exp;
107 int pc_rel;
108 enum aarch64_opnd opnd;
109 uint32_t flags;
110 unsigned need_libopcodes_p : 1;
111 };
112
113 struct aarch64_instruction
114 {
115 /* libopcodes structure for instruction intermediate representation. */
116 aarch64_inst base;
117 /* Record assembly errors found during the parsing. */
118 struct
119 {
120 enum aarch64_operand_error_kind kind;
121 const char *error;
122 } parsing_error;
123 /* The condition that appears in the assembly line. */
124 int cond;
125 /* Relocation information (including the GAS internal fixup). */
126 struct reloc reloc;
127 /* Need to generate an immediate in the literal pool. */
128 unsigned gen_lit_pool : 1;
129 };
130
131 typedef struct aarch64_instruction aarch64_instruction;
132
133 static aarch64_instruction inst;
134
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
138 /* Diagnostics inline function utilities.
139
140 These are lightweight utilities which should only be called by parse_operands
141 and other parsers. GAS processes each assembly line by parsing it against
142 instruction template(s), in the case of multiple templates (for the same
143 mnemonic name), those templates are tried one by one until one succeeds or
144 all fail. An assembly line may fail a few templates before being
145 successfully parsed; an error saved here in most cases is not a user error
146 but an error indicating the current template is not the right template.
147 Therefore it is very important that errors can be saved at a low cost during
148 the parsing; we don't want to slow down the whole parsing by recording
149 non-user errors in detail.
150
151 Remember that the objective is to help GAS pick up the most appropriate
152 error message in the case of multiple templates, e.g. FMOV which has 8
153 templates. */
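/* A minimal usage sketch (not a verbatim quote of any single parser below):
   a parser that fails to match its operand records a cheap error and bails
   out, e.g.

     if (reg == PARSE_FAIL)
       {
         set_default_error ();
         return PARSE_FAIL;
       }

   and only after every template for the mnemonic has been tried is the
   recorded error finally reported via as_bad.  */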
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline enum aarch64_operand_error_kind
175 get_error_kind (void)
176 {
177 return inst.parsing_error.kind;
178 }
179
180 static inline void
181 set_error (enum aarch64_operand_error_kind kind, const char *error)
182 {
183 inst.parsing_error.kind = kind;
184 inst.parsing_error.error = error;
185 }
186
187 static inline void
188 set_recoverable_error (const char *error)
189 {
190 set_error (AARCH64_OPDE_RECOVERABLE, error);
191 }
192
193 /* Use the DESC field of the corresponding aarch64_operand entry to compose
194 the error message. */
195 static inline void
196 set_default_error (void)
197 {
198 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
199 }
200
201 static inline void
202 set_syntax_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
205 }
206
207 static inline void
208 set_first_syntax_error (const char *error)
209 {
210 if (! error_p ())
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
212 }
213
214 static inline void
215 set_fatal_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
218 }
219 \f
220 /* Number of littlenums required to hold an extended precision number. */
221 #define MAX_LITTLENUMS 6
222
223 /* Return value for certain parsers when the parsing fails; those parsers
224 return the information of the parsed result, e.g. register number, on
225 success. */
226 #define PARSE_FAIL -1
227
228 /* This is an invalid condition code that means no conditional field is
229 present. */
230 #define COND_ALWAYS 0x10
231
232 typedef struct
233 {
234 const char *template;
235 unsigned long value;
236 } asm_barrier_opt;
237
238 typedef struct
239 {
240 const char *template;
241 uint32_t value;
242 } asm_nzcv;
243
244 struct reloc_entry
245 {
246 char *name;
247 bfd_reloc_code_real_type reloc;
248 };
249
250 /* Structure for a hash table entry for a register. */
251 typedef struct
252 {
253 const char *name;
254 unsigned char number;
255 unsigned char type;
256 unsigned char builtin;
257 } reg_entry;
258
259 /* Macros to define the register types and masks for the purpose
260 of parsing. */
261
262 #undef AARCH64_REG_TYPES
263 #define AARCH64_REG_TYPES \
264 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
265 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
266 BASIC_REG_TYPE(SP_32) /* wsp */ \
267 BASIC_REG_TYPE(SP_64) /* sp */ \
268 BASIC_REG_TYPE(Z_32) /* wzr */ \
269 BASIC_REG_TYPE(Z_64) /* xzr */ \
270 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
271 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
272 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
273 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
274 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
275 BASIC_REG_TYPE(CN) /* c[0-7] */ \
276 BASIC_REG_TYPE(VN) /* v[0-31] */ \
277 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
278 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
279 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
280 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
281 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
282 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
283 /* Typecheck: any [BHSDQ]P FP. */ \
284 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
285 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
286 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
287 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
289 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
290 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
291 /* Any integer register; used for error messages only. */ \
292 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
293 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Pseudo type to mark the end of the enumerator sequence. */ \
296 BASIC_REG_TYPE(MAX)
297
298 #undef BASIC_REG_TYPE
299 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
300 #undef MULTI_REG_TYPE
301 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
302
303 /* Register type enumerators. */
304 typedef enum
305 {
306 /* A list of REG_TYPE_*. */
307 AARCH64_REG_TYPES
308 } aarch64_reg_type;
309
310 #undef BASIC_REG_TYPE
311 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
312 #undef REG_TYPE
313 #define REG_TYPE(T) (1 << REG_TYPE_##T)
314 #undef MULTI_REG_TYPE
315 #define MULTI_REG_TYPE(T,V) V,
316
317 /* Values indexed by aarch64_reg_type to assist the type checking. */
318 static const unsigned reg_type_masks[] =
319 {
320 AARCH64_REG_TYPES
321 };
322
323 #undef BASIC_REG_TYPE
324 #undef REG_TYPE
325 #undef MULTI_REG_TYPE
326 #undef AARCH64_REG_TYPES
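/* For example, reg_type_masks[REG_TYPE_R_Z_SP] is the OR of the single-bit
   masks of R_32, R_64, SP_32, SP_64, Z_32 and Z_64, so a register whose
   parsed type is T is acceptable where R_Z_SP is expected iff

     (reg_type_masks[T] & reg_type_masks[REG_TYPE_R_Z_SP])
        == reg_type_masks[T]

   which is exactly the check performed by aarch64_check_reg_type below.  */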
327
328 /* Diagnostics used when we don't get a register of the expected type.
329 Note: this has to be synchronized with the aarch64_reg_type definitions
330 above. */
331 static const char *
332 get_reg_expected_msg (aarch64_reg_type reg_type)
333 {
334 const char *msg;
335
336 switch (reg_type)
337 {
338 case REG_TYPE_R_32:
339 msg = N_("integer 32-bit register expected");
340 break;
341 case REG_TYPE_R_64:
342 msg = N_("integer 64-bit register expected");
343 break;
344 case REG_TYPE_R_N:
345 msg = N_("integer register expected");
346 break;
347 case REG_TYPE_R_Z_SP:
348 msg = N_("integer, zero or SP register expected");
349 break;
350 case REG_TYPE_FP_B:
351 msg = N_("8-bit SIMD scalar register expected");
352 break;
353 case REG_TYPE_FP_H:
354 msg = N_("16-bit SIMD scalar or floating-point half precision "
355 "register expected");
356 break;
357 case REG_TYPE_FP_S:
358 msg = N_("32-bit SIMD scalar or floating-point single precision "
359 "register expected");
360 break;
361 case REG_TYPE_FP_D:
362 msg = N_("64-bit SIMD scalar or floating-point double precision "
363 "register expected");
364 break;
365 case REG_TYPE_FP_Q:
366 msg = N_("128-bit SIMD scalar or floating-point quad precision "
367 "register expected");
368 break;
369 case REG_TYPE_CN:
370 msg = N_("C0 - C15 expected");
371 break;
372 case REG_TYPE_R_Z_BHSDQ_V:
373 msg = N_("register expected");
374 break;
375 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
376 msg = N_("SIMD scalar or floating-point register expected");
377 break;
378 case REG_TYPE_VN: /* any V reg */
379 msg = N_("vector register expected");
380 break;
381 default:
382 as_fatal (_("invalid register type %d"), reg_type);
383 }
384 return msg;
385 }
386
387 /* Some well known registers that we refer to directly elsewhere. */
388 #define REG_SP 31
389
390 /* Instructions take 4 bytes in the object file. */
391 #define INSN_SIZE 4
392
393 /* Define some common error messages. */
394 #define BAD_SP _("SP not allowed here")
395
396 static struct hash_control *aarch64_ops_hsh;
397 static struct hash_control *aarch64_cond_hsh;
398 static struct hash_control *aarch64_shift_hsh;
399 static struct hash_control *aarch64_sys_regs_hsh;
400 static struct hash_control *aarch64_pstatefield_hsh;
401 static struct hash_control *aarch64_sys_regs_ic_hsh;
402 static struct hash_control *aarch64_sys_regs_dc_hsh;
403 static struct hash_control *aarch64_sys_regs_at_hsh;
404 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
405 static struct hash_control *aarch64_reg_hsh;
406 static struct hash_control *aarch64_barrier_opt_hsh;
407 static struct hash_control *aarch64_nzcv_hsh;
408 static struct hash_control *aarch64_pldop_hsh;
409
410 /* Stuff needed to resolve the label ambiguity
411 As:
412 ...
413 label: <insn>
414 may differ from:
415 ...
416 label:
417 <insn> */
418
419 static symbolS *last_label_seen;
420
421 /* Literal pool structure. Held on a per-section
422 and per-sub-section basis. */
423
424 #define MAX_LITERAL_POOL_SIZE 1024
425 typedef struct literal_expression
426 {
427 expressionS exp;
428 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
429 LITTLENUM_TYPE * bignum;
430 } literal_expression;
431
432 typedef struct literal_pool
433 {
434 literal_expression literals[MAX_LITERAL_POOL_SIZE];
435 unsigned int next_free_entry;
436 unsigned int id;
437 symbolS *symbol;
438 segT section;
439 subsegT sub_section;
440 int size;
441 struct literal_pool *next;
442 } literal_pool;
443
444 /* Pointer to a linked list of literal pools. */
445 static literal_pool *list_of_pools = NULL;
446 \f
447 /* Pure syntax. */
448
449 /* This array holds the chars that always start a comment. If the
450 pre-processor is disabled, these aren't very useful. */
451 const char comment_chars[] = "";
452
453 /* This array holds the chars that only start a comment at the beginning of
454 a line. If the line seems to have the form '# 123 filename'
455 .line and .file directives will appear in the pre-processed output. */
456 /* Note that input_file.c hand checks for '#' at the beginning of the
457 first line of the input file. This is because the compiler outputs
458 #NO_APP at the beginning of its output. */
459 /* Also note that comments like this one will always work. */
460 const char line_comment_chars[] = "#";
461
462 const char line_separator_chars[] = ";";
463
464 /* Chars that can be used to separate the mantissa
465 from the exponent in floating point numbers. */
466 const char EXP_CHARS[] = "eE";
467
468 /* Chars that mean this number is a floating point constant. */
469 /* As in 0f12.456 */
470 /* or 0d1.2345e12 */
471
472 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
473
474 /* Prefix character that indicates the start of an immediate value. */
475 #define is_immediate_prefix(C) ((C) == '#')
476
477 /* Separator character handling. */
478
479 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
480
481 static inline bfd_boolean
482 skip_past_char (char **str, char c)
483 {
484 if (**str == c)
485 {
486 (*str)++;
487 return TRUE;
488 }
489 else
490 return FALSE;
491 }
492
493 #define skip_past_comma(str) skip_past_char (str, ',')
494
495 /* Arithmetic expressions (possibly involving symbols). */
496
497 static bfd_boolean in_my_get_expression_p = FALSE;
498
499 /* Third argument to my_get_expression. */
500 #define GE_NO_PREFIX 0
501 #define GE_OPT_PREFIX 1
502
503 /* Return TRUE if the string pointed to by *STR is successfully parsed
504 as a valid expression; *EP will be filled with the information of
505 such an expression. Otherwise return FALSE. */
506
507 static bfd_boolean
508 my_get_expression (expressionS * ep, char **str, int prefix_mode,
509 int reject_absent)
510 {
511 char *save_in;
512 segT seg;
513 int prefix_present_p = 0;
514
515 switch (prefix_mode)
516 {
517 case GE_NO_PREFIX:
518 break;
519 case GE_OPT_PREFIX:
520 if (is_immediate_prefix (**str))
521 {
522 (*str)++;
523 prefix_present_p = 1;
524 }
525 break;
526 default:
527 abort ();
528 }
529
530 memset (ep, 0, sizeof (expressionS));
531
532 save_in = input_line_pointer;
533 input_line_pointer = *str;
534 in_my_get_expression_p = TRUE;
535 seg = expression (ep);
536 in_my_get_expression_p = FALSE;
537
538 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
539 {
540 /* We found a bad expression in md_operand(). */
541 *str = input_line_pointer;
542 input_line_pointer = save_in;
543 if (prefix_present_p && ! error_p ())
544 set_fatal_syntax_error (_("bad expression"));
545 else
546 set_first_syntax_error (_("bad expression"));
547 return FALSE;
548 }
549
550 #ifdef OBJ_AOUT
551 if (seg != absolute_section
552 && seg != text_section
553 && seg != data_section
554 && seg != bss_section && seg != undefined_section)
555 {
556 set_syntax_error (_("bad segment"));
557 *str = input_line_pointer;
558 input_line_pointer = save_in;
559 return FALSE;
560 }
561 #else
562 (void) seg;
563 #endif
564
565 *str = input_line_pointer;
566 input_line_pointer = save_in;
567 return TRUE;
568 }
569
570 /* Turn a string in input_line_pointer into a floating point constant
571 of type TYPE, and store the appropriate bytes in *LITP. The number
572 of LITTLENUMS emitted is stored in *SIZEP. An error message is
573 returned, or NULL on OK. */
574
575 char *
576 md_atof (int type, char *litP, int *sizeP)
577 {
578 return ieee_md_atof (type, litP, sizeP, target_big_endian);
579 }
580
581 /* We handle all bad expressions here, so that we can report the faulty
582 instruction in the error message. */
583 void
584 md_operand (expressionS * exp)
585 {
586 if (in_my_get_expression_p)
587 exp->X_op = O_illegal;
588 }
589
590 /* Immediate values. */
591
592 /* Errors may be set multiple times during parsing or bit encoding
593 (particularly in the Neon bits), but usually the earliest error which is set
594 will be the most meaningful. Avoid overwriting it with later (cascading)
595 errors by calling this function. */
596
597 static void
598 first_error (const char *error)
599 {
600 if (! error_p ())
601 set_syntax_error (error);
602 }
603
604 /* Similar to first_error, but this function accepts a formatted error
605 message. */
606 static void
607 first_error_fmt (const char *format, ...)
608 {
609 va_list args;
610 enum
611 { size = 100 };
612 /* N.B. this single buffer will not cause error messages for different
613 instructions to pollute each other; this is because at the end of
614 processing of each assembly line, the error message, if any, will be
615 collected by as_bad. */
616 static char buffer[size];
617
618 if (! error_p ())
619 {
620 int ret ATTRIBUTE_UNUSED;
621 va_start (args, format);
622 ret = vsnprintf (buffer, size, format, args);
623 know (ret <= size - 1 && ret >= 0);
624 va_end (args);
625 set_syntax_error (buffer);
626 }
627 }
628
629 /* Register parsing. */
630
631 /* Generic register parser which is called by other specialized
632 register parsers.
633 CCP points to what should be the beginning of a register name.
634 If it is indeed a valid register name, advance CCP over it and
635 return the reg_entry structure; otherwise return NULL.
636 It does not issue diagnostics. */
637
638 static reg_entry *
639 parse_reg (char **ccp)
640 {
641 char *start = *ccp;
642 char *p;
643 reg_entry *reg;
644
645 #ifdef REGISTER_PREFIX
646 if (*start != REGISTER_PREFIX)
647 return NULL;
648 start++;
649 #endif
650
651 p = start;
652 if (!ISALPHA (*p) || !is_name_beginner (*p))
653 return NULL;
654
655 do
656 p++;
657 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
658
659 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
660
661 if (!reg)
662 return NULL;
663
664 *ccp = p;
665 return reg;
666 }
667
668 /* Return TRUE if REG->TYPE is a valid instance of TYPE; otherwise
669 return FALSE. */
670 static bfd_boolean
671 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
672 {
673 if (reg->type == type)
674 return TRUE;
675
676 switch (type)
677 {
678 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
679 case REG_TYPE_R_Z_SP: /* Integer reg (inc {W}SP inc [WX]ZR). */
680 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
681 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
682 case REG_TYPE_VN: /* Vector register. */
683 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
684 return ((reg_type_masks[reg->type] & reg_type_masks[type])
685 == reg_type_masks[reg->type]);
686 default:
687 as_fatal ("unhandled type %d", type);
688 abort ();
689 }
690 }
691
692 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
693 Return the register number otherwise. *ISREG32 is set to one if the
694 register is 32-bit wide; *ISREGZERO is set to one if the register is
695 of type Z_32 or Z_64.
696 Note that this function does not issue any diagnostics. */
697
698 static int
699 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
700 int *isreg32, int *isregzero)
701 {
702 char *str = *ccp;
703 const reg_entry *reg = parse_reg (&str);
704
705 if (reg == NULL)
706 return PARSE_FAIL;
707
708 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
709 return PARSE_FAIL;
710
711 switch (reg->type)
712 {
713 case REG_TYPE_SP_32:
714 case REG_TYPE_SP_64:
715 if (reject_sp)
716 return PARSE_FAIL;
717 *isreg32 = reg->type == REG_TYPE_SP_32;
718 *isregzero = 0;
719 break;
720 case REG_TYPE_R_32:
721 case REG_TYPE_R_64:
722 *isreg32 = reg->type == REG_TYPE_R_32;
723 *isregzero = 0;
724 break;
725 case REG_TYPE_Z_32:
726 case REG_TYPE_Z_64:
727 if (reject_rz)
728 return PARSE_FAIL;
729 *isreg32 = reg->type == REG_TYPE_Z_32;
730 *isregzero = 1;
731 break;
732 default:
733 return PARSE_FAIL;
734 }
735
736 *ccp = str;
737
738 return reg->number;
739 }
740
741 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
742 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
743 otherwise return FALSE.
744
745 Accept only one occurrence of:
746 8b 16b 4h 8h 2s 4s 1d 2d
747 b h s d q */
748 static bfd_boolean
749 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
750 {
751 char *ptr = *str;
752 unsigned width;
753 unsigned element_size;
754 enum neon_el_type type;
755
756 /* skip '.' */
757 ptr++;
758
759 if (!ISDIGIT (*ptr))
760 {
761 width = 0;
762 goto elt_size;
763 }
764 width = strtoul (ptr, &ptr, 10);
765 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
766 {
767 first_error_fmt (_("bad size %d in vector width specifier"), width);
768 return FALSE;
769 }
770
771 elt_size:
772 switch (TOLOWER (*ptr))
773 {
774 case 'b':
775 type = NT_b;
776 element_size = 8;
777 break;
778 case 'h':
779 type = NT_h;
780 element_size = 16;
781 break;
782 case 's':
783 type = NT_s;
784 element_size = 32;
785 break;
786 case 'd':
787 type = NT_d;
788 element_size = 64;
789 break;
790 case 'q':
791 if (width == 1)
792 {
793 type = NT_q;
794 element_size = 128;
795 break;
796 }
797 /* fall through. */
798 default:
799 if (*ptr != '\0')
800 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
801 else
802 first_error (_("missing element size"));
803 return FALSE;
804 }
805 if (width != 0 && width * element_size != 64 && width * element_size != 128)
806 {
807 first_error_fmt (_
808 ("invalid element size %d and vector size combination %c"),
809 width, *ptr);
810 return FALSE;
811 }
812 ptr++;
813
814 parsed_type->type = type;
815 parsed_type->width = width;
816
817 *str = ptr;
818
819 return TRUE;
820 }
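/* For instance, ".4s" parses to type NT_s with width 4 (four 32-bit
   elements, a 128-bit arrangement), while a bare ".s" parses to NT_s with
   width 0, which the callers below interpret as "an element index must
   follow".  */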
821
822 /* Parse a single type, e.g. ".8b", leading period included.
823 Only applicable to Vn registers.
824
825 Return TRUE on success; otherwise return FALSE. */
826 static bfd_boolean
827 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
828 {
829 char *str = *ccp;
830
831 if (*str == '.')
832 {
833 if (! parse_neon_type_for_operand (vectype, &str))
834 {
835 first_error (_("vector type expected"));
836 return FALSE;
837 }
838 }
839 else
840 return FALSE;
841
842 *ccp = str;
843
844 return TRUE;
845 }
846
847 /* Parse a register of the type TYPE.
848
849 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
850 name or the parsed register is not of TYPE.
851
852 Otherwise return the register number, and optionally fill in the actual
853 type of the register in *RTYPE when multiple alternatives were given, and
854 return the register shape and element index information in *TYPEINFO.
855
856 IN_REG_LIST should be set with TRUE if the caller is parsing a register
857 list. */
858
859 static int
860 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
861 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
862 {
863 char *str = *ccp;
864 const reg_entry *reg = parse_reg (&str);
865 struct neon_type_el atype;
866 struct neon_type_el parsetype;
867 bfd_boolean is_typed_vecreg = FALSE;
868
869 atype.defined = 0;
870 atype.type = NT_invtype;
871 atype.width = -1;
872 atype.index = 0;
873
874 if (reg == NULL)
875 {
876 if (typeinfo)
877 *typeinfo = atype;
878 set_default_error ();
879 return PARSE_FAIL;
880 }
881
882 if (! aarch64_check_reg_type (reg, type))
883 {
884 DEBUG_TRACE ("reg type check failed");
885 set_default_error ();
886 return PARSE_FAIL;
887 }
888 type = reg->type;
889
890 if (type == REG_TYPE_VN
891 && parse_neon_operand_type (&parsetype, &str))
892 {
893 /* Register is of the form Vn.[bhsdq]. */
894 is_typed_vecreg = TRUE;
895
896 if (parsetype.width == 0)
897 /* Expect index. In the new scheme we cannot have
898 Vn.[bhsdq] represent a scalar. Therefore any
899 Vn.[bhsdq] should have an index following it.
900 Except in reglists, of course. */
901 atype.defined |= NTA_HASINDEX;
902 else
903 atype.defined |= NTA_HASTYPE;
904
905 atype.type = parsetype.type;
906 atype.width = parsetype.width;
907 }
908
909 if (skip_past_char (&str, '['))
910 {
911 expressionS exp;
912
913 /* Reject Sn[index] syntax. */
914 if (!is_typed_vecreg)
915 {
916 first_error (_("this type of register can't be indexed"));
917 return PARSE_FAIL;
918 }
919
920 if (in_reg_list == TRUE)
921 {
922 first_error (_("index not allowed inside register list"));
923 return PARSE_FAIL;
924 }
925
926 atype.defined |= NTA_HASINDEX;
927
928 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
929
930 if (exp.X_op != O_constant)
931 {
932 first_error (_("constant expression required"));
933 return PARSE_FAIL;
934 }
935
936 if (! skip_past_char (&str, ']'))
937 return PARSE_FAIL;
938
939 atype.index = exp.X_add_number;
940 }
941 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
942 {
943 /* Indexed vector register expected. */
944 first_error (_("indexed vector register expected"));
945 return PARSE_FAIL;
946 }
947
948 /* A vector reg Vn should be typed or indexed. */
949 if (type == REG_TYPE_VN && atype.defined == 0)
950 {
951 first_error (_("invalid use of vector register"));
952 }
953
954 if (typeinfo)
955 *typeinfo = atype;
956
957 if (rtype)
958 *rtype = type;
959
960 *ccp = str;
961
962 return reg->number;
963 }
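/* Examples (sketch): "v2.4s" returns 2 with *TYPEINFO recording NT_s,
   width 4 and NTA_HASTYPE; "v3.s[1]" returns 3 with NT_s, width 0,
   NTA_HASINDEX and index 1.  A plain "v4" with neither type nor index
   records the "invalid use of vector register" diagnostic.  */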
964
965 /* Parse register.
966
967 Return the register number on success; return PARSE_FAIL otherwise.
968
969 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
970 the register (e.g. NEON double or quad reg when either has been requested).
971
972 If this is a NEON vector register with additional type information, fill
973 in the struct pointed to by VECTYPE (if non-NULL).
974
975 This parser does not handle register lists. */
976
977 static int
978 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
979 aarch64_reg_type *rtype, struct neon_type_el *vectype)
980 {
981 struct neon_type_el atype;
982 char *str = *ccp;
983 int reg = parse_typed_reg (&str, type, rtype, &atype,
984 /*in_reg_list= */ FALSE);
985
986 if (reg == PARSE_FAIL)
987 return PARSE_FAIL;
988
989 if (vectype)
990 *vectype = atype;
991
992 *ccp = str;
993
994 return reg;
995 }
996
997 static inline bfd_boolean
998 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
999 {
1000 return
1001 e1.type == e2.type
1002 && e1.defined == e2.defined
1003 && e1.width == e2.width && e1.index == e2.index;
1004 }
1005
1006 /* This function parses the NEON register list. On success, it returns
1007 the parsed register list information in the following encoded format:
1008
1009 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1010 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1011
1012 The information of the register shape and/or index is returned in
1013 *VECTYPE.
1014
1015 It returns PARSE_FAIL if the register list is invalid.
1016
1017 The list contains one to four registers.
1018 Each register can be one of:
1019 <Vt>.<T>[<index>]
1020 <Vt>.<T>
1021 All <T> should be identical.
1022 All <index> should be identical.
1023 There are restrictions on <Vt> numbers which are checked later
1024 (by reg_list_valid_p). */
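/* Worked example (following the code below): "{v2.4s - v5.4s}" yields
   nb_regs == 4 with register numbers 2, 3, 4 and 5 packed at 5-bit
   intervals, i.e.

     ret_val = 2 | (3 << 5) | (4 << 10) | (5 << 15);
     result  = (ret_val << 2) | (4 - 1);	-- 0xa418b

   while *VECTYPE records the common ".4s" arrangement.  */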
1025
1026 static int
1027 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1028 {
1029 char *str = *ccp;
1030 int nb_regs;
1031 struct neon_type_el typeinfo, typeinfo_first;
1032 int val, val_range;
1033 int in_range;
1034 int ret_val;
1035 int i;
1036 bfd_boolean error = FALSE;
1037 bfd_boolean expect_index = FALSE;
1038
1039 if (*str != '{')
1040 {
1041 set_syntax_error (_("expecting {"));
1042 return PARSE_FAIL;
1043 }
1044 str++;
1045
1046 nb_regs = 0;
1047 typeinfo_first.defined = 0;
1048 typeinfo_first.type = NT_invtype;
1049 typeinfo_first.width = -1;
1050 typeinfo_first.index = 0;
1051 ret_val = 0;
1052 val = -1;
1053 val_range = -1;
1054 in_range = 0;
1055 do
1056 {
1057 if (in_range)
1058 {
1059 str++; /* skip over '-' */
1060 val_range = val;
1061 }
1062 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1063 /*in_reg_list= */ TRUE);
1064 if (val == PARSE_FAIL)
1065 {
1066 set_first_syntax_error (_("invalid vector register in list"));
1067 error = TRUE;
1068 continue;
1069 }
1070 /* reject [bhsd]n */
1071 if (typeinfo.defined == 0)
1072 {
1073 set_first_syntax_error (_("invalid scalar register in list"));
1074 error = TRUE;
1075 continue;
1076 }
1077
1078 if (typeinfo.defined & NTA_HASINDEX)
1079 expect_index = TRUE;
1080
1081 if (in_range)
1082 {
1083 if (val < val_range)
1084 {
1085 set_first_syntax_error
1086 (_("invalid range in vector register list"));
1087 error = TRUE;
1088 }
1089 val_range++;
1090 }
1091 else
1092 {
1093 val_range = val;
1094 if (nb_regs == 0)
1095 typeinfo_first = typeinfo;
1096 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1097 {
1098 set_first_syntax_error
1099 (_("type mismatch in vector register list"));
1100 error = TRUE;
1101 }
1102 }
1103 if (! error)
1104 for (i = val_range; i <= val; i++)
1105 {
1106 ret_val |= i << (5 * nb_regs);
1107 nb_regs++;
1108 }
1109 in_range = 0;
1110 }
1111 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1112
1113 skip_whitespace (str);
1114 if (*str != '}')
1115 {
1116 set_first_syntax_error (_("end of vector register list not found"));
1117 error = TRUE;
1118 }
1119 str++;
1120
1121 skip_whitespace (str);
1122
1123 if (expect_index)
1124 {
1125 if (skip_past_char (&str, '['))
1126 {
1127 expressionS exp;
1128
1129 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1130 if (exp.X_op != O_constant)
1131 {
1132 set_first_syntax_error (_("constant expression required."));
1133 error = TRUE;
1134 }
1135 if (! skip_past_char (&str, ']'))
1136 error = TRUE;
1137 else
1138 typeinfo_first.index = exp.X_add_number;
1139 }
1140 else
1141 {
1142 set_first_syntax_error (_("expected index"));
1143 error = TRUE;
1144 }
1145 }
1146
1147 if (nb_regs > 4)
1148 {
1149 set_first_syntax_error (_("too many registers in vector register list"));
1150 error = TRUE;
1151 }
1152 else if (nb_regs == 0)
1153 {
1154 set_first_syntax_error (_("empty vector register list"));
1155 error = TRUE;
1156 }
1157
1158 *ccp = str;
1159 if (! error)
1160 *vectype = typeinfo_first;
1161
1162 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1163 }
1164
1165 /* Directives: register aliases. */
1166
1167 static reg_entry *
1168 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1169 {
1170 reg_entry *new;
1171 const char *name;
1172
1173 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1174 {
1175 if (new->builtin)
1176 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1177 str);
1178
1179 /* Only warn about a redefinition if it's not defined as the
1180 same register. */
1181 else if (new->number != number || new->type != type)
1182 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1183
1184 return NULL;
1185 }
1186
1187 name = xstrdup (str);
1188 new = xmalloc (sizeof (reg_entry));
1189
1190 new->name = name;
1191 new->number = number;
1192 new->type = type;
1193 new->builtin = FALSE;
1194
1195 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1196 abort ();
1197
1198 return new;
1199 }
1200
1201 /* Look for the .req directive. This is of the form:
1202
1203 new_register_name .req existing_register_name
1204
1205 If we find one, or if it looks sufficiently like one that we want to
1206 handle any error here, return TRUE. Otherwise return FALSE. */
1207
1208 static bfd_boolean
1209 create_register_alias (char *newname, char *p)
1210 {
1211 const reg_entry *old;
1212 char *oldname, *nbuf;
1213 size_t nlen;
1214
1215 /* The input scrubber ensures that whitespace after the mnemonic is
1216 collapsed to single spaces. */
1217 oldname = p;
1218 if (strncmp (oldname, " .req ", 6) != 0)
1219 return FALSE;
1220
1221 oldname += 6;
1222 if (*oldname == '\0')
1223 return FALSE;
1224
1225 old = hash_find (aarch64_reg_hsh, oldname);
1226 if (!old)
1227 {
1228 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1229 return TRUE;
1230 }
1231
1232 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1233 the desired alias name, and p points to its end. If not, then
1234 the desired alias name is in the global original_case_string. */
1235 #ifdef TC_CASE_SENSITIVE
1236 nlen = p - newname;
1237 #else
1238 newname = original_case_string;
1239 nlen = strlen (newname);
1240 #endif
1241
1242 nbuf = alloca (nlen + 1);
1243 memcpy (nbuf, newname, nlen);
1244 nbuf[nlen] = '\0';
1245
1246 /* Create aliases under the new name as stated; an all-lowercase
1247 version of the new name; and an all-uppercase version of the new
1248 name. */
1249 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1250 {
1251 for (p = nbuf; *p; p++)
1252 *p = TOUPPER (*p);
1253
1254 if (strncmp (nbuf, newname, nlen))
1255 {
1256 /* If this attempt to create an additional alias fails, do not bother
1257 trying to create the all-lower case alias. We will fail and issue
1258 a second, duplicate error message. This situation arises when the
1259 programmer does something like:
1260 foo .req r0
1261 Foo .req r1
1262 The second .req creates the "Foo" alias but then fails to create
1263 the artificial FOO alias because it has already been created by the
1264 first .req. */
1265 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1266 return TRUE;
1267 }
1268
1269 for (p = nbuf; *p; p++)
1270 *p = TOLOWER (*p);
1271
1272 if (strncmp (nbuf, newname, nlen))
1273 insert_reg_alias (nbuf, old->number, old->type);
1274 }
1275
1276 return TRUE;
1277 }
1278
1279 /* Should never be called, as .req goes between the alias and the
1280 register name, not at the beginning of the line. */
1281 static void
1282 s_req (int a ATTRIBUTE_UNUSED)
1283 {
1284 as_bad (_("invalid syntax for .req directive"));
1285 }
1286
1287 /* The .unreq directive deletes an alias which was previously defined
1288 by .req. For example:
1289
1290 my_alias .req r11
1291 .unreq my_alias */
1292
1293 static void
1294 s_unreq (int a ATTRIBUTE_UNUSED)
1295 {
1296 char *name;
1297 char saved_char;
1298
1299 name = input_line_pointer;
1300
1301 while (*input_line_pointer != 0
1302 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1303 ++input_line_pointer;
1304
1305 saved_char = *input_line_pointer;
1306 *input_line_pointer = 0;
1307
1308 if (!*name)
1309 as_bad (_("invalid syntax for .unreq directive"));
1310 else
1311 {
1312 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1313
1314 if (!reg)
1315 as_bad (_("unknown register alias '%s'"), name);
1316 else if (reg->builtin)
1317 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1318 name);
1319 else
1320 {
1321 char *p;
1322 char *nbuf;
1323
1324 hash_delete (aarch64_reg_hsh, name, FALSE);
1325 free ((char *) reg->name);
1326 free (reg);
1327
1328 /* Also locate the all upper case and all lower case versions.
1329 Do not complain if we cannot find one or the other as it
1330 was probably deleted above. */
1331
1332 nbuf = strdup (name);
1333 for (p = nbuf; *p; p++)
1334 *p = TOUPPER (*p);
1335 reg = hash_find (aarch64_reg_hsh, nbuf);
1336 if (reg)
1337 {
1338 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1339 free ((char *) reg->name);
1340 free (reg);
1341 }
1342
1343 for (p = nbuf; *p; p++)
1344 *p = TOLOWER (*p);
1345 reg = hash_find (aarch64_reg_hsh, nbuf);
1346 if (reg)
1347 {
1348 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1349 free ((char *) reg->name);
1350 free (reg);
1351 }
1352
1353 free (nbuf);
1354 }
1355 }
1356
1357 *input_line_pointer = saved_char;
1358 demand_empty_rest_of_line ();
1359 }
1360
1361 /* Directives: Instruction set selection. */
1362
1363 #ifdef OBJ_ELF
1364 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1365 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1366 Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
1367 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
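/* For example (sketch), a text section that mixes data and code,

     .word  0x11223344	<- "$d" mapping symbol emitted here
     add    x0, x1, x2	<- "$x" mapping symbol emitted here

   gets a "$d" symbol at the start of the data and a "$x" symbol at the
   transition back to instructions, so that consumers such as disassemblers
   can tell the two apart.  */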
1368
1369 /* Create a new mapping symbol for the transition to STATE. */
1370
1371 static void
1372 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1373 {
1374 symbolS *symbolP;
1375 const char *symname;
1376 int type;
1377
1378 switch (state)
1379 {
1380 case MAP_DATA:
1381 symname = "$d";
1382 type = BSF_NO_FLAGS;
1383 break;
1384 case MAP_INSN:
1385 symname = "$x";
1386 type = BSF_NO_FLAGS;
1387 break;
1388 default:
1389 abort ();
1390 }
1391
1392 symbolP = symbol_new (symname, now_seg, value, frag);
1393 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1394
1395 /* Save the mapping symbols for future reference. Also check that
1396 we do not place two mapping symbols at the same offset within a
1397 frag. We'll handle overlap between frags in
1398 check_mapping_symbols.
1399
1400 If .fill or another data filling directive generates zero-sized data,
1401 the mapping symbol for the following code will have the same value
1402 as the one generated for the data filling directive. In this case,
1403 we replace the old symbol with the new one at the same address. */
1404 if (value == 0)
1405 {
1406 if (frag->tc_frag_data.first_map != NULL)
1407 {
1408 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1409 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1410 &symbol_lastP);
1411 }
1412 frag->tc_frag_data.first_map = symbolP;
1413 }
1414 if (frag->tc_frag_data.last_map != NULL)
1415 {
1416 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1417 S_GET_VALUE (symbolP));
1418 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1419 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1420 &symbol_lastP);
1421 }
1422 frag->tc_frag_data.last_map = symbolP;
1423 }
1424
1425 /* We must sometimes convert a region marked as code to data during
1426 code alignment, if an odd number of bytes have to be padded. The
1427 code mapping symbol is pushed to an aligned address. */
1428
1429 static void
1430 insert_data_mapping_symbol (enum mstate state,
1431 valueT value, fragS * frag, offsetT bytes)
1432 {
1433 /* If there was already a mapping symbol, remove it. */
1434 if (frag->tc_frag_data.last_map != NULL
1435 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1436 frag->fr_address + value)
1437 {
1438 symbolS *symp = frag->tc_frag_data.last_map;
1439
1440 if (value == 0)
1441 {
1442 know (frag->tc_frag_data.first_map == symp);
1443 frag->tc_frag_data.first_map = NULL;
1444 }
1445 frag->tc_frag_data.last_map = NULL;
1446 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1447 }
1448
1449 make_mapping_symbol (MAP_DATA, value, frag);
1450 make_mapping_symbol (state, value + bytes, frag);
1451 }
1452
1453 static void mapping_state_2 (enum mstate state, int max_chars);
1454
1455 /* Set the mapping state to STATE. Only call this when about to
1456 emit some STATE bytes to the file. */
1457
1458 void
1459 mapping_state (enum mstate state)
1460 {
1461 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1462
1463 if (state == MAP_INSN)
1464 /* AArch64 instructions require 4-byte alignment. When emitting
1465 instructions into any section, record the appropriate section
1466 alignment. */
1467 record_alignment (now_seg, 2);
1468
1469 if (mapstate == state)
1470 /* The mapping symbol has already been emitted.
1471 There is nothing else to do. */
1472 return;
1473
1474 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1475 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1476 /* Emit MAP_DATA within executable section in order. Otherwise, it will be
1477 evaluated later in the next else. */
1478 return;
1479 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1480 {
1481 /* Only add the symbol if the offset is > 0:
1482 if we're at the first frag, check its size > 0;
1483 if we're not at the first frag, then for sure
1484 the offset is > 0. */
1485 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1486 const int add_symbol = (frag_now != frag_first)
1487 || (frag_now_fix () > 0);
1488
1489 if (add_symbol)
1490 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1491 }
1492 #undef TRANSITION
1493
1494 mapping_state_2 (state, 0);
1495 }
1496
1497 /* Same as mapping_state, but MAX_CHARS bytes have already been
1498 allocated. Put the mapping symbol that far back. */
1499
1500 static void
1501 mapping_state_2 (enum mstate state, int max_chars)
1502 {
1503 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1504
1505 if (!SEG_NORMAL (now_seg))
1506 return;
1507
1508 if (mapstate == state)
1509 /* The mapping symbol has already been emitted.
1510 There is nothing else to do. */
1511 return;
1512
1513 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1514 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1515 }
1516 #else
1517 #define mapping_state(x) /* nothing */
1518 #define mapping_state_2(x, y) /* nothing */
1519 #endif
1520
1521 /* Directives: sectioning and alignment. */
1522
1523 static void
1524 s_bss (int ignore ATTRIBUTE_UNUSED)
1525 {
1526 /* We don't support putting frags in the BSS segment, we fake it by
1527 marking in_bss, then looking at s_skip for clues. */
1528 subseg_set (bss_section, 0);
1529 demand_empty_rest_of_line ();
1530 mapping_state (MAP_DATA);
1531 }
1532
1533 static void
1534 s_even (int ignore ATTRIBUTE_UNUSED)
1535 {
1536 /* Never make frag if expect extra pass. */
1537 if (!need_pass_2)
1538 frag_align (1, 0, 0);
1539
1540 record_alignment (now_seg, 1);
1541
1542 demand_empty_rest_of_line ();
1543 }
1544
1545 /* Directives: Literal pools. */
1546
1547 static literal_pool *
1548 find_literal_pool (int size)
1549 {
1550 literal_pool *pool;
1551
1552 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1553 {
1554 if (pool->section == now_seg
1555 && pool->sub_section == now_subseg && pool->size == size)
1556 break;
1557 }
1558
1559 return pool;
1560 }
1561
1562 static literal_pool *
1563 find_or_make_literal_pool (int size)
1564 {
1565 /* Next literal pool ID number. */
1566 static unsigned int latest_pool_num = 1;
1567 literal_pool *pool;
1568
1569 pool = find_literal_pool (size);
1570
1571 if (pool == NULL)
1572 {
1573 /* Create a new pool. */
1574 pool = xmalloc (sizeof (*pool));
1575 if (!pool)
1576 return NULL;
1577
1578 /* Currently we always put the literal pool in the current text
1579 section. If we were generating "small" model code where we
1580 knew that all code and initialised data was within 1MB then
1581 we could output literals to mergeable, read-only data
1582 sections. */
1583
1584 pool->next_free_entry = 0;
1585 pool->section = now_seg;
1586 pool->sub_section = now_subseg;
1587 pool->size = size;
1588 pool->next = list_of_pools;
1589 pool->symbol = NULL;
1590
1591 /* Add it to the list. */
1592 list_of_pools = pool;
1593 }
1594
1595 /* New pools, and emptied pools, will have a NULL symbol. */
1596 if (pool->symbol == NULL)
1597 {
1598 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1599 (valueT) 0, &zero_address_frag);
1600 pool->id = latest_pool_num++;
1601 }
1602
1603 /* Done. */
1604 return pool;
1605 }
1606
1607 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1608 Return TRUE on success, otherwise return FALSE. */
1609 static bfd_boolean
1610 add_to_lit_pool (expressionS *exp, int size)
1611 {
1612 literal_pool *pool;
1613 unsigned int entry;
1614
1615 pool = find_or_make_literal_pool (size);
1616
1617 /* Check if this literal value is already in the pool. */
1618 for (entry = 0; entry < pool->next_free_entry; entry++)
1619 {
1620 expressionS * litexp = & pool->literals[entry].exp;
1621
1622 if ((litexp->X_op == exp->X_op)
1623 && (exp->X_op == O_constant)
1624 && (litexp->X_add_number == exp->X_add_number)
1625 && (litexp->X_unsigned == exp->X_unsigned))
1626 break;
1627
1628 if ((litexp->X_op == exp->X_op)
1629 && (exp->X_op == O_symbol)
1630 && (litexp->X_add_number == exp->X_add_number)
1631 && (litexp->X_add_symbol == exp->X_add_symbol)
1632 && (litexp->X_op_symbol == exp->X_op_symbol))
1633 break;
1634 }
1635
1636 /* Do we need to create a new entry? */
1637 if (entry == pool->next_free_entry)
1638 {
1639 if (entry >= MAX_LITERAL_POOL_SIZE)
1640 {
1641 set_syntax_error (_("literal pool overflow"));
1642 return FALSE;
1643 }
1644
1645 pool->literals[entry].exp = *exp;
1646 pool->next_free_entry += 1;
1647 if (exp->X_op == O_big)
1648 {
1649 /* PR 16688: Bignums are held in a single global array. We must
1650 copy and preserve that value now, before it is overwritten. */
1651 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1652 memcpy (pool->literals[entry].bignum, generic_bignum,
1653 CHARS_PER_LITTLENUM * exp->X_add_number);
1654 }
1655 else
1656 pool->literals[entry].bignum = NULL;
1657 }
1658
1659 exp->X_op = O_symbol;
1660 exp->X_add_number = ((int) entry) * size;
1661 exp->X_add_symbol = pool->symbol;
1662
1663 return TRUE;
1664 }
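/* Note for callers: on success the expression has been rewritten to refer
   to the pool, i.e. for the Nth distinct literal (counting from 0) of a
   given SIZE, *EXP becomes pool->symbol + N * SIZE.  The value itself is
   emitted later, when s_ltorg dumps the pool and places pool->symbol at
   its start.  */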
1665
1666 /* Can't use symbol_new here, so have to create a symbol and then at
1667 a later date assign it a value. That's what these functions do. */
1668
1669 static void
1670 symbol_locate (symbolS * symbolP,
1671 const char *name,/* It is copied, the caller can modify. */
1672 segT segment, /* Segment identifier (SEG_<something>). */
1673 valueT valu, /* Symbol value. */
1674 fragS * frag) /* Associated fragment. */
1675 {
1676 size_t name_length;
1677 char *preserved_copy_of_name;
1678
1679 name_length = strlen (name) + 1; /* +1 for \0. */
1680 obstack_grow (&notes, name, name_length);
1681 preserved_copy_of_name = obstack_finish (&notes);
1682
1683 #ifdef tc_canonicalize_symbol_name
1684 preserved_copy_of_name =
1685 tc_canonicalize_symbol_name (preserved_copy_of_name);
1686 #endif
1687
1688 S_SET_NAME (symbolP, preserved_copy_of_name);
1689
1690 S_SET_SEGMENT (symbolP, segment);
1691 S_SET_VALUE (symbolP, valu);
1692 symbol_clear_list_pointers (symbolP);
1693
1694 symbol_set_frag (symbolP, frag);
1695
1696 /* Link to end of symbol chain. */
1697 {
1698 extern int symbol_table_frozen;
1699
1700 if (symbol_table_frozen)
1701 abort ();
1702 }
1703
1704 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1705
1706 obj_symbol_new_hook (symbolP);
1707
1708 #ifdef tc_symbol_new_hook
1709 tc_symbol_new_hook (symbolP);
1710 #endif
1711
1712 #ifdef DEBUG_SYMS
1713 verify_symbol_chain (symbol_rootP, symbol_lastP);
1714 #endif /* DEBUG_SYMS */
1715 }
1716
1717
1718 static void
1719 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1720 {
1721 unsigned int entry;
1722 literal_pool *pool;
1723 char sym_name[20];
1724 int align;
1725
1726 for (align = 2; align <= 4; align++)
1727 {
1728 int size = 1 << align;
1729
1730 pool = find_literal_pool (size);
1731 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1732 continue;
1733
1734 mapping_state (MAP_DATA);
1735
1736 /* Align pool as you have word accesses.
1737 Only make a frag if we have to. */
1738 if (!need_pass_2)
1739 frag_align (align, 0, 0);
1740
1741 record_alignment (now_seg, align);
1742
1743 sprintf (sym_name, "$$lit_\002%x", pool->id);
1744
1745 symbol_locate (pool->symbol, sym_name, now_seg,
1746 (valueT) frag_now_fix (), frag_now);
1747 symbol_table_insert (pool->symbol);
1748
1749 for (entry = 0; entry < pool->next_free_entry; entry++)
1750 {
1751 expressionS * exp = & pool->literals[entry].exp;
1752
1753 if (exp->X_op == O_big)
1754 {
1755 /* PR 16688: Restore the global bignum value. */
1756 gas_assert (pool->literals[entry].bignum != NULL);
1757 memcpy (generic_bignum, pool->literals[entry].bignum,
1758 CHARS_PER_LITTLENUM * exp->X_add_number);
1759 }
1760
1761 /* First output the expression in the instruction to the pool. */
1762 emit_expr (exp, size); /* .word|.xword */
1763
1764 if (exp->X_op == O_big)
1765 {
1766 free (pool->literals[entry].bignum);
1767 pool->literals[entry].bignum = NULL;
1768 }
1769 }
1770
1771 /* Mark the pool as empty. */
1772 pool->next_free_entry = 0;
1773 pool->symbol = NULL;
1774 }
1775 }
1776
1777 #ifdef OBJ_ELF
1778 /* Forward declarations for functions below, in the MD interface
1779 section. */
1780 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1781 static struct reloc_table_entry * find_reloc_table_entry (char **);
1782
1783 /* Directives: Data. */
1784 /* N.B. the support for relocation suffix in this directive needs to be
1785 implemented properly. */
1786
1787 static void
1788 s_aarch64_elf_cons (int nbytes)
1789 {
1790 expressionS exp;
1791
1792 #ifdef md_flush_pending_output
1793 md_flush_pending_output ();
1794 #endif
1795
1796 if (is_it_end_of_statement ())
1797 {
1798 demand_empty_rest_of_line ();
1799 return;
1800 }
1801
1802 #ifdef md_cons_align
1803 md_cons_align (nbytes);
1804 #endif
1805
1806 mapping_state (MAP_DATA);
1807 do
1808 {
1809 struct reloc_table_entry *reloc;
1810
1811 expression (&exp);
1812
1813 if (exp.X_op != O_symbol)
1814 emit_expr (&exp, (unsigned int) nbytes);
1815 else
1816 {
1817 skip_past_char (&input_line_pointer, '#');
1818 if (skip_past_char (&input_line_pointer, ':'))
1819 {
1820 reloc = find_reloc_table_entry (&input_line_pointer);
1821 if (reloc == NULL)
1822 as_bad (_("unrecognized relocation suffix"));
1823 else
1824 as_bad (_("unimplemented relocation suffix"));
1825 ignore_rest_of_line ();
1826 return;
1827 }
1828 else
1829 emit_expr (&exp, (unsigned int) nbytes);
1830 }
1831 }
1832 while (*input_line_pointer++ == ',');
1833
1834 /* Put terminator back into stream. */
1835 input_line_pointer--;
1836 demand_empty_rest_of_line ();
1837 }
1838
1839 #endif /* OBJ_ELF */
1840
1841 /* Output a 32-bit word, but mark as an instruction. */
1842
1843 static void
1844 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1845 {
1846 expressionS exp;
1847
1848 #ifdef md_flush_pending_output
1849 md_flush_pending_output ();
1850 #endif
1851
1852 if (is_it_end_of_statement ())
1853 {
1854 demand_empty_rest_of_line ();
1855 return;
1856 }
1857
1858 /* Sections are assumed to start aligned. In an executable section, there is no
1859 MAP_DATA symbol pending. So we only align the address during
1860 MAP_DATA --> MAP_INSN transition.
1861 For other sections, this is not guaranteed. */
1862 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1863 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1864 frag_align_code (2, 0);
1865
1866 #ifdef OBJ_ELF
1867 mapping_state (MAP_INSN);
1868 #endif
1869
1870 do
1871 {
1872 expression (&exp);
1873 if (exp.X_op != O_constant)
1874 {
1875 as_bad (_("constant expression required"));
1876 ignore_rest_of_line ();
1877 return;
1878 }
1879
1880 if (target_big_endian)
1881 {
1882 unsigned int val = exp.X_add_number;
1883 exp.X_add_number = SWAP_32 (val);
1884 }
1885 emit_expr (&exp, 4);
1886 }
1887 while (*input_line_pointer++ == ',');
1888
1889 /* Put terminator back into stream. */
1890 input_line_pointer--;
1891 demand_empty_rest_of_line ();
1892 }
1893
1894 #ifdef OBJ_ELF
1895 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1896
1897 static void
1898 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1899 {
1900 expressionS exp;
1901
1902 /* Since we're just labelling the code, there's no need to define a
1903 mapping symbol. */
1904 expression (&exp);
1905 /* Make sure there is enough room in this frag for the following
1906 blr. This trick only works if the blr follows immediately after
1907 the .tlsdesccall directive. */
1908 frag_grow (4);
1909 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1910 BFD_RELOC_AARCH64_TLSDESC_CALL);
1911
1912 demand_empty_rest_of_line ();
1913 }
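/* A typical TLS descriptor sequence using this directive looks like the
   following sketch (relocation operators as used elsewhere in GAS's
   AArch64 TLSDESC support):

     adrp x0, :tlsdesc:var
     ldr  x1, [x0, #:tlsdesc_lo12:var]
     add  x0, x0, #:tlsdesc_lo12:var
     .tlsdesccall var
     blr  x1

   with the BLR immediately following the directive, as required above.  */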
1914 #endif /* OBJ_ELF */
1915
1916 static void s_aarch64_arch (int);
1917 static void s_aarch64_cpu (int);
1918 static void s_aarch64_arch_extension (int);
1919
1920 /* This table describes all the machine specific pseudo-ops the assembler
1921 has to support. The fields are:
1922 pseudo-op name without dot
1923 function to call to execute this pseudo-op
1924 Integer arg to pass to the function. */
1925
1926 const pseudo_typeS md_pseudo_table[] = {
1927 /* Never called because '.req' does not start a line. */
1928 {"req", s_req, 0},
1929 {"unreq", s_unreq, 0},
1930 {"bss", s_bss, 0},
1931 {"even", s_even, 0},
1932 {"ltorg", s_ltorg, 0},
1933 {"pool", s_ltorg, 0},
1934 {"cpu", s_aarch64_cpu, 0},
1935 {"arch", s_aarch64_arch, 0},
1936 {"arch_extension", s_aarch64_arch_extension, 0},
1937 {"inst", s_aarch64_inst, 0},
1938 #ifdef OBJ_ELF
1939 {"tlsdesccall", s_tlsdesccall, 0},
1940 {"word", s_aarch64_elf_cons, 4},
1941 {"long", s_aarch64_elf_cons, 4},
1942 {"xword", s_aarch64_elf_cons, 8},
1943 {"dword", s_aarch64_elf_cons, 8},
1944 #endif
1945 {0, 0, 0}
1946 };
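/* Illustrative uses of some of the directives above (a sketch):

     .xword  0x1122334455667788   handled by s_aarch64_elf_cons, nbytes == 8
     .inst   0xd503201f           emitted as code (this word is a NOP)
     .ltorg                       dumps any pending literal pools here  */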
1947 \f
1948
1949 /* Check whether STR points to a register name followed by a comma or the
1950 end of line; REG_TYPE indicates which register types are checked
1951 against. Return TRUE if STR is such a register name; otherwise return
1952 FALSE. The function does not intend to produce any diagnostics, but since
1953 the register parser aarch64_reg_parse, which is called by this function,
1954 does produce diagnostics, we call clear_error to clear any diagnostics
1955 that may be generated by aarch64_reg_parse.
1956 Also, the function returns FALSE directly if there is any user error
1957 present at the function entry. This prevents the existing diagnostics
1958 state from being spoiled.
1959 The function currently serves parse_constant_immediate and
1960 parse_big_immediate only. */
1961 static bfd_boolean
1962 reg_name_p (char *str, aarch64_reg_type reg_type)
1963 {
1964 int reg;
1965
1966 /* Prevent the diagnostics state from being spoiled. */
1967 if (error_p ())
1968 return FALSE;
1969
1970 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1971
1972 /* Clear the parsing error that may be set by the reg parser. */
1973 clear_error ();
1974
1975 if (reg == PARSE_FAIL)
1976 return FALSE;
1977
1978 skip_whitespace (str);
1979 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1980 return TRUE;
1981
1982 return FALSE;
1983 }
1984
1985 /* Parser functions used exclusively in instruction operands. */
1986
1987 /* Parse an immediate expression which may not be constant.
1988
1989 To prevent the expression parser from pushing a register name
1990 into the symbol table as an undefined symbol, firstly a check is
1991 done to find out whether STR is a valid register name followed
1992 by a comma or the end of line. Return FALSE if STR is such a
1993 string. */
1994
1995 static bfd_boolean
1996 parse_immediate_expression (char **str, expressionS *exp)
1997 {
1998 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1999 {
2000 set_recoverable_error (_("immediate operand required"));
2001 return FALSE;
2002 }
2003
2004 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2005
2006 if (exp->X_op == O_absent)
2007 {
2008 set_fatal_syntax_error (_("missing immediate expression"));
2009 return FALSE;
2010 }
2011
2012 return TRUE;
2013 }
2014
2015 /* Constant immediate-value read function for use in insn parsing.
2016 STR points to the beginning of the immediate (with the optional
2017 leading #); *VAL receives the value.
2018
2019 Return TRUE on success; otherwise return FALSE. */
2020
2021 static bfd_boolean
2022 parse_constant_immediate (char **str, int64_t * val)
2023 {
2024 expressionS exp;
2025
2026 if (! parse_immediate_expression (str, &exp))
2027 return FALSE;
2028
2029 if (exp.X_op != O_constant)
2030 {
2031 set_syntax_error (_("constant expression required"));
2032 return FALSE;
2033 }
2034
2035 *val = exp.X_add_number;
2036 return TRUE;
2037 }
2038
2039 static uint32_t
2040 encode_imm_float_bits (uint32_t imm)
2041 {
2042 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2043 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2044 }
2045
2046 /* Return TRUE if the single-precision floating-point value encoded in IMM
2047 can be expressed in the AArch64 8-bit signed floating-point format with
2048 3-bit exponent and normalized 4 bits of precision; in other words, the
2049 floating-point value must be expressible as
2050 (+/-) n / 16 * power (2, r)
2051 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2052
2053 static bfd_boolean
2054 aarch64_imm_float_p (uint32_t imm)
2055 {
2056 /* If a single-precision floating-point value has the following bit
2057 pattern, it can be expressed in the AArch64 8-bit floating-point
2058 format:
2059
2060 3 32222222 2221111111111
2061 1 09876543 21098765432109876543210
2062 n Eeeeeexx xxxx0000000000000000000
2063
2064 where n, e and each x are either 0 or 1 independently, with
2065 E == ~ e. */
2066
2067 uint32_t pattern;
2068
2069 /* Prepare the pattern for 'Eeeeee'. */
2070 if (((imm >> 30) & 0x1) == 0)
2071 pattern = 0x3e000000;
2072 else
2073 pattern = 0x40000000;
2074
2075 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2076 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2077 }
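
/* An illustrative sketch, not part of the original source: a worked example
   of the check above.  1.0f is 0x3f800000; bit 30 is 0, so the expected
   'Eeeeee' pattern is 0x3e000000, the low 19 bits are zero and the test
   passes, and encode_imm_float_bits maps the value to the 8-bit FMOV
   immediate 0x70.  The guard macro and function name below are hypothetical
   and only mark this as an example.  */
#ifdef AARCH64_FP_IMM_EXAMPLE
static void
aarch64_imm_float_example (void)
{
  uint32_t one = 0x3f800000;  /* IEEE754 single-precision 1.0f.  */
  gas_assert (aarch64_imm_float_p (one));
  gas_assert (encode_imm_float_bits (one) == 0x70);
}
#endif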
2078
2079 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2080
2081 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2082 8-bit signed floating-point format with 3-bit exponent and normalized 4
2083 bits of precision (i.e. can be used in an FMOV instruction); return the
2084 equivalent single-precision encoding in *FPWORD.
2085
2086 Otherwise return FALSE. */
2087
2088 static bfd_boolean
2089 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2090 {
2091 /* If a double-precision floating-point value has the following bit
2092 pattern, it can be expressed in the AArch64 8-bit floating-point
2093 format:
2094
2095 6 66655555555 554444444...21111111111
2096 3 21098765432 109876543...098765432109876543210
2097 n Eeeeeeeeexx xxxx00000...000000000000000000000
2098
2099 where n, e and each x are either 0 or 1 independently, with
2100 E == ~ e. */
2101
2102 uint32_t pattern;
2103 uint32_t high32 = imm >> 32;
2104
2105 /* Lower 32 bits need to be 0s. */
2106 if ((imm & 0xffffffff) != 0)
2107 return FALSE;
2108
2109 /* Prepare the pattern for 'Eeeeeeeee'. */
2110 if (((high32 >> 30) & 0x1) == 0)
2111 pattern = 0x3fc00000;
2112 else
2113 pattern = 0x40000000;
2114
2115 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2116 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2117 {
2118 /* Convert to the single-precision encoding.
2119 i.e. convert
2120 n Eeeeeeeeexx xxxx00000...000000000000000000000
2121 to
2122 n Eeeeeexx xxxx0000000000000000000. */
2123 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2124 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2125 return TRUE;
2126 }
2127 else
2128 return FALSE;
2129 }
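
/* An illustrative sketch, not part of the original source: for the IEEE754
   double-precision value 1.0 (0x3ff0000000000000) the low 32 bits are zero,
   HIGH32 is 0x3ff00000, bit 62 is 0 so the expected pattern is 0x3fc00000,
   and the routine above returns TRUE with *FPWORD set to 0x3f800000, i.e.
   single-precision 1.0f.  The guard macro and function name are
   hypothetical.  */
#ifdef AARCH64_FP_IMM_EXAMPLE
static void
aarch64_double_fmovable_example (void)
{
  uint32_t fpword;
  gas_assert (aarch64_double_precision_fmovable (0x3ff0000000000000ULL,
                                                 &fpword));
  gas_assert (fpword == 0x3f800000);
}
#endif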
2130
2131 /* Parse a floating-point immediate. Return TRUE on success and return the
2132 value in *IMMED in the format of IEEE754 single-precision encoding.
2133 *CCP points to the start of the string; DP_P is TRUE when the immediate
2134 is expected to be in double-precision (N.B. this only matters when
2135 hexadecimal representation is involved).
2136
2137 N.B. 0.0 is accepted by this function. */
2138
2139 static bfd_boolean
2140 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2141 {
2142 char *str = *ccp;
2143 char *fpnum;
2144 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2145 int found_fpchar = 0;
2146 int64_t val = 0;
2147 unsigned fpword = 0;
2148 bfd_boolean hex_p = FALSE;
2149
2150 skip_past_char (&str, '#');
2151
2152 fpnum = str;
2153 skip_whitespace (fpnum);
2154
2155 if (strncmp (fpnum, "0x", 2) == 0)
2156 {
2157 /* Support the hexadecimal representation of the IEEE754 encoding.
2158 Double-precision is expected when DP_P is TRUE, otherwise the
2159 representation should be in single-precision. */
2160 if (! parse_constant_immediate (&str, &val))
2161 goto invalid_fp;
2162
2163 if (dp_p)
2164 {
2165 if (! aarch64_double_precision_fmovable (val, &fpword))
2166 goto invalid_fp;
2167 }
2168 else if ((uint64_t) val > 0xffffffff)
2169 goto invalid_fp;
2170 else
2171 fpword = val;
2172
2173 hex_p = TRUE;
2174 }
2175 else
2176 {
2177 /* We must not accidentally parse an integer as a floating-point number.
2178 Make sure that the value we parse is not an integer by checking for
2179 special characters '.' or 'e'. */
2180 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2181 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2182 {
2183 found_fpchar = 1;
2184 break;
2185 }
2186
2187 if (!found_fpchar)
2188 return FALSE;
2189 }
2190
2191 if (! hex_p)
2192 {
2193 int i;
2194
2195 if ((str = atof_ieee (str, 's', words)) == NULL)
2196 goto invalid_fp;
2197
2198 /* Our FP word must be 32 bits (single-precision FP). */
2199 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2200 {
2201 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2202 fpword |= words[i];
2203 }
2204 }
2205
2206 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2207 {
2208 *immed = fpword;
2209 *ccp = str;
2210 return TRUE;
2211 }
2212
2213 invalid_fp:
2214 set_fatal_syntax_error (_("invalid floating-point constant"));
2215 return FALSE;
2216 }
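
/* Illustrative examples, not from the original source, of immediates the
   routine above accepts:

     fmov     s0, #1.0                  // decimal form; *IMMED = 0x3f800000
     fmov     s0, #0x3f800000           // hexadecimal IEEE754 single encoding
     fmov     d0, #0x3ff0000000000000   // hexadecimal double; DP_P is TRUE

   In each case *IMMED receives the single-precision word 0x3f800000.  */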
2217
2218 /* Less-generic immediate-value read function with the possibility of loading
2219 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2220 instructions.
2221
2222 To prevent the expression parser from pushing a register name into the
2223 symbol table as an undefined symbol, a check is firstly done to find
2224 out whether STR is a valid register name followed by a comma or the end
2225 of line. Return FALSE if STR is such a register. */
2226
2227 static bfd_boolean
2228 parse_big_immediate (char **str, int64_t *imm)
2229 {
2230 char *ptr = *str;
2231
2232 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2233 {
2234 set_syntax_error (_("immediate operand required"));
2235 return FALSE;
2236 }
2237
2238 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2239
2240 if (inst.reloc.exp.X_op == O_constant)
2241 *imm = inst.reloc.exp.X_add_number;
2242
2243 *str = ptr;
2244
2245 return TRUE;
2246 }
2247
2248 /* Record in *RELOC that the operand described by *OPERAND needs a GAS
2249 internal fixup.  If NEED_LIBOPCODES_P is non-zero, the fixup will need
2250 assistance from libopcodes. */
2251
2252 static inline void
2253 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2254 const aarch64_opnd_info *operand,
2255 int need_libopcodes_p)
2256 {
2257 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2258 reloc->opnd = operand->type;
2259 if (need_libopcodes_p)
2260 reloc->need_libopcodes_p = 1;
2261 }
2262
2263 /* Return TRUE if the instruction needs to be fixed up later internally by
2264 GAS; otherwise return FALSE. */
2265
2266 static inline bfd_boolean
2267 aarch64_gas_internal_fixup_p (void)
2268 {
2269 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2270 }
2271
2272 /* Assign the immediate value to the relevant field in *OPERAND if
2273 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2274 needs an internal fixup in a later stage.
2275 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2276 IMM.VALUE that may get assigned with the constant. */
2277 static inline void
2278 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2279 aarch64_opnd_info *operand,
2280 int addr_off_p,
2281 int need_libopcodes_p,
2282 int skip_p)
2283 {
2284 if (reloc->exp.X_op == O_constant)
2285 {
2286 if (addr_off_p)
2287 operand->addr.offset.imm = reloc->exp.X_add_number;
2288 else
2289 operand->imm.value = reloc->exp.X_add_number;
2290 reloc->type = BFD_RELOC_UNUSED;
2291 }
2292 else
2293 {
2294 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2295 /* Tell libopcodes to ignore this operand or not. This is helpful
2296 when one of the operands needs to be fixed up later but we need
2297 libopcodes to check the other operands. */
2298 operand->skip = skip_p;
2299 }
2300 }
2301
2302 /* Relocation modifiers. Each entry in the table contains the textual
2303 name for the relocation which may be placed before a symbol used as
2304 a load/store offset, or add immediate. It must be surrounded by a
2305 leading and trailing colon, for example:
2306
2307 ldr x0, [x1, #:rello:varsym]
2308 add x0, x1, #:rello:varsym */
2309
2310 struct reloc_table_entry
2311 {
2312 const char *name;
2313 int pc_rel;
2314 bfd_reloc_code_real_type adr_type;
2315 bfd_reloc_code_real_type adrp_type;
2316 bfd_reloc_code_real_type movw_type;
2317 bfd_reloc_code_real_type add_type;
2318 bfd_reloc_code_real_type ldst_type;
2319 bfd_reloc_code_real_type ld_literal_type;
2320 };
2321
2322 static struct reloc_table_entry reloc_table[] = {
2323 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2324 {"lo12", 0,
2325 0, /* adr_type */
2326 0,
2327 0,
2328 BFD_RELOC_AARCH64_ADD_LO12,
2329 BFD_RELOC_AARCH64_LDST_LO12,
2330 0},
2331
2332 /* Higher 21 bits of pc-relative page offset: ADRP */
2333 {"pg_hi21", 1,
2334 0, /* adr_type */
2335 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2336 0,
2337 0,
2338 0,
2339 0},
2340
2341 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2342 {"pg_hi21_nc", 1,
2343 0, /* adr_type */
2344 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2345 0,
2346 0,
2347 0,
2348 0},
2349
2350 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2351 {"abs_g0", 0,
2352 0, /* adr_type */
2353 0,
2354 BFD_RELOC_AARCH64_MOVW_G0,
2355 0,
2356 0,
2357 0},
2358
2359 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2360 {"abs_g0_s", 0,
2361 0, /* adr_type */
2362 0,
2363 BFD_RELOC_AARCH64_MOVW_G0_S,
2364 0,
2365 0,
2366 0},
2367
2368 /* Less significant bits 0-15 of address/value: MOVK, no check */
2369 {"abs_g0_nc", 0,
2370 0, /* adr_type */
2371 0,
2372 BFD_RELOC_AARCH64_MOVW_G0_NC,
2373 0,
2374 0,
2375 0},
2376
2377 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2378 {"abs_g1", 0,
2379 0, /* adr_type */
2380 0,
2381 BFD_RELOC_AARCH64_MOVW_G1,
2382 0,
2383 0,
2384 0},
2385
2386 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2387 {"abs_g1_s", 0,
2388 0, /* adr_type */
2389 0,
2390 BFD_RELOC_AARCH64_MOVW_G1_S,
2391 0,
2392 0,
2393 0},
2394
2395 /* Less significant bits 16-31 of address/value: MOVK, no check */
2396 {"abs_g1_nc", 0,
2397 0, /* adr_type */
2398 0,
2399 BFD_RELOC_AARCH64_MOVW_G1_NC,
2400 0,
2401 0,
2402 0},
2403
2404 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2405 {"abs_g2", 0,
2406 0, /* adr_type */
2407 0,
2408 BFD_RELOC_AARCH64_MOVW_G2,
2409 0,
2410 0,
2411 0},
2412
2413 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2414 {"abs_g2_s", 0,
2415 0, /* adr_type */
2416 0,
2417 BFD_RELOC_AARCH64_MOVW_G2_S,
2418 0,
2419 0,
2420 0},
2421
2422 /* Less significant bits 32-47 of address/value: MOVK, no check */
2423 {"abs_g2_nc", 0,
2424 0, /* adr_type */
2425 0,
2426 BFD_RELOC_AARCH64_MOVW_G2_NC,
2427 0,
2428 0,
2429 0},
2430
2431 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2432 {"abs_g3", 0,
2433 0, /* adr_type */
2434 0,
2435 BFD_RELOC_AARCH64_MOVW_G3,
2436 0,
2437 0,
2438 0},
2439
2440 /* Get to the page containing GOT entry for a symbol. */
2441 {"got", 1,
2442 0, /* adr_type */
2443 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2444 0,
2445 0,
2446 0,
2447 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2448
2449 /* 12 bit offset into the page containing GOT entry for that symbol. */
2450 {"got_lo12", 0,
2451 0, /* adr_type */
2452 0,
2453 0,
2454 0,
2455 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2456 0},
2457
2458 /* Get to the page containing GOT TLS entry for a symbol */
2459 {"tlsgd", 0,
2460 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2461 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2462 0,
2463 0,
2464 0,
2465 0},
2466
2467 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2468 {"tlsgd_lo12", 0,
2469 0, /* adr_type */
2470 0,
2471 0,
2472 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2473 0,
2474 0},
2475
2476 /* Get to the page containing GOT TLS entry for a symbol */
2477 {"tlsdesc", 0,
2478 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2479 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2480 0,
2481 0,
2482 0,
2483 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2484
2485 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2486 {"tlsdesc_lo12", 0,
2487 0, /* adr_type */
2488 0,
2489 0,
2490 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2491 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2492 0},
2493
2494 /* Get to the page containing GOT TLS entry for a symbol */
2495 {"gottprel", 0,
2496 0, /* adr_type */
2497 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2498 0,
2499 0,
2500 0,
2501 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2502
2503 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2504 {"gottprel_lo12", 0,
2505 0, /* adr_type */
2506 0,
2507 0,
2508 0,
2509 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2510 0},
2511
2512 /* Get tp offset for a symbol. */
2513 {"tprel", 0,
2514 0, /* adr_type */
2515 0,
2516 0,
2517 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2518 0,
2519 0},
2520
2521 /* Get tp offset for a symbol. */
2522 {"tprel_lo12", 0,
2523 0, /* adr_type */
2524 0,
2525 0,
2526 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2527 0,
2528 0},
2529
2530 /* Get tp offset for a symbol. */
2531 {"tprel_hi12", 0,
2532 0, /* adr_type */
2533 0,
2534 0,
2535 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2536 0,
2537 0},
2538
2539 /* Get tp offset for a symbol. */
2540 {"tprel_lo12_nc", 0,
2541 0, /* adr_type */
2542 0,
2543 0,
2544 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2545 0,
2546 0},
2547
2548 /* Most significant bits 32-47 of address/value: MOVZ. */
2549 {"tprel_g2", 0,
2550 0, /* adr_type */
2551 0,
2552 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2553 0,
2554 0,
2555 0},
2556
2557 /* Most significant bits 16-31 of address/value: MOVZ. */
2558 {"tprel_g1", 0,
2559 0, /* adr_type */
2560 0,
2561 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2562 0,
2563 0,
2564 0},
2565
2566 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2567 {"tprel_g1_nc", 0,
2568 0, /* adr_type */
2569 0,
2570 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2571 0,
2572 0,
2573 0},
2574
2575 /* Most significant bits 0-15 of address/value: MOVZ. */
2576 {"tprel_g0", 0,
2577 0, /* adr_type */
2578 0,
2579 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2580 0,
2581 0,
2582 0},
2583
2584 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2585 {"tprel_g0_nc", 0,
2586 0, /* adr_type */
2587 0,
2588 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2589 0,
2590 0,
2591 0},
2592
2593 /* 15-bit offset from the GOT entry to the base address of the GOT table. */
2594 {"gotpage_lo15", 0,
2595 0,
2596 0,
2597 0,
2598 0,
2599 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2600 0},
2601 };
2602
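/* Illustrative examples, not from the original source, of how the modifiers
   above typically appear in assembly (FOO is a placeholder symbol); the
   relocation selected from the table is noted on each line:

     adrp    x0, :got:foo              // BFD_RELOC_AARCH64_ADR_GOT_PAGE
     ldr     x0, [x0, #:got_lo12:foo]  // BFD_RELOC_AARCH64_LD_GOT_LO12_NC
     adrp    x1, foo
     add     x1, x1, #:lo12:foo        // BFD_RELOC_AARCH64_ADD_LO12  */
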
2603 /* Given the address of a pointer pointing to the textual name of a
2604 relocation as may appear in assembler source, attempt to find its
2605 details in reloc_table. The pointer will be updated to the character
2606 after the trailing colon. On failure, NULL will be returned;
2607 otherwise return the reloc_table_entry. */
2608
2609 static struct reloc_table_entry *
2610 find_reloc_table_entry (char **str)
2611 {
2612 unsigned int i;
2613 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2614 {
2615 int length = strlen (reloc_table[i].name);
2616
2617 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2618 && (*str)[length] == ':')
2619 {
2620 *str += (length + 1);
2621 return &reloc_table[i];
2622 }
2623 }
2624
2625 return NULL;
2626 }
2627
2628 /* Mode argument to parse_shift and parser_shifter_operand. */
2629 enum parse_shift_mode
2630 {
2631 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2632 "#imm{,lsl #n}" */
2633 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2634 "#imm" */
2635 SHIFTED_LSL, /* bare "lsl #n" */
2636 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2637 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2638 };
2639
2640 /* Parse a <shift> operator on an AArch64 data processing instruction.
2641 Return TRUE on success; otherwise return FALSE. */
2642 static bfd_boolean
2643 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2644 {
2645 const struct aarch64_name_value_pair *shift_op;
2646 enum aarch64_modifier_kind kind;
2647 expressionS exp;
2648 int exp_has_prefix;
2649 char *s = *str;
2650 char *p = s;
2651
2652 for (p = *str; ISALPHA (*p); p++)
2653 ;
2654
2655 if (p == *str)
2656 {
2657 set_syntax_error (_("shift expression expected"));
2658 return FALSE;
2659 }
2660
2661 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2662
2663 if (shift_op == NULL)
2664 {
2665 set_syntax_error (_("shift operator expected"));
2666 return FALSE;
2667 }
2668
2669 kind = aarch64_get_operand_modifier (shift_op);
2670
2671 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2672 {
2673 set_syntax_error (_("invalid use of 'MSL'"));
2674 return FALSE;
2675 }
2676
2677 switch (mode)
2678 {
2679 case SHIFTED_LOGIC_IMM:
2680 if (aarch64_extend_operator_p (kind) == TRUE)
2681 {
2682 set_syntax_error (_("extending shift is not permitted"));
2683 return FALSE;
2684 }
2685 break;
2686
2687 case SHIFTED_ARITH_IMM:
2688 if (kind == AARCH64_MOD_ROR)
2689 {
2690 set_syntax_error (_("'ROR' shift is not permitted"));
2691 return FALSE;
2692 }
2693 break;
2694
2695 case SHIFTED_LSL:
2696 if (kind != AARCH64_MOD_LSL)
2697 {
2698 set_syntax_error (_("only 'LSL' shift is permitted"));
2699 return FALSE;
2700 }
2701 break;
2702
2703 case SHIFTED_REG_OFFSET:
2704 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2705 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2706 {
2707 set_fatal_syntax_error
2708 (_("invalid shift for the register offset addressing mode"));
2709 return FALSE;
2710 }
2711 break;
2712
2713 case SHIFTED_LSL_MSL:
2714 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2715 {
2716 set_syntax_error (_("invalid shift operator"));
2717 return FALSE;
2718 }
2719 break;
2720
2721 default:
2722 abort ();
2723 }
2724
2725 /* Whitespace can appear here if the next thing is a bare digit. */
2726 skip_whitespace (p);
2727
2728 /* Parse shift amount. */
2729 exp_has_prefix = 0;
2730 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2731 exp.X_op = O_absent;
2732 else
2733 {
2734 if (is_immediate_prefix (*p))
2735 {
2736 p++;
2737 exp_has_prefix = 1;
2738 }
2739 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2740 }
2741 if (exp.X_op == O_absent)
2742 {
2743 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2744 {
2745 set_syntax_error (_("missing shift amount"));
2746 return FALSE;
2747 }
2748 operand->shifter.amount = 0;
2749 }
2750 else if (exp.X_op != O_constant)
2751 {
2752 set_syntax_error (_("constant shift amount required"));
2753 return FALSE;
2754 }
2755 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2756 {
2757 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2758 return FALSE;
2759 }
2760 else
2761 {
2762 operand->shifter.amount = exp.X_add_number;
2763 operand->shifter.amount_present = 1;
2764 }
2765
2766 operand->shifter.operator_present = 1;
2767 operand->shifter.kind = kind;
2768
2769 *str = p;
2770 return TRUE;
2771 }
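
/* Illustrative examples, not from the original source, of operands whose
   trailing shift or extension is parsed by the routine above:

     add     x0, x1, x2, lsl #3        // register operand with an LSL shift
     ldr     x0, [x1, w2, sxtw #3]     // SHIFTED_REG_OFFSET extension
     movi    v0.4s, #255, msl #8       // SHIFTED_LSL_MSL shifting-ones  */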
2772
2773 /* Parse a <shifter_operand> for a data processing instruction:
2774
2775 #<immediate>
2776 #<immediate>, LSL #imm
2777
2778 Validation of immediate operands is deferred to md_apply_fix.
2779
2780 Return TRUE on success; otherwise return FALSE. */
2781
2782 static bfd_boolean
2783 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2784 enum parse_shift_mode mode)
2785 {
2786 char *p;
2787
2788 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2789 return FALSE;
2790
2791 p = *str;
2792
2793 /* Accept an immediate expression. */
2794 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2795 return FALSE;
2796
2797 /* Accept optional LSL for arithmetic immediate values. */
2798 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2799 if (! parse_shift (&p, operand, SHIFTED_LSL))
2800 return FALSE;
2801
2802 /* Do not accept any shifter for logical immediate values. */
2803 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2804 && parse_shift (&p, operand, mode))
2805 {
2806 set_syntax_error (_("unexpected shift operator"));
2807 return FALSE;
2808 }
2809
2810 *str = p;
2811 return TRUE;
2812 }
2813
2814 /* Parse a <shifter_operand> for a data processing instruction:
2815
2816 <Rm>
2817 <Rm>, <shift>
2818 #<immediate>
2819 #<immediate>, LSL #imm
2820
2821 where <shift> is handled by parse_shift above, and the last two
2822 cases are handled by the function above.
2823
2824 Validation of immediate operands is deferred to md_apply_fix.
2825
2826 Return TRUE on success; otherwise return FALSE. */
2827
2828 static bfd_boolean
2829 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2830 enum parse_shift_mode mode)
2831 {
2832 int reg;
2833 int isreg32, isregzero;
2834 enum aarch64_operand_class opd_class
2835 = aarch64_get_operand_class (operand->type);
2836
2837 if ((reg =
2838 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2839 {
2840 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2841 {
2842 set_syntax_error (_("unexpected register in the immediate operand"));
2843 return FALSE;
2844 }
2845
2846 if (!isregzero && reg == REG_SP)
2847 {
2848 set_syntax_error (BAD_SP);
2849 return FALSE;
2850 }
2851
2852 operand->reg.regno = reg;
2853 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2854
2855 /* Accept optional shift operation on register. */
2856 if (! skip_past_comma (str))
2857 return TRUE;
2858
2859 if (! parse_shift (str, operand, mode))
2860 return FALSE;
2861
2862 return TRUE;
2863 }
2864 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2865 {
2866 set_syntax_error
2867 (_("integer register expected in the extended/shifted operand "
2868 "register"));
2869 return FALSE;
2870 }
2871
2872 /* We have a shifted immediate variable. */
2873 return parse_shifter_operand_imm (str, operand, mode);
2874 }
2875
2876 /* Return TRUE on success; return FALSE otherwise. */
2877
2878 static bfd_boolean
2879 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2880 enum parse_shift_mode mode)
2881 {
2882 char *p = *str;
2883
2884 /* Determine if we have the sequence of characters #: or just :
2885 coming next. If we do, then we check for a :rello: relocation
2886 modifier. If we don't, punt the whole lot to
2887 parse_shifter_operand. */
2888
2889 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2890 {
2891 struct reloc_table_entry *entry;
2892
2893 if (p[0] == '#')
2894 p += 2;
2895 else
2896 p++;
2897 *str = p;
2898
2899 /* Try to parse a relocation. Anything else is an error. */
2900 if (!(entry = find_reloc_table_entry (str)))
2901 {
2902 set_syntax_error (_("unknown relocation modifier"));
2903 return FALSE;
2904 }
2905
2906 if (entry->add_type == 0)
2907 {
2908 set_syntax_error
2909 (_("this relocation modifier is not allowed on this instruction"));
2910 return FALSE;
2911 }
2912
2913 /* Save str before we decompose it. */
2914 p = *str;
2915
2916 /* Next, we parse the expression. */
2917 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2918 return FALSE;
2919
2920 /* Record the relocation type (use the ADD variant here). */
2921 inst.reloc.type = entry->add_type;
2922 inst.reloc.pc_rel = entry->pc_rel;
2923
2924 /* If str is empty, we've reached the end, stop here. */
2925 if (**str == '\0')
2926 return TRUE;
2927
2928 /* Otherwise, we have a shifted reloc modifier, so rewind to
2929 recover the variable name and continue parsing for the shifter. */
2930 *str = p;
2931 return parse_shifter_operand_imm (str, operand, mode);
2932 }
2933
2934 return parse_shifter_operand (str, operand, mode);
2935 }
2936
2937 /* Parse all forms of an address expression. Information is written
2938 to *OPERAND and/or inst.reloc.
2939
2940 The A64 instruction set has the following addressing modes:
2941
2942 Offset
2943 [base] // in SIMD ld/st structure
2944 [base{,#0}] // in ld/st exclusive
2945 [base{,#imm}]
2946 [base,Xm{,LSL #imm}]
2947 [base,Xm,SXTX {#imm}]
2948 [base,Wm,(S|U)XTW {#imm}]
2949 Pre-indexed
2950 [base,#imm]!
2951 Post-indexed
2952 [base],#imm
2953 [base],Xm // in SIMD ld/st structure
2954 PC-relative (literal)
2955 label
2956 =immediate
2957
2958 (As a convenience, the notation "=immediate" is permitted in conjunction
2959 with the pc-relative literal load instructions to automatically place an
2960 immediate value or symbolic address in a nearby literal pool and generate
2961 a hidden label which references it.)
2962
2963 Upon a successful parsing, the address structure in *OPERAND will be
2964 filled in the following way:
2965
2966 .base_regno = <base>
2967 .offset.is_reg // 1 if the offset is a register
2968 .offset.imm = <imm>
2969 .offset.regno = <Rm>
2970
2971 For different addressing modes defined in the A64 ISA:
2972
2973 Offset
2974 .pcrel=0; .preind=1; .postind=0; .writeback=0
2975 Pre-indexed
2976 .pcrel=0; .preind=1; .postind=0; .writeback=1
2977 Post-indexed
2978 .pcrel=0; .preind=0; .postind=1; .writeback=1
2979 PC-relative (literal)
2980 .pcrel=1; .preind=1; .postind=0; .writeback=0
2981
2982 The shift/extension information, if any, will be stored in .shifter.
2983
2984 It is the caller's responsibility to check for addressing modes not
2985 supported by the instruction, and to set inst.reloc.type. */
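
/* For example (an illustrative sketch, not from the original source),
   "[x1, x2, lsl #3]" fills in .base_regno = 1, .offset.regno = 2,
   .offset.is_reg = 1 and an LSL .shifter with amount 3, with .preind = 1;
   "[x1], #16" sets .base_regno = 1, .postind = 1, .writeback = 1 and
   records the constant 16 in inst.reloc.exp.  */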
2986
2987 static bfd_boolean
2988 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2989 int accept_reg_post_index)
2990 {
2991 char *p = *str;
2992 int reg;
2993 int isreg32, isregzero;
2994 expressionS *exp = &inst.reloc.exp;
2995
2996 if (! skip_past_char (&p, '['))
2997 {
2998 /* =immediate or label. */
2999 operand->addr.pcrel = 1;
3000 operand->addr.preind = 1;
3001
3002 /* #:<reloc_op>:<symbol> */
3003 skip_past_char (&p, '#');
3004 if (reloc && skip_past_char (&p, ':'))
3005 {
3006 bfd_reloc_code_real_type ty;
3007 struct reloc_table_entry *entry;
3008
3009 /* Try to parse a relocation modifier. Anything else is
3010 an error. */
3011 entry = find_reloc_table_entry (&p);
3012 if (! entry)
3013 {
3014 set_syntax_error (_("unknown relocation modifier"));
3015 return FALSE;
3016 }
3017
3018 switch (operand->type)
3019 {
3020 case AARCH64_OPND_ADDR_PCREL21:
3021 /* adr */
3022 ty = entry->adr_type;
3023 break;
3024
3025 default:
3026 ty = entry->ld_literal_type;
3027 break;
3028 }
3029
3030 if (ty == 0)
3031 {
3032 set_syntax_error
3033 (_("this relocation modifier is not allowed on this "
3034 "instruction"));
3035 return FALSE;
3036 }
3037
3038 /* #:<reloc_op>: */
3039 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3040 {
3041 set_syntax_error (_("invalid relocation expression"));
3042 return FALSE;
3043 }
3044
3045 /* #:<reloc_op>:<expr> */
3046 /* Record the relocation type. */
3047 inst.reloc.type = ty;
3048 inst.reloc.pc_rel = entry->pc_rel;
3049 }
3050 else
3051 {
3052
3053 if (skip_past_char (&p, '='))
3054 /* =immediate; need to generate the literal in the literal pool. */
3055 inst.gen_lit_pool = 1;
3056
3057 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3058 {
3059 set_syntax_error (_("invalid address"));
3060 return FALSE;
3061 }
3062 }
3063
3064 *str = p;
3065 return TRUE;
3066 }
3067
3068 /* [ */
3069
3070 /* Accept SP and reject ZR */
3071 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
3072 if (reg == PARSE_FAIL || isreg32)
3073 {
3074 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3075 return FALSE;
3076 }
3077 operand->addr.base_regno = reg;
3078
3079 /* [Xn */
3080 if (skip_past_comma (&p))
3081 {
3082 /* [Xn, */
3083 operand->addr.preind = 1;
3084
3085 /* Reject SP and accept ZR */
3086 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3087 if (reg != PARSE_FAIL)
3088 {
3089 /* [Xn,Rm */
3090 operand->addr.offset.regno = reg;
3091 operand->addr.offset.is_reg = 1;
3092 /* Shifted index. */
3093 if (skip_past_comma (&p))
3094 {
3095 /* [Xn,Rm, */
3096 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3097 /* Use the diagnostics set in parse_shift, so do not set a new
3098 error message here. */
3099 return FALSE;
3100 }
3101 /* We only accept:
3102 [base,Xm{,LSL #imm}]
3103 [base,Xm,SXTX {#imm}]
3104 [base,Wm,(S|U)XTW {#imm}] */
3105 if (operand->shifter.kind == AARCH64_MOD_NONE
3106 || operand->shifter.kind == AARCH64_MOD_LSL
3107 || operand->shifter.kind == AARCH64_MOD_SXTX)
3108 {
3109 if (isreg32)
3110 {
3111 set_syntax_error (_("invalid use of 32-bit register offset"));
3112 return FALSE;
3113 }
3114 }
3115 else if (!isreg32)
3116 {
3117 set_syntax_error (_("invalid use of 64-bit register offset"));
3118 return FALSE;
3119 }
3120 }
3121 else
3122 {
3123 /* [Xn,#:<reloc_op>:<symbol> */
3124 skip_past_char (&p, '#');
3125 if (reloc && skip_past_char (&p, ':'))
3126 {
3127 struct reloc_table_entry *entry;
3128
3129 /* Try to parse a relocation modifier. Anything else is
3130 an error. */
3131 if (!(entry = find_reloc_table_entry (&p)))
3132 {
3133 set_syntax_error (_("unknown relocation modifier"));
3134 return FALSE;
3135 }
3136
3137 if (entry->ldst_type == 0)
3138 {
3139 set_syntax_error
3140 (_("this relocation modifier is not allowed on this "
3141 "instruction"));
3142 return FALSE;
3143 }
3144
3145 /* [Xn,#:<reloc_op>: */
3146 /* We now have the group relocation table entry corresponding to
3147 the name in the assembler source. Next, we parse the
3148 expression. */
3149 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3150 {
3151 set_syntax_error (_("invalid relocation expression"));
3152 return FALSE;
3153 }
3154
3155 /* [Xn,#:<reloc_op>:<expr> */
3156 /* Record the load/store relocation type. */
3157 inst.reloc.type = entry->ldst_type;
3158 inst.reloc.pc_rel = entry->pc_rel;
3159 }
3160 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3161 {
3162 set_syntax_error (_("invalid expression in the address"));
3163 return FALSE;
3164 }
3165 /* [Xn,<expr> */
3166 }
3167 }
3168
3169 if (! skip_past_char (&p, ']'))
3170 {
3171 set_syntax_error (_("']' expected"));
3172 return FALSE;
3173 }
3174
3175 if (skip_past_char (&p, '!'))
3176 {
3177 if (operand->addr.preind && operand->addr.offset.is_reg)
3178 {
3179 set_syntax_error (_("register offset not allowed in pre-indexed "
3180 "addressing mode"));
3181 return FALSE;
3182 }
3183 /* [Xn]! */
3184 operand->addr.writeback = 1;
3185 }
3186 else if (skip_past_comma (&p))
3187 {
3188 /* [Xn], */
3189 operand->addr.postind = 1;
3190 operand->addr.writeback = 1;
3191
3192 if (operand->addr.preind)
3193 {
3194 set_syntax_error (_("cannot combine pre- and post-indexing"));
3195 return FALSE;
3196 }
3197
3198 if (accept_reg_post_index
3199 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3200 &isregzero)) != PARSE_FAIL)
3201 {
3202 /* [Xn],Xm */
3203 if (isreg32)
3204 {
3205 set_syntax_error (_("invalid 32-bit register offset"));
3206 return FALSE;
3207 }
3208 operand->addr.offset.regno = reg;
3209 operand->addr.offset.is_reg = 1;
3210 }
3211 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3212 {
3213 /* [Xn],#expr */
3214 set_syntax_error (_("invalid expression in the address"));
3215 return FALSE;
3216 }
3217 }
3218
3219 /* If at this point neither .preind nor .postind is set, we have a
3220 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3221 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3222 {
3223 if (operand->addr.writeback)
3224 {
3225 /* Reject [Rn]! */
3226 set_syntax_error (_("missing offset in the pre-indexed address"));
3227 return FALSE;
3228 }
3229 operand->addr.preind = 1;
3230 inst.reloc.exp.X_op = O_constant;
3231 inst.reloc.exp.X_add_number = 0;
3232 }
3233
3234 *str = p;
3235 return TRUE;
3236 }
3237
3238 /* Return TRUE on success; otherwise return FALSE. */
3239 static bfd_boolean
3240 parse_address (char **str, aarch64_opnd_info *operand,
3241 int accept_reg_post_index)
3242 {
3243 return parse_address_main (str, operand, 0, accept_reg_post_index);
3244 }
3245
3246 /* Return TRUE on success; otherwise return FALSE. */
3247 static bfd_boolean
3248 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3249 {
3250 return parse_address_main (str, operand, 1, 0);
3251 }
3252
3253 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3254 Return TRUE on success; otherwise return FALSE. */
3255 static bfd_boolean
3256 parse_half (char **str, int *internal_fixup_p)
3257 {
3258 char *p, *saved;
3259 int dummy;
3260
3261 p = *str;
3262 skip_past_char (&p, '#');
3263
3264 gas_assert (internal_fixup_p);
3265 *internal_fixup_p = 0;
3266
3267 if (*p == ':')
3268 {
3269 struct reloc_table_entry *entry;
3270
3271 /* Try to parse a relocation. Anything else is an error. */
3272 ++p;
3273 if (!(entry = find_reloc_table_entry (&p)))
3274 {
3275 set_syntax_error (_("unknown relocation modifier"));
3276 return FALSE;
3277 }
3278
3279 if (entry->movw_type == 0)
3280 {
3281 set_syntax_error
3282 (_("this relocation modifier is not allowed on this instruction"));
3283 return FALSE;
3284 }
3285
3286 inst.reloc.type = entry->movw_type;
3287 }
3288 else
3289 *internal_fixup_p = 1;
3290
3291 /* Avoid parsing a register as a general symbol. */
3292 saved = p;
3293 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3294 return FALSE;
3295 p = saved;
3296
3297 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3298 return FALSE;
3299
3300 *str = p;
3301 return TRUE;
3302 }
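
/* For instance (illustrative, not from the original source), given the
   routine above "movz x0, #:abs_g1:foo" selects the movw_type
   BFD_RELOC_AARCH64_MOVW_G1 from the relocation table, whereas a plain
   "movz x0, #0x1234" leaves *INTERNAL_FIXUP_P set to 1 so the immediate
   is fixed up internally by GAS.  */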
3303
3304 /* Parse an operand for an ADRP instruction:
3305 ADRP <Xd>, <label>
3306 Return TRUE on success; otherwise return FALSE. */
3307
3308 static bfd_boolean
3309 parse_adrp (char **str)
3310 {
3311 char *p;
3312
3313 p = *str;
3314 if (*p == ':')
3315 {
3316 struct reloc_table_entry *entry;
3317
3318 /* Try to parse a relocation. Anything else is an error. */
3319 ++p;
3320 if (!(entry = find_reloc_table_entry (&p)))
3321 {
3322 set_syntax_error (_("unknown relocation modifier"));
3323 return FALSE;
3324 }
3325
3326 if (entry->adrp_type == 0)
3327 {
3328 set_syntax_error
3329 (_("this relocation modifier is not allowed on this instruction"));
3330 return FALSE;
3331 }
3332
3333 inst.reloc.type = entry->adrp_type;
3334 }
3335 else
3336 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3337
3338 inst.reloc.pc_rel = 1;
3339
3340 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3341 return FALSE;
3342
3343 *str = p;
3344 return TRUE;
3345 }
3346
3347 /* Miscellaneous. */
3348
3349 /* Parse an option for a preload instruction. Returns the encoding for the
3350 option, or PARSE_FAIL. */
3351
3352 static int
3353 parse_pldop (char **str)
3354 {
3355 char *p, *q;
3356 const struct aarch64_name_value_pair *o;
3357
3358 p = q = *str;
3359 while (ISALNUM (*q))
3360 q++;
3361
3362 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3363 if (!o)
3364 return PARSE_FAIL;
3365
3366 *str = q;
3367 return o->value;
3368 }
3369
3370 /* Parse an option for a barrier instruction. Returns the encoding for the
3371 option, or PARSE_FAIL. */
3372
3373 static int
3374 parse_barrier (char **str)
3375 {
3376 char *p, *q;
3377 const asm_barrier_opt *o;
3378
3379 p = q = *str;
3380 while (ISALPHA (*q))
3381 q++;
3382
3383 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3384 if (!o)
3385 return PARSE_FAIL;
3386
3387 *str = q;
3388 return o->value;
3389 }
3390
3391 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3392 Returns the encoding for the option, or PARSE_FAIL.
3393
3394 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3395 implementation-defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3396
3397 static int
3398 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3399 {
3400 char *p, *q;
3401 char buf[32];
3402 const aarch64_sys_reg *o;
3403 int value;
3404
3405 p = buf;
3406 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3407 if (p < buf + 31)
3408 *p++ = TOLOWER (*q);
3409 *p = '\0';
3410 /* Assert that BUF is large enough. */
3411 gas_assert (p - buf == q - *str);
3412
3413 o = hash_find (sys_regs, buf);
3414 if (!o)
3415 {
3416 if (!imple_defined_p)
3417 return PARSE_FAIL;
3418 else
3419 {
3420 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3421 unsigned int op0, op1, cn, cm, op2;
3422
3423 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3424 != 5)
3425 return PARSE_FAIL;
3426 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3427 return PARSE_FAIL;
3428 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3429 }
3430 }
3431 else
3432 {
3433 if (aarch64_sys_reg_deprecated_p (o))
3434 as_warn (_("system register name '%s' is deprecated and may be "
3435 "removed in a future release"), buf);
3436 value = o->value;
3437 }
3438
3439 *str = q;
3440 return value;
3441 }
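
/* As an illustration (not from the original source): for an
   implementation-defined register written as "s3_0_c2_c0_0" the fields are
   op0 = 3, op1 = 0, Cn = 2, Cm = 0, op2 = 0, so the routine above returns
   (3 << 14) | (0 << 11) | (2 << 7) | (0 << 3) | 0 = 0xc100.  */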
3442
3443 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3444 for the option, or NULL. */
3445
3446 static const aarch64_sys_ins_reg *
3447 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3448 {
3449 char *p, *q;
3450 char buf[32];
3451 const aarch64_sys_ins_reg *o;
3452
3453 p = buf;
3454 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3455 if (p < buf + 31)
3456 *p++ = TOLOWER (*q);
3457 *p = '\0';
3458
3459 o = hash_find (sys_ins_regs, buf);
3460 if (!o)
3461 return NULL;
3462
3463 *str = q;
3464 return o;
3465 }
3466 \f
3467 #define po_char_or_fail(chr) do { \
3468 if (! skip_past_char (&str, chr)) \
3469 goto failure; \
3470 } while (0)
3471
3472 #define po_reg_or_fail(regtype) do { \
3473 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3474 if (val == PARSE_FAIL) \
3475 { \
3476 set_default_error (); \
3477 goto failure; \
3478 } \
3479 } while (0)
3480
3481 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3482 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3483 &isreg32, &isregzero); \
3484 if (val == PARSE_FAIL) \
3485 { \
3486 set_default_error (); \
3487 goto failure; \
3488 } \
3489 info->reg.regno = val; \
3490 if (isreg32) \
3491 info->qualifier = AARCH64_OPND_QLF_W; \
3492 else \
3493 info->qualifier = AARCH64_OPND_QLF_X; \
3494 } while (0)
3495
3496 #define po_imm_nc_or_fail() do { \
3497 if (! parse_constant_immediate (&str, &val)) \
3498 goto failure; \
3499 } while (0)
3500
3501 #define po_imm_or_fail(min, max) do { \
3502 if (! parse_constant_immediate (&str, &val)) \
3503 goto failure; \
3504 if (val < min || val > max) \
3505 { \
3506 set_fatal_syntax_error (_("immediate value out of range "\
3507 #min " to "#max)); \
3508 goto failure; \
3509 } \
3510 } while (0)
3511
3512 #define po_misc_or_fail(expr) do { \
3513 if (!expr) \
3514 goto failure; \
3515 } while (0)
3516 \f
3517 /* Encode the 12-bit imm field of Add/sub immediate. */
3518 static inline uint32_t
3519 encode_addsub_imm (uint32_t imm)
3520 {
3521 return imm << 10;
3522 }
3523
3524 /* Encode the shift amount field of Add/sub immediate. */
3525 static inline uint32_t
3526 encode_addsub_imm_shift_amount (uint32_t cnt)
3527 {
3528 return cnt << 22;
3529 }
3530
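/* As a worked example (illustrative, not from the original source):
   "add x0, x1, #0x1000" is assembled with a 12-bit immediate of 1 and a
   shift count of 1 (LSL #12), so encode_addsub_imm (1) contributes 0x400
   and encode_addsub_imm_shift_amount (1) contributes 0x400000 to the
   instruction word.  */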
3531
3532 /* Encode the imm field of the Adr instruction. */
3533 static inline uint32_t
3534 encode_adr_imm (uint32_t imm)
3535 {
3536 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3537 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3538 }
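
/* For instance (illustrative, not from the original source), an ADR with a
   byte offset of 5 has immlo = 5 & 0x3 = 1 and immhi = 5 >> 2 = 1, so
   encode_adr_imm (5) returns (1 << 29) | (1 << 5) = 0x20000020.  */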
3539
3540 /* Encode the immediate field of Move wide immediate. */
3541 static inline uint32_t
3542 encode_movw_imm (uint32_t imm)
3543 {
3544 return imm << 5;
3545 }
3546
3547 /* Encode the 26-bit offset of unconditional branch. */
3548 static inline uint32_t
3549 encode_branch_ofs_26 (uint32_t ofs)
3550 {
3551 return ofs & ((1 << 26) - 1);
3552 }
3553
3554 /* Encode the 19-bit offset of conditional branch and compare & branch. */
3555 static inline uint32_t
3556 encode_cond_branch_ofs_19 (uint32_t ofs)
3557 {
3558 return (ofs & ((1 << 19) - 1)) << 5;
3559 }
3560
3561 /* Encode the 19-bit offset of ld literal. */
3562 static inline uint32_t
3563 encode_ld_lit_ofs_19 (uint32_t ofs)
3564 {
3565 return (ofs & ((1 << 19) - 1)) << 5;
3566 }
3567
3568 /* Encode the 14-bit offset of test & branch. */
3569 static inline uint32_t
3570 encode_tst_branch_ofs_14 (uint32_t ofs)
3571 {
3572 return (ofs & ((1 << 14) - 1)) << 5;
3573 }
3574
3575 /* Encode the 16-bit imm field of svc/hvc/smc. */
3576 static inline uint32_t
3577 encode_svc_imm (uint32_t imm)
3578 {
3579 return imm << 5;
3580 }
3581
3582 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3583 static inline uint32_t
3584 reencode_addsub_switch_add_sub (uint32_t opcode)
3585 {
3586 return opcode ^ (1 << 30);
3587 }
3588
3589 static inline uint32_t
3590 reencode_movzn_to_movz (uint32_t opcode)
3591 {
3592 return opcode | (1 << 30);
3593 }
3594
3595 static inline uint32_t
3596 reencode_movzn_to_movn (uint32_t opcode)
3597 {
3598 return opcode & ~(1 << 30);
3599 }
3600
3601 /* Overall per-instruction processing. */
3602
3603 /* We need to be able to fix up arbitrary expressions in some statements.
3604 This is so that we can handle symbols that are an arbitrary distance from
3605 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3606 which returns part of an address in a form which will be valid for
3607 a data instruction. We do this by pushing the expression into a symbol
3608 in the expr_section, and creating a fix for that. */
3609
3610 static fixS *
3611 fix_new_aarch64 (fragS * frag,
3612 int where,
3613 short int size, expressionS * exp, int pc_rel, int reloc)
3614 {
3615 fixS *new_fix;
3616
3617 switch (exp->X_op)
3618 {
3619 case O_constant:
3620 case O_symbol:
3621 case O_add:
3622 case O_subtract:
3623 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3624 break;
3625
3626 default:
3627 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3628 pc_rel, reloc);
3629 break;
3630 }
3631 return new_fix;
3632 }
3633 \f
3634 /* Diagnostics on operands errors. */
3635
3636 /* By default, output a verbose error message.
3637 Disable the verbose error message with -mno-verbose-error. */
3638 static int verbose_error_p = 1;
3639
3640 #ifdef DEBUG_AARCH64
3641 /* N.B. this is only for the purpose of debugging. */
3642 const char* operand_mismatch_kind_names[] =
3643 {
3644 "AARCH64_OPDE_NIL",
3645 "AARCH64_OPDE_RECOVERABLE",
3646 "AARCH64_OPDE_SYNTAX_ERROR",
3647 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3648 "AARCH64_OPDE_INVALID_VARIANT",
3649 "AARCH64_OPDE_OUT_OF_RANGE",
3650 "AARCH64_OPDE_UNALIGNED",
3651 "AARCH64_OPDE_REG_LIST",
3652 "AARCH64_OPDE_OTHER_ERROR",
3653 };
3654 #endif /* DEBUG_AARCH64 */
3655
3656 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3657
3658 When multiple errors of different kinds are found in the same assembly
3659 line, only the error of the highest severity will be picked up for
3660 issuing the diagnostics. */
3661
3662 static inline bfd_boolean
3663 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3664 enum aarch64_operand_error_kind rhs)
3665 {
3666 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3667 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3668 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3669 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3670 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3671 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3672 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3673 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3674 return lhs > rhs;
3675 }
3676
3677 /* Helper routine to get the mnemonic name from the assembly instruction
3678 line; should only be called for diagnostic purposes, as a string copy
3679 operation is involved, which may affect the runtime performance if
3680 used elsewhere. */
3681
3682 static const char*
3683 get_mnemonic_name (const char *str)
3684 {
3685 static char mnemonic[32];
3686 char *ptr;
3687
3688 /* Get the first 31 bytes and assume that the full name is included. */
3689 strncpy (mnemonic, str, 31);
3690 mnemonic[31] = '\0';
3691
3692 /* Scan up to the end of the mnemonic, which must end in white space,
3693 '.', or end of string. */
3694 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3695 ;
3696
3697 *ptr = '\0';
3698
3699 /* Append '...' to the truncated long name. */
3700 if (ptr - mnemonic == 31)
3701 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3702
3703 return mnemonic;
3704 }
3705
3706 static void
3707 reset_aarch64_instruction (aarch64_instruction *instruction)
3708 {
3709 memset (instruction, '\0', sizeof (aarch64_instruction));
3710 instruction->reloc.type = BFD_RELOC_UNUSED;
3711 }
3712
3713 /* Data structures storing one user error in the assembly code related to
3714 operands. */
3715
3716 struct operand_error_record
3717 {
3718 const aarch64_opcode *opcode;
3719 aarch64_operand_error detail;
3720 struct operand_error_record *next;
3721 };
3722
3723 typedef struct operand_error_record operand_error_record;
3724
3725 struct operand_errors
3726 {
3727 operand_error_record *head;
3728 operand_error_record *tail;
3729 };
3730
3731 typedef struct operand_errors operand_errors;
3732
3733 /* Top-level data structure reporting user errors for the current line of
3734 the assembly code.
3735 The way md_assemble works is that all opcodes sharing the same mnemonic
3736 name are iterated to find a match to the assembly line. In this data
3737 structure, each such opcode will have one operand_error_record
3738 allocated and inserted. In other words, excess errors related to
3739 a single opcode are disregarded. */
3740 operand_errors operand_error_report;
3741
3742 /* Free record nodes. */
3743 static operand_error_record *free_opnd_error_record_nodes = NULL;
3744
3745 /* Initialize the data structure that stores the operand mismatch
3746 information on assembling one line of the assembly code. */
3747 static void
3748 init_operand_error_report (void)
3749 {
3750 if (operand_error_report.head != NULL)
3751 {
3752 gas_assert (operand_error_report.tail != NULL);
3753 operand_error_report.tail->next = free_opnd_error_record_nodes;
3754 free_opnd_error_record_nodes = operand_error_report.head;
3755 operand_error_report.head = NULL;
3756 operand_error_report.tail = NULL;
3757 return;
3758 }
3759 gas_assert (operand_error_report.tail == NULL);
3760 }
3761
3762 /* Return TRUE if some operand error has been recorded during the
3763 parsing of the current assembly line using the opcode *OPCODE;
3764 otherwise return FALSE. */
3765 static inline bfd_boolean
3766 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3767 {
3768 operand_error_record *record = operand_error_report.head;
3769 return record && record->opcode == opcode;
3770 }
3771
3772 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3773 OPCODE field is initialized with OPCODE.
3774 N.B. only one record for each opcode, i.e. at most one error is
3775 recorded for each instruction template. */
3776
3777 static void
3778 add_operand_error_record (const operand_error_record* new_record)
3779 {
3780 const aarch64_opcode *opcode = new_record->opcode;
3781 operand_error_record* record = operand_error_report.head;
3782
3783 /* The record may have been created for this opcode. If not, we need
3784 to prepare one. */
3785 if (! opcode_has_operand_error_p (opcode))
3786 {
3787 /* Get one empty record. */
3788 if (free_opnd_error_record_nodes == NULL)
3789 {
3790 record = xmalloc (sizeof (operand_error_record));
3791 if (record == NULL)
3792 abort ();
3793 }
3794 else
3795 {
3796 record = free_opnd_error_record_nodes;
3797 free_opnd_error_record_nodes = record->next;
3798 }
3799 record->opcode = opcode;
3800 /* Insert at the head. */
3801 record->next = operand_error_report.head;
3802 operand_error_report.head = record;
3803 if (operand_error_report.tail == NULL)
3804 operand_error_report.tail = record;
3805 }
3806 else if (record->detail.kind != AARCH64_OPDE_NIL
3807 && record->detail.index <= new_record->detail.index
3808 && operand_error_higher_severity_p (record->detail.kind,
3809 new_record->detail.kind))
3810 {
3811 /* In the case of multiple errors found on operands related to a
3812 single opcode, only record the error of the leftmost operand and
3813 only if the error is of higher severity. */
3814 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3815 " the existing error %s on operand %d",
3816 operand_mismatch_kind_names[new_record->detail.kind],
3817 new_record->detail.index,
3818 operand_mismatch_kind_names[record->detail.kind],
3819 record->detail.index);
3820 return;
3821 }
3822
3823 record->detail = new_record->detail;
3824 }
3825
3826 static inline void
3827 record_operand_error_info (const aarch64_opcode *opcode,
3828 aarch64_operand_error *error_info)
3829 {
3830 operand_error_record record;
3831 record.opcode = opcode;
3832 record.detail = *error_info;
3833 add_operand_error_record (&record);
3834 }
3835
3836 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3837 error message *ERROR, for operand IDX (count from 0). */
3838
3839 static void
3840 record_operand_error (const aarch64_opcode *opcode, int idx,
3841 enum aarch64_operand_error_kind kind,
3842 const char* error)
3843 {
3844 aarch64_operand_error info;
3845 memset(&info, 0, sizeof (info));
3846 info.index = idx;
3847 info.kind = kind;
3848 info.error = error;
3849 record_operand_error_info (opcode, &info);
3850 }
3851
3852 static void
3853 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3854 enum aarch64_operand_error_kind kind,
3855 const char* error, const int *extra_data)
3856 {
3857 aarch64_operand_error info;
3858 info.index = idx;
3859 info.kind = kind;
3860 info.error = error;
3861 info.data[0] = extra_data[0];
3862 info.data[1] = extra_data[1];
3863 info.data[2] = extra_data[2];
3864 record_operand_error_info (opcode, &info);
3865 }
3866
3867 static void
3868 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3869 const char* error, int lower_bound,
3870 int upper_bound)
3871 {
3872 int data[3] = {lower_bound, upper_bound, 0};
3873 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3874 error, data);
3875 }
3876
3877 /* Remove the operand error record for *OPCODE. */
3878 static void ATTRIBUTE_UNUSED
3879 remove_operand_error_record (const aarch64_opcode *opcode)
3880 {
3881 if (opcode_has_operand_error_p (opcode))
3882 {
3883 operand_error_record* record = operand_error_report.head;
3884 gas_assert (record != NULL && operand_error_report.tail != NULL);
3885 operand_error_report.head = record->next;
3886 record->next = free_opnd_error_record_nodes;
3887 free_opnd_error_record_nodes = record;
3888 if (operand_error_report.head == NULL)
3889 {
3890 gas_assert (operand_error_report.tail == record);
3891 operand_error_report.tail = NULL;
3892 }
3893 }
3894 }
3895
3896 /* Given the instruction in *INSTR, return the index of the best matched
3897 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3898
3899 Return -1 if there is no qualifier sequence; return the first match
3900 if multiple matches are found. */
3901
3902 static int
3903 find_best_match (const aarch64_inst *instr,
3904 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3905 {
3906 int i, num_opnds, max_num_matched, idx;
3907
3908 num_opnds = aarch64_num_of_operands (instr->opcode);
3909 if (num_opnds == 0)
3910 {
3911 DEBUG_TRACE ("no operand");
3912 return -1;
3913 }
3914
3915 max_num_matched = 0;
3916 idx = -1;
3917
3918 /* For each pattern. */
3919 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3920 {
3921 int j, num_matched;
3922 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3923
3924 /* Most opcodes have far fewer patterns in the list. */
3925 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3926 {
3927 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3928 if (i != 0 && idx == -1)
3929 /* If nothing has been matched, return the 1st sequence. */
3930 idx = 0;
3931 break;
3932 }
3933
3934 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3935 if (*qualifiers == instr->operands[j].qualifier)
3936 ++num_matched;
3937
3938 if (num_matched > max_num_matched)
3939 {
3940 max_num_matched = num_matched;
3941 idx = i;
3942 }
3943 }
3944
3945 DEBUG_TRACE ("return with %d", idx);
3946 return idx;
3947 }
3948
3949 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3950 corresponding operands in *INSTR. */
3951
3952 static inline void
3953 assign_qualifier_sequence (aarch64_inst *instr,
3954 const aarch64_opnd_qualifier_t *qualifiers)
3955 {
3956 int i = 0;
3957 int num_opnds = aarch64_num_of_operands (instr->opcode);
3958 gas_assert (num_opnds);
3959 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3960 instr->operands[i].qualifier = *qualifiers;
3961 }
3962
3963 /* Print operands for diagnostic purposes. */
3964
3965 static void
3966 print_operands (char *buf, const aarch64_opcode *opcode,
3967 const aarch64_opnd_info *opnds)
3968 {
3969 int i;
3970
3971 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3972 {
3973 const size_t size = 128;
3974 char str[size];
3975
3976 /* We rely mainly on the operand info in the opcode; however, we also
3977 look into inst->operands to support printing an optional
3978 operand.
3979 The two operand codes should be the same in all cases, apart from
3980 when the operand can be optional. */
3981 if (opcode->operands[i] == AARCH64_OPND_NIL
3982 || opnds[i].type == AARCH64_OPND_NIL)
3983 break;
3984
3985 /* Generate the operand string in STR. */
3986 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3987
3988 /* Delimiter. */
3989 if (str[0] != '\0')
3990 strcat (buf, i == 0 ? " " : ",");
3991
3992 /* Append the operand string. */
3993 strcat (buf, str);
3994 }
3995 }
3996
3997 /* Send to stderr a string as information. */
3998
3999 static void
4000 output_info (const char *format, ...)
4001 {
4002 char *file;
4003 unsigned int line;
4004 va_list args;
4005
4006 as_where (&file, &line);
4007 if (file)
4008 {
4009 if (line != 0)
4010 fprintf (stderr, "%s:%u: ", file, line);
4011 else
4012 fprintf (stderr, "%s: ", file);
4013 }
4014 fprintf (stderr, _("Info: "));
4015 va_start (args, format);
4016 vfprintf (stderr, format, args);
4017 va_end (args);
4018 (void) putc ('\n', stderr);
4019 }
4020
4021 /* Output one operand error record. */
4022
4023 static void
4024 output_operand_error_record (const operand_error_record *record, char *str)
4025 {
4026 const aarch64_operand_error *detail = &record->detail;
4027 int idx = detail->index;
4028 const aarch64_opcode *opcode = record->opcode;
4029 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4030 : AARCH64_OPND_NIL);
4031
4032 switch (detail->kind)
4033 {
4034 case AARCH64_OPDE_NIL:
4035 gas_assert (0);
4036 break;
4037
4038 case AARCH64_OPDE_SYNTAX_ERROR:
4039 case AARCH64_OPDE_RECOVERABLE:
4040 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4041 case AARCH64_OPDE_OTHER_ERROR:
4042 /* Use the prepared error message if there is one, otherwise use the
4043 operand description string to describe the error. */
4044 if (detail->error != NULL)
4045 {
4046 if (idx < 0)
4047 as_bad (_("%s -- `%s'"), detail->error, str);
4048 else
4049 as_bad (_("%s at operand %d -- `%s'"),
4050 detail->error, idx + 1, str);
4051 }
4052 else
4053 {
4054 gas_assert (idx >= 0);
4055 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4056 aarch64_get_operand_desc (opd_code), str);
4057 }
4058 break;
4059
4060 case AARCH64_OPDE_INVALID_VARIANT:
4061 as_bad (_("operand mismatch -- `%s'"), str);
4062 if (verbose_error_p)
4063 {
4064 /* We will try to correct the erroneous instruction and also provide
4065 more information e.g. all other valid variants.
4066
4067 The string representation of the corrected instruction and other
4068 valid variants are generated by
4069
4070 1) obtaining the intermediate representation of the erroneous
4071 instruction;
4072 2) manipulating the IR, e.g. replacing the operand qualifier;
4073 3) printing out the instruction by calling the printer functions
4074 shared with the disassembler.
4075
4076 The limitation of this method is that the exact input assembly
4077 line cannot be accurately reproduced in some cases, for example an
4078 optional operand present in the actual assembly line will be
4079 omitted in the output; likewise for the optional syntax rules,
4080 e.g. the # before the immediate. Another limitation is that the
4081 assembly symbols and relocation operations in the assembly line
4082 currently cannot be printed out in the error report. Last but not
4083 least, when other errors co-exist with this error, the
4084 'corrected' instruction may still be incorrect, e.g. given
4085 'ldnp h0,h1,[x0,#6]!'
4086 this diagnosis will provide the version:
4087 'ldnp s0,s1,[x0,#6]!'
4088 which is still not right. */
4089 size_t len = strlen (get_mnemonic_name (str));
4090 int i, qlf_idx;
4091 bfd_boolean result;
4092 const size_t size = 2048;
4093 char buf[size];
4094 aarch64_inst *inst_base = &inst.base;
4095 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4096
4097 /* Init inst. */
4098 reset_aarch64_instruction (&inst);
4099 inst_base->opcode = opcode;
4100
4101 /* Reset the error report so that there is no side effect on the
4102 following operand parsing. */
4103 init_operand_error_report ();
4104
4105 /* Fill inst. */
4106 result = parse_operands (str + len, opcode)
4107 && programmer_friendly_fixup (&inst);
4108 gas_assert (result);
4109 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4110 NULL, NULL);
4111 gas_assert (!result);
4112
4113 /* Find the most matched qualifier sequence. */
4114 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4115 gas_assert (qlf_idx > -1);
4116
4117 /* Assign the qualifiers. */
4118 assign_qualifier_sequence (inst_base,
4119 opcode->qualifiers_list[qlf_idx]);
4120
4121 /* Print the hint. */
4122 output_info (_(" did you mean this?"));
4123 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4124 print_operands (buf, opcode, inst_base->operands);
4125 output_info (_(" %s"), buf);
4126
4127 /* Print out other variant(s) if there are any. */
4128 if (qlf_idx != 0
4129 || !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4130 output_info (_(" other valid variant(s):"));
4131
4132 /* For each pattern. */
4133 qualifiers_list = opcode->qualifiers_list;
4134 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4135 {
4136 /* Most opcodes have much fewer patterns in the list.
4137 The first NIL qualifier indicates the end of the list. */
4138 if (empty_qualifier_sequence_p (*qualifiers_list))
4139 break;
4140
4141 if (i != qlf_idx)
4142 {
4143 /* Mnemonics name. */
4144 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4145
4146 /* Assign the qualifiers. */
4147 assign_qualifier_sequence (inst_base, *qualifiers_list);
4148
4149 /* Print instruction. */
4150 print_operands (buf, opcode, inst_base->operands);
4151
4152 output_info (_(" %s"), buf);
4153 }
4154 }
4155 }
4156 break;
4157
4158 case AARCH64_OPDE_OUT_OF_RANGE:
4159 if (detail->data[0] != detail->data[1])
4160 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4161 detail->error ? detail->error : _("immediate value"),
4162 detail->data[0], detail->data[1], idx + 1, str);
4163 else
4164 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4165 detail->error ? detail->error : _("immediate value"),
4166 detail->data[0], idx + 1, str);
4167 break;
4168
4169 case AARCH64_OPDE_REG_LIST:
4170 if (detail->data[0] == 1)
4171 as_bad (_("invalid number of registers in the list; "
4172 "only 1 register is expected at operand %d -- `%s'"),
4173 idx + 1, str);
4174 else
4175 as_bad (_("invalid number of registers in the list; "
4176 "%d registers are expected at operand %d -- `%s'"),
4177 detail->data[0], idx + 1, str);
4178 break;
4179
4180 case AARCH64_OPDE_UNALIGNED:
4181 as_bad (_("immediate value should be a multiple of "
4182 "%d at operand %d -- `%s'"),
4183 detail->data[0], idx + 1, str);
4184 break;
4185
4186 default:
4187 gas_assert (0);
4188 break;
4189 }
4190 }
4191
4192 /* Process and output the error message about the operand mismatching.
4193
4194 When this function is called, the operand error information has
4195 been collected for an assembly line and there will be multiple
4196 errors in the case of multiple instruction templates; output the
4197 error message that most closely describes the problem. */
4198
4199 static void
4200 output_operand_error_report (char *str)
4201 {
4202 int largest_error_pos;
4203 const char *msg = NULL;
4204 enum aarch64_operand_error_kind kind;
4205 operand_error_record *curr;
4206 operand_error_record *head = operand_error_report.head;
4207 operand_error_record *record = NULL;
4208
4209 /* No error to report. */
4210 if (head == NULL)
4211 return;
4212
4213 gas_assert (head != NULL && operand_error_report.tail != NULL);
4214
4215 /* Only one error. */
4216 if (head == operand_error_report.tail)
4217 {
4218 DEBUG_TRACE ("single opcode entry with error kind: %s",
4219 operand_mismatch_kind_names[head->detail.kind]);
4220 output_operand_error_record (head, str);
4221 return;
4222 }
4223
4224 /* Find the error kind of the highest severity. */
4225 DEBUG_TRACE ("multiple opcode entries with error kind");
4226 kind = AARCH64_OPDE_NIL;
4227 for (curr = head; curr != NULL; curr = curr->next)
4228 {
4229 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4230 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4231 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4232 kind = curr->detail.kind;
4233 }
4234 gas_assert (kind != AARCH64_OPDE_NIL);
4235
4236 /* Pick up one of the errors of KIND to report. */
4237 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4238 for (curr = head; curr != NULL; curr = curr->next)
4239 {
4240 if (curr->detail.kind != kind)
4241 continue;
4242 /* If there are multiple errors, pick up the one with the highest
4243 mismatching operand index. In the case of multiple errors with
4244 the equally highest operand index, pick up the first one or the
4245 first one with non-NULL error message. */
4246 if (curr->detail.index > largest_error_pos
4247 || (curr->detail.index == largest_error_pos && msg == NULL
4248 && curr->detail.error != NULL))
4249 {
4250 largest_error_pos = curr->detail.index;
4251 record = curr;
4252 msg = record->detail.error;
4253 }
4254 }
4255
4256 gas_assert (largest_error_pos != -2 && record != NULL);
4257 DEBUG_TRACE ("Pick up error kind %s to report",
4258 operand_mismatch_kind_names[record->detail.kind]);
4259
4260 /* Output. */
4261 output_operand_error_record (record, str);
4262 }
4263 \f
4264 /* Write an AARCH64 instruction to buf - always little-endian. */
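/* For illustration: writing the NOP encoding 0xd503201f with
   put_aarch64_insn stores the bytes 0x1f, 0x20, 0x03, 0xd5 in that order,
   and get_aarch64_insn on the same buffer yields 0xd503201f again,
   independently of the target data endianness.  */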
4265 static void
4266 put_aarch64_insn (char *buf, uint32_t insn)
4267 {
4268 unsigned char *where = (unsigned char *) buf;
4269 where[0] = insn;
4270 where[1] = insn >> 8;
4271 where[2] = insn >> 16;
4272 where[3] = insn >> 24;
4273 }
4274
4275 static uint32_t
4276 get_aarch64_insn (char *buf)
4277 {
4278 unsigned char *where = (unsigned char *) buf;
4279 uint32_t result;
4280 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4281 return result;
4282 }
4283
4284 static void
4285 output_inst (struct aarch64_inst *new_inst)
4286 {
4287 char *to = NULL;
4288
4289 to = frag_more (INSN_SIZE);
4290
4291 frag_now->tc_frag_data.recorded = 1;
4292
4293 put_aarch64_insn (to, inst.base.value);
4294
4295 if (inst.reloc.type != BFD_RELOC_UNUSED)
4296 {
4297 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4298 INSN_SIZE, &inst.reloc.exp,
4299 inst.reloc.pc_rel,
4300 inst.reloc.type);
4301 DEBUG_TRACE ("Prepared relocation fix up");
4302 /* Don't check the addend value against the instruction size,
4303 that's the job of our code in md_apply_fix(). */
4304 fixp->fx_no_overflow = 1;
4305 if (new_inst != NULL)
4306 fixp->tc_fix_data.inst = new_inst;
4307 if (aarch64_gas_internal_fixup_p ())
4308 {
4309 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4310 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4311 fixp->fx_addnumber = inst.reloc.flags;
4312 }
4313 }
4314
4315 dwarf2_emit_insn (INSN_SIZE);
4316 }
4317
4318 /* Link together opcodes of the same name. */
4319
4320 struct templates
4321 {
4322 aarch64_opcode *opcode;
4323 struct templates *next;
4324 };
4325
4326 typedef struct templates templates;
4327
4328 static templates *
4329 lookup_mnemonic (const char *start, int len)
4330 {
4331 templates *templ = NULL;
4332
4333 templ = hash_find_n (aarch64_ops_hsh, start, len);
4334 return templ;
4335 }
4336
4337 /* Subroutine of md_assemble, responsible for looking up the primary
4338 opcode from the mnemonic the user wrote. STR points to the
4339 beginning of the mnemonic. */
4340
4341 static templates *
4342 opcode_lookup (char **str)
4343 {
4344 char *end, *base;
4345 const aarch64_cond *cond;
4346 char condname[16];
4347 int len;
4348
4349 /* Scan up to the end of the mnemonic, which must end in white space,
4350 '.', or end of string. */
4351 for (base = end = *str; is_part_of_name(*end); end++)
4352 if (*end == '.')
4353 break;
4354
4355 if (end == base)
4356 return 0;
4357
4358 inst.cond = COND_ALWAYS;
4359
4360 /* Handle a possible condition. */
4361 if (end[0] == '.')
4362 {
4363 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4364 if (cond)
4365 {
4366 inst.cond = cond->value;
4367 *str = end + 3;
4368 }
4369 else
4370 {
4371 *str = end;
4372 return 0;
4373 }
4374 }
4375 else
4376 *str = end;
4377
4378 len = end - base;
4379
4380 if (inst.cond == COND_ALWAYS)
4381 {
4382 /* Look for unaffixed mnemonic. */
4383 return lookup_mnemonic (base, len);
4384 }
4385 else if (len <= 13)
4386 {
4387 /* Append ".c" to the mnemonic if it is conditional. */
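/* For illustration (assuming the conditional templates are entered under
   the ".c"-suffixed name): "b.eq" is parsed above with inst.cond set to
   the EQ condition value and is then looked up here as the mnemonic
   "b.c".  */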
4388 memcpy (condname, base, len);
4389 memcpy (condname + len, ".c", 2);
4390 base = condname;
4391 len += 2;
4392 return lookup_mnemonic (base, len);
4393 }
4394
4395 return NULL;
4396 }
4397
4398 /* Internal helper routine converting a vector neon_type_el structure
4399 *VECTYPE to a corresponding operand qualifier. */
4400
4401 static inline aarch64_opnd_qualifier_t
4402 vectype_to_qualifier (const struct neon_type_el *vectype)
4403 {
4404 /* Element size in bytes indexed by neon_el_type. */
4405 const unsigned char ele_size[5]
4406 = {1, 2, 4, 8, 16};
4407
4408 if (!vectype->defined || vectype->type == NT_invtype)
4409 goto vectype_conversion_fail;
4410
4411 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4412
4413 if (vectype->defined & NTA_HASINDEX)
4414 /* Vector element register. */
4415 return AARCH64_OPND_QLF_S_B + vectype->type;
4416 else
4417 {
4418 /* Vector register. */
4419 int reg_size = ele_size[vectype->type] * vectype->width;
4420 unsigned offset;
4421 if (reg_size != 16 && reg_size != 8)
4422 goto vectype_conversion_fail;
4423 /* The conversion is calculated based on the relation of the order of
4424 qualifiers to the vector element size and vector register size. */
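/* For illustration, assuming the vector qualifiers are ordered
   8B, 16B, 4H, 8H, 2S, 4S, 1D, 2D, 1Q (the relation this formula
   relies on): for the 4S arrangement, type is NT_s (2) and reg_size
   is 4 * 4 = 16, so offset = (2 << 1) + (16 >> 4) = 5, selecting
   AARCH64_OPND_QLF_V_4S.  */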
4425 offset = (vectype->type == NT_q)
4426 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4427 gas_assert (offset <= 8);
4428 return AARCH64_OPND_QLF_V_8B + offset;
4429 }
4430
4431 vectype_conversion_fail:
4432 first_error (_("bad vector arrangement type"));
4433 return AARCH64_OPND_QLF_NIL;
4434 }
4435
4436 /* Process an optional operand that has been omitted from the assembly line.
4437 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4438 instruction's opcode entry while IDX is the index of this omitted operand.
4439 */
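/* For illustration (assuming the usual opcode table entry for RET, whose
   optional Rn operand carries a default value of 30): assembling "ret"
   fills in the omitted operand as if "ret x30" had been written.  */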
4440
4441 static void
4442 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4443 int idx, aarch64_opnd_info *operand)
4444 {
4445 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4446 gas_assert (optional_operand_p (opcode, idx));
4447 gas_assert (!operand->present);
4448
4449 switch (type)
4450 {
4451 case AARCH64_OPND_Rd:
4452 case AARCH64_OPND_Rn:
4453 case AARCH64_OPND_Rm:
4454 case AARCH64_OPND_Rt:
4455 case AARCH64_OPND_Rt2:
4456 case AARCH64_OPND_Rs:
4457 case AARCH64_OPND_Ra:
4458 case AARCH64_OPND_Rt_SYS:
4459 case AARCH64_OPND_Rd_SP:
4460 case AARCH64_OPND_Rn_SP:
4461 case AARCH64_OPND_Fd:
4462 case AARCH64_OPND_Fn:
4463 case AARCH64_OPND_Fm:
4464 case AARCH64_OPND_Fa:
4465 case AARCH64_OPND_Ft:
4466 case AARCH64_OPND_Ft2:
4467 case AARCH64_OPND_Sd:
4468 case AARCH64_OPND_Sn:
4469 case AARCH64_OPND_Sm:
4470 case AARCH64_OPND_Vd:
4471 case AARCH64_OPND_Vn:
4472 case AARCH64_OPND_Vm:
4473 case AARCH64_OPND_VdD1:
4474 case AARCH64_OPND_VnD1:
4475 operand->reg.regno = default_value;
4476 break;
4477
4478 case AARCH64_OPND_Ed:
4479 case AARCH64_OPND_En:
4480 case AARCH64_OPND_Em:
4481 operand->reglane.regno = default_value;
4482 break;
4483
4484 case AARCH64_OPND_IDX:
4485 case AARCH64_OPND_BIT_NUM:
4486 case AARCH64_OPND_IMMR:
4487 case AARCH64_OPND_IMMS:
4488 case AARCH64_OPND_SHLL_IMM:
4489 case AARCH64_OPND_IMM_VLSL:
4490 case AARCH64_OPND_IMM_VLSR:
4491 case AARCH64_OPND_CCMP_IMM:
4492 case AARCH64_OPND_FBITS:
4493 case AARCH64_OPND_UIMM4:
4494 case AARCH64_OPND_UIMM3_OP1:
4495 case AARCH64_OPND_UIMM3_OP2:
4496 case AARCH64_OPND_IMM:
4497 case AARCH64_OPND_WIDTH:
4498 case AARCH64_OPND_UIMM7:
4499 case AARCH64_OPND_NZCV:
4500 operand->imm.value = default_value;
4501 break;
4502
4503 case AARCH64_OPND_EXCEPTION:
4504 inst.reloc.type = BFD_RELOC_UNUSED;
4505 break;
4506
4507 case AARCH64_OPND_BARRIER_ISB:
4508 operand->barrier = aarch64_barrier_options + default_value;
4509
4510 default:
4511 break;
4512 }
4513 }
4514
4515 /* Process the relocation type for move wide instructions.
4516 Return TRUE on success; otherwise return FALSE. */
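/* For illustration (assuming the parser maps the ":abs_g1:" modifier to
   BFD_RELOC_AARCH64_MOVW_G1): "movz x0, #:abs_g1:sym" reaches this point
   with that relocation type and gets a shifter amount of 16 below, while
   a signed variant such as BFD_RELOC_AARCH64_MOVW_G1_S would be rejected
   here if the opcode were MOVK.  */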
4517
4518 static bfd_boolean
4519 process_movw_reloc_info (void)
4520 {
4521 int is32;
4522 unsigned shift;
4523
4524 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4525
4526 if (inst.base.opcode->op == OP_MOVK)
4527 switch (inst.reloc.type)
4528 {
4529 case BFD_RELOC_AARCH64_MOVW_G0_S:
4530 case BFD_RELOC_AARCH64_MOVW_G1_S:
4531 case BFD_RELOC_AARCH64_MOVW_G2_S:
4532 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4533 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4534 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4535 set_syntax_error
4536 (_("the specified relocation type is not allowed for MOVK"));
4537 return FALSE;
4538 default:
4539 break;
4540 }
4541
4542 switch (inst.reloc.type)
4543 {
4544 case BFD_RELOC_AARCH64_MOVW_G0:
4545 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4546 case BFD_RELOC_AARCH64_MOVW_G0_S:
4547 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4548 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4549 shift = 0;
4550 break;
4551 case BFD_RELOC_AARCH64_MOVW_G1:
4552 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4553 case BFD_RELOC_AARCH64_MOVW_G1_S:
4554 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4555 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4556 shift = 16;
4557 break;
4558 case BFD_RELOC_AARCH64_MOVW_G2:
4559 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4560 case BFD_RELOC_AARCH64_MOVW_G2_S:
4561 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4562 if (is32)
4563 {
4564 set_fatal_syntax_error
4565 (_("the specified relocation type is not allowed for 32-bit "
4566 "register"));
4567 return FALSE;
4568 }
4569 shift = 32;
4570 break;
4571 case BFD_RELOC_AARCH64_MOVW_G3:
4572 if (is32)
4573 {
4574 set_fatal_syntax_error
4575 (_("the specified relocation type is not allowed for 32-bit "
4576 "register"));
4577 return FALSE;
4578 }
4579 shift = 48;
4580 break;
4581 default:
4582 /* More cases should be added when more MOVW-related relocation types
4583 are supported in GAS. */
4584 gas_assert (aarch64_gas_internal_fixup_p ());
4585 /* The shift amount should have already been set by the parser. */
4586 return TRUE;
4587 }
4588 inst.base.operands[1].shifter.amount = shift;
4589 return TRUE;
4590 }
4591
4592 /* A primitive base-2 log calculator. */
4593
4594 static inline unsigned int
4595 get_logsz (unsigned int size)
4596 {
4597 const unsigned char ls[16] =
4598 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4599 if (size > 16)
4600 {
4601 gas_assert (0);
4602 return -1;
4603 }
4604 gas_assert (ls[size - 1] != (unsigned char)-1);
4605 return ls[size - 1];
4606 }
4607
4608 /* Determine and return the real reloc type code for an instruction
4609 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4610
4611 static inline bfd_reloc_code_real_type
4612 ldst_lo12_determine_real_reloc_type (void)
4613 {
4614 int logsz;
4615 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4616 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4617
4618 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4619 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4620 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4621 BFD_RELOC_AARCH64_LDST128_LO12
4622 };
4623
4624 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4625 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4626
4627 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4628 opd1_qlf =
4629 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4630 1, opd0_qlf, 0);
4631 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4632
4633 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4634 gas_assert (logsz >= 0 && logsz <= 4);
4635
4636 return reloc_ldst_lo12[logsz];
4637 }
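/* For illustration (assuming the 32-bit form of LDR, whose address
   qualifier has a 4-byte element size): "ldr w0, [x1, #:lo12:sym]" gives
   get_logsz (4) == 2 and the pseudo reloc is narrowed to
   BFD_RELOC_AARCH64_LDST32_LO12.  */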
4638
4639 /* Check whether a register list REGINFO is valid. The registers must be
4640 numbered in increasing order (modulo 32), in increments of one or two.
4641
4642 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4643 increments of two.
4644
4645 Return FALSE if such a register list is invalid, otherwise return TRUE. */
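/* For illustration of the REGINFO layout as read below: bits [1:0] hold
   the number of registers minus one, bits [6:2] the first register
   number, and each further register occupies the next 5-bit field; e.g.
   a list of the three consecutive registers V1-V3 would be encoded as
   (3 << 12) | (2 << 7) | (1 << 2) | 2.  */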
4646
4647 static bfd_boolean
4648 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4649 {
4650 uint32_t i, nb_regs, prev_regno, incr;
4651
4652 nb_regs = 1 + (reginfo & 0x3);
4653 reginfo >>= 2;
4654 prev_regno = reginfo & 0x1f;
4655 incr = accept_alternate ? 2 : 1;
4656
4657 for (i = 1; i < nb_regs; ++i)
4658 {
4659 uint32_t curr_regno;
4660 reginfo >>= 5;
4661 curr_regno = reginfo & 0x1f;
4662 if (curr_regno != ((prev_regno + incr) & 0x1f))
4663 return FALSE;
4664 prev_regno = curr_regno;
4665 }
4666
4667 return TRUE;
4668 }
4669
4670 /* Generic instruction operand parser. This does no encoding and no
4671 semantic validation; it merely squirrels values away in the inst
4672 structure. Returns TRUE or FALSE depending on whether the
4673 specified grammar matched. */
4674
4675 static bfd_boolean
4676 parse_operands (char *str, const aarch64_opcode *opcode)
4677 {
4678 int i;
4679 char *backtrack_pos = 0;
4680 const enum aarch64_opnd *operands = opcode->operands;
4681
4682 clear_error ();
4683 skip_whitespace (str);
4684
4685 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4686 {
4687 int64_t val;
4688 int isreg32, isregzero;
4689 int comma_skipped_p = 0;
4690 aarch64_reg_type rtype;
4691 struct neon_type_el vectype;
4692 aarch64_opnd_info *info = &inst.base.operands[i];
4693
4694 DEBUG_TRACE ("parse operand %d", i);
4695
4696 /* Assign the operand code. */
4697 info->type = operands[i];
4698
4699 if (optional_operand_p (opcode, i))
4700 {
4701 /* Remember where we are in case we need to backtrack. */
4702 gas_assert (!backtrack_pos);
4703 backtrack_pos = str;
4704 }
4705
4706 /* Expect a comma between operands; the backtrack mechanism will take
4707 care of cases of an omitted optional operand. */
4708 if (i > 0 && ! skip_past_char (&str, ','))
4709 {
4710 set_syntax_error (_("comma expected between operands"));
4711 goto failure;
4712 }
4713 else
4714 comma_skipped_p = 1;
4715
4716 switch (operands[i])
4717 {
4718 case AARCH64_OPND_Rd:
4719 case AARCH64_OPND_Rn:
4720 case AARCH64_OPND_Rm:
4721 case AARCH64_OPND_Rt:
4722 case AARCH64_OPND_Rt2:
4723 case AARCH64_OPND_Rs:
4724 case AARCH64_OPND_Ra:
4725 case AARCH64_OPND_Rt_SYS:
4726 case AARCH64_OPND_PAIRREG:
4727 po_int_reg_or_fail (1, 0);
4728 break;
4729
4730 case AARCH64_OPND_Rd_SP:
4731 case AARCH64_OPND_Rn_SP:
4732 po_int_reg_or_fail (0, 1);
4733 break;
4734
4735 case AARCH64_OPND_Rm_EXT:
4736 case AARCH64_OPND_Rm_SFT:
4737 po_misc_or_fail (parse_shifter_operand
4738 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4739 ? SHIFTED_ARITH_IMM
4740 : SHIFTED_LOGIC_IMM)));
4741 if (!info->shifter.operator_present)
4742 {
4743 /* Default to LSL if not present. Libopcodes prefers shifter
4744 kind to be explicit. */
4745 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4746 info->shifter.kind = AARCH64_MOD_LSL;
4747 /* For Rm_EXT, libopcodes will carry out a further check on whether
4748 or not the stack pointer is used in the instruction (recall that
4749 "the extend operator is not optional unless at least one of
4750 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4751 }
4752 break;
4753
4754 case AARCH64_OPND_Fd:
4755 case AARCH64_OPND_Fn:
4756 case AARCH64_OPND_Fm:
4757 case AARCH64_OPND_Fa:
4758 case AARCH64_OPND_Ft:
4759 case AARCH64_OPND_Ft2:
4760 case AARCH64_OPND_Sd:
4761 case AARCH64_OPND_Sn:
4762 case AARCH64_OPND_Sm:
4763 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4764 if (val == PARSE_FAIL)
4765 {
4766 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4767 goto failure;
4768 }
4769 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4770
4771 info->reg.regno = val;
4772 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4773 break;
4774
4775 case AARCH64_OPND_Vd:
4776 case AARCH64_OPND_Vn:
4777 case AARCH64_OPND_Vm:
4778 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4779 if (val == PARSE_FAIL)
4780 {
4781 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4782 goto failure;
4783 }
4784 if (vectype.defined & NTA_HASINDEX)
4785 goto failure;
4786
4787 info->reg.regno = val;
4788 info->qualifier = vectype_to_qualifier (&vectype);
4789 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4790 goto failure;
4791 break;
4792
4793 case AARCH64_OPND_VdD1:
4794 case AARCH64_OPND_VnD1:
4795 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4796 if (val == PARSE_FAIL)
4797 {
4798 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4799 goto failure;
4800 }
4801 if (vectype.type != NT_d || vectype.index != 1)
4802 {
4803 set_fatal_syntax_error
4804 (_("the top half of a 128-bit FP/SIMD register is expected"));
4805 goto failure;
4806 }
4807 info->reg.regno = val;
4808 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
4809 here; this is correct for the purpose of encoding/decoding, since
4810 only the register number is explicitly encoded in the related
4811 instructions, although it appears a bit hacky. */
4812 info->qualifier = AARCH64_OPND_QLF_S_D;
4813 break;
4814
4815 case AARCH64_OPND_Ed:
4816 case AARCH64_OPND_En:
4817 case AARCH64_OPND_Em:
4818 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4819 if (val == PARSE_FAIL)
4820 {
4821 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4822 goto failure;
4823 }
4824 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4825 goto failure;
4826
4827 info->reglane.regno = val;
4828 info->reglane.index = vectype.index;
4829 info->qualifier = vectype_to_qualifier (&vectype);
4830 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4831 goto failure;
4832 break;
4833
4834 case AARCH64_OPND_LVn:
4835 case AARCH64_OPND_LVt:
4836 case AARCH64_OPND_LVt_AL:
4837 case AARCH64_OPND_LEt:
4838 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4839 goto failure;
4840 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4841 {
4842 set_fatal_syntax_error (_("invalid register list"));
4843 goto failure;
4844 }
4845 info->reglist.first_regno = (val >> 2) & 0x1f;
4846 info->reglist.num_regs = (val & 0x3) + 1;
4847 if (operands[i] == AARCH64_OPND_LEt)
4848 {
4849 if (!(vectype.defined & NTA_HASINDEX))
4850 goto failure;
4851 info->reglist.has_index = 1;
4852 info->reglist.index = vectype.index;
4853 }
4854 else if (!(vectype.defined & NTA_HASTYPE))
4855 goto failure;
4856 info->qualifier = vectype_to_qualifier (&vectype);
4857 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4858 goto failure;
4859 break;
4860
4861 case AARCH64_OPND_Cn:
4862 case AARCH64_OPND_Cm:
4863 po_reg_or_fail (REG_TYPE_CN);
4864 if (val > 15)
4865 {
4866 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4867 goto failure;
4868 }
4869 inst.base.operands[i].reg.regno = val;
4870 break;
4871
4872 case AARCH64_OPND_SHLL_IMM:
4873 case AARCH64_OPND_IMM_VLSR:
4874 po_imm_or_fail (1, 64);
4875 info->imm.value = val;
4876 break;
4877
4878 case AARCH64_OPND_CCMP_IMM:
4879 case AARCH64_OPND_FBITS:
4880 case AARCH64_OPND_UIMM4:
4881 case AARCH64_OPND_UIMM3_OP1:
4882 case AARCH64_OPND_UIMM3_OP2:
4883 case AARCH64_OPND_IMM_VLSL:
4884 case AARCH64_OPND_IMM:
4885 case AARCH64_OPND_WIDTH:
4886 po_imm_nc_or_fail ();
4887 info->imm.value = val;
4888 break;
4889
4890 case AARCH64_OPND_UIMM7:
4891 po_imm_or_fail (0, 127);
4892 info->imm.value = val;
4893 break;
4894
4895 case AARCH64_OPND_IDX:
4896 case AARCH64_OPND_BIT_NUM:
4897 case AARCH64_OPND_IMMR:
4898 case AARCH64_OPND_IMMS:
4899 po_imm_or_fail (0, 63);
4900 info->imm.value = val;
4901 break;
4902
4903 case AARCH64_OPND_IMM0:
4904 po_imm_nc_or_fail ();
4905 if (val != 0)
4906 {
4907 set_fatal_syntax_error (_("immediate zero expected"));
4908 goto failure;
4909 }
4910 info->imm.value = 0;
4911 break;
4912
4913 case AARCH64_OPND_FPIMM0:
4914 {
4915 int qfloat;
4916 bfd_boolean res1 = FALSE, res2 = FALSE;
4917 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4918 it is probably not worth the effort to support it. */
4919 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4920 && !(res2 = parse_constant_immediate (&str, &val)))
4921 goto failure;
4922 if ((res1 && qfloat == 0) || (res2 && val == 0))
4923 {
4924 info->imm.value = 0;
4925 info->imm.is_fp = 1;
4926 break;
4927 }
4928 set_fatal_syntax_error (_("immediate zero expected"));
4929 goto failure;
4930 }
4931
4932 case AARCH64_OPND_IMM_MOV:
4933 {
4934 char *saved = str;
4935 if (reg_name_p (str, REG_TYPE_R_Z_SP)
4936 || reg_name_p (str, REG_TYPE_VN))
4937 goto failure;
4938 str = saved;
4939 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4940 GE_OPT_PREFIX, 1));
4941 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4942 later. fix_mov_imm_insn will try to determine a machine
4943 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4944 message if the immediate cannot be moved by a single
4945 instruction. */
4946 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4947 inst.base.operands[i].skip = 1;
4948 }
4949 break;
4950
4951 case AARCH64_OPND_SIMD_IMM:
4952 case AARCH64_OPND_SIMD_IMM_SFT:
4953 if (! parse_big_immediate (&str, &val))
4954 goto failure;
4955 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4956 /* addr_off_p */ 0,
4957 /* need_libopcodes_p */ 1,
4958 /* skip_p */ 1);
4959 /* Parse shift.
4960 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4961 shift, we don't check it here; we leave the checking to
4962 the libopcodes (operand_general_constraint_met_p). By
4963 doing this, we achieve better diagnostics. */
4964 if (skip_past_comma (&str)
4965 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4966 goto failure;
4967 if (!info->shifter.operator_present
4968 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4969 {
4970 /* Default to LSL if not present. Libopcodes prefers shifter
4971 kind to be explicit. */
4972 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4973 info->shifter.kind = AARCH64_MOD_LSL;
4974 }
4975 break;
4976
4977 case AARCH64_OPND_FPIMM:
4978 case AARCH64_OPND_SIMD_FPIMM:
4979 {
4980 int qfloat;
4981 bfd_boolean dp_p
4982 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
4983 == 8);
4984 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
4985 goto failure;
4986 if (qfloat == 0)
4987 {
4988 set_fatal_syntax_error (_("invalid floating-point constant"));
4989 goto failure;
4990 }
4991 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4992 inst.base.operands[i].imm.is_fp = 1;
4993 }
4994 break;
4995
4996 case AARCH64_OPND_LIMM:
4997 po_misc_or_fail (parse_shifter_operand (&str, info,
4998 SHIFTED_LOGIC_IMM));
4999 if (info->shifter.operator_present)
5000 {
5001 set_fatal_syntax_error
5002 (_("shift not allowed for bitmask immediate"));
5003 goto failure;
5004 }
5005 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5006 /* addr_off_p */ 0,
5007 /* need_libopcodes_p */ 1,
5008 /* skip_p */ 1);
5009 break;
5010
5011 case AARCH64_OPND_AIMM:
5012 if (opcode->op == OP_ADD)
5013 /* ADD may have relocation types. */
5014 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5015 SHIFTED_ARITH_IMM));
5016 else
5017 po_misc_or_fail (parse_shifter_operand (&str, info,
5018 SHIFTED_ARITH_IMM));
5019 switch (inst.reloc.type)
5020 {
5021 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5022 info->shifter.amount = 12;
5023 break;
5024 case BFD_RELOC_UNUSED:
5025 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5026 if (info->shifter.kind != AARCH64_MOD_NONE)
5027 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5028 inst.reloc.pc_rel = 0;
5029 break;
5030 default:
5031 break;
5032 }
5033 info->imm.value = 0;
5034 if (!info->shifter.operator_present)
5035 {
5036 /* Default to LSL if not present. Libopcodes prefers shifter
5037 kind to be explicit. */
5038 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5039 info->shifter.kind = AARCH64_MOD_LSL;
5040 }
5041 break;
5042
5043 case AARCH64_OPND_HALF:
5044 {
5045 /* #<imm16> or relocation. */
5046 int internal_fixup_p;
5047 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5048 if (internal_fixup_p)
5049 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5050 skip_whitespace (str);
5051 if (skip_past_comma (&str))
5052 {
5053 /* {, LSL #<shift>} */
5054 if (! aarch64_gas_internal_fixup_p ())
5055 {
5056 set_fatal_syntax_error (_("can't mix relocation modifier "
5057 "with explicit shift"));
5058 goto failure;
5059 }
5060 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5061 }
5062 else
5063 inst.base.operands[i].shifter.amount = 0;
5064 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5065 inst.base.operands[i].imm.value = 0;
5066 if (! process_movw_reloc_info ())
5067 goto failure;
5068 }
5069 break;
5070
5071 case AARCH64_OPND_EXCEPTION:
5072 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
5073 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5074 /* addr_off_p */ 0,
5075 /* need_libopcodes_p */ 0,
5076 /* skip_p */ 1);
5077 break;
5078
5079 case AARCH64_OPND_NZCV:
5080 {
5081 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5082 if (nzcv != NULL)
5083 {
5084 str += 4;
5085 info->imm.value = nzcv->value;
5086 break;
5087 }
5088 po_imm_or_fail (0, 15);
5089 info->imm.value = val;
5090 }
5091 break;
5092
5093 case AARCH64_OPND_COND:
5094 case AARCH64_OPND_COND1:
5095 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5096 str += 2;
5097 if (info->cond == NULL)
5098 {
5099 set_syntax_error (_("invalid condition"));
5100 goto failure;
5101 }
5102 else if (operands[i] == AARCH64_OPND_COND1
5103 && (info->cond->value & 0xe) == 0xe)
5104 {
5105 /* Do not allow AL or NV. */
5106 set_default_error ();
5107 goto failure;
5108 }
5109 break;
5110
5111 case AARCH64_OPND_ADDR_ADRP:
5112 po_misc_or_fail (parse_adrp (&str));
5113 /* Clear the value, as the operand needs to be relocated. */
5114 info->imm.value = 0;
5115 break;
5116
5117 case AARCH64_OPND_ADDR_PCREL14:
5118 case AARCH64_OPND_ADDR_PCREL19:
5119 case AARCH64_OPND_ADDR_PCREL21:
5120 case AARCH64_OPND_ADDR_PCREL26:
5121 po_misc_or_fail (parse_address_reloc (&str, info));
5122 if (!info->addr.pcrel)
5123 {
5124 set_syntax_error (_("invalid pc-relative address"));
5125 goto failure;
5126 }
5127 if (inst.gen_lit_pool
5128 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5129 {
5130 /* Only permit "=value" in the literal load instructions.
5131 The literal will be generated by programmer_friendly_fixup. */
5132 set_syntax_error (_("invalid use of \"=immediate\""));
5133 goto failure;
5134 }
5135 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5136 {
5137 set_syntax_error (_("unrecognized relocation suffix"));
5138 goto failure;
5139 }
5140 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5141 {
5142 info->imm.value = inst.reloc.exp.X_add_number;
5143 inst.reloc.type = BFD_RELOC_UNUSED;
5144 }
5145 else
5146 {
5147 info->imm.value = 0;
5148 if (inst.reloc.type == BFD_RELOC_UNUSED)
5149 switch (opcode->iclass)
5150 {
5151 case compbranch:
5152 case condbranch:
5153 /* e.g. CBZ or B.COND */
5154 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5155 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5156 break;
5157 case testbranch:
5158 /* e.g. TBZ */
5159 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5160 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5161 break;
5162 case branch_imm:
5163 /* e.g. B or BL */
5164 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5165 inst.reloc.type =
5166 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5167 : BFD_RELOC_AARCH64_JUMP26;
5168 break;
5169 case loadlit:
5170 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5171 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5172 break;
5173 case pcreladdr:
5174 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5175 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5176 break;
5177 default:
5178 gas_assert (0);
5179 abort ();
5180 }
5181 inst.reloc.pc_rel = 1;
5182 }
5183 break;
5184
5185 case AARCH64_OPND_ADDR_SIMPLE:
5186 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5187 /* [<Xn|SP>{, #<simm>}] */
5188 po_char_or_fail ('[');
5189 po_reg_or_fail (REG_TYPE_R64_SP);
5190 /* Accept optional ", #0". */
5191 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5192 && skip_past_char (&str, ','))
5193 {
5194 skip_past_char (&str, '#');
5195 if (! skip_past_char (&str, '0'))
5196 {
5197 set_fatal_syntax_error
5198 (_("the optional immediate offset can only be 0"));
5199 goto failure;
5200 }
5201 }
5202 po_char_or_fail (']');
5203 info->addr.base_regno = val;
5204 break;
5205
5206 case AARCH64_OPND_ADDR_REGOFF:
5207 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5208 po_misc_or_fail (parse_address (&str, info, 0));
5209 if (info->addr.pcrel || !info->addr.offset.is_reg
5210 || !info->addr.preind || info->addr.postind
5211 || info->addr.writeback)
5212 {
5213 set_syntax_error (_("invalid addressing mode"));
5214 goto failure;
5215 }
5216 if (!info->shifter.operator_present)
5217 {
5218 /* Default to LSL if not present. Libopcodes prefers shifter
5219 kind to be explicit. */
5220 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5221 info->shifter.kind = AARCH64_MOD_LSL;
5222 }
5223 /* Qualifier to be deduced by libopcodes. */
5224 break;
5225
5226 case AARCH64_OPND_ADDR_SIMM7:
5227 po_misc_or_fail (parse_address (&str, info, 0));
5228 if (info->addr.pcrel || info->addr.offset.is_reg
5229 || (!info->addr.preind && !info->addr.postind))
5230 {
5231 set_syntax_error (_("invalid addressing mode"));
5232 goto failure;
5233 }
5234 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5235 /* addr_off_p */ 1,
5236 /* need_libopcodes_p */ 1,
5237 /* skip_p */ 0);
5238 break;
5239
5240 case AARCH64_OPND_ADDR_SIMM9:
5241 case AARCH64_OPND_ADDR_SIMM9_2:
5242 po_misc_or_fail (parse_address_reloc (&str, info));
5243 if (info->addr.pcrel || info->addr.offset.is_reg
5244 || (!info->addr.preind && !info->addr.postind)
5245 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5246 && info->addr.writeback))
5247 {
5248 set_syntax_error (_("invalid addressing mode"));
5249 goto failure;
5250 }
5251 if (inst.reloc.type != BFD_RELOC_UNUSED)
5252 {
5253 set_syntax_error (_("relocation not allowed"));
5254 goto failure;
5255 }
5256 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5257 /* addr_off_p */ 1,
5258 /* need_libopcodes_p */ 1,
5259 /* skip_p */ 0);
5260 break;
5261
5262 case AARCH64_OPND_ADDR_UIMM12:
5263 po_misc_or_fail (parse_address_reloc (&str, info));
5264 if (info->addr.pcrel || info->addr.offset.is_reg
5265 || !info->addr.preind || info->addr.writeback)
5266 {
5267 set_syntax_error (_("invalid addressing mode"));
5268 goto failure;
5269 }
5270 if (inst.reloc.type == BFD_RELOC_UNUSED)
5271 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5272 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5273 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5274 /* Leave qualifier to be determined by libopcodes. */
5275 break;
5276
5277 case AARCH64_OPND_SIMD_ADDR_POST:
5278 /* [<Xn|SP>], <Xm|#<amount>> */
5279 po_misc_or_fail (parse_address (&str, info, 1));
5280 if (!info->addr.postind || !info->addr.writeback)
5281 {
5282 set_syntax_error (_("invalid addressing mode"));
5283 goto failure;
5284 }
5285 if (!info->addr.offset.is_reg)
5286 {
5287 if (inst.reloc.exp.X_op == O_constant)
5288 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5289 else
5290 {
5291 set_fatal_syntax_error
5292 (_("writeback value should be an immediate constant"));
5293 goto failure;
5294 }
5295 }
5296 /* No qualifier. */
5297 break;
5298
5299 case AARCH64_OPND_SYSREG:
5300 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5301 == PARSE_FAIL)
5302 {
5303 set_syntax_error (_("unknown or missing system register name"));
5304 goto failure;
5305 }
5306 inst.base.operands[i].sysreg = val;
5307 break;
5308
5309 case AARCH64_OPND_PSTATEFIELD:
5310 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5311 == PARSE_FAIL)
5312 {
5313 set_syntax_error (_("unknown or missing PSTATE field name"));
5314 goto failure;
5315 }
5316 inst.base.operands[i].pstatefield = val;
5317 break;
5318
5319 case AARCH64_OPND_SYSREG_IC:
5320 inst.base.operands[i].sysins_op =
5321 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5322 goto sys_reg_ins;
5323 case AARCH64_OPND_SYSREG_DC:
5324 inst.base.operands[i].sysins_op =
5325 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5326 goto sys_reg_ins;
5327 case AARCH64_OPND_SYSREG_AT:
5328 inst.base.operands[i].sysins_op =
5329 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5330 goto sys_reg_ins;
5331 case AARCH64_OPND_SYSREG_TLBI:
5332 inst.base.operands[i].sysins_op =
5333 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5334 sys_reg_ins:
5335 if (inst.base.operands[i].sysins_op == NULL)
5336 {
5337 set_fatal_syntax_error ( _("unknown or missing operation name"));
5338 goto failure;
5339 }
5340 break;
5341
5342 case AARCH64_OPND_BARRIER:
5343 case AARCH64_OPND_BARRIER_ISB:
5344 val = parse_barrier (&str);
5345 if (val != PARSE_FAIL
5346 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5347 {
5348 /* ISB only accepts the option name 'sy'. */
5349 set_syntax_error
5350 (_("the specified option is not accepted in ISB"));
5351 /* Turn off backtrack as this optional operand is present. */
5352 backtrack_pos = 0;
5353 goto failure;
5354 }
5355 /* This is an extension to accept a 0..15 immediate. */
5356 if (val == PARSE_FAIL)
5357 po_imm_or_fail (0, 15);
5358 info->barrier = aarch64_barrier_options + val;
5359 break;
5360
5361 case AARCH64_OPND_PRFOP:
5362 val = parse_pldop (&str);
5363 /* This is an extension to accept a 0..31 immediate. */
5364 if (val == PARSE_FAIL)
5365 po_imm_or_fail (0, 31);
5366 inst.base.operands[i].prfop = aarch64_prfops + val;
5367 break;
5368
5369 default:
5370 as_fatal (_("unhandled operand code %d"), operands[i]);
5371 }
5372
5373 /* If we get here, this operand was successfully parsed. */
5374 inst.base.operands[i].present = 1;
5375 continue;
5376
5377 failure:
5378 /* The parse routine should already have set the error, but in case
5379 not, set a default one here. */
5380 if (! error_p ())
5381 set_default_error ();
5382
5383 if (! backtrack_pos)
5384 goto parse_operands_return;
5385
5386 {
5387 /* We reach here because this operand is marked as optional, and
5388 either no operand was supplied or the operand was supplied but it
5389 was syntactically incorrect. In the latter case we report an
5390 error. In the former case we perform a few more checks before
5391 dropping through to the code to insert the default operand. */
5392
5393 char *tmp = backtrack_pos;
5394 char endchar = END_OF_INSN;
5395
5396 if (i != (aarch64_num_of_operands (opcode) - 1))
5397 endchar = ',';
5398 skip_past_char (&tmp, ',');
5399
5400 if (*tmp != endchar)
5401 /* The user has supplied an operand in the wrong format. */
5402 goto parse_operands_return;
5403
5404 /* Make sure there is not a comma before the optional operand.
5405 For example the fifth operand of 'sys' is optional:
5406
5407 sys #0,c0,c0,#0, <--- wrong
5408 sys #0,c0,c0,#0 <--- correct. */
5409 if (comma_skipped_p && i && endchar == END_OF_INSN)
5410 {
5411 set_fatal_syntax_error
5412 (_("unexpected comma before the omitted optional operand"));
5413 goto parse_operands_return;
5414 }
5415 }
5416
5417 /* Reaching here means we are dealing with an optional operand that is
5418 omitted from the assembly line. */
5419 gas_assert (optional_operand_p (opcode, i));
5420 info->present = 0;
5421 process_omitted_operand (operands[i], opcode, i, info);
5422
5423 /* Try again, skipping the optional operand at backtrack_pos. */
5424 str = backtrack_pos;
5425 backtrack_pos = 0;
5426
5427 /* Clear any error record after the omitted optional operand has been
5428 successfully handled. */
5429 clear_error ();
5430 }
5431
5432 /* Check if we have parsed all the operands. */
5433 if (*str != '\0' && ! error_p ())
5434 {
5435 /* Set I to the index of the last present operand; this is
5436 for the purpose of diagnostics. */
5437 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5438 ;
5439 set_fatal_syntax_error
5440 (_("unexpected characters following instruction"));
5441 }
5442
5443 parse_operands_return:
5444
5445 if (error_p ())
5446 {
5447 DEBUG_TRACE ("parsing FAIL: %s - %s",
5448 operand_mismatch_kind_names[get_error_kind ()],
5449 get_error_message ());
5450 /* Record the operand error properly; this is useful when there
5451 are multiple instruction templates for a mnemonic name, so that
5452 later on, we can select the error that most closely describes
5453 the problem. */
5454 record_operand_error (opcode, i, get_error_kind (),
5455 get_error_message ());
5456 return FALSE;
5457 }
5458 else
5459 {
5460 DEBUG_TRACE ("parsing SUCCESS");
5461 return TRUE;
5462 }
5463 }
5464
5465 /* Perform some fix-ups to provide programmer-friendly features while
5466 keeping libopcodes happy, i.e. libopcodes only accepts the preferred
5467 architectural syntax.
5468 Return FALSE if there is any failure; otherwise return TRUE. */
5469
5470 static bfd_boolean
5471 programmer_friendly_fixup (aarch64_instruction *instr)
5472 {
5473 aarch64_inst *base = &instr->base;
5474 const aarch64_opcode *opcode = base->opcode;
5475 enum aarch64_op op = opcode->op;
5476 aarch64_opnd_info *operands = base->operands;
5477
5478 DEBUG_TRACE ("enter");
5479
5480 switch (opcode->iclass)
5481 {
5482 case testbranch:
5483 /* TBNZ Xn|Wn, #uimm6, label
5484 Test and Branch Not Zero: conditionally jumps to label if bit number
5485 uimm6 in register Xn is not zero. The bit number implies the width of
5486 the register, which may be written and should be disassembled as Wn if
5487 uimm is less than 32. */
5488 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5489 {
5490 if (operands[1].imm.value >= 32)
5491 {
5492 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5493 0, 31);
5494 return FALSE;
5495 }
5496 operands[0].qualifier = AARCH64_OPND_QLF_X;
5497 }
5498 break;
5499 case loadlit:
5500 /* LDR Wt, label | =value
5501 As a convenience, assemblers will typically permit the notation
5502 "=value" in conjunction with the pc-relative literal load instructions
5503 to automatically place an immediate value or symbolic address in a
5504 nearby literal pool and generate a hidden label which references it.
5505 ISREG has been set to 0 in the case of =value. */
5506 if (instr->gen_lit_pool
5507 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5508 {
5509 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5510 if (op == OP_LDRSW_LIT)
5511 size = 4;
5512 if (instr->reloc.exp.X_op != O_constant
5513 && instr->reloc.exp.X_op != O_big
5514 && instr->reloc.exp.X_op != O_symbol)
5515 {
5516 record_operand_error (opcode, 1,
5517 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5518 _("constant expression expected"));
5519 return FALSE;
5520 }
5521 if (! add_to_lit_pool (&instr->reloc.exp, size))
5522 {
5523 record_operand_error (opcode, 1,
5524 AARCH64_OPDE_OTHER_ERROR,
5525 _("literal pool insertion failed"));
5526 return FALSE;
5527 }
5528 }
5529 break;
5530 case log_shift:
5531 case bitfield:
5532 /* UXT[BHW] Wd, Wn
5533 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5534 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo-instruction which is
5535 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5536 A programmer-friendly assembler should accept a destination Xd in
5537 place of Wd, however that is not the preferred form for disassembly.
5538 */
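/* For illustration: "uxtb x1, w2" is accepted here and treated exactly
   as "uxtb w1, w2" by downgrading the destination qualifier from X to W
   before encoding.  */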
5539 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5540 && operands[1].qualifier == AARCH64_OPND_QLF_W
5541 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5542 operands[0].qualifier = AARCH64_OPND_QLF_W;
5543 break;
5544
5545 case addsub_ext:
5546 {
5547 /* In the 64-bit form, the final register operand is written as Wm
5548 for all but the (possibly omitted) UXTX/LSL and SXTX
5549 operators.
5550 As a programmer-friendly assembler, we accept e.g.
5551 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5552 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5553 int idx = aarch64_operand_index (opcode->operands,
5554 AARCH64_OPND_Rm_EXT);
5555 gas_assert (idx == 1 || idx == 2);
5556 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5557 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5558 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5559 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5560 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5561 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5562 }
5563 break;
5564
5565 default:
5566 break;
5567 }
5568
5569 DEBUG_TRACE ("exit with SUCCESS");
5570 return TRUE;
5571 }
5572
5573 /* Check for loads and stores that will cause unpredictable behavior. */
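/* For illustration (not exhaustive): "ldr x0, [x0], #8" writes back to
   the base register that is also the transfer register, and
   "ldp x0, x0, [x1]" loads the same register twice; both cases draw an
   "unpredictable" warning below.  */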
5574
5575 static void
5576 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5577 {
5578 aarch64_inst *base = &instr->base;
5579 const aarch64_opcode *opcode = base->opcode;
5580 const aarch64_opnd_info *opnds = base->operands;
5581 switch (opcode->iclass)
5582 {
5583 case ldst_pos:
5584 case ldst_imm9:
5585 case ldst_unscaled:
5586 case ldst_unpriv:
5587 /* Loading/storing the base register is unpredictable if writeback. */
5588 if ((aarch64_get_operand_class (opnds[0].type)
5589 == AARCH64_OPND_CLASS_INT_REG)
5590 && opnds[0].reg.regno == opnds[1].addr.base_regno
5591 && opnds[1].addr.base_regno != REG_SP
5592 && opnds[1].addr.writeback)
5593 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5594 break;
5595 case ldstpair_off:
5596 case ldstnapair_offs:
5597 case ldstpair_indexed:
5598 /* Loading/storing the base register is unpredictable if writeback. */
5599 if ((aarch64_get_operand_class (opnds[0].type)
5600 == AARCH64_OPND_CLASS_INT_REG)
5601 && (opnds[0].reg.regno == opnds[2].addr.base_regno
5602 || opnds[1].reg.regno == opnds[2].addr.base_regno)
5603 && opnds[2].addr.base_regno != REG_SP
5604 && opnds[2].addr.writeback)
5605 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5606 /* Load operations must load different registers. */
5607 if ((opcode->opcode & (1 << 22))
5608 && opnds[0].reg.regno == opnds[1].reg.regno)
5609 as_warn (_("unpredictable load of register pair -- `%s'"), str);
5610 break;
5611 default:
5612 break;
5613 }
5614 }
5615
5616 /* A wrapper function to interface with libopcodes on encoding and to
5617 record the error message if there is any.
5618
5619 Return TRUE on success; otherwise return FALSE. */
5620
5621 static bfd_boolean
5622 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5623 aarch64_insn *code)
5624 {
5625 aarch64_operand_error error_info;
5626 error_info.kind = AARCH64_OPDE_NIL;
5627 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5628 return TRUE;
5629 else
5630 {
5631 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5632 record_operand_error_info (opcode, &error_info);
5633 return FALSE;
5634 }
5635 }
5636
5637 #ifdef DEBUG_AARCH64
5638 static inline void
5639 dump_opcode_operands (const aarch64_opcode *opcode)
5640 {
5641 int i = 0;
5642 while (opcode->operands[i] != AARCH64_OPND_NIL)
5643 {
5644 aarch64_verbose ("\t\t opnd%d: %s", i,
5645 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5646 ? aarch64_get_operand_name (opcode->operands[i])
5647 : aarch64_get_operand_desc (opcode->operands[i]));
5648 ++i;
5649 }
5650 }
5651 #endif /* DEBUG_AARCH64 */
5652
5653 /* This is the guts of the machine-dependent assembler. STR points to a
5654 machine dependent instruction. This function is supposed to emit
5655 the frags/bytes it assembles to. */
5656
5657 void
5658 md_assemble (char *str)
5659 {
5660 char *p = str;
5661 templates *template;
5662 aarch64_opcode *opcode;
5663 aarch64_inst *inst_base;
5664 unsigned saved_cond;
5665
5666 /* Align the previous label if needed. */
5667 if (last_label_seen != NULL)
5668 {
5669 symbol_set_frag (last_label_seen, frag_now);
5670 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5671 S_SET_SEGMENT (last_label_seen, now_seg);
5672 }
5673
5674 inst.reloc.type = BFD_RELOC_UNUSED;
5675
5676 DEBUG_TRACE ("\n\n");
5677 DEBUG_TRACE ("==============================");
5678 DEBUG_TRACE ("Enter md_assemble with %s", str);
5679
5680 template = opcode_lookup (&p);
5681 if (!template)
5682 {
5683 /* It wasn't an instruction, but it might be a register alias created
5684 by an "alias .req reg" directive. */
5685 if (!create_register_alias (str, p))
5686 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5687 str);
5688 return;
5689 }
5690
5691 skip_whitespace (p);
5692 if (*p == ',')
5693 {
5694 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5695 get_mnemonic_name (str), str);
5696 return;
5697 }
5698
5699 init_operand_error_report ();
5700
5701 /* Sections are assumed to start aligned. In an executable section, there
5702 is no MAP_DATA symbol pending, so we only align the address during the
5703 MAP_DATA --> MAP_INSN transition.
5704 For other sections, this is not guaranteed. */
5705 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
5706 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
5707 frag_align_code (2, 0);
5708
5709 saved_cond = inst.cond;
5710 reset_aarch64_instruction (&inst);
5711 inst.cond = saved_cond;
5712
5713 /* Iterate through all opcode entries with the same mnemonic name. */
5714 do
5715 {
5716 opcode = template->opcode;
5717
5718 DEBUG_TRACE ("opcode %s found", opcode->name);
5719 #ifdef DEBUG_AARCH64
5720 if (debug_dump)
5721 dump_opcode_operands (opcode);
5722 #endif /* DEBUG_AARCH64 */
5723
5724 mapping_state (MAP_INSN);
5725
5726 inst_base = &inst.base;
5727 inst_base->opcode = opcode;
5728
5729 /* Truly conditionally executed instructions, e.g. b.cond. */
5730 if (opcode->flags & F_COND)
5731 {
5732 gas_assert (inst.cond != COND_ALWAYS);
5733 inst_base->cond = get_cond_from_value (inst.cond);
5734 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5735 }
5736 else if (inst.cond != COND_ALWAYS)
5737 {
5738 /* We should not get here: the assembly looks like a conditional
5739 instruction but the opcode found is unconditional. */
5740 gas_assert (0);
5741 continue;
5742 }
5743
5744 if (parse_operands (p, opcode)
5745 && programmer_friendly_fixup (&inst)
5746 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5747 {
5748 /* Check that this instruction is supported for this CPU. */
5749 if (!opcode->avariant
5750 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5751 {
5752 as_bad (_("selected processor does not support `%s'"), str);
5753 return;
5754 }
5755
5756 warn_unpredictable_ldst (&inst, str);
5757
5758 if (inst.reloc.type == BFD_RELOC_UNUSED
5759 || !inst.reloc.need_libopcodes_p)
5760 output_inst (NULL);
5761 else
5762 {
5763 /* If a relocation is generated for the instruction, store the
5764 instruction information for the future fix-up. */
5765 struct aarch64_inst *copy;
5766 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5767 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5768 abort ();
5769 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5770 output_inst (copy);
5771 }
5772 return;
5773 }
5774
5775 template = template->next;
5776 if (template != NULL)
5777 {
5778 reset_aarch64_instruction (&inst);
5779 inst.cond = saved_cond;
5780 }
5781 }
5782 while (template != NULL);
5783
5784 /* Issue the error messages if any. */
5785 output_operand_error_report (str);
5786 }
5787
5788 /* Various frobbings of labels and their addresses. */
5789
5790 void
5791 aarch64_start_line_hook (void)
5792 {
5793 last_label_seen = NULL;
5794 }
5795
5796 void
5797 aarch64_frob_label (symbolS * sym)
5798 {
5799 last_label_seen = sym;
5800
5801 dwarf2_emit_label (sym);
5802 }
5803
5804 int
5805 aarch64_data_in_code (void)
5806 {
5807 if (!strncmp (input_line_pointer + 1, "data:", 5))
5808 {
5809 *input_line_pointer = '/';
5810 input_line_pointer += 5;
5811 *input_line_pointer = 0;
5812 return 1;
5813 }
5814
5815 return 0;
5816 }
5817
5818 char *
5819 aarch64_canonicalize_symbol_name (char *name)
5820 {
5821 int len;
5822
5823 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5824 *(name + len - 5) = 0;
5825
5826 return name;
5827 }
5828 \f
5829 /* Table of all register names defined by default. The user can
5830 define additional names with .req. Note that all register names
5831 should appear in both upper and lowercase variants. Some registers
5832 also have mixed-case names. */
5833
5834 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5835 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5836 #define REGSET31(p,t) \
5837 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5838 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5839 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5840 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5841 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5842 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5843 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5844 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5845 #define REGSET(p,t) \
5846 REGSET31(p,t), REGNUM(p,31,t)
5847
5848 /* These go into aarch64_reg_hsh hash-table. */
5849 static const reg_entry reg_names[] = {
5850 /* Integer registers. */
5851 REGSET31 (x, R_64), REGSET31 (X, R_64),
5852 REGSET31 (w, R_32), REGSET31 (W, R_32),
5853
5854 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5855 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5856
5857 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5858 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5859
5860 /* Coprocessor register numbers. */
5861 REGSET (c, CN), REGSET (C, CN),
5862
5863 /* Floating-point single precision registers. */
5864 REGSET (s, FP_S), REGSET (S, FP_S),
5865
5866 /* Floating-point double precision registers. */
5867 REGSET (d, FP_D), REGSET (D, FP_D),
5868
5869 /* Floating-point half precision registers. */
5870 REGSET (h, FP_H), REGSET (H, FP_H),
5871
5872 /* Floating-point byte precision registers. */
5873 REGSET (b, FP_B), REGSET (B, FP_B),
5874
5875 /* Floating-point quad precision registers. */
5876 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5877
5878 /* FP/SIMD registers. */
5879 REGSET (v, VN), REGSET (V, VN),
5880 };
5881
5882 #undef REGDEF
5883 #undef REGNUM
5884 #undef REGSET
5885
5886 #define N 1
5887 #define n 0
5888 #define Z 1
5889 #define z 0
5890 #define C 1
5891 #define c 0
5892 #define V 1
5893 #define v 0
5894 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5895 static const asm_nzcv nzcv_names[] = {
5896 {"nzcv", B (n, z, c, v)},
5897 {"nzcV", B (n, z, c, V)},
5898 {"nzCv", B (n, z, C, v)},
5899 {"nzCV", B (n, z, C, V)},
5900 {"nZcv", B (n, Z, c, v)},
5901 {"nZcV", B (n, Z, c, V)},
5902 {"nZCv", B (n, Z, C, v)},
5903 {"nZCV", B (n, Z, C, V)},
5904 {"Nzcv", B (N, z, c, v)},
5905 {"NzcV", B (N, z, c, V)},
5906 {"NzCv", B (N, z, C, v)},
5907 {"NzCV", B (N, z, C, V)},
5908 {"NZcv", B (N, Z, c, v)},
5909 {"NZcV", B (N, Z, c, V)},
5910 {"NZCv", B (N, Z, C, v)},
5911 {"NZCV", B (N, Z, C, V)}
5912 };
5913
5914 #undef N
5915 #undef n
5916 #undef Z
5917 #undef z
5918 #undef C
5919 #undef c
5920 #undef V
5921 #undef v
5922 #undef B
5923 \f
5924 /* MD interface: bits in the object file. */
5925
5926 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5927 for use in the a.out file, and stores them in the array pointed to by buf.
5928 This knows about the endian-ness of the target machine and does
5929 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
5930 2 (short) and 4 (long). Floating-point numbers are put out as a series
5931 of LITTLENUMS (shorts, here at least). */
5932
5933 void
5934 md_number_to_chars (char *buf, valueT val, int n)
5935 {
5936 if (target_big_endian)
5937 number_to_chars_bigendian (buf, val, n);
5938 else
5939 number_to_chars_littleendian (buf, val, n);
5940 }
5941
5942 /* MD interface: Sections. */
5943
5944 /* Estimate the size of a frag before relaxing. Assume everything fits in
5945 4 bytes. */
5946
5947 int
5948 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5949 {
5950 fragp->fr_var = 4;
5951 return 4;
5952 }
5953
5954 /* Round up a section size to the appropriate boundary. */
5955
5956 valueT
5957 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5958 {
5959 return size;
5960 }
5961
5962 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5963 of an rs_align_code fragment.
5964
5965 Here we fill the frag with the appropriate info for padding the
5966 output stream. The resulting frag will consist of a fixed (fr_fix)
5967 and of a repeating (fr_var) part.
5968
5969 The fixed content is always emitted before the repeating content and
5970 these two parts are used as follows in constructing the output:
5971 - the fixed part will be used to align to a valid instruction word
5972 boundary, in case that we start at a misaligned address; as no
5973 executable instruction can live at the misaligned location, we
5974 simply fill with zeros;
5975 - the variable part will be used to cover the remaining padding and
5976 we fill using the AArch64 NOP instruction.
5977
5978 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
5979 enough storage space for up to 3 bytes of padding back to a valid
5980 instruction alignment and exactly 4 bytes to store the NOP pattern. */
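/* For example, padding 6 bytes from a 2-byte-misaligned address emits two
   zero bytes (the fixed part) followed by one 4-byte NOP (the variable
   part, repeated by the generic code in write.c as often as needed).  */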
5981
5982 void
5983 aarch64_handle_align (fragS * fragP)
5984 {
5985 /* NOP = d503201f */
5986 /* AArch64 instructions are always little-endian. */
5987 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5988
5989 int bytes, fix, noop_size;
5990 char *p;
5991
5992 if (fragP->fr_type != rs_align_code)
5993 return;
5994
5995 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5996 p = fragP->fr_literal + fragP->fr_fix;
5997
5998 #ifdef OBJ_ELF
5999 gas_assert (fragP->tc_frag_data.recorded);
6000 #endif
6001
6002 noop_size = sizeof (aarch64_noop);
6003
6004 fix = bytes & (noop_size - 1);
6005 if (fix)
6006 {
6007 #ifdef OBJ_ELF
6008 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6009 #endif
6010 memset (p, 0, fix);
6011 p += fix;
6012 fragP->fr_fix += fix;
6013 }
6014
6015 if (noop_size)
6016 memcpy (p, aarch64_noop, noop_size);
6017 fragP->fr_var = noop_size;
6018 }
6019
6020 /* Perform target specific initialisation of a frag.
6021 Note - despite the name this initialisation is not done when the frag
6022 is created, but only when its type is assigned. A frag can be created
6023 and used a long time before its type is set, so beware of assuming that
6024 this initialisation is performed first. */
6025
6026 #ifndef OBJ_ELF
6027 void
6028 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6029 int max_chars ATTRIBUTE_UNUSED)
6030 {
6031 }
6032
6033 #else /* OBJ_ELF is defined. */
6034 void
6035 aarch64_init_frag (fragS * fragP, int max_chars)
6036 {
6037 /* Record a mapping symbol for alignment frags. We will delete this
6038 later if the alignment ends up empty. */
6039 if (!fragP->tc_frag_data.recorded)
6040 fragP->tc_frag_data.recorded = 1;
6041
6042 switch (fragP->fr_type)
6043 {
6044 case rs_align:
6045 case rs_align_test:
6046 case rs_fill:
6047 mapping_state_2 (MAP_DATA, max_chars);
6048 break;
6049 case rs_align_code:
6050 mapping_state_2 (MAP_INSN, max_chars);
6051 break;
6052 default:
6053 break;
6054 }
6055 }
6056 \f
6057 /* Initialize the DWARF-2 unwind information for this procedure. */
6058
6059 void
6060 tc_aarch64_frame_initial_instructions (void)
6061 {
6062 cfi_add_CFA_def_cfa (REG_SP, 0);
6063 }
6064 #endif /* OBJ_ELF */
6065
6066 /* Convert REGNAME to a DWARF-2 register number. */
6067
6068 int
6069 tc_aarch64_regname_to_dw2regnum (char *regname)
6070 {
6071 const reg_entry *reg = parse_reg (&regname);
6072 if (reg == NULL)
6073 return -1;
6074
6075 switch (reg->type)
6076 {
6077 case REG_TYPE_SP_32:
6078 case REG_TYPE_SP_64:
6079 case REG_TYPE_R_32:
6080 case REG_TYPE_R_64:
6081 return reg->number;
6082
6083 case REG_TYPE_FP_B:
6084 case REG_TYPE_FP_H:
6085 case REG_TYPE_FP_S:
6086 case REG_TYPE_FP_D:
6087 case REG_TYPE_FP_Q:
6088 return reg->number + 64;
6089
6090 default:
6091 break;
6092 }
6093 return -1;
6094 }
6095
6096 /* Implement DWARF2_ADDR_SIZE. */
6097
6098 int
6099 aarch64_dwarf2_addr_size (void)
6100 {
6101 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6102 if (ilp32_p)
6103 return 4;
6104 #endif
6105 return bfd_arch_bits_per_address (stdoutput) / 8;
6106 }
6107
6108 /* MD interface: Symbol and relocation handling. */
6109
6110 /* Return the address within the segment that a PC-relative fixup is
6111 relative to. For AArch64, PC-relative fixups applied to instructions
6112 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6113
6114 long
6115 md_pcrel_from_section (fixS * fixP, segT seg)
6116 {
6117 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6118
6119 /* If this is pc-relative and we are going to emit a relocation
6120 then we just want to put out any pipeline compensation that the linker
6121 will need. Otherwise we want to use the calculated base. */
6122 if (fixP->fx_pcrel
6123 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6124 || aarch64_force_relocation (fixP)))
6125 base = 0;
6126
6127 /* AArch64 should be consistent for all pc-relative relocations. */
6128 return base + AARCH64_PCREL_OFFSET;
6129 }
6130
6131 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
6132 Otherwise there is no need to provide default values for symbols. */
6133
6134 symbolS *
6135 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6136 {
6137 #ifdef OBJ_ELF
6138 if (name[0] == '_' && name[1] == 'G'
6139 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6140 {
6141 if (!GOT_symbol)
6142 {
6143 if (symbol_find (name))
6144 as_bad (_("GOT already in the symbol table"));
6145
6146 GOT_symbol = symbol_new (name, undefined_section,
6147 (valueT) 0, &zero_address_frag);
6148 }
6149
6150 return GOT_symbol;
6151 }
6152 #endif
6153
6154 return 0;
6155 }
6156
6157 /* Return non-zero if the indicated VALUE has overflowed the maximum
6158 range expressible by an unsigned number with the indicated number of
6159 BITS. */
6160
6161 static bfd_boolean
6162 unsigned_overflow (valueT value, unsigned bits)
6163 {
6164 valueT lim;
6165 if (bits >= sizeof (valueT) * 8)
6166 return FALSE;
6167 lim = (valueT) 1 << bits;
6168 return (value >= lim);
6169 }
6170
6171
6172 /* Return non-zero if the indicated VALUE has overflowed the maximum
6173 range expressible by a signed number with the indicated number of
6174 BITS. */
6175
6176 static bfd_boolean
6177 signed_overflow (offsetT value, unsigned bits)
6178 {
6179 offsetT lim;
6180 if (bits >= sizeof (offsetT) * 8)
6181 return FALSE;
6182 lim = (offsetT) 1 << (bits - 1);
6183 return (value < -lim || value >= lim);
6184 }
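/* For example, with BITS == 12, unsigned_overflow accepts values in the
   range 0..4095 while signed_overflow accepts -2048..2047.  */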
6185
6186 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6187 unsigned immediate offset load/store instruction, try to encode it as
6188 an unscaled, 9-bit, signed immediate offset load/store instruction.
6189 Return TRUE if it is successful; otherwise return FALSE.
6190
6191 As a programmer-friendly assembler, GAS generates LDUR/STUR instructions
6192 in response to the standard LDR/STR mnemonics when the immediate offset is
6193 unambiguous, i.e. when it is negative or unaligned. */
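/* For example, "ldr x0, [x1, #1]" cannot use the scaled form (the offset
   is not a multiple of 8), so it is encoded as "ldur x0, [x1, #1]";
   likewise "str w0, [x1, #-4]" becomes "stur w0, [x1, #-4]".  */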
6194
6195 static bfd_boolean
6196 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6197 {
6198 int idx;
6199 enum aarch64_op new_op;
6200 const aarch64_opcode *new_opcode;
6201
6202 gas_assert (instr->opcode->iclass == ldst_pos);
6203
6204 switch (instr->opcode->op)
6205 {
6206 case OP_LDRB_POS:new_op = OP_LDURB; break;
6207 case OP_STRB_POS: new_op = OP_STURB; break;
6208 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6209 case OP_LDRH_POS: new_op = OP_LDURH; break;
6210 case OP_STRH_POS: new_op = OP_STURH; break;
6211 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6212 case OP_LDR_POS: new_op = OP_LDUR; break;
6213 case OP_STR_POS: new_op = OP_STUR; break;
6214 case OP_LDRF_POS: new_op = OP_LDURV; break;
6215 case OP_STRF_POS: new_op = OP_STURV; break;
6216 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6217 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6218 default: new_op = OP_NIL; break;
6219 }
6220
6221 if (new_op == OP_NIL)
6222 return FALSE;
6223
6224 new_opcode = aarch64_get_opcode (new_op);
6225 gas_assert (new_opcode != NULL);
6226
6227 DEBUG_TRACE ("Check programmer-friendly STRB/LDRB -> STURB/LDURB: %d == %d",
6228 instr->opcode->op, new_opcode->op);
6229
6230 aarch64_replace_opcode (instr, new_opcode);
6231
6232 /* Clear the ADDR_SIMM9 operand's qualifier; otherwise the
6233 qualifier matching may fail because the out-of-date qualifier will
6234 prevent the operand from being updated with a new and correct qualifier. */
6235 idx = aarch64_operand_index (instr->opcode->operands,
6236 AARCH64_OPND_ADDR_SIMM9);
6237 gas_assert (idx == 1);
6238 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6239
6240 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6241
6242 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6243 return FALSE;
6244
6245 return TRUE;
6246 }
6247
6248 /* Called by fix_insn to fix a MOV immediate alias instruction.
6249
6250 Operand for a generic move immediate instruction, which is an alias
6251 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6252 a 32-bit/64-bit immediate value into a general register. An assembler error
6253 shall result if the immediate cannot be created by a single one of these
6254 instructions. If there is a choice, then to ensure reversibility an
6255 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
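/* For example, "mov x0, #0x12340000" is encoded as MOVZ
   (movz x0, #0x1234, lsl #16), "mov x0, #-1" as MOVN (movn x0, #0), and
   "mov x0, #0x5555555555555555" as ORR with a bitmask immediate
   (orr x0, xzr, #0x5555555555555555).  */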
6256
6257 static void
6258 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6259 {
6260 const aarch64_opcode *opcode;
6261
6262 /* Need to check if the destination is SP/ZR. The check has to be done
6263 before any aarch64_replace_opcode. */
6264 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6265 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6266
6267 instr->operands[1].imm.value = value;
6268 instr->operands[1].skip = 0;
6269
6270 if (try_mov_wide_p)
6271 {
6272 /* Try the MOVZ alias. */
6273 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6274 aarch64_replace_opcode (instr, opcode);
6275 if (aarch64_opcode_encode (instr->opcode, instr,
6276 &instr->value, NULL, NULL))
6277 {
6278 put_aarch64_insn (buf, instr->value);
6279 return;
6280 }
6281 /* Try the MOVN alias. */
6282 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6283 aarch64_replace_opcode (instr, opcode);
6284 if (aarch64_opcode_encode (instr->opcode, instr,
6285 &instr->value, NULL, NULL))
6286 {
6287 put_aarch64_insn (buf, instr->value);
6288 return;
6289 }
6290 }
6291
6292 if (try_mov_bitmask_p)
6293 {
6294 /* Try the ORR alias. */
6295 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6296 aarch64_replace_opcode (instr, opcode);
6297 if (aarch64_opcode_encode (instr->opcode, instr,
6298 &instr->value, NULL, NULL))
6299 {
6300 put_aarch64_insn (buf, instr->value);
6301 return;
6302 }
6303 }
6304
6305 as_bad_where (fixP->fx_file, fixP->fx_line,
6306 _("immediate cannot be moved by a single instruction"));
6307 }
6308
6309 /* An instruction operand which is immediate-related may have a symbol used
6310 in the assembly, e.g.
6311
6312 mov w0, u32
6313 .set u32, 0x00ffff00
6314
6315 At the time when the assembly instruction is parsed, a referenced symbol,
6316 like 'u32' in the above example, may not have been seen; a fixS is created
6317 in such a case and is handled here after symbols have been resolved.
6318 Instruction is fixed up with VALUE using the information in *FIXP plus
6319 extra information in FLAGS.
6320
6321 This function is called by md_apply_fix to fix up instructions that need
6322 a fix-up described above but do not involve any linker-time relocation. */
6323
6324 static void
6325 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6326 {
6327 int idx;
6328 uint32_t insn;
6329 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6330 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6331 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6332
6333 if (new_inst)
6334 {
6335 /* Now the instruction is about to be fixed-up, so the operand that
6336 was previously marked as 'ignored' needs to be unmarked in order
6337 to get the encoding done properly. */
6338 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6339 new_inst->operands[idx].skip = 0;
6340 }
6341
6342 gas_assert (opnd != AARCH64_OPND_NIL);
6343
6344 switch (opnd)
6345 {
6346 case AARCH64_OPND_EXCEPTION:
6347 if (unsigned_overflow (value, 16))
6348 as_bad_where (fixP->fx_file, fixP->fx_line,
6349 _("immediate out of range"));
6350 insn = get_aarch64_insn (buf);
6351 insn |= encode_svc_imm (value);
6352 put_aarch64_insn (buf, insn);
6353 break;
6354
6355 case AARCH64_OPND_AIMM:
6356 /* ADD or SUB with immediate.
6357 NOTE this assumes we come here with an add/sub shifted reg encoding
6358 3 322|2222|2 2 2 21111 111111
6359 1 098|7654|3 2 1 09876 543210 98765 43210
6360 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6361 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6362 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6363 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6364 ->
6365 3 322|2222|2 2 221111111111
6366 1 098|7654|3 2 109876543210 98765 43210
6367 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6368 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6369 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6370 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6371 Fields sf Rn Rd are already set. */
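/* For example, if the resolved value is -16, the ADD above is re-encoded
   as "sub ..., #16"; a value such as 0x12000, which does not fit in 12
   bits, is encoded with the shifted form "#0x12, lsl #12".  */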
6372 insn = get_aarch64_insn (buf);
6373 if (value < 0)
6374 {
6375 /* Add <-> sub. */
6376 insn = reencode_addsub_switch_add_sub (insn);
6377 value = -value;
6378 }
6379
6380 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6381 && unsigned_overflow (value, 12))
6382 {
6383 /* Try to shift the value by 12 to make it fit. */
6384 if (((value >> 12) << 12) == value
6385 && ! unsigned_overflow (value, 12 + 12))
6386 {
6387 value >>= 12;
6388 insn |= encode_addsub_imm_shift_amount (1);
6389 }
6390 }
6391
6392 if (unsigned_overflow (value, 12))
6393 as_bad_where (fixP->fx_file, fixP->fx_line,
6394 _("immediate out of range"));
6395
6396 insn |= encode_addsub_imm (value);
6397
6398 put_aarch64_insn (buf, insn);
6399 break;
6400
6401 case AARCH64_OPND_SIMD_IMM:
6402 case AARCH64_OPND_SIMD_IMM_SFT:
6403 case AARCH64_OPND_LIMM:
6404 /* Bit mask immediate. */
6405 gas_assert (new_inst != NULL);
6406 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6407 new_inst->operands[idx].imm.value = value;
6408 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6409 &new_inst->value, NULL, NULL))
6410 put_aarch64_insn (buf, new_inst->value);
6411 else
6412 as_bad_where (fixP->fx_file, fixP->fx_line,
6413 _("invalid immediate"));
6414 break;
6415
6416 case AARCH64_OPND_HALF:
6417 /* 16-bit unsigned immediate. */
6418 if (unsigned_overflow (value, 16))
6419 as_bad_where (fixP->fx_file, fixP->fx_line,
6420 _("immediate out of range"));
6421 insn = get_aarch64_insn (buf);
6422 insn |= encode_movw_imm (value & 0xffff);
6423 put_aarch64_insn (buf, insn);
6424 break;
6425
6426 case AARCH64_OPND_IMM_MOV:
6427 /* Operand for a generic move immediate instruction, which is
6428 an alias instruction that generates a single MOVZ, MOVN or ORR
6429 instruction to load a 32-bit/64-bit immediate value into a general
6430 register. An assembler error shall result if the immediate cannot be
6431 created by a single one of these instructions. If there is a choice,
6432 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6433 and MOVZ or MOVN to ORR. */
6434 gas_assert (new_inst != NULL);
6435 fix_mov_imm_insn (fixP, buf, new_inst, value);
6436 break;
6437
6438 case AARCH64_OPND_ADDR_SIMM7:
6439 case AARCH64_OPND_ADDR_SIMM9:
6440 case AARCH64_OPND_ADDR_SIMM9_2:
6441 case AARCH64_OPND_ADDR_UIMM12:
6442 /* Immediate offset in an address. */
6443 insn = get_aarch64_insn (buf);
6444
6445 gas_assert (new_inst != NULL && new_inst->value == insn);
6446 gas_assert (new_inst->opcode->operands[1] == opnd
6447 || new_inst->opcode->operands[2] == opnd);
6448
6449 /* Get the index of the address operand. */
6450 if (new_inst->opcode->operands[1] == opnd)
6451 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6452 idx = 1;
6453 else
6454 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6455 idx = 2;
6456
6457 /* Update the resolved offset value. */
6458 new_inst->operands[idx].addr.offset.imm = value;
6459
6460 /* Encode/fix-up. */
6461 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6462 &new_inst->value, NULL, NULL))
6463 {
6464 put_aarch64_insn (buf, new_inst->value);
6465 break;
6466 }
6467 else if (new_inst->opcode->iclass == ldst_pos
6468 && try_to_encode_as_unscaled_ldst (new_inst))
6469 {
6470 put_aarch64_insn (buf, new_inst->value);
6471 break;
6472 }
6473
6474 as_bad_where (fixP->fx_file, fixP->fx_line,
6475 _("immediate offset out of range"));
6476 break;
6477
6478 default:
6479 gas_assert (0);
6480 as_fatal (_("unhandled operand code %d"), opnd);
6481 }
6482 }
6483
6484 /* Apply a fixup (fixP) to segment data, once it has been determined
6485 by our caller that we have all the info we need to fix it up.
6486
6487 Parameter valP is the pointer to the value of the bits. */
6488
6489 void
6490 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6491 {
6492 offsetT value = *valP;
6493 uint32_t insn;
6494 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6495 int scale;
6496 unsigned flags = fixP->fx_addnumber;
6497
6498 DEBUG_TRACE ("\n\n");
6499 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6500 DEBUG_TRACE ("Enter md_apply_fix");
6501
6502 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6503
6504 /* Note whether this will delete the relocation. */
6505
6506 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6507 fixP->fx_done = 1;
6508
6509 /* Process the relocations. */
6510 switch (fixP->fx_r_type)
6511 {
6512 case BFD_RELOC_NONE:
6513 /* This will need to go in the object file. */
6514 fixP->fx_done = 0;
6515 break;
6516
6517 case BFD_RELOC_8:
6518 case BFD_RELOC_8_PCREL:
6519 if (fixP->fx_done || !seg->use_rela_p)
6520 md_number_to_chars (buf, value, 1);
6521 break;
6522
6523 case BFD_RELOC_16:
6524 case BFD_RELOC_16_PCREL:
6525 if (fixP->fx_done || !seg->use_rela_p)
6526 md_number_to_chars (buf, value, 2);
6527 break;
6528
6529 case BFD_RELOC_32:
6530 case BFD_RELOC_32_PCREL:
6531 if (fixP->fx_done || !seg->use_rela_p)
6532 md_number_to_chars (buf, value, 4);
6533 break;
6534
6535 case BFD_RELOC_64:
6536 case BFD_RELOC_64_PCREL:
6537 if (fixP->fx_done || !seg->use_rela_p)
6538 md_number_to_chars (buf, value, 8);
6539 break;
6540
6541 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6542 /* We claim that these fixups have been processed here, even if
6543 in fact we generate an error because we do not have a reloc
6544 for them, so tc_gen_reloc() will reject them. */
6545 fixP->fx_done = 1;
6546 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6547 {
6548 as_bad_where (fixP->fx_file, fixP->fx_line,
6549 _("undefined symbol %s used as an immediate value"),
6550 S_GET_NAME (fixP->fx_addsy));
6551 goto apply_fix_return;
6552 }
6553 fix_insn (fixP, flags, value);
6554 break;
6555
6556 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6557 if (fixP->fx_done || !seg->use_rela_p)
6558 {
6559 if (value & 3)
6560 as_bad_where (fixP->fx_file, fixP->fx_line,
6561 _("pc-relative load offset not word aligned"));
6562 if (signed_overflow (value, 21))
6563 as_bad_where (fixP->fx_file, fixP->fx_line,
6564 _("pc-relative load offset out of range"));
6565 insn = get_aarch64_insn (buf);
6566 insn |= encode_ld_lit_ofs_19 (value >> 2);
6567 put_aarch64_insn (buf, insn);
6568 }
6569 break;
6570
6571 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6572 if (fixP->fx_done || !seg->use_rela_p)
6573 {
6574 if (signed_overflow (value, 21))
6575 as_bad_where (fixP->fx_file, fixP->fx_line,
6576 _("pc-relative address offset out of range"));
6577 insn = get_aarch64_insn (buf);
6578 insn |= encode_adr_imm (value);
6579 put_aarch64_insn (buf, insn);
6580 }
6581 break;
6582
6583 case BFD_RELOC_AARCH64_BRANCH19:
6584 if (fixP->fx_done || !seg->use_rela_p)
6585 {
6586 if (value & 3)
6587 as_bad_where (fixP->fx_file, fixP->fx_line,
6588 _("conditional branch target not word aligned"));
6589 if (signed_overflow (value, 21))
6590 as_bad_where (fixP->fx_file, fixP->fx_line,
6591 _("conditional branch out of range"));
6592 insn = get_aarch64_insn (buf);
6593 insn |= encode_cond_branch_ofs_19 (value >> 2);
6594 put_aarch64_insn (buf, insn);
6595 }
6596 break;
6597
6598 case BFD_RELOC_AARCH64_TSTBR14:
6599 if (fixP->fx_done || !seg->use_rela_p)
6600 {
6601 if (value & 3)
6602 as_bad_where (fixP->fx_file, fixP->fx_line,
6603 _("conditional branch target not word aligned"));
6604 if (signed_overflow (value, 16))
6605 as_bad_where (fixP->fx_file, fixP->fx_line,
6606 _("conditional branch out of range"));
6607 insn = get_aarch64_insn (buf);
6608 insn |= encode_tst_branch_ofs_14 (value >> 2);
6609 put_aarch64_insn (buf, insn);
6610 }
6611 break;
6612
6613 case BFD_RELOC_AARCH64_CALL26:
6614 case BFD_RELOC_AARCH64_JUMP26:
6615 if (fixP->fx_done || !seg->use_rela_p)
6616 {
6617 if (value & 3)
6618 as_bad_where (fixP->fx_file, fixP->fx_line,
6619 _("branch target not word aligned"));
6620 if (signed_overflow (value, 28))
6621 as_bad_where (fixP->fx_file, fixP->fx_line,
6622 _("branch out of range"));
6623 insn = get_aarch64_insn (buf);
6624 insn |= encode_branch_ofs_26 (value >> 2);
6625 put_aarch64_insn (buf, insn);
6626 }
6627 break;
6628
6629 case BFD_RELOC_AARCH64_MOVW_G0:
6630 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6631 case BFD_RELOC_AARCH64_MOVW_G0_S:
6632 scale = 0;
6633 goto movw_common;
6634 case BFD_RELOC_AARCH64_MOVW_G1:
6635 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6636 case BFD_RELOC_AARCH64_MOVW_G1_S:
6637 scale = 16;
6638 goto movw_common;
6639 case BFD_RELOC_AARCH64_MOVW_G2:
6640 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6641 case BFD_RELOC_AARCH64_MOVW_G2_S:
6642 scale = 32;
6643 goto movw_common;
6644 case BFD_RELOC_AARCH64_MOVW_G3:
6645 scale = 48;
6646 movw_common:
6647 if (fixP->fx_done || !seg->use_rela_p)
6648 {
6649 insn = get_aarch64_insn (buf);
6650
6651 if (!fixP->fx_done)
6652 {
6653 /* REL signed addend must fit in 16 bits */
6654 if (signed_overflow (value, 16))
6655 as_bad_where (fixP->fx_file, fixP->fx_line,
6656 _("offset out of range"));
6657 }
6658 else
6659 {
6660 /* Check for overflow and scale. */
6661 switch (fixP->fx_r_type)
6662 {
6663 case BFD_RELOC_AARCH64_MOVW_G0:
6664 case BFD_RELOC_AARCH64_MOVW_G1:
6665 case BFD_RELOC_AARCH64_MOVW_G2:
6666 case BFD_RELOC_AARCH64_MOVW_G3:
6667 if (unsigned_overflow (value, scale + 16))
6668 as_bad_where (fixP->fx_file, fixP->fx_line,
6669 _("unsigned value out of range"));
6670 break;
6671 case BFD_RELOC_AARCH64_MOVW_G0_S:
6672 case BFD_RELOC_AARCH64_MOVW_G1_S:
6673 case BFD_RELOC_AARCH64_MOVW_G2_S:
6674 /* NOTE: We can only come here with movz or movn. */
6675 if (signed_overflow (value, scale + 16))
6676 as_bad_where (fixP->fx_file, fixP->fx_line,
6677 _("signed value out of range"));
6678 if (value < 0)
6679 {
6680 /* Force use of MOVN. */
6681 value = ~value;
6682 insn = reencode_movzn_to_movn (insn);
6683 }
6684 else
6685 {
6686 /* Force use of MOVZ. */
6687 insn = reencode_movzn_to_movz (insn);
6688 }
6689 break;
6690 default:
6691 /* Unchecked relocations. */
6692 break;
6693 }
6694 value >>= scale;
6695 }
6696
6697 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6698 insn |= encode_movw_imm (value & 0xffff);
6699
6700 put_aarch64_insn (buf, insn);
6701 }
6702 break;
6703
6704 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6705 fixP->fx_r_type = (ilp32_p
6706 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6707 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6708 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6709 /* Should always be exported to object file, see
6710 aarch64_force_relocation(). */
6711 gas_assert (!fixP->fx_done);
6712 gas_assert (seg->use_rela_p);
6713 break;
6714
6715 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6716 fixP->fx_r_type = (ilp32_p
6717 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6718 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6719 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6720 /* Should always be exported to object file, see
6721 aarch64_force_relocation(). */
6722 gas_assert (!fixP->fx_done);
6723 gas_assert (seg->use_rela_p);
6724 break;
6725
6726 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6727 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6728 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6729 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6730 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6731 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6732 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6733 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6734 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6735 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6736 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6737 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6738 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6739 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6740 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6741 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6742 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6743 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6744 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6745 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6746 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6747 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6748 /* Should always be exported to object file, see
6749 aarch64_force_relocation(). */
6750 gas_assert (!fixP->fx_done);
6751 gas_assert (seg->use_rela_p);
6752 break;
6753
6754 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6755 /* Should always be exported to object file, see
6756 aarch64_force_relocation(). */
6757 fixP->fx_r_type = (ilp32_p
6758 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6759 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6760 gas_assert (!fixP->fx_done);
6761 gas_assert (seg->use_rela_p);
6762 break;
6763
6764 case BFD_RELOC_AARCH64_ADD_LO12:
6765 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6766 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6767 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6768 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6769 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6770 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6771 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6772 case BFD_RELOC_AARCH64_LDST128_LO12:
6773 case BFD_RELOC_AARCH64_LDST16_LO12:
6774 case BFD_RELOC_AARCH64_LDST32_LO12:
6775 case BFD_RELOC_AARCH64_LDST64_LO12:
6776 case BFD_RELOC_AARCH64_LDST8_LO12:
6777 /* Should always be exported to object file, see
6778 aarch64_force_relocation(). */
6779 gas_assert (!fixP->fx_done);
6780 gas_assert (seg->use_rela_p);
6781 break;
6782
6783 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6784 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6785 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6786 break;
6787
6788 case BFD_RELOC_UNUSED:
6789 /* An error will already have been reported. */
6790 break;
6791
6792 default:
6793 as_bad_where (fixP->fx_file, fixP->fx_line,
6794 _("unexpected %s fixup"),
6795 bfd_get_reloc_code_name (fixP->fx_r_type));
6796 break;
6797 }
6798
6799 apply_fix_return:
6800 /* Free the allocated struct aarch64_inst.
6801 N.B. currently only a very limited number of fix-up types actually use
6802 this field, so the impact on performance should be minimal. */
6803 if (fixP->tc_fix_data.inst != NULL)
6804 free (fixP->tc_fix_data.inst);
6805
6806 return;
6807 }
6808
6809 /* Translate internal representation of relocation info to BFD target
6810 format. */
6811
6812 arelent *
6813 tc_gen_reloc (asection * section, fixS * fixp)
6814 {
6815 arelent *reloc;
6816 bfd_reloc_code_real_type code;
6817
6818 reloc = xmalloc (sizeof (arelent));
6819
6820 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6821 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6822 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6823
6824 if (fixp->fx_pcrel)
6825 {
6826 if (section->use_rela_p)
6827 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6828 else
6829 fixp->fx_offset = reloc->address;
6830 }
6831 reloc->addend = fixp->fx_offset;
6832
6833 code = fixp->fx_r_type;
6834 switch (code)
6835 {
6836 case BFD_RELOC_16:
6837 if (fixp->fx_pcrel)
6838 code = BFD_RELOC_16_PCREL;
6839 break;
6840
6841 case BFD_RELOC_32:
6842 if (fixp->fx_pcrel)
6843 code = BFD_RELOC_32_PCREL;
6844 break;
6845
6846 case BFD_RELOC_64:
6847 if (fixp->fx_pcrel)
6848 code = BFD_RELOC_64_PCREL;
6849 break;
6850
6851 default:
6852 break;
6853 }
6854
6855 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6856 if (reloc->howto == NULL)
6857 {
6858 as_bad_where (fixp->fx_file, fixp->fx_line,
6859 _
6860 ("cannot represent %s relocation in this object file format"),
6861 bfd_get_reloc_code_name (code));
6862 return NULL;
6863 }
6864
6865 return reloc;
6866 }
6867
6868 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6869
6870 void
6871 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6872 {
6873 bfd_reloc_code_real_type type;
6874 int pcrel = 0;
6875
6876 /* Pick a reloc.
6877 FIXME: @@ Should look at CPU word size. */
6878 switch (size)
6879 {
6880 case 1:
6881 type = BFD_RELOC_8;
6882 break;
6883 case 2:
6884 type = BFD_RELOC_16;
6885 break;
6886 case 4:
6887 type = BFD_RELOC_32;
6888 break;
6889 case 8:
6890 type = BFD_RELOC_64;
6891 break;
6892 default:
6893 as_bad (_("cannot do %u-byte relocation"), size);
6894 type = BFD_RELOC_UNUSED;
6895 break;
6896 }
6897
6898 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6899 }
6900
6901 int
6902 aarch64_force_relocation (struct fix *fixp)
6903 {
6904 switch (fixp->fx_r_type)
6905 {
6906 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6907 /* Perform these "immediate" internal relocations
6908 even if the symbol is extern or weak. */
6909 return 0;
6910
6911 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6912 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6913 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6914 /* Pseudo relocs that need to be fixed up according to
6915 ilp32_p. */
6916 return 0;
6917
6918 case BFD_RELOC_AARCH64_ADD_LO12:
6919 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6920 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6921 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6922 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6923 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6924 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6925 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6926 case BFD_RELOC_AARCH64_LDST128_LO12:
6927 case BFD_RELOC_AARCH64_LDST16_LO12:
6928 case BFD_RELOC_AARCH64_LDST32_LO12:
6929 case BFD_RELOC_AARCH64_LDST64_LO12:
6930 case BFD_RELOC_AARCH64_LDST8_LO12:
6931 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6932 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6933 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6934 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6935 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6936 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6937 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6938 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6939 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6940 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6941 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6942 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6943 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6944 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6945 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6946 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6947 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6948 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6949 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6950 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6951 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6952 /* Always leave these relocations for the linker. */
6953 return 1;
6954
6955 default:
6956 break;
6957 }
6958
6959 return generic_force_reloc (fixp);
6960 }
6961
6962 #ifdef OBJ_ELF
6963
6964 const char *
6965 elf64_aarch64_target_format (void)
6966 {
6967 if (target_big_endian)
6968 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
6969 else
6970 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
6971 }
6972
6973 void
6974 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6975 {
6976 elf_frob_symbol (symp, puntp);
6977 }
6978 #endif
6979
6980 /* MD interface: Finalization. */
6981
6982 /* A good place to do this, although this was probably not intended
6983 for this kind of use. We need to dump the literal pool before
6984 references are made to a null symbol pointer. */
6985
6986 void
6987 aarch64_cleanup (void)
6988 {
6989 literal_pool *pool;
6990
6991 for (pool = list_of_pools; pool; pool = pool->next)
6992 {
6993 /* Put it at the end of the relevant section. */
6994 subseg_set (pool->section, pool->sub_section);
6995 s_ltorg (0);
6996 }
6997 }
6998
6999 #ifdef OBJ_ELF
7000 /* Remove any excess mapping symbols generated for alignment frags in
7001 SEC. We may have created a mapping symbol before a zero byte
7002 alignment; remove it if there's a mapping symbol after the
7003 alignment. */
7004 static void
7005 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
7006 void *dummy ATTRIBUTE_UNUSED)
7007 {
7008 segment_info_type *seginfo = seg_info (sec);
7009 fragS *fragp;
7010
7011 if (seginfo == NULL || seginfo->frchainP == NULL)
7012 return;
7013
7014 for (fragp = seginfo->frchainP->frch_root;
7015 fragp != NULL; fragp = fragp->fr_next)
7016 {
7017 symbolS *sym = fragp->tc_frag_data.last_map;
7018 fragS *next = fragp->fr_next;
7019
7020 /* Variable-sized frags have been converted to fixed size by
7021 this point. But if this was variable-sized to start with,
7022 there will be a fixed-size frag after it. So don't handle
7023 next == NULL. */
7024 if (sym == NULL || next == NULL)
7025 continue;
7026
7027 if (S_GET_VALUE (sym) < next->fr_address)
7028 /* Not at the end of this frag. */
7029 continue;
7030 know (S_GET_VALUE (sym) == next->fr_address);
7031
7032 do
7033 {
7034 if (next->tc_frag_data.first_map != NULL)
7035 {
7036 /* Next frag starts with a mapping symbol. Discard this
7037 one. */
7038 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7039 break;
7040 }
7041
7042 if (next->fr_next == NULL)
7043 {
7044 /* This mapping symbol is at the end of the section. Discard
7045 it. */
7046 know (next->fr_fix == 0 && next->fr_var == 0);
7047 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7048 break;
7049 }
7050
7051 /* As long as we have empty frags without any mapping symbols,
7052 keep looking. */
7053 /* If the next frag is non-empty and does not start with a
7054 mapping symbol, then this mapping symbol is required. */
7055 if (next->fr_address != next->fr_next->fr_address)
7056 break;
7057
7058 next = next->fr_next;
7059 }
7060 while (next != NULL);
7061 }
7062 }
7063 #endif
7064
7065 /* Adjust the symbol table. */
7066
7067 void
7068 aarch64_adjust_symtab (void)
7069 {
7070 #ifdef OBJ_ELF
7071 /* Remove any overlapping mapping symbols generated by alignment frags. */
7072 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7073 /* Now do generic ELF adjustments. */
7074 elf_adjust_symtab ();
7075 #endif
7076 }
7077
7078 static void
7079 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7080 {
7081 const char *hash_err;
7082
7083 hash_err = hash_insert (table, key, value);
7084 if (hash_err)
7085 printf ("Internal Error: Can't hash %s\n", key);
7086 }
7087
7088 static void
7089 fill_instruction_hash_table (void)
7090 {
7091 aarch64_opcode *opcode = aarch64_opcode_table;
7092
7093 while (opcode->name != NULL)
7094 {
7095 templates *templ, *new_templ;
7096 templ = hash_find (aarch64_ops_hsh, opcode->name);
7097
7098 new_templ = (templates *) xmalloc (sizeof (templates));
7099 new_templ->opcode = opcode;
7100 new_templ->next = NULL;
7101
7102 if (!templ)
7103 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7104 else
7105 {
7106 new_templ->next = templ->next;
7107 templ->next = new_templ;
7108 }
7109 ++opcode;
7110 }
7111 }
7112
7113 static inline void
7114 convert_to_upper (char *dst, const char *src, size_t num)
7115 {
7116 unsigned int i;
7117 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7118 *dst = TOUPPER (*src);
7119 *dst = '\0';
7120 }
7121
7122 /* Assume STR points to a lower-case string; allocate, convert and return
7123 the corresponding upper-case string. */
7124 static inline const char*
7125 get_upper_str (const char *str)
7126 {
7127 char *ret;
7128 size_t len = strlen (str);
7129 if ((ret = xmalloc (len + 1)) == NULL)
7130 abort ();
7131 convert_to_upper (ret, str, len);
7132 return ret;
7133 }
7134
7135 /* MD interface: Initialization. */
7136
7137 void
7138 md_begin (void)
7139 {
7140 unsigned mach;
7141 unsigned int i;
7142
7143 if ((aarch64_ops_hsh = hash_new ()) == NULL
7144 || (aarch64_cond_hsh = hash_new ()) == NULL
7145 || (aarch64_shift_hsh = hash_new ()) == NULL
7146 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7147 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7148 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7149 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7150 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7151 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7152 || (aarch64_reg_hsh = hash_new ()) == NULL
7153 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7154 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7155 || (aarch64_pldop_hsh = hash_new ()) == NULL)
7156 as_fatal (_("virtual memory exhausted"));
7157
7158 fill_instruction_hash_table ();
7159
7160 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7161 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7162 (void *) (aarch64_sys_regs + i));
7163
7164 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7165 checked_hash_insert (aarch64_pstatefield_hsh,
7166 aarch64_pstatefields[i].name,
7167 (void *) (aarch64_pstatefields + i));
7168
7169 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
7170 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7171 aarch64_sys_regs_ic[i].template,
7172 (void *) (aarch64_sys_regs_ic + i));
7173
7174 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
7175 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7176 aarch64_sys_regs_dc[i].template,
7177 (void *) (aarch64_sys_regs_dc + i));
7178
7179 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
7180 checked_hash_insert (aarch64_sys_regs_at_hsh,
7181 aarch64_sys_regs_at[i].template,
7182 (void *) (aarch64_sys_regs_at + i));
7183
7184 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
7185 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7186 aarch64_sys_regs_tlbi[i].template,
7187 (void *) (aarch64_sys_regs_tlbi + i));
7188
7189 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7190 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7191 (void *) (reg_names + i));
7192
7193 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7194 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7195 (void *) (nzcv_names + i));
7196
7197 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7198 {
7199 const char *name = aarch64_operand_modifiers[i].name;
7200 checked_hash_insert (aarch64_shift_hsh, name,
7201 (void *) (aarch64_operand_modifiers + i));
7202 /* Also hash the name in the upper case. */
7203 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7204 (void *) (aarch64_operand_modifiers + i));
7205 }
7206
7207 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7208 {
7209 unsigned int j;
7210 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7211 the same condition code. */
7212 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7213 {
7214 const char *name = aarch64_conds[i].names[j];
7215 if (name == NULL)
7216 break;
7217 checked_hash_insert (aarch64_cond_hsh, name,
7218 (void *) (aarch64_conds + i));
7219 /* Also hash the name in the upper case. */
7220 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7221 (void *) (aarch64_conds + i));
7222 }
7223 }
7224
7225 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7226 {
7227 const char *name = aarch64_barrier_options[i].name;
7228 /* Skip xx00 - the unallocated barrier option values. */
7229 if ((i & 0x3) == 0)
7230 continue;
7231 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7232 (void *) (aarch64_barrier_options + i));
7233 /* Also hash the name in the upper case. */
7234 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7235 (void *) (aarch64_barrier_options + i));
7236 }
7237
7238 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7239 {
7240 const char* name = aarch64_prfops[i].name;
7241 /* Skip the unallocated hint encodings. */
7242 if (name == NULL)
7243 continue;
7244 checked_hash_insert (aarch64_pldop_hsh, name,
7245 (void *) (aarch64_prfops + i));
7246 /* Also hash the name in the upper case. */
7247 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7248 (void *) (aarch64_prfops + i));
7249 }
7250
7251 /* Set the cpu variant based on the command-line options. */
7252 if (!mcpu_cpu_opt)
7253 mcpu_cpu_opt = march_cpu_opt;
7254
7255 if (!mcpu_cpu_opt)
7256 mcpu_cpu_opt = &cpu_default;
7257
7258 cpu_variant = *mcpu_cpu_opt;
7259
7260 /* Record the CPU type. */
7261 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7262
7263 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7264 }
7265
7266 /* Command line processing. */
7267
7268 const char *md_shortopts = "m:";
7269
7270 #ifdef AARCH64_BI_ENDIAN
7271 #define OPTION_EB (OPTION_MD_BASE + 0)
7272 #define OPTION_EL (OPTION_MD_BASE + 1)
7273 #else
7274 #if TARGET_BYTES_BIG_ENDIAN
7275 #define OPTION_EB (OPTION_MD_BASE + 0)
7276 #else
7277 #define OPTION_EL (OPTION_MD_BASE + 1)
7278 #endif
7279 #endif
7280
7281 struct option md_longopts[] = {
7282 #ifdef OPTION_EB
7283 {"EB", no_argument, NULL, OPTION_EB},
7284 #endif
7285 #ifdef OPTION_EL
7286 {"EL", no_argument, NULL, OPTION_EL},
7287 #endif
7288 {NULL, no_argument, NULL, 0}
7289 };
7290
7291 size_t md_longopts_size = sizeof (md_longopts);
7292
7293 struct aarch64_option_table
7294 {
7295 char *option; /* Option name to match. */
7296 char *help; /* Help information. */
7297 int *var; /* Variable to change. */
7298 int value; /* What to change it to. */
7299 char *deprecated; /* If non-null, print this message. */
7300 };
7301
7302 static struct aarch64_option_table aarch64_opts[] = {
7303 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7304 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7305 NULL},
7306 #ifdef DEBUG_AARCH64
7307 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7308 #endif /* DEBUG_AARCH64 */
7309 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7310 NULL},
7311 {"mno-verbose-error", N_("do not output verbose error messages"),
7312 &verbose_error_p, 0, NULL},
7313 {NULL, NULL, NULL, 0, NULL}
7314 };
7315
7316 struct aarch64_cpu_option_table
7317 {
7318 char *name;
7319 const aarch64_feature_set value;
7320 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7321 case. */
7322 const char *canonical_name;
7323 };
7324
7325 /* This list should, at a minimum, contain all the cpu names
7326 recognized by GCC. */
7327 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7328 {"all", AARCH64_ANY, NULL},
7329 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7330 AARCH64_FEATURE_CRC), "Cortex-A53"},
7331 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7332 AARCH64_FEATURE_CRC), "Cortex-A57"},
7333 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7334 AARCH64_FEATURE_CRC), "Cortex-A72"},
7335 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
7336 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7337 "Samsung Exynos M1"},
7338 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7339 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7340 "Cavium ThunderX"},
7341 /* The 'xgene-1' name is an older spelling of 'xgene1'; it was used
7342 in earlier releases and is superseded by 'xgene1' in all
7343 tools. */
7344 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7345 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7346 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7347 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7348 {"generic", AARCH64_ARCH_V8, NULL},
7349
7350 {NULL, AARCH64_ARCH_NONE, NULL}
7351 };
7352
7353 struct aarch64_arch_option_table
7354 {
7355 char *name;
7356 const aarch64_feature_set value;
7357 };
7358
7359 /* This list should, at a minimum, contain all the architecture names
7360 recognized by GCC. */
7361 static const struct aarch64_arch_option_table aarch64_archs[] = {
7362 {"all", AARCH64_ANY},
7363 {"armv8-a", AARCH64_ARCH_V8},
7364 {NULL, AARCH64_ARCH_NONE}
7365 };
7366
7367 /* ISA extensions. */
7368 struct aarch64_option_cpu_value_table
7369 {
7370 char *name;
7371 const aarch64_feature_set value;
7372 };
7373
7374 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7375 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7376 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7377 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7378 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
7379 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7380 {NULL, AARCH64_ARCH_NONE}
7381 };
7382
7383 struct aarch64_long_option_table
7384 {
7385 char *option; /* Substring to match. */
7386 char *help; /* Help information. */
7387 int (*func) (char *subopt); /* Function to decode sub-option. */
7388 char *deprecated; /* If non-null, print this message. */
7389 };
7390
7391 static int
7392 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p,
7393 bfd_boolean ext_only)
7394 {
7395 /* We insist on extensions being added before being removed. We achieve
7396 this by using the ADDING_VALUE variable to indicate whether we are
7397 adding an extension (1) or removing it (0) and only allowing it to
7398 change in the order -1 -> 1 -> 0. */
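/* For example, "+crc+crypto+nofp" is accepted, whereas "+nofp+crc" is
   rejected because an extension is added after one has been removed.  */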
7399 int adding_value = -1;
7400 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7401
7402 /* Copy the feature set, so that we can modify it. */
7403 *ext_set = **opt_p;
7404 *opt_p = ext_set;
7405
7406 while (str != NULL && *str != 0)
7407 {
7408 const struct aarch64_option_cpu_value_table *opt;
7409 char *ext = NULL;
7410 int optlen;
7411
7412 if (!ext_only)
7413 {
7414 if (*str != '+')
7415 {
7416 as_bad (_("invalid architectural extension"));
7417 return 0;
7418 }
7419
7420 ext = strchr (++str, '+');
7421 }
7422
7423 if (ext != NULL)
7424 optlen = ext - str;
7425 else
7426 optlen = strlen (str);
7427
7428 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7429 {
7430 if (adding_value != 0)
7431 adding_value = 0;
7432 optlen -= 2;
7433 str += 2;
7434 }
7435 else if (optlen > 0)
7436 {
7437 if (adding_value == -1)
7438 adding_value = 1;
7439 else if (adding_value != 1)
7440 {
7441 as_bad (_("must specify extensions to add before specifying "
7442 "those to remove"));
7443 return 0;
7444 }
7445 }
7446
7447 if (optlen == 0)
7448 {
7449 as_bad (_("missing architectural extension"));
7450 return 0;
7451 }
7452
7453 gas_assert (adding_value != -1);
7454
7455 for (opt = aarch64_features; opt->name != NULL; opt++)
7456 if (strncmp (opt->name, str, optlen) == 0)
7457 {
7458 /* Add or remove the extension. */
7459 if (adding_value)
7460 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7461 else
7462 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7463 break;
7464 }
7465
7466 if (opt->name == NULL)
7467 {
7468 as_bad (_("unknown architectural extension `%s'"), str);
7469 return 0;
7470 }
7471
7472 str = ext;
7473 };
7474
7475 return 1;
7476 }
7477
7478 static int
7479 aarch64_parse_cpu (char *str)
7480 {
7481 const struct aarch64_cpu_option_table *opt;
7482 char *ext = strchr (str, '+');
7483 size_t optlen;
7484
7485 if (ext != NULL)
7486 optlen = ext - str;
7487 else
7488 optlen = strlen (str);
7489
7490 if (optlen == 0)
7491 {
7492 as_bad (_("missing cpu name `%s'"), str);
7493 return 0;
7494 }
7495
7496 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7497 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7498 {
7499 mcpu_cpu_opt = &opt->value;
7500 if (ext != NULL)
7501 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7502
7503 return 1;
7504 }
7505
7506 as_bad (_("unknown cpu `%s'"), str);
7507 return 0;
7508 }
7509
7510 static int
7511 aarch64_parse_arch (char *str)
7512 {
7513 const struct aarch64_arch_option_table *opt;
7514 char *ext = strchr (str, '+');
7515 size_t optlen;
7516
7517 if (ext != NULL)
7518 optlen = ext - str;
7519 else
7520 optlen = strlen (str);
7521
7522 if (optlen == 0)
7523 {
7524 as_bad (_("missing architecture name `%s'"), str);
7525 return 0;
7526 }
7527
7528 for (opt = aarch64_archs; opt->name != NULL; opt++)
7529 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7530 {
7531 march_cpu_opt = &opt->value;
7532 if (ext != NULL)
7533 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
7534
7535 return 1;
7536 }
7537
7538 as_bad (_("unknown architecture `%s'\n"), str);
7539 return 0;
7540 }
7541
7542 /* ABIs. */
7543 struct aarch64_option_abi_value_table
7544 {
7545 char *name;
7546 enum aarch64_abi_type value;
7547 };
7548
7549 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7550 {"ilp32", AARCH64_ABI_ILP32},
7551 {"lp64", AARCH64_ABI_LP64},
7552 {NULL, 0}
7553 };
7554
7555 static int
7556 aarch64_parse_abi (char *str)
7557 {
7558 const struct aarch64_option_abi_value_table *opt;
7559 size_t optlen = strlen (str);
7560
7561 if (optlen == 0)
7562 {
7563 as_bad (_("missing abi name `%s'"), str);
7564 return 0;
7565 }
7566
7567 for (opt = aarch64_abis; opt->name != NULL; opt++)
7568 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7569 {
7570 aarch64_abi = opt->value;
7571 return 1;
7572 }
7573
7574 as_bad (_("unknown abi `%s'\n"), str);
7575 return 0;
7576 }
7577
7578 static struct aarch64_long_option_table aarch64_long_opts[] = {
7579 #ifdef OBJ_ELF
7580 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
7581 aarch64_parse_abi, NULL},
7582 #endif /* OBJ_ELF */
7583 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7584 aarch64_parse_cpu, NULL},
7585 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7586 aarch64_parse_arch, NULL},
7587 {NULL, NULL, 0, NULL}
7588 };
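/* For example, the command-line option -mcpu=cortex-a53+nofp matches the
   "mcpu=" entry above and aarch64_parse_cpu is called with the string
   "cortex-a53+nofp".  */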
7589
7590 int
7591 md_parse_option (int c, char *arg)
7592 {
7593 struct aarch64_option_table *opt;
7594 struct aarch64_long_option_table *lopt;
7595
7596 switch (c)
7597 {
7598 #ifdef OPTION_EB
7599 case OPTION_EB:
7600 target_big_endian = 1;
7601 break;
7602 #endif
7603
7604 #ifdef OPTION_EL
7605 case OPTION_EL:
7606 target_big_endian = 0;
7607 break;
7608 #endif
7609
7610 case 'a':
7611 /* Listing option. Just ignore these, we don't support additional
7612 ones. */
7613 return 0;
7614
7615 default:
7616 for (opt = aarch64_opts; opt->option != NULL; opt++)
7617 {
7618 if (c == opt->option[0]
7619 && ((arg == NULL && opt->option[1] == 0)
7620 || streq (arg, opt->option + 1)))
7621 {
7622 /* If the option is deprecated, tell the user. */
7623 if (opt->deprecated != NULL)
7624 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7625 arg ? arg : "", _(opt->deprecated));
7626
7627 if (opt->var != NULL)
7628 *opt->var = opt->value;
7629
7630 return 1;
7631 }
7632 }
7633
7634 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7635 {
7636 /* These options are expected to have an argument. */
7637 if (c == lopt->option[0]
7638 && arg != NULL
7639 && strncmp (arg, lopt->option + 1,
7640 strlen (lopt->option + 1)) == 0)
7641 {
7642 /* If the option is deprecated, tell the user. */
7643 if (lopt->deprecated != NULL)
7644 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7645 _(lopt->deprecated));
7646
7647 /* Call the sub-option parser. */
7648 return lopt->func (arg + strlen (lopt->option) - 1);
7649 }
7650 }
7651
7652 return 0;
7653 }
7654
7655 return 1;
7656 }
7657
7658 void
7659 md_show_usage (FILE * fp)
7660 {
7661 struct aarch64_option_table *opt;
7662 struct aarch64_long_option_table *lopt;
7663
7664 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7665
7666 for (opt = aarch64_opts; opt->option != NULL; opt++)
7667 if (opt->help != NULL)
7668 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7669
7670 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7671 if (lopt->help != NULL)
7672 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7673
7674 #ifdef OPTION_EB
7675 fprintf (fp, _("\
7676 -EB assemble code for a big-endian cpu\n"));
7677 #endif
7678
7679 #ifdef OPTION_EL
7680 fprintf (fp, _("\
7681 -EL assemble code for a little-endian cpu\n"));
7682 #endif
7683 }
7684
7685 /* Parse a .cpu directive. */
7686
7687 static void
7688 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7689 {
7690 const struct aarch64_cpu_option_table *opt;
7691 char saved_char;
7692 char *name;
7693 char *ext;
7694 size_t optlen;
7695
7696 name = input_line_pointer;
7697 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7698 input_line_pointer++;
7699 saved_char = *input_line_pointer;
7700 *input_line_pointer = 0;
7701
7702 ext = strchr (name, '+');
7703
7704 if (ext != NULL)
7705 optlen = ext - name;
7706 else
7707 optlen = strlen (name);
7708
7709 /* Skip the first "all" entry. */
7710 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7711 if (strlen (opt->name) == optlen
7712 && strncmp (name, opt->name, optlen) == 0)
7713 {
7714 mcpu_cpu_opt = &opt->value;
7715 if (ext != NULL)
7716 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7717 return;
7718
7719 cpu_variant = *mcpu_cpu_opt;
7720
7721 *input_line_pointer = saved_char;
7722 demand_empty_rest_of_line ();
7723 return;
7724 }
7725 as_bad (_("unknown cpu `%s'"), name);
7726 *input_line_pointer = saved_char;
7727 ignore_rest_of_line ();
7728 }
7729
7730
7731 /* Parse a .arch directive. */
7732
7733 static void
7734 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7735 {
7736 const struct aarch64_arch_option_table *opt;
7737 char saved_char;
7738 char *name;
7739 char *ext;
7740 size_t optlen;
7741
7742 name = input_line_pointer;
7743 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7744 input_line_pointer++;
7745 saved_char = *input_line_pointer;
7746 *input_line_pointer = 0;
7747
7748 ext = strchr (name, '+');
7749
7750 if (ext != NULL)
7751 optlen = ext - name;
7752 else
7753 optlen = strlen (name);
7754
7755 /* Skip the first "all" entry. */
7756 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7757 if (strlen (opt->name) == optlen
7758 && strncmp (name, opt->name, optlen) == 0)
7759 {
7760 mcpu_cpu_opt = &opt->value;
7761 if (ext != NULL)
7762 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7763 return;
7764
7765 cpu_variant = *mcpu_cpu_opt;
7766
7767 *input_line_pointer = saved_char;
7768 demand_empty_rest_of_line ();
7769 return;
7770 }
7771
7772 as_bad (_("unknown architecture `%s'\n"), name);
7773 *input_line_pointer = saved_char;
7774 ignore_rest_of_line ();
7775 }
7776
7777 /* Parse a .arch_extension directive. */
7778
7779 static void
7780 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
7781 {
7782 char saved_char;
7783 char *ext = input_line_pointer;
7784
7785 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7786 input_line_pointer++;
7787 saved_char = *input_line_pointer;
7788 *input_line_pointer = 0;
7789
7790 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
7791 return;
7792
7793 cpu_variant = *mcpu_cpu_opt;
7794
7795 *input_line_pointer = saved_char;
7796 demand_empty_rest_of_line ();
7797 }
7798
7799 /* Copy symbol information. */
7800
7801 void
7802 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7803 {
7804 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7805 }