[AArch64] GAS support BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum neon_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q
87 };
88
89 /* Bits for DEFINED field in neon_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92
93 struct neon_type_el
94 {
95 enum neon_el_type type;
96 unsigned char defined;
97 unsigned width;
98 int64_t index;
99 };
100
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
102
103 struct reloc
104 {
105 bfd_reloc_code_real_type type;
106 expressionS exp;
107 int pc_rel;
108 enum aarch64_opnd opnd;
109 uint32_t flags;
110 unsigned need_libopcodes_p : 1;
111 };
112
113 struct aarch64_instruction
114 {
115 /* libopcodes structure for instruction intermediate representation. */
116 aarch64_inst base;
117 /* Record assembly errors found during the parsing. */
118 struct
119 {
120 enum aarch64_operand_error_kind kind;
121 const char *error;
122 } parsing_error;
123 /* The condition that appears in the assembly line. */
124 int cond;
125 /* Relocation information (including the GAS internal fixup). */
126 struct reloc reloc;
127 /* Need to generate an immediate in the literal pool. */
128 unsigned gen_lit_pool : 1;
129 };
130
131 typedef struct aarch64_instruction aarch64_instruction;
132
133 static aarch64_instruction inst;
134
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
138 /* Diagnostics inline function utilities.
139
140 These are lightweight utilities which should only be called by parse_operands
141 and other parsers. GAS processes each assembly line by parsing it against
142 instruction template(s); in the case of multiple templates (for the same
143 mnemonic name), the templates are tried one by one until one succeeds or
144 all fail. An assembly line may fail a few templates before being
145 successfully parsed; an error saved here in most cases is not a user error
146 but an error indicating the current template is not the right template.
147 Therefore it is very important that errors can be saved at a low cost during
148 the parsing; we don't want to slow down the whole parsing by recording
149 non-user errors in detail.
150
151 Remember that the objective is to help GAS pick the most appropriate
152 error message in the case of multiple templates, e.g. FMOV which has 8
153 templates. */
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline enum aarch64_operand_error_kind
175 get_error_kind (void)
176 {
177 return inst.parsing_error.kind;
178 }
179
180 static inline void
181 set_error (enum aarch64_operand_error_kind kind, const char *error)
182 {
183 inst.parsing_error.kind = kind;
184 inst.parsing_error.error = error;
185 }
186
187 static inline void
188 set_recoverable_error (const char *error)
189 {
190 set_error (AARCH64_OPDE_RECOVERABLE, error);
191 }
192
193 /* Use the DESC field of the corresponding aarch64_operand entry to compose
194 the error message. */
195 static inline void
196 set_default_error (void)
197 {
198 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
199 }
200
201 static inline void
202 set_syntax_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
205 }
206
207 static inline void
208 set_first_syntax_error (const char *error)
209 {
210 if (! error_p ())
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
212 }
213
214 static inline void
215 set_fatal_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
218 }
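/* Illustrative sketch only (not part of the original file): the helpers
   above are typically combined as "record an error only if none has been
   recorded yet".  A hypothetical wrapper for recoverable errors could read:

     static inline void
     set_first_recoverable_error (const char *error)
     {
       if (! error_p ())
         set_recoverable_error (error);
     }

   set_first_syntax_error above follows exactly this pattern for syntax
   errors, which is what keeps the earliest (most relevant) diagnostic.  */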
219 \f
220 /* Number of littlenums required to hold an extended precision number. */
221 #define MAX_LITTLENUMS 6
222
223 /* Return value for certain parsers when the parsing fails; those parsers
224 return the information of the parsed result, e.g. register number, on
225 success. */
226 #define PARSE_FAIL -1
227
228 /* This is an invalid condition code that means no conditional field is
229 present. */
230 #define COND_ALWAYS 0x10
231
232 typedef struct
233 {
234 const char *template;
235 unsigned long value;
236 } asm_barrier_opt;
237
238 typedef struct
239 {
240 const char *template;
241 uint32_t value;
242 } asm_nzcv;
243
244 struct reloc_entry
245 {
246 char *name;
247 bfd_reloc_code_real_type reloc;
248 };
249
250 /* Structure for a hash table entry for a register. */
251 typedef struct
252 {
253 const char *name;
254 unsigned char number;
255 unsigned char type;
256 unsigned char builtin;
257 } reg_entry;
258
259 /* Macros to define the register types and masks for the purpose
260 of parsing. */
261
262 #undef AARCH64_REG_TYPES
263 #define AARCH64_REG_TYPES \
264 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
265 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
266 BASIC_REG_TYPE(SP_32) /* wsp */ \
267 BASIC_REG_TYPE(SP_64) /* sp */ \
268 BASIC_REG_TYPE(Z_32) /* wzr */ \
269 BASIC_REG_TYPE(Z_64) /* xzr */ \
270 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
271 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
272 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
273 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
274 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
275 BASIC_REG_TYPE(CN) /* c[0-7] */ \
276 BASIC_REG_TYPE(VN) /* v[0-31] */ \
277 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
278 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
279 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
280 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
281 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
282 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
283 /* Typecheck: any [BHSDQ]P FP. */ \
284 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
285 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
286 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
287 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
289 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
290 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
291 /* Any integer register; used for error messages only. */ \
292 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
293 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Pseudo type to mark the end of the enumerator sequence. */ \
296 BASIC_REG_TYPE(MAX)
297
298 #undef BASIC_REG_TYPE
299 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
300 #undef MULTI_REG_TYPE
301 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
302
303 /* Register type enumerators. */
304 typedef enum
305 {
306 /* A list of REG_TYPE_*. */
307 AARCH64_REG_TYPES
308 } aarch64_reg_type;
309
310 #undef BASIC_REG_TYPE
311 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
312 #undef REG_TYPE
313 #define REG_TYPE(T) (1 << REG_TYPE_##T)
314 #undef MULTI_REG_TYPE
315 #define MULTI_REG_TYPE(T,V) V,
316
317 /* Values indexed by aarch64_reg_type to assist the type checking. */
318 static const unsigned reg_type_masks[] =
319 {
320 AARCH64_REG_TYPES
321 };
322
323 #undef BASIC_REG_TYPE
324 #undef REG_TYPE
325 #undef MULTI_REG_TYPE
326 #undef AARCH64_REG_TYPES
327
328 /* Diagnostics used when we don't get a register of the expected type.
329 Note: this has to be kept synchronized with the aarch64_reg_type definitions
330 above. */
331 static const char *
332 get_reg_expected_msg (aarch64_reg_type reg_type)
333 {
334 const char *msg;
335
336 switch (reg_type)
337 {
338 case REG_TYPE_R_32:
339 msg = N_("integer 32-bit register expected");
340 break;
341 case REG_TYPE_R_64:
342 msg = N_("integer 64-bit register expected");
343 break;
344 case REG_TYPE_R_N:
345 msg = N_("integer register expected");
346 break;
347 case REG_TYPE_R_Z_SP:
348 msg = N_("integer, zero or SP register expected");
349 break;
350 case REG_TYPE_FP_B:
351 msg = N_("8-bit SIMD scalar register expected");
352 break;
353 case REG_TYPE_FP_H:
354 msg = N_("16-bit SIMD scalar or floating-point half precision "
355 "register expected");
356 break;
357 case REG_TYPE_FP_S:
358 msg = N_("32-bit SIMD scalar or floating-point single precision "
359 "register expected");
360 break;
361 case REG_TYPE_FP_D:
362 msg = N_("64-bit SIMD scalar or floating-point double precision "
363 "register expected");
364 break;
365 case REG_TYPE_FP_Q:
366 msg = N_("128-bit SIMD scalar or floating-point quad precision "
367 "register expected");
368 break;
369 case REG_TYPE_CN:
370 msg = N_("C0 - C15 expected");
371 break;
372 case REG_TYPE_R_Z_BHSDQ_V:
373 msg = N_("register expected");
374 break;
375 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
376 msg = N_("SIMD scalar or floating-point register expected");
377 break;
378 case REG_TYPE_VN: /* any V reg */
379 msg = N_("vector register expected");
380 break;
381 default:
382 as_fatal (_("invalid register type %d"), reg_type);
383 }
384 return msg;
385 }
386
387 /* Some well known registers that we refer to directly elsewhere. */
388 #define REG_SP 31
389
390 /* Instructions take 4 bytes in the object file. */
391 #define INSN_SIZE 4
392
393 /* Define some common error messages. */
394 #define BAD_SP _("SP not allowed here")
395
396 static struct hash_control *aarch64_ops_hsh;
397 static struct hash_control *aarch64_cond_hsh;
398 static struct hash_control *aarch64_shift_hsh;
399 static struct hash_control *aarch64_sys_regs_hsh;
400 static struct hash_control *aarch64_pstatefield_hsh;
401 static struct hash_control *aarch64_sys_regs_ic_hsh;
402 static struct hash_control *aarch64_sys_regs_dc_hsh;
403 static struct hash_control *aarch64_sys_regs_at_hsh;
404 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
405 static struct hash_control *aarch64_reg_hsh;
406 static struct hash_control *aarch64_barrier_opt_hsh;
407 static struct hash_control *aarch64_nzcv_hsh;
408 static struct hash_control *aarch64_pldop_hsh;
409
410 /* Stuff needed to resolve the label ambiguity
411 As:
412 ...
413 label: <insn>
414 may differ from:
415 ...
416 label:
417 <insn> */
418
419 static symbolS *last_label_seen;
420
421 /* Literal pool structure. Held on a per-section
422 and per-sub-section basis. */
423
424 #define MAX_LITERAL_POOL_SIZE 1024
425 typedef struct literal_expression
426 {
427 expressionS exp;
428 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
429 LITTLENUM_TYPE * bignum;
430 } literal_expression;
431
432 typedef struct literal_pool
433 {
434 literal_expression literals[MAX_LITERAL_POOL_SIZE];
435 unsigned int next_free_entry;
436 unsigned int id;
437 symbolS *symbol;
438 segT section;
439 subsegT sub_section;
440 int size;
441 struct literal_pool *next;
442 } literal_pool;
443
444 /* Pointer to a linked list of literal pools. */
445 static literal_pool *list_of_pools = NULL;
446 \f
447 /* Pure syntax. */
448
449 /* This array holds the chars that always start a comment. If the
450 pre-processor is disabled, these aren't very useful. */
451 const char comment_chars[] = "";
452
453 /* This array holds the chars that only start a comment at the beginning of
454 a line. If the line seems to have the form '# 123 filename'
455 .line and .file directives will appear in the pre-processed output. */
456 /* Note that input_file.c hand checks for '#' at the beginning of the
457 first line of the input file. This is because the compiler outputs
458 #NO_APP at the beginning of its output. */
459 /* Also note that comments like this one will always work. */
460 const char line_comment_chars[] = "#";
461
462 const char line_separator_chars[] = ";";
463
464 /* Chars that can be used to separate the mantissa
465 from the exponent in floating point numbers. */
466 const char EXP_CHARS[] = "eE";
467
468 /* Chars that mean this number is a floating point constant. */
469 /* As in 0f12.456 */
470 /* or 0d1.2345e12 */
471
472 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
473
474 /* Prefix character that indicates the start of an immediate value. */
475 #define is_immediate_prefix(C) ((C) == '#')
476
477 /* Separator character handling. */
478
479 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
480
481 static inline bfd_boolean
482 skip_past_char (char **str, char c)
483 {
484 if (**str == c)
485 {
486 (*str)++;
487 return TRUE;
488 }
489 else
490 return FALSE;
491 }
492
493 #define skip_past_comma(str) skip_past_char (str, ',')
494
495 /* Arithmetic expressions (possibly involving symbols). */
496
497 static bfd_boolean in_my_get_expression_p = FALSE;
498
499 /* Third argument to my_get_expression. */
500 #define GE_NO_PREFIX 0
501 #define GE_OPT_PREFIX 1
502
503 /* Return TRUE if the string pointed to by *STR is successfully parsed
504 as a valid expression; *EP will be filled with the information of
505 such an expression. Otherwise return FALSE. */
506
507 static bfd_boolean
508 my_get_expression (expressionS * ep, char **str, int prefix_mode,
509 int reject_absent)
510 {
511 char *save_in;
512 segT seg;
513 int prefix_present_p = 0;
514
515 switch (prefix_mode)
516 {
517 case GE_NO_PREFIX:
518 break;
519 case GE_OPT_PREFIX:
520 if (is_immediate_prefix (**str))
521 {
522 (*str)++;
523 prefix_present_p = 1;
524 }
525 break;
526 default:
527 abort ();
528 }
529
530 memset (ep, 0, sizeof (expressionS));
531
532 save_in = input_line_pointer;
533 input_line_pointer = *str;
534 in_my_get_expression_p = TRUE;
535 seg = expression (ep);
536 in_my_get_expression_p = FALSE;
537
538 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
539 {
540 /* We found a bad expression in md_operand(). */
541 *str = input_line_pointer;
542 input_line_pointer = save_in;
543 if (prefix_present_p && ! error_p ())
544 set_fatal_syntax_error (_("bad expression"));
545 else
546 set_first_syntax_error (_("bad expression"));
547 return FALSE;
548 }
549
550 #ifdef OBJ_AOUT
551 if (seg != absolute_section
552 && seg != text_section
553 && seg != data_section
554 && seg != bss_section && seg != undefined_section)
555 {
556 set_syntax_error (_("bad segment"));
557 *str = input_line_pointer;
558 input_line_pointer = save_in;
559 return FALSE;
560 }
561 #else
562 (void) seg;
563 #endif
564
565 *str = input_line_pointer;
566 input_line_pointer = save_in;
567 return TRUE;
568 }
569
570 /* Turn a string in input_line_pointer into a floating point constant
571 of type TYPE, and store the appropriate bytes in *LITP. The number
572 of LITTLENUMS emitted is stored in *SIZEP. An error message is
573 returned, or NULL on OK. */
574
575 char *
576 md_atof (int type, char *litP, int *sizeP)
577 {
578 return ieee_md_atof (type, litP, sizeP, target_big_endian);
579 }
580
581 /* We handle all bad expressions here, so that we can report the faulty
582 instruction in the error message. */
583 void
584 md_operand (expressionS * exp)
585 {
586 if (in_my_get_expression_p)
587 exp->X_op = O_illegal;
588 }
589
590 /* Immediate values. */
591
592 /* Errors may be set multiple times during parsing or bit encoding
593 (particularly in the Neon bits), but usually the earliest error which is set
594 will be the most meaningful. Avoid overwriting it with later (cascading)
595 errors by calling this function. */
596
597 static void
598 first_error (const char *error)
599 {
600 if (! error_p ())
601 set_syntax_error (error);
602 }
603
604 /* Similar to first_error, but this function accepts a formatted error
605 message. */
606 static void
607 first_error_fmt (const char *format, ...)
608 {
609 va_list args;
610 enum
611 { size = 100 };
612 /* N.B. this single buffer will not cause error messages for different
613 instructions to pollute each other; this is because at the end of
614 processing of each assembly line, the error message, if any, will be
615 collected by as_bad. */
616 static char buffer[size];
617
618 if (! error_p ())
619 {
620 int ret ATTRIBUTE_UNUSED;
621 va_start (args, format);
622 ret = vsnprintf (buffer, size, format, args);
623 know (ret <= size - 1 && ret >= 0);
624 va_end (args);
625 set_syntax_error (buffer);
626 }
627 }
628
629 /* Register parsing. */
630
631 /* Generic register parser which is called by other specialized
632 register parsers.
633 CCP points to what should be the beginning of a register name.
634 If it is indeed a valid register name, advance CCP over it and
635 return the reg_entry structure; otherwise return NULL.
636 It does not issue diagnostics. */
637
638 static reg_entry *
639 parse_reg (char **ccp)
640 {
641 char *start = *ccp;
642 char *p;
643 reg_entry *reg;
644
645 #ifdef REGISTER_PREFIX
646 if (*start != REGISTER_PREFIX)
647 return NULL;
648 start++;
649 #endif
650
651 p = start;
652 if (!ISALPHA (*p) || !is_name_beginner (*p))
653 return NULL;
654
655 do
656 p++;
657 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
658
659 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
660
661 if (!reg)
662 return NULL;
663
664 *ccp = p;
665 return reg;
666 }
667
668 /* Return TRUE if REG->TYPE is compatible with the required register
669 type TYPE; otherwise return FALSE. */
670 static bfd_boolean
671 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
672 {
673 if (reg->type == type)
674 return TRUE;
675
676 switch (type)
677 {
678 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
679 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
680 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
681 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
682 case REG_TYPE_VN: /* Vector register. */
683 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
684 return ((reg_type_masks[reg->type] & reg_type_masks[type])
685 == reg_type_masks[reg->type]);
686 default:
687 as_fatal ("unhandled type %d", type);
688 abort ();
689 }
690 }
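/* Worked example (illustration only, not from the original source): with
   reg->type == REG_TYPE_R_64 and type == REG_TYPE_R_Z_SP,
   reg_type_masks[REG_TYPE_R_64] is the single bit for R_64, while
   reg_type_masks[REG_TYPE_R_Z_SP] is the union of the R_32, R_64, SP_32,
   SP_64, Z_32 and Z_64 bits; the intersection therefore equals
   reg_type_masks[REG_TYPE_R_64] and the check above succeeds, i.e. an X
   register is accepted wherever an R_Z_SP operand is expected.  */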
691
692 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
693 Return the register number otherwise. *ISREG32 is set to one if the
694 register is 32-bit wide; *ISREGZERO is set to one if the register is
695 of type Z_32 or Z_64.
696 Note that this function does not issue any diagnostics. */
697
698 static int
699 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
700 int *isreg32, int *isregzero)
701 {
702 char *str = *ccp;
703 const reg_entry *reg = parse_reg (&str);
704
705 if (reg == NULL)
706 return PARSE_FAIL;
707
708 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
709 return PARSE_FAIL;
710
711 switch (reg->type)
712 {
713 case REG_TYPE_SP_32:
714 case REG_TYPE_SP_64:
715 if (reject_sp)
716 return PARSE_FAIL;
717 *isreg32 = reg->type == REG_TYPE_SP_32;
718 *isregzero = 0;
719 break;
720 case REG_TYPE_R_32:
721 case REG_TYPE_R_64:
722 *isreg32 = reg->type == REG_TYPE_R_32;
723 *isregzero = 0;
724 break;
725 case REG_TYPE_Z_32:
726 case REG_TYPE_Z_64:
727 if (reject_rz)
728 return PARSE_FAIL;
729 *isreg32 = reg->type == REG_TYPE_Z_32;
730 *isregzero = 1;
731 break;
732 default:
733 return PARSE_FAIL;
734 }
735
736 *ccp = str;
737
738 return reg->number;
739 }
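/* For illustration (not in the original source): given the input "w3",
   aarch64_reg_parse_32_64 returns 3 with *ISREG32 set to 1 and *ISREGZERO
   set to 0; given "xzr" it sets *ISREGZERO to 1 (and fails with PARSE_FAIL
   if REJECT_RZ is non-zero); given "sp" it fails when REJECT_SP is
   non-zero.  */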
740
741 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
742 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
743 otherwise return FALSE.
744
745 Accept only one occurrence of:
746 8b 16b 4h 8h 2s 4s 1d 2d
747 b h s d q */
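/* For example (illustration only): ".4s" is parsed as width 4 with element
   type NT_s (4 * 32 == 128 bits), ".8b" as width 8 with NT_b (8 * 8 == 64
   bits), and a bare ".d" as width 0 with NT_d, which the caller treats as
   an element (index expected) rather than a full vector shape.  */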
748 static bfd_boolean
749 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
750 {
751 char *ptr = *str;
752 unsigned width;
753 unsigned element_size;
754 enum neon_el_type type;
755
756 /* skip '.' */
757 ptr++;
758
759 if (!ISDIGIT (*ptr))
760 {
761 width = 0;
762 goto elt_size;
763 }
764 width = strtoul (ptr, &ptr, 10);
765 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
766 {
767 first_error_fmt (_("bad size %d in vector width specifier"), width);
768 return FALSE;
769 }
770
771 elt_size:
772 switch (TOLOWER (*ptr))
773 {
774 case 'b':
775 type = NT_b;
776 element_size = 8;
777 break;
778 case 'h':
779 type = NT_h;
780 element_size = 16;
781 break;
782 case 's':
783 type = NT_s;
784 element_size = 32;
785 break;
786 case 'd':
787 type = NT_d;
788 element_size = 64;
789 break;
790 case 'q':
791 if (width == 1)
792 {
793 type = NT_q;
794 element_size = 128;
795 break;
796 }
797 /* fall through. */
798 default:
799 if (*ptr != '\0')
800 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
801 else
802 first_error (_("missing element size"));
803 return FALSE;
804 }
805 if (width != 0 && width * element_size != 64 && width * element_size != 128)
806 {
807 first_error_fmt (_
808 ("invalid element size %d and vector size combination %c"),
809 width, *ptr);
810 return FALSE;
811 }
812 ptr++;
813
814 parsed_type->type = type;
815 parsed_type->width = width;
816
817 *str = ptr;
818
819 return TRUE;
820 }
821
822 /* Parse a single type, e.g. ".8b", leading period included.
823 Only applicable to Vn registers.
824
825 Return TRUE on success; otherwise return FALSE. */
826 static bfd_boolean
827 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
828 {
829 char *str = *ccp;
830
831 if (*str == '.')
832 {
833 if (! parse_neon_type_for_operand (vectype, &str))
834 {
835 first_error (_("vector type expected"));
836 return FALSE;
837 }
838 }
839 else
840 return FALSE;
841
842 *ccp = str;
843
844 return TRUE;
845 }
846
847 /* Parse a register of the type TYPE.
848
849 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
850 name or the parsed register is not of TYPE.
851
852 Otherwise return the register number, and optionally fill in the actual
853 type of the register in *RTYPE when multiple alternatives were given, and
854 return the register shape and element index information in *TYPEINFO.
855
856 IN_REG_LIST should be set to TRUE if the caller is parsing a register
857 list. */
858
859 static int
860 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
861 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
862 {
863 char *str = *ccp;
864 const reg_entry *reg = parse_reg (&str);
865 struct neon_type_el atype;
866 struct neon_type_el parsetype;
867 bfd_boolean is_typed_vecreg = FALSE;
868
869 atype.defined = 0;
870 atype.type = NT_invtype;
871 atype.width = -1;
872 atype.index = 0;
873
874 if (reg == NULL)
875 {
876 if (typeinfo)
877 *typeinfo = atype;
878 set_default_error ();
879 return PARSE_FAIL;
880 }
881
882 if (! aarch64_check_reg_type (reg, type))
883 {
884 DEBUG_TRACE ("reg type check failed");
885 set_default_error ();
886 return PARSE_FAIL;
887 }
888 type = reg->type;
889
890 if (type == REG_TYPE_VN
891 && parse_neon_operand_type (&parsetype, &str))
892 {
893 /* Register is of the form Vn.[bhsdq]. */
894 is_typed_vecreg = TRUE;
895
896 if (parsetype.width == 0)
897 /* Expect index. In the new scheme we cannot have
898 Vn.[bhsdq] represent a scalar. Therefore any
899 Vn.[bhsdq] should have an index following it.
900 Except in register lists, of course. */
901 atype.defined |= NTA_HASINDEX;
902 else
903 atype.defined |= NTA_HASTYPE;
904
905 atype.type = parsetype.type;
906 atype.width = parsetype.width;
907 }
908
909 if (skip_past_char (&str, '['))
910 {
911 expressionS exp;
912
913 /* Reject Sn[index] syntax. */
914 if (!is_typed_vecreg)
915 {
916 first_error (_("this type of register can't be indexed"));
917 return PARSE_FAIL;
918 }
919
920 if (in_reg_list == TRUE)
921 {
922 first_error (_("index not allowed inside register list"));
923 return PARSE_FAIL;
924 }
925
926 atype.defined |= NTA_HASINDEX;
927
928 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
929
930 if (exp.X_op != O_constant)
931 {
932 first_error (_("constant expression required"));
933 return PARSE_FAIL;
934 }
935
936 if (! skip_past_char (&str, ']'))
937 return PARSE_FAIL;
938
939 atype.index = exp.X_add_number;
940 }
941 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
942 {
943 /* Indexed vector register expected. */
944 first_error (_("indexed vector register expected"));
945 return PARSE_FAIL;
946 }
947
948 /* A vector reg Vn should be typed or indexed. */
949 if (type == REG_TYPE_VN && atype.defined == 0)
950 {
951 first_error (_("invalid use of vector register"));
952 }
953
954 if (typeinfo)
955 *typeinfo = atype;
956
957 if (rtype)
958 *rtype = type;
959
960 *ccp = str;
961
962 return reg->number;
963 }
964
965 /* Parse register.
966
967 Return the register number on success; return PARSE_FAIL otherwise.
968
969 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
970 the register (e.g. NEON double or quad reg when either has been requested).
971
972 If this is a NEON vector register with additional type information, fill
973 in the struct pointed to by VECTYPE (if non-NULL).
974
975 This parser does not handle register lists. */
976
977 static int
978 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
979 aarch64_reg_type *rtype, struct neon_type_el *vectype)
980 {
981 struct neon_type_el atype;
982 char *str = *ccp;
983 int reg = parse_typed_reg (&str, type, rtype, &atype,
984 /*in_reg_list= */ FALSE);
985
986 if (reg == PARSE_FAIL)
987 return PARSE_FAIL;
988
989 if (vectype)
990 *vectype = atype;
991
992 *ccp = str;
993
994 return reg;
995 }
996
997 static inline bfd_boolean
998 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
999 {
1000 return
1001 e1.type == e2.type
1002 && e1.defined == e2.defined
1003 && e1.width == e2.width && e1.index == e2.index;
1004 }
1005
1006 /* This function parses the NEON register list. On success, it returns
1007 the parsed register list information in the following encoded format:
1008
1009 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1010 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1011
1012 The information of the register shape and/or index is returned in
1013 *VECTYPE.
1014
1015 It returns PARSE_FAIL if the register list is invalid.
1016
1017 The list contains one to four registers.
1018 Each register can be one of:
1019 <Vt>.<T>[<index>]
1020 <Vt>.<T>
1021 All <T> should be identical.
1022 All <index> should be identical.
1023 There are restrictions on <Vt> numbers which are checked later
1024 (by reg_list_valid_p). */
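/* Worked example (illustration only, not from the original source): for a
   hypothetical list "{v2.4s, v3.4s}" the loop below accumulates
   ret_val == (3 << 5) | 2 and nb_regs == 2, so the function returns
   (ret_val << 2) | (nb_regs - 1) == 0x189, with the common shape
   (NT_s, width 4) stored in *VECTYPE.  */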
1025
1026 static int
1027 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1028 {
1029 char *str = *ccp;
1030 int nb_regs;
1031 struct neon_type_el typeinfo, typeinfo_first;
1032 int val, val_range;
1033 int in_range;
1034 int ret_val;
1035 int i;
1036 bfd_boolean error = FALSE;
1037 bfd_boolean expect_index = FALSE;
1038
1039 if (*str != '{')
1040 {
1041 set_syntax_error (_("expecting {"));
1042 return PARSE_FAIL;
1043 }
1044 str++;
1045
1046 nb_regs = 0;
1047 typeinfo_first.defined = 0;
1048 typeinfo_first.type = NT_invtype;
1049 typeinfo_first.width = -1;
1050 typeinfo_first.index = 0;
1051 ret_val = 0;
1052 val = -1;
1053 val_range = -1;
1054 in_range = 0;
1055 do
1056 {
1057 if (in_range)
1058 {
1059 str++; /* skip over '-' */
1060 val_range = val;
1061 }
1062 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1063 /*in_reg_list= */ TRUE);
1064 if (val == PARSE_FAIL)
1065 {
1066 set_first_syntax_error (_("invalid vector register in list"));
1067 error = TRUE;
1068 continue;
1069 }
1070 /* reject [bhsd]n */
1071 if (typeinfo.defined == 0)
1072 {
1073 set_first_syntax_error (_("invalid scalar register in list"));
1074 error = TRUE;
1075 continue;
1076 }
1077
1078 if (typeinfo.defined & NTA_HASINDEX)
1079 expect_index = TRUE;
1080
1081 if (in_range)
1082 {
1083 if (val < val_range)
1084 {
1085 set_first_syntax_error
1086 (_("invalid range in vector register list"));
1087 error = TRUE;
1088 }
1089 val_range++;
1090 }
1091 else
1092 {
1093 val_range = val;
1094 if (nb_regs == 0)
1095 typeinfo_first = typeinfo;
1096 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1097 {
1098 set_first_syntax_error
1099 (_("type mismatch in vector register list"));
1100 error = TRUE;
1101 }
1102 }
1103 if (! error)
1104 for (i = val_range; i <= val; i++)
1105 {
1106 ret_val |= i << (5 * nb_regs);
1107 nb_regs++;
1108 }
1109 in_range = 0;
1110 }
1111 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1112
1113 skip_whitespace (str);
1114 if (*str != '}')
1115 {
1116 set_first_syntax_error (_("end of vector register list not found"));
1117 error = TRUE;
1118 }
1119 str++;
1120
1121 skip_whitespace (str);
1122
1123 if (expect_index)
1124 {
1125 if (skip_past_char (&str, '['))
1126 {
1127 expressionS exp;
1128
1129 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1130 if (exp.X_op != O_constant)
1131 {
1132 set_first_syntax_error (_("constant expression required."));
1133 error = TRUE;
1134 }
1135 if (! skip_past_char (&str, ']'))
1136 error = TRUE;
1137 else
1138 typeinfo_first.index = exp.X_add_number;
1139 }
1140 else
1141 {
1142 set_first_syntax_error (_("expected index"));
1143 error = TRUE;
1144 }
1145 }
1146
1147 if (nb_regs > 4)
1148 {
1149 set_first_syntax_error (_("too many registers in vector register list"));
1150 error = TRUE;
1151 }
1152 else if (nb_regs == 0)
1153 {
1154 set_first_syntax_error (_("empty vector register list"));
1155 error = TRUE;
1156 }
1157
1158 *ccp = str;
1159 if (! error)
1160 *vectype = typeinfo_first;
1161
1162 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1163 }
1164
1165 /* Directives: register aliases. */
1166
1167 static reg_entry *
1168 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1169 {
1170 reg_entry *new;
1171 const char *name;
1172
1173 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1174 {
1175 if (new->builtin)
1176 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1177 str);
1178
1179 /* Only warn about a redefinition if it's not defined as the
1180 same register. */
1181 else if (new->number != number || new->type != type)
1182 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1183
1184 return NULL;
1185 }
1186
1187 name = xstrdup (str);
1188 new = xmalloc (sizeof (reg_entry));
1189
1190 new->name = name;
1191 new->number = number;
1192 new->type = type;
1193 new->builtin = FALSE;
1194
1195 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1196 abort ();
1197
1198 return new;
1199 }
1200
1201 /* Look for the .req directive. This is of the form:
1202
1203 new_register_name .req existing_register_name
1204
1205 If we find one, or if it looks sufficiently like one that we want to
1206 handle any error here, return TRUE. Otherwise return FALSE. */
1207
1208 static bfd_boolean
1209 create_register_alias (char *newname, char *p)
1210 {
1211 const reg_entry *old;
1212 char *oldname, *nbuf;
1213 size_t nlen;
1214
1215 /* The input scrubber ensures that whitespace after the mnemonic is
1216 collapsed to single spaces. */
1217 oldname = p;
1218 if (strncmp (oldname, " .req ", 6) != 0)
1219 return FALSE;
1220
1221 oldname += 6;
1222 if (*oldname == '\0')
1223 return FALSE;
1224
1225 old = hash_find (aarch64_reg_hsh, oldname);
1226 if (!old)
1227 {
1228 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1229 return TRUE;
1230 }
1231
1232 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1233 the desired alias name, and p points to its end. If not, then
1234 the desired alias name is in the global original_case_string. */
1235 #ifdef TC_CASE_SENSITIVE
1236 nlen = p - newname;
1237 #else
1238 newname = original_case_string;
1239 nlen = strlen (newname);
1240 #endif
1241
1242 nbuf = alloca (nlen + 1);
1243 memcpy (nbuf, newname, nlen);
1244 nbuf[nlen] = '\0';
1245
1246 /* Create aliases under the new name as stated; an all-lowercase
1247 version of the new name; and an all-uppercase version of the new
1248 name. */
1249 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1250 {
1251 for (p = nbuf; *p; p++)
1252 *p = TOUPPER (*p);
1253
1254 if (strncmp (nbuf, newname, nlen))
1255 {
1256 /* If this attempt to create an additional alias fails, do not bother
1257 trying to create the all-lower case alias. We will fail and issue
1258 a second, duplicate error message. This situation arises when the
1259 programmer does something like:
1260 foo .req r0
1261 Foo .req r1
1262 The second .req creates the "Foo" alias but then fails to create
1263 the artificial FOO alias because it has already been created by the
1264 first .req. */
1265 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1266 return TRUE;
1267 }
1268
1269 for (p = nbuf; *p; p++)
1270 *p = TOLOWER (*p);
1271
1272 if (strncmp (nbuf, newname, nlen))
1273 insert_reg_alias (nbuf, old->number, old->type);
1274 }
1275
1276 return TRUE;
1277 }
1278
1279 /* Should never be called, as .req goes between the alias and the
1280 register name, not at the beginning of the line. */
1281 static void
1282 s_req (int a ATTRIBUTE_UNUSED)
1283 {
1284 as_bad (_("invalid syntax for .req directive"));
1285 }
1286
1287 /* The .unreq directive deletes an alias which was previously defined
1288 by .req. For example:
1289
1290 my_alias .req r11
1291 .unreq my_alias */
1292
1293 static void
1294 s_unreq (int a ATTRIBUTE_UNUSED)
1295 {
1296 char *name;
1297 char saved_char;
1298
1299 name = input_line_pointer;
1300
1301 while (*input_line_pointer != 0
1302 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1303 ++input_line_pointer;
1304
1305 saved_char = *input_line_pointer;
1306 *input_line_pointer = 0;
1307
1308 if (!*name)
1309 as_bad (_("invalid syntax for .unreq directive"));
1310 else
1311 {
1312 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1313
1314 if (!reg)
1315 as_bad (_("unknown register alias '%s'"), name);
1316 else if (reg->builtin)
1317 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1318 name);
1319 else
1320 {
1321 char *p;
1322 char *nbuf;
1323
1324 hash_delete (aarch64_reg_hsh, name, FALSE);
1325 free ((char *) reg->name);
1326 free (reg);
1327
1328 /* Also locate the all upper case and all lower case versions.
1329 Do not complain if we cannot find one or the other as it
1330 was probably deleted above. */
1331
1332 nbuf = strdup (name);
1333 for (p = nbuf; *p; p++)
1334 *p = TOUPPER (*p);
1335 reg = hash_find (aarch64_reg_hsh, nbuf);
1336 if (reg)
1337 {
1338 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1339 free ((char *) reg->name);
1340 free (reg);
1341 }
1342
1343 for (p = nbuf; *p; p++)
1344 *p = TOLOWER (*p);
1345 reg = hash_find (aarch64_reg_hsh, nbuf);
1346 if (reg)
1347 {
1348 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1349 free ((char *) reg->name);
1350 free (reg);
1351 }
1352
1353 free (nbuf);
1354 }
1355 }
1356
1357 *input_line_pointer = saved_char;
1358 demand_empty_rest_of_line ();
1359 }
1360
1361 /* Directives: Instruction set selection. */
1362
1363 #ifdef OBJ_ELF
1364 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1365 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1366 Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
1367 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
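/* For illustration (assumed example, not from the original source): in a
   fragment such as

     .text
     add x0, x1, x2      ($x mapping symbol emitted at offset 0)
     .word 0x12345678    ($d mapping symbol emitted at offset 4)

   the mapping symbols let tools such as objdump switch between
   disassembling code and dumping data at the right offsets.  */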
1368
1369 /* Create a new mapping symbol for the transition to STATE. */
1370
1371 static void
1372 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1373 {
1374 symbolS *symbolP;
1375 const char *symname;
1376 int type;
1377
1378 switch (state)
1379 {
1380 case MAP_DATA:
1381 symname = "$d";
1382 type = BSF_NO_FLAGS;
1383 break;
1384 case MAP_INSN:
1385 symname = "$x";
1386 type = BSF_NO_FLAGS;
1387 break;
1388 default:
1389 abort ();
1390 }
1391
1392 symbolP = symbol_new (symname, now_seg, value, frag);
1393 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1394
1395 /* Save the mapping symbols for future reference. Also check that
1396 we do not place two mapping symbols at the same offset within a
1397 frag. We'll handle overlap between frags in
1398 check_mapping_symbols.
1399
1400 If .fill or another data-filling directive generates zero-sized data,
1401 the mapping symbol for the following code will have the same value
1402 as the one generated for the data filling directive. In this case,
1403 we replace the old symbol with the new one at the same address. */
1404 if (value == 0)
1405 {
1406 if (frag->tc_frag_data.first_map != NULL)
1407 {
1408 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1409 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1410 &symbol_lastP);
1411 }
1412 frag->tc_frag_data.first_map = symbolP;
1413 }
1414 if (frag->tc_frag_data.last_map != NULL)
1415 {
1416 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1417 S_GET_VALUE (symbolP));
1418 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1419 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1420 &symbol_lastP);
1421 }
1422 frag->tc_frag_data.last_map = symbolP;
1423 }
1424
1425 /* We must sometimes convert a region marked as code to data during
1426 code alignment, if an odd number of bytes have to be padded. The
1427 code mapping symbol is pushed to an aligned address. */
1428
1429 static void
1430 insert_data_mapping_symbol (enum mstate state,
1431 valueT value, fragS * frag, offsetT bytes)
1432 {
1433 /* If there was already a mapping symbol, remove it. */
1434 if (frag->tc_frag_data.last_map != NULL
1435 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1436 frag->fr_address + value)
1437 {
1438 symbolS *symp = frag->tc_frag_data.last_map;
1439
1440 if (value == 0)
1441 {
1442 know (frag->tc_frag_data.first_map == symp);
1443 frag->tc_frag_data.first_map = NULL;
1444 }
1445 frag->tc_frag_data.last_map = NULL;
1446 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1447 }
1448
1449 make_mapping_symbol (MAP_DATA, value, frag);
1450 make_mapping_symbol (state, value + bytes, frag);
1451 }
1452
1453 static void mapping_state_2 (enum mstate state, int max_chars);
1454
1455 /* Set the mapping state to STATE. Only call this when about to
1456 emit some STATE bytes to the file. */
1457
1458 void
1459 mapping_state (enum mstate state)
1460 {
1461 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1462
1463 if (state == MAP_INSN)
1464 /* AArch64 instructions require 4-byte alignment. When emitting
1465 instructions into any section, record the appropriate section
1466 alignment. */
1467 record_alignment (now_seg, 2);
1468
1469 if (mapstate == state)
1470 /* The mapping symbol has already been emitted.
1471 There is nothing else to do. */
1472 return;
1473
1474 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1475 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1476 /* Only emit MAP_DATA within an executable section; otherwise it will be
1477 evaluated later by the next else branch. */
1478 return;
1479 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1480 {
1481 /* Only add the symbol if the offset is > 0:
1482 if we're at the first frag, check its size is > 0;
1483 if we're not at the first frag, then for sure
1484 the offset is > 0. */
1485 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1486 const int add_symbol = (frag_now != frag_first)
1487 || (frag_now_fix () > 0);
1488
1489 if (add_symbol)
1490 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1491 }
1492 #undef TRANSITION
1493
1494 mapping_state_2 (state, 0);
1495 }
1496
1497 /* Same as mapping_state, but MAX_CHARS bytes have already been
1498 allocated. Put the mapping symbol that far back. */
1499
1500 static void
1501 mapping_state_2 (enum mstate state, int max_chars)
1502 {
1503 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1504
1505 if (!SEG_NORMAL (now_seg))
1506 return;
1507
1508 if (mapstate == state)
1509 /* The mapping symbol has already been emitted.
1510 There is nothing else to do. */
1511 return;
1512
1513 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1514 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1515 }
1516 #else
1517 #define mapping_state(x) /* nothing */
1518 #define mapping_state_2(x, y) /* nothing */
1519 #endif
1520
1521 /* Directives: sectioning and alignment. */
1522
1523 static void
1524 s_bss (int ignore ATTRIBUTE_UNUSED)
1525 {
1526 /* We don't support putting frags in the BSS segment, we fake it by
1527 marking in_bss, then looking at s_skip for clues. */
1528 subseg_set (bss_section, 0);
1529 demand_empty_rest_of_line ();
1530 mapping_state (MAP_DATA);
1531 }
1532
1533 static void
1534 s_even (int ignore ATTRIBUTE_UNUSED)
1535 {
1536 /* Never make a frag if we expect an extra pass. */
1537 if (!need_pass_2)
1538 frag_align (1, 0, 0);
1539
1540 record_alignment (now_seg, 1);
1541
1542 demand_empty_rest_of_line ();
1543 }
1544
1545 /* Directives: Literal pools. */
1546
1547 static literal_pool *
1548 find_literal_pool (int size)
1549 {
1550 literal_pool *pool;
1551
1552 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1553 {
1554 if (pool->section == now_seg
1555 && pool->sub_section == now_subseg && pool->size == size)
1556 break;
1557 }
1558
1559 return pool;
1560 }
1561
1562 static literal_pool *
1563 find_or_make_literal_pool (int size)
1564 {
1565 /* Next literal pool ID number. */
1566 static unsigned int latest_pool_num = 1;
1567 literal_pool *pool;
1568
1569 pool = find_literal_pool (size);
1570
1571 if (pool == NULL)
1572 {
1573 /* Create a new pool. */
1574 pool = xmalloc (sizeof (*pool));
1575 if (!pool)
1576 return NULL;
1577
1578 /* Currently we always put the literal pool in the current text
1579 section. If we were generating "small" model code where we
1580 knew that all code and initialised data was within 1MB then
1581 we could output literals to mergeable, read-only data
1582 sections. */
1583
1584 pool->next_free_entry = 0;
1585 pool->section = now_seg;
1586 pool->sub_section = now_subseg;
1587 pool->size = size;
1588 pool->next = list_of_pools;
1589 pool->symbol = NULL;
1590
1591 /* Add it to the list. */
1592 list_of_pools = pool;
1593 }
1594
1595 /* New pools, and emptied pools, will have a NULL symbol. */
1596 if (pool->symbol == NULL)
1597 {
1598 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1599 (valueT) 0, &zero_address_frag);
1600 pool->id = latest_pool_num++;
1601 }
1602
1603 /* Done. */
1604 return pool;
1605 }
1606
1607 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1608 Return TRUE on success, otherwise return FALSE. */
1609 static bfd_boolean
1610 add_to_lit_pool (expressionS *exp, int size)
1611 {
1612 literal_pool *pool;
1613 unsigned int entry;
1614
1615 pool = find_or_make_literal_pool (size);
1616
1617 /* Check if this literal value is already in the pool. */
1618 for (entry = 0; entry < pool->next_free_entry; entry++)
1619 {
1620 expressionS * litexp = & pool->literals[entry].exp;
1621
1622 if ((litexp->X_op == exp->X_op)
1623 && (exp->X_op == O_constant)
1624 && (litexp->X_add_number == exp->X_add_number)
1625 && (litexp->X_unsigned == exp->X_unsigned))
1626 break;
1627
1628 if ((litexp->X_op == exp->X_op)
1629 && (exp->X_op == O_symbol)
1630 && (litexp->X_add_number == exp->X_add_number)
1631 && (litexp->X_add_symbol == exp->X_add_symbol)
1632 && (litexp->X_op_symbol == exp->X_op_symbol))
1633 break;
1634 }
1635
1636 /* Do we need to create a new entry? */
1637 if (entry == pool->next_free_entry)
1638 {
1639 if (entry >= MAX_LITERAL_POOL_SIZE)
1640 {
1641 set_syntax_error (_("literal pool overflow"));
1642 return FALSE;
1643 }
1644
1645 pool->literals[entry].exp = *exp;
1646 pool->next_free_entry += 1;
1647 if (exp->X_op == O_big)
1648 {
1649 /* PR 16688: Bignums are held in a single global array. We must
1650 copy and preserve that value now, before it is overwritten. */
1651 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1652 memcpy (pool->literals[entry].bignum, generic_bignum,
1653 CHARS_PER_LITTLENUM * exp->X_add_number);
1654 }
1655 else
1656 pool->literals[entry].bignum = NULL;
1657 }
1658
1659 exp->X_op = O_symbol;
1660 exp->X_add_number = ((int) entry) * size;
1661 exp->X_add_symbol = pool->symbol;
1662
1663 return TRUE;
1664 }
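/* Usage sketch (illustration only): a load of the form

     ldr x0, =0x1122334455667788

   cannot encode the constant directly, so the constant is added to the
   8-byte literal pool via add_to_lit_pool and the load is rewritten as a
   PC-relative load from the pool, which is later emitted by ".ltorg"
   (or ".pool") through s_ltorg below.  */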
1665
1666 /* Can't use symbol_new here, so have to create a symbol and then at
1667 a later date assign it a value. That's what these functions do. */
1668
1669 static void
1670 symbol_locate (symbolS * symbolP,
1671 const char *name,/* It is copied, the caller can modify. */
1672 segT segment, /* Segment identifier (SEG_<something>). */
1673 valueT valu, /* Symbol value. */
1674 fragS * frag) /* Associated fragment. */
1675 {
1676 size_t name_length;
1677 char *preserved_copy_of_name;
1678
1679 name_length = strlen (name) + 1; /* +1 for \0. */
1680 obstack_grow (&notes, name, name_length);
1681 preserved_copy_of_name = obstack_finish (&notes);
1682
1683 #ifdef tc_canonicalize_symbol_name
1684 preserved_copy_of_name =
1685 tc_canonicalize_symbol_name (preserved_copy_of_name);
1686 #endif
1687
1688 S_SET_NAME (symbolP, preserved_copy_of_name);
1689
1690 S_SET_SEGMENT (symbolP, segment);
1691 S_SET_VALUE (symbolP, valu);
1692 symbol_clear_list_pointers (symbolP);
1693
1694 symbol_set_frag (symbolP, frag);
1695
1696 /* Link to end of symbol chain. */
1697 {
1698 extern int symbol_table_frozen;
1699
1700 if (symbol_table_frozen)
1701 abort ();
1702 }
1703
1704 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1705
1706 obj_symbol_new_hook (symbolP);
1707
1708 #ifdef tc_symbol_new_hook
1709 tc_symbol_new_hook (symbolP);
1710 #endif
1711
1712 #ifdef DEBUG_SYMS
1713 verify_symbol_chain (symbol_rootP, symbol_lastP);
1714 #endif /* DEBUG_SYMS */
1715 }
1716
1717
1718 static void
1719 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1720 {
1721 unsigned int entry;
1722 literal_pool *pool;
1723 char sym_name[20];
1724 int align;
1725
1726 for (align = 2; align <= 4; align++)
1727 {
1728 int size = 1 << align;
1729
1730 pool = find_literal_pool (size);
1731 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1732 continue;
1733
1734 mapping_state (MAP_DATA);
1735
1736 /* Align the pool, since it will be accessed with word (or larger) loads.
1737 Only make a frag if we have to. */
1738 if (!need_pass_2)
1739 frag_align (align, 0, 0);
1740
1741 record_alignment (now_seg, align);
1742
1743 sprintf (sym_name, "$$lit_\002%x", pool->id);
1744
1745 symbol_locate (pool->symbol, sym_name, now_seg,
1746 (valueT) frag_now_fix (), frag_now);
1747 symbol_table_insert (pool->symbol);
1748
1749 for (entry = 0; entry < pool->next_free_entry; entry++)
1750 {
1751 expressionS * exp = & pool->literals[entry].exp;
1752
1753 if (exp->X_op == O_big)
1754 {
1755 /* PR 16688: Restore the global bignum value. */
1756 gas_assert (pool->literals[entry].bignum != NULL);
1757 memcpy (generic_bignum, pool->literals[entry].bignum,
1758 CHARS_PER_LITTLENUM * exp->X_add_number);
1759 }
1760
1761 /* First output the expression in the instruction to the pool. */
1762 emit_expr (exp, size); /* .word|.xword */
1763
1764 if (exp->X_op == O_big)
1765 {
1766 free (pool->literals[entry].bignum);
1767 pool->literals[entry].bignum = NULL;
1768 }
1769 }
1770
1771 /* Mark the pool as empty. */
1772 pool->next_free_entry = 0;
1773 pool->symbol = NULL;
1774 }
1775 }
1776
1777 #ifdef OBJ_ELF
1778 /* Forward declarations for functions below, in the MD interface
1779 section. */
1780 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1781 static struct reloc_table_entry * find_reloc_table_entry (char **);
1782
1783 /* Directives: Data. */
1784 /* N.B. the support for relocation suffix in this directive needs to be
1785 implemented properly. */
1786
1787 static void
1788 s_aarch64_elf_cons (int nbytes)
1789 {
1790 expressionS exp;
1791
1792 #ifdef md_flush_pending_output
1793 md_flush_pending_output ();
1794 #endif
1795
1796 if (is_it_end_of_statement ())
1797 {
1798 demand_empty_rest_of_line ();
1799 return;
1800 }
1801
1802 #ifdef md_cons_align
1803 md_cons_align (nbytes);
1804 #endif
1805
1806 mapping_state (MAP_DATA);
1807 do
1808 {
1809 struct reloc_table_entry *reloc;
1810
1811 expression (&exp);
1812
1813 if (exp.X_op != O_symbol)
1814 emit_expr (&exp, (unsigned int) nbytes);
1815 else
1816 {
1817 skip_past_char (&input_line_pointer, '#');
1818 if (skip_past_char (&input_line_pointer, ':'))
1819 {
1820 reloc = find_reloc_table_entry (&input_line_pointer);
1821 if (reloc == NULL)
1822 as_bad (_("unrecognized relocation suffix"));
1823 else
1824 as_bad (_("unimplemented relocation suffix"));
1825 ignore_rest_of_line ();
1826 return;
1827 }
1828 else
1829 emit_expr (&exp, (unsigned int) nbytes);
1830 }
1831 }
1832 while (*input_line_pointer++ == ',');
1833
1834 /* Put terminator back into stream. */
1835 input_line_pointer--;
1836 demand_empty_rest_of_line ();
1837 }
1838
1839 #endif /* OBJ_ELF */
1840
1841 /* Output a 32-bit word, but mark as an instruction. */
1842
1843 static void
1844 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1845 {
1846 expressionS exp;
1847
1848 #ifdef md_flush_pending_output
1849 md_flush_pending_output ();
1850 #endif
1851
1852 if (is_it_end_of_statement ())
1853 {
1854 demand_empty_rest_of_line ();
1855 return;
1856 }
1857
1858 /* Sections are assumed to start aligned. In an executable section there is
1859 no MAP_DATA symbol pending, so we only align the address during the
1860 MAP_DATA --> MAP_INSN transition.
1861 For other sections, this is not guaranteed. */
1862 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1863 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1864 frag_align_code (2, 0);
1865
1866 #ifdef OBJ_ELF
1867 mapping_state (MAP_INSN);
1868 #endif
1869
1870 do
1871 {
1872 expression (&exp);
1873 if (exp.X_op != O_constant)
1874 {
1875 as_bad (_("constant expression required"));
1876 ignore_rest_of_line ();
1877 return;
1878 }
1879
1880 if (target_big_endian)
1881 {
1882 unsigned int val = exp.X_add_number;
1883 exp.X_add_number = SWAP_32 (val);
1884 }
1885 emit_expr (&exp, 4);
1886 }
1887 while (*input_line_pointer++ == ',');
1888
1889 /* Put terminator back into stream. */
1890 input_line_pointer--;
1891 demand_empty_rest_of_line ();
1892 }
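/* Usage sketch (illustration only): ".inst 0xd503201f" emits the 32-bit
   encoding of a NOP as data bytes but records it with an $x (code) mapping
   symbol, byte-swapping the value first on big-endian targets as done
   above.  */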
1893
1894 #ifdef OBJ_ELF
1895 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1896
1897 static void
1898 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1899 {
1900 expressionS exp;
1901
1902 /* Since we're just labelling the code, there's no need to define a
1903 mapping symbol. */
1904 expression (&exp);
1905 /* Make sure there is enough room in this frag for the following
1906 blr. This trick only works if the blr follows immediately after
1907 the .tlsdesccall directive. */
1908 frag_grow (4);
1909 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1910 BFD_RELOC_AARCH64_TLSDESC_CALL);
1911
1912 demand_empty_rest_of_line ();
1913 }
1914 #endif /* OBJ_ELF */
1915
1916 static void s_aarch64_arch (int);
1917 static void s_aarch64_cpu (int);
1918 static void s_aarch64_arch_extension (int);
1919
1920 /* This table describes all the machine specific pseudo-ops the assembler
1921 has to support. The fields are:
1922 pseudo-op name without dot
1923 function to call to execute this pseudo-op
1924 Integer arg to pass to the function. */
1925
1926 const pseudo_typeS md_pseudo_table[] = {
1927 /* Never called because '.req' does not start a line. */
1928 {"req", s_req, 0},
1929 {"unreq", s_unreq, 0},
1930 {"bss", s_bss, 0},
1931 {"even", s_even, 0},
1932 {"ltorg", s_ltorg, 0},
1933 {"pool", s_ltorg, 0},
1934 {"cpu", s_aarch64_cpu, 0},
1935 {"arch", s_aarch64_arch, 0},
1936 {"arch_extension", s_aarch64_arch_extension, 0},
1937 {"inst", s_aarch64_inst, 0},
1938 #ifdef OBJ_ELF
1939 {"tlsdesccall", s_tlsdesccall, 0},
1940 {"word", s_aarch64_elf_cons, 4},
1941 {"long", s_aarch64_elf_cons, 4},
1942 {"xword", s_aarch64_elf_cons, 8},
1943 {"dword", s_aarch64_elf_cons, 8},
1944 #endif
1945 {0, 0, 0}
1946 };
1947 \f
1948
1949 /* Check whether STR points to a register name followed by a comma or the
1950 end of line; REG_TYPE indicates which register types are checked
1951 against. Return TRUE if STR is such a register name; otherwise return
1952 FALSE. The function does not intend to produce any diagnostics, but since
1953 the register parser aarch64_reg_parse, which is called by this function,
1954 does produce diagnostics, so we call clear_error to clear any diagnostics
1955 that may be generated by aarch64_reg_parse.
1956 Also, the function returns FALSE directly if there is any user error
1957 present at the function entry. This prevents the existing diagnostics
1958 state from being spoiled.
1959 The function currently serves parse_constant_immediate and
1960 parse_big_immediate only. */
1961 static bfd_boolean
1962 reg_name_p (char *str, aarch64_reg_type reg_type)
1963 {
1964 int reg;
1965
1966 /* Prevent the diagnostics state from being spoiled. */
1967 if (error_p ())
1968 return FALSE;
1969
1970 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1971
1972 /* Clear the parsing error that may be set by the reg parser. */
1973 clear_error ();
1974
1975 if (reg == PARSE_FAIL)
1976 return FALSE;
1977
1978 skip_whitespace (str);
1979 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1980 return TRUE;
1981
1982 return FALSE;
1983 }
1984
1985 /* Parser functions used exclusively in instruction operands. */
1986
1987 /* Parse an immediate expression which may not be constant.
1988
1989 To prevent the expression parser from pushing a register name
1990 into the symbol table as an undefined symbol, a check is first
1991 done to find out whether STR is a valid register name followed
1992 by a comma or the end of line. Return FALSE if STR is such a
1993 string. */
1994
1995 static bfd_boolean
1996 parse_immediate_expression (char **str, expressionS *exp)
1997 {
1998 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1999 {
2000 set_recoverable_error (_("immediate operand required"));
2001 return FALSE;
2002 }
2003
2004 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2005
2006 if (exp->X_op == O_absent)
2007 {
2008 set_fatal_syntax_error (_("missing immediate expression"));
2009 return FALSE;
2010 }
2011
2012 return TRUE;
2013 }
2014
2015 /* Constant immediate-value read function for use in insn parsing.
2016 STR points to the beginning of the immediate (with the optional
2017 leading #); *VAL receives the value.
2018
2019 Return TRUE on success; otherwise return FALSE. */
2020
2021 static bfd_boolean
2022 parse_constant_immediate (char **str, int64_t * val)
2023 {
2024 expressionS exp;
2025
2026 if (! parse_immediate_expression (str, &exp))
2027 return FALSE;
2028
2029 if (exp.X_op != O_constant)
2030 {
2031 set_syntax_error (_("constant expression required"));
2032 return FALSE;
2033 }
2034
2035 *val = exp.X_add_number;
2036 return TRUE;
2037 }
2038
2039 static uint32_t
2040 encode_imm_float_bits (uint32_t imm)
2041 {
2042 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2043 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2044 }
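/* For illustration only (not part of the original logic): with the mapping
   above, the IEEE754 single-precision words 0x3f800000 (1.0) and 0x40000000
   (2.0) yield the 8-bit immediates 0x70 and 0x00 respectively, assuming the
   input already satisfies aarch64_imm_float_p below.  */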
2045
2046 /* Return TRUE if the single-precision floating-point value encoded in IMM
2047 can be expressed in the AArch64 8-bit signed floating-point format with
2048 3-bit exponent and normalized 4 bits of precision; in other words, the
2049    floating-point value must be expressible as
2050      (+/-) n / 16 * power (2, r)
2051    where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */
2052
2053 static bfd_boolean
2054 aarch64_imm_float_p (uint32_t imm)
2055 {
2056 /* If a single-precision floating-point value has the following bit
2057 pattern, it can be expressed in the AArch64 8-bit floating-point
2058 format:
2059
2060 3 32222222 2221111111111
2061 1 09876543 21098765432109876543210
2062 n Eeeeeexx xxxx0000000000000000000
2063
2064 where n, e and each x are either 0 or 1 independently, with
2065 E == ~ e. */
2066
2067 uint32_t pattern;
2068
2069 /* Prepare the pattern for 'Eeeeee'. */
2070 if (((imm >> 30) & 0x1) == 0)
2071 pattern = 0x3e000000;
2072 else
2073 pattern = 0x40000000;
2074
2075 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2076 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2077 }
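/* Worked examples (illustrative, derived from the test above): 2.0
   (0x40000000) and 0.5 (0x3f000000) pass, since their low 19 bits are zero
   and bits 25-29 equal the complement of bit 30; 0.1 (0x3dcccccd) fails
   because its low 19 bits are non-zero.  */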
2078
2079 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2080
2081 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2082 8-bit signed floating-point format with 3-bit exponent and normalized 4
2083 bits of precision (i.e. can be used in an FMOV instruction); return the
2084 equivalent single-precision encoding in *FPWORD.
2085
2086 Otherwise return FALSE. */
2087
2088 static bfd_boolean
2089 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2090 {
2091 /* If a double-precision floating-point value has the following bit
2092 pattern, it can be expressed in the AArch64 8-bit floating-point
2093 format:
2094
2095 6 66655555555 554444444...21111111111
2096 3 21098765432 109876543...098765432109876543210
2097 n Eeeeeeeeexx xxxx00000...000000000000000000000
2098
2099 where n, e and each x are either 0 or 1 independently, with
2100 E == ~ e. */
2101
2102 uint32_t pattern;
2103 uint32_t high32 = imm >> 32;
2104
2105 /* Lower 32 bits need to be 0s. */
2106 if ((imm & 0xffffffff) != 0)
2107 return FALSE;
2108
2109 /* Prepare the pattern for 'Eeeeeeeee'. */
2110 if (((high32 >> 30) & 0x1) == 0)
2111 pattern = 0x3fc00000;
2112 else
2113 pattern = 0x40000000;
2114
2115 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2116 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2117 {
2118 /* Convert to the single-precision encoding.
2119 i.e. convert
2120 n Eeeeeeeeexx xxxx00000...000000000000000000000
2121 to
2122 n Eeeeeexx xxxx0000000000000000000. */
2123 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2124 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2125 return TRUE;
2126 }
2127 else
2128 return FALSE;
2129 }
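/* Illustrative example (not from the original source): for the
   double-precision value 2.0, IMM is 0x4000000000000000; the low 32 bits
   are zero, HIGH32 is 0x40000000, bit 62 is set so PATTERN is 0x40000000,
   and the checks pass, producing *FPWORD == 0x40000000, i.e. the
   single-precision encoding of 2.0.  */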
2130
2131 /* Parse a floating-point immediate. Return TRUE on success and return the
2132 value in *IMMED in the format of IEEE754 single-precision encoding.
2133 *CCP points to the start of the string; DP_P is TRUE when the immediate
2134 is expected to be in double-precision (N.B. this only matters when
2135 hexadecimal representation is involved).
2136
2137 N.B. 0.0 is accepted by this function. */
2138
2139 static bfd_boolean
2140 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2141 {
2142 char *str = *ccp;
2143 char *fpnum;
2144 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2145 int found_fpchar = 0;
2146 int64_t val = 0;
2147 unsigned fpword = 0;
2148 bfd_boolean hex_p = FALSE;
2149
2150 skip_past_char (&str, '#');
2151
2152 fpnum = str;
2153 skip_whitespace (fpnum);
2154
2155 if (strncmp (fpnum, "0x", 2) == 0)
2156 {
2157 /* Support the hexadecimal representation of the IEEE754 encoding.
2158 Double-precision is expected when DP_P is TRUE, otherwise the
2159 representation should be in single-precision. */
2160 if (! parse_constant_immediate (&str, &val))
2161 goto invalid_fp;
2162
2163 if (dp_p)
2164 {
2165 if (! aarch64_double_precision_fmovable (val, &fpword))
2166 goto invalid_fp;
2167 }
2168 else if ((uint64_t) val > 0xffffffff)
2169 goto invalid_fp;
2170 else
2171 fpword = val;
2172
2173 hex_p = TRUE;
2174 }
2175 else
2176 {
2177 /* We must not accidentally parse an integer as a floating-point number.
2178 Make sure that the value we parse is not an integer by checking for
2179 special characters '.' or 'e'. */
2180 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2181 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2182 {
2183 found_fpchar = 1;
2184 break;
2185 }
2186
2187 if (!found_fpchar)
2188 return FALSE;
2189 }
2190
2191 if (! hex_p)
2192 {
2193 int i;
2194
2195 if ((str = atof_ieee (str, 's', words)) == NULL)
2196 goto invalid_fp;
2197
2198 /* Our FP word must be 32 bits (single-precision FP). */
2199 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2200 {
2201 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2202 fpword |= words[i];
2203 }
2204 }
2205
2206 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2207 {
2208 *immed = fpword;
2209 *ccp = str;
2210 return TRUE;
2211 }
2212
2213 invalid_fp:
2214 set_fatal_syntax_error (_("invalid floating-point constant"));
2215 return FALSE;
2216 }
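/* For example (illustrative): the parser above should accept both the
   decimal form "#2.0" and the hexadecimal IEEE754 form "#0x40000000"
   (single-precision), or "#0x4000000000000000" when DP_P is TRUE, all
   yielding *IMMED == 0x40000000.  */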
2217
2218 /* Less-generic immediate-value read function with the possibility of loading
2219 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2220 instructions.
2221
2222 To prevent the expression parser from pushing a register name into the
2223    symbol table as an undefined symbol, a check is first done to find
2224 out whether STR is a valid register name followed by a comma or the end
2225 of line. Return FALSE if STR is such a register. */
2226
2227 static bfd_boolean
2228 parse_big_immediate (char **str, int64_t *imm)
2229 {
2230 char *ptr = *str;
2231
2232 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2233 {
2234 set_syntax_error (_("immediate operand required"));
2235 return FALSE;
2236 }
2237
2238 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2239
2240 if (inst.reloc.exp.X_op == O_constant)
2241 *imm = inst.reloc.exp.X_add_number;
2242
2243 *str = ptr;
2244
2245 return TRUE;
2246 }
2247
2248 /* Flag RELOC as a GAS internal fixup for the operand OPERAND; if
2249    NEED_LIBOPCODES_P is non-zero, the fixup will need assistance
2250    from libopcodes.  */
2251
2252 static inline void
2253 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2254 const aarch64_opnd_info *operand,
2255 int need_libopcodes_p)
2256 {
2257 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2258 reloc->opnd = operand->type;
2259 if (need_libopcodes_p)
2260 reloc->need_libopcodes_p = 1;
2261 }
2262
2263 /* Return TRUE if the instruction needs to be fixed up later internally by
2264 the GAS; otherwise return FALSE. */
2265
2266 static inline bfd_boolean
2267 aarch64_gas_internal_fixup_p (void)
2268 {
2269 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2270 }
2271
2272 /* Assign the immediate value to the relevant field in *OPERAND if
2273    RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2274    needs an internal fixup at a later stage.
2275    ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2276    IMM.VALUE that is assigned the constant.  */
2277 static inline void
2278 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2279 aarch64_opnd_info *operand,
2280 int addr_off_p,
2281 int need_libopcodes_p,
2282 int skip_p)
2283 {
2284 if (reloc->exp.X_op == O_constant)
2285 {
2286 if (addr_off_p)
2287 operand->addr.offset.imm = reloc->exp.X_add_number;
2288 else
2289 operand->imm.value = reloc->exp.X_add_number;
2290 reloc->type = BFD_RELOC_UNUSED;
2291 }
2292 else
2293 {
2294 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2295 /* Tell libopcodes to ignore this operand or not. This is helpful
2296 when one of the operands needs to be fixed up later but we need
2297 libopcodes to check the other operands. */
2298 operand->skip = skip_p;
2299 }
2300 }
2301
2302 /* Relocation modifiers. Each entry in the table contains the textual
2303 name for the relocation which may be placed before a symbol used as
2304    a load/store offset, or an ADD immediate.  It must be surrounded by a
2305 leading and trailing colon, for example:
2306
2307 ldr x0, [x1, #:rello:varsym]
2308 add x0, x1, #:rello:varsym */
2309
2310 struct reloc_table_entry
2311 {
2312 const char *name;
2313 int pc_rel;
2314 bfd_reloc_code_real_type adr_type;
2315 bfd_reloc_code_real_type adrp_type;
2316 bfd_reloc_code_real_type movw_type;
2317 bfd_reloc_code_real_type add_type;
2318 bfd_reloc_code_real_type ldst_type;
2319 bfd_reloc_code_real_type ld_literal_type;
2320 };
2321
2322 static struct reloc_table_entry reloc_table[] = {
2323 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2324 {"lo12", 0,
2325 0, /* adr_type */
2326 0,
2327 0,
2328 BFD_RELOC_AARCH64_ADD_LO12,
2329 BFD_RELOC_AARCH64_LDST_LO12,
2330 0},
2331
2332 /* Higher 21 bits of pc-relative page offset: ADRP */
2333 {"pg_hi21", 1,
2334 0, /* adr_type */
2335 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2336 0,
2337 0,
2338 0,
2339 0},
2340
2341 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2342 {"pg_hi21_nc", 1,
2343 0, /* adr_type */
2344 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2345 0,
2346 0,
2347 0,
2348 0},
2349
2350 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2351 {"abs_g0", 0,
2352 0, /* adr_type */
2353 0,
2354 BFD_RELOC_AARCH64_MOVW_G0,
2355 0,
2356 0,
2357 0},
2358
2359 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2360 {"abs_g0_s", 0,
2361 0, /* adr_type */
2362 0,
2363 BFD_RELOC_AARCH64_MOVW_G0_S,
2364 0,
2365 0,
2366 0},
2367
2368 /* Less significant bits 0-15 of address/value: MOVK, no check */
2369 {"abs_g0_nc", 0,
2370 0, /* adr_type */
2371 0,
2372 BFD_RELOC_AARCH64_MOVW_G0_NC,
2373 0,
2374 0,
2375 0},
2376
2377 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2378 {"abs_g1", 0,
2379 0, /* adr_type */
2380 0,
2381 BFD_RELOC_AARCH64_MOVW_G1,
2382 0,
2383 0,
2384 0},
2385
2386 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2387 {"abs_g1_s", 0,
2388 0, /* adr_type */
2389 0,
2390 BFD_RELOC_AARCH64_MOVW_G1_S,
2391 0,
2392 0,
2393 0},
2394
2395 /* Less significant bits 16-31 of address/value: MOVK, no check */
2396 {"abs_g1_nc", 0,
2397 0, /* adr_type */
2398 0,
2399 BFD_RELOC_AARCH64_MOVW_G1_NC,
2400 0,
2401 0,
2402 0},
2403
2404 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2405 {"abs_g2", 0,
2406 0, /* adr_type */
2407 0,
2408 BFD_RELOC_AARCH64_MOVW_G2,
2409 0,
2410 0,
2411 0},
2412
2413 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2414 {"abs_g2_s", 0,
2415 0, /* adr_type */
2416 0,
2417 BFD_RELOC_AARCH64_MOVW_G2_S,
2418 0,
2419 0,
2420 0},
2421
2422 /* Less significant bits 32-47 of address/value: MOVK, no check */
2423 {"abs_g2_nc", 0,
2424 0, /* adr_type */
2425 0,
2426 BFD_RELOC_AARCH64_MOVW_G2_NC,
2427 0,
2428 0,
2429 0},
2430
2431 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2432 {"abs_g3", 0,
2433 0, /* adr_type */
2434 0,
2435 BFD_RELOC_AARCH64_MOVW_G3,
2436 0,
2437 0,
2438 0},
2439
2440 /* Get to the page containing GOT entry for a symbol. */
2441 {"got", 1,
2442 0, /* adr_type */
2443 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2444 0,
2445 0,
2446 0,
2447 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2448
2449 /* 12 bit offset into the page containing GOT entry for that symbol. */
2450 {"got_lo12", 0,
2451 0, /* adr_type */
2452 0,
2453 0,
2454 0,
2455 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2456 0},
2457
2458 /* Get to the page containing GOT TLS entry for a symbol */
2459 {"tlsgd", 0,
2460 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2461 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2462 0,
2463 0,
2464 0,
2465 0},
2466
2467 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2468 {"tlsgd_lo12", 0,
2469 0, /* adr_type */
2470 0,
2471 0,
2472 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2473 0,
2474 0},
2475
2476 /* Get to the page containing GOT TLS entry for a symbol */
2477 {"tlsdesc", 0,
2478 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2479 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2480 0,
2481 0,
2482 0,
2483 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2484
2485 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2486 {"tlsdesc_lo12", 0,
2487 0, /* adr_type */
2488 0,
2489 0,
2490 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2491 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2492 0},
2493
2494 /* Get to the page containing GOT TLS entry for a symbol */
2495 {"gottprel", 0,
2496 0, /* adr_type */
2497 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2498 0,
2499 0,
2500 0,
2501 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2502
2503 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2504 {"gottprel_lo12", 0,
2505 0, /* adr_type */
2506 0,
2507 0,
2508 0,
2509 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2510 0},
2511
2512 /* Get tp offset for a symbol. */
2513 {"tprel", 0,
2514 0, /* adr_type */
2515 0,
2516 0,
2517 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2518 0,
2519 0},
2520
2521 /* Get tp offset for a symbol. */
2522 {"tprel_lo12", 0,
2523 0, /* adr_type */
2524 0,
2525 0,
2526 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2527 0,
2528 0},
2529
2530 /* Get tp offset for a symbol. */
2531 {"tprel_hi12", 0,
2532 0, /* adr_type */
2533 0,
2534 0,
2535 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2536 0,
2537 0},
2538
2539 /* Get tp offset for a symbol. */
2540 {"tprel_lo12_nc", 0,
2541 0, /* adr_type */
2542 0,
2543 0,
2544 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2545 0,
2546 0},
2547
2548 /* Most significant bits 32-47 of address/value: MOVZ. */
2549 {"tprel_g2", 0,
2550 0, /* adr_type */
2551 0,
2552 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2553 0,
2554 0,
2555 0},
2556
2557 /* Most significant bits 16-31 of address/value: MOVZ. */
2558 {"tprel_g1", 0,
2559 0, /* adr_type */
2560 0,
2561 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2562 0,
2563 0,
2564 0},
2565
2566 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2567 {"tprel_g1_nc", 0,
2568 0, /* adr_type */
2569 0,
2570 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2571 0,
2572 0,
2573 0},
2574
2575 /* Most significant bits 0-15 of address/value: MOVZ. */
2576 {"tprel_g0", 0,
2577 0, /* adr_type */
2578 0,
2579 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2580 0,
2581 0,
2582 0},
2583
2584 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2585 {"tprel_g0_nc", 0,
2586 0, /* adr_type */
2587 0,
2588 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2589 0,
2590 0,
2591 0},
2592
2593   /* 15-bit offset from the GOT entry to the base address of the GOT table.  */
2594 {"gotpage_lo15", 0,
2595 0,
2596 0,
2597 0,
2598 0,
2599 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2600 0},
2601
2602   /* 14-bit offset from the GOT entry to the base address of the GOT table.  */
2603 {"gotpage_lo14", 0,
2604 0,
2605 0,
2606 0,
2607 0,
2608 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2609 0},
2610 };
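/* Illustrative uses of some of the modifiers above, as they might appear
   in assembler source (a sketch, not an exhaustive list):

     adrp  x0, :got:sym              // BFD_RELOC_AARCH64_ADR_GOT_PAGE
     ldr   x0, [x0, #:got_lo12:sym]  // BFD_RELOC_AARCH64_LD_GOT_LO12_NC
     movz  x1, #:abs_g1:sym          // BFD_RELOC_AARCH64_MOVW_G1
     movk  x1, #:abs_g0_nc:sym       // BFD_RELOC_AARCH64_MOVW_G0_NC
     add   x2, x2, #:lo12:sym        // BFD_RELOC_AARCH64_ADD_LO12  */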
2611
2612 /* Given the address of a pointer pointing to the textual name of a
2613 relocation as may appear in assembler source, attempt to find its
2614    details in reloc_table.  On success, the pointer will be updated to the
2615    character after the trailing colon and the matching reloc_table_entry
2616    will be returned; on failure, NULL will be returned.  */
2617
2618 static struct reloc_table_entry *
2619 find_reloc_table_entry (char **str)
2620 {
2621 unsigned int i;
2622 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2623 {
2624 int length = strlen (reloc_table[i].name);
2625
2626 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2627 && (*str)[length] == ':')
2628 {
2629 *str += (length + 1);
2630 return &reloc_table[i];
2631 }
2632 }
2633
2634 return NULL;
2635 }
2636
2637 /* Mode argument to parse_shift and parse_shifter_operand.  */
2638 enum parse_shift_mode
2639 {
2640 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2641 "#imm{,lsl #n}" */
2642 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2643 "#imm" */
2644 SHIFTED_LSL, /* bare "lsl #n" */
2645 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2646 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2647 };
2648
2649 /* Parse a <shift> operator on an AArch64 data processing instruction.
2650 Return TRUE on success; otherwise return FALSE. */
2651 static bfd_boolean
2652 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2653 {
2654 const struct aarch64_name_value_pair *shift_op;
2655 enum aarch64_modifier_kind kind;
2656 expressionS exp;
2657 int exp_has_prefix;
2658 char *s = *str;
2659 char *p = s;
2660
2661 for (p = *str; ISALPHA (*p); p++)
2662 ;
2663
2664 if (p == *str)
2665 {
2666 set_syntax_error (_("shift expression expected"));
2667 return FALSE;
2668 }
2669
2670 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2671
2672 if (shift_op == NULL)
2673 {
2674 set_syntax_error (_("shift operator expected"));
2675 return FALSE;
2676 }
2677
2678 kind = aarch64_get_operand_modifier (shift_op);
2679
2680 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2681 {
2682 set_syntax_error (_("invalid use of 'MSL'"));
2683 return FALSE;
2684 }
2685
2686 switch (mode)
2687 {
2688 case SHIFTED_LOGIC_IMM:
2689 if (aarch64_extend_operator_p (kind) == TRUE)
2690 {
2691 set_syntax_error (_("extending shift is not permitted"));
2692 return FALSE;
2693 }
2694 break;
2695
2696 case SHIFTED_ARITH_IMM:
2697 if (kind == AARCH64_MOD_ROR)
2698 {
2699 set_syntax_error (_("'ROR' shift is not permitted"));
2700 return FALSE;
2701 }
2702 break;
2703
2704 case SHIFTED_LSL:
2705 if (kind != AARCH64_MOD_LSL)
2706 {
2707 set_syntax_error (_("only 'LSL' shift is permitted"));
2708 return FALSE;
2709 }
2710 break;
2711
2712 case SHIFTED_REG_OFFSET:
2713 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2714 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2715 {
2716 set_fatal_syntax_error
2717 (_("invalid shift for the register offset addressing mode"));
2718 return FALSE;
2719 }
2720 break;
2721
2722 case SHIFTED_LSL_MSL:
2723 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2724 {
2725 set_syntax_error (_("invalid shift operator"));
2726 return FALSE;
2727 }
2728 break;
2729
2730 default:
2731 abort ();
2732 }
2733
2734 /* Whitespace can appear here if the next thing is a bare digit. */
2735 skip_whitespace (p);
2736
2737 /* Parse shift amount. */
2738 exp_has_prefix = 0;
2739 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2740 exp.X_op = O_absent;
2741 else
2742 {
2743 if (is_immediate_prefix (*p))
2744 {
2745 p++;
2746 exp_has_prefix = 1;
2747 }
2748 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2749 }
2750 if (exp.X_op == O_absent)
2751 {
2752 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2753 {
2754 set_syntax_error (_("missing shift amount"));
2755 return FALSE;
2756 }
2757 operand->shifter.amount = 0;
2758 }
2759 else if (exp.X_op != O_constant)
2760 {
2761 set_syntax_error (_("constant shift amount required"));
2762 return FALSE;
2763 }
2764 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2765 {
2766 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2767 return FALSE;
2768 }
2769 else
2770 {
2771 operand->shifter.amount = exp.X_add_number;
2772 operand->shifter.amount_present = 1;
2773 }
2774
2775 operand->shifter.operator_present = 1;
2776 operand->shifter.kind = kind;
2777
2778 *str = p;
2779 return TRUE;
2780 }
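/* Illustrative examples (not part of the original code): given the input
   "lsl #3" in SHIFTED_LSL mode, the code above sets shifter.kind to
   AARCH64_MOD_LSL and shifter.amount to 3; given "sxtw" in
   SHIFTED_REG_OFFSET mode with the amount omitted (next character ']'),
   shifter.amount is left as 0 and amount_present is not set.  */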
2781
2782 /* Parse a <shifter_operand> for a data processing instruction:
2783
2784 #<immediate>
2785 #<immediate>, LSL #imm
2786
2787 Validation of immediate operands is deferred to md_apply_fix.
2788
2789 Return TRUE on success; otherwise return FALSE. */
2790
2791 static bfd_boolean
2792 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2793 enum parse_shift_mode mode)
2794 {
2795 char *p;
2796
2797 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2798 return FALSE;
2799
2800 p = *str;
2801
2802 /* Accept an immediate expression. */
2803 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2804 return FALSE;
2805
2806 /* Accept optional LSL for arithmetic immediate values. */
2807 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2808 if (! parse_shift (&p, operand, SHIFTED_LSL))
2809 return FALSE;
2810
2811   /* Do not accept any shifter for logical immediate values.  */
2812 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2813 && parse_shift (&p, operand, mode))
2814 {
2815 set_syntax_error (_("unexpected shift operator"));
2816 return FALSE;
2817 }
2818
2819 *str = p;
2820 return TRUE;
2821 }
2822
2823 /* Parse a <shifter_operand> for a data processing instruction:
2824
2825 <Rm>
2826 <Rm>, <shift>
2827 #<immediate>
2828 #<immediate>, LSL #imm
2829
2830 where <shift> is handled by parse_shift above, and the last two
2831 cases are handled by the function above.
2832
2833 Validation of immediate operands is deferred to md_apply_fix.
2834
2835 Return TRUE on success; otherwise return FALSE. */
2836
2837 static bfd_boolean
2838 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2839 enum parse_shift_mode mode)
2840 {
2841 int reg;
2842 int isreg32, isregzero;
2843 enum aarch64_operand_class opd_class
2844 = aarch64_get_operand_class (operand->type);
2845
2846 if ((reg =
2847 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2848 {
2849 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2850 {
2851 set_syntax_error (_("unexpected register in the immediate operand"));
2852 return FALSE;
2853 }
2854
2855 if (!isregzero && reg == REG_SP)
2856 {
2857 set_syntax_error (BAD_SP);
2858 return FALSE;
2859 }
2860
2861 operand->reg.regno = reg;
2862 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2863
2864 /* Accept optional shift operation on register. */
2865 if (! skip_past_comma (str))
2866 return TRUE;
2867
2868 if (! parse_shift (str, operand, mode))
2869 return FALSE;
2870
2871 return TRUE;
2872 }
2873 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2874 {
2875 set_syntax_error
2876 (_("integer register expected in the extended/shifted operand "
2877 "register"));
2878 return FALSE;
2879 }
2880
2881 /* We have a shifted immediate variable. */
2882 return parse_shifter_operand_imm (str, operand, mode);
2883 }
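/* For illustration (a sketch based on the code above): "x2, lsl #4" parses
   as a shifted register operand (reg.regno == 2, X qualifier, LSL by 4),
   while for an immediate-class operand "#1, lsl #12" falls through to
   parse_shifter_operand_imm, which records the expression in
   inst.reloc.exp.  */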
2884
2885 /* Return TRUE on success; return FALSE otherwise. */
2886
2887 static bfd_boolean
2888 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2889 enum parse_shift_mode mode)
2890 {
2891 char *p = *str;
2892
2893 /* Determine if we have the sequence of characters #: or just :
2894 coming next. If we do, then we check for a :rello: relocation
2895 modifier. If we don't, punt the whole lot to
2896 parse_shifter_operand. */
2897
2898 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2899 {
2900 struct reloc_table_entry *entry;
2901
2902 if (p[0] == '#')
2903 p += 2;
2904 else
2905 p++;
2906 *str = p;
2907
2908 /* Try to parse a relocation. Anything else is an error. */
2909 if (!(entry = find_reloc_table_entry (str)))
2910 {
2911 set_syntax_error (_("unknown relocation modifier"));
2912 return FALSE;
2913 }
2914
2915 if (entry->add_type == 0)
2916 {
2917 set_syntax_error
2918 (_("this relocation modifier is not allowed on this instruction"));
2919 return FALSE;
2920 }
2921
2922 /* Save str before we decompose it. */
2923 p = *str;
2924
2925 /* Next, we parse the expression. */
2926 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2927 return FALSE;
2928
2929 /* Record the relocation type (use the ADD variant here). */
2930 inst.reloc.type = entry->add_type;
2931 inst.reloc.pc_rel = entry->pc_rel;
2932
2933 /* If str is empty, we've reached the end, stop here. */
2934 if (**str == '\0')
2935 return TRUE;
2936
2937 /* Otherwise, we have a shifted reloc modifier, so rewind to
2938 recover the variable name and continue parsing for the shifter. */
2939 *str = p;
2940 return parse_shifter_operand_imm (str, operand, mode);
2941 }
2942
2943 return parse_shifter_operand (str, operand, mode);
2944 }
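/* Example (illustrative): for "#:lo12:sym" the code above records
   BFD_RELOC_AARCH64_ADD_LO12 (the ADD variant) in inst.reloc.type and then
   parses "sym" as the expression; a plain "#1, lsl #12" bypasses the
   relocation handling and is passed on to parse_shifter_operand.  */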
2945
2946 /* Parse all forms of an address expression. Information is written
2947 to *OPERAND and/or inst.reloc.
2948
2949 The A64 instruction set has the following addressing modes:
2950
2951 Offset
2952 [base] // in SIMD ld/st structure
2953 [base{,#0}] // in ld/st exclusive
2954 [base{,#imm}]
2955 [base,Xm{,LSL #imm}]
2956 [base,Xm,SXTX {#imm}]
2957 [base,Wm,(S|U)XTW {#imm}]
2958 Pre-indexed
2959 [base,#imm]!
2960 Post-indexed
2961 [base],#imm
2962 [base],Xm // in SIMD ld/st structure
2963 PC-relative (literal)
2964 label
2965 =immediate
2966
2967 (As a convenience, the notation "=immediate" is permitted in conjunction
2968 with the pc-relative literal load instructions to automatically place an
2969 immediate value or symbolic address in a nearby literal pool and generate
2970 a hidden label which references it.)
2971
2972 Upon a successful parsing, the address structure in *OPERAND will be
2973 filled in the following way:
2974
2975 .base_regno = <base>
2976 .offset.is_reg // 1 if the offset is a register
2977 .offset.imm = <imm>
2978 .offset.regno = <Rm>
2979
2980 For different addressing modes defined in the A64 ISA:
2981
2982 Offset
2983 .pcrel=0; .preind=1; .postind=0; .writeback=0
2984 Pre-indexed
2985 .pcrel=0; .preind=1; .postind=0; .writeback=1
2986 Post-indexed
2987 .pcrel=0; .preind=0; .postind=1; .writeback=1
2988 PC-relative (literal)
2989 .pcrel=1; .preind=1; .postind=0; .writeback=0
2990
2991 The shift/extension information, if any, will be stored in .shifter.
2992
2993 It is the caller's responsibility to check for addressing modes not
2994 supported by the instruction, and to set inst.reloc.type. */
2995
2996 static bfd_boolean
2997 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2998 int accept_reg_post_index)
2999 {
3000 char *p = *str;
3001 int reg;
3002 int isreg32, isregzero;
3003 expressionS *exp = &inst.reloc.exp;
3004
3005 if (! skip_past_char (&p, '['))
3006 {
3007 /* =immediate or label. */
3008 operand->addr.pcrel = 1;
3009 operand->addr.preind = 1;
3010
3011 /* #:<reloc_op>:<symbol> */
3012 skip_past_char (&p, '#');
3013 if (reloc && skip_past_char (&p, ':'))
3014 {
3015 bfd_reloc_code_real_type ty;
3016 struct reloc_table_entry *entry;
3017
3018 /* Try to parse a relocation modifier. Anything else is
3019 an error. */
3020 entry = find_reloc_table_entry (&p);
3021 if (! entry)
3022 {
3023 set_syntax_error (_("unknown relocation modifier"));
3024 return FALSE;
3025 }
3026
3027 switch (operand->type)
3028 {
3029 case AARCH64_OPND_ADDR_PCREL21:
3030 /* adr */
3031 ty = entry->adr_type;
3032 break;
3033
3034 default:
3035 ty = entry->ld_literal_type;
3036 break;
3037 }
3038
3039 if (ty == 0)
3040 {
3041 set_syntax_error
3042 (_("this relocation modifier is not allowed on this "
3043 "instruction"));
3044 return FALSE;
3045 }
3046
3047 /* #:<reloc_op>: */
3048 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3049 {
3050 set_syntax_error (_("invalid relocation expression"));
3051 return FALSE;
3052 }
3053
3054 /* #:<reloc_op>:<expr> */
3055 /* Record the relocation type. */
3056 inst.reloc.type = ty;
3057 inst.reloc.pc_rel = entry->pc_rel;
3058 }
3059 else
3060 {
3061
3062 if (skip_past_char (&p, '='))
3063 /* =immediate; need to generate the literal in the literal pool. */
3064 inst.gen_lit_pool = 1;
3065
3066 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3067 {
3068 set_syntax_error (_("invalid address"));
3069 return FALSE;
3070 }
3071 }
3072
3073 *str = p;
3074 return TRUE;
3075 }
3076
3077 /* [ */
3078
3079 /* Accept SP and reject ZR */
3080 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
3081 if (reg == PARSE_FAIL || isreg32)
3082 {
3083 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3084 return FALSE;
3085 }
3086 operand->addr.base_regno = reg;
3087
3088 /* [Xn */
3089 if (skip_past_comma (&p))
3090 {
3091 /* [Xn, */
3092 operand->addr.preind = 1;
3093
3094 /* Reject SP and accept ZR */
3095 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3096 if (reg != PARSE_FAIL)
3097 {
3098 /* [Xn,Rm */
3099 operand->addr.offset.regno = reg;
3100 operand->addr.offset.is_reg = 1;
3101 /* Shifted index. */
3102 if (skip_past_comma (&p))
3103 {
3104 /* [Xn,Rm, */
3105 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3106		/* Use the diagnostics set in parse_shift, so do not set a new
3107		   error message here.  */
3108 return FALSE;
3109 }
3110 /* We only accept:
3111 [base,Xm{,LSL #imm}]
3112 [base,Xm,SXTX {#imm}]
3113 [base,Wm,(S|U)XTW {#imm}] */
3114 if (operand->shifter.kind == AARCH64_MOD_NONE
3115 || operand->shifter.kind == AARCH64_MOD_LSL
3116 || operand->shifter.kind == AARCH64_MOD_SXTX)
3117 {
3118 if (isreg32)
3119 {
3120 set_syntax_error (_("invalid use of 32-bit register offset"));
3121 return FALSE;
3122 }
3123 }
3124 else if (!isreg32)
3125 {
3126 set_syntax_error (_("invalid use of 64-bit register offset"));
3127 return FALSE;
3128 }
3129 }
3130 else
3131 {
3132 /* [Xn,#:<reloc_op>:<symbol> */
3133 skip_past_char (&p, '#');
3134 if (reloc && skip_past_char (&p, ':'))
3135 {
3136 struct reloc_table_entry *entry;
3137
3138 /* Try to parse a relocation modifier. Anything else is
3139 an error. */
3140 if (!(entry = find_reloc_table_entry (&p)))
3141 {
3142 set_syntax_error (_("unknown relocation modifier"));
3143 return FALSE;
3144 }
3145
3146 if (entry->ldst_type == 0)
3147 {
3148 set_syntax_error
3149 (_("this relocation modifier is not allowed on this "
3150 "instruction"));
3151 return FALSE;
3152 }
3153
3154 /* [Xn,#:<reloc_op>: */
3155 /* We now have the group relocation table entry corresponding to
3156 the name in the assembler source. Next, we parse the
3157 expression. */
3158 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3159 {
3160 set_syntax_error (_("invalid relocation expression"));
3161 return FALSE;
3162 }
3163
3164 /* [Xn,#:<reloc_op>:<expr> */
3165 /* Record the load/store relocation type. */
3166 inst.reloc.type = entry->ldst_type;
3167 inst.reloc.pc_rel = entry->pc_rel;
3168 }
3169 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3170 {
3171 set_syntax_error (_("invalid expression in the address"));
3172 return FALSE;
3173 }
3174 /* [Xn,<expr> */
3175 }
3176 }
3177
3178 if (! skip_past_char (&p, ']'))
3179 {
3180 set_syntax_error (_("']' expected"));
3181 return FALSE;
3182 }
3183
3184 if (skip_past_char (&p, '!'))
3185 {
3186 if (operand->addr.preind && operand->addr.offset.is_reg)
3187 {
3188 set_syntax_error (_("register offset not allowed in pre-indexed "
3189 "addressing mode"));
3190 return FALSE;
3191 }
3192 /* [Xn]! */
3193 operand->addr.writeback = 1;
3194 }
3195 else if (skip_past_comma (&p))
3196 {
3197 /* [Xn], */
3198 operand->addr.postind = 1;
3199 operand->addr.writeback = 1;
3200
3201 if (operand->addr.preind)
3202 {
3203 set_syntax_error (_("cannot combine pre- and post-indexing"));
3204 return FALSE;
3205 }
3206
3207 if (accept_reg_post_index
3208 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3209 &isregzero)) != PARSE_FAIL)
3210 {
3211 /* [Xn],Xm */
3212 if (isreg32)
3213 {
3214 set_syntax_error (_("invalid 32-bit register offset"));
3215 return FALSE;
3216 }
3217 operand->addr.offset.regno = reg;
3218 operand->addr.offset.is_reg = 1;
3219 }
3220 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3221 {
3222 /* [Xn],#expr */
3223 set_syntax_error (_("invalid expression in the address"));
3224 return FALSE;
3225 }
3226 }
3227
3228 /* If at this point neither .preind nor .postind is set, we have a
3229 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3230 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3231 {
3232 if (operand->addr.writeback)
3233 {
3234 /* Reject [Rn]! */
3235 set_syntax_error (_("missing offset in the pre-indexed address"));
3236 return FALSE;
3237 }
3238 operand->addr.preind = 1;
3239 inst.reloc.exp.X_op = O_constant;
3240 inst.reloc.exp.X_add_number = 0;
3241 }
3242
3243 *str = p;
3244 return TRUE;
3245 }
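/* Worked example (illustrative): for "[x1, x2, lsl #3]" the code above sets
   addr.base_regno = 1, addr.preind = 1, addr.offset.regno = 2,
   addr.offset.is_reg = 1 and an LSL #3 shifter; for a bare "[x3]" it falls
   through to the final block and synthesizes an immediate offset of #0.  */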
3246
3247 /* Return TRUE on success; otherwise return FALSE. */
3248 static bfd_boolean
3249 parse_address (char **str, aarch64_opnd_info *operand,
3250 int accept_reg_post_index)
3251 {
3252 return parse_address_main (str, operand, 0, accept_reg_post_index);
3253 }
3254
3255 /* Return TRUE on success; otherwise return FALSE. */
3256 static bfd_boolean
3257 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3258 {
3259 return parse_address_main (str, operand, 1, 0);
3260 }
3261
3262 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3263 Return TRUE on success; otherwise return FALSE. */
3264 static bfd_boolean
3265 parse_half (char **str, int *internal_fixup_p)
3266 {
3267 char *p, *saved;
3268 int dummy;
3269
3270 p = *str;
3271 skip_past_char (&p, '#');
3272
3273 gas_assert (internal_fixup_p);
3274 *internal_fixup_p = 0;
3275
3276 if (*p == ':')
3277 {
3278 struct reloc_table_entry *entry;
3279
3280 /* Try to parse a relocation. Anything else is an error. */
3281 ++p;
3282 if (!(entry = find_reloc_table_entry (&p)))
3283 {
3284 set_syntax_error (_("unknown relocation modifier"));
3285 return FALSE;
3286 }
3287
3288 if (entry->movw_type == 0)
3289 {
3290 set_syntax_error
3291 (_("this relocation modifier is not allowed on this instruction"));
3292 return FALSE;
3293 }
3294
3295 inst.reloc.type = entry->movw_type;
3296 }
3297 else
3298 *internal_fixup_p = 1;
3299
3300 /* Avoid parsing a register as a general symbol. */
3301 saved = p;
3302 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3303 return FALSE;
3304 p = saved;
3305
3306 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3307 return FALSE;
3308
3309 *str = p;
3310 return TRUE;
3311 }
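/* Examples (illustrative): an operand written "#:abs_g1:sym" makes the code
   above record BFD_RELOC_AARCH64_MOVW_G1 in inst.reloc.type, while a plain
   "#16" leaves the relocation type alone and sets *INTERNAL_FIXUP_P
   instead.  */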
3312
3313 /* Parse an operand for an ADRP instruction:
3314 ADRP <Xd>, <label>
3315 Return TRUE on success; otherwise return FALSE. */
3316
3317 static bfd_boolean
3318 parse_adrp (char **str)
3319 {
3320 char *p;
3321
3322 p = *str;
3323 if (*p == ':')
3324 {
3325 struct reloc_table_entry *entry;
3326
3327 /* Try to parse a relocation. Anything else is an error. */
3328 ++p;
3329 if (!(entry = find_reloc_table_entry (&p)))
3330 {
3331 set_syntax_error (_("unknown relocation modifier"));
3332 return FALSE;
3333 }
3334
3335 if (entry->adrp_type == 0)
3336 {
3337 set_syntax_error
3338 (_("this relocation modifier is not allowed on this instruction"));
3339 return FALSE;
3340 }
3341
3342 inst.reloc.type = entry->adrp_type;
3343 }
3344 else
3345 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3346
3347 inst.reloc.pc_rel = 1;
3348
3349 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3350 return FALSE;
3351
3352 *str = p;
3353 return TRUE;
3354 }
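/* Examples (illustrative): the operand "sym" defaults to
   BFD_RELOC_AARCH64_ADR_HI21_PCREL, while ":got:sym" selects the adrp_type
   of the "got" entry, i.e. BFD_RELOC_AARCH64_ADR_GOT_PAGE; in both cases
   inst.reloc.pc_rel is set to 1.  */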
3355
3356 /* Miscellaneous. */
3357
3358 /* Parse an option for a preload instruction. Returns the encoding for the
3359 option, or PARSE_FAIL. */
3360
3361 static int
3362 parse_pldop (char **str)
3363 {
3364 char *p, *q;
3365 const struct aarch64_name_value_pair *o;
3366
3367 p = q = *str;
3368 while (ISALNUM (*q))
3369 q++;
3370
3371 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3372 if (!o)
3373 return PARSE_FAIL;
3374
3375 *str = q;
3376 return o->value;
3377 }
3378
3379 /* Parse an option for a barrier instruction. Returns the encoding for the
3380 option, or PARSE_FAIL. */
3381
3382 static int
3383 parse_barrier (char **str)
3384 {
3385 char *p, *q;
3386 const asm_barrier_opt *o;
3387
3388 p = q = *str;
3389 while (ISALPHA (*q))
3390 q++;
3391
3392 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3393 if (!o)
3394 return PARSE_FAIL;
3395
3396 *str = q;
3397 return o->value;
3398 }
3399
3400 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3401 Returns the encoding for the option, or PARSE_FAIL.
3402
3403 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3404    implementation-defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
3405
3406 static int
3407 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3408 {
3409 char *p, *q;
3410 char buf[32];
3411 const aarch64_sys_reg *o;
3412 int value;
3413
3414 p = buf;
3415 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3416 if (p < buf + 31)
3417 *p++ = TOLOWER (*q);
3418 *p = '\0';
3419   /* Assert that BUF is large enough.  */
3420 gas_assert (p - buf == q - *str);
3421
3422 o = hash_find (sys_regs, buf);
3423 if (!o)
3424 {
3425 if (!imple_defined_p)
3426 return PARSE_FAIL;
3427 else
3428 {
3429 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3430 unsigned int op0, op1, cn, cm, op2;
3431
3432 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3433 != 5)
3434 return PARSE_FAIL;
3435 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3436 return PARSE_FAIL;
3437 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3438 }
3439 }
3440 else
3441 {
3442 if (aarch64_sys_reg_deprecated_p (o))
3443 as_warn (_("system register name '%s' is deprecated and may be "
3444 "removed in a future release"), buf);
3445 value = o->value;
3446 }
3447
3448 *str = q;
3449 return value;
3450 }
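/* Worked example (illustrative): the implementation-defined name
   "s3_0_c15_c2_0" is parsed by the sscanf fallback above into
   op0=3 op1=0 Cn=15 Cm=2 op2=0 and encoded as
   (3 << 14) | (0 << 11) | (15 << 7) | (2 << 3) | 0 == 0xc790.  */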
3451
3452 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3453 for the option, or NULL. */
3454
3455 static const aarch64_sys_ins_reg *
3456 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3457 {
3458 char *p, *q;
3459 char buf[32];
3460 const aarch64_sys_ins_reg *o;
3461
3462 p = buf;
3463 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3464 if (p < buf + 31)
3465 *p++ = TOLOWER (*q);
3466 *p = '\0';
3467
3468 o = hash_find (sys_ins_regs, buf);
3469 if (!o)
3470 return NULL;
3471
3472 *str = q;
3473 return o;
3474 }
3475 \f
3476 #define po_char_or_fail(chr) do { \
3477 if (! skip_past_char (&str, chr)) \
3478 goto failure; \
3479 } while (0)
3480
3481 #define po_reg_or_fail(regtype) do { \
3482 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3483 if (val == PARSE_FAIL) \
3484 { \
3485 set_default_error (); \
3486 goto failure; \
3487 } \
3488 } while (0)
3489
3490 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3491 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3492 &isreg32, &isregzero); \
3493 if (val == PARSE_FAIL) \
3494 { \
3495 set_default_error (); \
3496 goto failure; \
3497 } \
3498 info->reg.regno = val; \
3499 if (isreg32) \
3500 info->qualifier = AARCH64_OPND_QLF_W; \
3501 else \
3502 info->qualifier = AARCH64_OPND_QLF_X; \
3503 } while (0)
3504
3505 #define po_imm_nc_or_fail() do { \
3506 if (! parse_constant_immediate (&str, &val)) \
3507 goto failure; \
3508 } while (0)
3509
3510 #define po_imm_or_fail(min, max) do { \
3511 if (! parse_constant_immediate (&str, &val)) \
3512 goto failure; \
3513 if (val < min || val > max) \
3514 { \
3515 set_fatal_syntax_error (_("immediate value out of range "\
3516 #min " to "#max)); \
3517 goto failure; \
3518 } \
3519 } while (0)
3520
3521 #define po_misc_or_fail(expr) do { \
3522 if (!expr) \
3523 goto failure; \
3524 } while (0)
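/* Usage sketch (illustrative, assuming the conventions of the operand
   parsing code further below, e.g. parse_operands): each macro expands
   inside a function that provides a local "str" pointer, "val"/"info" and
   isreg32/isregzero temporaries as appropriate, and a "failure" label:

     po_char_or_fail ('[');
     po_int_reg_or_fail (1, 0);   // reject SP, accept ZR
     po_char_or_fail (']');

   so a parse error simply branches to the shared failure handling.  */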
3525 \f
3526 /* encode the 12-bit imm field of Add/sub immediate */
3527 static inline uint32_t
3528 encode_addsub_imm (uint32_t imm)
3529 {
3530 return imm << 10;
3531 }
3532
3533 /* encode the shift amount field of Add/sub immediate */
3534 static inline uint32_t
3535 encode_addsub_imm_shift_amount (uint32_t cnt)
3536 {
3537 return cnt << 22;
3538 }
3539
3540
3541 /* encode the imm field of Adr instruction */
3542 static inline uint32_t
3543 encode_adr_imm (uint32_t imm)
3544 {
3545 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3546 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3547 }
3548
3549 /* encode the immediate field of Move wide immediate */
3550 static inline uint32_t
3551 encode_movw_imm (uint32_t imm)
3552 {
3553 return imm << 5;
3554 }
3555
3556 /* encode the 26-bit offset of unconditional branch */
3557 static inline uint32_t
3558 encode_branch_ofs_26 (uint32_t ofs)
3559 {
3560 return ofs & ((1 << 26) - 1);
3561 }
3562
3563 /* encode the 19-bit offset of conditional branch and compare & branch */
3564 static inline uint32_t
3565 encode_cond_branch_ofs_19 (uint32_t ofs)
3566 {
3567 return (ofs & ((1 << 19) - 1)) << 5;
3568 }
3569
3570 /* encode the 19-bit offset of ld literal */
3571 static inline uint32_t
3572 encode_ld_lit_ofs_19 (uint32_t ofs)
3573 {
3574 return (ofs & ((1 << 19) - 1)) << 5;
3575 }
3576
3577 /* Encode the 14-bit offset of test & branch. */
3578 static inline uint32_t
3579 encode_tst_branch_ofs_14 (uint32_t ofs)
3580 {
3581 return (ofs & ((1 << 14) - 1)) << 5;
3582 }
3583
3584 /* Encode the 16-bit imm field of svc/hvc/smc. */
3585 static inline uint32_t
3586 encode_svc_imm (uint32_t imm)
3587 {
3588 return imm << 5;
3589 }
3590
3591 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3592 static inline uint32_t
3593 reencode_addsub_switch_add_sub (uint32_t opcode)
3594 {
3595 return opcode ^ (1 << 30);
3596 }
3597
3598 static inline uint32_t
3599 reencode_movzn_to_movz (uint32_t opcode)
3600 {
3601 return opcode | (1 << 30);
3602 }
3603
3604 static inline uint32_t
3605 reencode_movzn_to_movn (uint32_t opcode)
3606 {
3607 return opcode & ~(1 << 30);
3608 }
3609
3610 /* Overall per-instruction processing. */
3611
3612 /* We need to be able to fix up arbitrary expressions in some statements.
3613 This is so that we can handle symbols that are an arbitrary distance from
3614 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3615 which returns part of an address in a form which will be valid for
3616 a data instruction. We do this by pushing the expression into a symbol
3617 in the expr_section, and creating a fix for that. */
3618
3619 static fixS *
3620 fix_new_aarch64 (fragS * frag,
3621 int where,
3622 short int size, expressionS * exp, int pc_rel, int reloc)
3623 {
3624 fixS *new_fix;
3625
3626 switch (exp->X_op)
3627 {
3628 case O_constant:
3629 case O_symbol:
3630 case O_add:
3631 case O_subtract:
3632 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3633 break;
3634
3635 default:
3636 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3637 pc_rel, reloc);
3638 break;
3639 }
3640 return new_fix;
3641 }
3642 \f
3643 /* Diagnostics on operands errors. */
3644
3645 /* By default, output a verbose error message.
3646    The verbose error message can be disabled by -mno-verbose-error.  */
3647 static int verbose_error_p = 1;
3648
3649 #ifdef DEBUG_AARCH64
3650 /* N.B. this is only for the purpose of debugging. */
3651 const char* operand_mismatch_kind_names[] =
3652 {
3653 "AARCH64_OPDE_NIL",
3654 "AARCH64_OPDE_RECOVERABLE",
3655 "AARCH64_OPDE_SYNTAX_ERROR",
3656 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3657 "AARCH64_OPDE_INVALID_VARIANT",
3658 "AARCH64_OPDE_OUT_OF_RANGE",
3659 "AARCH64_OPDE_UNALIGNED",
3660 "AARCH64_OPDE_REG_LIST",
3661 "AARCH64_OPDE_OTHER_ERROR",
3662 };
3663 #endif /* DEBUG_AARCH64 */
3664
3665 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3666
3667 When multiple errors of different kinds are found in the same assembly
3668 line, only the error of the highest severity will be picked up for
3669 issuing the diagnostics. */
3670
3671 static inline bfd_boolean
3672 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3673 enum aarch64_operand_error_kind rhs)
3674 {
3675 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3676 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3677 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3678 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3679 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3680 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3681 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3682 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3683 return lhs > rhs;
3684 }
3685
3686 /* Helper routine to get the mnemonic name from the assembly instruction
3687    line; it should only be called for diagnostic purposes, as a string
3688    copy operation is involved, which may affect the runtime
3689    performance if used elsewhere.  */
3690
3691 static const char*
3692 get_mnemonic_name (const char *str)
3693 {
3694 static char mnemonic[32];
3695 char *ptr;
3696
3697   /* Get the first 31 bytes and assume that the full name is included.  */
3698 strncpy (mnemonic, str, 31);
3699 mnemonic[31] = '\0';
3700
3701 /* Scan up to the end of the mnemonic, which must end in white space,
3702 '.', or end of string. */
3703 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3704 ;
3705
3706 *ptr = '\0';
3707
3708 /* Append '...' to the truncated long name. */
3709 if (ptr - mnemonic == 31)
3710 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3711
3712 return mnemonic;
3713 }
3714
3715 static void
3716 reset_aarch64_instruction (aarch64_instruction *instruction)
3717 {
3718 memset (instruction, '\0', sizeof (aarch64_instruction));
3719 instruction->reloc.type = BFD_RELOC_UNUSED;
3720 }
3721
3722 /* Data structure storing one user error in the assembly code related to
3723    operands.  */
3724
3725 struct operand_error_record
3726 {
3727 const aarch64_opcode *opcode;
3728 aarch64_operand_error detail;
3729 struct operand_error_record *next;
3730 };
3731
3732 typedef struct operand_error_record operand_error_record;
3733
3734 struct operand_errors
3735 {
3736 operand_error_record *head;
3737 operand_error_record *tail;
3738 };
3739
3740 typedef struct operand_errors operand_errors;
3741
3742 /* Top-level data structure reporting user errors for the current line of
3743 the assembly code.
3744 The way md_assemble works is that all opcodes sharing the same mnemonic
3745 name are iterated to find a match to the assembly line. In this data
3746    structure, each such opcode will have one operand_error_record
3747    allocated and inserted.  In other words, excessive errors related to
3748 a single opcode are disregarded. */
3749 operand_errors operand_error_report;
3750
3751 /* Free record nodes. */
3752 static operand_error_record *free_opnd_error_record_nodes = NULL;
3753
3754 /* Initialize the data structure that stores the operand mismatch
3755 information on assembling one line of the assembly code. */
3756 static void
3757 init_operand_error_report (void)
3758 {
3759 if (operand_error_report.head != NULL)
3760 {
3761 gas_assert (operand_error_report.tail != NULL);
3762 operand_error_report.tail->next = free_opnd_error_record_nodes;
3763 free_opnd_error_record_nodes = operand_error_report.head;
3764 operand_error_report.head = NULL;
3765 operand_error_report.tail = NULL;
3766 return;
3767 }
3768 gas_assert (operand_error_report.tail == NULL);
3769 }
3770
3771 /* Return TRUE if some operand error has been recorded during the
3772 parsing of the current assembly line using the opcode *OPCODE;
3773 otherwise return FALSE. */
3774 static inline bfd_boolean
3775 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3776 {
3777 operand_error_record *record = operand_error_report.head;
3778 return record && record->opcode == opcode;
3779 }
3780
3781 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3782 OPCODE field is initialized with OPCODE.
3783    N.B. only one record for each opcode, i.e. a maximum of one error is
3784 recorded for each instruction template. */
3785
3786 static void
3787 add_operand_error_record (const operand_error_record* new_record)
3788 {
3789 const aarch64_opcode *opcode = new_record->opcode;
3790 operand_error_record* record = operand_error_report.head;
3791
3792 /* The record may have been created for this opcode. If not, we need
3793 to prepare one. */
3794 if (! opcode_has_operand_error_p (opcode))
3795 {
3796 /* Get one empty record. */
3797 if (free_opnd_error_record_nodes == NULL)
3798 {
3799 record = xmalloc (sizeof (operand_error_record));
3800 if (record == NULL)
3801 abort ();
3802 }
3803 else
3804 {
3805 record = free_opnd_error_record_nodes;
3806 free_opnd_error_record_nodes = record->next;
3807 }
3808 record->opcode = opcode;
3809 /* Insert at the head. */
3810 record->next = operand_error_report.head;
3811 operand_error_report.head = record;
3812 if (operand_error_report.tail == NULL)
3813 operand_error_report.tail = record;
3814 }
3815 else if (record->detail.kind != AARCH64_OPDE_NIL
3816 && record->detail.index <= new_record->detail.index
3817 && operand_error_higher_severity_p (record->detail.kind,
3818 new_record->detail.kind))
3819 {
3820       /* In the case of multiple errors found on operands related to a
3821 single opcode, only record the error of the leftmost operand and
3822 only if the error is of higher severity. */
3823 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3824 " the existing error %s on operand %d",
3825 operand_mismatch_kind_names[new_record->detail.kind],
3826 new_record->detail.index,
3827 operand_mismatch_kind_names[record->detail.kind],
3828 record->detail.index);
3829 return;
3830 }
3831
3832 record->detail = new_record->detail;
3833 }
3834
3835 static inline void
3836 record_operand_error_info (const aarch64_opcode *opcode,
3837 aarch64_operand_error *error_info)
3838 {
3839 operand_error_record record;
3840 record.opcode = opcode;
3841 record.detail = *error_info;
3842 add_operand_error_record (&record);
3843 }
3844
3845 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3846    error message *ERROR, for operand IDX (counting from 0).  */
3847
3848 static void
3849 record_operand_error (const aarch64_opcode *opcode, int idx,
3850 enum aarch64_operand_error_kind kind,
3851 const char* error)
3852 {
3853 aarch64_operand_error info;
3854 memset(&info, 0, sizeof (info));
3855 info.index = idx;
3856 info.kind = kind;
3857 info.error = error;
3858 record_operand_error_info (opcode, &info);
3859 }
3860
3861 static void
3862 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3863 enum aarch64_operand_error_kind kind,
3864 const char* error, const int *extra_data)
3865 {
3866 aarch64_operand_error info;
3867 info.index = idx;
3868 info.kind = kind;
3869 info.error = error;
3870 info.data[0] = extra_data[0];
3871 info.data[1] = extra_data[1];
3872 info.data[2] = extra_data[2];
3873 record_operand_error_info (opcode, &info);
3874 }
3875
3876 static void
3877 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3878 const char* error, int lower_bound,
3879 int upper_bound)
3880 {
3881 int data[3] = {lower_bound, upper_bound, 0};
3882 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3883 error, data);
3884 }
3885
3886 /* Remove the operand error record for *OPCODE. */
3887 static void ATTRIBUTE_UNUSED
3888 remove_operand_error_record (const aarch64_opcode *opcode)
3889 {
3890 if (opcode_has_operand_error_p (opcode))
3891 {
3892 operand_error_record* record = operand_error_report.head;
3893 gas_assert (record != NULL && operand_error_report.tail != NULL);
3894 operand_error_report.head = record->next;
3895 record->next = free_opnd_error_record_nodes;
3896 free_opnd_error_record_nodes = record;
3897 if (operand_error_report.head == NULL)
3898 {
3899 gas_assert (operand_error_report.tail == record);
3900 operand_error_report.tail = NULL;
3901 }
3902 }
3903 }
3904
3905 /* Given the instruction in *INSTR, return the index of the best matched
3906 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3907
3908 Return -1 if there is no qualifier sequence; return the first match
3909    if multiple matches are found.  */
3910
3911 static int
3912 find_best_match (const aarch64_inst *instr,
3913 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3914 {
3915 int i, num_opnds, max_num_matched, idx;
3916
3917 num_opnds = aarch64_num_of_operands (instr->opcode);
3918 if (num_opnds == 0)
3919 {
3920 DEBUG_TRACE ("no operand");
3921 return -1;
3922 }
3923
3924 max_num_matched = 0;
3925 idx = -1;
3926
3927 /* For each pattern. */
3928 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3929 {
3930 int j, num_matched;
3931 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3932
3933       /* Most opcodes have much fewer patterns in the list.  */
3934 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3935 {
3936 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3937 if (i != 0 && idx == -1)
3938 /* If nothing has been matched, return the 1st sequence. */
3939 idx = 0;
3940 break;
3941 }
3942
3943 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3944 if (*qualifiers == instr->operands[j].qualifier)
3945 ++num_matched;
3946
3947 if (num_matched > max_num_matched)
3948 {
3949 max_num_matched = num_matched;
3950 idx = i;
3951 }
3952 }
3953
3954 DEBUG_TRACE ("return with %d", idx);
3955 return idx;
3956 }
3957
3958 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3959 corresponding operands in *INSTR. */
3960
3961 static inline void
3962 assign_qualifier_sequence (aarch64_inst *instr,
3963 const aarch64_opnd_qualifier_t *qualifiers)
3964 {
3965 int i = 0;
3966 int num_opnds = aarch64_num_of_operands (instr->opcode);
3967 gas_assert (num_opnds);
3968 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3969 instr->operands[i].qualifier = *qualifiers;
3970 }
3971
3972 /* Print operands for the diagnosis purpose. */
3973
3974 static void
3975 print_operands (char *buf, const aarch64_opcode *opcode,
3976 const aarch64_opnd_info *opnds)
3977 {
3978 int i;
3979
3980 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3981 {
3982 const size_t size = 128;
3983 char str[size];
3984
3985       /* We rely primarily on the opcode operand info; however, we also look
3986	  into inst->operands to support the printing of an optional
3987	  operand.
3988	  The two operand codes should be the same in all cases, apart from
3989	  when the operand can be optional.  */
3990 if (opcode->operands[i] == AARCH64_OPND_NIL
3991 || opnds[i].type == AARCH64_OPND_NIL)
3992 break;
3993
3994 /* Generate the operand string in STR. */
3995 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3996
3997 /* Delimiter. */
3998 if (str[0] != '\0')
3999 strcat (buf, i == 0 ? " " : ",");
4000
4001 /* Append the operand string. */
4002 strcat (buf, str);
4003 }
4004 }
4005
4006 /* Send to stderr a string as information. */
4007
4008 static void
4009 output_info (const char *format, ...)
4010 {
4011 char *file;
4012 unsigned int line;
4013 va_list args;
4014
4015 as_where (&file, &line);
4016 if (file)
4017 {
4018 if (line != 0)
4019 fprintf (stderr, "%s:%u: ", file, line);
4020 else
4021 fprintf (stderr, "%s: ", file);
4022 }
4023 fprintf (stderr, _("Info: "));
4024 va_start (args, format);
4025 vfprintf (stderr, format, args);
4026 va_end (args);
4027 (void) putc ('\n', stderr);
4028 }
4029
4030 /* Output one operand error record. */
4031
4032 static void
4033 output_operand_error_record (const operand_error_record *record, char *str)
4034 {
4035 const aarch64_operand_error *detail = &record->detail;
4036 int idx = detail->index;
4037 const aarch64_opcode *opcode = record->opcode;
4038 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4039 : AARCH64_OPND_NIL);
4040
4041 switch (detail->kind)
4042 {
4043 case AARCH64_OPDE_NIL:
4044 gas_assert (0);
4045 break;
4046
4047 case AARCH64_OPDE_SYNTAX_ERROR:
4048 case AARCH64_OPDE_RECOVERABLE:
4049 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4050 case AARCH64_OPDE_OTHER_ERROR:
4051       /* Use the prepared error message if there is one; otherwise use the
4052 operand description string to describe the error. */
4053 if (detail->error != NULL)
4054 {
4055 if (idx < 0)
4056 as_bad (_("%s -- `%s'"), detail->error, str);
4057 else
4058 as_bad (_("%s at operand %d -- `%s'"),
4059 detail->error, idx + 1, str);
4060 }
4061 else
4062 {
4063 gas_assert (idx >= 0);
4064 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4065 aarch64_get_operand_desc (opd_code), str);
4066 }
4067 break;
4068
4069 case AARCH64_OPDE_INVALID_VARIANT:
4070 as_bad (_("operand mismatch -- `%s'"), str);
4071 if (verbose_error_p)
4072 {
4073 /* We will try to correct the erroneous instruction and also provide
4074 more information e.g. all other valid variants.
4075
4076 The string representation of the corrected instruction and other
4077 valid variants are generated by
4078
4079 1) obtaining the intermediate representation of the erroneous
4080 instruction;
4081 2) manipulating the IR, e.g. replacing the operand qualifier;
4082 3) printing out the instruction by calling the printer functions
4083 shared with the disassembler.
4084
4085 The limitation of this method is that the exact input assembly
4086 line cannot be accurately reproduced in some cases, for example an
4087 optional operand present in the actual assembly line will be
4088 omitted in the output; likewise for the optional syntax rules,
4089 e.g. the # before the immediate. Another limitation is that the
4090 assembly symbols and relocation operations in the assembly line
4091 currently cannot be printed out in the error report. Last but not
4092 least, when other errors co-exist with this one, the 'corrected'
4093 instruction may still be incorrect, e.g. given
4094 'ldnp h0,h1,[x0,#6]!'
4095 this diagnostic will suggest the version:
4096 'ldnp s0,s1,[x0,#6]!'
4097 which is still not right. */
4098 size_t len = strlen (get_mnemonic_name (str));
4099 int i, qlf_idx;
4100 bfd_boolean result;
4101 const size_t size = 2048;
4102 char buf[size];
4103 aarch64_inst *inst_base = &inst.base;
4104 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4105
4106 /* Init inst. */
4107 reset_aarch64_instruction (&inst);
4108 inst_base->opcode = opcode;
4109
4110 /* Reset the error report so that there is no side effect on the
4111 following operand parsing. */
4112 init_operand_error_report ();
4113
4114 /* Fill inst. */
4115 result = parse_operands (str + len, opcode)
4116 && programmer_friendly_fixup (&inst);
4117 gas_assert (result);
4118 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4119 NULL, NULL);
4120 gas_assert (!result);
4121
4122 /* Find the most matched qualifier sequence. */
4123 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4124 gas_assert (qlf_idx > -1);
4125
4126 /* Assign the qualifiers. */
4127 assign_qualifier_sequence (inst_base,
4128 opcode->qualifiers_list[qlf_idx]);
4129
4130 /* Print the hint. */
4131 output_info (_(" did you mean this?"));
4132 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4133 print_operands (buf, opcode, inst_base->operands);
4134 output_info (_(" %s"), buf);
4135
4136 /* Print out other variant(s) if there are any. */
4137 if (qlf_idx != 0 ||
4138 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4139 output_info (_(" other valid variant(s):"));
4140
4141 /* For each pattern. */
4142 qualifiers_list = opcode->qualifiers_list;
4143 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4144 {
4145 /* Most opcodes have far fewer patterns in the list.
4146 The first NIL qualifier indicates the end of the list. */
4147 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4148 break;
4149
4150 if (i != qlf_idx)
4151 {
4152 /* Mnemonic name. */
4153 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4154
4155 /* Assign the qualifiers. */
4156 assign_qualifier_sequence (inst_base, *qualifiers_list);
4157
4158 /* Print instruction. */
4159 print_operands (buf, opcode, inst_base->operands);
4160
4161 output_info (_(" %s"), buf);
4162 }
4163 }
4164 }
4165 break;
4166
4167 case AARCH64_OPDE_OUT_OF_RANGE:
4168 if (detail->data[0] != detail->data[1])
4169 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4170 detail->error ? detail->error : _("immediate value"),
4171 detail->data[0], detail->data[1], idx + 1, str);
4172 else
4173 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4174 detail->error ? detail->error : _("immediate value"),
4175 detail->data[0], idx + 1, str);
4176 break;
4177
4178 case AARCH64_OPDE_REG_LIST:
4179 if (detail->data[0] == 1)
4180 as_bad (_("invalid number of registers in the list; "
4181 "only 1 register is expected at operand %d -- `%s'"),
4182 idx + 1, str);
4183 else
4184 as_bad (_("invalid number of registers in the list; "
4185 "%d registers are expected at operand %d -- `%s'"),
4186 detail->data[0], idx + 1, str);
4187 break;
4188
4189 case AARCH64_OPDE_UNALIGNED:
4190 as_bad (_("immediate value should be a multiple of "
4191 "%d at operand %d -- `%s'"),
4192 detail->data[0], idx + 1, str);
4193 break;
4194
4195 default:
4196 gas_assert (0);
4197 break;
4198 }
4199 }
4200
4201 /* Process and output the error message about operand mismatches.
4202
4203 When this function is called, the operand error information has
4204 been collected for an assembly line and there will be multiple
4205 errors in the case of multiple instruction templates; output the
4206 error message that most closely describes the problem. */
4207
4208 static void
4209 output_operand_error_report (char *str)
4210 {
4211 int largest_error_pos;
4212 const char *msg = NULL;
4213 enum aarch64_operand_error_kind kind;
4214 operand_error_record *curr;
4215 operand_error_record *head = operand_error_report.head;
4216 operand_error_record *record = NULL;
4217
4218 /* No error to report. */
4219 if (head == NULL)
4220 return;
4221
4222 gas_assert (head != NULL && operand_error_report.tail != NULL);
4223
4224 /* Only one error. */
4225 if (head == operand_error_report.tail)
4226 {
4227 DEBUG_TRACE ("single opcode entry with error kind: %s",
4228 operand_mismatch_kind_names[head->detail.kind]);
4229 output_operand_error_record (head, str);
4230 return;
4231 }
4232
4233 /* Find the error kind of the highest severity. */
4234 DEBUG_TRACE ("multiple opcode entries with error kind");
4235 kind = AARCH64_OPDE_NIL;
4236 for (curr = head; curr != NULL; curr = curr->next)
4237 {
4238 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4239 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4240 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4241 kind = curr->detail.kind;
4242 }
4243 gas_assert (kind != AARCH64_OPDE_NIL);
4244
4245 /* Pick up one of the errors of KIND to report. */
4246 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4247 for (curr = head; curr != NULL; curr = curr->next)
4248 {
4249 if (curr->detail.kind != kind)
4250 continue;
4251 /* If there are multiple errors, pick up the one with the highest
4252 mismatching operand index. In the case of multiple errors with
4253 the same highest operand index, pick up the first one, or the
4254 first one with a non-NULL error message. */
4255 if (curr->detail.index > largest_error_pos
4256 || (curr->detail.index == largest_error_pos && msg == NULL
4257 && curr->detail.error != NULL))
4258 {
4259 largest_error_pos = curr->detail.index;
4260 record = curr;
4261 msg = record->detail.error;
4262 }
4263 }
4264
4265 gas_assert (largest_error_pos != -2 && record != NULL);
4266 DEBUG_TRACE ("Pick up error kind %s to report",
4267 operand_mismatch_kind_names[record->detail.kind]);
4268
4269 /* Output. */
4270 output_operand_error_record (record, str);
4271 }
4272 \f
4273 /* Write an AArch64 instruction to BUF - always little-endian. */
4274 static void
4275 put_aarch64_insn (char *buf, uint32_t insn)
4276 {
4277 unsigned char *where = (unsigned char *) buf;
4278 where[0] = insn;
4279 where[1] = insn >> 8;
4280 where[2] = insn >> 16;
4281 where[3] = insn >> 24;
4282 }
4283
4284 static uint32_t
4285 get_aarch64_insn (char *buf)
4286 {
4287 unsigned char *where = (unsigned char *) buf;
4288 uint32_t result;
4289 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4290 return result;
4291 }
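
/* Editorial illustration (not part of the original source): these two
   helpers always use a little-endian byte order regardless of the host.
   For the NOP encoding 0xd503201f, put_aarch64_insn stores the bytes

       buf[0] = 0x1f, buf[1] = 0x20, buf[2] = 0x03, buf[3] = 0xd5

   and get_aarch64_insn on the same buffer reassembles 0xd503201f, so the
   pair round-trips an instruction word exactly.  */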
4292
4293 static void
4294 output_inst (struct aarch64_inst *new_inst)
4295 {
4296 char *to = NULL;
4297
4298 to = frag_more (INSN_SIZE);
4299
4300 frag_now->tc_frag_data.recorded = 1;
4301
4302 put_aarch64_insn (to, inst.base.value);
4303
4304 if (inst.reloc.type != BFD_RELOC_UNUSED)
4305 {
4306 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4307 INSN_SIZE, &inst.reloc.exp,
4308 inst.reloc.pc_rel,
4309 inst.reloc.type);
4310 DEBUG_TRACE ("Prepared relocation fix up");
4311 /* Don't check the addend value against the instruction size,
4312 that's the job of our code in md_apply_fix(). */
4313 fixp->fx_no_overflow = 1;
4314 if (new_inst != NULL)
4315 fixp->tc_fix_data.inst = new_inst;
4316 if (aarch64_gas_internal_fixup_p ())
4317 {
4318 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4319 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4320 fixp->fx_addnumber = inst.reloc.flags;
4321 }
4322 }
4323
4324 dwarf2_emit_insn (INSN_SIZE);
4325 }
4326
4327 /* Link together opcodes of the same name. */
4328
4329 struct templates
4330 {
4331 aarch64_opcode *opcode;
4332 struct templates *next;
4333 };
4334
4335 typedef struct templates templates;
4336
4337 static templates *
4338 lookup_mnemonic (const char *start, int len)
4339 {
4340 templates *templ = NULL;
4341
4342 templ = hash_find_n (aarch64_ops_hsh, start, len);
4343 return templ;
4344 }
4345
4346 /* Subroutine of md_assemble, responsible for looking up the primary
4347 opcode from the mnemonic the user wrote. STR points to the
4348 beginning of the mnemonic. */
4349
4350 static templates *
4351 opcode_lookup (char **str)
4352 {
4353 char *end, *base;
4354 const aarch64_cond *cond;
4355 char condname[16];
4356 int len;
4357
4358 /* Scan up to the end of the mnemonic, which must end in white space,
4359 '.', or end of string. */
4360 for (base = end = *str; is_part_of_name(*end); end++)
4361 if (*end == '.')
4362 break;
4363
4364 if (end == base)
4365 return 0;
4366
4367 inst.cond = COND_ALWAYS;
4368
4369 /* Handle a possible condition. */
4370 if (end[0] == '.')
4371 {
4372 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4373 if (cond)
4374 {
4375 inst.cond = cond->value;
4376 *str = end + 3;
4377 }
4378 else
4379 {
4380 *str = end;
4381 return 0;
4382 }
4383 }
4384 else
4385 *str = end;
4386
4387 len = end - base;
4388
4389 if (inst.cond == COND_ALWAYS)
4390 {
4391 /* Look for unaffixed mnemonic. */
4392 return lookup_mnemonic (base, len);
4393 }
4394 else if (len <= 13)
4395 {
4396 /* Append ".c" to the mnemonic if conditional. */
4397 memcpy (condname, base, len);
4398 memcpy (condname + len, ".c", 2);
4399 base = condname;
4400 len += 2;
4401 return lookup_mnemonic (base, len);
4402 }
4403
4404 return NULL;
4405 }
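
/* Editorial illustration (assumption: conditional opcodes are stored in the
   mnemonic hash table under a ".c"-suffixed name, as the lookup code above
   implies).  For an input such as

       b.eq    label

   the two characters after the '.' are looked up in aarch64_cond_hsh, the
   EQ condition is remembered in inst.cond, and the opcode lookup is then
   performed with the key "b.c" (the base mnemonic plus ".c").  */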
4406
4407 /* Internal helper routine converting a vector neon_type_el structure
4408 *VECTYPE to a corresponding operand qualifier. */
4409
4410 static inline aarch64_opnd_qualifier_t
4411 vectype_to_qualifier (const struct neon_type_el *vectype)
4412 {
4413 /* Element size in bytes indexed by neon_el_type. */
4414 const unsigned char ele_size[5]
4415 = {1, 2, 4, 8, 16};
4416
4417 if (!vectype->defined || vectype->type == NT_invtype)
4418 goto vectype_conversion_fail;
4419
4420 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4421
4422 if (vectype->defined & NTA_HASINDEX)
4423 /* Vector element register. */
4424 return AARCH64_OPND_QLF_S_B + vectype->type;
4425 else
4426 {
4427 /* Vector register. */
4428 int reg_size = ele_size[vectype->type] * vectype->width;
4429 unsigned offset;
4430 if (reg_size != 16 && reg_size != 8)
4431 goto vectype_conversion_fail;
4432 /* The conversion relies on the order of the qualifiers mirroring the
4433 combinations of vector element size and vector register size. */
4434 offset = (vectype->type == NT_q)
4435 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4436 gas_assert (offset <= 8);
4437 return AARCH64_OPND_QLF_V_8B + offset;
4438 }
4439
4440 vectype_conversion_fail:
4441 first_error (_("bad vector arrangement type"));
4442 return AARCH64_OPND_QLF_NIL;
4443 }
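
/* Editorial worked example (assuming the AARCH64_OPND_QLF_V_* enumerators
   are laid out in the order 8B, 16B, 4H, 8H, 2S, 4S, 1D, 2D, ..., which is
   what the offset arithmetic above relies on): for the arrangement "4s" we
   have type == NT_s (2), width == 4 and an element size of 4 bytes, so
   reg_size == 16 and

       offset = (NT_s << 1) + (16 >> 4) = 4 + 1 = 5

   which maps AARCH64_OPND_QLF_V_8B + 5 to the 4S qualifier; "8b" likewise
   gives offset 0 and the 8B qualifier.  */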
4444
4445 /* Process an optional operand that has been omitted from the assembly line.
4446 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4447 instruction's opcode entry while IDX is the index of this omitted operand.
4448 */
4449
4450 static void
4451 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4452 int idx, aarch64_opnd_info *operand)
4453 {
4454 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4455 gas_assert (optional_operand_p (opcode, idx));
4456 gas_assert (!operand->present);
4457
4458 switch (type)
4459 {
4460 case AARCH64_OPND_Rd:
4461 case AARCH64_OPND_Rn:
4462 case AARCH64_OPND_Rm:
4463 case AARCH64_OPND_Rt:
4464 case AARCH64_OPND_Rt2:
4465 case AARCH64_OPND_Rs:
4466 case AARCH64_OPND_Ra:
4467 case AARCH64_OPND_Rt_SYS:
4468 case AARCH64_OPND_Rd_SP:
4469 case AARCH64_OPND_Rn_SP:
4470 case AARCH64_OPND_Fd:
4471 case AARCH64_OPND_Fn:
4472 case AARCH64_OPND_Fm:
4473 case AARCH64_OPND_Fa:
4474 case AARCH64_OPND_Ft:
4475 case AARCH64_OPND_Ft2:
4476 case AARCH64_OPND_Sd:
4477 case AARCH64_OPND_Sn:
4478 case AARCH64_OPND_Sm:
4479 case AARCH64_OPND_Vd:
4480 case AARCH64_OPND_Vn:
4481 case AARCH64_OPND_Vm:
4482 case AARCH64_OPND_VdD1:
4483 case AARCH64_OPND_VnD1:
4484 operand->reg.regno = default_value;
4485 break;
4486
4487 case AARCH64_OPND_Ed:
4488 case AARCH64_OPND_En:
4489 case AARCH64_OPND_Em:
4490 operand->reglane.regno = default_value;
4491 break;
4492
4493 case AARCH64_OPND_IDX:
4494 case AARCH64_OPND_BIT_NUM:
4495 case AARCH64_OPND_IMMR:
4496 case AARCH64_OPND_IMMS:
4497 case AARCH64_OPND_SHLL_IMM:
4498 case AARCH64_OPND_IMM_VLSL:
4499 case AARCH64_OPND_IMM_VLSR:
4500 case AARCH64_OPND_CCMP_IMM:
4501 case AARCH64_OPND_FBITS:
4502 case AARCH64_OPND_UIMM4:
4503 case AARCH64_OPND_UIMM3_OP1:
4504 case AARCH64_OPND_UIMM3_OP2:
4505 case AARCH64_OPND_IMM:
4506 case AARCH64_OPND_WIDTH:
4507 case AARCH64_OPND_UIMM7:
4508 case AARCH64_OPND_NZCV:
4509 operand->imm.value = default_value;
4510 break;
4511
4512 case AARCH64_OPND_EXCEPTION:
4513 inst.reloc.type = BFD_RELOC_UNUSED;
4514 break;
4515
4516 case AARCH64_OPND_BARRIER_ISB:
4517 operand->barrier = aarch64_barrier_options + default_value;
4518 break;
4519 default:
4520 break;
4521 }
4522 }
4523
4524 /* Process the relocation type for move wide instructions.
4525 Return TRUE on success; otherwise return FALSE. */
4526
4527 static bfd_boolean
4528 process_movw_reloc_info (void)
4529 {
4530 int is32;
4531 unsigned shift;
4532
4533 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4534
4535 if (inst.base.opcode->op == OP_MOVK)
4536 switch (inst.reloc.type)
4537 {
4538 case BFD_RELOC_AARCH64_MOVW_G0_S:
4539 case BFD_RELOC_AARCH64_MOVW_G1_S:
4540 case BFD_RELOC_AARCH64_MOVW_G2_S:
4541 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4542 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4543 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4544 set_syntax_error
4545 (_("the specified relocation type is not allowed for MOVK"));
4546 return FALSE;
4547 default:
4548 break;
4549 }
4550
4551 switch (inst.reloc.type)
4552 {
4553 case BFD_RELOC_AARCH64_MOVW_G0:
4554 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4555 case BFD_RELOC_AARCH64_MOVW_G0_S:
4556 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4557 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4558 shift = 0;
4559 break;
4560 case BFD_RELOC_AARCH64_MOVW_G1:
4561 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4562 case BFD_RELOC_AARCH64_MOVW_G1_S:
4563 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4564 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4565 shift = 16;
4566 break;
4567 case BFD_RELOC_AARCH64_MOVW_G2:
4568 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4569 case BFD_RELOC_AARCH64_MOVW_G2_S:
4570 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4571 if (is32)
4572 {
4573 set_fatal_syntax_error
4574 (_("the specified relocation type is not allowed for 32-bit "
4575 "register"));
4576 return FALSE;
4577 }
4578 shift = 32;
4579 break;
4580 case BFD_RELOC_AARCH64_MOVW_G3:
4581 if (is32)
4582 {
4583 set_fatal_syntax_error
4584 (_("the specified relocation type is not allowed for 32-bit "
4585 "register"));
4586 return FALSE;
4587 }
4588 shift = 48;
4589 break;
4590 default:
4591 /* More cases should be added when more MOVW-related relocation types
4592 are supported in GAS. */
4593 gas_assert (aarch64_gas_internal_fixup_p ());
4594 /* The shift amount should have already been set by the parser. */
4595 return TRUE;
4596 }
4597 inst.base.operands[1].shifter.amount = shift;
4598 return TRUE;
4599 }
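
/* Editorial illustration (the relocation specifier spelling below follows
   the usual GAS AArch64 syntax and is meant as an example only): for

       movz    x0, #:abs_g1:sym

   the parser records BFD_RELOC_AARCH64_MOVW_G1, and the function above
   sets the implicit shift amount of operand 1 to 16.  A G2 or G3 specifier
   on a 32-bit destination such as w0 would instead be rejected with the
   "not allowed for 32-bit register" error.  */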
4600
4601 /* A primitive log calculator. */
4602
4603 static inline unsigned int
4604 get_logsz (unsigned int size)
4605 {
4606 const unsigned char ls[16] =
4607 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4608 if (size > 16)
4609 {
4610 gas_assert (0);
4611 return -1;
4612 }
4613 gas_assert (ls[size - 1] != (unsigned char)-1);
4614 return ls[size - 1];
4615 }
4616
4617 /* Determine and return the real reloc type code for an instruction
4618 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4619
4620 static inline bfd_reloc_code_real_type
4621 ldst_lo12_determine_real_reloc_type (void)
4622 {
4623 int logsz;
4624 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4625 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4626
4627 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4628 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4629 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4630 BFD_RELOC_AARCH64_LDST128_LO12
4631 };
4632
4633 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4634 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4635
4636 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4637 opd1_qlf =
4638 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4639 1, opd0_qlf, 0);
4640 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4641
4642 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4643 gas_assert (logsz >= 0 && logsz <= 4);
4644
4645 return reloc_ldst_lo12[logsz];
4646 }
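
/* Editorial worked example (assuming the usual #:lo12: operand syntax):
   for

       ldr     w0, [x1, #:lo12:sym]

   operand 1 carries no explicit qualifier, so one is deduced from the W
   destination; its element size is 4, get_logsz (4) returns 2, and the
   pseudo reloc BFD_RELOC_AARCH64_LDST_LO12 is narrowed to
   BFD_RELOC_AARCH64_LDST32_LO12.  */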
4647
4648 /* Check whether a register list REGINFO is valid. The registers must be
4649 numbered in increasing order (modulo 32), in increments of one or two.
4650
4651 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4652 increments of two.
4653
4654 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4655
4656 static bfd_boolean
4657 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4658 {
4659 uint32_t i, nb_regs, prev_regno, incr;
4660
4661 nb_regs = 1 + (reginfo & 0x3);
4662 reginfo >>= 2;
4663 prev_regno = reginfo & 0x1f;
4664 incr = accept_alternate ? 2 : 1;
4665
4666 for (i = 1; i < nb_regs; ++i)
4667 {
4668 uint32_t curr_regno;
4669 reginfo >>= 5;
4670 curr_regno = reginfo & 0x1f;
4671 if (curr_regno != ((prev_regno + incr) & 0x1f))
4672 return FALSE;
4673 prev_regno = curr_regno;
4674 }
4675
4676 return TRUE;
4677 }
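
/* Editorial note on the REGINFO packing consumed above (the layout is
   inferred from how this function and parse_operands read the value; the
   producer is parse_neon_reg_list, which is defined elsewhere): bits [1:0]
   hold the register count minus one and each following 5-bit field holds a
   register number, the first one starting at bit 2.  Because the
   comparison is done modulo 32, a list such as

       { v31.8b, v0.8b, v1.8b }

   is accepted as consecutive, while { v0.8b, v2.8b } is rejected unless
   ACCEPT_ALTERNATE asks for increments of two.  */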
4678
4679 /* Generic instruction operand parser. This does no encoding and no
4680 semantic validation; it merely squirrels values away in the inst
4681 structure. Returns TRUE or FALSE depending on whether the
4682 specified grammar matched. */
4683
4684 static bfd_boolean
4685 parse_operands (char *str, const aarch64_opcode *opcode)
4686 {
4687 int i;
4688 char *backtrack_pos = 0;
4689 const enum aarch64_opnd *operands = opcode->operands;
4690
4691 clear_error ();
4692 skip_whitespace (str);
4693
4694 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4695 {
4696 int64_t val;
4697 int isreg32, isregzero;
4698 int comma_skipped_p = 0;
4699 aarch64_reg_type rtype;
4700 struct neon_type_el vectype;
4701 aarch64_opnd_info *info = &inst.base.operands[i];
4702
4703 DEBUG_TRACE ("parse operand %d", i);
4704
4705 /* Assign the operand code. */
4706 info->type = operands[i];
4707
4708 if (optional_operand_p (opcode, i))
4709 {
4710 /* Remember where we are in case we need to backtrack. */
4711 gas_assert (!backtrack_pos);
4712 backtrack_pos = str;
4713 }
4714
4715 /* Expect a comma between operands; the backtrack mechanism will take
4716 care of cases where an optional operand is omitted. */
4717 if (i > 0 && ! skip_past_char (&str, ','))
4718 {
4719 set_syntax_error (_("comma expected between operands"));
4720 goto failure;
4721 }
4722 else
4723 comma_skipped_p = 1;
4724
4725 switch (operands[i])
4726 {
4727 case AARCH64_OPND_Rd:
4728 case AARCH64_OPND_Rn:
4729 case AARCH64_OPND_Rm:
4730 case AARCH64_OPND_Rt:
4731 case AARCH64_OPND_Rt2:
4732 case AARCH64_OPND_Rs:
4733 case AARCH64_OPND_Ra:
4734 case AARCH64_OPND_Rt_SYS:
4735 case AARCH64_OPND_PAIRREG:
4736 po_int_reg_or_fail (1, 0);
4737 break;
4738
4739 case AARCH64_OPND_Rd_SP:
4740 case AARCH64_OPND_Rn_SP:
4741 po_int_reg_or_fail (0, 1);
4742 break;
4743
4744 case AARCH64_OPND_Rm_EXT:
4745 case AARCH64_OPND_Rm_SFT:
4746 po_misc_or_fail (parse_shifter_operand
4747 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4748 ? SHIFTED_ARITH_IMM
4749 : SHIFTED_LOGIC_IMM)));
4750 if (!info->shifter.operator_present)
4751 {
4752 /* Default to LSL if not present. Libopcodes prefers shifter
4753 kind to be explicit. */
4754 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4755 info->shifter.kind = AARCH64_MOD_LSL;
4756 /* For Rm_EXT, libopcodes will carry out further check on whether
4757 or not stack pointer is used in the instruction (Recall that
4758 "the extend operator is not optional unless at least one of
4759 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4760 }
4761 break;
4762
4763 case AARCH64_OPND_Fd:
4764 case AARCH64_OPND_Fn:
4765 case AARCH64_OPND_Fm:
4766 case AARCH64_OPND_Fa:
4767 case AARCH64_OPND_Ft:
4768 case AARCH64_OPND_Ft2:
4769 case AARCH64_OPND_Sd:
4770 case AARCH64_OPND_Sn:
4771 case AARCH64_OPND_Sm:
4772 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4773 if (val == PARSE_FAIL)
4774 {
4775 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4776 goto failure;
4777 }
4778 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4779
4780 info->reg.regno = val;
4781 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4782 break;
4783
4784 case AARCH64_OPND_Vd:
4785 case AARCH64_OPND_Vn:
4786 case AARCH64_OPND_Vm:
4787 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4788 if (val == PARSE_FAIL)
4789 {
4790 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4791 goto failure;
4792 }
4793 if (vectype.defined & NTA_HASINDEX)
4794 goto failure;
4795
4796 info->reg.regno = val;
4797 info->qualifier = vectype_to_qualifier (&vectype);
4798 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4799 goto failure;
4800 break;
4801
4802 case AARCH64_OPND_VdD1:
4803 case AARCH64_OPND_VnD1:
4804 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4805 if (val == PARSE_FAIL)
4806 {
4807 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4808 goto failure;
4809 }
4810 if (vectype.type != NT_d || vectype.index != 1)
4811 {
4812 set_fatal_syntax_error
4813 (_("the top half of a 128-bit FP/SIMD register is expected"));
4814 goto failure;
4815 }
4816 info->reg.regno = val;
4817 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
4818 here; it is correct for the purpose of encoding/decoding since
4819 only the register number is explicitly encoded in the related
4820 instructions, although this appears a bit hacky. */
4821 info->qualifier = AARCH64_OPND_QLF_S_D;
4822 break;
4823
4824 case AARCH64_OPND_Ed:
4825 case AARCH64_OPND_En:
4826 case AARCH64_OPND_Em:
4827 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4828 if (val == PARSE_FAIL)
4829 {
4830 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4831 goto failure;
4832 }
4833 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4834 goto failure;
4835
4836 info->reglane.regno = val;
4837 info->reglane.index = vectype.index;
4838 info->qualifier = vectype_to_qualifier (&vectype);
4839 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4840 goto failure;
4841 break;
4842
4843 case AARCH64_OPND_LVn:
4844 case AARCH64_OPND_LVt:
4845 case AARCH64_OPND_LVt_AL:
4846 case AARCH64_OPND_LEt:
4847 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4848 goto failure;
4849 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4850 {
4851 set_fatal_syntax_error (_("invalid register list"));
4852 goto failure;
4853 }
4854 info->reglist.first_regno = (val >> 2) & 0x1f;
4855 info->reglist.num_regs = (val & 0x3) + 1;
4856 if (operands[i] == AARCH64_OPND_LEt)
4857 {
4858 if (!(vectype.defined & NTA_HASINDEX))
4859 goto failure;
4860 info->reglist.has_index = 1;
4861 info->reglist.index = vectype.index;
4862 }
4863 else if (!(vectype.defined & NTA_HASTYPE))
4864 goto failure;
4865 info->qualifier = vectype_to_qualifier (&vectype);
4866 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4867 goto failure;
4868 break;
4869
4870 case AARCH64_OPND_Cn:
4871 case AARCH64_OPND_Cm:
4872 po_reg_or_fail (REG_TYPE_CN);
4873 if (val > 15)
4874 {
4875 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4876 goto failure;
4877 }
4878 inst.base.operands[i].reg.regno = val;
4879 break;
4880
4881 case AARCH64_OPND_SHLL_IMM:
4882 case AARCH64_OPND_IMM_VLSR:
4883 po_imm_or_fail (1, 64);
4884 info->imm.value = val;
4885 break;
4886
4887 case AARCH64_OPND_CCMP_IMM:
4888 case AARCH64_OPND_FBITS:
4889 case AARCH64_OPND_UIMM4:
4890 case AARCH64_OPND_UIMM3_OP1:
4891 case AARCH64_OPND_UIMM3_OP2:
4892 case AARCH64_OPND_IMM_VLSL:
4893 case AARCH64_OPND_IMM:
4894 case AARCH64_OPND_WIDTH:
4895 po_imm_nc_or_fail ();
4896 info->imm.value = val;
4897 break;
4898
4899 case AARCH64_OPND_UIMM7:
4900 po_imm_or_fail (0, 127);
4901 info->imm.value = val;
4902 break;
4903
4904 case AARCH64_OPND_IDX:
4905 case AARCH64_OPND_BIT_NUM:
4906 case AARCH64_OPND_IMMR:
4907 case AARCH64_OPND_IMMS:
4908 po_imm_or_fail (0, 63);
4909 info->imm.value = val;
4910 break;
4911
4912 case AARCH64_OPND_IMM0:
4913 po_imm_nc_or_fail ();
4914 if (val != 0)
4915 {
4916 set_fatal_syntax_error (_("immediate zero expected"));
4917 goto failure;
4918 }
4919 info->imm.value = 0;
4920 break;
4921
4922 case AARCH64_OPND_FPIMM0:
4923 {
4924 int qfloat;
4925 bfd_boolean res1 = FALSE, res2 = FALSE;
4926 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4927 it is probably not worth the effort to support it. */
4928 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4929 && !(res2 = parse_constant_immediate (&str, &val)))
4930 goto failure;
4931 if ((res1 && qfloat == 0) || (res2 && val == 0))
4932 {
4933 info->imm.value = 0;
4934 info->imm.is_fp = 1;
4935 break;
4936 }
4937 set_fatal_syntax_error (_("immediate zero expected"));
4938 goto failure;
4939 }
4940
4941 case AARCH64_OPND_IMM_MOV:
4942 {
4943 char *saved = str;
4944 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
4945 reg_name_p (str, REG_TYPE_VN))
4946 goto failure;
4947 str = saved;
4948 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4949 GE_OPT_PREFIX, 1));
4950 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4951 later. fix_mov_imm_insn will try to determine a machine
4952 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4953 message if the immediate cannot be moved by a single
4954 instruction. */
4955 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4956 inst.base.operands[i].skip = 1;
4957 }
4958 break;
4959
4960 case AARCH64_OPND_SIMD_IMM:
4961 case AARCH64_OPND_SIMD_IMM_SFT:
4962 if (! parse_big_immediate (&str, &val))
4963 goto failure;
4964 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4965 /* addr_off_p */ 0,
4966 /* need_libopcodes_p */ 1,
4967 /* skip_p */ 1);
4968 /* Parse shift.
4969 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4970 shift, we don't check it here; we leave the checking to
4971 the libopcodes (operand_general_constraint_met_p). By
4972 doing this, we achieve better diagnostics. */
4973 if (skip_past_comma (&str)
4974 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4975 goto failure;
4976 if (!info->shifter.operator_present
4977 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4978 {
4979 /* Default to LSL if not present. Libopcodes prefers shifter
4980 kind to be explicit. */
4981 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4982 info->shifter.kind = AARCH64_MOD_LSL;
4983 }
4984 break;
4985
4986 case AARCH64_OPND_FPIMM:
4987 case AARCH64_OPND_SIMD_FPIMM:
4988 {
4989 int qfloat;
4990 bfd_boolean dp_p
4991 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
4992 == 8);
4993 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
4994 goto failure;
4995 if (qfloat == 0)
4996 {
4997 set_fatal_syntax_error (_("invalid floating-point constant"));
4998 goto failure;
4999 }
5000 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5001 inst.base.operands[i].imm.is_fp = 1;
5002 }
5003 break;
5004
5005 case AARCH64_OPND_LIMM:
5006 po_misc_or_fail (parse_shifter_operand (&str, info,
5007 SHIFTED_LOGIC_IMM));
5008 if (info->shifter.operator_present)
5009 {
5010 set_fatal_syntax_error
5011 (_("shift not allowed for bitmask immediate"));
5012 goto failure;
5013 }
5014 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5015 /* addr_off_p */ 0,
5016 /* need_libopcodes_p */ 1,
5017 /* skip_p */ 1);
5018 break;
5019
5020 case AARCH64_OPND_AIMM:
5021 if (opcode->op == OP_ADD)
5022 /* ADD may have relocation types. */
5023 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5024 SHIFTED_ARITH_IMM));
5025 else
5026 po_misc_or_fail (parse_shifter_operand (&str, info,
5027 SHIFTED_ARITH_IMM));
5028 switch (inst.reloc.type)
5029 {
5030 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5031 info->shifter.amount = 12;
5032 break;
5033 case BFD_RELOC_UNUSED:
5034 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5035 if (info->shifter.kind != AARCH64_MOD_NONE)
5036 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5037 inst.reloc.pc_rel = 0;
5038 break;
5039 default:
5040 break;
5041 }
5042 info->imm.value = 0;
5043 if (!info->shifter.operator_present)
5044 {
5045 /* Default to LSL if not present. Libopcodes prefers shifter
5046 kind to be explicit. */
5047 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5048 info->shifter.kind = AARCH64_MOD_LSL;
5049 }
5050 break;
5051
5052 case AARCH64_OPND_HALF:
5053 {
5054 /* #<imm16> or relocation. */
5055 int internal_fixup_p;
5056 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5057 if (internal_fixup_p)
5058 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5059 skip_whitespace (str);
5060 if (skip_past_comma (&str))
5061 {
5062 /* {, LSL #<shift>} */
5063 if (! aarch64_gas_internal_fixup_p ())
5064 {
5065 set_fatal_syntax_error (_("can't mix relocation modifier "
5066 "with explicit shift"));
5067 goto failure;
5068 }
5069 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5070 }
5071 else
5072 inst.base.operands[i].shifter.amount = 0;
5073 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5074 inst.base.operands[i].imm.value = 0;
5075 if (! process_movw_reloc_info ())
5076 goto failure;
5077 }
5078 break;
5079
5080 case AARCH64_OPND_EXCEPTION:
5081 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
5082 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5083 /* addr_off_p */ 0,
5084 /* need_libopcodes_p */ 0,
5085 /* skip_p */ 1);
5086 break;
5087
5088 case AARCH64_OPND_NZCV:
5089 {
5090 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5091 if (nzcv != NULL)
5092 {
5093 str += 4;
5094 info->imm.value = nzcv->value;
5095 break;
5096 }
5097 po_imm_or_fail (0, 15);
5098 info->imm.value = val;
5099 }
5100 break;
5101
5102 case AARCH64_OPND_COND:
5103 case AARCH64_OPND_COND1:
5104 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5105 str += 2;
5106 if (info->cond == NULL)
5107 {
5108 set_syntax_error (_("invalid condition"));
5109 goto failure;
5110 }
5111 else if (operands[i] == AARCH64_OPND_COND1
5112 && (info->cond->value & 0xe) == 0xe)
5113 {
5114 /* Do not allow AL or NV. */
5115 set_default_error ();
5116 goto failure;
5117 }
5118 break;
5119
5120 case AARCH64_OPND_ADDR_ADRP:
5121 po_misc_or_fail (parse_adrp (&str));
5122 /* Clear the value, as the operand needs to be relocated. */
5123 info->imm.value = 0;
5124 break;
5125
5126 case AARCH64_OPND_ADDR_PCREL14:
5127 case AARCH64_OPND_ADDR_PCREL19:
5128 case AARCH64_OPND_ADDR_PCREL21:
5129 case AARCH64_OPND_ADDR_PCREL26:
5130 po_misc_or_fail (parse_address_reloc (&str, info));
5131 if (!info->addr.pcrel)
5132 {
5133 set_syntax_error (_("invalid pc-relative address"));
5134 goto failure;
5135 }
5136 if (inst.gen_lit_pool
5137 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5138 {
5139 /* Only permit "=value" in the literal load instructions.
5140 The literal will be generated by programmer_friendly_fixup. */
5141 set_syntax_error (_("invalid use of \"=immediate\""));
5142 goto failure;
5143 }
5144 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5145 {
5146 set_syntax_error (_("unrecognized relocation suffix"));
5147 goto failure;
5148 }
5149 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5150 {
5151 info->imm.value = inst.reloc.exp.X_add_number;
5152 inst.reloc.type = BFD_RELOC_UNUSED;
5153 }
5154 else
5155 {
5156 info->imm.value = 0;
5157 if (inst.reloc.type == BFD_RELOC_UNUSED)
5158 switch (opcode->iclass)
5159 {
5160 case compbranch:
5161 case condbranch:
5162 /* e.g. CBZ or B.COND */
5163 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5164 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5165 break;
5166 case testbranch:
5167 /* e.g. TBZ */
5168 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5169 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5170 break;
5171 case branch_imm:
5172 /* e.g. B or BL */
5173 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5174 inst.reloc.type =
5175 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5176 : BFD_RELOC_AARCH64_JUMP26;
5177 break;
5178 case loadlit:
5179 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5180 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5181 break;
5182 case pcreladdr:
5183 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5184 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5185 break;
5186 default:
5187 gas_assert (0);
5188 abort ();
5189 }
5190 inst.reloc.pc_rel = 1;
5191 }
5192 break;
5193
5194 case AARCH64_OPND_ADDR_SIMPLE:
5195 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5196 /* [<Xn|SP>{, #<simm>}] */
5197 po_char_or_fail ('[');
5198 po_reg_or_fail (REG_TYPE_R64_SP);
5199 /* Accept optional ", #0". */
5200 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5201 && skip_past_char (&str, ','))
5202 {
5203 skip_past_char (&str, '#');
5204 if (! skip_past_char (&str, '0'))
5205 {
5206 set_fatal_syntax_error
5207 (_("the optional immediate offset can only be 0"));
5208 goto failure;
5209 }
5210 }
5211 po_char_or_fail (']');
5212 info->addr.base_regno = val;
5213 break;
5214
5215 case AARCH64_OPND_ADDR_REGOFF:
5216 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5217 po_misc_or_fail (parse_address (&str, info, 0));
5218 if (info->addr.pcrel || !info->addr.offset.is_reg
5219 || !info->addr.preind || info->addr.postind
5220 || info->addr.writeback)
5221 {
5222 set_syntax_error (_("invalid addressing mode"));
5223 goto failure;
5224 }
5225 if (!info->shifter.operator_present)
5226 {
5227 /* Default to LSL if not present. Libopcodes prefers shifter
5228 kind to be explicit. */
5229 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5230 info->shifter.kind = AARCH64_MOD_LSL;
5231 }
5232 /* Qualifier to be deduced by libopcodes. */
5233 break;
5234
5235 case AARCH64_OPND_ADDR_SIMM7:
5236 po_misc_or_fail (parse_address (&str, info, 0));
5237 if (info->addr.pcrel || info->addr.offset.is_reg
5238 || (!info->addr.preind && !info->addr.postind))
5239 {
5240 set_syntax_error (_("invalid addressing mode"));
5241 goto failure;
5242 }
5243 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5244 /* addr_off_p */ 1,
5245 /* need_libopcodes_p */ 1,
5246 /* skip_p */ 0);
5247 break;
5248
5249 case AARCH64_OPND_ADDR_SIMM9:
5250 case AARCH64_OPND_ADDR_SIMM9_2:
5251 po_misc_or_fail (parse_address_reloc (&str, info));
5252 if (info->addr.pcrel || info->addr.offset.is_reg
5253 || (!info->addr.preind && !info->addr.postind)
5254 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5255 && info->addr.writeback))
5256 {
5257 set_syntax_error (_("invalid addressing mode"));
5258 goto failure;
5259 }
5260 if (inst.reloc.type != BFD_RELOC_UNUSED)
5261 {
5262 set_syntax_error (_("relocation not allowed"));
5263 goto failure;
5264 }
5265 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5266 /* addr_off_p */ 1,
5267 /* need_libopcodes_p */ 1,
5268 /* skip_p */ 0);
5269 break;
5270
5271 case AARCH64_OPND_ADDR_UIMM12:
5272 po_misc_or_fail (parse_address_reloc (&str, info));
5273 if (info->addr.pcrel || info->addr.offset.is_reg
5274 || !info->addr.preind || info->addr.writeback)
5275 {
5276 set_syntax_error (_("invalid addressing mode"));
5277 goto failure;
5278 }
5279 if (inst.reloc.type == BFD_RELOC_UNUSED)
5280 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5281 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5282 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5283 /* Leave qualifier to be determined by libopcodes. */
5284 break;
5285
5286 case AARCH64_OPND_SIMD_ADDR_POST:
5287 /* [<Xn|SP>], <Xm|#<amount>> */
5288 po_misc_or_fail (parse_address (&str, info, 1));
5289 if (!info->addr.postind || !info->addr.writeback)
5290 {
5291 set_syntax_error (_("invalid addressing mode"));
5292 goto failure;
5293 }
5294 if (!info->addr.offset.is_reg)
5295 {
5296 if (inst.reloc.exp.X_op == O_constant)
5297 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5298 else
5299 {
5300 set_fatal_syntax_error
5301 (_("writeback value should be an immediate constant"));
5302 goto failure;
5303 }
5304 }
5305 /* No qualifier. */
5306 break;
5307
5308 case AARCH64_OPND_SYSREG:
5309 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5310 == PARSE_FAIL)
5311 {
5312 set_syntax_error (_("unknown or missing system register name"));
5313 goto failure;
5314 }
5315 inst.base.operands[i].sysreg = val;
5316 break;
5317
5318 case AARCH64_OPND_PSTATEFIELD:
5319 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5320 == PARSE_FAIL)
5321 {
5322 set_syntax_error (_("unknown or missing PSTATE field name"));
5323 goto failure;
5324 }
5325 inst.base.operands[i].pstatefield = val;
5326 break;
5327
5328 case AARCH64_OPND_SYSREG_IC:
5329 inst.base.operands[i].sysins_op =
5330 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5331 goto sys_reg_ins;
5332 case AARCH64_OPND_SYSREG_DC:
5333 inst.base.operands[i].sysins_op =
5334 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5335 goto sys_reg_ins;
5336 case AARCH64_OPND_SYSREG_AT:
5337 inst.base.operands[i].sysins_op =
5338 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5339 goto sys_reg_ins;
5340 case AARCH64_OPND_SYSREG_TLBI:
5341 inst.base.operands[i].sysins_op =
5342 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5343 sys_reg_ins:
5344 if (inst.base.operands[i].sysins_op == NULL)
5345 {
5346 set_fatal_syntax_error ( _("unknown or missing operation name"));
5347 goto failure;
5348 }
5349 break;
5350
5351 case AARCH64_OPND_BARRIER:
5352 case AARCH64_OPND_BARRIER_ISB:
5353 val = parse_barrier (&str);
5354 if (val != PARSE_FAIL
5355 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5356 {
5357 /* ISB only accepts the option name 'sy'. */
5358 set_syntax_error
5359 (_("the specified option is not accepted in ISB"));
5360 /* Turn off backtrack as this optional operand is present. */
5361 backtrack_pos = 0;
5362 goto failure;
5363 }
5364 /* This is an extension to accept a 0..15 immediate. */
5365 if (val == PARSE_FAIL)
5366 po_imm_or_fail (0, 15);
5367 info->barrier = aarch64_barrier_options + val;
5368 break;
5369
5370 case AARCH64_OPND_PRFOP:
5371 val = parse_pldop (&str);
5372 /* This is an extension to accept a 0..31 immediate. */
5373 if (val == PARSE_FAIL)
5374 po_imm_or_fail (0, 31);
5375 inst.base.operands[i].prfop = aarch64_prfops + val;
5376 break;
5377
5378 default:
5379 as_fatal (_("unhandled operand code %d"), operands[i]);
5380 }
5381
5382 /* If we get here, this operand was successfully parsed. */
5383 inst.base.operands[i].present = 1;
5384 continue;
5385
5386 failure:
5387 /* The parse routine should already have set the error, but in case
5388 not, set a default one here. */
5389 if (! error_p ())
5390 set_default_error ();
5391
5392 if (! backtrack_pos)
5393 goto parse_operands_return;
5394
5395 {
5396 /* We reach here because this operand is marked as optional, and
5397 either no operand was supplied or the operand was supplied but it
5398 was syntactically incorrect. In the latter case we report an
5399 error. In the former case we perform a few more checks before
5400 dropping through to the code to insert the default operand. */
5401
5402 char *tmp = backtrack_pos;
5403 char endchar = END_OF_INSN;
5404
5405 if (i != (aarch64_num_of_operands (opcode) - 1))
5406 endchar = ',';
5407 skip_past_char (&tmp, ',');
5408
5409 if (*tmp != endchar)
5410 /* The user has supplied an operand in the wrong format. */
5411 goto parse_operands_return;
5412
5413 /* Make sure there is no comma before the omitted optional operand.
5414 For example, the fifth operand of 'sys' is optional:
5415
5416 sys #0,c0,c0,#0, <--- wrong
5417 sys #0,c0,c0,#0 <--- correct. */
5418 if (comma_skipped_p && i && endchar == END_OF_INSN)
5419 {
5420 set_fatal_syntax_error
5421 (_("unexpected comma before the omitted optional operand"));
5422 goto parse_operands_return;
5423 }
5424 }
5425
5426 /* Reaching here means we are dealing with an optional operand that is
5427 omitted from the assembly line. */
5428 gas_assert (optional_operand_p (opcode, i));
5429 info->present = 0;
5430 process_omitted_operand (operands[i], opcode, i, info);
5431
5432 /* Try again, skipping the optional operand at backtrack_pos. */
5433 str = backtrack_pos;
5434 backtrack_pos = 0;
5435
5436 /* Clear any error record after the omitted optional operand has been
5437 successfully handled. */
5438 clear_error ();
5439 }
5440
5441 /* Check if we have parsed all the operands. */
5442 if (*str != '\0' && ! error_p ())
5443 {
5444 /* Set I to the index of the last present operand; this is
5445 for the purpose of diagnostics. */
5446 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5447 ;
5448 set_fatal_syntax_error
5449 (_("unexpected characters following instruction"));
5450 }
5451
5452 parse_operands_return:
5453
5454 if (error_p ())
5455 {
5456 DEBUG_TRACE ("parsing FAIL: %s - %s",
5457 operand_mismatch_kind_names[get_error_kind ()],
5458 get_error_message ());
5459 /* Record the operand error properly; this is useful when there
5460 are multiple instruction templates for a mnemonic name, so that
5461 later on, we can select the error that most closely describes
5462 the problem. */
5463 record_operand_error (opcode, i, get_error_kind (),
5464 get_error_message ());
5465 return FALSE;
5466 }
5467 else
5468 {
5469 DEBUG_TRACE ("parsing SUCCESS");
5470 return TRUE;
5471 }
5472 }
5473
5474 /* Perform some fix-ups to provide programmer-friendly features while
5475 keeping libopcodes happy, i.e. libopcodes only accepts
5476 the preferred architectural syntax.
5477 Return FALSE if there is any failure; otherwise return TRUE. */
5478
5479 static bfd_boolean
5480 programmer_friendly_fixup (aarch64_instruction *instr)
5481 {
5482 aarch64_inst *base = &instr->base;
5483 const aarch64_opcode *opcode = base->opcode;
5484 enum aarch64_op op = opcode->op;
5485 aarch64_opnd_info *operands = base->operands;
5486
5487 DEBUG_TRACE ("enter");
5488
5489 switch (opcode->iclass)
5490 {
5491 case testbranch:
5492 /* TBNZ Xn|Wn, #uimm6, label
5493 Test and Branch Not Zero: conditionally jumps to label if bit number
5494 uimm6 in register Xn is not zero. The bit number implies the width of
5495 the register, which may be written and should be disassembled as Wn if
5496 uimm is less than 32. */
5497 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5498 {
5499 if (operands[1].imm.value >= 32)
5500 {
5501 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5502 0, 31);
5503 return FALSE;
5504 }
5505 operands[0].qualifier = AARCH64_OPND_QLF_X;
5506 }
5507 break;
5508 case loadlit:
5509 /* LDR Wt, label | =value
5510 As a convenience assemblers will typically permit the notation
5511 "=value" in conjunction with the pc-relative literal load instructions
5512 to automatically place an immediate value or symbolic address in a
5513 nearby literal pool and generate a hidden label which references it.
5514 ISREG has been set to 0 in the case of =value. */
5515 if (instr->gen_lit_pool
5516 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5517 {
5518 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5519 if (op == OP_LDRSW_LIT)
5520 size = 4;
5521 if (instr->reloc.exp.X_op != O_constant
5522 && instr->reloc.exp.X_op != O_big
5523 && instr->reloc.exp.X_op != O_symbol)
5524 {
5525 record_operand_error (opcode, 1,
5526 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5527 _("constant expression expected"));
5528 return FALSE;
5529 }
5530 if (! add_to_lit_pool (&instr->reloc.exp, size))
5531 {
5532 record_operand_error (opcode, 1,
5533 AARCH64_OPDE_OTHER_ERROR,
5534 _("literal pool insertion failed"));
5535 return FALSE;
5536 }
5537 }
5538 break;
5539 case log_shift:
5540 case bitfield:
5541 /* UXT[BHW] Wd, Wn
5542 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5543 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5544 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5545 A programmer-friendly assembler should accept a destination Xd in
5546 place of Wd, however that is not the preferred form for disassembly.
5547 */
5548 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5549 && operands[1].qualifier == AARCH64_OPND_QLF_W
5550 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5551 operands[0].qualifier = AARCH64_OPND_QLF_W;
5552 break;
5553
5554 case addsub_ext:
5555 {
5556 /* In the 64-bit form, the final register operand is written as Wm
5557 for all but the (possibly omitted) UXTX/LSL and SXTX
5558 operators.
5559 As a programmer-friendly assembler, we accept e.g.
5560 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5561 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5562 int idx = aarch64_operand_index (opcode->operands,
5563 AARCH64_OPND_Rm_EXT);
5564 gas_assert (idx == 1 || idx == 2);
5565 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5566 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5567 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5568 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5569 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5570 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5571 }
5572 break;
5573
5574 default:
5575 break;
5576 }
5577
5578 DEBUG_TRACE ("exit with SUCCESS");
5579 return TRUE;
5580 }
5581
5582 /* Check for loads and stores that will cause unpredictable behavior. */
5583
5584 static void
5585 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5586 {
5587 aarch64_inst *base = &instr->base;
5588 const aarch64_opcode *opcode = base->opcode;
5589 const aarch64_opnd_info *opnds = base->operands;
5590 switch (opcode->iclass)
5591 {
5592 case ldst_pos:
5593 case ldst_imm9:
5594 case ldst_unscaled:
5595 case ldst_unpriv:
5596 /* Loading/storing the base register is unpredictable if writeback. */
5597 if ((aarch64_get_operand_class (opnds[0].type)
5598 == AARCH64_OPND_CLASS_INT_REG)
5599 && opnds[0].reg.regno == opnds[1].addr.base_regno
5600 && opnds[1].addr.base_regno != REG_SP
5601 && opnds[1].addr.writeback)
5602 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5603 break;
5604 case ldstpair_off:
5605 case ldstnapair_offs:
5606 case ldstpair_indexed:
5607 /* Loading/storing the base register is unpredictable if writeback. */
5608 if ((aarch64_get_operand_class (opnds[0].type)
5609 == AARCH64_OPND_CLASS_INT_REG)
5610 && (opnds[0].reg.regno == opnds[2].addr.base_regno
5611 || opnds[1].reg.regno == opnds[2].addr.base_regno)
5612 && opnds[2].addr.base_regno != REG_SP
5613 && opnds[2].addr.writeback)
5614 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5615 /* Load operations must load different registers. */
5616 if ((opcode->opcode & (1 << 22))
5617 && opnds[0].reg.regno == opnds[1].reg.regno)
5618 as_warn (_("unpredictable load of register pair -- `%s'"), str);
5619 break;
5620 default:
5621 break;
5622 }
5623 }
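
/* Editorial illustration: the checks above would warn for

       ldr     x0, [x0, #8]!        (writeback base equals the transfer
                                     register)
       ldp     x2, x2, [x1]         (load pair into the same register)

   but not for a writeback access that uses SP as the base, since REG_SP is
   explicitly excluded from the first test.  */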
5624
5625 /* A wrapper function to interface with libopcodes on encoding and
5626 record the error message if there is any.
5627
5628 Return TRUE on success; otherwise return FALSE. */
5629
5630 static bfd_boolean
5631 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5632 aarch64_insn *code)
5633 {
5634 aarch64_operand_error error_info;
5635 error_info.kind = AARCH64_OPDE_NIL;
5636 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5637 return TRUE;
5638 else
5639 {
5640 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5641 record_operand_error_info (opcode, &error_info);
5642 return FALSE;
5643 }
5644 }
5645
5646 #ifdef DEBUG_AARCH64
5647 static inline void
5648 dump_opcode_operands (const aarch64_opcode *opcode)
5649 {
5650 int i = 0;
5651 while (opcode->operands[i] != AARCH64_OPND_NIL)
5652 {
5653 aarch64_verbose ("\t\t opnd%d: %s", i,
5654 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5655 ? aarch64_get_operand_name (opcode->operands[i])
5656 : aarch64_get_operand_desc (opcode->operands[i]));
5657 ++i;
5658 }
5659 }
5660 #endif /* DEBUG_AARCH64 */
5661
5662 /* This is the guts of the machine-dependent assembler. STR points to a
5663 machine dependent instruction. This function is supposed to emit
5664 the frags/bytes it assembles to. */
5665
5666 void
5667 md_assemble (char *str)
5668 {
5669 char *p = str;
5670 templates *template;
5671 aarch64_opcode *opcode;
5672 aarch64_inst *inst_base;
5673 unsigned saved_cond;
5674
5675 /* Align the previous label if needed. */
5676 if (last_label_seen != NULL)
5677 {
5678 symbol_set_frag (last_label_seen, frag_now);
5679 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5680 S_SET_SEGMENT (last_label_seen, now_seg);
5681 }
5682
5683 inst.reloc.type = BFD_RELOC_UNUSED;
5684
5685 DEBUG_TRACE ("\n\n");
5686 DEBUG_TRACE ("==============================");
5687 DEBUG_TRACE ("Enter md_assemble with %s", str);
5688
5689 template = opcode_lookup (&p);
5690 if (!template)
5691 {
5692 /* It wasn't an instruction, but it might be a register alias of
5693 the form "alias .req reg" (a .req directive). */
5694 if (!create_register_alias (str, p))
5695 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5696 str);
5697 return;
5698 }
5699
5700 skip_whitespace (p);
5701 if (*p == ',')
5702 {
5703 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5704 get_mnemonic_name (str), str);
5705 return;
5706 }
5707
5708 init_operand_error_report ();
5709
5710 /* Sections are assumed to start aligned. In an executable section, there is
5711 no MAP_DATA symbol pending. So we only align the address during the
5712 MAP_DATA --> MAP_INSN transition.
5713 For other sections, this is not guaranteed. */
5714 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
5715 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
5716 frag_align_code (2, 0);
5717
5718 saved_cond = inst.cond;
5719 reset_aarch64_instruction (&inst);
5720 inst.cond = saved_cond;
5721
5722 /* Iterate through all opcode entries with the same mnemonic name. */
5723 do
5724 {
5725 opcode = template->opcode;
5726
5727 DEBUG_TRACE ("opcode %s found", opcode->name);
5728 #ifdef DEBUG_AARCH64
5729 if (debug_dump)
5730 dump_opcode_operands (opcode);
5731 #endif /* DEBUG_AARCH64 */
5732
5733 mapping_state (MAP_INSN);
5734
5735 inst_base = &inst.base;
5736 inst_base->opcode = opcode;
5737
5738 /* Truly conditionally executed instructions, e.g. b.cond. */
5739 if (opcode->flags & F_COND)
5740 {
5741 gas_assert (inst.cond != COND_ALWAYS);
5742 inst_base->cond = get_cond_from_value (inst.cond);
5743 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5744 }
5745 else if (inst.cond != COND_ALWAYS)
5746 {
5747 /* We shouldn't arrive here, where the assembly looks like a
5748 conditional instruction but the opcode found is unconditional. */
5749 gas_assert (0);
5750 continue;
5751 }
5752
5753 if (parse_operands (p, opcode)
5754 && programmer_friendly_fixup (&inst)
5755 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5756 {
5757 /* Check that this instruction is supported for this CPU. */
5758 if (!opcode->avariant
5759 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5760 {
5761 as_bad (_("selected processor does not support `%s'"), str);
5762 return;
5763 }
5764
5765 warn_unpredictable_ldst (&inst, str);
5766
5767 if (inst.reloc.type == BFD_RELOC_UNUSED
5768 || !inst.reloc.need_libopcodes_p)
5769 output_inst (NULL);
5770 else
5771 {
5772 /* If there is a relocation generated for the instruction,
5773 store the instruction information for the future fix-up. */
5774 struct aarch64_inst *copy;
5775 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5776 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5777 abort ();
5778 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5779 output_inst (copy);
5780 }
5781 return;
5782 }
5783
5784 template = template->next;
5785 if (template != NULL)
5786 {
5787 reset_aarch64_instruction (&inst);
5788 inst.cond = saved_cond;
5789 }
5790 }
5791 while (template != NULL);
5792
5793 /* Issue the error messages if any. */
5794 output_operand_error_report (str);
5795 }
5796
5797 /* Various frobbings of labels and their addresses. */
5798
5799 void
5800 aarch64_start_line_hook (void)
5801 {
5802 last_label_seen = NULL;
5803 }
5804
5805 void
5806 aarch64_frob_label (symbolS * sym)
5807 {
5808 last_label_seen = sym;
5809
5810 dwarf2_emit_label (sym);
5811 }
5812
5813 int
5814 aarch64_data_in_code (void)
5815 {
5816 if (!strncmp (input_line_pointer + 1, "data:", 5))
5817 {
5818 *input_line_pointer = '/';
5819 input_line_pointer += 5;
5820 *input_line_pointer = 0;
5821 return 1;
5822 }
5823
5824 return 0;
5825 }
5826
5827 char *
5828 aarch64_canonicalize_symbol_name (char *name)
5829 {
5830 int len;
5831
5832 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5833 *(name + len - 5) = 0;
5834
5835 return name;
5836 }
5837 \f
5838 /* Table of all register names defined by default. The user can
5839 define additional names with .req. Note that all register names
5840 should appear in both upper and lowercase variants. Some registers
5841 also have mixed-case names. */
5842
5843 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5844 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5845 #define REGSET31(p,t) \
5846 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5847 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5848 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5849 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5850 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5851 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5852 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5853 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5854 #define REGSET(p,t) \
5855 REGSET31(p,t), REGNUM(p,31,t)
5856
5857 /* These go into aarch64_reg_hsh hash-table. */
5858 static const reg_entry reg_names[] = {
5859 /* Integer registers. */
5860 REGSET31 (x, R_64), REGSET31 (X, R_64),
5861 REGSET31 (w, R_32), REGSET31 (W, R_32),
5862
5863 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5864 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5865
5866 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5867 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5868
5869 /* Coprocessor register numbers. */
5870 REGSET (c, CN), REGSET (C, CN),
5871
5872 /* Floating-point single precision registers. */
5873 REGSET (s, FP_S), REGSET (S, FP_S),
5874
5875 /* Floating-point double precision registers. */
5876 REGSET (d, FP_D), REGSET (D, FP_D),
5877
5878 /* Floating-point half precision registers. */
5879 REGSET (h, FP_H), REGSET (H, FP_H),
5880
5881 /* Floating-point byte precision registers. */
5882 REGSET (b, FP_B), REGSET (B, FP_B),
5883
5884 /* Floating-point quad precision registers. */
5885 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5886
5887 /* FP/SIMD registers. */
5888 REGSET (v, VN), REGSET (V, VN),
5889 };
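
/* Editorial note: each REGSET31 line above expands to the 31 entries
   "<prefix>0" .. "<prefix>30", so REGSET31 (x, R_64) defines x0-x30.
   Register number 31 is deliberately left out of the integer sets because
   its spelling depends on context, which is why wsp/sp and wzr/xzr get
   explicit REGDEF entries instead.  */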
5890
5891 #undef REGDEF
5892 #undef REGNUM
5893 #undef REGSET
5894
5895 #define N 1
5896 #define n 0
5897 #define Z 1
5898 #define z 0
5899 #define C 1
5900 #define c 0
5901 #define V 1
5902 #define v 0
5903 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5904 static const asm_nzcv nzcv_names[] = {
5905 {"nzcv", B (n, z, c, v)},
5906 {"nzcV", B (n, z, c, V)},
5907 {"nzCv", B (n, z, C, v)},
5908 {"nzCV", B (n, z, C, V)},
5909 {"nZcv", B (n, Z, c, v)},
5910 {"nZcV", B (n, Z, c, V)},
5911 {"nZCv", B (n, Z, C, v)},
5912 {"nZCV", B (n, Z, C, V)},
5913 {"Nzcv", B (N, z, c, v)},
5914 {"NzcV", B (N, z, c, V)},
5915 {"NzCv", B (N, z, C, v)},
5916 {"NzCV", B (N, z, C, V)},
5917 {"NZcv", B (N, Z, c, v)},
5918 {"NZcV", B (N, Z, c, V)},
5919 {"NZCv", B (N, Z, C, v)},
5920 {"NZCV", B (N, Z, C, V)}
5921 };
5922
5923 #undef N
5924 #undef n
5925 #undef Z
5926 #undef z
5927 #undef C
5928 #undef c
5929 #undef V
5930 #undef v
5931 #undef B
5932 \f
5933 /* MD interface: bits in the object file. */
5934
5935 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5936 for use in the a.out file, and store them in the array pointed to by buf.
5937 This knows about the endian-ness of the target machine and does
5938 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
5939 2 (short) and 4 (long). Floating-point numbers are put out as a series of
5940 LITTLENUMS (shorts, here at least). */
5941
5942 void
5943 md_number_to_chars (char *buf, valueT val, int n)
5944 {
5945 if (target_big_endian)
5946 number_to_chars_bigendian (buf, val, n);
5947 else
5948 number_to_chars_littleendian (buf, val, n);
5949 }
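/* For instance, md_number_to_chars (buf, 0xd503201f, 4) on a little-endian
   target stores the bytes 0x1f, 0x20, 0x03, 0xd5 -- the same byte order as
   the NOP pattern used by aarch64_handle_align below. */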
5950
5951 /* MD interface: Sections. */
5952
5953 /* Estimate the size of a frag before relaxing. Assume everything fits in
5954 4 bytes. */
5955
5956 int
5957 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5958 {
5959 fragp->fr_var = 4;
5960 return 4;
5961 }
5962
5963 /* Round up a section size to the appropriate boundary. */
5964
5965 valueT
5966 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5967 {
5968 return size;
5969 }
5970
5971 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5972 of an rs_align_code fragment.
5973
5974 Here we fill the frag with the appropriate info for padding the
5975 output stream. The resulting frag will consist of a fixed (fr_fix)
5976 and of a repeating (fr_var) part.
5977
5978 The fixed content is always emitted before the repeating content and
5979 these two parts are used as follows in constructing the output:
5980 - the fixed part will be used to align to a valid instruction word
5981 boundary, in case that we start at a misaligned address; as no
5982 executable instruction can live at the misaligned location, we
5983 simply fill with zeros;
5984 - the variable part will be used to cover the remaining padding and
5985 we fill using the AArch64 NOP instruction.
5986
5987 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
5988 enough storage space for up to 3 bytes of padding back to a valid
5989 instruction alignment and exactly 4 bytes to store the NOP pattern. */
5990
5991 void
5992 aarch64_handle_align (fragS * fragP)
5993 {
5994 /* NOP = d503201f */
5995 /* AArch64 instructions are always little-endian. */
5996 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5997
5998 int bytes, fix, noop_size;
5999 char *p;
6000
6001 if (fragP->fr_type != rs_align_code)
6002 return;
6003
6004 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6005 p = fragP->fr_literal + fragP->fr_fix;
6006
6007 #ifdef OBJ_ELF
6008 gas_assert (fragP->tc_frag_data.recorded);
6009 #endif
6010
6011 noop_size = sizeof (aarch64_noop);
6012
6013 fix = bytes & (noop_size - 1);
6014 if (fix)
6015 {
6016 #ifdef OBJ_ELF
6017 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6018 #endif
6019 memset (p, 0, fix);
6020 p += fix;
6021 fragP->fr_fix += fix;
6022 }
6023
6024 if (noop_size)
6025 memcpy (p, aarch64_noop, noop_size);
6026 fragP->fr_var = noop_size;
6027 }
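/* As an illustration: if the alignment frag has to cover 6 bytes, FIX is
   6 & 3 == 2, so two zero bytes are emitted first and the remaining space
   is filled by repeating the 4-byte NOP pattern recorded in fr_var. */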
6028
6029 /* Perform target specific initialisation of a frag.
6030 Note - despite the name this initialisation is not done when the frag
6031 is created, but only when its type is assigned. A frag can be created
6032 and used a long time before its type is set, so beware of assuming that
6033 this initialisation is performed first. */
6034
6035 #ifndef OBJ_ELF
6036 void
6037 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6038 int max_chars ATTRIBUTE_UNUSED)
6039 {
6040 }
6041
6042 #else /* OBJ_ELF is defined. */
6043 void
6044 aarch64_init_frag (fragS * fragP, int max_chars)
6045 {
6046 /* Record a mapping symbol for alignment frags. We will delete this
6047 later if the alignment ends up empty. */
6048 if (!fragP->tc_frag_data.recorded)
6049 fragP->tc_frag_data.recorded = 1;
6050
6051 switch (fragP->fr_type)
6052 {
6053 case rs_align:
6054 case rs_align_test:
6055 case rs_fill:
6056 mapping_state_2 (MAP_DATA, max_chars);
6057 break;
6058 case rs_align_code:
6059 mapping_state_2 (MAP_INSN, max_chars);
6060 break;
6061 default:
6062 break;
6063 }
6064 }
6065 \f
6066 /* Initialize the DWARF-2 unwind information for this procedure. */
6067
6068 void
6069 tc_aarch64_frame_initial_instructions (void)
6070 {
6071 cfi_add_CFA_def_cfa (REG_SP, 0);
6072 }
6073 #endif /* OBJ_ELF */
6074
6075 /* Convert REGNAME to a DWARF-2 register number. */
6076
6077 int
6078 tc_aarch64_regname_to_dw2regnum (char *regname)
6079 {
6080 const reg_entry *reg = parse_reg (&regname);
6081 if (reg == NULL)
6082 return -1;
6083
6084 switch (reg->type)
6085 {
6086 case REG_TYPE_SP_32:
6087 case REG_TYPE_SP_64:
6088 case REG_TYPE_R_32:
6089 case REG_TYPE_R_64:
6090 return reg->number;
6091
6092 case REG_TYPE_FP_B:
6093 case REG_TYPE_FP_H:
6094 case REG_TYPE_FP_S:
6095 case REG_TYPE_FP_D:
6096 case REG_TYPE_FP_Q:
6097 return reg->number + 64;
6098
6099 default:
6100 break;
6101 }
6102 return -1;
6103 }
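/* E.g. "x0" and "w0" both map to DWARF register 0 and "sp" to 31, while the
   FP/SIMD registers are offset by 64, so "d0" and "q0" map to 64. Register
   names outside these classes (including the "v" forms) yield -1. */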
6104
6105 /* Implement DWARF2_ADDR_SIZE. */
6106
6107 int
6108 aarch64_dwarf2_addr_size (void)
6109 {
6110 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6111 if (ilp32_p)
6112 return 4;
6113 #endif
6114 return bfd_arch_bits_per_address (stdoutput) / 8;
6115 }
6116
6117 /* MD interface: Symbol and relocation handling. */
6118
6119 /* Return the address within the segment that a PC-relative fixup is
6120 relative to. For AArch64, PC-relative fixups applied to instructions
6121 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6122
6123 long
6124 md_pcrel_from_section (fixS * fixP, segT seg)
6125 {
6126 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6127
6128 /* If this is pc-relative and we are going to emit a relocation
6129 then we just want to put out any pipeline compensation that the linker
6130 will need. Otherwise we want to use the calculated base. */
6131 if (fixP->fx_pcrel
6132 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6133 || aarch64_force_relocation (fixP)))
6134 base = 0;
6135
6136 /* AArch64 should be consistent for all pc-relative relocations. */
6137 return base + AARCH64_PCREL_OFFSET;
6138 }
6139
6140 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
6141 Otherwise we have no need to default values of symbols. */
6142
6143 symbolS *
6144 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6145 {
6146 #ifdef OBJ_ELF
6147 if (name[0] == '_' && name[1] == 'G'
6148 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6149 {
6150 if (!GOT_symbol)
6151 {
6152 if (symbol_find (name))
6153 as_bad (_("GOT already in the symbol table"));
6154
6155 GOT_symbol = symbol_new (name, undefined_section,
6156 (valueT) 0, &zero_address_frag);
6157 }
6158
6159 return GOT_symbol;
6160 }
6161 #endif
6162
6163 return 0;
6164 }
6165
6166 /* Return non-zero if the indicated VALUE has overflowed the maximum
6167 range expressible by an unsigned number with the indicated number of
6168 BITS. */
6169
6170 static bfd_boolean
6171 unsigned_overflow (valueT value, unsigned bits)
6172 {
6173 valueT lim;
6174 if (bits >= sizeof (valueT) * 8)
6175 return FALSE;
6176 lim = (valueT) 1 << bits;
6177 return (value >= lim);
6178 }
6179
6180
6181 /* Return non-zero if the indicated VALUE has overflowed the maximum
6182 range expressible by a signed number with the indicated number of
6183 BITS. */
6184
6185 static bfd_boolean
6186 signed_overflow (offsetT value, unsigned bits)
6187 {
6188 offsetT lim;
6189 if (bits >= sizeof (offsetT) * 8)
6190 return FALSE;
6191 lim = (offsetT) 1 << (bits - 1);
6192 return (value < -lim || value >= lim);
6193 }
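/* For example, unsigned_overflow (0x10000, 16) and
   signed_overflow (0x8000, 16) both return TRUE, while
   signed_overflow (-0x8000, 16) returns FALSE. */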
6194
6195 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6196 unsigned immediate offset load/store instruction, try to encode it as
6197 an unscaled, 9-bit, signed immediate offset load/store instruction.
6198 Return TRUE if it is successful; otherwise return FALSE.
6199
6200 As a programmer-friendly assembler, GAS can generate LDUR/STUR instructions
6201 in response to the standard LDR/STR mnemonics when the immediate offset is
6202 unambiguous, i.e. when it is negative or unaligned. */
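/* For example, both of the following are accepted and assembled with the
   unscaled form, since neither offset can be encoded as a scaled uimm12:

     ldr x0, [x1, #-8]    becomes   ldur x0, [x1, #-8]
     ldr x0, [x1, #1]     becomes   ldur x0, [x1, #1]   (unaligned)  */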
6203
6204 static bfd_boolean
6205 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6206 {
6207 int idx;
6208 enum aarch64_op new_op;
6209 const aarch64_opcode *new_opcode;
6210
6211 gas_assert (instr->opcode->iclass == ldst_pos);
6212
6213 switch (instr->opcode->op)
6214 {
6215 case OP_LDRB_POS: new_op = OP_LDURB; break;
6216 case OP_STRB_POS: new_op = OP_STURB; break;
6217 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6218 case OP_LDRH_POS: new_op = OP_LDURH; break;
6219 case OP_STRH_POS: new_op = OP_STURH; break;
6220 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6221 case OP_LDR_POS: new_op = OP_LDUR; break;
6222 case OP_STR_POS: new_op = OP_STUR; break;
6223 case OP_LDRF_POS: new_op = OP_LDURV; break;
6224 case OP_STRF_POS: new_op = OP_STURV; break;
6225 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6226 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6227 default: new_op = OP_NIL; break;
6228 }
6229
6230 if (new_op == OP_NIL)
6231 return FALSE;
6232
6233 new_opcode = aarch64_get_opcode (new_op);
6234 gas_assert (new_opcode != NULL);
6235
6236 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6237 instr->opcode->op, new_opcode->op);
6238
6239 aarch64_replace_opcode (instr, new_opcode);
6240
6241 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6242 qualifier matching may fail because the out-of-date qualifier will
6243 prevent the operand being updated with a new and correct qualifier. */
6244 idx = aarch64_operand_index (instr->opcode->operands,
6245 AARCH64_OPND_ADDR_SIMM9);
6246 gas_assert (idx == 1);
6247 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6248
6249 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6250
6251 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6252 return FALSE;
6253
6254 return TRUE;
6255 }
6256
6257 /* Called by fix_insn to fix a MOV immediate alias instruction.
6258
6259 Operand for a generic move immediate instruction, which is an alias
6260 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6261 a 32-bit/64-bit immediate value into a general register. An assembler error
6262 shall result if the immediate cannot be created by a single one of these
6263 instructions. If there is a choice, then to ensure reversibility an
6264 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
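/* Illustrative examples of the preference order (assuming a 64-bit Xd):

     mov x0, #0x12340000          ->  movz x0, #0x1234, lsl #16
     mov x0, #-2                  ->  movn x0, #1
     mov x0, #0x5555555555555555  ->  orr  x0, xzr, #0x5555555555555555  */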
6265
6266 static void
6267 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6268 {
6269 const aarch64_opcode *opcode;
6270
6271 /* Need to check if the destination is SP/ZR. The check has to be done
6272 before any aarch64_replace_opcode. */
6273 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6274 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6275
6276 instr->operands[1].imm.value = value;
6277 instr->operands[1].skip = 0;
6278
6279 if (try_mov_wide_p)
6280 {
6281 /* Try the MOVZ alias. */
6282 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6283 aarch64_replace_opcode (instr, opcode);
6284 if (aarch64_opcode_encode (instr->opcode, instr,
6285 &instr->value, NULL, NULL))
6286 {
6287 put_aarch64_insn (buf, instr->value);
6288 return;
6289 }
6290 /* Try the MOVN alias. */
6291 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6292 aarch64_replace_opcode (instr, opcode);
6293 if (aarch64_opcode_encode (instr->opcode, instr,
6294 &instr->value, NULL, NULL))
6295 {
6296 put_aarch64_insn (buf, instr->value);
6297 return;
6298 }
6299 }
6300
6301 if (try_mov_bitmask_p)
6302 {
6303 /* Try the ORR alias. */
6304 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6305 aarch64_replace_opcode (instr, opcode);
6306 if (aarch64_opcode_encode (instr->opcode, instr,
6307 &instr->value, NULL, NULL))
6308 {
6309 put_aarch64_insn (buf, instr->value);
6310 return;
6311 }
6312 }
6313
6314 as_bad_where (fixP->fx_file, fixP->fx_line,
6315 _("immediate cannot be moved by a single instruction"));
6316 }
6317
6318 /* An instruction operand which is immediate related may have a symbol used
6319 in the assembly, e.g.
6320
6321 mov w0, u32
6322 .set u32, 0x00ffff00
6323
6324 At the time when the assembly instruction is parsed, a referenced symbol,
6325 like 'u32' in the above example, may not have been seen; a fixS is created
6326 in such a case and is handled here after symbols have been resolved.
6327 The instruction is fixed up with VALUE using the information in *FIXP plus
6328 extra information in FLAGS.
6329
6330 This function is called by md_apply_fix to fix up instructions that need
6331 a fix-up as described above but do not involve any linker-time relocation. */
6332
6333 static void
6334 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6335 {
6336 int idx;
6337 uint32_t insn;
6338 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6339 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6340 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6341
6342 if (new_inst)
6343 {
6344 /* Now the instruction is about to be fixed-up, so the operand that
6345 was previously marked as 'ignored' needs to be unmarked in order
6346 to get the encoding done properly. */
6347 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6348 new_inst->operands[idx].skip = 0;
6349 }
6350
6351 gas_assert (opnd != AARCH64_OPND_NIL);
6352
6353 switch (opnd)
6354 {
6355 case AARCH64_OPND_EXCEPTION:
6356 if (unsigned_overflow (value, 16))
6357 as_bad_where (fixP->fx_file, fixP->fx_line,
6358 _("immediate out of range"));
6359 insn = get_aarch64_insn (buf);
6360 insn |= encode_svc_imm (value);
6361 put_aarch64_insn (buf, insn);
6362 break;
6363
6364 case AARCH64_OPND_AIMM:
6365 /* ADD or SUB with immediate.
6366 NOTE this assumes we come here with an add/sub shifted reg encoding
6367 3 322|2222|2 2 2 21111 111111
6368 1 098|7654|3 2 1 09876 543210 98765 43210
6369 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6370 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6371 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6372 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6373 ->
6374 3 322|2222|2 2 221111111111
6375 1 098|7654|3 2 109876543210 98765 43210
6376 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6377 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6378 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6379 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6380 Fields sf Rn Rd are already set. */
6381 insn = get_aarch64_insn (buf);
6382 if (value < 0)
6383 {
6384 /* Add <-> sub. */
6385 insn = reencode_addsub_switch_add_sub (insn);
6386 value = -value;
6387 }
6388
6389 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6390 && unsigned_overflow (value, 12))
6391 {
6392 /* Try to shift the value by 12 to make it fit. */
6393 if (((value >> 12) << 12) == value
6394 && ! unsigned_overflow (value, 12 + 12))
6395 {
6396 value >>= 12;
6397 insn |= encode_addsub_imm_shift_amount (1);
6398 }
6399 }
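/* E.g. a negative value such as #-1 is first flipped to the opposite
   ADD/SUB form with #1, while a value like 0x12000, which does not fit
   in 12 bits but has its low 12 bits clear, is encoded as #0x12 with
   LSL #12. */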
6400
6401 if (unsigned_overflow (value, 12))
6402 as_bad_where (fixP->fx_file, fixP->fx_line,
6403 _("immediate out of range"));
6404
6405 insn |= encode_addsub_imm (value);
6406
6407 put_aarch64_insn (buf, insn);
6408 break;
6409
6410 case AARCH64_OPND_SIMD_IMM:
6411 case AARCH64_OPND_SIMD_IMM_SFT:
6412 case AARCH64_OPND_LIMM:
6413 /* Bit mask immediate. */
6414 gas_assert (new_inst != NULL);
6415 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6416 new_inst->operands[idx].imm.value = value;
6417 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6418 &new_inst->value, NULL, NULL))
6419 put_aarch64_insn (buf, new_inst->value);
6420 else
6421 as_bad_where (fixP->fx_file, fixP->fx_line,
6422 _("invalid immediate"));
6423 break;
6424
6425 case AARCH64_OPND_HALF:
6426 /* 16-bit unsigned immediate. */
6427 if (unsigned_overflow (value, 16))
6428 as_bad_where (fixP->fx_file, fixP->fx_line,
6429 _("immediate out of range"));
6430 insn = get_aarch64_insn (buf);
6431 insn |= encode_movw_imm (value & 0xffff);
6432 put_aarch64_insn (buf, insn);
6433 break;
6434
6435 case AARCH64_OPND_IMM_MOV:
6436 /* Operand for a generic move immediate instruction, which is
6437 an alias instruction that generates a single MOVZ, MOVN or ORR
6438 instruction to load a 32-bit/64-bit immediate value into a general
6439 register. An assembler error shall result if the immediate cannot be
6440 created by a single one of these instructions. If there is a choice,
6441 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6442 and MOVZ or MOVN to ORR. */
6443 gas_assert (new_inst != NULL);
6444 fix_mov_imm_insn (fixP, buf, new_inst, value);
6445 break;
6446
6447 case AARCH64_OPND_ADDR_SIMM7:
6448 case AARCH64_OPND_ADDR_SIMM9:
6449 case AARCH64_OPND_ADDR_SIMM9_2:
6450 case AARCH64_OPND_ADDR_UIMM12:
6451 /* Immediate offset in an address. */
6452 insn = get_aarch64_insn (buf);
6453
6454 gas_assert (new_inst != NULL && new_inst->value == insn);
6455 gas_assert (new_inst->opcode->operands[1] == opnd
6456 || new_inst->opcode->operands[2] == opnd);
6457
6458 /* Get the index of the address operand. */
6459 if (new_inst->opcode->operands[1] == opnd)
6460 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6461 idx = 1;
6462 else
6463 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6464 idx = 2;
6465
6466 /* Update the resolved offset value. */
6467 new_inst->operands[idx].addr.offset.imm = value;
6468
6469 /* Encode/fix-up. */
6470 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6471 &new_inst->value, NULL, NULL))
6472 {
6473 put_aarch64_insn (buf, new_inst->value);
6474 break;
6475 }
6476 else if (new_inst->opcode->iclass == ldst_pos
6477 && try_to_encode_as_unscaled_ldst (new_inst))
6478 {
6479 put_aarch64_insn (buf, new_inst->value);
6480 break;
6481 }
6482
6483 as_bad_where (fixP->fx_file, fixP->fx_line,
6484 _("immediate offset out of range"));
6485 break;
6486
6487 default:
6488 gas_assert (0);
6489 as_fatal (_("unhandled operand code %d"), opnd);
6490 }
6491 }
6492
6493 /* Apply a fixup (fixP) to segment data, once it has been determined
6494 by our caller that we have all the info we need to fix it up.
6495
6496 Parameter valP is the pointer to the value of the bits. */
6497
6498 void
6499 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6500 {
6501 offsetT value = *valP;
6502 uint32_t insn;
6503 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6504 int scale;
6505 unsigned flags = fixP->fx_addnumber;
6506
6507 DEBUG_TRACE ("\n\n");
6508 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6509 DEBUG_TRACE ("Enter md_apply_fix");
6510
6511 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6512
6513 /* Note whether this will delete the relocation. */
6514
6515 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6516 fixP->fx_done = 1;
6517
6518 /* Process the relocations. */
6519 switch (fixP->fx_r_type)
6520 {
6521 case BFD_RELOC_NONE:
6522 /* This will need to go in the object file. */
6523 fixP->fx_done = 0;
6524 break;
6525
6526 case BFD_RELOC_8:
6527 case BFD_RELOC_8_PCREL:
6528 if (fixP->fx_done || !seg->use_rela_p)
6529 md_number_to_chars (buf, value, 1);
6530 break;
6531
6532 case BFD_RELOC_16:
6533 case BFD_RELOC_16_PCREL:
6534 if (fixP->fx_done || !seg->use_rela_p)
6535 md_number_to_chars (buf, value, 2);
6536 break;
6537
6538 case BFD_RELOC_32:
6539 case BFD_RELOC_32_PCREL:
6540 if (fixP->fx_done || !seg->use_rela_p)
6541 md_number_to_chars (buf, value, 4);
6542 break;
6543
6544 case BFD_RELOC_64:
6545 case BFD_RELOC_64_PCREL:
6546 if (fixP->fx_done || !seg->use_rela_p)
6547 md_number_to_chars (buf, value, 8);
6548 break;
6549
6550 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6551 /* We claim that these fixups have been processed here, even if
6552 in fact we generate an error because we do not have a reloc
6553 for them, so tc_gen_reloc() will reject them. */
6554 fixP->fx_done = 1;
6555 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6556 {
6557 as_bad_where (fixP->fx_file, fixP->fx_line,
6558 _("undefined symbol %s used as an immediate value"),
6559 S_GET_NAME (fixP->fx_addsy));
6560 goto apply_fix_return;
6561 }
6562 fix_insn (fixP, flags, value);
6563 break;
6564
6565 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6566 if (fixP->fx_done || !seg->use_rela_p)
6567 {
6568 if (value & 3)
6569 as_bad_where (fixP->fx_file, fixP->fx_line,
6570 _("pc-relative load offset not word aligned"));
6571 if (signed_overflow (value, 21))
6572 as_bad_where (fixP->fx_file, fixP->fx_line,
6573 _("pc-relative load offset out of range"));
6574 insn = get_aarch64_insn (buf);
6575 insn |= encode_ld_lit_ofs_19 (value >> 2);
6576 put_aarch64_insn (buf, insn);
6577 }
6578 break;
6579
6580 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6581 if (fixP->fx_done || !seg->use_rela_p)
6582 {
6583 if (signed_overflow (value, 21))
6584 as_bad_where (fixP->fx_file, fixP->fx_line,
6585 _("pc-relative address offset out of range"));
6586 insn = get_aarch64_insn (buf);
6587 insn |= encode_adr_imm (value);
6588 put_aarch64_insn (buf, insn);
6589 }
6590 break;
6591
6592 case BFD_RELOC_AARCH64_BRANCH19:
6593 if (fixP->fx_done || !seg->use_rela_p)
6594 {
6595 if (value & 3)
6596 as_bad_where (fixP->fx_file, fixP->fx_line,
6597 _("conditional branch target not word aligned"));
6598 if (signed_overflow (value, 21))
6599 as_bad_where (fixP->fx_file, fixP->fx_line,
6600 _("conditional branch out of range"));
6601 insn = get_aarch64_insn (buf);
6602 insn |= encode_cond_branch_ofs_19 (value >> 2);
6603 put_aarch64_insn (buf, insn);
6604 }
6605 break;
6606
6607 case BFD_RELOC_AARCH64_TSTBR14:
6608 if (fixP->fx_done || !seg->use_rela_p)
6609 {
6610 if (value & 3)
6611 as_bad_where (fixP->fx_file, fixP->fx_line,
6612 _("conditional branch target not word aligned"));
6613 if (signed_overflow (value, 16))
6614 as_bad_where (fixP->fx_file, fixP->fx_line,
6615 _("conditional branch out of range"));
6616 insn = get_aarch64_insn (buf);
6617 insn |= encode_tst_branch_ofs_14 (value >> 2);
6618 put_aarch64_insn (buf, insn);
6619 }
6620 break;
6621
6622 case BFD_RELOC_AARCH64_CALL26:
6623 case BFD_RELOC_AARCH64_JUMP26:
6624 if (fixP->fx_done || !seg->use_rela_p)
6625 {
6626 if (value & 3)
6627 as_bad_where (fixP->fx_file, fixP->fx_line,
6628 _("branch target not word aligned"));
6629 if (signed_overflow (value, 28))
6630 as_bad_where (fixP->fx_file, fixP->fx_line,
6631 _("branch out of range"));
6632 insn = get_aarch64_insn (buf);
6633 insn |= encode_branch_ofs_26 (value >> 2);
6634 put_aarch64_insn (buf, insn);
6635 }
6636 break;
6637
6638 case BFD_RELOC_AARCH64_MOVW_G0:
6639 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6640 case BFD_RELOC_AARCH64_MOVW_G0_S:
6641 scale = 0;
6642 goto movw_common;
6643 case BFD_RELOC_AARCH64_MOVW_G1:
6644 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6645 case BFD_RELOC_AARCH64_MOVW_G1_S:
6646 scale = 16;
6647 goto movw_common;
6648 case BFD_RELOC_AARCH64_MOVW_G2:
6649 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6650 case BFD_RELOC_AARCH64_MOVW_G2_S:
6651 scale = 32;
6652 goto movw_common;
6653 case BFD_RELOC_AARCH64_MOVW_G3:
6654 scale = 48;
6655 movw_common:
6656 if (fixP->fx_done || !seg->use_rela_p)
6657 {
6658 insn = get_aarch64_insn (buf);
6659
6660 if (!fixP->fx_done)
6661 {
6662 /* REL signed addend must fit in 16 bits */
6663 if (signed_overflow (value, 16))
6664 as_bad_where (fixP->fx_file, fixP->fx_line,
6665 _("offset out of range"));
6666 }
6667 else
6668 {
6669 /* Check for overflow and scale. */
6670 switch (fixP->fx_r_type)
6671 {
6672 case BFD_RELOC_AARCH64_MOVW_G0:
6673 case BFD_RELOC_AARCH64_MOVW_G1:
6674 case BFD_RELOC_AARCH64_MOVW_G2:
6675 case BFD_RELOC_AARCH64_MOVW_G3:
6676 if (unsigned_overflow (value, scale + 16))
6677 as_bad_where (fixP->fx_file, fixP->fx_line,
6678 _("unsigned value out of range"));
6679 break;
6680 case BFD_RELOC_AARCH64_MOVW_G0_S:
6681 case BFD_RELOC_AARCH64_MOVW_G1_S:
6682 case BFD_RELOC_AARCH64_MOVW_G2_S:
6683 /* NOTE: We can only come here with movz or movn. */
6684 if (signed_overflow (value, scale + 16))
6685 as_bad_where (fixP->fx_file, fixP->fx_line,
6686 _("signed value out of range"));
6687 if (value < 0)
6688 {
6689 /* Force use of MOVN. */
6690 value = ~value;
6691 insn = reencode_movzn_to_movn (insn);
6692 }
6693 else
6694 {
6695 /* Force use of MOVZ. */
6696 insn = reencode_movzn_to_movz (insn);
6697 }
6698 break;
6699 default:
6700 /* Unchecked relocations. */
6701 break;
6702 }
6703 value >>= scale;
6704 }
6705
6706 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6707 insn |= encode_movw_imm (value & 0xffff);
6708
6709 put_aarch64_insn (buf, insn);
6710 }
6711 break;
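/* E.g. for a locally resolved G1 fixup the value is shifted right by 16
   before the low 16 bits are inserted, so a symbol value of 0x12345678
   ends up as a MOVZ/MOVK immediate of 0x1234. */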
6712
6713 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6714 fixP->fx_r_type = (ilp32_p
6715 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6716 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6717 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6718 /* Should always be exported to object file, see
6719 aarch64_force_relocation(). */
6720 gas_assert (!fixP->fx_done);
6721 gas_assert (seg->use_rela_p);
6722 break;
6723
6724 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6725 fixP->fx_r_type = (ilp32_p
6726 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6727 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6728 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6729 /* Should always be exported to object file, see
6730 aarch64_force_relocation(). */
6731 gas_assert (!fixP->fx_done);
6732 gas_assert (seg->use_rela_p);
6733 break;
6734
6735 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6736 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6737 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6738 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6739 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6740 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6741 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6742 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6743 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6744 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6745 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6746 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6747 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6748 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6749 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6750 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6751 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6752 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6753 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6754 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6755 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6756 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6757 /* Should always be exported to object file, see
6758 aarch64_force_relocation(). */
6759 gas_assert (!fixP->fx_done);
6760 gas_assert (seg->use_rela_p);
6761 break;
6762
6763 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6764 /* Should always be exported to object file, see
6765 aarch64_force_relocation(). */
6766 fixP->fx_r_type = (ilp32_p
6767 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6768 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6769 gas_assert (!fixP->fx_done);
6770 gas_assert (seg->use_rela_p);
6771 break;
6772
6773 case BFD_RELOC_AARCH64_ADD_LO12:
6774 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6775 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6776 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6777 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6778 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6779 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6780 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6781 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6782 case BFD_RELOC_AARCH64_LDST128_LO12:
6783 case BFD_RELOC_AARCH64_LDST16_LO12:
6784 case BFD_RELOC_AARCH64_LDST32_LO12:
6785 case BFD_RELOC_AARCH64_LDST64_LO12:
6786 case BFD_RELOC_AARCH64_LDST8_LO12:
6787 /* Should always be exported to object file, see
6788 aarch64_force_relocation(). */
6789 gas_assert (!fixP->fx_done);
6790 gas_assert (seg->use_rela_p);
6791 break;
6792
6793 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6794 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6795 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6796 break;
6797
6798 case BFD_RELOC_UNUSED:
6799 /* An error will already have been reported. */
6800 break;
6801
6802 default:
6803 as_bad_where (fixP->fx_file, fixP->fx_line,
6804 _("unexpected %s fixup"),
6805 bfd_get_reloc_code_name (fixP->fx_r_type));
6806 break;
6807 }
6808
6809 apply_fix_return:
6810 /* Free the allocated struct aarch64_inst.
6811 N.B. currently only a very limited number of fix-up types actually use
6812 this field, so the impact on performance should be minimal. */
6813 if (fixP->tc_fix_data.inst != NULL)
6814 free (fixP->tc_fix_data.inst);
6815
6816 return;
6817 }
6818
6819 /* Translate internal representation of relocation info to BFD target
6820 format. */
6821
6822 arelent *
6823 tc_gen_reloc (asection * section, fixS * fixp)
6824 {
6825 arelent *reloc;
6826 bfd_reloc_code_real_type code;
6827
6828 reloc = xmalloc (sizeof (arelent));
6829
6830 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6831 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6832 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6833
6834 if (fixp->fx_pcrel)
6835 {
6836 if (section->use_rela_p)
6837 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6838 else
6839 fixp->fx_offset = reloc->address;
6840 }
6841 reloc->addend = fixp->fx_offset;
6842
6843 code = fixp->fx_r_type;
6844 switch (code)
6845 {
6846 case BFD_RELOC_16:
6847 if (fixp->fx_pcrel)
6848 code = BFD_RELOC_16_PCREL;
6849 break;
6850
6851 case BFD_RELOC_32:
6852 if (fixp->fx_pcrel)
6853 code = BFD_RELOC_32_PCREL;
6854 break;
6855
6856 case BFD_RELOC_64:
6857 if (fixp->fx_pcrel)
6858 code = BFD_RELOC_64_PCREL;
6859 break;
6860
6861 default:
6862 break;
6863 }
6864
6865 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6866 if (reloc->howto == NULL)
6867 {
6868 as_bad_where (fixp->fx_file, fixp->fx_line,
6869 _
6870 ("cannot represent %s relocation in this object file format"),
6871 bfd_get_reloc_code_name (code));
6872 return NULL;
6873 }
6874
6875 return reloc;
6876 }
6877
6878 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6879
6880 void
6881 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6882 {
6883 bfd_reloc_code_real_type type;
6884 int pcrel = 0;
6885
6886 /* Pick a reloc.
6887 FIXME: @@ Should look at CPU word size. */
6888 switch (size)
6889 {
6890 case 1:
6891 type = BFD_RELOC_8;
6892 break;
6893 case 2:
6894 type = BFD_RELOC_16;
6895 break;
6896 case 4:
6897 type = BFD_RELOC_32;
6898 break;
6899 case 8:
6900 type = BFD_RELOC_64;
6901 break;
6902 default:
6903 as_bad (_("cannot do %u-byte relocation"), size);
6904 type = BFD_RELOC_UNUSED;
6905 break;
6906 }
6907
6908 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6909 }
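/* So, for instance, a data directive such as ".word sym" creates a 4-byte
   fixup with BFD_RELOC_32, while ".xword sym" creates an 8-byte fixup with
   BFD_RELOC_64. */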
6910
6911 int
6912 aarch64_force_relocation (struct fix *fixp)
6913 {
6914 switch (fixp->fx_r_type)
6915 {
6916 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6917 /* Perform these "immediate" internal relocations
6918 even if the symbol is extern or weak. */
6919 return 0;
6920
6921 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6922 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6923 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6924 /* Pseudo relocs that need to be fixed up according to
6925 ilp32_p. */
6926 return 0;
6927
6928 case BFD_RELOC_AARCH64_ADD_LO12:
6929 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6930 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6931 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6932 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6933 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6934 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6935 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6936 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6937 case BFD_RELOC_AARCH64_LDST128_LO12:
6938 case BFD_RELOC_AARCH64_LDST16_LO12:
6939 case BFD_RELOC_AARCH64_LDST32_LO12:
6940 case BFD_RELOC_AARCH64_LDST64_LO12:
6941 case BFD_RELOC_AARCH64_LDST8_LO12:
6942 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6943 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6944 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6945 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6946 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6947 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6948 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6949 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6950 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6951 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6952 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6953 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6954 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6955 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6956 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6957 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6958 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6959 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6960 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6961 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6962 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6963 /* Always leave these relocations for the linker. */
6964 return 1;
6965
6966 default:
6967 break;
6968 }
6969
6970 return generic_force_reloc (fixp);
6971 }
6972
6973 #ifdef OBJ_ELF
6974
6975 const char *
6976 elf64_aarch64_target_format (void)
6977 {
6978 if (target_big_endian)
6979 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
6980 else
6981 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
6982 }
6983
6984 void
6985 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6986 {
6987 elf_frob_symbol (symp, puntp);
6988 }
6989 #endif
6990
6991 /* MD interface: Finalization. */
6992
6993 /* A good place to do this, although this was probably not intended
6994 for this kind of use. We need to dump the literal pool before
6995 references are made to a null symbol pointer. */
6996
6997 void
6998 aarch64_cleanup (void)
6999 {
7000 literal_pool *pool;
7001
7002 for (pool = list_of_pools; pool; pool = pool->next)
7003 {
7004 /* Put it at the end of the relevant section. */
7005 subseg_set (pool->section, pool->sub_section);
7006 s_ltorg (0);
7007 }
7008 }
7009
7010 #ifdef OBJ_ELF
7011 /* Remove any excess mapping symbols generated for alignment frags in
7012 SEC. We may have created a mapping symbol before a zero byte
7013 alignment; remove it if there's a mapping symbol after the
7014 alignment. */
7015 static void
7016 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
7017 void *dummy ATTRIBUTE_UNUSED)
7018 {
7019 segment_info_type *seginfo = seg_info (sec);
7020 fragS *fragp;
7021
7022 if (seginfo == NULL || seginfo->frchainP == NULL)
7023 return;
7024
7025 for (fragp = seginfo->frchainP->frch_root;
7026 fragp != NULL; fragp = fragp->fr_next)
7027 {
7028 symbolS *sym = fragp->tc_frag_data.last_map;
7029 fragS *next = fragp->fr_next;
7030
7031 /* Variable-sized frags have been converted to fixed size by
7032 this point. But if this was variable-sized to start with,
7033 there will be a fixed-size frag after it. So don't handle
7034 next == NULL. */
7035 if (sym == NULL || next == NULL)
7036 continue;
7037
7038 if (S_GET_VALUE (sym) < next->fr_address)
7039 /* Not at the end of this frag. */
7040 continue;
7041 know (S_GET_VALUE (sym) == next->fr_address);
7042
7043 do
7044 {
7045 if (next->tc_frag_data.first_map != NULL)
7046 {
7047 /* Next frag starts with a mapping symbol. Discard this
7048 one. */
7049 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7050 break;
7051 }
7052
7053 if (next->fr_next == NULL)
7054 {
7055 /* This mapping symbol is at the end of the section. Discard
7056 it. */
7057 know (next->fr_fix == 0 && next->fr_var == 0);
7058 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7059 break;
7060 }
7061
7062 /* As long as we have empty frags without any mapping symbols,
7063 keep looking. */
7064 /* If the next frag is non-empty and does not start with a
7065 mapping symbol, then this mapping symbol is required. */
7066 if (next->fr_address != next->fr_next->fr_address)
7067 break;
7068
7069 next = next->fr_next;
7070 }
7071 while (next != NULL);
7072 }
7073 }
7074 #endif
7075
7076 /* Adjust the symbol table. */
7077
7078 void
7079 aarch64_adjust_symtab (void)
7080 {
7081 #ifdef OBJ_ELF
7082 /* Remove any overlapping mapping symbols generated by alignment frags. */
7083 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7084 /* Now do generic ELF adjustments. */
7085 elf_adjust_symtab ();
7086 #endif
7087 }
7088
7089 static void
7090 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7091 {
7092 const char *hash_err;
7093
7094 hash_err = hash_insert (table, key, value);
7095 if (hash_err)
7096 printf ("Internal Error: Can't hash %s\n", key);
7097 }
7098
7099 static void
7100 fill_instruction_hash_table (void)
7101 {
7102 aarch64_opcode *opcode = aarch64_opcode_table;
7103
7104 while (opcode->name != NULL)
7105 {
7106 templates *templ, *new_templ;
7107 templ = hash_find (aarch64_ops_hsh, opcode->name);
7108
7109 new_templ = (templates *) xmalloc (sizeof (templates));
7110 new_templ->opcode = opcode;
7111 new_templ->next = NULL;
7112
7113 if (!templ)
7114 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7115 else
7116 {
7117 new_templ->next = templ->next;
7118 templ->next = new_templ;
7119 }
7120 ++opcode;
7121 }
7122 }
7123
7124 static inline void
7125 convert_to_upper (char *dst, const char *src, size_t num)
7126 {
7127 unsigned int i;
7128 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7129 *dst = TOUPPER (*src);
7130 *dst = '\0';
7131 }
7132
7133 /* Assume STR points to a lower-case string; allocate, convert and return
7134 the corresponding upper-case string. */
7135 static inline const char*
7136 get_upper_str (const char *str)
7137 {
7138 char *ret;
7139 size_t len = strlen (str);
7140 if ((ret = xmalloc (len + 1)) == NULL)
7141 abort ();
7142 convert_to_upper (ret, str, len);
7143 return ret;
7144 }
7145
7146 /* MD interface: Initialization. */
7147
7148 void
7149 md_begin (void)
7150 {
7151 unsigned mach;
7152 unsigned int i;
7153
7154 if ((aarch64_ops_hsh = hash_new ()) == NULL
7155 || (aarch64_cond_hsh = hash_new ()) == NULL
7156 || (aarch64_shift_hsh = hash_new ()) == NULL
7157 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7158 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7159 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7160 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7161 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7162 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7163 || (aarch64_reg_hsh = hash_new ()) == NULL
7164 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7165 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7166 || (aarch64_pldop_hsh = hash_new ()) == NULL)
7167 as_fatal (_("virtual memory exhausted"));
7168
7169 fill_instruction_hash_table ();
7170
7171 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7172 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7173 (void *) (aarch64_sys_regs + i));
7174
7175 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7176 checked_hash_insert (aarch64_pstatefield_hsh,
7177 aarch64_pstatefields[i].name,
7178 (void *) (aarch64_pstatefields + i));
7179
7180 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
7181 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7182 aarch64_sys_regs_ic[i].template,
7183 (void *) (aarch64_sys_regs_ic + i));
7184
7185 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
7186 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7187 aarch64_sys_regs_dc[i].template,
7188 (void *) (aarch64_sys_regs_dc + i));
7189
7190 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
7191 checked_hash_insert (aarch64_sys_regs_at_hsh,
7192 aarch64_sys_regs_at[i].template,
7193 (void *) (aarch64_sys_regs_at + i));
7194
7195 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
7196 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7197 aarch64_sys_regs_tlbi[i].template,
7198 (void *) (aarch64_sys_regs_tlbi + i));
7199
7200 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7201 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7202 (void *) (reg_names + i));
7203
7204 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7205 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7206 (void *) (nzcv_names + i));
7207
7208 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7209 {
7210 const char *name = aarch64_operand_modifiers[i].name;
7211 checked_hash_insert (aarch64_shift_hsh, name,
7212 (void *) (aarch64_operand_modifiers + i));
7213 /* Also hash the name in the upper case. */
7214 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7215 (void *) (aarch64_operand_modifiers + i));
7216 }
7217
7218 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7219 {
7220 unsigned int j;
7221 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7222 the same condition code. */
7223 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7224 {
7225 const char *name = aarch64_conds[i].names[j];
7226 if (name == NULL)
7227 break;
7228 checked_hash_insert (aarch64_cond_hsh, name,
7229 (void *) (aarch64_conds + i));
7230 /* Also hash the name in the upper case. */
7231 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7232 (void *) (aarch64_conds + i));
7233 }
7234 }
7235
7236 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7237 {
7238 const char *name = aarch64_barrier_options[i].name;
7239 /* Skip xx00 - the unallocated values of the option field. */
7240 if ((i & 0x3) == 0)
7241 continue;
7242 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7243 (void *) (aarch64_barrier_options + i));
7244 /* Also hash the name in the upper case. */
7245 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7246 (void *) (aarch64_barrier_options + i));
7247 }
7248
7249 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7250 {
7251 const char* name = aarch64_prfops[i].name;
7252 /* Skip the unallocated hint encodings. */
7253 if (name == NULL)
7254 continue;
7255 checked_hash_insert (aarch64_pldop_hsh, name,
7256 (void *) (aarch64_prfops + i));
7257 /* Also hash the name in the upper case. */
7258 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7259 (void *) (aarch64_prfops + i));
7260 }
7261
7262 /* Set the cpu variant based on the command-line options. */
7263 if (!mcpu_cpu_opt)
7264 mcpu_cpu_opt = march_cpu_opt;
7265
7266 if (!mcpu_cpu_opt)
7267 mcpu_cpu_opt = &cpu_default;
7268
7269 cpu_variant = *mcpu_cpu_opt;
7270
7271 /* Record the CPU type. */
7272 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7273
7274 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7275 }
7276
7277 /* Command line processing. */
7278
7279 const char *md_shortopts = "m:";
7280
7281 #ifdef AARCH64_BI_ENDIAN
7282 #define OPTION_EB (OPTION_MD_BASE + 0)
7283 #define OPTION_EL (OPTION_MD_BASE + 1)
7284 #else
7285 #if TARGET_BYTES_BIG_ENDIAN
7286 #define OPTION_EB (OPTION_MD_BASE + 0)
7287 #else
7288 #define OPTION_EL (OPTION_MD_BASE + 1)
7289 #endif
7290 #endif
7291
7292 struct option md_longopts[] = {
7293 #ifdef OPTION_EB
7294 {"EB", no_argument, NULL, OPTION_EB},
7295 #endif
7296 #ifdef OPTION_EL
7297 {"EL", no_argument, NULL, OPTION_EL},
7298 #endif
7299 {NULL, no_argument, NULL, 0}
7300 };
7301
7302 size_t md_longopts_size = sizeof (md_longopts);
7303
7304 struct aarch64_option_table
7305 {
7306 char *option; /* Option name to match. */
7307 char *help; /* Help information. */
7308 int *var; /* Variable to change. */
7309 int value; /* What to change it to. */
7310 char *deprecated; /* If non-null, print this message. */
7311 };
7312
7313 static struct aarch64_option_table aarch64_opts[] = {
7314 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7315 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7316 NULL},
7317 #ifdef DEBUG_AARCH64
7318 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7319 #endif /* DEBUG_AARCH64 */
7320 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7321 NULL},
7322 {"mno-verbose-error", N_("do not output verbose error messages"),
7323 &verbose_error_p, 0, NULL},
7324 {NULL, NULL, NULL, 0, NULL}
7325 };
7326
7327 struct aarch64_cpu_option_table
7328 {
7329 char *name;
7330 const aarch64_feature_set value;
7331 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7332 case. */
7333 const char *canonical_name;
7334 };
7335
7336 /* This list should, at a minimum, contain all the cpu names
7337 recognized by GCC. */
7338 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7339 {"all", AARCH64_ANY, NULL},
7340 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7341 AARCH64_FEATURE_CRC), "Cortex-A53"},
7342 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7343 AARCH64_FEATURE_CRC), "Cortex-A57"},
7344 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7345 AARCH64_FEATURE_CRC), "Cortex-A72"},
7346 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
7347 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7348 "Samsung Exynos M1"},
7349 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7350 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7351 "Cavium ThunderX"},
7352 /* The 'xgene-1' name is an older name for 'xgene1', which was used
7353 in earlier releases and is superseded by 'xgene1' in all
7354 tools. */
7355 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7356 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7357 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7358 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7359 {"generic", AARCH64_ARCH_V8, NULL},
7360
7361 {NULL, AARCH64_ARCH_NONE, NULL}
7362 };
7363
7364 struct aarch64_arch_option_table
7365 {
7366 char *name;
7367 const aarch64_feature_set value;
7368 };
7369
7370 /* This list should, at a minimum, contain all the architecture names
7371 recognized by GCC. */
7372 static const struct aarch64_arch_option_table aarch64_archs[] = {
7373 {"all", AARCH64_ANY},
7374 {"armv8-a", AARCH64_ARCH_V8},
7375 {NULL, AARCH64_ARCH_NONE}
7376 };
7377
7378 /* ISA extensions. */
7379 struct aarch64_option_cpu_value_table
7380 {
7381 char *name;
7382 const aarch64_feature_set value;
7383 };
7384
7385 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7386 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7387 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7388 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7389 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
7390 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7391 {NULL, AARCH64_ARCH_NONE}
7392 };
7393
7394 struct aarch64_long_option_table
7395 {
7396 char *option; /* Substring to match. */
7397 char *help; /* Help information. */
7398 int (*func) (char *subopt); /* Function to decode sub-option. */
7399 char *deprecated; /* If non-null, print this message. */
7400 };
7401
7402 static int
7403 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p,
7404 bfd_boolean ext_only)
7405 {
7406 /* We insist on extensions being added before being removed. We achieve
7407 this by using the ADDING_VALUE variable to indicate whether we are
7408 adding an extension (1) or removing it (0) and only allowing it to
7409 change in the order -1 -> 1 -> 0. */
7410 int adding_value = -1;
7411 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7412
7413 /* Copy the feature set, so that we can modify it. */
7414 *ext_set = **opt_p;
7415 *opt_p = ext_set;
7416
7417 while (str != NULL && *str != 0)
7418 {
7419 const struct aarch64_option_cpu_value_table *opt;
7420 char *ext = NULL;
7421 int optlen;
7422
7423 if (!ext_only)
7424 {
7425 if (*str != '+')
7426 {
7427 as_bad (_("invalid architectural extension"));
7428 return 0;
7429 }
7430
7431 ext = strchr (++str, '+');
7432 }
7433
7434 if (ext != NULL)
7435 optlen = ext - str;
7436 else
7437 optlen = strlen (str);
7438
7439 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7440 {
7441 if (adding_value != 0)
7442 adding_value = 0;
7443 optlen -= 2;
7444 str += 2;
7445 }
7446 else if (optlen > 0)
7447 {
7448 if (adding_value == -1)
7449 adding_value = 1;
7450 else if (adding_value != 1)
7451 {
7452 as_bad (_("must specify extensions to add before specifying "
7453 "those to remove"));
7454 return FALSE;
7455 }
7456 }
7457
7458 if (optlen == 0)
7459 {
7460 as_bad (_("missing architectural extension"));
7461 return 0;
7462 }
7463
7464 gas_assert (adding_value != -1);
7465
7466 for (opt = aarch64_features; opt->name != NULL; opt++)
7467 if (strncmp (opt->name, str, optlen) == 0)
7468 {
7469 /* Add or remove the extension. */
7470 if (adding_value)
7471 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7472 else
7473 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7474 break;
7475 }
7476
7477 if (opt->name == NULL)
7478 {
7479 as_bad (_("unknown architectural extension `%s'"), str);
7480 return 0;
7481 }
7482
7483 str = ext;
7484 }
7485
7486 return 1;
7487 }
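/* For example, given "-march=armv8-a+crc+nocrypto" the sub-string
   "+crc+nocrypto" is passed here: "crc" is merged into the feature set
   first, then "crypto" is cleared. Writing "+nocrypto+crc" instead is
   rejected, because additions must precede removals. */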
7488
7489 static int
7490 aarch64_parse_cpu (char *str)
7491 {
7492 const struct aarch64_cpu_option_table *opt;
7493 char *ext = strchr (str, '+');
7494 size_t optlen;
7495
7496 if (ext != NULL)
7497 optlen = ext - str;
7498 else
7499 optlen = strlen (str);
7500
7501 if (optlen == 0)
7502 {
7503 as_bad (_("missing cpu name `%s'"), str);
7504 return 0;
7505 }
7506
7507 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7508 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7509 {
7510 mcpu_cpu_opt = &opt->value;
7511 if (ext != NULL)
7512 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7513
7514 return 1;
7515 }
7516
7517 as_bad (_("unknown cpu `%s'"), str);
7518 return 0;
7519 }
7520
7521 static int
7522 aarch64_parse_arch (char *str)
7523 {
7524 const struct aarch64_arch_option_table *opt;
7525 char *ext = strchr (str, '+');
7526 size_t optlen;
7527
7528 if (ext != NULL)
7529 optlen = ext - str;
7530 else
7531 optlen = strlen (str);
7532
7533 if (optlen == 0)
7534 {
7535 as_bad (_("missing architecture name `%s'"), str);
7536 return 0;
7537 }
7538
7539 for (opt = aarch64_archs; opt->name != NULL; opt++)
7540 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7541 {
7542 march_cpu_opt = &opt->value;
7543 if (ext != NULL)
7544 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
7545
7546 return 1;
7547 }
7548
7549 as_bad (_("unknown architecture `%s'\n"), str);
7550 return 0;
7551 }
7552
7553 /* ABIs. */
7554 struct aarch64_option_abi_value_table
7555 {
7556 char *name;
7557 enum aarch64_abi_type value;
7558 };
7559
7560 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7561 {"ilp32", AARCH64_ABI_ILP32},
7562 {"lp64", AARCH64_ABI_LP64},
7563 {NULL, 0}
7564 };
7565
7566 static int
7567 aarch64_parse_abi (char *str)
7568 {
7569 const struct aarch64_option_abi_value_table *opt;
7570 size_t optlen = strlen (str);
7571
7572 if (optlen == 0)
7573 {
7574 as_bad (_("missing abi name `%s'"), str);
7575 return 0;
7576 }
7577
7578 for (opt = aarch64_abis; opt->name != NULL; opt++)
7579 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7580 {
7581 aarch64_abi = opt->value;
7582 return 1;
7583 }
7584
7585 as_bad (_("unknown abi `%s'\n"), str);
7586 return 0;
7587 }
7588
7589 static struct aarch64_long_option_table aarch64_long_opts[] = {
7590 #ifdef OBJ_ELF
7591 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
7592 aarch64_parse_abi, NULL},
7593 #endif /* OBJ_ELF */
7594 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7595 aarch64_parse_cpu, NULL},
7596 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7597 aarch64_parse_arch, NULL},
7598 {NULL, NULL, 0, NULL}
7599 };
7600
7601 int
7602 md_parse_option (int c, char *arg)
7603 {
7604 struct aarch64_option_table *opt;
7605 struct aarch64_long_option_table *lopt;
7606
7607 switch (c)
7608 {
7609 #ifdef OPTION_EB
7610 case OPTION_EB:
7611 target_big_endian = 1;
7612 break;
7613 #endif
7614
7615 #ifdef OPTION_EL
7616 case OPTION_EL:
7617 target_big_endian = 0;
7618 break;
7619 #endif
7620
7621 case 'a':
7622 /* Listing option. Just ignore these, we don't support additional
7623 ones. */
7624 return 0;
7625
7626 default:
7627 for (opt = aarch64_opts; opt->option != NULL; opt++)
7628 {
7629 if (c == opt->option[0]
7630 && ((arg == NULL && opt->option[1] == 0)
7631 || streq (arg, opt->option + 1)))
7632 {
7633 /* If the option is deprecated, tell the user. */
7634 if (opt->deprecated != NULL)
7635 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7636 arg ? arg : "", _(opt->deprecated));
7637
7638 if (opt->var != NULL)
7639 *opt->var = opt->value;
7640
7641 return 1;
7642 }
7643 }
7644
7645 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7646 {
7647 /* These options are expected to have an argument. */
7648 if (c == lopt->option[0]
7649 && arg != NULL
7650 && strncmp (arg, lopt->option + 1,
7651 strlen (lopt->option + 1)) == 0)
7652 {
7653 /* If the option is deprecated, tell the user. */
7654 if (lopt->deprecated != NULL)
7655 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7656 _(lopt->deprecated));
7657
7658 /* Call the sub-option parser. */
7659 return lopt->func (arg + strlen (lopt->option) - 1);
7660 }
7661 }
7662
7663 return 0;
7664 }
7665
7666 return 1;
7667 }
7668
7669 void
7670 md_show_usage (FILE * fp)
7671 {
7672 struct aarch64_option_table *opt;
7673 struct aarch64_long_option_table *lopt;
7674
7675 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7676
7677 for (opt = aarch64_opts; opt->option != NULL; opt++)
7678 if (opt->help != NULL)
7679 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7680
7681 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7682 if (lopt->help != NULL)
7683 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7684
7685 #ifdef OPTION_EB
7686 fprintf (fp, _("\
7687 -EB assemble code for a big-endian cpu\n"));
7688 #endif
7689
7690 #ifdef OPTION_EL
7691 fprintf (fp, _("\
7692 -EL assemble code for a little-endian cpu\n"));
7693 #endif
7694 }
7695
7696 /* Parse a .cpu directive. */
7697
7698 static void
7699 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7700 {
7701 const struct aarch64_cpu_option_table *opt;
7702 char saved_char;
7703 char *name;
7704 char *ext;
7705 size_t optlen;
7706
7707 name = input_line_pointer;
7708 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7709 input_line_pointer++;
7710 saved_char = *input_line_pointer;
7711 *input_line_pointer = 0;
7712
7713 ext = strchr (name, '+');
7714
7715 if (ext != NULL)
7716 optlen = ext - name;
7717 else
7718 optlen = strlen (name);
7719
7720 /* Skip the first "all" entry. */
7721 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7722 if (strlen (opt->name) == optlen
7723 && strncmp (name, opt->name, optlen) == 0)
7724 {
7725 mcpu_cpu_opt = &opt->value;
7726 if (ext != NULL)
7727 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7728 return;
7729
7730 cpu_variant = *mcpu_cpu_opt;
7731
7732 *input_line_pointer = saved_char;
7733 demand_empty_rest_of_line ();
7734 return;
7735 }
7736 as_bad (_("unknown cpu `%s'"), name);
7737 *input_line_pointer = saved_char;
7738 ignore_rest_of_line ();
7739 }
7740
7741
7742 /* Parse a .arch directive. */
7743
7744 static void
7745 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7746 {
7747 const struct aarch64_arch_option_table *opt;
7748 char saved_char;
7749 char *name;
7750 char *ext;
7751 size_t optlen;
7752
7753 name = input_line_pointer;
7754 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7755 input_line_pointer++;
7756 saved_char = *input_line_pointer;
7757 *input_line_pointer = 0;
7758
7759 ext = strchr (name, '+');
7760
7761 if (ext != NULL)
7762 optlen = ext - name;
7763 else
7764 optlen = strlen (name);
7765
7766 /* Skip the first "all" entry. */
7767 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7768 if (strlen (opt->name) == optlen
7769 && strncmp (name, opt->name, optlen) == 0)
7770 {
7771 mcpu_cpu_opt = &opt->value;
7772 if (ext != NULL)
7773 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7774 return;
7775
7776 cpu_variant = *mcpu_cpu_opt;
7777
7778 *input_line_pointer = saved_char;
7779 demand_empty_rest_of_line ();
7780 return;
7781 }
7782
7783 as_bad (_("unknown architecture `%s'\n"), name);
7784 *input_line_pointer = saved_char;
7785 ignore_rest_of_line ();
7786 }
7787
7788 /* Parse a .arch_extension directive. */
7789
7790 static void
7791 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
7792 {
7793 char saved_char;
7794 char *ext = input_line_pointer;
7795
7796 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7797 input_line_pointer++;
7798 saved_char = *input_line_pointer;
7799 *input_line_pointer = 0;
7800
7801 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
7802 return;
7803
7804 cpu_variant = *mcpu_cpu_opt;
7805
7806 *input_line_pointer = saved_char;
7807 demand_empty_rest_of_line ();
7808 }
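/* A typical use in source is:

     .arch_extension crc
     crc32b w0, w1, w2
     .arch_extension nocrc

   which makes the CRC instructions available only in the region between
   the two directives. */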
7809
7810 /* Copy symbol information. */
7811
7812 void
7813 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7814 {
7815 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7816 }