[AArch64][1/6] GAS support BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
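/* Note: for ELF targets the ABI is normally selected on the command line
   with -mabi=lp64 (the default) or -mabi=ilp32.  */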
77 #endif
78
79 enum neon_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q
87 };
88
89 /* Bits for DEFINED field in neon_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92
93 struct neon_type_el
94 {
95 enum neon_el_type type;
96 unsigned char defined;
97 unsigned width;
98 int64_t index;
99 };
100
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
102
103 struct reloc
104 {
105 bfd_reloc_code_real_type type;
106 expressionS exp;
107 int pc_rel;
108 enum aarch64_opnd opnd;
109 uint32_t flags;
110 unsigned need_libopcodes_p : 1;
111 };
112
113 struct aarch64_instruction
114 {
115 /* libopcodes structure for instruction intermediate representation. */
116 aarch64_inst base;
117 /* Record assembly errors found during the parsing. */
118 struct
119 {
120 enum aarch64_operand_error_kind kind;
121 const char *error;
122 } parsing_error;
123 /* The condition that appears in the assembly line. */
124 int cond;
125 /* Relocation information (including the GAS internal fixup). */
126 struct reloc reloc;
127 /* Need to generate an immediate in the literal pool. */
128 unsigned gen_lit_pool : 1;
129 };
130
131 typedef struct aarch64_instruction aarch64_instruction;
132
133 static aarch64_instruction inst;
134
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
138 /* Diagnostics inline function utilities.
139
140 These are lightweight utilities which should only be called by parse_operands
141 and other parsers. GAS processes each assembly line by parsing it against
142 instruction template(s), in the case of multiple templates (for the same
143 mnemonic name), those templates are tried one by one until one succeeds or
144 all fail. An assembly line may fail a few templates before being
145 successfully parsed; an error saved here in most cases is not a user error
146 but an error indicating the current template is not the right template.
147 Therefore it is very important that errors can be saved at a low cost during
148 the parsing; we don't want to slow down the whole parsing by recording
149 non-user errors in detail.
150
151 Remember that the objective is to help GAS pick up the most appropriate
152 error message in the case of multiple templates, e.g. FMOV which has 8
153 templates. */
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline enum aarch64_operand_error_kind
175 get_error_kind (void)
176 {
177 return inst.parsing_error.kind;
178 }
179
180 static inline void
181 set_error (enum aarch64_operand_error_kind kind, const char *error)
182 {
183 inst.parsing_error.kind = kind;
184 inst.parsing_error.error = error;
185 }
186
187 static inline void
188 set_recoverable_error (const char *error)
189 {
190 set_error (AARCH64_OPDE_RECOVERABLE, error);
191 }
192
193 /* Use the DESC field of the corresponding aarch64_operand entry to compose
194 the error message. */
195 static inline void
196 set_default_error (void)
197 {
198 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
199 }
200
201 static inline void
202 set_syntax_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
205 }
206
207 static inline void
208 set_first_syntax_error (const char *error)
209 {
210 if (! error_p ())
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
212 }
213
214 static inline void
215 set_fatal_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
218 }
219 \f
220 /* Number of littlenums required to hold an extended precision number. */
221 #define MAX_LITTLENUMS 6
222
223 /* Return value for certain parsers when the parsing fails; those parsers
224 return the information of the parsed result, e.g. register number, on
225 success. */
226 #define PARSE_FAIL -1
227
228 /* This is an invalid condition code that means no conditional field is
229 present. */
230 #define COND_ALWAYS 0x10
231
232 typedef struct
233 {
234 const char *template;
235 unsigned long value;
236 } asm_barrier_opt;
237
238 typedef struct
239 {
240 const char *template;
241 uint32_t value;
242 } asm_nzcv;
243
244 struct reloc_entry
245 {
246 char *name;
247 bfd_reloc_code_real_type reloc;
248 };
249
250 /* Structure for a hash table entry for a register. */
251 typedef struct
252 {
253 const char *name;
254 unsigned char number;
255 unsigned char type;
256 unsigned char builtin;
257 } reg_entry;
258
259 /* Macros to define the register types and masks for the purpose
260 of parsing. */
261
262 #undef AARCH64_REG_TYPES
263 #define AARCH64_REG_TYPES \
264 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
265 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
266 BASIC_REG_TYPE(SP_32) /* wsp */ \
267 BASIC_REG_TYPE(SP_64) /* sp */ \
268 BASIC_REG_TYPE(Z_32) /* wzr */ \
269 BASIC_REG_TYPE(Z_64) /* xzr */ \
270 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
271 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
272 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
273 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
274 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
275 BASIC_REG_TYPE(CN) /* c[0-7] */ \
276 BASIC_REG_TYPE(VN) /* v[0-31] */ \
277 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
278 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
279 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
280 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
281 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
282 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
283 /* Typecheck: any [BHSDQ]P FP. */ \
284 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
285 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
286 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
287 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
289 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
290 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
291 /* Any integer register; used for error messages only. */ \
292 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
293 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Pseudo type to mark the end of the enumerator sequence. */ \
296 BASIC_REG_TYPE(MAX)
297
298 #undef BASIC_REG_TYPE
299 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
300 #undef MULTI_REG_TYPE
301 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
302
303 /* Register type enumerators. */
304 typedef enum
305 {
306 /* A list of REG_TYPE_*. */
307 AARCH64_REG_TYPES
308 } aarch64_reg_type;
309
310 #undef BASIC_REG_TYPE
311 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
312 #undef REG_TYPE
313 #define REG_TYPE(T) (1 << REG_TYPE_##T)
314 #undef MULTI_REG_TYPE
315 #define MULTI_REG_TYPE(T,V) V,
316
317 /* Values indexed by aarch64_reg_type to assist the type checking. */
318 static const unsigned reg_type_masks[] =
319 {
320 AARCH64_REG_TYPES
321 };
322
323 #undef BASIC_REG_TYPE
324 #undef REG_TYPE
325 #undef MULTI_REG_TYPE
326 #undef AARCH64_REG_TYPES
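/* As an illustration of the X-macro expansion above: BASIC_REG_TYPE(R_64)
   contributes the enumerator REG_TYPE_R_64 and the mask bit
   (1 << REG_TYPE_R_64), while MULTI_REG_TYPE(R64_SP, ...) gives
     reg_type_masks[REG_TYPE_R64_SP]
       == (1 << REG_TYPE_R_64) | (1 << REG_TYPE_SP_64),
   i.e. a type that accepts X registers and SP but not XZR.  */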
327
328 /* Diagnostics used when we don't get a register of the expected type.
329 Note: this has to be synchronized with the aarch64_reg_type definitions
330 above. */
331 static const char *
332 get_reg_expected_msg (aarch64_reg_type reg_type)
333 {
334 const char *msg;
335
336 switch (reg_type)
337 {
338 case REG_TYPE_R_32:
339 msg = N_("integer 32-bit register expected");
340 break;
341 case REG_TYPE_R_64:
342 msg = N_("integer 64-bit register expected");
343 break;
344 case REG_TYPE_R_N:
345 msg = N_("integer register expected");
346 break;
347 case REG_TYPE_R_Z_SP:
348 msg = N_("integer, zero or SP register expected");
349 break;
350 case REG_TYPE_FP_B:
351 msg = N_("8-bit SIMD scalar register expected");
352 break;
353 case REG_TYPE_FP_H:
354 msg = N_("16-bit SIMD scalar or floating-point half precision "
355 "register expected");
356 break;
357 case REG_TYPE_FP_S:
358 msg = N_("32-bit SIMD scalar or floating-point single precision "
359 "register expected");
360 break;
361 case REG_TYPE_FP_D:
362 msg = N_("64-bit SIMD scalar or floating-point double precision "
363 "register expected");
364 break;
365 case REG_TYPE_FP_Q:
366 msg = N_("128-bit SIMD scalar or floating-point quad precision "
367 "register expected");
368 break;
369 case REG_TYPE_CN:
370 msg = N_("C0 - C15 expected");
371 break;
372 case REG_TYPE_R_Z_BHSDQ_V:
373 msg = N_("register expected");
374 break;
375 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
376 msg = N_("SIMD scalar or floating-point register expected");
377 break;
378 case REG_TYPE_VN: /* any V reg */
379 msg = N_("vector register expected");
380 break;
381 default:
382 as_fatal (_("invalid register type %d"), reg_type);
383 }
384 return msg;
385 }
386
387 /* Some well known registers that we refer to directly elsewhere. */
388 #define REG_SP 31
389
390 /* Instructions take 4 bytes in the object file. */
391 #define INSN_SIZE 4
392
393 /* Define some common error messages. */
394 #define BAD_SP _("SP not allowed here")
395
396 static struct hash_control *aarch64_ops_hsh;
397 static struct hash_control *aarch64_cond_hsh;
398 static struct hash_control *aarch64_shift_hsh;
399 static struct hash_control *aarch64_sys_regs_hsh;
400 static struct hash_control *aarch64_pstatefield_hsh;
401 static struct hash_control *aarch64_sys_regs_ic_hsh;
402 static struct hash_control *aarch64_sys_regs_dc_hsh;
403 static struct hash_control *aarch64_sys_regs_at_hsh;
404 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
405 static struct hash_control *aarch64_reg_hsh;
406 static struct hash_control *aarch64_barrier_opt_hsh;
407 static struct hash_control *aarch64_nzcv_hsh;
408 static struct hash_control *aarch64_pldop_hsh;
409
410 /* Stuff needed to resolve the label ambiguity
411 As:
412 ...
413 label: <insn>
414 may differ from:
415 ...
416 label:
417 <insn> */
418
419 static symbolS *last_label_seen;
420
421 /* Literal pool structure. Held on a per-section
422 and per-sub-section basis. */
423
424 #define MAX_LITERAL_POOL_SIZE 1024
425 typedef struct literal_expression
426 {
427 expressionS exp;
428 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
429 LITTLENUM_TYPE * bignum;
430 } literal_expression;
431
432 typedef struct literal_pool
433 {
434 literal_expression literals[MAX_LITERAL_POOL_SIZE];
435 unsigned int next_free_entry;
436 unsigned int id;
437 symbolS *symbol;
438 segT section;
439 subsegT sub_section;
440 int size;
441 struct literal_pool *next;
442 } literal_pool;
443
444 /* Pointer to a linked list of literal pools. */
445 static literal_pool *list_of_pools = NULL;
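/* Literal pools hold out-of-line constants, e.g. those created for a load
   written as "ldr x0, =0x1122334455667788"; their contents are emitted when
   a .ltorg (or .pool) directive is processed.  */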
446 \f
447 /* Pure syntax. */
448
449 /* This array holds the chars that always start a comment. If the
450 pre-processor is disabled, these aren't very useful. */
451 const char comment_chars[] = "";
452
453 /* This array holds the chars that only start a comment at the beginning of
454 a line. If the line seems to have the form '# 123 filename'
455 .line and .file directives will appear in the pre-processed output. */
456 /* Note that input_file.c hand checks for '#' at the beginning of the
457 first line of the input file. This is because the compiler outputs
458 #NO_APP at the beginning of its output. */
459 /* Also note that comments like this one will always work. */
460 const char line_comment_chars[] = "#";
461
462 const char line_separator_chars[] = ";";
463
464 /* Chars that can be used to separate mant
465 from exp in floating point numbers. */
466 const char EXP_CHARS[] = "eE";
467
468 /* Chars that mean this number is a floating point constant. */
469 /* As in 0f12.456 */
470 /* or 0d1.2345e12 */
471
472 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
473
474 /* Prefix character that indicates the start of an immediate value. */
475 #define is_immediate_prefix(C) ((C) == '#')
476
477 /* Separator character handling. */
478
479 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
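/* A single space is enough here: the input scrubber has already collapsed
   runs of whitespace to one space (create_register_alias below relies on
   the same assumption).  */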
480
481 static inline bfd_boolean
482 skip_past_char (char **str, char c)
483 {
484 if (**str == c)
485 {
486 (*str)++;
487 return TRUE;
488 }
489 else
490 return FALSE;
491 }
492
493 #define skip_past_comma(str) skip_past_char (str, ',')
494
495 /* Arithmetic expressions (possibly involving symbols). */
496
497 static bfd_boolean in_my_get_expression_p = FALSE;
498
499 /* Third argument to my_get_expression. */
500 #define GE_NO_PREFIX 0
501 #define GE_OPT_PREFIX 1
502
503 /* Return TRUE if the string pointed to by *STR is successfully parsed
504 as a valid expression; *EP will be filled with the information of
505 such an expression. Otherwise return FALSE. */
506
507 static bfd_boolean
508 my_get_expression (expressionS * ep, char **str, int prefix_mode,
509 int reject_absent)
510 {
511 char *save_in;
512 segT seg;
513 int prefix_present_p = 0;
514
515 switch (prefix_mode)
516 {
517 case GE_NO_PREFIX:
518 break;
519 case GE_OPT_PREFIX:
520 if (is_immediate_prefix (**str))
521 {
522 (*str)++;
523 prefix_present_p = 1;
524 }
525 break;
526 default:
527 abort ();
528 }
529
530 memset (ep, 0, sizeof (expressionS));
531
532 save_in = input_line_pointer;
533 input_line_pointer = *str;
534 in_my_get_expression_p = TRUE;
535 seg = expression (ep);
536 in_my_get_expression_p = FALSE;
537
538 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
539 {
540 /* We found a bad expression in md_operand(). */
541 *str = input_line_pointer;
542 input_line_pointer = save_in;
543 if (prefix_present_p && ! error_p ())
544 set_fatal_syntax_error (_("bad expression"));
545 else
546 set_first_syntax_error (_("bad expression"));
547 return FALSE;
548 }
549
550 #ifdef OBJ_AOUT
551 if (seg != absolute_section
552 && seg != text_section
553 && seg != data_section
554 && seg != bss_section && seg != undefined_section)
555 {
556 set_syntax_error (_("bad segment"));
557 *str = input_line_pointer;
558 input_line_pointer = save_in;
559 return FALSE;
560 }
561 #else
562 (void) seg;
563 #endif
564
565 *str = input_line_pointer;
566 input_line_pointer = save_in;
567 return TRUE;
568 }
569
570 /* Turn a string in input_line_pointer into a floating point constant
571 of type TYPE, and store the appropriate bytes in *LITP. The number
572 of LITTLENUMS emitted is stored in *SIZEP. An error message is
573 returned, or NULL on OK. */
574
575 char *
576 md_atof (int type, char *litP, int *sizeP)
577 {
578 return ieee_md_atof (type, litP, sizeP, target_big_endian);
579 }
580
581 /* We handle all bad expressions here, so that we can report the faulty
582 instruction in the error message. */
583 void
584 md_operand (expressionS * exp)
585 {
586 if (in_my_get_expression_p)
587 exp->X_op = O_illegal;
588 }
589
590 /* Immediate values. */
591
592 /* Errors may be set multiple times during parsing or bit encoding
593 (particularly in the Neon bits), but usually the earliest error which is set
594 will be the most meaningful. Avoid overwriting it with later (cascading)
595 errors by calling this function. */
596
597 static void
598 first_error (const char *error)
599 {
600 if (! error_p ())
601 set_syntax_error (error);
602 }
603
604 /* Similar to first_error, but this function accepts a formatted error
605 message. */
606 static void
607 first_error_fmt (const char *format, ...)
608 {
609 va_list args;
610 enum
611 { size = 100 };
612 /* N.B. this single buffer will not cause error messages for different
613 instructions to pollute each other; this is because at the end of
614 processing of each assembly line, any error message will be
615 collected by as_bad. */
616 static char buffer[size];
617
618 if (! error_p ())
619 {
620 int ret ATTRIBUTE_UNUSED;
621 va_start (args, format);
622 ret = vsnprintf (buffer, size, format, args);
623 know (ret <= size - 1 && ret >= 0);
624 va_end (args);
625 set_syntax_error (buffer);
626 }
627 }
628
629 /* Register parsing. */
630
631 /* Generic register parser which is called by other specialized
632 register parsers.
633 CCP points to what should be the beginning of a register name.
634 If it is indeed a valid register name, advance CCP over it and
635 return the reg_entry structure; otherwise return NULL.
636 It does not issue diagnostics. */
637
638 static reg_entry *
639 parse_reg (char **ccp)
640 {
641 char *start = *ccp;
642 char *p;
643 reg_entry *reg;
644
645 #ifdef REGISTER_PREFIX
646 if (*start != REGISTER_PREFIX)
647 return NULL;
648 start++;
649 #endif
650
651 p = start;
652 if (!ISALPHA (*p) || !is_name_beginner (*p))
653 return NULL;
654
655 do
656 p++;
657 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
658
659 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
660
661 if (!reg)
662 return NULL;
663
664 *ccp = p;
665 return reg;
666 }
667
668 /* Return TRUE if the register REG is acceptable where a register of type
669 TYPE is expected; otherwise return FALSE. */
670 static bfd_boolean
671 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
672 {
673 if (reg->type == type)
674 return TRUE;
675
676 switch (type)
677 {
678 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
679 case REG_TYPE_R_Z_SP: /* Integer reg (inc {W}SP inc [WX]ZR). */
680 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
681 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
682 case REG_TYPE_VN: /* Vector register. */
683 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
684 return ((reg_type_masks[reg->type] & reg_type_masks[type])
685 == reg_type_masks[reg->type]);
686 default:
687 as_fatal ("unhandled type %d", type);
688 abort ();
689 }
690 }
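/* For example, a register parsed as REG_TYPE_Z_64 (xzr) passes a check
   against REG_TYPE_R_Z_SP, because reg_type_masks[REG_TYPE_R_Z_SP]
   includes the REG_TYPE_Z_64 bit.  */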
691
692 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
693 Return the register number otherwise. *ISREG32 is set to one if the
694 register is 32-bit wide; *ISREGZERO is set to one if the register is
695 of type Z_32 or Z_64.
696 Note that this function does not issue any diagnostics. */
697
698 static int
699 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
700 int *isreg32, int *isregzero)
701 {
702 char *str = *ccp;
703 const reg_entry *reg = parse_reg (&str);
704
705 if (reg == NULL)
706 return PARSE_FAIL;
707
708 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
709 return PARSE_FAIL;
710
711 switch (reg->type)
712 {
713 case REG_TYPE_SP_32:
714 case REG_TYPE_SP_64:
715 if (reject_sp)
716 return PARSE_FAIL;
717 *isreg32 = reg->type == REG_TYPE_SP_32;
718 *isregzero = 0;
719 break;
720 case REG_TYPE_R_32:
721 case REG_TYPE_R_64:
722 *isreg32 = reg->type == REG_TYPE_R_32;
723 *isregzero = 0;
724 break;
725 case REG_TYPE_Z_32:
726 case REG_TYPE_Z_64:
727 if (reject_rz)
728 return PARSE_FAIL;
729 *isreg32 = reg->type == REG_TYPE_Z_32;
730 *isregzero = 1;
731 break;
732 default:
733 return PARSE_FAIL;
734 }
735
736 *ccp = str;
737
738 return reg->number;
739 }
740
741 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
742 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
743 otherwise return FALSE.
744
745 Accept only one occurrence of:
746 8b 16b 4h 8h 2s 4s 1d 2d
747 b h s d q */
748 static bfd_boolean
749 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
750 {
751 char *ptr = *str;
752 unsigned width;
753 unsigned element_size;
754 enum neon_el_type type;
755
756 /* skip '.' */
757 ptr++;
758
759 if (!ISDIGIT (*ptr))
760 {
761 width = 0;
762 goto elt_size;
763 }
764 width = strtoul (ptr, &ptr, 10);
765 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
766 {
767 first_error_fmt (_("bad size %d in vector width specifier"), width);
768 return FALSE;
769 }
770
771 elt_size:
772 switch (TOLOWER (*ptr))
773 {
774 case 'b':
775 type = NT_b;
776 element_size = 8;
777 break;
778 case 'h':
779 type = NT_h;
780 element_size = 16;
781 break;
782 case 's':
783 type = NT_s;
784 element_size = 32;
785 break;
786 case 'd':
787 type = NT_d;
788 element_size = 64;
789 break;
790 case 'q':
791 if (width == 1)
792 {
793 type = NT_q;
794 element_size = 128;
795 break;
796 }
797 /* fall through. */
798 default:
799 if (*ptr != '\0')
800 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
801 else
802 first_error (_("missing element size"));
803 return FALSE;
804 }
805 if (width != 0 && width * element_size != 64 && width * element_size != 128)
806 {
807 first_error_fmt (_
808 ("invalid element size %d and vector size combination %c"),
809 width, *ptr);
810 return FALSE;
811 }
812 ptr++;
813
814 parsed_type->type = type;
815 parsed_type->width = width;
816
817 *str = ptr;
818
819 return TRUE;
820 }
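/* For example, ".4s" is parsed as { type = NT_s, width = 4 } (a 128-bit
   arrangement), whereas a bare ".s" yields width 0 and is only meaningful
   for an element reference such as "v1.s[2]".  */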
821
822 /* Parse a single type, e.g. ".8b", leading period included.
823 Only applicable to Vn registers.
824
825 Return TRUE on success; otherwise return FALSE. */
826 static bfd_boolean
827 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
828 {
829 char *str = *ccp;
830
831 if (*str == '.')
832 {
833 if (! parse_neon_type_for_operand (vectype, &str))
834 {
835 first_error (_("vector type expected"));
836 return FALSE;
837 }
838 }
839 else
840 return FALSE;
841
842 *ccp = str;
843
844 return TRUE;
845 }
846
847 /* Parse a register of the type TYPE.
848
849 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
850 name or the parsed register is not of TYPE.
851
852 Otherwise return the register number, and optionally fill in the actual
853 type of the register in *RTYPE when multiple alternatives were given, and
854 return the register shape and element index information in *TYPEINFO.
855
856 IN_REG_LIST should be set with TRUE if the caller is parsing a register
857 list. */
858
859 static int
860 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
861 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
862 {
863 char *str = *ccp;
864 const reg_entry *reg = parse_reg (&str);
865 struct neon_type_el atype;
866 struct neon_type_el parsetype;
867 bfd_boolean is_typed_vecreg = FALSE;
868
869 atype.defined = 0;
870 atype.type = NT_invtype;
871 atype.width = -1;
872 atype.index = 0;
873
874 if (reg == NULL)
875 {
876 if (typeinfo)
877 *typeinfo = atype;
878 set_default_error ();
879 return PARSE_FAIL;
880 }
881
882 if (! aarch64_check_reg_type (reg, type))
883 {
884 DEBUG_TRACE ("reg type check failed");
885 set_default_error ();
886 return PARSE_FAIL;
887 }
888 type = reg->type;
889
890 if (type == REG_TYPE_VN
891 && parse_neon_operand_type (&parsetype, &str))
892 {
893 /* Register is of the form Vn.[bhsdq]. */
894 is_typed_vecreg = TRUE;
895
896 if (parsetype.width == 0)
897 /* Expect index. In the new scheme we cannot have
898 Vn.[bhsdq] represent a scalar. Therefore any
899 Vn.[bhsdq] should have an index following it.
900 Except in register lists, of course. */
901 atype.defined |= NTA_HASINDEX;
902 else
903 atype.defined |= NTA_HASTYPE;
904
905 atype.type = parsetype.type;
906 atype.width = parsetype.width;
907 }
908
909 if (skip_past_char (&str, '['))
910 {
911 expressionS exp;
912
913 /* Reject Sn[index] syntax. */
914 if (!is_typed_vecreg)
915 {
916 first_error (_("this type of register can't be indexed"));
917 return PARSE_FAIL;
918 }
919
920 if (in_reg_list == TRUE)
921 {
922 first_error (_("index not allowed inside register list"));
923 return PARSE_FAIL;
924 }
925
926 atype.defined |= NTA_HASINDEX;
927
928 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
929
930 if (exp.X_op != O_constant)
931 {
932 first_error (_("constant expression required"));
933 return PARSE_FAIL;
934 }
935
936 if (! skip_past_char (&str, ']'))
937 return PARSE_FAIL;
938
939 atype.index = exp.X_add_number;
940 }
941 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
942 {
943 /* Indexed vector register expected. */
944 first_error (_("indexed vector register expected"));
945 return PARSE_FAIL;
946 }
947
948 /* A vector reg Vn should be typed or indexed. */
949 if (type == REG_TYPE_VN && atype.defined == 0)
950 {
951 first_error (_("invalid use of vector register"));
952 }
953
954 if (typeinfo)
955 *typeinfo = atype;
956
957 if (rtype)
958 *rtype = type;
959
960 *ccp = str;
961
962 return reg->number;
963 }
964
965 /* Parse register.
966
967 Return the register number on success; return PARSE_FAIL otherwise.
968
969 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
970 the register (e.g. NEON double or quad reg when either has been requested).
971
972 If this is a NEON vector register with additional type information, fill
973 in the struct pointed to by VECTYPE (if non-NULL).
974
975 This parser does not handle register lists. */
976
977 static int
978 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
979 aarch64_reg_type *rtype, struct neon_type_el *vectype)
980 {
981 struct neon_type_el atype;
982 char *str = *ccp;
983 int reg = parse_typed_reg (&str, type, rtype, &atype,
984 /*in_reg_list= */ FALSE);
985
986 if (reg == PARSE_FAIL)
987 return PARSE_FAIL;
988
989 if (vectype)
990 *vectype = atype;
991
992 *ccp = str;
993
994 return reg;
995 }
996
997 static inline bfd_boolean
998 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
999 {
1000 return
1001 e1.type == e2.type
1002 && e1.defined == e2.defined
1003 && e1.width == e2.width && e1.index == e2.index;
1004 }
1005
1006 /* This function parses the NEON register list. On success, it returns
1007 the parsed register list information in the following encoded format:
1008
1009 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1010 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1011
1012 The information of the register shape and/or index is returned in
1013 *VECTYPE.
1014
1015 It returns PARSE_FAIL if the register list is invalid.
1016
1017 The list contains one to four registers.
1018 Each register can be one of:
1019 <Vt>.<T>[<index>]
1020 <Vt>.<T>
1021 All <T> should be identical.
1022 All <index> should be identical.
1023 There are restrictions on <Vt> numbers which are checked later
1024 (by reg_list_valid_p). */
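/* For example, "{v2.4s - v4.4s}" describes registers 2, 3 and 4 and makes
   this function return ((2 | (3 << 5) | (4 << 10)) << 2) | (3 - 1):
   the first register number in bits 2-6, the second in bits 7-11, the
   third in bits 12-16 and the register count minus one in bits 0-1.  */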
1025
1026 static int
1027 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1028 {
1029 char *str = *ccp;
1030 int nb_regs;
1031 struct neon_type_el typeinfo, typeinfo_first;
1032 int val, val_range;
1033 int in_range;
1034 int ret_val;
1035 int i;
1036 bfd_boolean error = FALSE;
1037 bfd_boolean expect_index = FALSE;
1038
1039 if (*str != '{')
1040 {
1041 set_syntax_error (_("expecting {"));
1042 return PARSE_FAIL;
1043 }
1044 str++;
1045
1046 nb_regs = 0;
1047 typeinfo_first.defined = 0;
1048 typeinfo_first.type = NT_invtype;
1049 typeinfo_first.width = -1;
1050 typeinfo_first.index = 0;
1051 ret_val = 0;
1052 val = -1;
1053 val_range = -1;
1054 in_range = 0;
1055 do
1056 {
1057 if (in_range)
1058 {
1059 str++; /* skip over '-' */
1060 val_range = val;
1061 }
1062 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1063 /*in_reg_list= */ TRUE);
1064 if (val == PARSE_FAIL)
1065 {
1066 set_first_syntax_error (_("invalid vector register in list"));
1067 error = TRUE;
1068 continue;
1069 }
1070 /* reject [bhsd]n */
1071 if (typeinfo.defined == 0)
1072 {
1073 set_first_syntax_error (_("invalid scalar register in list"));
1074 error = TRUE;
1075 continue;
1076 }
1077
1078 if (typeinfo.defined & NTA_HASINDEX)
1079 expect_index = TRUE;
1080
1081 if (in_range)
1082 {
1083 if (val < val_range)
1084 {
1085 set_first_syntax_error
1086 (_("invalid range in vector register list"));
1087 error = TRUE;
1088 }
1089 val_range++;
1090 }
1091 else
1092 {
1093 val_range = val;
1094 if (nb_regs == 0)
1095 typeinfo_first = typeinfo;
1096 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1097 {
1098 set_first_syntax_error
1099 (_("type mismatch in vector register list"));
1100 error = TRUE;
1101 }
1102 }
1103 if (! error)
1104 for (i = val_range; i <= val; i++)
1105 {
1106 ret_val |= i << (5 * nb_regs);
1107 nb_regs++;
1108 }
1109 in_range = 0;
1110 }
1111 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1112
1113 skip_whitespace (str);
1114 if (*str != '}')
1115 {
1116 set_first_syntax_error (_("end of vector register list not found"));
1117 error = TRUE;
1118 }
1119 str++;
1120
1121 skip_whitespace (str);
1122
1123 if (expect_index)
1124 {
1125 if (skip_past_char (&str, '['))
1126 {
1127 expressionS exp;
1128
1129 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1130 if (exp.X_op != O_constant)
1131 {
1132 set_first_syntax_error (_("constant expression required."));
1133 error = TRUE;
1134 }
1135 if (! skip_past_char (&str, ']'))
1136 error = TRUE;
1137 else
1138 typeinfo_first.index = exp.X_add_number;
1139 }
1140 else
1141 {
1142 set_first_syntax_error (_("expected index"));
1143 error = TRUE;
1144 }
1145 }
1146
1147 if (nb_regs > 4)
1148 {
1149 set_first_syntax_error (_("too many registers in vector register list"));
1150 error = TRUE;
1151 }
1152 else if (nb_regs == 0)
1153 {
1154 set_first_syntax_error (_("empty vector register list"));
1155 error = TRUE;
1156 }
1157
1158 *ccp = str;
1159 if (! error)
1160 *vectype = typeinfo_first;
1161
1162 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1163 }
1164
1165 /* Directives: register aliases. */
1166
1167 static reg_entry *
1168 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1169 {
1170 reg_entry *new;
1171 const char *name;
1172
1173 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1174 {
1175 if (new->builtin)
1176 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1177 str);
1178
1179 /* Only warn about a redefinition if it's not defined as the
1180 same register. */
1181 else if (new->number != number || new->type != type)
1182 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1183
1184 return NULL;
1185 }
1186
1187 name = xstrdup (str);
1188 new = xmalloc (sizeof (reg_entry));
1189
1190 new->name = name;
1191 new->number = number;
1192 new->type = type;
1193 new->builtin = FALSE;
1194
1195 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1196 abort ();
1197
1198 return new;
1199 }
1200
1201 /* Look for the .req directive. This is of the form:
1202
1203 new_register_name .req existing_register_name
1204
1205 If we find one, or if it looks sufficiently like one that we want to
1206 handle any error here, return TRUE. Otherwise return FALSE. */
1207
1208 static bfd_boolean
1209 create_register_alias (char *newname, char *p)
1210 {
1211 const reg_entry *old;
1212 char *oldname, *nbuf;
1213 size_t nlen;
1214
1215 /* The input scrubber ensures that whitespace after the mnemonic is
1216 collapsed to single spaces. */
1217 oldname = p;
1218 if (strncmp (oldname, " .req ", 6) != 0)
1219 return FALSE;
1220
1221 oldname += 6;
1222 if (*oldname == '\0')
1223 return FALSE;
1224
1225 old = hash_find (aarch64_reg_hsh, oldname);
1226 if (!old)
1227 {
1228 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1229 return TRUE;
1230 }
1231
1232 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1233 the desired alias name, and p points to its end. If not, then
1234 the desired alias name is in the global original_case_string. */
1235 #ifdef TC_CASE_SENSITIVE
1236 nlen = p - newname;
1237 #else
1238 newname = original_case_string;
1239 nlen = strlen (newname);
1240 #endif
1241
1242 nbuf = alloca (nlen + 1);
1243 memcpy (nbuf, newname, nlen);
1244 nbuf[nlen] = '\0';
1245
1246 /* Create aliases under the new name as stated; an all-lowercase
1247 version of the new name; and an all-uppercase version of the new
1248 name. */
1249 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1250 {
1251 for (p = nbuf; *p; p++)
1252 *p = TOUPPER (*p);
1253
1254 if (strncmp (nbuf, newname, nlen))
1255 {
1256 /* If this attempt to create an additional alias fails, do not bother
1257 trying to create the all-lower case alias. We will fail and issue
1258 a second, duplicate error message. This situation arises when the
1259 programmer does something like:
1260 foo .req r0
1261 Foo .req r1
1262 The second .req creates the "Foo" alias but then fails to create
1263 the artificial FOO alias because it has already been created by the
1264 first .req. */
1265 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1266 return TRUE;
1267 }
1268
1269 for (p = nbuf; *p; p++)
1270 *p = TOLOWER (*p);
1271
1272 if (strncmp (nbuf, newname, nlen))
1273 insert_reg_alias (nbuf, old->number, old->type);
1274 }
1275
1276 return TRUE;
1277 }
1278
1279 /* Should never be called, as .req goes between the alias and the
1280 register name, not at the beginning of the line. */
1281 static void
1282 s_req (int a ATTRIBUTE_UNUSED)
1283 {
1284 as_bad (_("invalid syntax for .req directive"));
1285 }
1286
1287 /* The .unreq directive deletes an alias which was previously defined
1288 by .req. For example:
1289
1290 my_alias .req r11
1291 .unreq my_alias */
1292
1293 static void
1294 s_unreq (int a ATTRIBUTE_UNUSED)
1295 {
1296 char *name;
1297 char saved_char;
1298
1299 name = input_line_pointer;
1300
1301 while (*input_line_pointer != 0
1302 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1303 ++input_line_pointer;
1304
1305 saved_char = *input_line_pointer;
1306 *input_line_pointer = 0;
1307
1308 if (!*name)
1309 as_bad (_("invalid syntax for .unreq directive"));
1310 else
1311 {
1312 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1313
1314 if (!reg)
1315 as_bad (_("unknown register alias '%s'"), name);
1316 else if (reg->builtin)
1317 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1318 name);
1319 else
1320 {
1321 char *p;
1322 char *nbuf;
1323
1324 hash_delete (aarch64_reg_hsh, name, FALSE);
1325 free ((char *) reg->name);
1326 free (reg);
1327
1328 /* Also locate the all upper case and all lower case versions.
1329 Do not complain if we cannot find one or the other as it
1330 was probably deleted above. */
1331
1332 nbuf = strdup (name);
1333 for (p = nbuf; *p; p++)
1334 *p = TOUPPER (*p);
1335 reg = hash_find (aarch64_reg_hsh, nbuf);
1336 if (reg)
1337 {
1338 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1339 free ((char *) reg->name);
1340 free (reg);
1341 }
1342
1343 for (p = nbuf; *p; p++)
1344 *p = TOLOWER (*p);
1345 reg = hash_find (aarch64_reg_hsh, nbuf);
1346 if (reg)
1347 {
1348 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1349 free ((char *) reg->name);
1350 free (reg);
1351 }
1352
1353 free (nbuf);
1354 }
1355 }
1356
1357 *input_line_pointer = saved_char;
1358 demand_empty_rest_of_line ();
1359 }
1360
1361 /* Directives: Instruction set selection. */
1362
1363 #ifdef OBJ_ELF
1364 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1365 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1366 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1367 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
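/* For example, a section that starts with instructions and then switches
   to data:

       add   x0, x1, x2      // "$x" mapping symbol emitted here
       .word 0x12345678      // "$d" mapping symbol emitted here

   ends up with local $x/$d symbols that tell disassemblers and linkers
   how to interpret each region of the section.  */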
1368
1369 /* Create a new mapping symbol for the transition to STATE. */
1370
1371 static void
1372 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1373 {
1374 symbolS *symbolP;
1375 const char *symname;
1376 int type;
1377
1378 switch (state)
1379 {
1380 case MAP_DATA:
1381 symname = "$d";
1382 type = BSF_NO_FLAGS;
1383 break;
1384 case MAP_INSN:
1385 symname = "$x";
1386 type = BSF_NO_FLAGS;
1387 break;
1388 default:
1389 abort ();
1390 }
1391
1392 symbolP = symbol_new (symname, now_seg, value, frag);
1393 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1394
1395 /* Save the mapping symbols for future reference. Also check that
1396 we do not place two mapping symbols at the same offset within a
1397 frag. We'll handle overlap between frags in
1398 check_mapping_symbols.
1399
1400 If .fill or other data filling directive generates zero sized data,
1401 the mapping symbol for the following code will have the same value
1402 as the one generated for the data filling directive. In this case,
1403 we replace the old symbol with the new one at the same address. */
1404 if (value == 0)
1405 {
1406 if (frag->tc_frag_data.first_map != NULL)
1407 {
1408 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1409 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1410 &symbol_lastP);
1411 }
1412 frag->tc_frag_data.first_map = symbolP;
1413 }
1414 if (frag->tc_frag_data.last_map != NULL)
1415 {
1416 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1417 S_GET_VALUE (symbolP));
1418 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1419 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1420 &symbol_lastP);
1421 }
1422 frag->tc_frag_data.last_map = symbolP;
1423 }
1424
1425 /* We must sometimes convert a region marked as code to data during
1426 code alignment, if an odd number of bytes have to be padded. The
1427 code mapping symbol is pushed to an aligned address. */
1428
1429 static void
1430 insert_data_mapping_symbol (enum mstate state,
1431 valueT value, fragS * frag, offsetT bytes)
1432 {
1433 /* If there was already a mapping symbol, remove it. */
1434 if (frag->tc_frag_data.last_map != NULL
1435 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1436 frag->fr_address + value)
1437 {
1438 symbolS *symp = frag->tc_frag_data.last_map;
1439
1440 if (value == 0)
1441 {
1442 know (frag->tc_frag_data.first_map == symp);
1443 frag->tc_frag_data.first_map = NULL;
1444 }
1445 frag->tc_frag_data.last_map = NULL;
1446 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1447 }
1448
1449 make_mapping_symbol (MAP_DATA, value, frag);
1450 make_mapping_symbol (state, value + bytes, frag);
1451 }
1452
1453 static void mapping_state_2 (enum mstate state, int max_chars);
1454
1455 /* Set the mapping state to STATE. Only call this when about to
1456 emit some STATE bytes to the file. */
1457
1458 void
1459 mapping_state (enum mstate state)
1460 {
1461 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1462
1463 if (state == MAP_INSN)
1464 /* AArch64 instructions require 4-byte alignment. When emitting
1465 instructions into any section, record the appropriate section
1466 alignment. */
1467 record_alignment (now_seg, 2);
1468
1469 if (mapstate == state)
1470 /* The mapping symbol has already been emitted.
1471 There is nothing else to do. */
1472 return;
1473
1474 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1475 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1476 /* Emit MAP_DATA within executable section in order. Otherwise, it will be
1477 evaluated later in the next else. */
1478 return;
1479 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1480 {
1481 /* Only add the symbol if the offset is > 0:
1482 if we're at the first frag, check it's size > 0;
1483 if we're at the first frag, check its size > 0;
1484 the offset is > 0. */
1485 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1486 const int add_symbol = (frag_now != frag_first)
1487 || (frag_now_fix () > 0);
1488
1489 if (add_symbol)
1490 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1491 }
1492 #undef TRANSITION
1493
1494 mapping_state_2 (state, 0);
1495 }
1496
1497 /* Same as mapping_state, but MAX_CHARS bytes have already been
1498 allocated. Put the mapping symbol that far back. */
1499
1500 static void
1501 mapping_state_2 (enum mstate state, int max_chars)
1502 {
1503 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1504
1505 if (!SEG_NORMAL (now_seg))
1506 return;
1507
1508 if (mapstate == state)
1509 /* The mapping symbol has already been emitted.
1510 There is nothing else to do. */
1511 return;
1512
1513 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1514 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1515 }
1516 #else
1517 #define mapping_state(x) /* nothing */
1518 #define mapping_state_2(x, y) /* nothing */
1519 #endif
1520
1521 /* Directives: sectioning and alignment. */
1522
1523 static void
1524 s_bss (int ignore ATTRIBUTE_UNUSED)
1525 {
1526 /* We don't support putting frags in the BSS segment, we fake it by
1527 marking in_bss, then looking at s_skip for clues. */
1528 subseg_set (bss_section, 0);
1529 demand_empty_rest_of_line ();
1530 mapping_state (MAP_DATA);
1531 }
1532
1533 static void
1534 s_even (int ignore ATTRIBUTE_UNUSED)
1535 {
1536 /* Never make frag if expect extra pass. */
1537 if (!need_pass_2)
1538 frag_align (1, 0, 0);
1539
1540 record_alignment (now_seg, 1);
1541
1542 demand_empty_rest_of_line ();
1543 }
1544
1545 /* Directives: Literal pools. */
1546
1547 static literal_pool *
1548 find_literal_pool (int size)
1549 {
1550 literal_pool *pool;
1551
1552 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1553 {
1554 if (pool->section == now_seg
1555 && pool->sub_section == now_subseg && pool->size == size)
1556 break;
1557 }
1558
1559 return pool;
1560 }
1561
1562 static literal_pool *
1563 find_or_make_literal_pool (int size)
1564 {
1565 /* Next literal pool ID number. */
1566 static unsigned int latest_pool_num = 1;
1567 literal_pool *pool;
1568
1569 pool = find_literal_pool (size);
1570
1571 if (pool == NULL)
1572 {
1573 /* Create a new pool. */
1574 pool = xmalloc (sizeof (*pool));
1575 if (!pool)
1576 return NULL;
1577
1578 /* Currently we always put the literal pool in the current text
1579 section. If we were generating "small" model code where we
1580 knew that all code and initialised data was within 1MB then
1581 we could output literals to mergeable, read-only data
1582 sections. */
1583
1584 pool->next_free_entry = 0;
1585 pool->section = now_seg;
1586 pool->sub_section = now_subseg;
1587 pool->size = size;
1588 pool->next = list_of_pools;
1589 pool->symbol = NULL;
1590
1591 /* Add it to the list. */
1592 list_of_pools = pool;
1593 }
1594
1595 /* New pools, and emptied pools, will have a NULL symbol. */
1596 if (pool->symbol == NULL)
1597 {
1598 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1599 (valueT) 0, &zero_address_frag);
1600 pool->id = latest_pool_num++;
1601 }
1602
1603 /* Done. */
1604 return pool;
1605 }
1606
1607 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1608 Return TRUE on success, otherwise return FALSE. */
1609 static bfd_boolean
1610 add_to_lit_pool (expressionS *exp, int size)
1611 {
1612 literal_pool *pool;
1613 unsigned int entry;
1614
1615 pool = find_or_make_literal_pool (size);
1616
1617 /* Check if this literal value is already in the pool. */
1618 for (entry = 0; entry < pool->next_free_entry; entry++)
1619 {
1620 expressionS * litexp = & pool->literals[entry].exp;
1621
1622 if ((litexp->X_op == exp->X_op)
1623 && (exp->X_op == O_constant)
1624 && (litexp->X_add_number == exp->X_add_number)
1625 && (litexp->X_unsigned == exp->X_unsigned))
1626 break;
1627
1628 if ((litexp->X_op == exp->X_op)
1629 && (exp->X_op == O_symbol)
1630 && (litexp->X_add_number == exp->X_add_number)
1631 && (litexp->X_add_symbol == exp->X_add_symbol)
1632 && (litexp->X_op_symbol == exp->X_op_symbol))
1633 break;
1634 }
1635
1636 /* Do we need to create a new entry? */
1637 if (entry == pool->next_free_entry)
1638 {
1639 if (entry >= MAX_LITERAL_POOL_SIZE)
1640 {
1641 set_syntax_error (_("literal pool overflow"));
1642 return FALSE;
1643 }
1644
1645 pool->literals[entry].exp = *exp;
1646 pool->next_free_entry += 1;
1647 if (exp->X_op == O_big)
1648 {
1649 /* PR 16688: Bignums are held in a single global array. We must
1650 copy and preserve that value now, before it is overwritten. */
1651 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1652 memcpy (pool->literals[entry].bignum, generic_bignum,
1653 CHARS_PER_LITTLENUM * exp->X_add_number);
1654 }
1655 else
1656 pool->literals[entry].bignum = NULL;
1657 }
1658
1659 exp->X_op = O_symbol;
1660 exp->X_add_number = ((int) entry) * size;
1661 exp->X_add_symbol = pool->symbol;
1662
1663 return TRUE;
1664 }
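/* Note that on success the caller's expression has been rewritten as
   pool->symbol + entry * size, so the instruction that refers to it can be
   fixed up later as an ordinary literal-pool (PC-relative) reference.  */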
1665
1666 /* Can't use symbol_new here, so have to create a symbol and then at
1667 a later date assign it a value. That's what these functions do. */
1668
1669 static void
1670 symbol_locate (symbolS * symbolP,
1671 const char *name,/* It is copied, the caller can modify. */
1672 segT segment, /* Segment identifier (SEG_<something>). */
1673 valueT valu, /* Symbol value. */
1674 fragS * frag) /* Associated fragment. */
1675 {
1676 size_t name_length;
1677 char *preserved_copy_of_name;
1678
1679 name_length = strlen (name) + 1; /* +1 for \0. */
1680 obstack_grow (&notes, name, name_length);
1681 preserved_copy_of_name = obstack_finish (&notes);
1682
1683 #ifdef tc_canonicalize_symbol_name
1684 preserved_copy_of_name =
1685 tc_canonicalize_symbol_name (preserved_copy_of_name);
1686 #endif
1687
1688 S_SET_NAME (symbolP, preserved_copy_of_name);
1689
1690 S_SET_SEGMENT (symbolP, segment);
1691 S_SET_VALUE (symbolP, valu);
1692 symbol_clear_list_pointers (symbolP);
1693
1694 symbol_set_frag (symbolP, frag);
1695
1696 /* Link to end of symbol chain. */
1697 {
1698 extern int symbol_table_frozen;
1699
1700 if (symbol_table_frozen)
1701 abort ();
1702 }
1703
1704 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1705
1706 obj_symbol_new_hook (symbolP);
1707
1708 #ifdef tc_symbol_new_hook
1709 tc_symbol_new_hook (symbolP);
1710 #endif
1711
1712 #ifdef DEBUG_SYMS
1713 verify_symbol_chain (symbol_rootP, symbol_lastP);
1714 #endif /* DEBUG_SYMS */
1715 }
1716
1717
1718 static void
1719 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1720 {
1721 unsigned int entry;
1722 literal_pool *pool;
1723 char sym_name[20];
1724 int align;
1725
1726 for (align = 2; align <= 4; align++)
1727 {
1728 int size = 1 << align;
1729
1730 pool = find_literal_pool (size);
1731 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1732 continue;
1733
1734 mapping_state (MAP_DATA);
1735
1736 /* Align pool as you have word accesses.
1737 Only make a frag if we have to. */
1738 if (!need_pass_2)
1739 frag_align (align, 0, 0);
1740
1741 record_alignment (now_seg, align);
1742
1743 sprintf (sym_name, "$$lit_\002%x", pool->id);
1744
1745 symbol_locate (pool->symbol, sym_name, now_seg,
1746 (valueT) frag_now_fix (), frag_now);
1747 symbol_table_insert (pool->symbol);
1748
1749 for (entry = 0; entry < pool->next_free_entry; entry++)
1750 {
1751 expressionS * exp = & pool->literals[entry].exp;
1752
1753 if (exp->X_op == O_big)
1754 {
1755 /* PR 16688: Restore the global bignum value. */
1756 gas_assert (pool->literals[entry].bignum != NULL);
1757 memcpy (generic_bignum, pool->literals[entry].bignum,
1758 CHARS_PER_LITTLENUM * exp->X_add_number);
1759 }
1760
1761 /* First output the expression in the instruction to the pool. */
1762 emit_expr (exp, size); /* .word|.xword */
1763
1764 if (exp->X_op == O_big)
1765 {
1766 free (pool->literals[entry].bignum);
1767 pool->literals[entry].bignum = NULL;
1768 }
1769 }
1770
1771 /* Mark the pool as empty. */
1772 pool->next_free_entry = 0;
1773 pool->symbol = NULL;
1774 }
1775 }
1776
1777 #ifdef OBJ_ELF
1778 /* Forward declarations for functions below, in the MD interface
1779 section. */
1780 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1781 static struct reloc_table_entry * find_reloc_table_entry (char **);
1782
1783 /* Directives: Data. */
1784 /* N.B. the support for relocation suffix in this directive needs to be
1785 implemented properly. */
1786
1787 static void
1788 s_aarch64_elf_cons (int nbytes)
1789 {
1790 expressionS exp;
1791
1792 #ifdef md_flush_pending_output
1793 md_flush_pending_output ();
1794 #endif
1795
1796 if (is_it_end_of_statement ())
1797 {
1798 demand_empty_rest_of_line ();
1799 return;
1800 }
1801
1802 #ifdef md_cons_align
1803 md_cons_align (nbytes);
1804 #endif
1805
1806 mapping_state (MAP_DATA);
1807 do
1808 {
1809 struct reloc_table_entry *reloc;
1810
1811 expression (&exp);
1812
1813 if (exp.X_op != O_symbol)
1814 emit_expr (&exp, (unsigned int) nbytes);
1815 else
1816 {
1817 skip_past_char (&input_line_pointer, '#');
1818 if (skip_past_char (&input_line_pointer, ':'))
1819 {
1820 reloc = find_reloc_table_entry (&input_line_pointer);
1821 if (reloc == NULL)
1822 as_bad (_("unrecognized relocation suffix"));
1823 else
1824 as_bad (_("unimplemented relocation suffix"));
1825 ignore_rest_of_line ();
1826 return;
1827 }
1828 else
1829 emit_expr (&exp, (unsigned int) nbytes);
1830 }
1831 }
1832 while (*input_line_pointer++ == ',');
1833
1834 /* Put terminator back into stream. */
1835 input_line_pointer--;
1836 demand_empty_rest_of_line ();
1837 }
1838
1839 #endif /* OBJ_ELF */
1840
1841 /* Output a 32-bit word, but mark as an instruction. */
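/* For example, ".inst 0xd503201f" emits the 4-byte NOP encoding while
   keeping the mapping state as code ($x) rather than data ($d).  */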
1842
1843 static void
1844 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1845 {
1846 expressionS exp;
1847
1848 #ifdef md_flush_pending_output
1849 md_flush_pending_output ();
1850 #endif
1851
1852 if (is_it_end_of_statement ())
1853 {
1854 demand_empty_rest_of_line ();
1855 return;
1856 }
1857
1858 /* Sections are assumed to start aligned. In an executable section, there is no
1859 MAP_DATA symbol pending. So we only align the address during
1860 MAP_DATA --> MAP_INSN transition.
1861 For other sections, this is not guaranteed. */
1862 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1863 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1864 frag_align_code (2, 0);
1865
1866 #ifdef OBJ_ELF
1867 mapping_state (MAP_INSN);
1868 #endif
1869
1870 do
1871 {
1872 expression (&exp);
1873 if (exp.X_op != O_constant)
1874 {
1875 as_bad (_("constant expression required"));
1876 ignore_rest_of_line ();
1877 return;
1878 }
1879
1880 if (target_big_endian)
1881 {
1882 unsigned int val = exp.X_add_number;
1883 exp.X_add_number = SWAP_32 (val);
1884 }
1885 emit_expr (&exp, 4);
1886 }
1887 while (*input_line_pointer++ == ',');
1888
1889 /* Put terminator back into stream. */
1890 input_line_pointer--;
1891 demand_empty_rest_of_line ();
1892 }
1893
1894 #ifdef OBJ_ELF
1895 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1896
1897 static void
1898 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1899 {
1900 expressionS exp;
1901
1902 /* Since we're just labelling the code, there's no need to define a
1903 mapping symbol. */
1904 expression (&exp);
1905 /* Make sure there is enough room in this frag for the following
1906 blr. This trick only works if the blr follows immediately after
1907 the .tlsdesccall directive. */
1908 frag_grow (4);
1909 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1910 BFD_RELOC_AARCH64_TLSDESC_CALL);
1911
1912 demand_empty_rest_of_line ();
1913 }
1914 #endif /* OBJ_ELF */
1915
1916 static void s_aarch64_arch (int);
1917 static void s_aarch64_cpu (int);
1918 static void s_aarch64_arch_extension (int);
1919
1920 /* This table describes all the machine specific pseudo-ops the assembler
1921 has to support. The fields are:
1922 pseudo-op name without dot
1923 function to call to execute this pseudo-op
1924 Integer arg to pass to the function. */
1925
1926 const pseudo_typeS md_pseudo_table[] = {
1927 /* Never called because '.req' does not start a line. */
1928 {"req", s_req, 0},
1929 {"unreq", s_unreq, 0},
1930 {"bss", s_bss, 0},
1931 {"even", s_even, 0},
1932 {"ltorg", s_ltorg, 0},
1933 {"pool", s_ltorg, 0},
1934 {"cpu", s_aarch64_cpu, 0},
1935 {"arch", s_aarch64_arch, 0},
1936 {"arch_extension", s_aarch64_arch_extension, 0},
1937 {"inst", s_aarch64_inst, 0},
1938 #ifdef OBJ_ELF
1939 {"tlsdesccall", s_tlsdesccall, 0},
1940 {"word", s_aarch64_elf_cons, 4},
1941 {"long", s_aarch64_elf_cons, 4},
1942 {"xword", s_aarch64_elf_cons, 8},
1943 {"dword", s_aarch64_elf_cons, 8},
1944 #endif
1945 {0, 0, 0}
1946 };
1947 \f
1948
1949 /* Check whether STR points to a register name followed by a comma or the
1950 end of line; REG_TYPE indicates which register types are checked
1951 against. Return TRUE if STR is such a register name; otherwise return
1952 FALSE. The function does not intend to produce any diagnostics, but since
1953 the register parser aarch64_reg_parse, which is called by this function,
1954 does produce diagnostics, we call clear_error to clear any diagnostics
1955 that may be generated by aarch64_reg_parse.
1956 Also, the function returns FALSE directly if there is any user error
1957 present at the function entry. This prevents the existing diagnostics
1958 state from being spoiled.
1959 The function currently serves parse_constant_immediate and
1960 parse_big_immediate only. */
1961 static bfd_boolean
1962 reg_name_p (char *str, aarch64_reg_type reg_type)
1963 {
1964 int reg;
1965
1966 /* Prevent the diagnostics state from being spoiled. */
1967 if (error_p ())
1968 return FALSE;
1969
1970 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1971
1972 /* Clear the parsing error that may be set by the reg parser. */
1973 clear_error ();
1974
1975 if (reg == PARSE_FAIL)
1976 return FALSE;
1977
1978 skip_whitespace (str);
1979 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1980 return TRUE;
1981
1982 return FALSE;
1983 }
1984
1985 /* Parser functions used exclusively in instruction operands. */
1986
1987 /* Parse an immediate expression which may not be constant.
1988
1989 To prevent the expression parser from pushing a register name
1990 into the symbol table as an undefined symbol, firstly a check is
1991 done to find out whether STR is a valid register name followed
1992 by a comma or the end of line. Return FALSE if STR is such a
1993 string. */
1994
1995 static bfd_boolean
1996 parse_immediate_expression (char **str, expressionS *exp)
1997 {
1998 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1999 {
2000 set_recoverable_error (_("immediate operand required"));
2001 return FALSE;
2002 }
2003
2004 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2005
2006 if (exp->X_op == O_absent)
2007 {
2008 set_fatal_syntax_error (_("missing immediate expression"));
2009 return FALSE;
2010 }
2011
2012 return TRUE;
2013 }
2014
2015 /* Constant immediate-value read function for use in insn parsing.
2016 STR points to the beginning of the immediate (with the optional
2017 leading #); *VAL receives the value.
2018
2019 Return TRUE on success; otherwise return FALSE. */
2020
2021 static bfd_boolean
2022 parse_constant_immediate (char **str, int64_t * val)
2023 {
2024 expressionS exp;
2025
2026 if (! parse_immediate_expression (str, &exp))
2027 return FALSE;
2028
2029 if (exp.X_op != O_constant)
2030 {
2031 set_syntax_error (_("constant expression required"));
2032 return FALSE;
2033 }
2034
2035 *val = exp.X_add_number;
2036 return TRUE;
2037 }
2038
2039 static uint32_t
2040 encode_imm_float_bits (uint32_t imm)
2041 {
2042 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2043 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2044 }
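/* For illustration (the values follow from the bit mapping noted in the
   comments above): for the IEEE754 single-precision word 0x40000000,
   i.e. 2.0f, bits [25:19] and bit [31] are all zero, so this function
   returns 0x00; 2.0 is thus represented by an all-zero 8-bit immediate
   field.  */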
2045
2046 /* Return TRUE if the single-precision floating-point value encoded in IMM
2047 can be expressed in the AArch64 8-bit signed floating-point format with
2048 3-bit exponent and normalized 4 bits of precision; in other words, the
2049 floating-point value must be expressible as
2050 (+/-) n / 16 * power (2, r)
2051 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2052
2053 static bfd_boolean
2054 aarch64_imm_float_p (uint32_t imm)
2055 {
2056 /* If a single-precision floating-point value has the following bit
2057 pattern, it can be expressed in the AArch64 8-bit floating-point
2058 format:
2059
2060 3 32222222 2221111111111
2061 1 09876543 21098765432109876543210
2062 n Eeeeeexx xxxx0000000000000000000
2063
2064 where n, e and each x are either 0 or 1 independently, with
2065 E == ~ e. */
2066
2067 uint32_t pattern;
2068
2069 /* Prepare the pattern for 'Eeeeee'. */
2070 if (((imm >> 30) & 0x1) == 0)
2071 pattern = 0x3e000000;
2072 else
2073 pattern = 0x40000000;
2074
2075 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2076 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2077 }
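/* For illustration, using the pattern described above: 2.0f is encoded
   as 0x40000000; bit 30 is set, so PATTERN is 0x40000000, the low 19
   bits are zero and bits 25-29 are clear (the complement of bit 30),
   hence the function returns TRUE.  By contrast, 0.1f (0x3dcccccd) is
   rejected because its low 19 bits are not zero.  */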
2078
2079 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2080
2081 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2082 8-bit signed floating-point format with 3-bit exponent and normalized 4
2083 bits of precision (i.e. can be used in an FMOV instruction); return the
2084 equivalent single-precision encoding in *FPWORD.
2085
2086 Otherwise return FALSE. */
2087
2088 static bfd_boolean
2089 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2090 {
2091 /* If a double-precision floating-point value has the following bit
2092 pattern, it can be expressed in the AArch64 8-bit floating-point
2093 format:
2094
2095 6 66655555555 554444444...21111111111
2096 3 21098765432 109876543...098765432109876543210
2097 n Eeeeeeeeexx xxxx00000...000000000000000000000
2098
2099 where n, e and each x are either 0 or 1 independently, with
2100 E == ~ e. */
2101
2102 uint32_t pattern;
2103 uint32_t high32 = imm >> 32;
2104
2105 /* Lower 32 bits need to be 0s. */
2106 if ((imm & 0xffffffff) != 0)
2107 return FALSE;
2108
2109 /* Prepare the pattern for 'Eeeeeeeee'. */
2110 if (((high32 >> 30) & 0x1) == 0)
2111 pattern = 0x3fc00000;
2112 else
2113 pattern = 0x40000000;
2114
2115 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2116 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2117 {
2118 /* Convert to the single-precision encoding.
2119 i.e. convert
2120 n Eeeeeeeeexx xxxx00000...000000000000000000000
2121 to
2122 n Eeeeeexx xxxx0000000000000000000. */
2123 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2124 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2125 return TRUE;
2126 }
2127 else
2128 return FALSE;
2129 }
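/* For illustration, using the pattern described above: the double 2.0
   is encoded as 0x4000000000000000; the low 32 bits are zero, HIGH32 is
   0x40000000, bit 62 is set and bits 54-61 are clear, so the function
   succeeds and *FPWORD is set to 0x40000000, the single-precision
   encoding of 2.0.  */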
2130
2131 /* Parse a floating-point immediate. Return TRUE on success and return the
2132 value in *IMMED in the format of IEEE754 single-precision encoding.
2133 *CCP points to the start of the string; DP_P is TRUE when the immediate
2134 is expected to be in double-precision (N.B. this only matters when
2135 hexadecimal representation is involved).
2136
2137 N.B. 0.0 is accepted by this function. */
2138
2139 static bfd_boolean
2140 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2141 {
2142 char *str = *ccp;
2143 char *fpnum;
2144 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2145 int found_fpchar = 0;
2146 int64_t val = 0;
2147 unsigned fpword = 0;
2148 bfd_boolean hex_p = FALSE;
2149
2150 skip_past_char (&str, '#');
2151
2152 fpnum = str;
2153 skip_whitespace (fpnum);
2154
2155 if (strncmp (fpnum, "0x", 2) == 0)
2156 {
2157 /* Support the hexadecimal representation of the IEEE754 encoding.
2158 Double-precision is expected when DP_P is TRUE, otherwise the
2159 representation should be in single-precision. */
2160 if (! parse_constant_immediate (&str, &val))
2161 goto invalid_fp;
2162
2163 if (dp_p)
2164 {
2165 if (! aarch64_double_precision_fmovable (val, &fpword))
2166 goto invalid_fp;
2167 }
2168 else if ((uint64_t) val > 0xffffffff)
2169 goto invalid_fp;
2170 else
2171 fpword = val;
2172
2173 hex_p = TRUE;
2174 }
2175 else
2176 {
2177 /* We must not accidentally parse an integer as a floating-point number.
2178 Make sure that the value we parse is not an integer by checking for
2179 special characters '.', 'e' or 'E'. */
2180 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2181 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2182 {
2183 found_fpchar = 1;
2184 break;
2185 }
2186
2187 if (!found_fpchar)
2188 return FALSE;
2189 }
2190
2191 if (! hex_p)
2192 {
2193 int i;
2194
2195 if ((str = atof_ieee (str, 's', words)) == NULL)
2196 goto invalid_fp;
2197
2198 /* Our FP word must be 32 bits (single-precision FP). */
2199 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2200 {
2201 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2202 fpword |= words[i];
2203 }
2204 }
2205
2206 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2207 {
2208 *immed = fpword;
2209 *ccp = str;
2210 return TRUE;
2211 }
2212
2213 invalid_fp:
2214 set_fatal_syntax_error (_("invalid floating-point constant"));
2215 return FALSE;
2216 }
2217
2218 /* Less-generic immediate-value read function with the possibility of loading
2219 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2220 instructions.
2221
2222 To prevent the expression parser from pushing a register name into the
2223 symbol table as an undefined symbol, a check is firstly done to find
2224 out whether STR is a valid register name followed by a comma or the end
2225 of line. Return FALSE if STR is such a register. */
2226
2227 static bfd_boolean
2228 parse_big_immediate (char **str, int64_t *imm)
2229 {
2230 char *ptr = *str;
2231
2232 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2233 {
2234 set_syntax_error (_("immediate operand required"));
2235 return FALSE;
2236 }
2237
2238 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2239
2240 if (inst.reloc.exp.X_op == O_constant)
2241 *imm = inst.reloc.exp.X_add_number;
2242
2243 *str = ptr;
2244
2245 return TRUE;
2246 }
2247
2248 /* Record in RELOC that OPERAND needs a GAS internal fixup.
2249 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2250 assistance from libopcodes. */
2251
2252 static inline void
2253 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2254 const aarch64_opnd_info *operand,
2255 int need_libopcodes_p)
2256 {
2257 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2258 reloc->opnd = operand->type;
2259 if (need_libopcodes_p)
2260 reloc->need_libopcodes_p = 1;
2261 }
2262
2263 /* Return TRUE if the instruction needs to be fixed up later internally by
2264 GAS; otherwise return FALSE. */
2265
2266 static inline bfd_boolean
2267 aarch64_gas_internal_fixup_p (void)
2268 {
2269 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2270 }
2271
2272 /* Assign the immediate value to the relevant field in *OPERAND if
2273 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2274 needs an internal fixup in a later stage.
2275 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2276 IMM.VALUE that may get assigned with the constant. */
2277 static inline void
2278 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2279 aarch64_opnd_info *operand,
2280 int addr_off_p,
2281 int need_libopcodes_p,
2282 int skip_p)
2283 {
2284 if (reloc->exp.X_op == O_constant)
2285 {
2286 if (addr_off_p)
2287 operand->addr.offset.imm = reloc->exp.X_add_number;
2288 else
2289 operand->imm.value = reloc->exp.X_add_number;
2290 reloc->type = BFD_RELOC_UNUSED;
2291 }
2292 else
2293 {
2294 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2295 /* Tell libopcodes whether or not to ignore this operand. This is helpful
2296 when one of the operands needs to be fixed up later but we need
2297 libopcodes to check the other operands. */
2298 operand->skip = skip_p;
2299 }
2300 }
2301
2302 /* Relocation modifiers. Each entry in the table contains the textual
2303 name for the relocation modifier, which may be placed before a symbol
2304 used as a load/store offset or an add immediate. It must be surrounded by a
2305 leading and trailing colon, for example:
2306
2307 ldr x0, [x1, #:rello:varsym]
2308 add x0, x1, #:rello:varsym */
2309
2310 struct reloc_table_entry
2311 {
2312 const char *name;
2313 int pc_rel;
2314 bfd_reloc_code_real_type adr_type;
2315 bfd_reloc_code_real_type adrp_type;
2316 bfd_reloc_code_real_type movw_type;
2317 bfd_reloc_code_real_type add_type;
2318 bfd_reloc_code_real_type ldst_type;
2319 bfd_reloc_code_real_type ld_literal_type;
2320 };
2321
2322 static struct reloc_table_entry reloc_table[] = {
2323 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2324 {"lo12", 0,
2325 0, /* adr_type */
2326 0,
2327 0,
2328 BFD_RELOC_AARCH64_ADD_LO12,
2329 BFD_RELOC_AARCH64_LDST_LO12,
2330 0},
2331
2332 /* Higher 21 bits of pc-relative page offset: ADRP */
2333 {"pg_hi21", 1,
2334 0, /* adr_type */
2335 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2336 0,
2337 0,
2338 0,
2339 0},
2340
2341 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2342 {"pg_hi21_nc", 1,
2343 0, /* adr_type */
2344 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2345 0,
2346 0,
2347 0,
2348 0},
2349
2350 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2351 {"abs_g0", 0,
2352 0, /* adr_type */
2353 0,
2354 BFD_RELOC_AARCH64_MOVW_G0,
2355 0,
2356 0,
2357 0},
2358
2359 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2360 {"abs_g0_s", 0,
2361 0, /* adr_type */
2362 0,
2363 BFD_RELOC_AARCH64_MOVW_G0_S,
2364 0,
2365 0,
2366 0},
2367
2368 /* Less significant bits 0-15 of address/value: MOVK, no check */
2369 {"abs_g0_nc", 0,
2370 0, /* adr_type */
2371 0,
2372 BFD_RELOC_AARCH64_MOVW_G0_NC,
2373 0,
2374 0,
2375 0},
2376
2377 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2378 {"abs_g1", 0,
2379 0, /* adr_type */
2380 0,
2381 BFD_RELOC_AARCH64_MOVW_G1,
2382 0,
2383 0,
2384 0},
2385
2386 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2387 {"abs_g1_s", 0,
2388 0, /* adr_type */
2389 0,
2390 BFD_RELOC_AARCH64_MOVW_G1_S,
2391 0,
2392 0,
2393 0},
2394
2395 /* Less significant bits 16-31 of address/value: MOVK, no check */
2396 {"abs_g1_nc", 0,
2397 0, /* adr_type */
2398 0,
2399 BFD_RELOC_AARCH64_MOVW_G1_NC,
2400 0,
2401 0,
2402 0},
2403
2404 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2405 {"abs_g2", 0,
2406 0, /* adr_type */
2407 0,
2408 BFD_RELOC_AARCH64_MOVW_G2,
2409 0,
2410 0,
2411 0},
2412
2413 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2414 {"abs_g2_s", 0,
2415 0, /* adr_type */
2416 0,
2417 BFD_RELOC_AARCH64_MOVW_G2_S,
2418 0,
2419 0,
2420 0},
2421
2422 /* Less significant bits 32-47 of address/value: MOVK, no check */
2423 {"abs_g2_nc", 0,
2424 0, /* adr_type */
2425 0,
2426 BFD_RELOC_AARCH64_MOVW_G2_NC,
2427 0,
2428 0,
2429 0},
2430
2431 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2432 {"abs_g3", 0,
2433 0, /* adr_type */
2434 0,
2435 BFD_RELOC_AARCH64_MOVW_G3,
2436 0,
2437 0,
2438 0},
2439
2440 /* Get to the page containing GOT entry for a symbol. */
2441 {"got", 1,
2442 0, /* adr_type */
2443 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2444 0,
2445 0,
2446 0,
2447 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2448
2449 /* 12 bit offset into the page containing GOT entry for that symbol. */
2450 {"got_lo12", 0,
2451 0, /* adr_type */
2452 0,
2453 0,
2454 0,
2455 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2456 0},
2457
2458 /* 15 bit offset into the page containing GOT entry for that symbol. */
2459 {"gotoff_lo15", 0,
2460 0, /* adr_type */
2461 0,
2462 0,
2463 0,
2464 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2465 0},
2466
2467 /* Get to the page containing GOT TLS entry for a symbol */
2468 {"tlsgd", 0,
2469 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2470 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2471 0,
2472 0,
2473 0,
2474 0},
2475
2476 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2477 {"tlsgd_lo12", 0,
2478 0, /* adr_type */
2479 0,
2480 0,
2481 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2482 0,
2483 0},
2484
2485 /* Get to the page containing GOT TLS entry for a symbol */
2486 {"tlsdesc", 0,
2487 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2488 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2489 0,
2490 0,
2491 0,
2492 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2493
2494 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2495 {"tlsdesc_lo12", 0,
2496 0, /* adr_type */
2497 0,
2498 0,
2499 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2500 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2501 0},
2502
2503 /* Get to the page containing GOT TLS entry for a symbol.
2504 As with GD, we allocate two consecutive GOT slots
2505 for the module index and module offset; the only difference
2506 from GD is that the module offset should be initialized to
2507 zero without any outstanding runtime relocation. */
2508 {"tlsldm", 0,
2509 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2510 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2511 0,
2512 0,
2513 0,
2514 0},
2515
2516 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2517 {"tlsldm_lo12_nc", 0,
2518 0, /* adr_type */
2519 0,
2520 0,
2521 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2522 0,
2523 0},
2524
2525 /* 12 bit offset into the module TLS base address. */
2526 {"dtprel_lo12", 0,
2527 0, /* adr_type */
2528 0,
2529 0,
2530 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2531 0,
2532 0},
2533
2534 /* Same as dtprel_lo12, no overflow check. */
2535 {"dtprel_lo12_nc", 0,
2536 0, /* adr_type */
2537 0,
2538 0,
2539 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2540 0,
2541 0},
2542
2543 /* Get to the page containing GOT TLS entry for a symbol */
2544 {"gottprel", 0,
2545 0, /* adr_type */
2546 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2547 0,
2548 0,
2549 0,
2550 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2551
2552 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2553 {"gottprel_lo12", 0,
2554 0, /* adr_type */
2555 0,
2556 0,
2557 0,
2558 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2559 0},
2560
2561 /* Get tp offset for a symbol. */
2562 {"tprel", 0,
2563 0, /* adr_type */
2564 0,
2565 0,
2566 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2567 0,
2568 0},
2569
2570 /* Get tp offset for a symbol. */
2571 {"tprel_lo12", 0,
2572 0, /* adr_type */
2573 0,
2574 0,
2575 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2576 0,
2577 0},
2578
2579 /* Get tp offset for a symbol. */
2580 {"tprel_hi12", 0,
2581 0, /* adr_type */
2582 0,
2583 0,
2584 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2585 0,
2586 0},
2587
2588 /* Get tp offset for a symbol. */
2589 {"tprel_lo12_nc", 0,
2590 0, /* adr_type */
2591 0,
2592 0,
2593 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2594 0,
2595 0},
2596
2597 /* Most significant bits 32-47 of address/value: MOVZ. */
2598 {"tprel_g2", 0,
2599 0, /* adr_type */
2600 0,
2601 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2602 0,
2603 0,
2604 0},
2605
2606 /* Most significant bits 16-31 of address/value: MOVZ. */
2607 {"tprel_g1", 0,
2608 0, /* adr_type */
2609 0,
2610 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2611 0,
2612 0,
2613 0},
2614
2615 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2616 {"tprel_g1_nc", 0,
2617 0, /* adr_type */
2618 0,
2619 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2620 0,
2621 0,
2622 0},
2623
2624 /* Most significant bits 0-15 of address/value: MOVZ. */
2625 {"tprel_g0", 0,
2626 0, /* adr_type */
2627 0,
2628 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2629 0,
2630 0,
2631 0},
2632
2633 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2634 {"tprel_g0_nc", 0,
2635 0, /* adr_type */
2636 0,
2637 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2638 0,
2639 0,
2640 0},
2641
2642 /* 15-bit offset from GOT entry to base address of GOT table. */
2643 {"gotpage_lo15", 0,
2644 0,
2645 0,
2646 0,
2647 0,
2648 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2649 0},
2650
2651 /* 14-bit offset from GOT entry to base address of GOT table. */
2652 {"gotpage_lo14", 0,
2653 0,
2654 0,
2655 0,
2656 0,
2657 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2658 0},
2659 };
2660
2661 /* Given the address of a pointer pointing to the textual name of a
2662 relocation as may appear in assembler source, attempt to find its
2663 details in reloc_table. The pointer will be updated to the character
2664 after the trailing colon. On failure, NULL will be returned;
2665 otherwise return the reloc_table_entry. */
2666
2667 static struct reloc_table_entry *
2668 find_reloc_table_entry (char **str)
2669 {
2670 unsigned int i;
2671 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2672 {
2673 int length = strlen (reloc_table[i].name);
2674
2675 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2676 && (*str)[length] == ':')
2677 {
2678 *str += (length + 1);
2679 return &reloc_table[i];
2680 }
2681 }
2682
2683 return NULL;
2684 }
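/* A sketch of how the table above is consumed (the symbol name "var" is
   just a placeholder): for an operand written as "#:dtprel_lo12_nc:var",
   the caller strips the "#:" prefix and passes "dtprel_lo12_nc:var"
   here; the "dtprel_lo12_nc" entry is returned and *STR is advanced past
   the trailing colon to "var".  An ADD-immediate consumer then records
   the entry's add_type, BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   while a load/store consumer would use ldst_type instead (zero for this
   entry, which is reported as not allowed on that instruction).  */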
2685
2686 /* Mode argument to parse_shift and parse_shifter_operand. */
2687 enum parse_shift_mode
2688 {
2689 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2690 "#imm{,lsl #n}" */
2691 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2692 "#imm" */
2693 SHIFTED_LSL, /* bare "lsl #n" */
2694 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2695 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2696 };
2697
2698 /* Parse a <shift> operator on an AArch64 data processing instruction.
2699 Return TRUE on success; otherwise return FALSE. */
2700 static bfd_boolean
2701 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2702 {
2703 const struct aarch64_name_value_pair *shift_op;
2704 enum aarch64_modifier_kind kind;
2705 expressionS exp;
2706 int exp_has_prefix;
2707 char *s = *str;
2708 char *p = s;
2709
2710 for (p = *str; ISALPHA (*p); p++)
2711 ;
2712
2713 if (p == *str)
2714 {
2715 set_syntax_error (_("shift expression expected"));
2716 return FALSE;
2717 }
2718
2719 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2720
2721 if (shift_op == NULL)
2722 {
2723 set_syntax_error (_("shift operator expected"));
2724 return FALSE;
2725 }
2726
2727 kind = aarch64_get_operand_modifier (shift_op);
2728
2729 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2730 {
2731 set_syntax_error (_("invalid use of 'MSL'"));
2732 return FALSE;
2733 }
2734
2735 switch (mode)
2736 {
2737 case SHIFTED_LOGIC_IMM:
2738 if (aarch64_extend_operator_p (kind) == TRUE)
2739 {
2740 set_syntax_error (_("extending shift is not permitted"));
2741 return FALSE;
2742 }
2743 break;
2744
2745 case SHIFTED_ARITH_IMM:
2746 if (kind == AARCH64_MOD_ROR)
2747 {
2748 set_syntax_error (_("'ROR' shift is not permitted"));
2749 return FALSE;
2750 }
2751 break;
2752
2753 case SHIFTED_LSL:
2754 if (kind != AARCH64_MOD_LSL)
2755 {
2756 set_syntax_error (_("only 'LSL' shift is permitted"));
2757 return FALSE;
2758 }
2759 break;
2760
2761 case SHIFTED_REG_OFFSET:
2762 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2763 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2764 {
2765 set_fatal_syntax_error
2766 (_("invalid shift for the register offset addressing mode"));
2767 return FALSE;
2768 }
2769 break;
2770
2771 case SHIFTED_LSL_MSL:
2772 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2773 {
2774 set_syntax_error (_("invalid shift operator"));
2775 return FALSE;
2776 }
2777 break;
2778
2779 default:
2780 abort ();
2781 }
2782
2783 /* Whitespace can appear here if the next thing is a bare digit. */
2784 skip_whitespace (p);
2785
2786 /* Parse shift amount. */
2787 exp_has_prefix = 0;
2788 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2789 exp.X_op = O_absent;
2790 else
2791 {
2792 if (is_immediate_prefix (*p))
2793 {
2794 p++;
2795 exp_has_prefix = 1;
2796 }
2797 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2798 }
2799 if (exp.X_op == O_absent)
2800 {
2801 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2802 {
2803 set_syntax_error (_("missing shift amount"));
2804 return FALSE;
2805 }
2806 operand->shifter.amount = 0;
2807 }
2808 else if (exp.X_op != O_constant)
2809 {
2810 set_syntax_error (_("constant shift amount required"));
2811 return FALSE;
2812 }
2813 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2814 {
2815 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2816 return FALSE;
2817 }
2818 else
2819 {
2820 operand->shifter.amount = exp.X_add_number;
2821 operand->shifter.amount_present = 1;
2822 }
2823
2824 operand->shifter.operator_present = 1;
2825 operand->shifter.kind = kind;
2826
2827 *str = p;
2828 return TRUE;
2829 }
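/* A sketch of the parse above (the amount is chosen only for
   illustration): given the text "lsl #3" with MODE == SHIFTED_LSL, the
   operator lookup yields AARCH64_MOD_LSL, the constant 3 is accepted as
   it lies in the range 0-63, and the operand is left with
   shifter.kind == AARCH64_MOD_LSL, shifter.amount == 3 and both the
   operator_present and amount_present flags set.  For an extending
   operator in SHIFTED_REG_OFFSET mode the amount may be omitted, in
   which case it defaults to 0.  */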
2830
2831 /* Parse a <shifter_operand> for a data processing instruction:
2832
2833 #<immediate>
2834 #<immediate>, LSL #imm
2835
2836 Validation of immediate operands is deferred to md_apply_fix.
2837
2838 Return TRUE on success; otherwise return FALSE. */
2839
2840 static bfd_boolean
2841 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2842 enum parse_shift_mode mode)
2843 {
2844 char *p;
2845
2846 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2847 return FALSE;
2848
2849 p = *str;
2850
2851 /* Accept an immediate expression. */
2852 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2853 return FALSE;
2854
2855 /* Accept optional LSL for arithmetic immediate values. */
2856 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2857 if (! parse_shift (&p, operand, SHIFTED_LSL))
2858 return FALSE;
2859
2860 /* Do not accept any shifter for logical immediate values. */
2861 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2862 && parse_shift (&p, operand, mode))
2863 {
2864 set_syntax_error (_("unexpected shift operator"));
2865 return FALSE;
2866 }
2867
2868 *str = p;
2869 return TRUE;
2870 }
2871
2872 /* Parse a <shifter_operand> for a data processing instruction:
2873
2874 <Rm>
2875 <Rm>, <shift>
2876 #<immediate>
2877 #<immediate>, LSL #imm
2878
2879 where <shift> is handled by parse_shift above, and the last two
2880 cases are handled by the function above.
2881
2882 Validation of immediate operands is deferred to md_apply_fix.
2883
2884 Return TRUE on success; otherwise return FALSE. */
2885
2886 static bfd_boolean
2887 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2888 enum parse_shift_mode mode)
2889 {
2890 int reg;
2891 int isreg32, isregzero;
2892 enum aarch64_operand_class opd_class
2893 = aarch64_get_operand_class (operand->type);
2894
2895 if ((reg =
2896 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2897 {
2898 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2899 {
2900 set_syntax_error (_("unexpected register in the immediate operand"));
2901 return FALSE;
2902 }
2903
2904 if (!isregzero && reg == REG_SP)
2905 {
2906 set_syntax_error (BAD_SP);
2907 return FALSE;
2908 }
2909
2910 operand->reg.regno = reg;
2911 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2912
2913 /* Accept optional shift operation on register. */
2914 if (! skip_past_comma (str))
2915 return TRUE;
2916
2917 if (! parse_shift (str, operand, mode))
2918 return FALSE;
2919
2920 return TRUE;
2921 }
2922 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2923 {
2924 set_syntax_error
2925 (_("integer register expected in the extended/shifted operand "
2926 "register"));
2927 return FALSE;
2928 }
2929
2930 /* We have a shifted immediate variable. */
2931 return parse_shifter_operand_imm (str, operand, mode);
2932 }
2933
2934 /* Return TRUE on success; return FALSE otherwise. */
2935
2936 static bfd_boolean
2937 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2938 enum parse_shift_mode mode)
2939 {
2940 char *p = *str;
2941
2942 /* Determine if we have the sequence of characters #: or just :
2943 coming next. If we do, then we check for a :rello: relocation
2944 modifier. If we don't, punt the whole lot to
2945 parse_shifter_operand. */
2946
2947 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2948 {
2949 struct reloc_table_entry *entry;
2950
2951 if (p[0] == '#')
2952 p += 2;
2953 else
2954 p++;
2955 *str = p;
2956
2957 /* Try to parse a relocation. Anything else is an error. */
2958 if (!(entry = find_reloc_table_entry (str)))
2959 {
2960 set_syntax_error (_("unknown relocation modifier"));
2961 return FALSE;
2962 }
2963
2964 if (entry->add_type == 0)
2965 {
2966 set_syntax_error
2967 (_("this relocation modifier is not allowed on this instruction"));
2968 return FALSE;
2969 }
2970
2971 /* Save str before we decompose it. */
2972 p = *str;
2973
2974 /* Next, we parse the expression. */
2975 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2976 return FALSE;
2977
2978 /* Record the relocation type (use the ADD variant here). */
2979 inst.reloc.type = entry->add_type;
2980 inst.reloc.pc_rel = entry->pc_rel;
2981
2982 /* If str is empty, we've reached the end, stop here. */
2983 if (**str == '\0')
2984 return TRUE;
2985
2986 /* Otherwise, we have a shifted reloc modifier, so rewind to
2987 recover the variable name and continue parsing for the shifter. */
2988 *str = p;
2989 return parse_shifter_operand_imm (str, operand, mode);
2990 }
2991
2992 return parse_shifter_operand (str, operand, mode);
2993 }
2994
2995 /* Parse all forms of an address expression. Information is written
2996 to *OPERAND and/or inst.reloc.
2997
2998 The A64 instruction set has the following addressing modes:
2999
3000 Offset
3001 [base] // in SIMD ld/st structure
3002 [base{,#0}] // in ld/st exclusive
3003 [base{,#imm}]
3004 [base,Xm{,LSL #imm}]
3005 [base,Xm,SXTX {#imm}]
3006 [base,Wm,(S|U)XTW {#imm}]
3007 Pre-indexed
3008 [base,#imm]!
3009 Post-indexed
3010 [base],#imm
3011 [base],Xm // in SIMD ld/st structure
3012 PC-relative (literal)
3013 label
3014 =immediate
3015
3016 (As a convenience, the notation "=immediate" is permitted in conjunction
3017 with the pc-relative literal load instructions to automatically place an
3018 immediate value or symbolic address in a nearby literal pool and generate
3019 a hidden label which references it.)
3020
3021 Upon a successful parsing, the address structure in *OPERAND will be
3022 filled in the following way:
3023
3024 .base_regno = <base>
3025 .offset.is_reg // 1 if the offset is a register
3026 .offset.imm = <imm>
3027 .offset.regno = <Rm>
3028
3029 For different addressing modes defined in the A64 ISA:
3030
3031 Offset
3032 .pcrel=0; .preind=1; .postind=0; .writeback=0
3033 Pre-indexed
3034 .pcrel=0; .preind=1; .postind=0; .writeback=1
3035 Post-indexed
3036 .pcrel=0; .preind=0; .postind=1; .writeback=1
3037 PC-relative (literal)
3038 .pcrel=1; .preind=1; .postind=0; .writeback=0
3039
3040 The shift/extension information, if any, will be stored in .shifter.
3041
3042 It is the caller's responsibility to check for addressing modes not
3043 supported by the instruction, and to set inst.reloc.type. */
3044
3045 static bfd_boolean
3046 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
3047 int accept_reg_post_index)
3048 {
3049 char *p = *str;
3050 int reg;
3051 int isreg32, isregzero;
3052 expressionS *exp = &inst.reloc.exp;
3053
3054 if (! skip_past_char (&p, '['))
3055 {
3056 /* =immediate or label. */
3057 operand->addr.pcrel = 1;
3058 operand->addr.preind = 1;
3059
3060 /* #:<reloc_op>:<symbol> */
3061 skip_past_char (&p, '#');
3062 if (reloc && skip_past_char (&p, ':'))
3063 {
3064 bfd_reloc_code_real_type ty;
3065 struct reloc_table_entry *entry;
3066
3067 /* Try to parse a relocation modifier. Anything else is
3068 an error. */
3069 entry = find_reloc_table_entry (&p);
3070 if (! entry)
3071 {
3072 set_syntax_error (_("unknown relocation modifier"));
3073 return FALSE;
3074 }
3075
3076 switch (operand->type)
3077 {
3078 case AARCH64_OPND_ADDR_PCREL21:
3079 /* adr */
3080 ty = entry->adr_type;
3081 break;
3082
3083 default:
3084 ty = entry->ld_literal_type;
3085 break;
3086 }
3087
3088 if (ty == 0)
3089 {
3090 set_syntax_error
3091 (_("this relocation modifier is not allowed on this "
3092 "instruction"));
3093 return FALSE;
3094 }
3095
3096 /* #:<reloc_op>: */
3097 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3098 {
3099 set_syntax_error (_("invalid relocation expression"));
3100 return FALSE;
3101 }
3102
3103 /* #:<reloc_op>:<expr> */
3104 /* Record the relocation type. */
3105 inst.reloc.type = ty;
3106 inst.reloc.pc_rel = entry->pc_rel;
3107 }
3108 else
3109 {
3110
3111 if (skip_past_char (&p, '='))
3112 /* =immediate; need to generate the literal in the literal pool. */
3113 inst.gen_lit_pool = 1;
3114
3115 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3116 {
3117 set_syntax_error (_("invalid address"));
3118 return FALSE;
3119 }
3120 }
3121
3122 *str = p;
3123 return TRUE;
3124 }
3125
3126 /* [ */
3127
3128 /* Accept SP and reject ZR */
3129 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
3130 if (reg == PARSE_FAIL || isreg32)
3131 {
3132 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3133 return FALSE;
3134 }
3135 operand->addr.base_regno = reg;
3136
3137 /* [Xn */
3138 if (skip_past_comma (&p))
3139 {
3140 /* [Xn, */
3141 operand->addr.preind = 1;
3142
3143 /* Reject SP and accept ZR */
3144 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3145 if (reg != PARSE_FAIL)
3146 {
3147 /* [Xn,Rm */
3148 operand->addr.offset.regno = reg;
3149 operand->addr.offset.is_reg = 1;
3150 /* Shifted index. */
3151 if (skip_past_comma (&p))
3152 {
3153 /* [Xn,Rm, */
3154 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3155 /* Use the diagnostics set in parse_shift, so do not set a new
3156 error message here. */
3157 return FALSE;
3158 }
3159 /* We only accept:
3160 [base,Xm{,LSL #imm}]
3161 [base,Xm,SXTX {#imm}]
3162 [base,Wm,(S|U)XTW {#imm}] */
3163 if (operand->shifter.kind == AARCH64_MOD_NONE
3164 || operand->shifter.kind == AARCH64_MOD_LSL
3165 || operand->shifter.kind == AARCH64_MOD_SXTX)
3166 {
3167 if (isreg32)
3168 {
3169 set_syntax_error (_("invalid use of 32-bit register offset"));
3170 return FALSE;
3171 }
3172 }
3173 else if (!isreg32)
3174 {
3175 set_syntax_error (_("invalid use of 64-bit register offset"));
3176 return FALSE;
3177 }
3178 }
3179 else
3180 {
3181 /* [Xn,#:<reloc_op>:<symbol> */
3182 skip_past_char (&p, '#');
3183 if (reloc && skip_past_char (&p, ':'))
3184 {
3185 struct reloc_table_entry *entry;
3186
3187 /* Try to parse a relocation modifier. Anything else is
3188 an error. */
3189 if (!(entry = find_reloc_table_entry (&p)))
3190 {
3191 set_syntax_error (_("unknown relocation modifier"));
3192 return FALSE;
3193 }
3194
3195 if (entry->ldst_type == 0)
3196 {
3197 set_syntax_error
3198 (_("this relocation modifier is not allowed on this "
3199 "instruction"));
3200 return FALSE;
3201 }
3202
3203 /* [Xn,#:<reloc_op>: */
3204 /* We now have the group relocation table entry corresponding to
3205 the name in the assembler source. Next, we parse the
3206 expression. */
3207 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3208 {
3209 set_syntax_error (_("invalid relocation expression"));
3210 return FALSE;
3211 }
3212
3213 /* [Xn,#:<reloc_op>:<expr> */
3214 /* Record the load/store relocation type. */
3215 inst.reloc.type = entry->ldst_type;
3216 inst.reloc.pc_rel = entry->pc_rel;
3217 }
3218 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3219 {
3220 set_syntax_error (_("invalid expression in the address"));
3221 return FALSE;
3222 }
3223 /* [Xn,<expr> */
3224 }
3225 }
3226
3227 if (! skip_past_char (&p, ']'))
3228 {
3229 set_syntax_error (_("']' expected"));
3230 return FALSE;
3231 }
3232
3233 if (skip_past_char (&p, '!'))
3234 {
3235 if (operand->addr.preind && operand->addr.offset.is_reg)
3236 {
3237 set_syntax_error (_("register offset not allowed in pre-indexed "
3238 "addressing mode"));
3239 return FALSE;
3240 }
3241 /* [Xn]! */
3242 operand->addr.writeback = 1;
3243 }
3244 else if (skip_past_comma (&p))
3245 {
3246 /* [Xn], */
3247 operand->addr.postind = 1;
3248 operand->addr.writeback = 1;
3249
3250 if (operand->addr.preind)
3251 {
3252 set_syntax_error (_("cannot combine pre- and post-indexing"));
3253 return FALSE;
3254 }
3255
3256 if (accept_reg_post_index
3257 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3258 &isregzero)) != PARSE_FAIL)
3259 {
3260 /* [Xn],Xm */
3261 if (isreg32)
3262 {
3263 set_syntax_error (_("invalid 32-bit register offset"));
3264 return FALSE;
3265 }
3266 operand->addr.offset.regno = reg;
3267 operand->addr.offset.is_reg = 1;
3268 }
3269 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3270 {
3271 /* [Xn],#expr */
3272 set_syntax_error (_("invalid expression in the address"));
3273 return FALSE;
3274 }
3275 }
3276
3277 /* If at this point neither .preind nor .postind is set, we have a
3278 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3279 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3280 {
3281 if (operand->addr.writeback)
3282 {
3283 /* Reject [Rn]! */
3284 set_syntax_error (_("missing offset in the pre-indexed address"));
3285 return FALSE;
3286 }
3287 operand->addr.preind = 1;
3288 inst.reloc.exp.X_op = O_constant;
3289 inst.reloc.exp.X_add_number = 0;
3290 }
3291
3292 *str = p;
3293 return TRUE;
3294 }
3295
3296 /* Return TRUE on success; otherwise return FALSE. */
3297 static bfd_boolean
3298 parse_address (char **str, aarch64_opnd_info *operand,
3299 int accept_reg_post_index)
3300 {
3301 return parse_address_main (str, operand, 0, accept_reg_post_index);
3302 }
3303
3304 /* Return TRUE on success; otherwise return FALSE. */
3305 static bfd_boolean
3306 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3307 {
3308 return parse_address_main (str, operand, 1, 0);
3309 }
3310
3311 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3312 Return TRUE on success; otherwise return FALSE. */
3313 static bfd_boolean
3314 parse_half (char **str, int *internal_fixup_p)
3315 {
3316 char *p, *saved;
3317 int dummy;
3318
3319 p = *str;
3320 skip_past_char (&p, '#');
3321
3322 gas_assert (internal_fixup_p);
3323 *internal_fixup_p = 0;
3324
3325 if (*p == ':')
3326 {
3327 struct reloc_table_entry *entry;
3328
3329 /* Try to parse a relocation. Anything else is an error. */
3330 ++p;
3331 if (!(entry = find_reloc_table_entry (&p)))
3332 {
3333 set_syntax_error (_("unknown relocation modifier"));
3334 return FALSE;
3335 }
3336
3337 if (entry->movw_type == 0)
3338 {
3339 set_syntax_error
3340 (_("this relocation modifier is not allowed on this instruction"));
3341 return FALSE;
3342 }
3343
3344 inst.reloc.type = entry->movw_type;
3345 }
3346 else
3347 *internal_fixup_p = 1;
3348
3349 /* Avoid parsing a register as a general symbol. */
3350 saved = p;
3351 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3352 return FALSE;
3353 p = saved;
3354
3355 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3356 return FALSE;
3357
3358 *str = p;
3359 return TRUE;
3360 }
3361
3362 /* Parse an operand for an ADRP instruction:
3363 ADRP <Xd>, <label>
3364 Return TRUE on success; otherwise return FALSE. */
3365
3366 static bfd_boolean
3367 parse_adrp (char **str)
3368 {
3369 char *p;
3370
3371 p = *str;
3372 if (*p == ':')
3373 {
3374 struct reloc_table_entry *entry;
3375
3376 /* Try to parse a relocation. Anything else is an error. */
3377 ++p;
3378 if (!(entry = find_reloc_table_entry (&p)))
3379 {
3380 set_syntax_error (_("unknown relocation modifier"));
3381 return FALSE;
3382 }
3383
3384 if (entry->adrp_type == 0)
3385 {
3386 set_syntax_error
3387 (_("this relocation modifier is not allowed on this instruction"));
3388 return FALSE;
3389 }
3390
3391 inst.reloc.type = entry->adrp_type;
3392 }
3393 else
3394 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3395
3396 inst.reloc.pc_rel = 1;
3397
3398 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3399 return FALSE;
3400
3401 *str = p;
3402 return TRUE;
3403 }
3404
3405 /* Miscellaneous. */
3406
3407 /* Parse an option for a preload instruction. Returns the encoding for the
3408 option, or PARSE_FAIL. */
3409
3410 static int
3411 parse_pldop (char **str)
3412 {
3413 char *p, *q;
3414 const struct aarch64_name_value_pair *o;
3415
3416 p = q = *str;
3417 while (ISALNUM (*q))
3418 q++;
3419
3420 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3421 if (!o)
3422 return PARSE_FAIL;
3423
3424 *str = q;
3425 return o->value;
3426 }
3427
3428 /* Parse an option for a barrier instruction. Returns the encoding for the
3429 option, or PARSE_FAIL. */
3430
3431 static int
3432 parse_barrier (char **str)
3433 {
3434 char *p, *q;
3435 const asm_barrier_opt *o;
3436
3437 p = q = *str;
3438 while (ISALPHA (*q))
3439 q++;
3440
3441 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3442 if (!o)
3443 return PARSE_FAIL;
3444
3445 *str = q;
3446 return o->value;
3447 }
3448
3449 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3450 Returns the encoding for the option, or PARSE_FAIL.
3451
3452 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3453 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3454
3455 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3456 field, otherwise as a system register.
3457 */
3458
3459 static int
3460 parse_sys_reg (char **str, struct hash_control *sys_regs,
3461 int imple_defined_p, int pstatefield_p)
3462 {
3463 char *p, *q;
3464 char buf[32];
3465 const aarch64_sys_reg *o;
3466 int value;
3467
3468 p = buf;
3469 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3470 if (p < buf + 31)
3471 *p++ = TOLOWER (*q);
3472 *p = '\0';
3473 /* Assert that BUF is large enough. */
3474 gas_assert (p - buf == q - *str);
3475
3476 o = hash_find (sys_regs, buf);
3477 if (!o)
3478 {
3479 if (!imple_defined_p)
3480 return PARSE_FAIL;
3481 else
3482 {
3483 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3484 unsigned int op0, op1, cn, cm, op2;
3485
3486 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3487 != 5)
3488 return PARSE_FAIL;
3489 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3490 return PARSE_FAIL;
3491 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3492 }
3493 }
3494 else
3495 {
3496 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3497 as_bad (_("selected processor does not support PSTATE field "
3498 "name '%s'"), buf);
3499 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3500 as_bad (_("selected processor does not support system register "
3501 "name '%s'"), buf);
3502 if (aarch64_sys_reg_deprecated_p (o))
3503 as_warn (_("system register name '%s' is deprecated and may be "
3504 "removed in a future release"), buf);
3505 value = o->value;
3506 }
3507
3508 *str = q;
3509 return value;
3510 }
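/* A sketch of the implementation-defined path above (the register name
   is purely illustrative): "s3_0_c15_c2_0" misses the hash table, the
   sscanf pattern extracts op0=3, op1=0, Cn=15, Cm=2, op2=0, all within
   range, and the returned value is (3 << 14) | (15 << 7) | (2 << 3),
   i.e. 0xc790.  */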
3511
3512 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3513 for the option, or NULL. */
3514
3515 static const aarch64_sys_ins_reg *
3516 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3517 {
3518 char *p, *q;
3519 char buf[32];
3520 const aarch64_sys_ins_reg *o;
3521
3522 p = buf;
3523 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3524 if (p < buf + 31)
3525 *p++ = TOLOWER (*q);
3526 *p = '\0';
3527
3528 o = hash_find (sys_ins_regs, buf);
3529 if (!o)
3530 return NULL;
3531
3532 *str = q;
3533 return o;
3534 }
3535 \f
3536 #define po_char_or_fail(chr) do { \
3537 if (! skip_past_char (&str, chr)) \
3538 goto failure; \
3539 } while (0)
3540
3541 #define po_reg_or_fail(regtype) do { \
3542 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3543 if (val == PARSE_FAIL) \
3544 { \
3545 set_default_error (); \
3546 goto failure; \
3547 } \
3548 } while (0)
3549
3550 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3551 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3552 &isreg32, &isregzero); \
3553 if (val == PARSE_FAIL) \
3554 { \
3555 set_default_error (); \
3556 goto failure; \
3557 } \
3558 info->reg.regno = val; \
3559 if (isreg32) \
3560 info->qualifier = AARCH64_OPND_QLF_W; \
3561 else \
3562 info->qualifier = AARCH64_OPND_QLF_X; \
3563 } while (0)
3564
3565 #define po_imm_nc_or_fail() do { \
3566 if (! parse_constant_immediate (&str, &val)) \
3567 goto failure; \
3568 } while (0)
3569
3570 #define po_imm_or_fail(min, max) do { \
3571 if (! parse_constant_immediate (&str, &val)) \
3572 goto failure; \
3573 if (val < min || val > max) \
3574 { \
3575 set_fatal_syntax_error (_("immediate value out of range "\
3576 #min " to "#max)); \
3577 goto failure; \
3578 } \
3579 } while (0)
3580
3581 #define po_misc_or_fail(expr) do { \
3582 if (!expr) \
3583 goto failure; \
3584 } while (0)
3585 \f
3586 /* encode the 12-bit imm field of Add/sub immediate */
3587 static inline uint32_t
3588 encode_addsub_imm (uint32_t imm)
3589 {
3590 return imm << 10;
3591 }
3592
3593 /* encode the shift amount field of Add/sub immediate */
3594 static inline uint32_t
3595 encode_addsub_imm_shift_amount (uint32_t cnt)
3596 {
3597 return cnt << 22;
3598 }
3599
3600
3601 /* encode the imm field of Adr instruction */
3602 static inline uint32_t
3603 encode_adr_imm (uint32_t imm)
3604 {
3605 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3606 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3607 }
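/* For illustration: encode_adr_imm (5) places the low two bits (immlo,
   0b01) into bits [30:29] and the remaining bits (immhi, 1) into bits
   [23:5], giving 0x20000020.  */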
3608
3609 /* encode the immediate field of Move wide immediate */
3610 static inline uint32_t
3611 encode_movw_imm (uint32_t imm)
3612 {
3613 return imm << 5;
3614 }
3615
3616 /* encode the 26-bit offset of unconditional branch */
3617 static inline uint32_t
3618 encode_branch_ofs_26 (uint32_t ofs)
3619 {
3620 return ofs & ((1 << 26) - 1);
3621 }
3622
3623 /* encode the 19-bit offset of conditional branch and compare & branch */
3624 static inline uint32_t
3625 encode_cond_branch_ofs_19 (uint32_t ofs)
3626 {
3627 return (ofs & ((1 << 19) - 1)) << 5;
3628 }
3629
3630 /* encode the 19-bit offset of ld literal */
3631 static inline uint32_t
3632 encode_ld_lit_ofs_19 (uint32_t ofs)
3633 {
3634 return (ofs & ((1 << 19) - 1)) << 5;
3635 }
3636
3637 /* Encode the 14-bit offset of test & branch. */
3638 static inline uint32_t
3639 encode_tst_branch_ofs_14 (uint32_t ofs)
3640 {
3641 return (ofs & ((1 << 14) - 1)) << 5;
3642 }
3643
3644 /* Encode the 16-bit imm field of svc/hvc/smc. */
3645 static inline uint32_t
3646 encode_svc_imm (uint32_t imm)
3647 {
3648 return imm << 5;
3649 }
3650
3651 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3652 static inline uint32_t
3653 reencode_addsub_switch_add_sub (uint32_t opcode)
3654 {
3655 return opcode ^ (1 << 30);
3656 }
3657
3658 static inline uint32_t
3659 reencode_movzn_to_movz (uint32_t opcode)
3660 {
3661 return opcode | (1 << 30);
3662 }
3663
3664 static inline uint32_t
3665 reencode_movzn_to_movn (uint32_t opcode)
3666 {
3667 return opcode & ~(1 << 30);
3668 }
3669
3670 /* Overall per-instruction processing. */
3671
3672 /* We need to be able to fix up arbitrary expressions in some statements.
3673 This is so that we can handle symbols that are an arbitrary distance from
3674 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3675 which returns part of an address in a form which will be valid for
3676 a data instruction. We do this by pushing the expression into a symbol
3677 in the expr_section, and creating a fix for that. */
3678
3679 static fixS *
3680 fix_new_aarch64 (fragS * frag,
3681 int where,
3682 short int size, expressionS * exp, int pc_rel, int reloc)
3683 {
3684 fixS *new_fix;
3685
3686 switch (exp->X_op)
3687 {
3688 case O_constant:
3689 case O_symbol:
3690 case O_add:
3691 case O_subtract:
3692 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3693 break;
3694
3695 default:
3696 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3697 pc_rel, reloc);
3698 break;
3699 }
3700 return new_fix;
3701 }
3702 \f
3703 /* Diagnostics on operands errors. */
3704
3705 /* By default, output a verbose error message.
3706 The verbose error message can be disabled by -mno-verbose-error. */
3707 static int verbose_error_p = 1;
3708
3709 #ifdef DEBUG_AARCH64
3710 /* N.B. this is only for the purpose of debugging. */
3711 const char* operand_mismatch_kind_names[] =
3712 {
3713 "AARCH64_OPDE_NIL",
3714 "AARCH64_OPDE_RECOVERABLE",
3715 "AARCH64_OPDE_SYNTAX_ERROR",
3716 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3717 "AARCH64_OPDE_INVALID_VARIANT",
3718 "AARCH64_OPDE_OUT_OF_RANGE",
3719 "AARCH64_OPDE_UNALIGNED",
3720 "AARCH64_OPDE_REG_LIST",
3721 "AARCH64_OPDE_OTHER_ERROR",
3722 };
3723 #endif /* DEBUG_AARCH64 */
3724
3725 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3726
3727 When multiple errors of different kinds are found in the same assembly
3728 line, only the error of the highest severity will be picked up for
3729 issuing the diagnostics. */
3730
3731 static inline bfd_boolean
3732 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3733 enum aarch64_operand_error_kind rhs)
3734 {
3735 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3736 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3737 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3738 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3739 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3740 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3741 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3742 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3743 return lhs > rhs;
3744 }
3745
3746 /* Helper routine to get the mnemonic name from the assembly instruction
3747 line; should only be called for diagnostic purposes, as a string
3748 copy operation is involved, which may affect the runtime
3749 performance if used elsewhere. */
3750
3751 static const char*
3752 get_mnemonic_name (const char *str)
3753 {
3754 static char mnemonic[32];
3755 char *ptr;
3756
3757 /* Get the first 31 bytes and assume that the full name is included. */
3758 strncpy (mnemonic, str, 31);
3759 mnemonic[31] = '\0';
3760
3761 /* Scan up to the end of the mnemonic, which must end in white space,
3762 '.', or end of string. */
3763 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3764 ;
3765
3766 *ptr = '\0';
3767
3768 /* Append '...' to the truncated long name. */
3769 if (ptr - mnemonic == 31)
3770 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3771
3772 return mnemonic;
3773 }
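/* For illustration: for the line "ldnp h0,h1,[x0,#6]!" this returns
   "ldnp", since the scan stops at the first character that is not part
   of a name (the space after the mnemonic).  */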
3774
3775 static void
3776 reset_aarch64_instruction (aarch64_instruction *instruction)
3777 {
3778 memset (instruction, '\0', sizeof (aarch64_instruction));
3779 instruction->reloc.type = BFD_RELOC_UNUSED;
3780 }
3781
3782 /* Data structures storing one user error in the assembly code related to
3783 operands. */
3784
3785 struct operand_error_record
3786 {
3787 const aarch64_opcode *opcode;
3788 aarch64_operand_error detail;
3789 struct operand_error_record *next;
3790 };
3791
3792 typedef struct operand_error_record operand_error_record;
3793
3794 struct operand_errors
3795 {
3796 operand_error_record *head;
3797 operand_error_record *tail;
3798 };
3799
3800 typedef struct operand_errors operand_errors;
3801
3802 /* Top-level data structure reporting user errors for the current line of
3803 the assembly code.
3804 The way md_assemble works is that all opcodes sharing the same mnemonic
3805 name are iterated to find a match to the assembly line. In this data
3806 structure, each such opcode will have one operand_error_record
3807 allocated and inserted. In other words, excessive errors related to
3808 a single opcode are disregarded. */
3809 operand_errors operand_error_report;
3810
3811 /* Free record nodes. */
3812 static operand_error_record *free_opnd_error_record_nodes = NULL;
3813
3814 /* Initialize the data structure that stores the operand mismatch
3815 information on assembling one line of the assembly code. */
3816 static void
3817 init_operand_error_report (void)
3818 {
3819 if (operand_error_report.head != NULL)
3820 {
3821 gas_assert (operand_error_report.tail != NULL);
3822 operand_error_report.tail->next = free_opnd_error_record_nodes;
3823 free_opnd_error_record_nodes = operand_error_report.head;
3824 operand_error_report.head = NULL;
3825 operand_error_report.tail = NULL;
3826 return;
3827 }
3828 gas_assert (operand_error_report.tail == NULL);
3829 }
3830
3831 /* Return TRUE if some operand error has been recorded during the
3832 parsing of the current assembly line using the opcode *OPCODE;
3833 otherwise return FALSE. */
3834 static inline bfd_boolean
3835 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3836 {
3837 operand_error_record *record = operand_error_report.head;
3838 return record && record->opcode == opcode;
3839 }
3840
3841 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3842 OPCODE field is initialized with OPCODE.
3843 N.B. there is only one record for each opcode, i.e. at most one error is
3844 recorded for each instruction template. */
3845
3846 static void
3847 add_operand_error_record (const operand_error_record* new_record)
3848 {
3849 const aarch64_opcode *opcode = new_record->opcode;
3850 operand_error_record* record = operand_error_report.head;
3851
3852 /* The record may have been created for this opcode. If not, we need
3853 to prepare one. */
3854 if (! opcode_has_operand_error_p (opcode))
3855 {
3856 /* Get one empty record. */
3857 if (free_opnd_error_record_nodes == NULL)
3858 {
3859 record = xmalloc (sizeof (operand_error_record));
3860 if (record == NULL)
3861 abort ();
3862 }
3863 else
3864 {
3865 record = free_opnd_error_record_nodes;
3866 free_opnd_error_record_nodes = record->next;
3867 }
3868 record->opcode = opcode;
3869 /* Insert at the head. */
3870 record->next = operand_error_report.head;
3871 operand_error_report.head = record;
3872 if (operand_error_report.tail == NULL)
3873 operand_error_report.tail = record;
3874 }
3875 else if (record->detail.kind != AARCH64_OPDE_NIL
3876 && record->detail.index <= new_record->detail.index
3877 && operand_error_higher_severity_p (record->detail.kind,
3878 new_record->detail.kind))
3879 {
3880 /* In the case of multiple errors found on operands related to a
3881 single opcode, only record the error of the leftmost operand and
3882 only if the error is of higher severity. */
3883 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3884 " the existing error %s on operand %d",
3885 operand_mismatch_kind_names[new_record->detail.kind],
3886 new_record->detail.index,
3887 operand_mismatch_kind_names[record->detail.kind],
3888 record->detail.index);
3889 return;
3890 }
3891
3892 record->detail = new_record->detail;
3893 }
3894
3895 static inline void
3896 record_operand_error_info (const aarch64_opcode *opcode,
3897 aarch64_operand_error *error_info)
3898 {
3899 operand_error_record record;
3900 record.opcode = opcode;
3901 record.detail = *error_info;
3902 add_operand_error_record (&record);
3903 }
3904
3905 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3906 error message *ERROR, for operand IDX (counting from 0). */
3907
3908 static void
3909 record_operand_error (const aarch64_opcode *opcode, int idx,
3910 enum aarch64_operand_error_kind kind,
3911 const char* error)
3912 {
3913 aarch64_operand_error info;
3914 memset(&info, 0, sizeof (info));
3915 info.index = idx;
3916 info.kind = kind;
3917 info.error = error;
3918 record_operand_error_info (opcode, &info);
3919 }
3920
3921 static void
3922 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3923 enum aarch64_operand_error_kind kind,
3924 const char* error, const int *extra_data)
3925 {
3926 aarch64_operand_error info;
3927 info.index = idx;
3928 info.kind = kind;
3929 info.error = error;
3930 info.data[0] = extra_data[0];
3931 info.data[1] = extra_data[1];
3932 info.data[2] = extra_data[2];
3933 record_operand_error_info (opcode, &info);
3934 }
3935
3936 static void
3937 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3938 const char* error, int lower_bound,
3939 int upper_bound)
3940 {
3941 int data[3] = {lower_bound, upper_bound, 0};
3942 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3943 error, data);
3944 }
3945
3946 /* Remove the operand error record for *OPCODE. */
3947 static void ATTRIBUTE_UNUSED
3948 remove_operand_error_record (const aarch64_opcode *opcode)
3949 {
3950 if (opcode_has_operand_error_p (opcode))
3951 {
3952 operand_error_record* record = operand_error_report.head;
3953 gas_assert (record != NULL && operand_error_report.tail != NULL);
3954 operand_error_report.head = record->next;
3955 record->next = free_opnd_error_record_nodes;
3956 free_opnd_error_record_nodes = record;
3957 if (operand_error_report.head == NULL)
3958 {
3959 gas_assert (operand_error_report.tail == record);
3960 operand_error_report.tail = NULL;
3961 }
3962 }
3963 }
3964
3965 /* Given the instruction in *INSTR, return the index of the best matched
3966 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3967
3968 Return -1 if there is no qualifier sequence; return the first match
3969 if multiple matches are found. */
3970
3971 static int
3972 find_best_match (const aarch64_inst *instr,
3973 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3974 {
3975 int i, num_opnds, max_num_matched, idx;
3976
3977 num_opnds = aarch64_num_of_operands (instr->opcode);
3978 if (num_opnds == 0)
3979 {
3980 DEBUG_TRACE ("no operand");
3981 return -1;
3982 }
3983
3984 max_num_matched = 0;
3985 idx = -1;
3986
3987 /* For each pattern. */
3988 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3989 {
3990 int j, num_matched;
3991 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3992
3993 /* Most opcodes have far fewer patterns in the list. */
3994 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3995 {
3996 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3997 if (i != 0 && idx == -1)
3998 /* If nothing has been matched, return the 1st sequence. */
3999 idx = 0;
4000 break;
4001 }
4002
4003 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4004 if (*qualifiers == instr->operands[j].qualifier)
4005 ++num_matched;
4006
4007 if (num_matched > max_num_matched)
4008 {
4009 max_num_matched = num_matched;
4010 idx = i;
4011 }
4012 }
4013
4014 DEBUG_TRACE ("return with %d", idx);
4015 return idx;
4016 }
4017
4018 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4019 corresponding operands in *INSTR. */
4020
4021 static inline void
4022 assign_qualifier_sequence (aarch64_inst *instr,
4023 const aarch64_opnd_qualifier_t *qualifiers)
4024 {
4025 int i = 0;
4026 int num_opnds = aarch64_num_of_operands (instr->opcode);
4027 gas_assert (num_opnds);
4028 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4029 instr->operands[i].qualifier = *qualifiers;
4030 }
4031
4032 /* Print operands for diagnostic purposes. */
4033
4034 static void
4035 print_operands (char *buf, const aarch64_opcode *opcode,
4036 const aarch64_opnd_info *opnds)
4037 {
4038 int i;
4039
4040 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4041 {
4042 const size_t size = 128;
4043 char str[size];
4044
4045 /* We rely mainly on the opcode's operand info; however, we also look into
4046 inst->operands to support the printing of an optional
4047 operand.
4048 The two operand codes should be the same in all cases, apart from
4049 when the operand can be optional. */
4050 if (opcode->operands[i] == AARCH64_OPND_NIL
4051 || opnds[i].type == AARCH64_OPND_NIL)
4052 break;
4053
4054 /* Generate the operand string in STR. */
4055 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
4056
4057 /* Delimiter. */
4058 if (str[0] != '\0')
4059 strcat (buf, i == 0 ? " " : ",");
4060
4061 /* Append the operand string. */
4062 strcat (buf, str);
4063 }
4064 }
4065
4066 /* Send to stderr a string as information. */
4067
4068 static void
4069 output_info (const char *format, ...)
4070 {
4071 char *file;
4072 unsigned int line;
4073 va_list args;
4074
4075 as_where (&file, &line);
4076 if (file)
4077 {
4078 if (line != 0)
4079 fprintf (stderr, "%s:%u: ", file, line);
4080 else
4081 fprintf (stderr, "%s: ", file);
4082 }
4083 fprintf (stderr, _("Info: "));
4084 va_start (args, format);
4085 vfprintf (stderr, format, args);
4086 va_end (args);
4087 (void) putc ('\n', stderr);
4088 }
4089
4090 /* Output one operand error record. */
4091
4092 static void
4093 output_operand_error_record (const operand_error_record *record, char *str)
4094 {
4095 const aarch64_operand_error *detail = &record->detail;
4096 int idx = detail->index;
4097 const aarch64_opcode *opcode = record->opcode;
4098 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4099 : AARCH64_OPND_NIL);
4100
4101 switch (detail->kind)
4102 {
4103 case AARCH64_OPDE_NIL:
4104 gas_assert (0);
4105 break;
4106
4107 case AARCH64_OPDE_SYNTAX_ERROR:
4108 case AARCH64_OPDE_RECOVERABLE:
4109 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4110 case AARCH64_OPDE_OTHER_ERROR:
4111 /* Use the prepared error message if there is one; otherwise use the
4112 operand description string to describe the error. */
4113 if (detail->error != NULL)
4114 {
4115 if (idx < 0)
4116 as_bad (_("%s -- `%s'"), detail->error, str);
4117 else
4118 as_bad (_("%s at operand %d -- `%s'"),
4119 detail->error, idx + 1, str);
4120 }
4121 else
4122 {
4123 gas_assert (idx >= 0);
4124 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4125 aarch64_get_operand_desc (opd_code), str);
4126 }
4127 break;
4128
4129 case AARCH64_OPDE_INVALID_VARIANT:
4130 as_bad (_("operand mismatch -- `%s'"), str);
4131 if (verbose_error_p)
4132 {
4133 /* We will try to correct the erroneous instruction and also provide
4134 more information e.g. all other valid variants.
4135
4136 The string representation of the corrected instruction and other
4137 valid variants are generated by
4138
4139 1) obtaining the intermediate representation of the erroneous
4140 instruction;
4141 2) manipulating the IR, e.g. replacing the operand qualifier;
4142 3) printing out the instruction by calling the printer functions
4143 shared with the disassembler.
4144
4145 The limitation of this method is that the exact input assembly
4146 line cannot be accurately reproduced in some cases, for example an
4147 optional operand present in the actual assembly line will be
4148 omitted in the output; likewise for the optional syntax rules,
4149 e.g. the # before the immediate. Another limitation is that the
4150 assembly symbols and relocation operations in the assembly line
4151 currently cannot be printed out in the error report. Last but not
4152 least, when other errors co-exist with this one, the 'corrected'
4153 instruction may still be incorrect, e.g. given
4154 'ldnp h0,h1,[x0,#6]!'
4155 this diagnosis will provide the version:
4156 'ldnp s0,s1,[x0,#6]!'
4157 which is still not right. */
4158 size_t len = strlen (get_mnemonic_name (str));
4159 int i, qlf_idx;
4160 bfd_boolean result;
4161 const size_t size = 2048;
4162 char buf[size];
4163 aarch64_inst *inst_base = &inst.base;
4164 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4165
4166 /* Init inst. */
4167 reset_aarch64_instruction (&inst);
4168 inst_base->opcode = opcode;
4169
4170 /* Reset the error report so that there is no side effect on the
4171 following operand parsing. */
4172 init_operand_error_report ();
4173
4174 /* Fill inst. */
4175 result = parse_operands (str + len, opcode)
4176 && programmer_friendly_fixup (&inst);
4177 gas_assert (result);
4178 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4179 NULL, NULL);
4180 gas_assert (!result);
4181
4182 /* Find the most matched qualifier sequence. */
4183 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4184 gas_assert (qlf_idx > -1);
4185
4186 /* Assign the qualifiers. */
4187 assign_qualifier_sequence (inst_base,
4188 opcode->qualifiers_list[qlf_idx]);
4189
4190 /* Print the hint. */
4191 output_info (_(" did you mean this?"));
4192 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4193 print_operands (buf, opcode, inst_base->operands);
4194 output_info (_(" %s"), buf);
4195
4196 /* Print out other variant(s) if there is any. */
4197 if (qlf_idx != 0 ||
4198 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4199 output_info (_(" other valid variant(s):"));
4200
4201 /* For each pattern. */
4202 qualifiers_list = opcode->qualifiers_list;
4203 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4204 {
4205 /* Most opcodes have far fewer patterns in the list.
4206 The first NIL qualifier indicates the end of the list. */
4207 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4208 break;
4209
4210 if (i != qlf_idx)
4211 {
4212 /* Mnemonics name. */
4213 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4214
4215 /* Assign the qualifiers. */
4216 assign_qualifier_sequence (inst_base, *qualifiers_list);
4217
4218 /* Print instruction. */
4219 print_operands (buf, opcode, inst_base->operands);
4220
4221 output_info (_(" %s"), buf);
4222 }
4223 }
4224 }
4225 break;
4226
4227 case AARCH64_OPDE_OUT_OF_RANGE:
4228 if (detail->data[0] != detail->data[1])
4229 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4230 detail->error ? detail->error : _("immediate value"),
4231 detail->data[0], detail->data[1], idx + 1, str);
4232 else
4233 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4234 detail->error ? detail->error : _("immediate value"),
4235 detail->data[0], idx + 1, str);
4236 break;
4237
4238 case AARCH64_OPDE_REG_LIST:
4239 if (detail->data[0] == 1)
4240 as_bad (_("invalid number of registers in the list; "
4241 "only 1 register is expected at operand %d -- `%s'"),
4242 idx + 1, str);
4243 else
4244 as_bad (_("invalid number of registers in the list; "
4245 "%d registers are expected at operand %d -- `%s'"),
4246 detail->data[0], idx + 1, str);
4247 break;
4248
4249 case AARCH64_OPDE_UNALIGNED:
4250 as_bad (_("immediate value should be a multiple of "
4251 "%d at operand %d -- `%s'"),
4252 detail->data[0], idx + 1, str);
4253 break;
4254
4255 default:
4256 gas_assert (0);
4257 break;
4258 }
4259 }
4260
4261 /* Process and output the error message about the operand mismatching.
4262
4263 When this function is called, the operand error information has
4264 been collected for an assembly line and there will be multiple
4265 errors in the case of multiple instruction templates; output the
4266 error message that most closely describes the problem. */
4267
4268 static void
4269 output_operand_error_report (char *str)
4270 {
4271 int largest_error_pos;
4272 const char *msg = NULL;
4273 enum aarch64_operand_error_kind kind;
4274 operand_error_record *curr;
4275 operand_error_record *head = operand_error_report.head;
4276 operand_error_record *record = NULL;
4277
4278 /* No error to report. */
4279 if (head == NULL)
4280 return;
4281
4282 gas_assert (head != NULL && operand_error_report.tail != NULL);
4283
4284 /* Only one error. */
4285 if (head == operand_error_report.tail)
4286 {
4287 DEBUG_TRACE ("single opcode entry with error kind: %s",
4288 operand_mismatch_kind_names[head->detail.kind]);
4289 output_operand_error_record (head, str);
4290 return;
4291 }
4292
4293 /* Find the error kind of the highest severity. */
4294 DEBUG_TRACE ("multiple opcode entres with error kind");
4295 kind = AARCH64_OPDE_NIL;
4296 for (curr = head; curr != NULL; curr = curr->next)
4297 {
4298 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4299 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4300 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4301 kind = curr->detail.kind;
4302 }
4303 gas_assert (kind != AARCH64_OPDE_NIL);
4304
4305 /* Pick up one of the errors of KIND to report. */
4306 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4307 for (curr = head; curr != NULL; curr = curr->next)
4308 {
4309 if (curr->detail.kind != kind)
4310 continue;
4311 /* If there are multiple errors, pick up the one with the highest
4312 mismatching operand index. In the case of multiple errors with
4313 the equally highest operand index, pick up the first one or the
4314 first one with non-NULL error message. */
4315 if (curr->detail.index > largest_error_pos
4316 || (curr->detail.index == largest_error_pos && msg == NULL
4317 && curr->detail.error != NULL))
4318 {
4319 largest_error_pos = curr->detail.index;
4320 record = curr;
4321 msg = record->detail.error;
4322 }
4323 }
4324
4325 gas_assert (largest_error_pos != -2 && record != NULL);
4326 DEBUG_TRACE ("Pick up error kind %s to report",
4327 operand_mismatch_kind_names[record->detail.kind]);
4328
4329 /* Output. */
4330 output_operand_error_record (record, str);
4331 }
4332 \f
4333 /* Write an AARCH64 instruction to buf - always little-endian. */
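/* For example, a NOP (0xd503201f) is emitted as the byte sequence
   0x1f 0x20 0x03 0xd5, regardless of the data endianness selected for
   the output. */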
4334 static void
4335 put_aarch64_insn (char *buf, uint32_t insn)
4336 {
4337 unsigned char *where = (unsigned char *) buf;
4338 where[0] = insn;
4339 where[1] = insn >> 8;
4340 where[2] = insn >> 16;
4341 where[3] = insn >> 24;
4342 }
4343
4344 static uint32_t
4345 get_aarch64_insn (char *buf)
4346 {
4347 unsigned char *where = (unsigned char *) buf;
4348 uint32_t result;
4349 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4350 return result;
4351 }
4352
4353 static void
4354 output_inst (struct aarch64_inst *new_inst)
4355 {
4356 char *to = NULL;
4357
4358 to = frag_more (INSN_SIZE);
4359
4360 frag_now->tc_frag_data.recorded = 1;
4361
4362 put_aarch64_insn (to, inst.base.value);
4363
4364 if (inst.reloc.type != BFD_RELOC_UNUSED)
4365 {
4366 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4367 INSN_SIZE, &inst.reloc.exp,
4368 inst.reloc.pc_rel,
4369 inst.reloc.type);
4370 DEBUG_TRACE ("Prepared relocation fix up");
4371 /* Don't check the addend value against the instruction size,
4372 that's the job of our code in md_apply_fix(). */
4373 fixp->fx_no_overflow = 1;
4374 if (new_inst != NULL)
4375 fixp->tc_fix_data.inst = new_inst;
4376 if (aarch64_gas_internal_fixup_p ())
4377 {
4378 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4379 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4380 fixp->fx_addnumber = inst.reloc.flags;
4381 }
4382 }
4383
4384 dwarf2_emit_insn (INSN_SIZE);
4385 }
4386
4387 /* Link together opcodes of the same name. */
4388
4389 struct templates
4390 {
4391 aarch64_opcode *opcode;
4392 struct templates *next;
4393 };
4394
4395 typedef struct templates templates;
4396
4397 static templates *
4398 lookup_mnemonic (const char *start, int len)
4399 {
4400 templates *templ = NULL;
4401
4402 templ = hash_find_n (aarch64_ops_hsh, start, len);
4403 return templ;
4404 }
4405
4406 /* Subroutine of md_assemble, responsible for looking up the primary
4407 opcode from the mnemonic the user wrote. STR points to the
4408 beginning of the mnemonic. */
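/* For example, for "b.eq" the condition "eq" is recorded in inst.cond and
   the mnemonic table is then searched for the suffixed name "b.c" built
   below; a plain "b" is looked up unmodified. */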
4409
4410 static templates *
4411 opcode_lookup (char **str)
4412 {
4413 char *end, *base;
4414 const aarch64_cond *cond;
4415 char condname[16];
4416 int len;
4417
4418 /* Scan up to the end of the mnemonic, which must end in white space,
4419 '.', or end of string. */
4420 for (base = end = *str; is_part_of_name(*end); end++)
4421 if (*end == '.')
4422 break;
4423
4424 if (end == base)
4425 return 0;
4426
4427 inst.cond = COND_ALWAYS;
4428
4429 /* Handle a possible condition. */
4430 if (end[0] == '.')
4431 {
4432 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4433 if (cond)
4434 {
4435 inst.cond = cond->value;
4436 *str = end + 3;
4437 }
4438 else
4439 {
4440 *str = end;
4441 return 0;
4442 }
4443 }
4444 else
4445 *str = end;
4446
4447 len = end - base;
4448
4449 if (inst.cond == COND_ALWAYS)
4450 {
4451 /* Look for unaffixed mnemonic. */
4452 return lookup_mnemonic (base, len);
4453 }
4454 else if (len <= 13)
4455 {
4456 /* append ".c" to mnemonic if conditional */
4457 memcpy (condname, base, len);
4458 memcpy (condname + len, ".c", 2);
4459 base = condname;
4460 len += 2;
4461 return lookup_mnemonic (base, len);
4462 }
4463
4464 return NULL;
4465 }
4466
4467 /* Internal helper routine converting a vector neon_type_el structure
4468 *VECTYPE to a corresponding operand qualifier. */
4469
4470 static inline aarch64_opnd_qualifier_t
4471 vectype_to_qualifier (const struct neon_type_el *vectype)
4472 {
4473 /* Element size in bytes indexed by neon_el_type. */
4474 const unsigned char ele_size[5]
4475 = {1, 2, 4, 8, 16};
4476
4477 if (!vectype->defined || vectype->type == NT_invtype)
4478 goto vectype_conversion_fail;
4479
4480 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4481
4482 if (vectype->defined & NTA_HASINDEX)
4483 /* Vector element register. */
4484 return AARCH64_OPND_QLF_S_B + vectype->type;
4485 else
4486 {
4487 /* Vector register. */
4488 int reg_size = ele_size[vectype->type] * vectype->width;
4489 unsigned offset;
4490 if (reg_size != 16 && reg_size != 8)
4491 goto vectype_conversion_fail;
4492 /* The conversion is calculated based on the relation of the order of
4493 qualifiers to the vector element size and vector register size. */
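/* E.g. ".4s" parses to type NT_s and width 4, so reg_size is 16 and
   offset is (2 << 1) + 1 == 5, i.e. AARCH64_OPND_QLF_V_4S, assuming the
   usual 8B, 16B, 4H, 8H, 2S, 4S, ... ordering of the qualifier enum. */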
4494 offset = (vectype->type == NT_q)
4495 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4496 gas_assert (offset <= 8);
4497 return AARCH64_OPND_QLF_V_8B + offset;
4498 }
4499
4500 vectype_conversion_fail:
4501 first_error (_("bad vector arrangement type"));
4502 return AARCH64_OPND_QLF_NIL;
4503 }
4504
4505 /* Process an optional operand that is found to be omitted from the assembly line.
4506 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4507 instruction's opcode entry while IDX is the index of this omitted operand.
4508 */
4509
4510 static void
4511 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4512 int idx, aarch64_opnd_info *operand)
4513 {
4514 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4515 gas_assert (optional_operand_p (opcode, idx));
4516 gas_assert (!operand->present);
4517
4518 switch (type)
4519 {
4520 case AARCH64_OPND_Rd:
4521 case AARCH64_OPND_Rn:
4522 case AARCH64_OPND_Rm:
4523 case AARCH64_OPND_Rt:
4524 case AARCH64_OPND_Rt2:
4525 case AARCH64_OPND_Rs:
4526 case AARCH64_OPND_Ra:
4527 case AARCH64_OPND_Rt_SYS:
4528 case AARCH64_OPND_Rd_SP:
4529 case AARCH64_OPND_Rn_SP:
4530 case AARCH64_OPND_Fd:
4531 case AARCH64_OPND_Fn:
4532 case AARCH64_OPND_Fm:
4533 case AARCH64_OPND_Fa:
4534 case AARCH64_OPND_Ft:
4535 case AARCH64_OPND_Ft2:
4536 case AARCH64_OPND_Sd:
4537 case AARCH64_OPND_Sn:
4538 case AARCH64_OPND_Sm:
4539 case AARCH64_OPND_Vd:
4540 case AARCH64_OPND_Vn:
4541 case AARCH64_OPND_Vm:
4542 case AARCH64_OPND_VdD1:
4543 case AARCH64_OPND_VnD1:
4544 operand->reg.regno = default_value;
4545 break;
4546
4547 case AARCH64_OPND_Ed:
4548 case AARCH64_OPND_En:
4549 case AARCH64_OPND_Em:
4550 operand->reglane.regno = default_value;
4551 break;
4552
4553 case AARCH64_OPND_IDX:
4554 case AARCH64_OPND_BIT_NUM:
4555 case AARCH64_OPND_IMMR:
4556 case AARCH64_OPND_IMMS:
4557 case AARCH64_OPND_SHLL_IMM:
4558 case AARCH64_OPND_IMM_VLSL:
4559 case AARCH64_OPND_IMM_VLSR:
4560 case AARCH64_OPND_CCMP_IMM:
4561 case AARCH64_OPND_FBITS:
4562 case AARCH64_OPND_UIMM4:
4563 case AARCH64_OPND_UIMM3_OP1:
4564 case AARCH64_OPND_UIMM3_OP2:
4565 case AARCH64_OPND_IMM:
4566 case AARCH64_OPND_WIDTH:
4567 case AARCH64_OPND_UIMM7:
4568 case AARCH64_OPND_NZCV:
4569 operand->imm.value = default_value;
4570 break;
4571
4572 case AARCH64_OPND_EXCEPTION:
4573 inst.reloc.type = BFD_RELOC_UNUSED;
4574 break;
4575
4576 case AARCH64_OPND_BARRIER_ISB:
4577 operand->barrier = aarch64_barrier_options + default_value;
4578
4579 default:
4580 break;
4581 }
4582 }
4583
4584 /* Process the relocation type for move wide instructions.
4585 Return TRUE on success; otherwise return FALSE. */
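/* For example, "movz x0, #:abs_g1:sym" will have been parsed with
   BFD_RELOC_AARCH64_MOVW_G1, so the implicit LSL amount recorded for
   operand 1 becomes 16; a G2/G3 group relocation on a 32-bit register,
   or a signed (_S) or TPREL group relocation combined with MOVK, is
   rejected here. */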
4586
4587 static bfd_boolean
4588 process_movw_reloc_info (void)
4589 {
4590 int is32;
4591 unsigned shift;
4592
4593 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4594
4595 if (inst.base.opcode->op == OP_MOVK)
4596 switch (inst.reloc.type)
4597 {
4598 case BFD_RELOC_AARCH64_MOVW_G0_S:
4599 case BFD_RELOC_AARCH64_MOVW_G1_S:
4600 case BFD_RELOC_AARCH64_MOVW_G2_S:
4601 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4602 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4603 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4604 set_syntax_error
4605 (_("the specified relocation type is not allowed for MOVK"));
4606 return FALSE;
4607 default:
4608 break;
4609 }
4610
4611 switch (inst.reloc.type)
4612 {
4613 case BFD_RELOC_AARCH64_MOVW_G0:
4614 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4615 case BFD_RELOC_AARCH64_MOVW_G0_S:
4616 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4617 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4618 shift = 0;
4619 break;
4620 case BFD_RELOC_AARCH64_MOVW_G1:
4621 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4622 case BFD_RELOC_AARCH64_MOVW_G1_S:
4623 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4624 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4625 shift = 16;
4626 break;
4627 case BFD_RELOC_AARCH64_MOVW_G2:
4628 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4629 case BFD_RELOC_AARCH64_MOVW_G2_S:
4630 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4631 if (is32)
4632 {
4633 set_fatal_syntax_error
4634 (_("the specified relocation type is not allowed for 32-bit "
4635 "register"));
4636 return FALSE;
4637 }
4638 shift = 32;
4639 break;
4640 case BFD_RELOC_AARCH64_MOVW_G3:
4641 if (is32)
4642 {
4643 set_fatal_syntax_error
4644 (_("the specified relocation type is not allowed for 32-bit "
4645 "register"));
4646 return FALSE;
4647 }
4648 shift = 48;
4649 break;
4650 default:
4651 /* More cases should be added when more MOVW-related relocation types
4652 are supported in GAS. */
4653 gas_assert (aarch64_gas_internal_fixup_p ());
4654 /* The shift amount should have already been set by the parser. */
4655 return TRUE;
4656 }
4657 inst.base.operands[1].shifter.amount = shift;
4658 return TRUE;
4659 }
4660
4661 /* A primitive log calculator. */
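/* E.g. get_logsz (1) == 0, get_logsz (4) == 2 and get_logsz (16) == 4;
   sizes that are not powers of two, or are greater than 16, trip the
   assertions below. */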
4662
4663 static inline unsigned int
4664 get_logsz (unsigned int size)
4665 {
4666 const unsigned char ls[16] =
4667 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4668 if (size > 16)
4669 {
4670 gas_assert (0);
4671 return -1;
4672 }
4673 gas_assert (ls[size - 1] != (unsigned char)-1);
4674 return ls[size - 1];
4675 }
4676
4677 /* Determine and return the real reloc type code for an instruction
4678 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
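/* For instance, "ldr x0, [x1, #:lo12:sym]" transfers 8 bytes, so the
   pseudo reloc is narrowed to BFD_RELOC_AARCH64_LDST64_LO12; the same
   addressing with a W or S destination register yields LDST32_LO12. */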
4679
4680 static inline bfd_reloc_code_real_type
4681 ldst_lo12_determine_real_reloc_type (void)
4682 {
4683 int logsz;
4684 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4685 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4686
4687 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4688 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4689 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4690 BFD_RELOC_AARCH64_LDST128_LO12
4691 };
4692
4693 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4694 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4695
4696 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4697 opd1_qlf =
4698 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4699 1, opd0_qlf, 0);
4700 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4701
4702 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4703 gas_assert (logsz >= 0 && logsz <= 4);
4704
4705 return reloc_ldst_lo12[logsz];
4706 }
4707
4708 /* Check whether a register list REGINFO is valid. The registers must be
4709 numbered in increasing order (modulo 32), in increments of one or two.
4710
4711 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4712 increments of two.
4713
4714 Return FALSE if such a register list is invalid, otherwise return TRUE. */
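/* REGINFO packs (number of registers - 1) into bits [1:0] and the
   register numbers into successive 5-bit fields starting at bit 2, as
   built by parse_neon_reg_list.  So, for example, {v1, v2, v3} is
   accepted, {v1, v3, v5} only when ACCEPT_ALTERNATE is non-zero, and
   {v1, v4} never. */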
4715
4716 static bfd_boolean
4717 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4718 {
4719 uint32_t i, nb_regs, prev_regno, incr;
4720
4721 nb_regs = 1 + (reginfo & 0x3);
4722 reginfo >>= 2;
4723 prev_regno = reginfo & 0x1f;
4724 incr = accept_alternate ? 2 : 1;
4725
4726 for (i = 1; i < nb_regs; ++i)
4727 {
4728 uint32_t curr_regno;
4729 reginfo >>= 5;
4730 curr_regno = reginfo & 0x1f;
4731 if (curr_regno != ((prev_regno + incr) & 0x1f))
4732 return FALSE;
4733 prev_regno = curr_regno;
4734 }
4735
4736 return TRUE;
4737 }
4738
4739 /* Generic instruction operand parser. This does no encoding and no
4740 semantic validation; it merely squirrels values away in the inst
4741 structure. Returns TRUE or FALSE depending on whether the
4742 specified grammar matched. */
4743
4744 static bfd_boolean
4745 parse_operands (char *str, const aarch64_opcode *opcode)
4746 {
4747 int i;
4748 char *backtrack_pos = 0;
4749 const enum aarch64_opnd *operands = opcode->operands;
4750
4751 clear_error ();
4752 skip_whitespace (str);
4753
4754 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4755 {
4756 int64_t val;
4757 int isreg32, isregzero;
4758 int comma_skipped_p = 0;
4759 aarch64_reg_type rtype;
4760 struct neon_type_el vectype;
4761 aarch64_opnd_info *info = &inst.base.operands[i];
4762
4763 DEBUG_TRACE ("parse operand %d", i);
4764
4765 /* Assign the operand code. */
4766 info->type = operands[i];
4767
4768 if (optional_operand_p (opcode, i))
4769 {
4770 /* Remember where we are in case we need to backtrack. */
4771 gas_assert (!backtrack_pos);
4772 backtrack_pos = str;
4773 }
4774
4775 /* Expect a comma between operands; the backtrack mechanism will take
4776 care of the cases of an omitted optional operand. */
4777 if (i > 0 && ! skip_past_char (&str, ','))
4778 {
4779 set_syntax_error (_("comma expected between operands"));
4780 goto failure;
4781 }
4782 else
4783 comma_skipped_p = 1;
4784
4785 switch (operands[i])
4786 {
4787 case AARCH64_OPND_Rd:
4788 case AARCH64_OPND_Rn:
4789 case AARCH64_OPND_Rm:
4790 case AARCH64_OPND_Rt:
4791 case AARCH64_OPND_Rt2:
4792 case AARCH64_OPND_Rs:
4793 case AARCH64_OPND_Ra:
4794 case AARCH64_OPND_Rt_SYS:
4795 case AARCH64_OPND_PAIRREG:
4796 po_int_reg_or_fail (1, 0);
4797 break;
4798
4799 case AARCH64_OPND_Rd_SP:
4800 case AARCH64_OPND_Rn_SP:
4801 po_int_reg_or_fail (0, 1);
4802 break;
4803
4804 case AARCH64_OPND_Rm_EXT:
4805 case AARCH64_OPND_Rm_SFT:
4806 po_misc_or_fail (parse_shifter_operand
4807 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4808 ? SHIFTED_ARITH_IMM
4809 : SHIFTED_LOGIC_IMM)));
4810 if (!info->shifter.operator_present)
4811 {
4812 /* Default to LSL if not present. Libopcodes prefers shifter
4813 kind to be explicit. */
4814 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4815 info->shifter.kind = AARCH64_MOD_LSL;
4816 /* For Rm_EXT, libopcodes will carry out a further check on whether
4817 or not the stack pointer is used in the instruction (recall that
4818 "the extend operator is not optional unless at least one of
4819 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4820 }
4821 break;
4822
4823 case AARCH64_OPND_Fd:
4824 case AARCH64_OPND_Fn:
4825 case AARCH64_OPND_Fm:
4826 case AARCH64_OPND_Fa:
4827 case AARCH64_OPND_Ft:
4828 case AARCH64_OPND_Ft2:
4829 case AARCH64_OPND_Sd:
4830 case AARCH64_OPND_Sn:
4831 case AARCH64_OPND_Sm:
4832 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4833 if (val == PARSE_FAIL)
4834 {
4835 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4836 goto failure;
4837 }
4838 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4839
4840 info->reg.regno = val;
4841 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4842 break;
4843
4844 case AARCH64_OPND_Vd:
4845 case AARCH64_OPND_Vn:
4846 case AARCH64_OPND_Vm:
4847 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4848 if (val == PARSE_FAIL)
4849 {
4850 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4851 goto failure;
4852 }
4853 if (vectype.defined & NTA_HASINDEX)
4854 goto failure;
4855
4856 info->reg.regno = val;
4857 info->qualifier = vectype_to_qualifier (&vectype);
4858 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4859 goto failure;
4860 break;
4861
4862 case AARCH64_OPND_VdD1:
4863 case AARCH64_OPND_VnD1:
4864 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4865 if (val == PARSE_FAIL)
4866 {
4867 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4868 goto failure;
4869 }
4870 if (vectype.type != NT_d || vectype.index != 1)
4871 {
4872 set_fatal_syntax_error
4873 (_("the top half of a 128-bit FP/SIMD register is expected"));
4874 goto failure;
4875 }
4876 info->reg.regno = val;
4877 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
4878 here; this is correct for the purpose of encoding/decoding since
4879 only the register number is explicitly encoded in the related
4880 instructions, although this appears a bit hacky. */
4881 info->qualifier = AARCH64_OPND_QLF_S_D;
4882 break;
4883
4884 case AARCH64_OPND_Ed:
4885 case AARCH64_OPND_En:
4886 case AARCH64_OPND_Em:
4887 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4888 if (val == PARSE_FAIL)
4889 {
4890 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4891 goto failure;
4892 }
4893 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4894 goto failure;
4895
4896 info->reglane.regno = val;
4897 info->reglane.index = vectype.index;
4898 info->qualifier = vectype_to_qualifier (&vectype);
4899 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4900 goto failure;
4901 break;
4902
4903 case AARCH64_OPND_LVn:
4904 case AARCH64_OPND_LVt:
4905 case AARCH64_OPND_LVt_AL:
4906 case AARCH64_OPND_LEt:
4907 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4908 goto failure;
4909 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4910 {
4911 set_fatal_syntax_error (_("invalid register list"));
4912 goto failure;
4913 }
4914 info->reglist.first_regno = (val >> 2) & 0x1f;
4915 info->reglist.num_regs = (val & 0x3) + 1;
4916 if (operands[i] == AARCH64_OPND_LEt)
4917 {
4918 if (!(vectype.defined & NTA_HASINDEX))
4919 goto failure;
4920 info->reglist.has_index = 1;
4921 info->reglist.index = vectype.index;
4922 }
4923 else if (!(vectype.defined & NTA_HASTYPE))
4924 goto failure;
4925 info->qualifier = vectype_to_qualifier (&vectype);
4926 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4927 goto failure;
4928 break;
4929
4930 case AARCH64_OPND_Cn:
4931 case AARCH64_OPND_Cm:
4932 po_reg_or_fail (REG_TYPE_CN);
4933 if (val > 15)
4934 {
4935 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4936 goto failure;
4937 }
4938 inst.base.operands[i].reg.regno = val;
4939 break;
4940
4941 case AARCH64_OPND_SHLL_IMM:
4942 case AARCH64_OPND_IMM_VLSR:
4943 po_imm_or_fail (1, 64);
4944 info->imm.value = val;
4945 break;
4946
4947 case AARCH64_OPND_CCMP_IMM:
4948 case AARCH64_OPND_FBITS:
4949 case AARCH64_OPND_UIMM4:
4950 case AARCH64_OPND_UIMM3_OP1:
4951 case AARCH64_OPND_UIMM3_OP2:
4952 case AARCH64_OPND_IMM_VLSL:
4953 case AARCH64_OPND_IMM:
4954 case AARCH64_OPND_WIDTH:
4955 po_imm_nc_or_fail ();
4956 info->imm.value = val;
4957 break;
4958
4959 case AARCH64_OPND_UIMM7:
4960 po_imm_or_fail (0, 127);
4961 info->imm.value = val;
4962 break;
4963
4964 case AARCH64_OPND_IDX:
4965 case AARCH64_OPND_BIT_NUM:
4966 case AARCH64_OPND_IMMR:
4967 case AARCH64_OPND_IMMS:
4968 po_imm_or_fail (0, 63);
4969 info->imm.value = val;
4970 break;
4971
4972 case AARCH64_OPND_IMM0:
4973 po_imm_nc_or_fail ();
4974 if (val != 0)
4975 {
4976 set_fatal_syntax_error (_("immediate zero expected"));
4977 goto failure;
4978 }
4979 info->imm.value = 0;
4980 break;
4981
4982 case AARCH64_OPND_FPIMM0:
4983 {
4984 int qfloat;
4985 bfd_boolean res1 = FALSE, res2 = FALSE;
4986 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4987 it is probably not worth the effort to support it. */
4988 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4989 && !(res2 = parse_constant_immediate (&str, &val)))
4990 goto failure;
4991 if ((res1 && qfloat == 0) || (res2 && val == 0))
4992 {
4993 info->imm.value = 0;
4994 info->imm.is_fp = 1;
4995 break;
4996 }
4997 set_fatal_syntax_error (_("immediate zero expected"));
4998 goto failure;
4999 }
5000
5001 case AARCH64_OPND_IMM_MOV:
5002 {
5003 char *saved = str;
5004 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5005 reg_name_p (str, REG_TYPE_VN))
5006 goto failure;
5007 str = saved;
5008 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5009 GE_OPT_PREFIX, 1));
5010 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5011 later. fix_mov_imm_insn will try to determine a machine
5012 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5013 message if the immediate cannot be moved by a single
5014 instruction. */
5015 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5016 inst.base.operands[i].skip = 1;
5017 }
5018 break;
5019
5020 case AARCH64_OPND_SIMD_IMM:
5021 case AARCH64_OPND_SIMD_IMM_SFT:
5022 if (! parse_big_immediate (&str, &val))
5023 goto failure;
5024 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5025 /* addr_off_p */ 0,
5026 /* need_libopcodes_p */ 1,
5027 /* skip_p */ 1);
5028 /* Parse shift.
5029 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5030 shift, we don't check it here; we leave the checking to
5031 the libopcodes (operand_general_constraint_met_p). By
5032 doing this, we achieve better diagnostics. */
5033 if (skip_past_comma (&str)
5034 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5035 goto failure;
5036 if (!info->shifter.operator_present
5037 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5038 {
5039 /* Default to LSL if not present. Libopcodes prefers shifter
5040 kind to be explicit. */
5041 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5042 info->shifter.kind = AARCH64_MOD_LSL;
5043 }
5044 break;
5045
5046 case AARCH64_OPND_FPIMM:
5047 case AARCH64_OPND_SIMD_FPIMM:
5048 {
5049 int qfloat;
5050 bfd_boolean dp_p
5051 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5052 == 8);
5053 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
5054 goto failure;
5055 if (qfloat == 0)
5056 {
5057 set_fatal_syntax_error (_("invalid floating-point constant"));
5058 goto failure;
5059 }
5060 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5061 inst.base.operands[i].imm.is_fp = 1;
5062 }
5063 break;
5064
5065 case AARCH64_OPND_LIMM:
5066 po_misc_or_fail (parse_shifter_operand (&str, info,
5067 SHIFTED_LOGIC_IMM));
5068 if (info->shifter.operator_present)
5069 {
5070 set_fatal_syntax_error
5071 (_("shift not allowed for bitmask immediate"));
5072 goto failure;
5073 }
5074 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5075 /* addr_off_p */ 0,
5076 /* need_libopcodes_p */ 1,
5077 /* skip_p */ 1);
5078 break;
5079
5080 case AARCH64_OPND_AIMM:
5081 if (opcode->op == OP_ADD)
5082 /* ADD may have relocation types. */
5083 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5084 SHIFTED_ARITH_IMM));
5085 else
5086 po_misc_or_fail (parse_shifter_operand (&str, info,
5087 SHIFTED_ARITH_IMM));
5088 switch (inst.reloc.type)
5089 {
5090 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5091 info->shifter.amount = 12;
5092 break;
5093 case BFD_RELOC_UNUSED:
5094 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5095 if (info->shifter.kind != AARCH64_MOD_NONE)
5096 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5097 inst.reloc.pc_rel = 0;
5098 break;
5099 default:
5100 break;
5101 }
5102 info->imm.value = 0;
5103 if (!info->shifter.operator_present)
5104 {
5105 /* Default to LSL if not present. Libopcodes prefers shifter
5106 kind to be explicit. */
5107 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5108 info->shifter.kind = AARCH64_MOD_LSL;
5109 }
5110 break;
5111
5112 case AARCH64_OPND_HALF:
5113 {
5114 /* #<imm16> or relocation. */
5115 int internal_fixup_p;
5116 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5117 if (internal_fixup_p)
5118 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5119 skip_whitespace (str);
5120 if (skip_past_comma (&str))
5121 {
5122 /* {, LSL #<shift>} */
5123 if (! aarch64_gas_internal_fixup_p ())
5124 {
5125 set_fatal_syntax_error (_("can't mix relocation modifier "
5126 "with explicit shift"));
5127 goto failure;
5128 }
5129 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5130 }
5131 else
5132 inst.base.operands[i].shifter.amount = 0;
5133 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5134 inst.base.operands[i].imm.value = 0;
5135 if (! process_movw_reloc_info ())
5136 goto failure;
5137 }
5138 break;
5139
5140 case AARCH64_OPND_EXCEPTION:
5141 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
5142 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5143 /* addr_off_p */ 0,
5144 /* need_libopcodes_p */ 0,
5145 /* skip_p */ 1);
5146 break;
5147
5148 case AARCH64_OPND_NZCV:
5149 {
5150 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5151 if (nzcv != NULL)
5152 {
5153 str += 4;
5154 info->imm.value = nzcv->value;
5155 break;
5156 }
5157 po_imm_or_fail (0, 15);
5158 info->imm.value = val;
5159 }
5160 break;
5161
5162 case AARCH64_OPND_COND:
5163 case AARCH64_OPND_COND1:
5164 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5165 str += 2;
5166 if (info->cond == NULL)
5167 {
5168 set_syntax_error (_("invalid condition"));
5169 goto failure;
5170 }
5171 else if (operands[i] == AARCH64_OPND_COND1
5172 && (info->cond->value & 0xe) == 0xe)
5173 {
5174 /* Do not allow AL or NV. */
5175 set_default_error ();
5176 goto failure;
5177 }
5178 break;
5179
5180 case AARCH64_OPND_ADDR_ADRP:
5181 po_misc_or_fail (parse_adrp (&str));
5182 /* Clear the value as operand needs to be relocated. */
5183 info->imm.value = 0;
5184 break;
5185
5186 case AARCH64_OPND_ADDR_PCREL14:
5187 case AARCH64_OPND_ADDR_PCREL19:
5188 case AARCH64_OPND_ADDR_PCREL21:
5189 case AARCH64_OPND_ADDR_PCREL26:
5190 po_misc_or_fail (parse_address_reloc (&str, info));
5191 if (!info->addr.pcrel)
5192 {
5193 set_syntax_error (_("invalid pc-relative address"));
5194 goto failure;
5195 }
5196 if (inst.gen_lit_pool
5197 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5198 {
5199 /* Only permit "=value" in the literal load instructions.
5200 The literal will be generated by programmer_friendly_fixup. */
5201 set_syntax_error (_("invalid use of \"=immediate\""));
5202 goto failure;
5203 }
5204 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5205 {
5206 set_syntax_error (_("unrecognized relocation suffix"));
5207 goto failure;
5208 }
5209 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5210 {
5211 info->imm.value = inst.reloc.exp.X_add_number;
5212 inst.reloc.type = BFD_RELOC_UNUSED;
5213 }
5214 else
5215 {
5216 info->imm.value = 0;
5217 if (inst.reloc.type == BFD_RELOC_UNUSED)
5218 switch (opcode->iclass)
5219 {
5220 case compbranch:
5221 case condbranch:
5222 /* e.g. CBZ or B.COND */
5223 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5224 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5225 break;
5226 case testbranch:
5227 /* e.g. TBZ */
5228 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5229 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5230 break;
5231 case branch_imm:
5232 /* e.g. B or BL */
5233 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5234 inst.reloc.type =
5235 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5236 : BFD_RELOC_AARCH64_JUMP26;
5237 break;
5238 case loadlit:
5239 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5240 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5241 break;
5242 case pcreladdr:
5243 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5244 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5245 break;
5246 default:
5247 gas_assert (0);
5248 abort ();
5249 }
5250 inst.reloc.pc_rel = 1;
5251 }
5252 break;
5253
5254 case AARCH64_OPND_ADDR_SIMPLE:
5255 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5256 /* [<Xn|SP>{, #<simm>}] */
5257 po_char_or_fail ('[');
5258 po_reg_or_fail (REG_TYPE_R64_SP);
5259 /* Accept optional ", #0". */
5260 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5261 && skip_past_char (&str, ','))
5262 {
5263 skip_past_char (&str, '#');
5264 if (! skip_past_char (&str, '0'))
5265 {
5266 set_fatal_syntax_error
5267 (_("the optional immediate offset can only be 0"));
5268 goto failure;
5269 }
5270 }
5271 po_char_or_fail (']');
5272 info->addr.base_regno = val;
5273 break;
5274
5275 case AARCH64_OPND_ADDR_REGOFF:
5276 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5277 po_misc_or_fail (parse_address (&str, info, 0));
5278 if (info->addr.pcrel || !info->addr.offset.is_reg
5279 || !info->addr.preind || info->addr.postind
5280 || info->addr.writeback)
5281 {
5282 set_syntax_error (_("invalid addressing mode"));
5283 goto failure;
5284 }
5285 if (!info->shifter.operator_present)
5286 {
5287 /* Default to LSL if not present. Libopcodes prefers shifter
5288 kind to be explicit. */
5289 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5290 info->shifter.kind = AARCH64_MOD_LSL;
5291 }
5292 /* Qualifier to be deduced by libopcodes. */
5293 break;
5294
5295 case AARCH64_OPND_ADDR_SIMM7:
5296 po_misc_or_fail (parse_address (&str, info, 0));
5297 if (info->addr.pcrel || info->addr.offset.is_reg
5298 || (!info->addr.preind && !info->addr.postind))
5299 {
5300 set_syntax_error (_("invalid addressing mode"));
5301 goto failure;
5302 }
5303 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5304 /* addr_off_p */ 1,
5305 /* need_libopcodes_p */ 1,
5306 /* skip_p */ 0);
5307 break;
5308
5309 case AARCH64_OPND_ADDR_SIMM9:
5310 case AARCH64_OPND_ADDR_SIMM9_2:
5311 po_misc_or_fail (parse_address_reloc (&str, info));
5312 if (info->addr.pcrel || info->addr.offset.is_reg
5313 || (!info->addr.preind && !info->addr.postind)
5314 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5315 && info->addr.writeback))
5316 {
5317 set_syntax_error (_("invalid addressing mode"));
5318 goto failure;
5319 }
5320 if (inst.reloc.type != BFD_RELOC_UNUSED)
5321 {
5322 set_syntax_error (_("relocation not allowed"));
5323 goto failure;
5324 }
5325 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5326 /* addr_off_p */ 1,
5327 /* need_libopcodes_p */ 1,
5328 /* skip_p */ 0);
5329 break;
5330
5331 case AARCH64_OPND_ADDR_UIMM12:
5332 po_misc_or_fail (parse_address_reloc (&str, info));
5333 if (info->addr.pcrel || info->addr.offset.is_reg
5334 || !info->addr.preind || info->addr.writeback)
5335 {
5336 set_syntax_error (_("invalid addressing mode"));
5337 goto failure;
5338 }
5339 if (inst.reloc.type == BFD_RELOC_UNUSED)
5340 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5341 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5342 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5343 /* Leave qualifier to be determined by libopcodes. */
5344 break;
5345
5346 case AARCH64_OPND_SIMD_ADDR_POST:
5347 /* [<Xn|SP>], <Xm|#<amount>> */
5348 po_misc_or_fail (parse_address (&str, info, 1));
5349 if (!info->addr.postind || !info->addr.writeback)
5350 {
5351 set_syntax_error (_("invalid addressing mode"));
5352 goto failure;
5353 }
5354 if (!info->addr.offset.is_reg)
5355 {
5356 if (inst.reloc.exp.X_op == O_constant)
5357 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5358 else
5359 {
5360 set_fatal_syntax_error
5361 (_("writeback value should be an immediate constant"));
5362 goto failure;
5363 }
5364 }
5365 /* No qualifier. */
5366 break;
5367
5368 case AARCH64_OPND_SYSREG:
5369 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
5370 == PARSE_FAIL)
5371 {
5372 set_syntax_error (_("unknown or missing system register name"));
5373 goto failure;
5374 }
5375 inst.base.operands[i].sysreg = val;
5376 break;
5377
5378 case AARCH64_OPND_PSTATEFIELD:
5379 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
5380 == PARSE_FAIL)
5381 {
5382 set_syntax_error (_("unknown or missing PSTATE field name"));
5383 goto failure;
5384 }
5385 inst.base.operands[i].pstatefield = val;
5386 break;
5387
5388 case AARCH64_OPND_SYSREG_IC:
5389 inst.base.operands[i].sysins_op =
5390 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5391 goto sys_reg_ins;
5392 case AARCH64_OPND_SYSREG_DC:
5393 inst.base.operands[i].sysins_op =
5394 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5395 goto sys_reg_ins;
5396 case AARCH64_OPND_SYSREG_AT:
5397 inst.base.operands[i].sysins_op =
5398 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5399 goto sys_reg_ins;
5400 case AARCH64_OPND_SYSREG_TLBI:
5401 inst.base.operands[i].sysins_op =
5402 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5403 sys_reg_ins:
5404 if (inst.base.operands[i].sysins_op == NULL)
5405 {
5406 set_fatal_syntax_error ( _("unknown or missing operation name"));
5407 goto failure;
5408 }
5409 break;
5410
5411 case AARCH64_OPND_BARRIER:
5412 case AARCH64_OPND_BARRIER_ISB:
5413 val = parse_barrier (&str);
5414 if (val != PARSE_FAIL
5415 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5416 {
5417 /* ISB only accepts the option name 'sy'. */
5418 set_syntax_error
5419 (_("the specified option is not accepted in ISB"));
5420 /* Turn off backtrack as this optional operand is present. */
5421 backtrack_pos = 0;
5422 goto failure;
5423 }
5424 /* This is an extension to accept a 0..15 immediate. */
5425 if (val == PARSE_FAIL)
5426 po_imm_or_fail (0, 15);
5427 info->barrier = aarch64_barrier_options + val;
5428 break;
5429
5430 case AARCH64_OPND_PRFOP:
5431 val = parse_pldop (&str);
5432 /* This is an extension to accept a 0..31 immediate. */
5433 if (val == PARSE_FAIL)
5434 po_imm_or_fail (0, 31);
5435 inst.base.operands[i].prfop = aarch64_prfops + val;
5436 break;
5437
5438 default:
5439 as_fatal (_("unhandled operand code %d"), operands[i]);
5440 }
5441
5442 /* If we get here, this operand was successfully parsed. */
5443 inst.base.operands[i].present = 1;
5444 continue;
5445
5446 failure:
5447 /* The parse routine should already have set the error, but in case
5448 not, set a default one here. */
5449 if (! error_p ())
5450 set_default_error ();
5451
5452 if (! backtrack_pos)
5453 goto parse_operands_return;
5454
5455 {
5456 /* We reach here because this operand is marked as optional, and
5457 either no operand was supplied or the operand was supplied but it
5458 was syntactically incorrect. In the latter case we report an
5459 error. In the former case we perform a few more checks before
5460 dropping through to the code to insert the default operand. */
5461
5462 char *tmp = backtrack_pos;
5463 char endchar = END_OF_INSN;
5464
5465 if (i != (aarch64_num_of_operands (opcode) - 1))
5466 endchar = ',';
5467 skip_past_char (&tmp, ',');
5468
5469 if (*tmp != endchar)
5470 /* The user has supplied an operand in the wrong format. */
5471 goto parse_operands_return;
5472
5473 /* Make sure there is not a comma before the optional operand.
5474 For example the fifth operand of 'sys' is optional:
5475
5476 sys #0,c0,c0,#0, <--- wrong
5477 sys #0,c0,c0,#0 <--- correct. */
5478 if (comma_skipped_p && i && endchar == END_OF_INSN)
5479 {
5480 set_fatal_syntax_error
5481 (_("unexpected comma before the omitted optional operand"));
5482 goto parse_operands_return;
5483 }
5484 }
5485
5486 /* Reaching here means we are dealing with an optional operand that is
5487 omitted from the assembly line. */
5488 gas_assert (optional_operand_p (opcode, i));
5489 info->present = 0;
5490 process_omitted_operand (operands[i], opcode, i, info);
5491
5492 /* Try again, skipping the optional operand at backtrack_pos. */
5493 str = backtrack_pos;
5494 backtrack_pos = 0;
5495
5496 /* Clear any error record after the omitted optional operand has been
5497 successfully handled. */
5498 clear_error ();
5499 }
5500
5501 /* Check if we have parsed all the operands. */
5502 if (*str != '\0' && ! error_p ())
5503 {
5504 /* Set I to the index of the last present operand; this is
5505 for the purpose of diagnostics. */
5506 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5507 ;
5508 set_fatal_syntax_error
5509 (_("unexpected characters following instruction"));
5510 }
5511
5512 parse_operands_return:
5513
5514 if (error_p ())
5515 {
5516 DEBUG_TRACE ("parsing FAIL: %s - %s",
5517 operand_mismatch_kind_names[get_error_kind ()],
5518 get_error_message ());
5519 /* Record the operand error properly; this is useful when there
5520 are multiple instruction templates for a mnemonic name, so that
5521 later on, we can select the error that most closely describes
5522 the problem. */
5523 record_operand_error (opcode, i, get_error_kind (),
5524 get_error_message ());
5525 return FALSE;
5526 }
5527 else
5528 {
5529 DEBUG_TRACE ("parsing SUCCESS");
5530 return TRUE;
5531 }
5532 }
5533
5534 /* Perform some fix-ups to provide programmer-friendly features while
5535 keeping libopcodes happy, i.e. libopcodes only accepts
5536 the preferred architectural syntax.
5537 Return FALSE if there is any failure; otherwise return TRUE. */
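/* For example, "uxtb x1, w2" is accepted and encoded as if "uxtb w1, w2"
   had been written, and in "add x0, sp, x1, uxtb #2" the final operand
   is narrowed to w1 before encoding; the loadlit case below implements
   the "ldr x0, =value" literal pool convenience. */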
5538
5539 static bfd_boolean
5540 programmer_friendly_fixup (aarch64_instruction *instr)
5541 {
5542 aarch64_inst *base = &instr->base;
5543 const aarch64_opcode *opcode = base->opcode;
5544 enum aarch64_op op = opcode->op;
5545 aarch64_opnd_info *operands = base->operands;
5546
5547 DEBUG_TRACE ("enter");
5548
5549 switch (opcode->iclass)
5550 {
5551 case testbranch:
5552 /* TBNZ Xn|Wn, #uimm6, label
5553 Test and Branch Not Zero: conditionally jumps to label if bit number
5554 uimm6 in register Xn is not zero. The bit number implies the width of
5555 the register, which may be written and should be disassembled as Wn if
5556 uimm is less than 32. */
5557 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5558 {
5559 if (operands[1].imm.value >= 32)
5560 {
5561 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5562 0, 31);
5563 return FALSE;
5564 }
5565 operands[0].qualifier = AARCH64_OPND_QLF_X;
5566 }
5567 break;
5568 case loadlit:
5569 /* LDR Wt, label | =value
5570 As a convenience assemblers will typically permit the notation
5571 "=value" in conjunction with the pc-relative literal load instructions
5572 to automatically place an immediate value or symbolic address in a
5573 nearby literal pool and generate a hidden label which references it.
5574 ISREG has been set to 0 in the case of =value. */
5575 if (instr->gen_lit_pool
5576 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5577 {
5578 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5579 if (op == OP_LDRSW_LIT)
5580 size = 4;
5581 if (instr->reloc.exp.X_op != O_constant
5582 && instr->reloc.exp.X_op != O_big
5583 && instr->reloc.exp.X_op != O_symbol)
5584 {
5585 record_operand_error (opcode, 1,
5586 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5587 _("constant expression expected"));
5588 return FALSE;
5589 }
5590 if (! add_to_lit_pool (&instr->reloc.exp, size))
5591 {
5592 record_operand_error (opcode, 1,
5593 AARCH64_OPDE_OTHER_ERROR,
5594 _("literal pool insertion failed"));
5595 return FALSE;
5596 }
5597 }
5598 break;
5599 case log_shift:
5600 case bitfield:
5601 /* UXT[BHW] Wd, Wn
5602 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5603 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5604 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5605 A programmer-friendly assembler should accept a destination Xd in
5606 place of Wd, however that is not the preferred form for disassembly.
5607 */
5608 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5609 && operands[1].qualifier == AARCH64_OPND_QLF_W
5610 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5611 operands[0].qualifier = AARCH64_OPND_QLF_W;
5612 break;
5613
5614 case addsub_ext:
5615 {
5616 /* In the 64-bit form, the final register operand is written as Wm
5617 for all but the (possibly omitted) UXTX/LSL and SXTX
5618 operators.
5619 As a programmer-friendly assembler, we accept e.g.
5620 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5621 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5622 int idx = aarch64_operand_index (opcode->operands,
5623 AARCH64_OPND_Rm_EXT);
5624 gas_assert (idx == 1 || idx == 2);
5625 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5626 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5627 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5628 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5629 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5630 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5631 }
5632 break;
5633
5634 default:
5635 break;
5636 }
5637
5638 DEBUG_TRACE ("exit with SUCCESS");
5639 return TRUE;
5640 }
5641
5642 /* Check for loads and stores that will cause unpredictable behavior. */
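/* For example, "ldr x0, [x0], #8" (writeback with the base register also
   the transfer register) and "ldp x0, x0, [x1]" (a load pair into the
   same register) each draw a warning here. */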
5643
5644 static void
5645 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5646 {
5647 aarch64_inst *base = &instr->base;
5648 const aarch64_opcode *opcode = base->opcode;
5649 const aarch64_opnd_info *opnds = base->operands;
5650 switch (opcode->iclass)
5651 {
5652 case ldst_pos:
5653 case ldst_imm9:
5654 case ldst_unscaled:
5655 case ldst_unpriv:
5656 /* Loading/storing the base register is unpredictable if writeback. */
5657 if ((aarch64_get_operand_class (opnds[0].type)
5658 == AARCH64_OPND_CLASS_INT_REG)
5659 && opnds[0].reg.regno == opnds[1].addr.base_regno
5660 && opnds[1].addr.base_regno != REG_SP
5661 && opnds[1].addr.writeback)
5662 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5663 break;
5664 case ldstpair_off:
5665 case ldstnapair_offs:
5666 case ldstpair_indexed:
5667 /* Loading/storing the base register is unpredictable if writeback. */
5668 if ((aarch64_get_operand_class (opnds[0].type)
5669 == AARCH64_OPND_CLASS_INT_REG)
5670 && (opnds[0].reg.regno == opnds[2].addr.base_regno
5671 || opnds[1].reg.regno == opnds[2].addr.base_regno)
5672 && opnds[2].addr.base_regno != REG_SP
5673 && opnds[2].addr.writeback)
5674 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5675 /* Load operations must load different registers. */
5676 if ((opcode->opcode & (1 << 22))
5677 && opnds[0].reg.regno == opnds[1].reg.regno)
5678 as_warn (_("unpredictable load of register pair -- `%s'"), str);
5679 break;
5680 default:
5681 break;
5682 }
5683 }
5684
5685 /* A wrapper function to interface with libopcodes on encoding and to
5686 record the error message if there is any.
5687
5688 Return TRUE on success; otherwise return FALSE. */
5689
5690 static bfd_boolean
5691 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5692 aarch64_insn *code)
5693 {
5694 aarch64_operand_error error_info;
5695 error_info.kind = AARCH64_OPDE_NIL;
5696 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5697 return TRUE;
5698 else
5699 {
5700 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5701 record_operand_error_info (opcode, &error_info);
5702 return FALSE;
5703 }
5704 }
5705
5706 #ifdef DEBUG_AARCH64
5707 static inline void
5708 dump_opcode_operands (const aarch64_opcode *opcode)
5709 {
5710 int i = 0;
5711 while (opcode->operands[i] != AARCH64_OPND_NIL)
5712 {
5713 aarch64_verbose ("\t\t opnd%d: %s", i,
5714 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5715 ? aarch64_get_operand_name (opcode->operands[i])
5716 : aarch64_get_operand_desc (opcode->operands[i]));
5717 ++i;
5718 }
5719 }
5720 #endif /* DEBUG_AARCH64 */
5721
5722 /* This is the guts of the machine-dependent assembler. STR points to a
5723 machine dependent instruction. This function is supposed to emit
5724 the frags/bytes it assembles to. */
5725
5726 void
5727 md_assemble (char *str)
5728 {
5729 char *p = str;
5730 templates *template;
5731 aarch64_opcode *opcode;
5732 aarch64_inst *inst_base;
5733 unsigned saved_cond;
5734
5735 /* Align the previous label if needed. */
5736 if (last_label_seen != NULL)
5737 {
5738 symbol_set_frag (last_label_seen, frag_now);
5739 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5740 S_SET_SEGMENT (last_label_seen, now_seg);
5741 }
5742
5743 inst.reloc.type = BFD_RELOC_UNUSED;
5744
5745 DEBUG_TRACE ("\n\n");
5746 DEBUG_TRACE ("==============================");
5747 DEBUG_TRACE ("Enter md_assemble with %s", str);
5748
5749 template = opcode_lookup (&p);
5750 if (!template)
5751 {
5752 /* It wasn't an instruction, but it might be a register alias
5753 created by an "alias .req reg" directive. */
5754 if (!create_register_alias (str, p))
5755 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5756 str);
5757 return;
5758 }
5759
5760 skip_whitespace (p);
5761 if (*p == ',')
5762 {
5763 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5764 get_mnemonic_name (str), str);
5765 return;
5766 }
5767
5768 init_operand_error_report ();
5769
5770 /* Sections are assumed to start aligned. In an executable section, there is
5771 no MAP_DATA symbol pending, so we only align the address during the
5772 MAP_DATA --> MAP_INSN transition.
5773 For other sections, this is not guaranteed. */
5774 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
5775 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
5776 frag_align_code (2, 0);
5777
5778 saved_cond = inst.cond;
5779 reset_aarch64_instruction (&inst);
5780 inst.cond = saved_cond;
5781
5782 /* Iterate through all opcode entries with the same mnemonic name. */
5783 do
5784 {
5785 opcode = template->opcode;
5786
5787 DEBUG_TRACE ("opcode %s found", opcode->name);
5788 #ifdef DEBUG_AARCH64
5789 if (debug_dump)
5790 dump_opcode_operands (opcode);
5791 #endif /* DEBUG_AARCH64 */
5792
5793 mapping_state (MAP_INSN);
5794
5795 inst_base = &inst.base;
5796 inst_base->opcode = opcode;
5797
5798 /* Truly conditionally executed instructions, e.g. b.cond. */
5799 if (opcode->flags & F_COND)
5800 {
5801 gas_assert (inst.cond != COND_ALWAYS);
5802 inst_base->cond = get_cond_from_value (inst.cond);
5803 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5804 }
5805 else if (inst.cond != COND_ALWAYS)
5806 {
5807 /* We shouldn't get here: the assembly looks like a
5808 conditional instruction but the opcode found is unconditional. */
5809 gas_assert (0);
5810 continue;
5811 }
5812
5813 if (parse_operands (p, opcode)
5814 && programmer_friendly_fixup (&inst)
5815 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5816 {
5817 /* Check that this instruction is supported for this CPU. */
5818 if (!opcode->avariant
5819 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5820 {
5821 as_bad (_("selected processor does not support `%s'"), str);
5822 return;
5823 }
5824
5825 warn_unpredictable_ldst (&inst, str);
5826
5827 if (inst.reloc.type == BFD_RELOC_UNUSED
5828 || !inst.reloc.need_libopcodes_p)
5829 output_inst (NULL);
5830 else
5831 {
5832 /* If a relocation is generated for the instruction,
5833 store the instruction information for the future fix-up. */
5834 struct aarch64_inst *copy;
5835 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5836 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5837 abort ();
5838 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5839 output_inst (copy);
5840 }
5841 return;
5842 }
5843
5844 template = template->next;
5845 if (template != NULL)
5846 {
5847 reset_aarch64_instruction (&inst);
5848 inst.cond = saved_cond;
5849 }
5850 }
5851 while (template != NULL);
5852
5853 /* Issue the error messages if any. */
5854 output_operand_error_report (str);
5855 }
5856
5857 /* Various frobbings of labels and their addresses. */
5858
5859 void
5860 aarch64_start_line_hook (void)
5861 {
5862 last_label_seen = NULL;
5863 }
5864
5865 void
5866 aarch64_frob_label (symbolS * sym)
5867 {
5868 last_label_seen = sym;
5869
5870 dwarf2_emit_label (sym);
5871 }
5872
5873 int
5874 aarch64_data_in_code (void)
5875 {
5876 if (!strncmp (input_line_pointer + 1, "data:", 5))
5877 {
5878 *input_line_pointer = '/';
5879 input_line_pointer += 5;
5880 *input_line_pointer = 0;
5881 return 1;
5882 }
5883
5884 return 0;
5885 }
5886
5887 char *
5888 aarch64_canonicalize_symbol_name (char *name)
5889 {
5890 int len;
5891
5892 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5893 *(name + len - 5) = 0;
5894
5895 return name;
5896 }
5897 \f
5898 /* Table of all register names defined by default. The user can
5899 define additional names with .req. Note that all register names
5900 should appear in both upper and lowercase variants. Some registers
5901 also have mixed-case names. */
5902
5903 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5904 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5905 #define REGSET31(p,t) \
5906 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5907 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5908 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5909 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5910 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5911 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5912 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5913 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5914 #define REGSET(p,t) \
5915 REGSET31(p,t), REGNUM(p,31,t)
5916
5917 /* These go into aarch64_reg_hsh hash-table. */
5918 static const reg_entry reg_names[] = {
5919 /* Integer registers. */
5920 REGSET31 (x, R_64), REGSET31 (X, R_64),
5921 REGSET31 (w, R_32), REGSET31 (W, R_32),
5922
5923 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5924 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5925
5926 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5927 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5928
5929 /* Coprocessor register numbers. */
5930 REGSET (c, CN), REGSET (C, CN),
5931
5932 /* Floating-point single precision registers. */
5933 REGSET (s, FP_S), REGSET (S, FP_S),
5934
5935 /* Floating-point double precision registers. */
5936 REGSET (d, FP_D), REGSET (D, FP_D),
5937
5938 /* Floating-point half precision registers. */
5939 REGSET (h, FP_H), REGSET (H, FP_H),
5940
5941 /* Floating-point byte precision registers. */
5942 REGSET (b, FP_B), REGSET (B, FP_B),
5943
5944 /* Floating-point quad precision registers. */
5945 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5946
5947 /* FP/SIMD registers. */
5948 REGSET (v, VN), REGSET (V, VN),
5949 };
5950
5951 #undef REGDEF
5952 #undef REGNUM
5953 #undef REGSET
5954
5955 #define N 1
5956 #define n 0
5957 #define Z 1
5958 #define z 0
5959 #define C 1
5960 #define c 0
5961 #define V 1
5962 #define v 0
5963 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5964 static const asm_nzcv nzcv_names[] = {
5965 {"nzcv", B (n, z, c, v)},
5966 {"nzcV", B (n, z, c, V)},
5967 {"nzCv", B (n, z, C, v)},
5968 {"nzCV", B (n, z, C, V)},
5969 {"nZcv", B (n, Z, c, v)},
5970 {"nZcV", B (n, Z, c, V)},
5971 {"nZCv", B (n, Z, C, v)},
5972 {"nZCV", B (n, Z, C, V)},
5973 {"Nzcv", B (N, z, c, v)},
5974 {"NzcV", B (N, z, c, V)},
5975 {"NzCv", B (N, z, C, v)},
5976 {"NzCV", B (N, z, C, V)},
5977 {"NZcv", B (N, Z, c, v)},
5978 {"NZcV", B (N, Z, c, V)},
5979 {"NZCv", B (N, Z, C, v)},
5980 {"NZCV", B (N, Z, C, V)}
5981 };
5982
5983 #undef N
5984 #undef n
5985 #undef Z
5986 #undef z
5987 #undef C
5988 #undef c
5989 #undef V
5990 #undef v
5991 #undef B
5992 \f
5993 /* MD interface: bits in the object file. */
5994
5995 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5996 for use in the a.out file, and store them in the array pointed to by buf.
5997 This knows about the endianness of the target machine and does
5998 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
5999 2 (short) and 4 (long). Floating-point numbers are put out as a series of
6000 LITTLENUMS (shorts, here at least). */
6001
6002 void
6003 md_number_to_chars (char *buf, valueT val, int n)
6004 {
6005 if (target_big_endian)
6006 number_to_chars_bigendian (buf, val, n);
6007 else
6008 number_to_chars_littleendian (buf, val, n);
6009 }
6010
6011 /* MD interface: Sections. */
6012
6013 /* Estimate the size of a frag before relaxing. Assume everything fits in
6014 4 bytes. */
6015
6016 int
6017 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6018 {
6019 fragp->fr_var = 4;
6020 return 4;
6021 }
6022
6023 /* Round up a section size to the appropriate boundary. */
6024
6025 valueT
6026 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
6027 {
6028 return size;
6029 }
6030
6031 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6032 of an rs_align_code fragment.
6033
6034 Here we fill the frag with the appropriate info for padding the
6035 output stream. The resulting frag will consist of a fixed (fr_fix)
6036 and of a repeating (fr_var) part.
6037
6038 The fixed content is always emitted before the repeating content and
6039 these two parts are used as follows in constructing the output:
6040 - the fixed part will be used to align to a valid instruction word
6041 boundary, in case we start at a misaligned address; as no
6042 executable instruction can live at the misaligned location, we
6043 simply fill with zeros;
6044 - the variable part will be used to cover the remaining padding and
6045 we fill using the AArch64 NOP instruction.
6046
6047 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6048 enough storage space for up to 3 bytes for padding back to a valid
6049 instruction alignment and exactly 4 bytes to store the NOP pattern. */
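/* Worked example (illustrative): assuming the requested code alignment is a
   multiple of 4 and the padding region starts at an address ending in ...6,
   the fixed part emits two zero bytes to reach a 4-byte boundary and the
   variable part then repeats the 4-byte NOP pattern 0xd503201f to cover the
   rest of the padding.  */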
6050
6051 void
6052 aarch64_handle_align (fragS * fragP)
6053 {
6054 /* NOP = d503201f */
6055 /* AArch64 instructions are always little-endian. */
6056 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6057
6058 int bytes, fix, noop_size;
6059 char *p;
6060
6061 if (fragP->fr_type != rs_align_code)
6062 return;
6063
6064 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6065 p = fragP->fr_literal + fragP->fr_fix;
6066
6067 #ifdef OBJ_ELF
6068 gas_assert (fragP->tc_frag_data.recorded);
6069 #endif
6070
6071 noop_size = sizeof (aarch64_noop);
6072
6073 fix = bytes & (noop_size - 1);
6074 if (fix)
6075 {
6076 #ifdef OBJ_ELF
6077 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6078 #endif
6079 memset (p, 0, fix);
6080 p += fix;
6081 fragP->fr_fix += fix;
6082 }
6083
6084 if (noop_size)
6085 memcpy (p, aarch64_noop, noop_size);
6086 fragP->fr_var = noop_size;
6087 }
6088
6089 /* Perform target specific initialisation of a frag.
6090 Note - despite the name this initialisation is not done when the frag
6091 is created, but only when its type is assigned. A frag can be created
6092 and used a long time before its type is set, so beware of assuming that
6093 this initialisation is performed first. */
6094
6095 #ifndef OBJ_ELF
6096 void
6097 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6098 int max_chars ATTRIBUTE_UNUSED)
6099 {
6100 }
6101
6102 #else /* OBJ_ELF is defined. */
6103 void
6104 aarch64_init_frag (fragS * fragP, int max_chars)
6105 {
6106 /* Record a mapping symbol for alignment frags. We will delete this
6107 later if the alignment ends up empty. */
6108 if (!fragP->tc_frag_data.recorded)
6109 fragP->tc_frag_data.recorded = 1;
6110
6111 switch (fragP->fr_type)
6112 {
6113 case rs_align:
6114 case rs_align_test:
6115 case rs_fill:
6116 mapping_state_2 (MAP_DATA, max_chars);
6117 break;
6118 case rs_align_code:
6119 mapping_state_2 (MAP_INSN, max_chars);
6120 break;
6121 default:
6122 break;
6123 }
6124 }
6125 \f
6126 /* Initialize the DWARF-2 unwind information for this procedure. */
6127
6128 void
6129 tc_aarch64_frame_initial_instructions (void)
6130 {
6131 cfi_add_CFA_def_cfa (REG_SP, 0);
6132 }
6133 #endif /* OBJ_ELF */
6134
6135 /* Convert REGNAME to a DWARF-2 register number. */
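/* Illustrative examples derived from the mapping below: "x5" yields DWARF
   register 5, "sp" yields 31, and "d3" yields 67 (64 + 3); register types
   not handled below yield -1.  */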
6136
6137 int
6138 tc_aarch64_regname_to_dw2regnum (char *regname)
6139 {
6140 const reg_entry *reg = parse_reg (&regname);
6141 if (reg == NULL)
6142 return -1;
6143
6144 switch (reg->type)
6145 {
6146 case REG_TYPE_SP_32:
6147 case REG_TYPE_SP_64:
6148 case REG_TYPE_R_32:
6149 case REG_TYPE_R_64:
6150 return reg->number;
6151
6152 case REG_TYPE_FP_B:
6153 case REG_TYPE_FP_H:
6154 case REG_TYPE_FP_S:
6155 case REG_TYPE_FP_D:
6156 case REG_TYPE_FP_Q:
6157 return reg->number + 64;
6158
6159 default:
6160 break;
6161 }
6162 return -1;
6163 }
6164
6165 /* Implement DWARF2_ADDR_SIZE. */
6166
6167 int
6168 aarch64_dwarf2_addr_size (void)
6169 {
6170 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6171 if (ilp32_p)
6172 return 4;
6173 #endif
6174 return bfd_arch_bits_per_address (stdoutput) / 8;
6175 }
6176
6177 /* MD interface: Symbol and relocation handling. */
6178
6179 /* Return the address within the segment that a PC-relative fixup is
6180 relative to. For AArch64, PC-relative fixups applied to instructions
6181 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6182
6183 long
6184 md_pcrel_from_section (fixS * fixP, segT seg)
6185 {
6186 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6187
6188 /* If this is pc-relative and we are going to emit a relocation
6189 then we just want to put out any pipeline compensation that the linker
6190 will need. Otherwise we want to use the calculated base. */
6191 if (fixP->fx_pcrel
6192 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6193 || aarch64_force_relocation (fixP)))
6194 base = 0;
6195
6196 /* AArch64 should be consistent for all pc-relative relocations. */
6197 return base + AARCH64_PCREL_OFFSET;
6198 }
6199
6200 /* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
6201 Otherwise we have no need to provide default values for symbols. */
6202
6203 symbolS *
6204 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6205 {
6206 #ifdef OBJ_ELF
6207 if (name[0] == '_' && name[1] == 'G'
6208 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6209 {
6210 if (!GOT_symbol)
6211 {
6212 if (symbol_find (name))
6213 as_bad (_("GOT already in the symbol table"));
6214
6215 GOT_symbol = symbol_new (name, undefined_section,
6216 (valueT) 0, &zero_address_frag);
6217 }
6218
6219 return GOT_symbol;
6220 }
6221 #endif
6222
6223 return 0;
6224 }
6225
6226 /* Return non-zero if the indicated VALUE has overflowed the maximum
6227 range expressible by an unsigned number with the indicated number of
6228 BITS. */
6229
6230 static bfd_boolean
6231 unsigned_overflow (valueT value, unsigned bits)
6232 {
6233 valueT lim;
6234 if (bits >= sizeof (valueT) * 8)
6235 return FALSE;
6236 lim = (valueT) 1 << bits;
6237 return (value >= lim);
6238 }
6239
6240
6241 /* Return non-zero if the indicated VALUE has overflowed the maximum
6242 range expressible by a signed number with the indicated number of
6243 BITS. */
6244
6245 static bfd_boolean
6246 signed_overflow (offsetT value, unsigned bits)
6247 {
6248 offsetT lim;
6249 if (bits >= sizeof (offsetT) * 8)
6250 return FALSE;
6251 lim = (offsetT) 1 << (bits - 1);
6252 return (value < -lim || value >= lim);
6253 }
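/* Illustrative examples: unsigned_overflow (0x1000, 12) is TRUE because the
   largest 12-bit unsigned value is 0xfff, and signed_overflow (-257, 9) is
   TRUE because a 9-bit signed field only covers -256..255.  */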
6254
6255 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6256 unsigned immediate offset load/store instruction, try to encode it as
6257 an unscaled, 9-bit, signed immediate offset load/store instruction.
6258 Return TRUE if it is successful; otherwise return FALSE.
6259
6260 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
6261 in response to the standard LDR/STR mnemonics when the immediate offset is
6262 unambiguous, i.e. when it is negative or unaligned. */
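/* Illustrative example: 'ldr x0, [x1, #-8]' cannot use the scaled, unsigned
   12-bit immediate LDR encoding because the offset is negative, so it is
   assembled as 'ldur x0, [x1, #-8]'; an unaligned offset such as #1 is
   handled the same way.  */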
6263
6264 static bfd_boolean
6265 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6266 {
6267 int idx;
6268 enum aarch64_op new_op;
6269 const aarch64_opcode *new_opcode;
6270
6271 gas_assert (instr->opcode->iclass == ldst_pos);
6272
6273 switch (instr->opcode->op)
6274 {
6275 case OP_LDRB_POS:new_op = OP_LDURB; break;
6276 case OP_STRB_POS: new_op = OP_STURB; break;
6277 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6278 case OP_LDRH_POS: new_op = OP_LDURH; break;
6279 case OP_STRH_POS: new_op = OP_STURH; break;
6280 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6281 case OP_LDR_POS: new_op = OP_LDUR; break;
6282 case OP_STR_POS: new_op = OP_STUR; break;
6283 case OP_LDRF_POS: new_op = OP_LDURV; break;
6284 case OP_STRF_POS: new_op = OP_STURV; break;
6285 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6286 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6287 default: new_op = OP_NIL; break;
6288 }
6289
6290 if (new_op == OP_NIL)
6291 return FALSE;
6292
6293 new_opcode = aarch64_get_opcode (new_op);
6294 gas_assert (new_opcode != NULL);
6295
6296 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6297 instr->opcode->op, new_opcode->op);
6298
6299 aarch64_replace_opcode (instr, new_opcode);
6300
6301 /* Clear the ADDR_SIMM9 operand's qualifier; otherwise the
6302 qualifier matching may fail because the out-of-date qualifier will
6303 prevent the operand from being updated with a new and correct qualifier. */
6304 idx = aarch64_operand_index (instr->opcode->operands,
6305 AARCH64_OPND_ADDR_SIMM9);
6306 gas_assert (idx == 1);
6307 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6308
6309 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6310
6311 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6312 return FALSE;
6313
6314 return TRUE;
6315 }
6316
6317 /* Called by fix_insn to fix a MOV immediate alias instruction.
6318
6319 Operand for a generic move immediate instruction, which is an alias
6320 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6321 a 32-bit/64-bit immediate value into a general register. An assembler error
6322 shall result if the immediate cannot be created by a single one of these
6323 instructions. If there is a choice, then to ensure reversibility an
6324 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
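/* Illustrative examples (not from the original comments): 'mov x0, #0x20000'
   can be encoded with a single MOVZ (0x2 shifted left by 16); 'mov x0, #-1'
   needs MOVN; and a bitmask immediate such as 'mov x0, #0x5555555555555555'
   can only be produced with the ORR alias.  */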
6325
6326 static void
6327 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6328 {
6329 const aarch64_opcode *opcode;
6330
6331 /* Need to check if the destination is SP/ZR. The check has to be done
6332 before any aarch64_replace_opcode. */
6333 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6334 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6335
6336 instr->operands[1].imm.value = value;
6337 instr->operands[1].skip = 0;
6338
6339 if (try_mov_wide_p)
6340 {
6341 /* Try the MOVZ alias. */
6342 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6343 aarch64_replace_opcode (instr, opcode);
6344 if (aarch64_opcode_encode (instr->opcode, instr,
6345 &instr->value, NULL, NULL))
6346 {
6347 put_aarch64_insn (buf, instr->value);
6348 return;
6349 }
6350 /* Try the MOVN alias. */
6351 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6352 aarch64_replace_opcode (instr, opcode);
6353 if (aarch64_opcode_encode (instr->opcode, instr,
6354 &instr->value, NULL, NULL))
6355 {
6356 put_aarch64_insn (buf, instr->value);
6357 return;
6358 }
6359 }
6360
6361 if (try_mov_bitmask_p)
6362 {
6363 /* Try the ORR alias. */
6364 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6365 aarch64_replace_opcode (instr, opcode);
6366 if (aarch64_opcode_encode (instr->opcode, instr,
6367 &instr->value, NULL, NULL))
6368 {
6369 put_aarch64_insn (buf, instr->value);
6370 return;
6371 }
6372 }
6373
6374 as_bad_where (fixP->fx_file, fixP->fx_line,
6375 _("immediate cannot be moved by a single instruction"));
6376 }
6377
6378 /* An instruction operand which is immediate-related may have a symbol used
6379 in the assembly, e.g.
6380
6381 mov w0, u32
6382 .set u32, 0x00ffff00
6383
6384 At the time when the assembly instruction is parsed, a referenced symbol,
6385 like 'u32' in the above example, may not have been seen; a fixS is created
6386 in such a case and is handled here after symbols have been resolved.
6387 The instruction is fixed up with VALUE using the information in *FIXP plus
6388 extra information in FLAGS.
6389
6390 This function is called by md_apply_fix to fix up instructions that need
6391 a fix-up described above but does not involve any linker-time relocation. */
6392
6393 static void
6394 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6395 {
6396 int idx;
6397 uint32_t insn;
6398 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6399 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6400 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6401
6402 if (new_inst)
6403 {
6404 /* Now the instruction is about to be fixed-up, so the operand that
6405 was previously marked as 'ignored' needs to be unmarked in order
6406 to get the encoding done properly. */
6407 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6408 new_inst->operands[idx].skip = 0;
6409 }
6410
6411 gas_assert (opnd != AARCH64_OPND_NIL);
6412
6413 switch (opnd)
6414 {
6415 case AARCH64_OPND_EXCEPTION:
6416 if (unsigned_overflow (value, 16))
6417 as_bad_where (fixP->fx_file, fixP->fx_line,
6418 _("immediate out of range"));
6419 insn = get_aarch64_insn (buf);
6420 insn |= encode_svc_imm (value);
6421 put_aarch64_insn (buf, insn);
6422 break;
6423
6424 case AARCH64_OPND_AIMM:
6425 /* ADD or SUB with immediate.
6426 NOTE this assumes we come here with an add/sub shifted reg encoding
6427 3 322|2222|2 2 2 21111 111111
6428 1 098|7654|3 2 1 09876 543210 98765 43210
6429 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6430 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6431 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6432 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6433 ->
6434 3 322|2222|2 2 221111111111
6435 1 098|7654|3 2 109876543210 98765 43210
6436 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6437 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6438 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6439 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6440 Fields sf Rn Rd are already set. */
6441 insn = get_aarch64_insn (buf);
6442 if (value < 0)
6443 {
6444 /* Add <-> sub. */
6445 insn = reencode_addsub_switch_add_sub (insn);
6446 value = -value;
6447 }
6448
6449 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6450 && unsigned_overflow (value, 12))
6451 {
6452 /* Try to shift the value by 12 to make it fit. */
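/* For example (illustrative): a resolved value of 0x5000 does not fit in
   12 bits, but it is a multiple of 0x1000, so it can be encoded as
   imm12 = 5 with the LSL #12 shift bit set.  */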
6453 if (((value >> 12) << 12) == value
6454 && ! unsigned_overflow (value, 12 + 12))
6455 {
6456 value >>= 12;
6457 insn |= encode_addsub_imm_shift_amount (1);
6458 }
6459 }
6460
6461 if (unsigned_overflow (value, 12))
6462 as_bad_where (fixP->fx_file, fixP->fx_line,
6463 _("immediate out of range"));
6464
6465 insn |= encode_addsub_imm (value);
6466
6467 put_aarch64_insn (buf, insn);
6468 break;
6469
6470 case AARCH64_OPND_SIMD_IMM:
6471 case AARCH64_OPND_SIMD_IMM_SFT:
6472 case AARCH64_OPND_LIMM:
6473 /* Bit mask immediate. */
6474 gas_assert (new_inst != NULL);
6475 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6476 new_inst->operands[idx].imm.value = value;
6477 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6478 &new_inst->value, NULL, NULL))
6479 put_aarch64_insn (buf, new_inst->value);
6480 else
6481 as_bad_where (fixP->fx_file, fixP->fx_line,
6482 _("invalid immediate"));
6483 break;
6484
6485 case AARCH64_OPND_HALF:
6486 /* 16-bit unsigned immediate. */
6487 if (unsigned_overflow (value, 16))
6488 as_bad_where (fixP->fx_file, fixP->fx_line,
6489 _("immediate out of range"));
6490 insn = get_aarch64_insn (buf);
6491 insn |= encode_movw_imm (value & 0xffff);
6492 put_aarch64_insn (buf, insn);
6493 break;
6494
6495 case AARCH64_OPND_IMM_MOV:
6496 /* Operand for a generic move immediate instruction, which is
6497 an alias instruction that generates a single MOVZ, MOVN or ORR
6498 instruction to load a 32-bit/64-bit immediate value into a general
6499 register. An assembler error shall result if the immediate cannot be
6500 created by a single one of these instructions. If there is a choice,
6501 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6502 and MOVZ or MOVN to ORR. */
6503 gas_assert (new_inst != NULL);
6504 fix_mov_imm_insn (fixP, buf, new_inst, value);
6505 break;
6506
6507 case AARCH64_OPND_ADDR_SIMM7:
6508 case AARCH64_OPND_ADDR_SIMM9:
6509 case AARCH64_OPND_ADDR_SIMM9_2:
6510 case AARCH64_OPND_ADDR_UIMM12:
6511 /* Immediate offset in an address. */
6512 insn = get_aarch64_insn (buf);
6513
6514 gas_assert (new_inst != NULL && new_inst->value == insn);
6515 gas_assert (new_inst->opcode->operands[1] == opnd
6516 || new_inst->opcode->operands[2] == opnd);
6517
6518 /* Get the index of the address operand. */
6519 if (new_inst->opcode->operands[1] == opnd)
6520 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6521 idx = 1;
6522 else
6523 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6524 idx = 2;
6525
6526 /* Update the resolved offset value. */
6527 new_inst->operands[idx].addr.offset.imm = value;
6528
6529 /* Encode/fix-up. */
6530 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6531 &new_inst->value, NULL, NULL))
6532 {
6533 put_aarch64_insn (buf, new_inst->value);
6534 break;
6535 }
6536 else if (new_inst->opcode->iclass == ldst_pos
6537 && try_to_encode_as_unscaled_ldst (new_inst))
6538 {
6539 put_aarch64_insn (buf, new_inst->value);
6540 break;
6541 }
6542
6543 as_bad_where (fixP->fx_file, fixP->fx_line,
6544 _("immediate offset out of range"));
6545 break;
6546
6547 default:
6548 gas_assert (0);
6549 as_fatal (_("unhandled operand code %d"), opnd);
6550 }
6551 }
6552
6553 /* Apply a fixup (fixP) to segment data, once it has been determined
6554 by our caller that we have all the info we need to fix it up.
6555
6556 Parameter valP is the pointer to the value of the bits. */
6557
6558 void
6559 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6560 {
6561 offsetT value = *valP;
6562 uint32_t insn;
6563 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6564 int scale;
6565 unsigned flags = fixP->fx_addnumber;
6566
6567 DEBUG_TRACE ("\n\n");
6568 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6569 DEBUG_TRACE ("Enter md_apply_fix");
6570
6571 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6572
6573 /* Note whether this will delete the relocation. */
6574
6575 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6576 fixP->fx_done = 1;
6577
6578 /* Process the relocations. */
6579 switch (fixP->fx_r_type)
6580 {
6581 case BFD_RELOC_NONE:
6582 /* This will need to go in the object file. */
6583 fixP->fx_done = 0;
6584 break;
6585
6586 case BFD_RELOC_8:
6587 case BFD_RELOC_8_PCREL:
6588 if (fixP->fx_done || !seg->use_rela_p)
6589 md_number_to_chars (buf, value, 1);
6590 break;
6591
6592 case BFD_RELOC_16:
6593 case BFD_RELOC_16_PCREL:
6594 if (fixP->fx_done || !seg->use_rela_p)
6595 md_number_to_chars (buf, value, 2);
6596 break;
6597
6598 case BFD_RELOC_32:
6599 case BFD_RELOC_32_PCREL:
6600 if (fixP->fx_done || !seg->use_rela_p)
6601 md_number_to_chars (buf, value, 4);
6602 break;
6603
6604 case BFD_RELOC_64:
6605 case BFD_RELOC_64_PCREL:
6606 if (fixP->fx_done || !seg->use_rela_p)
6607 md_number_to_chars (buf, value, 8);
6608 break;
6609
6610 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6611 /* We claim that these fixups have been processed here, even if
6612 in fact we generate an error because we do not have a reloc
6613 for them, so tc_gen_reloc() will reject them. */
6614 fixP->fx_done = 1;
6615 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6616 {
6617 as_bad_where (fixP->fx_file, fixP->fx_line,
6618 _("undefined symbol %s used as an immediate value"),
6619 S_GET_NAME (fixP->fx_addsy));
6620 goto apply_fix_return;
6621 }
6622 fix_insn (fixP, flags, value);
6623 break;
6624
6625 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6626 if (fixP->fx_done || !seg->use_rela_p)
6627 {
6628 if (value & 3)
6629 as_bad_where (fixP->fx_file, fixP->fx_line,
6630 _("pc-relative load offset not word aligned"));
6631 if (signed_overflow (value, 21))
6632 as_bad_where (fixP->fx_file, fixP->fx_line,
6633 _("pc-relative load offset out of range"));
6634 insn = get_aarch64_insn (buf);
6635 insn |= encode_ld_lit_ofs_19 (value >> 2);
6636 put_aarch64_insn (buf, insn);
6637 }
6638 break;
6639
6640 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6641 if (fixP->fx_done || !seg->use_rela_p)
6642 {
6643 if (signed_overflow (value, 21))
6644 as_bad_where (fixP->fx_file, fixP->fx_line,
6645 _("pc-relative address offset out of range"));
6646 insn = get_aarch64_insn (buf);
6647 insn |= encode_adr_imm (value);
6648 put_aarch64_insn (buf, insn);
6649 }
6650 break;
6651
6652 case BFD_RELOC_AARCH64_BRANCH19:
6653 if (fixP->fx_done || !seg->use_rela_p)
6654 {
6655 if (value & 3)
6656 as_bad_where (fixP->fx_file, fixP->fx_line,
6657 _("conditional branch target not word aligned"));
6658 if (signed_overflow (value, 21))
6659 as_bad_where (fixP->fx_file, fixP->fx_line,
6660 _("conditional branch out of range"));
6661 insn = get_aarch64_insn (buf);
6662 insn |= encode_cond_branch_ofs_19 (value >> 2);
6663 put_aarch64_insn (buf, insn);
6664 }
6665 break;
6666
6667 case BFD_RELOC_AARCH64_TSTBR14:
6668 if (fixP->fx_done || !seg->use_rela_p)
6669 {
6670 if (value & 3)
6671 as_bad_where (fixP->fx_file, fixP->fx_line,
6672 _("conditional branch target not word aligned"));
6673 if (signed_overflow (value, 16))
6674 as_bad_where (fixP->fx_file, fixP->fx_line,
6675 _("conditional branch out of range"));
6676 insn = get_aarch64_insn (buf);
6677 insn |= encode_tst_branch_ofs_14 (value >> 2);
6678 put_aarch64_insn (buf, insn);
6679 }
6680 break;
6681
6682 case BFD_RELOC_AARCH64_CALL26:
6683 case BFD_RELOC_AARCH64_JUMP26:
6684 if (fixP->fx_done || !seg->use_rela_p)
6685 {
6686 if (value & 3)
6687 as_bad_where (fixP->fx_file, fixP->fx_line,
6688 _("branch target not word aligned"));
6689 if (signed_overflow (value, 28))
6690 as_bad_where (fixP->fx_file, fixP->fx_line,
6691 _("branch out of range"));
6692 insn = get_aarch64_insn (buf);
6693 insn |= encode_branch_ofs_26 (value >> 2);
6694 put_aarch64_insn (buf, insn);
6695 }
6696 break;
6697
6698 case BFD_RELOC_AARCH64_MOVW_G0:
6699 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6700 case BFD_RELOC_AARCH64_MOVW_G0_S:
6701 scale = 0;
6702 goto movw_common;
6703 case BFD_RELOC_AARCH64_MOVW_G1:
6704 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6705 case BFD_RELOC_AARCH64_MOVW_G1_S:
6706 scale = 16;
6707 goto movw_common;
6708 case BFD_RELOC_AARCH64_MOVW_G2:
6709 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6710 case BFD_RELOC_AARCH64_MOVW_G2_S:
6711 scale = 32;
6712 goto movw_common;
6713 case BFD_RELOC_AARCH64_MOVW_G3:
6714 scale = 48;
6715 movw_common:
6716 if (fixP->fx_done || !seg->use_rela_p)
6717 {
6718 insn = get_aarch64_insn (buf);
6719
6720 if (!fixP->fx_done)
6721 {
6722 /* REL signed addend must fit in 16 bits */
6723 if (signed_overflow (value, 16))
6724 as_bad_where (fixP->fx_file, fixP->fx_line,
6725 _("offset out of range"));
6726 }
6727 else
6728 {
6729 /* Check for overflow and scale. */
6730 switch (fixP->fx_r_type)
6731 {
6732 case BFD_RELOC_AARCH64_MOVW_G0:
6733 case BFD_RELOC_AARCH64_MOVW_G1:
6734 case BFD_RELOC_AARCH64_MOVW_G2:
6735 case BFD_RELOC_AARCH64_MOVW_G3:
6736 if (unsigned_overflow (value, scale + 16))
6737 as_bad_where (fixP->fx_file, fixP->fx_line,
6738 _("unsigned value out of range"));
6739 break;
6740 case BFD_RELOC_AARCH64_MOVW_G0_S:
6741 case BFD_RELOC_AARCH64_MOVW_G1_S:
6742 case BFD_RELOC_AARCH64_MOVW_G2_S:
6743 /* NOTE: We can only come here with movz or movn. */
6744 if (signed_overflow (value, scale + 16))
6745 as_bad_where (fixP->fx_file, fixP->fx_line,
6746 _("signed value out of range"));
6747 if (value < 0)
6748 {
6749 /* Force use of MOVN. */
6750 value = ~value;
6751 insn = reencode_movzn_to_movn (insn);
6752 }
6753 else
6754 {
6755 /* Force use of MOVZ. */
6756 insn = reencode_movzn_to_movz (insn);
6757 }
6758 break;
6759 default:
6760 /* Unchecked relocations. */
6761 break;
6762 }
6763 value >>= scale;
6764 }
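/* Illustrative example: for BFD_RELOC_AARCH64_MOVW_G1 resolved to
   0x12345678, scale is 16, so the 16-bit field inserted below is 0x1234.  */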
6765
6766 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6767 insn |= encode_movw_imm (value & 0xffff);
6768
6769 put_aarch64_insn (buf, insn);
6770 }
6771 break;
6772
6773 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6774 fixP->fx_r_type = (ilp32_p
6775 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6776 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6777 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6778 /* Should always be exported to object file, see
6779 aarch64_force_relocation(). */
6780 gas_assert (!fixP->fx_done);
6781 gas_assert (seg->use_rela_p);
6782 break;
6783
6784 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6785 fixP->fx_r_type = (ilp32_p
6786 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6787 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6788 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6789 /* Should always be exported to object file, see
6790 aarch64_force_relocation(). */
6791 gas_assert (!fixP->fx_done);
6792 gas_assert (seg->use_rela_p);
6793 break;
6794
6795 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6796 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6797 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6798 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6799 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6800 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6801 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6802 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6803 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6804 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6805 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6806 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6807 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6808 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
6809 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6810 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6811 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6812 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6813 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6814 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6815 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6816 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6817 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6818 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6819 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6820 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6821 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6822 /* Should always be exported to object file, see
6823 aarch64_force_relocation(). */
6824 gas_assert (!fixP->fx_done);
6825 gas_assert (seg->use_rela_p);
6826 break;
6827
6828 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6829 /* Should always be exported to object file, see
6830 aarch64_force_relocation(). */
6831 fixP->fx_r_type = (ilp32_p
6832 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6833 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6834 gas_assert (!fixP->fx_done);
6835 gas_assert (seg->use_rela_p);
6836 break;
6837
6838 case BFD_RELOC_AARCH64_ADD_LO12:
6839 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6840 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6841 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6842 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6843 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6844 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6845 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
6846 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6847 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6848 case BFD_RELOC_AARCH64_LDST128_LO12:
6849 case BFD_RELOC_AARCH64_LDST16_LO12:
6850 case BFD_RELOC_AARCH64_LDST32_LO12:
6851 case BFD_RELOC_AARCH64_LDST64_LO12:
6852 case BFD_RELOC_AARCH64_LDST8_LO12:
6853 /* Should always be exported to object file, see
6854 aarch64_force_relocation(). */
6855 gas_assert (!fixP->fx_done);
6856 gas_assert (seg->use_rela_p);
6857 break;
6858
6859 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6860 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6861 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6862 break;
6863
6864 case BFD_RELOC_UNUSED:
6865 /* An error will already have been reported. */
6866 break;
6867
6868 default:
6869 as_bad_where (fixP->fx_file, fixP->fx_line,
6870 _("unexpected %s fixup"),
6871 bfd_get_reloc_code_name (fixP->fx_r_type));
6872 break;
6873 }
6874
6875 apply_fix_return:
6876 /* Free the allocated struct aarch64_inst.
6877 N.B. currently only a very limited number of fix-up types actually use
6878 this field, so the impact on performance should be minimal. */
6879 if (fixP->tc_fix_data.inst != NULL)
6880 free (fixP->tc_fix_data.inst);
6881
6882 return;
6883 }
6884
6885 /* Translate internal representation of relocation info to BFD target
6886 format. */
6887
6888 arelent *
6889 tc_gen_reloc (asection * section, fixS * fixp)
6890 {
6891 arelent *reloc;
6892 bfd_reloc_code_real_type code;
6893
6894 reloc = xmalloc (sizeof (arelent));
6895
6896 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6897 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6898 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6899
6900 if (fixp->fx_pcrel)
6901 {
6902 if (section->use_rela_p)
6903 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6904 else
6905 fixp->fx_offset = reloc->address;
6906 }
6907 reloc->addend = fixp->fx_offset;
6908
6909 code = fixp->fx_r_type;
6910 switch (code)
6911 {
6912 case BFD_RELOC_16:
6913 if (fixp->fx_pcrel)
6914 code = BFD_RELOC_16_PCREL;
6915 break;
6916
6917 case BFD_RELOC_32:
6918 if (fixp->fx_pcrel)
6919 code = BFD_RELOC_32_PCREL;
6920 break;
6921
6922 case BFD_RELOC_64:
6923 if (fixp->fx_pcrel)
6924 code = BFD_RELOC_64_PCREL;
6925 break;
6926
6927 default:
6928 break;
6929 }
6930
6931 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6932 if (reloc->howto == NULL)
6933 {
6934 as_bad_where (fixp->fx_file, fixp->fx_line,
6935 _
6936 ("cannot represent %s relocation in this object file format"),
6937 bfd_get_reloc_code_name (code));
6938 return NULL;
6939 }
6940
6941 return reloc;
6942 }
6943
6944 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6945
6946 void
6947 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6948 {
6949 bfd_reloc_code_real_type type;
6950 int pcrel = 0;
6951
6952 /* Pick a reloc.
6953 FIXME: @@ Should look at CPU word size. */
6954 switch (size)
6955 {
6956 case 1:
6957 type = BFD_RELOC_8;
6958 break;
6959 case 2:
6960 type = BFD_RELOC_16;
6961 break;
6962 case 4:
6963 type = BFD_RELOC_32;
6964 break;
6965 case 8:
6966 type = BFD_RELOC_64;
6967 break;
6968 default:
6969 as_bad (_("cannot do %u-byte relocation"), size);
6970 type = BFD_RELOC_UNUSED;
6971 break;
6972 }
6973
6974 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6975 }
6976
6977 int
6978 aarch64_force_relocation (struct fix *fixp)
6979 {
6980 switch (fixp->fx_r_type)
6981 {
6982 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6983 /* Perform these "immediate" internal relocations
6984 even if the symbol is extern or weak. */
6985 return 0;
6986
6987 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6988 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6989 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6990 /* Pseudo relocs that need to be fixed up according to
6991 ilp32_p. */
6992 return 0;
6993
6994 case BFD_RELOC_AARCH64_ADD_LO12:
6995 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6996 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6997 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6998 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6999 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7000 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7001 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7002 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7003 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7004 case BFD_RELOC_AARCH64_LDST128_LO12:
7005 case BFD_RELOC_AARCH64_LDST16_LO12:
7006 case BFD_RELOC_AARCH64_LDST32_LO12:
7007 case BFD_RELOC_AARCH64_LDST64_LO12:
7008 case BFD_RELOC_AARCH64_LDST8_LO12:
7009 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7010 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7011 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7012 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7013 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7014 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7015 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7016 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7017 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7018 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7019 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7020 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7021 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7022 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7023 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7024 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7025 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7026 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7027 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7028 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7029 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7030 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7031 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7032 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7033 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7034 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7035 /* Always leave these relocations for the linker. */
7036 return 1;
7037
7038 default:
7039 break;
7040 }
7041
7042 return generic_force_reloc (fixp);
7043 }
7044
7045 #ifdef OBJ_ELF
7046
7047 const char *
7048 elf64_aarch64_target_format (void)
7049 {
7050 if (target_big_endian)
7051 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7052 else
7053 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7054 }
7055
7056 void
7057 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
7058 {
7059 elf_frob_symbol (symp, puntp);
7060 }
7061 #endif
7062
7063 /* MD interface: Finalization. */
7064
7065 /* A good place to do this, although this was probably not intended
7066 for this kind of use. We need to dump the literal pool before
7067 references are made to a null symbol pointer. */
7068
7069 void
7070 aarch64_cleanup (void)
7071 {
7072 literal_pool *pool;
7073
7074 for (pool = list_of_pools; pool; pool = pool->next)
7075 {
7076 /* Put it at the end of the relevant section. */
7077 subseg_set (pool->section, pool->sub_section);
7078 s_ltorg (0);
7079 }
7080 }
7081
7082 #ifdef OBJ_ELF
7083 /* Remove any excess mapping symbols generated for alignment frags in
7084 SEC. We may have created a mapping symbol before a zero byte
7085 alignment; remove it if there's a mapping symbol after the
7086 alignment. */
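/* Illustrative example (an assumption about a typical case): an alignment
   that turns out to need no padding can leave a mapping symbol (e.g. "$x")
   at the alignment point; if the following frag already begins with its own
   mapping symbol, the earlier one is redundant and is removed below.  */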
7087 static void
7088 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
7089 void *dummy ATTRIBUTE_UNUSED)
7090 {
7091 segment_info_type *seginfo = seg_info (sec);
7092 fragS *fragp;
7093
7094 if (seginfo == NULL || seginfo->frchainP == NULL)
7095 return;
7096
7097 for (fragp = seginfo->frchainP->frch_root;
7098 fragp != NULL; fragp = fragp->fr_next)
7099 {
7100 symbolS *sym = fragp->tc_frag_data.last_map;
7101 fragS *next = fragp->fr_next;
7102
7103 /* Variable-sized frags have been converted to fixed size by
7104 this point. But if this was variable-sized to start with,
7105 there will be a fixed-size frag after it. So don't handle
7106 next == NULL. */
7107 if (sym == NULL || next == NULL)
7108 continue;
7109
7110 if (S_GET_VALUE (sym) < next->fr_address)
7111 /* Not at the end of this frag. */
7112 continue;
7113 know (S_GET_VALUE (sym) == next->fr_address);
7114
7115 do
7116 {
7117 if (next->tc_frag_data.first_map != NULL)
7118 {
7119 /* Next frag starts with a mapping symbol. Discard this
7120 one. */
7121 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7122 break;
7123 }
7124
7125 if (next->fr_next == NULL)
7126 {
7127 /* This mapping symbol is at the end of the section. Discard
7128 it. */
7129 know (next->fr_fix == 0 && next->fr_var == 0);
7130 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7131 break;
7132 }
7133
7134 /* As long as we have empty frags without any mapping symbols,
7135 keep looking. */
7136 /* If the next frag is non-empty and does not start with a
7137 mapping symbol, then this mapping symbol is required. */
7138 if (next->fr_address != next->fr_next->fr_address)
7139 break;
7140
7141 next = next->fr_next;
7142 }
7143 while (next != NULL);
7144 }
7145 }
7146 #endif
7147
7148 /* Adjust the symbol table. */
7149
7150 void
7151 aarch64_adjust_symtab (void)
7152 {
7153 #ifdef OBJ_ELF
7154 /* Remove any overlapping mapping symbols generated by alignment frags. */
7155 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7156 /* Now do generic ELF adjustments. */
7157 elf_adjust_symtab ();
7158 #endif
7159 }
7160
7161 static void
7162 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7163 {
7164 const char *hash_err;
7165
7166 hash_err = hash_insert (table, key, value);
7167 if (hash_err)
7168 printf ("Internal Error: Can't hash %s\n", key);
7169 }
7170
7171 static void
7172 fill_instruction_hash_table (void)
7173 {
7174 aarch64_opcode *opcode = aarch64_opcode_table;
7175
7176 while (opcode->name != NULL)
7177 {
7178 templates *templ, *new_templ;
7179 templ = hash_find (aarch64_ops_hsh, opcode->name);
7180
7181 new_templ = (templates *) xmalloc (sizeof (templates));
7182 new_templ->opcode = opcode;
7183 new_templ->next = NULL;
7184
7185 if (!templ)
7186 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7187 else
7188 {
7189 new_templ->next = templ->next;
7190 templ->next = new_templ;
7191 }
7192 ++opcode;
7193 }
7194 }
7195
7196 static inline void
7197 convert_to_upper (char *dst, const char *src, size_t num)
7198 {
7199 unsigned int i;
7200 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7201 *dst = TOUPPER (*src);
7202 *dst = '\0';
7203 }
7204
7205 /* Assume STR points to a lower-case string; allocate, convert and return
7206 the corresponding upper-case string. */
7207 static inline const char*
7208 get_upper_str (const char *str)
7209 {
7210 char *ret;
7211 size_t len = strlen (str);
7212 if ((ret = xmalloc (len + 1)) == NULL)
7213 abort ();
7214 convert_to_upper (ret, str, len);
7215 return ret;
7216 }
7217
7218 /* MD interface: Initialization. */
7219
7220 void
7221 md_begin (void)
7222 {
7223 unsigned mach;
7224 unsigned int i;
7225
7226 if ((aarch64_ops_hsh = hash_new ()) == NULL
7227 || (aarch64_cond_hsh = hash_new ()) == NULL
7228 || (aarch64_shift_hsh = hash_new ()) == NULL
7229 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7230 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7231 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7232 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7233 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7234 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7235 || (aarch64_reg_hsh = hash_new ()) == NULL
7236 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7237 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7238 || (aarch64_pldop_hsh = hash_new ()) == NULL)
7239 as_fatal (_("virtual memory exhausted"));
7240
7241 fill_instruction_hash_table ();
7242
7243 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7244 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7245 (void *) (aarch64_sys_regs + i));
7246
7247 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7248 checked_hash_insert (aarch64_pstatefield_hsh,
7249 aarch64_pstatefields[i].name,
7250 (void *) (aarch64_pstatefields + i));
7251
7252 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
7253 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7254 aarch64_sys_regs_ic[i].template,
7255 (void *) (aarch64_sys_regs_ic + i));
7256
7257 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
7258 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7259 aarch64_sys_regs_dc[i].template,
7260 (void *) (aarch64_sys_regs_dc + i));
7261
7262 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
7263 checked_hash_insert (aarch64_sys_regs_at_hsh,
7264 aarch64_sys_regs_at[i].template,
7265 (void *) (aarch64_sys_regs_at + i));
7266
7267 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
7268 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7269 aarch64_sys_regs_tlbi[i].template,
7270 (void *) (aarch64_sys_regs_tlbi + i));
7271
7272 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7273 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7274 (void *) (reg_names + i));
7275
7276 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7277 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7278 (void *) (nzcv_names + i));
7279
7280 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7281 {
7282 const char *name = aarch64_operand_modifiers[i].name;
7283 checked_hash_insert (aarch64_shift_hsh, name,
7284 (void *) (aarch64_operand_modifiers + i));
7285 /* Also hash the name in the upper case. */
7286 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7287 (void *) (aarch64_operand_modifiers + i));
7288 }
7289
7290 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7291 {
7292 unsigned int j;
7293 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7294 the same condition code. */
7295 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7296 {
7297 const char *name = aarch64_conds[i].names[j];
7298 if (name == NULL)
7299 break;
7300 checked_hash_insert (aarch64_cond_hsh, name,
7301 (void *) (aarch64_conds + i));
7302 /* Also hash the name in the upper case. */
7303 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7304 (void *) (aarch64_conds + i));
7305 }
7306 }
7307
7308 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7309 {
7310 const char *name = aarch64_barrier_options[i].name;
7311 /* Skip xx00 - the unallocated values of option. */
7312 if ((i & 0x3) == 0)
7313 continue;
7314 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7315 (void *) (aarch64_barrier_options + i));
7316 /* Also hash the name in the upper case. */
7317 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7318 (void *) (aarch64_barrier_options + i));
7319 }
7320
7321 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7322 {
7323 const char* name = aarch64_prfops[i].name;
7324 /* Skip the unallocated hint encodings. */
7325 if (name == NULL)
7326 continue;
7327 checked_hash_insert (aarch64_pldop_hsh, name,
7328 (void *) (aarch64_prfops + i));
7329 /* Also hash the name in the upper case. */
7330 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7331 (void *) (aarch64_prfops + i));
7332 }
7333
7334 /* Set the cpu variant based on the command-line options. */
7335 if (!mcpu_cpu_opt)
7336 mcpu_cpu_opt = march_cpu_opt;
7337
7338 if (!mcpu_cpu_opt)
7339 mcpu_cpu_opt = &cpu_default;
7340
7341 cpu_variant = *mcpu_cpu_opt;
7342
7343 /* Record the CPU type. */
7344 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7345
7346 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7347 }
7348
7349 /* Command line processing. */
7350
7351 const char *md_shortopts = "m:";
7352
7353 #ifdef AARCH64_BI_ENDIAN
7354 #define OPTION_EB (OPTION_MD_BASE + 0)
7355 #define OPTION_EL (OPTION_MD_BASE + 1)
7356 #else
7357 #if TARGET_BYTES_BIG_ENDIAN
7358 #define OPTION_EB (OPTION_MD_BASE + 0)
7359 #else
7360 #define OPTION_EL (OPTION_MD_BASE + 1)
7361 #endif
7362 #endif
7363
7364 struct option md_longopts[] = {
7365 #ifdef OPTION_EB
7366 {"EB", no_argument, NULL, OPTION_EB},
7367 #endif
7368 #ifdef OPTION_EL
7369 {"EL", no_argument, NULL, OPTION_EL},
7370 #endif
7371 {NULL, no_argument, NULL, 0}
7372 };
7373
7374 size_t md_longopts_size = sizeof (md_longopts);
7375
7376 struct aarch64_option_table
7377 {
7378 char *option; /* Option name to match. */
7379 char *help; /* Help information. */
7380 int *var; /* Variable to change. */
7381 int value; /* What to change it to. */
7382 char *deprecated; /* If non-null, print this message. */
7383 };
7384
7385 static struct aarch64_option_table aarch64_opts[] = {
7386 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7387 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7388 NULL},
7389 #ifdef DEBUG_AARCH64
7390 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7391 #endif /* DEBUG_AARCH64 */
7392 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7393 NULL},
7394 {"mno-verbose-error", N_("do not output verbose error messages"),
7395 &verbose_error_p, 0, NULL},
7396 {NULL, NULL, NULL, 0, NULL}
7397 };
7398
7399 struct aarch64_cpu_option_table
7400 {
7401 char *name;
7402 const aarch64_feature_set value;
7403 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7404 case. */
7405 const char *canonical_name;
7406 };
7407
7408 /* This list should, at a minimum, contain all the cpu names
7409 recognized by GCC. */
7410 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7411 {"all", AARCH64_ANY, NULL},
7412 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7413 AARCH64_FEATURE_CRC), "Cortex-A53"},
7414 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7415 AARCH64_FEATURE_CRC), "Cortex-A57"},
7416 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7417 AARCH64_FEATURE_CRC), "Cortex-A72"},
7418 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
7419 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7420 "Samsung Exynos M1"},
7421 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7422 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7423 "Cavium ThunderX"},
7424 /* The 'xgene-1' name is an older spelling of 'xgene1'; it was used
7425 in earlier releases and is superseded by 'xgene1' in all
7426 tools. */
7427 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7428 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7429 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7430 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7431 {"generic", AARCH64_ARCH_V8, NULL},
7432
7433 {NULL, AARCH64_ARCH_NONE, NULL}
7434 };
7435
7436 struct aarch64_arch_option_table
7437 {
7438 char *name;
7439 const aarch64_feature_set value;
7440 };
7441
7442 /* This list should, at a minimum, contain all the architecture names
7443 recognized by GCC. */
7444 static const struct aarch64_arch_option_table aarch64_archs[] = {
7445 {"all", AARCH64_ANY},
7446 {"armv8-a", AARCH64_ARCH_V8},
7447 {"armv8.1-a", AARCH64_ARCH_V8_1},
7448 {NULL, AARCH64_ARCH_NONE}
7449 };
7450
7451 /* ISA extensions. */
7452 struct aarch64_option_cpu_value_table
7453 {
7454 char *name;
7455 const aarch64_feature_set value;
7456 };
7457
7458 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7459 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7460 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7461 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7462 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
7463 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7464 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0)},
7465 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0)},
7466 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_SIMD
7467 | AARCH64_FEATURE_RDMA, 0)},
7468 {NULL, AARCH64_ARCH_NONE}
7469 };
7470
7471 struct aarch64_long_option_table
7472 {
7473 char *option; /* Substring to match. */
7474 char *help; /* Help information. */
7475 int (*func) (char *subopt); /* Function to decode sub-option. */
7476 char *deprecated; /* If non-null, print this message. */
7477 };
7478
7479 static int
7480 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p,
7481 bfd_boolean ext_only)
7482 {
7483 /* We insist on extensions being added before being removed. We achieve
7484 this by using the ADDING_VALUE variable to indicate whether we are
7485 adding an extension (1) or removing it (0) and only allowing it to
7486 change in the order -1 -> 1 -> 0. */
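/* For example (illustrative): an extension string such as "+crc+nocrypto"
   is accepted (additions first, then removals), whereas "+nocrypto+crc" is
   rejected because an extension would be added after one has been
   removed.  */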
7487 int adding_value = -1;
7488 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7489
7490 /* Copy the feature set, so that we can modify it. */
7491 *ext_set = **opt_p;
7492 *opt_p = ext_set;
7493
7494 while (str != NULL && *str != 0)
7495 {
7496 const struct aarch64_option_cpu_value_table *opt;
7497 char *ext = NULL;
7498 int optlen;
7499
7500 if (!ext_only)
7501 {
7502 if (*str != '+')
7503 {
7504 as_bad (_("invalid architectural extension"));
7505 return 0;
7506 }
7507
7508 ext = strchr (++str, '+');
7509 }
7510
7511 if (ext != NULL)
7512 optlen = ext - str;
7513 else
7514 optlen = strlen (str);
7515
7516 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7517 {
7518 if (adding_value != 0)
7519 adding_value = 0;
7520 optlen -= 2;
7521 str += 2;
7522 }
7523 else if (optlen > 0)
7524 {
7525 if (adding_value == -1)
7526 adding_value = 1;
7527 else if (adding_value != 1)
7528 {
7529 as_bad (_("must specify extensions to add before specifying "
7530 "those to remove"));
7531 return FALSE;
7532 }
7533 }
7534
7535 if (optlen == 0)
7536 {
7537 as_bad (_("missing architectural extension"));
7538 return 0;
7539 }
7540
7541 gas_assert (adding_value != -1);
7542
7543 for (opt = aarch64_features; opt->name != NULL; opt++)
7544 if (strncmp (opt->name, str, optlen) == 0)
7545 {
7546 /* Add or remove the extension. */
7547 if (adding_value)
7548 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7549 else
7550 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7551 break;
7552 }
7553
7554 if (opt->name == NULL)
7555 {
7556 as_bad (_("unknown architectural extension `%s'"), str);
7557 return 0;
7558 }
7559
7560 str = ext;
7561 };
7562
7563 return 1;
7564 }
7565
7566 static int
7567 aarch64_parse_cpu (char *str)
7568 {
7569 const struct aarch64_cpu_option_table *opt;
7570 char *ext = strchr (str, '+');
7571 size_t optlen;
7572
7573 if (ext != NULL)
7574 optlen = ext - str;
7575 else
7576 optlen = strlen (str);
7577
7578 if (optlen == 0)
7579 {
7580 as_bad (_("missing cpu name `%s'"), str);
7581 return 0;
7582 }
7583
7584 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7585 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7586 {
7587 mcpu_cpu_opt = &opt->value;
7588 if (ext != NULL)
7589 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7590
7591 return 1;
7592 }
7593
7594 as_bad (_("unknown cpu `%s'"), str);
7595 return 0;
7596 }
7597
7598 static int
7599 aarch64_parse_arch (char *str)
7600 {
7601 const struct aarch64_arch_option_table *opt;
7602 char *ext = strchr (str, '+');
7603 size_t optlen;
7604
7605 if (ext != NULL)
7606 optlen = ext - str;
7607 else
7608 optlen = strlen (str);
7609
7610 if (optlen == 0)
7611 {
7612 as_bad (_("missing architecture name `%s'"), str);
7613 return 0;
7614 }
7615
7616 for (opt = aarch64_archs; opt->name != NULL; opt++)
7617 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7618 {
7619 march_cpu_opt = &opt->value;
7620 if (ext != NULL)
7621 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
7622
7623 return 1;
7624 }
7625
7626 as_bad (_("unknown architecture `%s'\n"), str);
7627 return 0;
7628 }
7629
7630 /* ABIs. */
7631 struct aarch64_option_abi_value_table
7632 {
7633 char *name;
7634 enum aarch64_abi_type value;
7635 };
7636
7637 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7638 {"ilp32", AARCH64_ABI_ILP32},
7639 {"lp64", AARCH64_ABI_LP64},
7640 {NULL, 0}
7641 };
7642
7643 static int
7644 aarch64_parse_abi (char *str)
7645 {
7646 const struct aarch64_option_abi_value_table *opt;
7647 size_t optlen = strlen (str);
7648
7649 if (optlen == 0)
7650 {
7651 as_bad (_("missing abi name `%s'"), str);
7652 return 0;
7653 }
7654
7655 for (opt = aarch64_abis; opt->name != NULL; opt++)
7656 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7657 {
7658 aarch64_abi = opt->value;
7659 return 1;
7660 }
7661
7662 as_bad (_("unknown abi `%s'\n"), str);
7663 return 0;
7664 }
7665
7666 static struct aarch64_long_option_table aarch64_long_opts[] = {
7667 #ifdef OBJ_ELF
7668 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
7669 aarch64_parse_abi, NULL},
7670 #endif /* OBJ_ELF */
7671 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7672 aarch64_parse_cpu, NULL},
7673 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7674 aarch64_parse_arch, NULL},
7675 {NULL, NULL, 0, NULL}
7676 };
7677
7678 int
7679 md_parse_option (int c, char *arg)
7680 {
7681 struct aarch64_option_table *opt;
7682 struct aarch64_long_option_table *lopt;
7683
7684 switch (c)
7685 {
7686 #ifdef OPTION_EB
7687 case OPTION_EB:
7688 target_big_endian = 1;
7689 break;
7690 #endif
7691
7692 #ifdef OPTION_EL
7693 case OPTION_EL:
7694 target_big_endian = 0;
7695 break;
7696 #endif
7697
7698 case 'a':
7699 /* Listing option. Just ignore these, we don't support additional
7700 ones. */
7701 return 0;
7702
7703 default:
7704 for (opt = aarch64_opts; opt->option != NULL; opt++)
7705 {
7706 if (c == opt->option[0]
7707 && ((arg == NULL && opt->option[1] == 0)
7708 || streq (arg, opt->option + 1)))
7709 {
7710 /* If the option is deprecated, tell the user. */
7711 if (opt->deprecated != NULL)
7712 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7713 arg ? arg : "", _(opt->deprecated));
7714
7715 if (opt->var != NULL)
7716 *opt->var = opt->value;
7717
7718 return 1;
7719 }
7720 }
7721
7722 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7723 {
7724 /* These options are expected to have an argument. */
7725 if (c == lopt->option[0]
7726 && arg != NULL
7727 && strncmp (arg, lopt->option + 1,
7728 strlen (lopt->option + 1)) == 0)
7729 {
7730 /* If the option is deprecated, tell the user. */
7731 if (lopt->deprecated != NULL)
7732 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7733 _(lopt->deprecated));
7734
7735 /* Call the sub-option parser. */
7736 return lopt->func (arg + strlen (lopt->option) - 1);
7737 }
7738 }
7739
7740 return 0;
7741 }
7742
7743 return 1;
7744 }
7745
7746 void
7747 md_show_usage (FILE * fp)
7748 {
7749 struct aarch64_option_table *opt;
7750 struct aarch64_long_option_table *lopt;
7751
7752 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7753
7754 for (opt = aarch64_opts; opt->option != NULL; opt++)
7755 if (opt->help != NULL)
7756 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7757
7758 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7759 if (lopt->help != NULL)
7760 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7761
7762 #ifdef OPTION_EB
7763 fprintf (fp, _("\
7764 -EB assemble code for a big-endian cpu\n"));
7765 #endif
7766
7767 #ifdef OPTION_EL
7768 fprintf (fp, _("\
7769 -EL assemble code for a little-endian cpu\n"));
7770 #endif
7771 }
7772
7773 /* Parse a .cpu directive. */
7774
7775 static void
7776 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7777 {
7778 const struct aarch64_cpu_option_table *opt;
7779 char saved_char;
7780 char *name;
7781 char *ext;
7782 size_t optlen;
7783
7784 name = input_line_pointer;
7785 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7786 input_line_pointer++;
7787 saved_char = *input_line_pointer;
7788 *input_line_pointer = 0;
7789
7790 ext = strchr (name, '+');
7791
7792 if (ext != NULL)
7793 optlen = ext - name;
7794 else
7795 optlen = strlen (name);
7796
7797 /* Skip the first "all" entry. */
7798 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7799 if (strlen (opt->name) == optlen
7800 && strncmp (name, opt->name, optlen) == 0)
7801 {
7802 mcpu_cpu_opt = &opt->value;
7803 if (ext != NULL)
7804 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7805 return;
7806
7807 cpu_variant = *mcpu_cpu_opt;
7808
7809 *input_line_pointer = saved_char;
7810 demand_empty_rest_of_line ();
7811 return;
7812 }
7813 as_bad (_("unknown cpu `%s'"), name);
7814 *input_line_pointer = saved_char;
7815 ignore_rest_of_line ();
7816 }
7817
7818
7819 /* Parse a .arch directive. */
7820
7821 static void
7822 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7823 {
7824 const struct aarch64_arch_option_table *opt;
7825 char saved_char;
7826 char *name;
7827 char *ext;
7828 size_t optlen;
7829
7830 name = input_line_pointer;
7831 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7832 input_line_pointer++;
7833 saved_char = *input_line_pointer;
7834 *input_line_pointer = 0;
7835
7836 ext = strchr (name, '+');
7837
7838 if (ext != NULL)
7839 optlen = ext - name;
7840 else
7841 optlen = strlen (name);
7842
7843 /* Skip the first "all" entry. */
7844 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7845 if (strlen (opt->name) == optlen
7846 && strncmp (name, opt->name, optlen) == 0)
7847 {
7848 mcpu_cpu_opt = &opt->value;
7849 if (ext != NULL)
7850 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7851 return;
7852
7853 cpu_variant = *mcpu_cpu_opt;
7854
7855 *input_line_pointer = saved_char;
7856 demand_empty_rest_of_line ();
7857 return;
7858 }
7859
7860 as_bad (_("unknown architecture `%s'\n"), name);
7861 *input_line_pointer = saved_char;
7862 ignore_rest_of_line ();
7863 }
7864
7865 /* Parse a .arch_extension directive. */
7866
7867 static void
7868 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
7869 {
7870 char saved_char;
7871 char *ext = input_line_pointer;
7872
7873 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7874 input_line_pointer++;
7875 saved_char = *input_line_pointer;
7876 *input_line_pointer = 0;
7877
7878 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
7879 return;
7880
7881 cpu_variant = *mcpu_cpu_opt;
7882
7883 *input_line_pointer = saved_char;
7884 demand_empty_rest_of_line ();
7885 }
7886
7887 /* Copy symbol information. */
7888
7889 void
7890 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7891 {
7892 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7893 }