Enable verbose error messages by default for AArch64 gas.
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2014 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 static aarch64_feature_set cpu_variant;
46
47 /* Variables that we set while parsing command-line options. Once all
48 options have been read we re-process these values to set the real
49 assembly flags. */
50 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
51 static const aarch64_feature_set *march_cpu_opt = NULL;
52
53 /* Constants for known architecture features. */
54 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
55
56 static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY;
57 static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE;
58
59 #ifdef OBJ_ELF
60 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
61 static symbolS *GOT_symbol;
62
63 /* Which ABI to use. */
64 enum aarch64_abi_type
65 {
66 AARCH64_ABI_LP64 = 0,
67 AARCH64_ABI_ILP32 = 1
68 };
69
70 /* AArch64 ABI for the output file. */
71 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
72
73 /* When non-zero, program to a 32-bit model, in which the C data types
74 int, long and all pointer types are 32-bit objects (ILP32); or to a
75 64-bit model, in which the C int type is 32-bits but the C long type
76 and all pointer types are 64-bit objects (LP64). */
77 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
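/* The ABI is normally selected on the gas command line (e.g. -mabi=ilp32
   or -mabi=lp64); ilp32_p is then consulted wherever this file has to
   choose between 32-bit and 64-bit address handling.  */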
78 #endif
79
80 enum neon_el_type
81 {
82 NT_invtype = -1,
83 NT_b,
84 NT_h,
85 NT_s,
86 NT_d,
87 NT_q
88 };
89
90 /* Bits for DEFINED field in neon_type_el. */
91 #define NTA_HASTYPE 1
92 #define NTA_HASINDEX 2
93
94 struct neon_type_el
95 {
96 enum neon_el_type type;
97 unsigned char defined;
98 unsigned width;
99 int64_t index;
100 };
101
102 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
103
104 struct reloc
105 {
106 bfd_reloc_code_real_type type;
107 expressionS exp;
108 int pc_rel;
109 enum aarch64_opnd opnd;
110 uint32_t flags;
111 unsigned need_libopcodes_p : 1;
112 };
113
114 struct aarch64_instruction
115 {
116 /* libopcodes structure for instruction intermediate representation. */
117 aarch64_inst base;
118 /* Record assembly errors found during the parsing. */
119 struct
120 {
121 enum aarch64_operand_error_kind kind;
122 const char *error;
123 } parsing_error;
124 /* The condition that appears in the assembly line. */
125 int cond;
126 /* Relocation information (including the GAS internal fixup). */
127 struct reloc reloc;
128 /* Need to generate an immediate in the literal pool. */
129 unsigned gen_lit_pool : 1;
130 };
131
132 typedef struct aarch64_instruction aarch64_instruction;
133
134 static aarch64_instruction inst;
135
136 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
137 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
138
139 /* Diagnostics inline function utilities.
140
141 These are lightweight utilities which should only be called by parse_operands
142 and other parsers. GAS processes each assembly line by parsing it against
143 instruction template(s); when there are multiple templates (for the same
144 mnemonic name), they are tried one by one until one succeeds or
145 all fail. An assembly line may fail a few templates before being
146 successfully parsed; an error saved here is in most cases not a user error
147 but an indication that the current template is not the right one.
148 Therefore it is very important that errors can be saved at a low cost during
149 parsing; we don't want to slow down the whole parse by recording
150 non-user errors in detail.
151
152 Remember that the objective is to help GAS pick the most appropriate
153 error message in the case of multiple templates, e.g. FMOV, which has 8
154 templates. */
155
156 static inline void
157 clear_error (void)
158 {
159 inst.parsing_error.kind = AARCH64_OPDE_NIL;
160 inst.parsing_error.error = NULL;
161 }
162
163 static inline bfd_boolean
164 error_p (void)
165 {
166 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
167 }
168
169 static inline const char *
170 get_error_message (void)
171 {
172 return inst.parsing_error.error;
173 }
174
175 static inline void
176 set_error_message (const char *error)
177 {
178 inst.parsing_error.error = error;
179 }
180
181 static inline enum aarch64_operand_error_kind
182 get_error_kind (void)
183 {
184 return inst.parsing_error.kind;
185 }
186
187 static inline void
188 set_error_kind (enum aarch64_operand_error_kind kind)
189 {
190 inst.parsing_error.kind = kind;
191 }
192
193 static inline void
194 set_error (enum aarch64_operand_error_kind kind, const char *error)
195 {
196 inst.parsing_error.kind = kind;
197 inst.parsing_error.error = error;
198 }
199
200 static inline void
201 set_recoverable_error (const char *error)
202 {
203 set_error (AARCH64_OPDE_RECOVERABLE, error);
204 }
205
206 /* Use the DESC field of the corresponding aarch64_operand entry to compose
207 the error message. */
208 static inline void
209 set_default_error (void)
210 {
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
212 }
213
214 static inline void
215 set_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
218 }
219
220 static inline void
221 set_first_syntax_error (const char *error)
222 {
223 if (! error_p ())
224 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
225 }
226
227 static inline void
228 set_fatal_syntax_error (const char *error)
229 {
230 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
231 }
232 \f
233 /* Number of littlenums required to hold an extended precision number. */
234 #define MAX_LITTLENUMS 6
235
236 /* Return value for certain parsers when the parsing fails; those parsers
237 return the information of the parsed result, e.g. register number, on
238 success. */
239 #define PARSE_FAIL -1
240
241 /* This is an invalid condition code that means no conditional field is
242 present. */
243 #define COND_ALWAYS 0x10
244
245 typedef struct
246 {
247 const char *template;
248 unsigned long value;
249 } asm_barrier_opt;
250
251 typedef struct
252 {
253 const char *template;
254 uint32_t value;
255 } asm_nzcv;
256
257 struct reloc_entry
258 {
259 char *name;
260 bfd_reloc_code_real_type reloc;
261 };
262
263 /* Structure for a hash table entry for a register. */
264 typedef struct
265 {
266 const char *name;
267 unsigned char number;
268 unsigned char type;
269 unsigned char builtin;
270 } reg_entry;
271
272 /* Macros to define the register types and masks for the purpose
273 of parsing. */
274
275 #undef AARCH64_REG_TYPES
276 #define AARCH64_REG_TYPES \
277 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
278 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
279 BASIC_REG_TYPE(SP_32) /* wsp */ \
280 BASIC_REG_TYPE(SP_64) /* sp */ \
281 BASIC_REG_TYPE(Z_32) /* wzr */ \
282 BASIC_REG_TYPE(Z_64) /* xzr */ \
283 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
284 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
285 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
286 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
287 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
288 BASIC_REG_TYPE(CN) /* c[0-7] */ \
289 BASIC_REG_TYPE(VN) /* v[0-31] */ \
290 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
291 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
292 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
293 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
294 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
295 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
296 /* Typecheck: any [BHSDQ]P FP. */ \
297 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
298 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
299 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
300 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
301 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
302 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
303 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
304 /* Any integer register; used for error messages only. */ \
305 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
306 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
307 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
308 /* Pseudo type to mark the end of the enumerator sequence. */ \
309 BASIC_REG_TYPE(MAX)
310
311 #undef BASIC_REG_TYPE
312 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
313 #undef MULTI_REG_TYPE
314 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
315
316 /* Register type enumerators. */
317 typedef enum
318 {
319 /* A list of REG_TYPE_*. */
320 AARCH64_REG_TYPES
321 } aarch64_reg_type;
322
323 #undef BASIC_REG_TYPE
324 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
325 #undef REG_TYPE
326 #define REG_TYPE(T) (1 << REG_TYPE_##T)
327 #undef MULTI_REG_TYPE
328 #define MULTI_REG_TYPE(T,V) V,
329
330 /* Values indexed by aarch64_reg_type to assist the type checking. */
331 static const unsigned reg_type_masks[] =
332 {
333 AARCH64_REG_TYPES
334 };
335
336 #undef BASIC_REG_TYPE
337 #undef REG_TYPE
338 #undef MULTI_REG_TYPE
339 #undef AARCH64_REG_TYPES
340
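/* For illustration: with the expansions above, the basic enumerators are
   numbered in declaration order (REG_TYPE_R_32 == 0, REG_TYPE_R_64 == 1,
   REG_TYPE_SP_64 == 3, ...), and a multi-type entry such as
   reg_type_masks[REG_TYPE_R64_SP] becomes
   REG_TYPE (R_64) | REG_TYPE (SP_64) == (1 << 1) | (1 << 3) == 0x0a,
   so type compatibility can be tested with a simple bitwise AND (see
   aarch64_check_reg_type below).  */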
341 /* Diagnostics used when we don't get a register of the expected type.
342 Note: this has to be kept synchronized with the aarch64_reg_type
343 definitions above. */
344 static const char *
345 get_reg_expected_msg (aarch64_reg_type reg_type)
346 {
347 const char *msg;
348
349 switch (reg_type)
350 {
351 case REG_TYPE_R_32:
352 msg = N_("integer 32-bit register expected");
353 break;
354 case REG_TYPE_R_64:
355 msg = N_("integer 64-bit register expected");
356 break;
357 case REG_TYPE_R_N:
358 msg = N_("integer register expected");
359 break;
360 case REG_TYPE_R_Z_SP:
361 msg = N_("integer, zero or SP register expected");
362 break;
363 case REG_TYPE_FP_B:
364 msg = N_("8-bit SIMD scalar register expected");
365 break;
366 case REG_TYPE_FP_H:
367 msg = N_("16-bit SIMD scalar or floating-point half precision "
368 "register expected");
369 break;
370 case REG_TYPE_FP_S:
371 msg = N_("32-bit SIMD scalar or floating-point single precision "
372 "register expected");
373 break;
374 case REG_TYPE_FP_D:
375 msg = N_("64-bit SIMD scalar or floating-point double precision "
376 "register expected");
377 break;
378 case REG_TYPE_FP_Q:
379 msg = N_("128-bit SIMD scalar or floating-point quad precision "
380 "register expected");
381 break;
382 case REG_TYPE_CN:
383 msg = N_("C0 - C15 expected");
384 break;
385 case REG_TYPE_R_Z_BHSDQ_V:
386 msg = N_("register expected");
387 break;
388 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
389 msg = N_("SIMD scalar or floating-point register expected");
390 break;
391 case REG_TYPE_VN: /* any V reg */
392 msg = N_("vector register expected");
393 break;
394 default:
395 as_fatal (_("invalid register type %d"), reg_type);
396 }
397 return msg;
398 }
399
400 /* Some well known registers that we refer to directly elsewhere. */
401 #define REG_SP 31
402
403 /* Instructions take 4 bytes in the object file. */
404 #define INSN_SIZE 4
405
406 /* Define some common error messages. */
407 #define BAD_SP _("SP not allowed here")
408
409 static struct hash_control *aarch64_ops_hsh;
410 static struct hash_control *aarch64_cond_hsh;
411 static struct hash_control *aarch64_shift_hsh;
412 static struct hash_control *aarch64_sys_regs_hsh;
413 static struct hash_control *aarch64_pstatefield_hsh;
414 static struct hash_control *aarch64_sys_regs_ic_hsh;
415 static struct hash_control *aarch64_sys_regs_dc_hsh;
416 static struct hash_control *aarch64_sys_regs_at_hsh;
417 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
418 static struct hash_control *aarch64_reg_hsh;
419 static struct hash_control *aarch64_barrier_opt_hsh;
420 static struct hash_control *aarch64_nzcv_hsh;
421 static struct hash_control *aarch64_pldop_hsh;
422
423 /* Stuff needed to resolve the label ambiguity
424 As:
425 ...
426 label: <insn>
427 may differ from:
428 ...
429 label:
430 <insn> */
431
432 static symbolS *last_label_seen;
433
434 /* Literal pool structure. Held on a per-section
435 and per-sub-section basis. */
436
437 #define MAX_LITERAL_POOL_SIZE 1024
438 typedef struct literal_expression
439 {
440 expressionS exp;
441 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
442 LITTLENUM_TYPE * bignum;
443 } literal_expression;
444
445 typedef struct literal_pool
446 {
447 literal_expression literals[MAX_LITERAL_POOL_SIZE];
448 unsigned int next_free_entry;
449 unsigned int id;
450 symbolS *symbol;
451 segT section;
452 subsegT sub_section;
453 int size;
454 struct literal_pool *next;
455 } literal_pool;
456
457 /* Pointer to a linked list of literal pools. */
458 static literal_pool *list_of_pools = NULL;
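/* Illustrative use, assuming the usual "ldr Rt, =value" literal handling
   later in this file (see gen_lit_pool in struct aarch64_instruction):

       ldr  x0, =0x1122334455667788   // too wide for a move-immediate
       ...
       .ltorg                         // s_ltorg dumps the 8-byte pool here

   A separate pool is kept for each (section, subsection, entry size)
   combination; see find_literal_pool.  */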
459 \f
460 /* Pure syntax. */
461
462 /* This array holds the chars that always start a comment. If the
463 pre-processor is disabled, these aren't very useful. */
464 const char comment_chars[] = "";
465
466 /* This array holds the chars that only start a comment at the beginning of
467 a line. If the line seems to have the form '# 123 filename'
468 .line and .file directives will appear in the pre-processed output. */
469 /* Note that input_file.c hand checks for '#' at the beginning of the
470 first line of the input file. This is because the compiler outputs
471 #NO_APP at the beginning of its output. */
472 /* Also note that comments like this one will always work. */
473 const char line_comment_chars[] = "#";
474
475 const char line_separator_chars[] = ";";
476
477 /* Chars that can be used to separate the mantissa
478 from the exponent in floating point numbers. */
479 const char EXP_CHARS[] = "eE";
480
481 /* Chars that mean this number is a floating point constant. */
482 /* As in 0f12.456 */
483 /* or 0d1.2345e12 */
484
485 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
486
487 /* Prefix character that indicates the start of an immediate value. */
488 #define is_immediate_prefix(C) ((C) == '#')
489
490 /* Separator character handling. */
491
492 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
493
494 static inline bfd_boolean
495 skip_past_char (char **str, char c)
496 {
497 if (**str == c)
498 {
499 (*str)++;
500 return TRUE;
501 }
502 else
503 return FALSE;
504 }
505
506 #define skip_past_comma(str) skip_past_char (str, ',')
507
508 /* Arithmetic expressions (possibly involving symbols). */
509
510 static bfd_boolean in_my_get_expression_p = FALSE;
511
512 /* Third argument to my_get_expression. */
513 #define GE_NO_PREFIX 0
514 #define GE_OPT_PREFIX 1
515
516 /* Return TRUE if the string pointed to by *STR is successfully parsed
517 as a valid expression; *EP will be filled with the information of
518 such an expression. Otherwise return FALSE. */
519
520 static bfd_boolean
521 my_get_expression (expressionS * ep, char **str, int prefix_mode,
522 int reject_absent)
523 {
524 char *save_in;
525 segT seg;
526 int prefix_present_p = 0;
527
528 switch (prefix_mode)
529 {
530 case GE_NO_PREFIX:
531 break;
532 case GE_OPT_PREFIX:
533 if (is_immediate_prefix (**str))
534 {
535 (*str)++;
536 prefix_present_p = 1;
537 }
538 break;
539 default:
540 abort ();
541 }
542
543 memset (ep, 0, sizeof (expressionS));
544
545 save_in = input_line_pointer;
546 input_line_pointer = *str;
547 in_my_get_expression_p = TRUE;
548 seg = expression (ep);
549 in_my_get_expression_p = FALSE;
550
551 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
552 {
553 /* We found a bad expression in md_operand(). */
554 *str = input_line_pointer;
555 input_line_pointer = save_in;
556 if (prefix_present_p && ! error_p ())
557 set_fatal_syntax_error (_("bad expression"));
558 else
559 set_first_syntax_error (_("bad expression"));
560 return FALSE;
561 }
562
563 #ifdef OBJ_AOUT
564 if (seg != absolute_section
565 && seg != text_section
566 && seg != data_section
567 && seg != bss_section && seg != undefined_section)
568 {
569 set_syntax_error (_("bad segment"));
570 *str = input_line_pointer;
571 input_line_pointer = save_in;
572 return FALSE;
573 }
574 #else
575 (void) seg;
576 #endif
577
578 *str = input_line_pointer;
579 input_line_pointer = save_in;
580 return TRUE;
581 }
582
583 /* Turn a string in input_line_pointer into a floating point constant
584 of type TYPE, and store the appropriate bytes in *LITP. The number
585 of LITTLENUMS emitted is stored in *SIZEP. An error message is
586 returned, or NULL on OK. */
587
588 char *
589 md_atof (int type, char *litP, int *sizeP)
590 {
591 return ieee_md_atof (type, litP, sizeP, target_big_endian);
592 }
593
594 /* We handle all bad expressions here, so that we can report the faulty
595 instruction in the error message. */
596 void
597 md_operand (expressionS * exp)
598 {
599 if (in_my_get_expression_p)
600 exp->X_op = O_illegal;
601 }
602
603 /* Immediate values. */
604
605 /* Errors may be set multiple times during parsing or bit encoding
606 (particularly in the Neon bits), but usually the earliest error which is set
607 will be the most meaningful. Avoid overwriting it with later (cascading)
608 errors by calling this function. */
609
610 static void
611 first_error (const char *error)
612 {
613 if (! error_p ())
614 set_syntax_error (error);
615 }
616
617 /* Similar to first_error, but this function accepts a formatted error
618 message. */
619 static void
620 first_error_fmt (const char *format, ...)
621 {
622 va_list args;
623 enum
624 { size = 100 };
625 /* N.B. this single buffer will not cause error messages for different
626 instructions to pollute each other; this is because at the end of
627 processing of each assembly line, the error message, if any, will be
628 collected by as_bad. */
629 static char buffer[size];
630
631 if (! error_p ())
632 {
633 int ret ATTRIBUTE_UNUSED;
634 va_start (args, format);
635 ret = vsnprintf (buffer, size, format, args);
636 know (ret <= size - 1 && ret >= 0);
637 va_end (args);
638 set_syntax_error (buffer);
639 }
640 }
641
642 /* Register parsing. */
643
644 /* Generic register parser which is called by other specialized
645 register parsers.
646 CCP points to what should be the beginning of a register name.
647 If it is indeed a valid register name, advance CCP over it and
648 return the reg_entry structure; otherwise return NULL.
649 It does not issue diagnostics. */
650
651 static reg_entry *
652 parse_reg (char **ccp)
653 {
654 char *start = *ccp;
655 char *p;
656 reg_entry *reg;
657
658 #ifdef REGISTER_PREFIX
659 if (*start != REGISTER_PREFIX)
660 return NULL;
661 start++;
662 #endif
663
664 p = start;
665 if (!ISALPHA (*p) || !is_name_beginner (*p))
666 return NULL;
667
668 do
669 p++;
670 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
671
672 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
673
674 if (!reg)
675 return NULL;
676
677 *ccp = p;
678 return reg;
679 }
680
681 /* Return TRUE if REG->TYPE is compatible with the required type TYPE;
682 otherwise return FALSE. */
683 static bfd_boolean
684 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
685 {
686 if (reg->type == type)
687 return TRUE;
688
689 switch (type)
690 {
691 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
692 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
693 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
694 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
695 case REG_TYPE_VN: /* Vector register. */
696 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
697 return ((reg_type_masks[reg->type] & reg_type_masks[type])
698 == reg_type_masks[reg->type]);
699 default:
700 as_fatal ("unhandled type %d", type);
701 abort ();
702 }
703 }
704
705 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
706 Return the register number otherwise. *ISREG32 is set to one if the
707 register is 32-bit wide; *ISREGZERO is set to one if the register is
708 of type Z_32 or Z_64.
709 Note that this function does not issue any diagnostics. */
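/* For example (illustrative, using the built-in register table): "w3"
   yields 3 with *ISREG32 = 1 and *ISREGZERO = 0; "sp" yields 31 with
   *ISREG32 = 0 (or PARSE_FAIL when REJECT_SP); "xzr" yields 31 with
   *ISREGZERO = 1 (or PARSE_FAIL when REJECT_RZ).  */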
710
711 static int
712 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
713 int *isreg32, int *isregzero)
714 {
715 char *str = *ccp;
716 const reg_entry *reg = parse_reg (&str);
717
718 if (reg == NULL)
719 return PARSE_FAIL;
720
721 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
722 return PARSE_FAIL;
723
724 switch (reg->type)
725 {
726 case REG_TYPE_SP_32:
727 case REG_TYPE_SP_64:
728 if (reject_sp)
729 return PARSE_FAIL;
730 *isreg32 = reg->type == REG_TYPE_SP_32;
731 *isregzero = 0;
732 break;
733 case REG_TYPE_R_32:
734 case REG_TYPE_R_64:
735 *isreg32 = reg->type == REG_TYPE_R_32;
736 *isregzero = 0;
737 break;
738 case REG_TYPE_Z_32:
739 case REG_TYPE_Z_64:
740 if (reject_rz)
741 return PARSE_FAIL;
742 *isreg32 = reg->type == REG_TYPE_Z_32;
743 *isregzero = 1;
744 break;
745 default:
746 return PARSE_FAIL;
747 }
748
749 *ccp = str;
750
751 return reg->number;
752 }
753
754 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
755 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
756 otherwise return FALSE.
757
758 Accept only one occurrence of:
759 8b 16b 4h 8h 2s 4s 1d 2d
760 b h s d q */
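/* For example, ".4s" parses as width 4 with 32-bit elements (a 128-bit
   arrangement) and ".2s" as a 64-bit one, while ".3s" fails the width
   check and ".4d" (4 * 64 = 256 bits) fails the size-combination check
   below.  */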
761 static bfd_boolean
762 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
763 {
764 char *ptr = *str;
765 unsigned width;
766 unsigned element_size;
767 enum neon_el_type type;
768
769 /* skip '.' */
770 ptr++;
771
772 if (!ISDIGIT (*ptr))
773 {
774 width = 0;
775 goto elt_size;
776 }
777 width = strtoul (ptr, &ptr, 10);
778 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
779 {
780 first_error_fmt (_("bad size %d in vector width specifier"), width);
781 return FALSE;
782 }
783
784 elt_size:
785 switch (TOLOWER (*ptr))
786 {
787 case 'b':
788 type = NT_b;
789 element_size = 8;
790 break;
791 case 'h':
792 type = NT_h;
793 element_size = 16;
794 break;
795 case 's':
796 type = NT_s;
797 element_size = 32;
798 break;
799 case 'd':
800 type = NT_d;
801 element_size = 64;
802 break;
803 case 'q':
804 if (width == 1)
805 {
806 type = NT_q;
807 element_size = 128;
808 break;
809 }
810 /* fall through. */
811 default:
812 if (*ptr != '\0')
813 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
814 else
815 first_error (_("missing element size"));
816 return FALSE;
817 }
818 if (width != 0 && width * element_size != 64 && width * element_size != 128)
819 {
820 first_error_fmt (_
821 ("invalid element size %d and vector size combination %c"),
822 width, *ptr);
823 return FALSE;
824 }
825 ptr++;
826
827 parsed_type->type = type;
828 parsed_type->width = width;
829
830 *str = ptr;
831
832 return TRUE;
833 }
834
835 /* Parse a single type, e.g. ".8b", leading period included.
836 Only applicable to Vn registers.
837
838 Return TRUE on success; otherwise return FALSE. */
839 static bfd_boolean
840 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
841 {
842 char *str = *ccp;
843
844 if (*str == '.')
845 {
846 if (! parse_neon_type_for_operand (vectype, &str))
847 {
848 first_error (_("vector type expected"));
849 return FALSE;
850 }
851 }
852 else
853 return FALSE;
854
855 *ccp = str;
856
857 return TRUE;
858 }
859
860 /* Parse a register of the type TYPE.
861
862 Return PARSE_FAIL if the string pointed to by *CCP is not a valid register
863 name or the parsed register is not of TYPE.
864
865 Otherwise return the register number, and optionally fill in the actual
866 type of the register in *RTYPE when multiple alternatives were given, and
867 return the register shape and element index information in *TYPEINFO.
868
869 IN_REG_LIST should be set with TRUE if the caller is parsing a register
870 list. */
871
872 static int
873 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
874 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
875 {
876 char *str = *ccp;
877 const reg_entry *reg = parse_reg (&str);
878 struct neon_type_el atype;
879 struct neon_type_el parsetype;
880 bfd_boolean is_typed_vecreg = FALSE;
881
882 atype.defined = 0;
883 atype.type = NT_invtype;
884 atype.width = -1;
885 atype.index = 0;
886
887 if (reg == NULL)
888 {
889 if (typeinfo)
890 *typeinfo = atype;
891 set_default_error ();
892 return PARSE_FAIL;
893 }
894
895 if (! aarch64_check_reg_type (reg, type))
896 {
897 DEBUG_TRACE ("reg type check failed");
898 set_default_error ();
899 return PARSE_FAIL;
900 }
901 type = reg->type;
902
903 if (type == REG_TYPE_VN
904 && parse_neon_operand_type (&parsetype, &str))
905 {
906 /* Register is of the form Vn.[bhsdq]. */
907 is_typed_vecreg = TRUE;
908
909 if (parsetype.width == 0)
910 /* Expect an index. In the new scheme we cannot have
911 Vn.[bhsdq] represent a scalar. Therefore any
912 Vn.[bhsdq] should have an index following it,
913 except in register lists of course. */
914 atype.defined |= NTA_HASINDEX;
915 else
916 atype.defined |= NTA_HASTYPE;
917
918 atype.type = parsetype.type;
919 atype.width = parsetype.width;
920 }
921
922 if (skip_past_char (&str, '['))
923 {
924 expressionS exp;
925
926 /* Reject Sn[index] syntax. */
927 if (!is_typed_vecreg)
928 {
929 first_error (_("this type of register can't be indexed"));
930 return PARSE_FAIL;
931 }
932
933 if (in_reg_list == TRUE)
934 {
935 first_error (_("index not allowed inside register list"));
936 return PARSE_FAIL;
937 }
938
939 atype.defined |= NTA_HASINDEX;
940
941 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
942
943 if (exp.X_op != O_constant)
944 {
945 first_error (_("constant expression required"));
946 return PARSE_FAIL;
947 }
948
949 if (! skip_past_char (&str, ']'))
950 return PARSE_FAIL;
951
952 atype.index = exp.X_add_number;
953 }
954 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
955 {
956 /* Indexed vector register expected. */
957 first_error (_("indexed vector register expected"));
958 return PARSE_FAIL;
959 }
960
961 /* A vector reg Vn should be typed or indexed. */
962 if (type == REG_TYPE_VN && atype.defined == 0)
963 {
964 first_error (_("invalid use of vector register"));
965 }
966
967 if (typeinfo)
968 *typeinfo = atype;
969
970 if (rtype)
971 *rtype = type;
972
973 *ccp = str;
974
975 return reg->number;
976 }
977
978 /* Parse register.
979
980 Return the register number on success; return PARSE_FAIL otherwise.
981
982 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
983 the register (e.g. NEON double or quad reg when either has been requested).
984
985 If this is a NEON vector register with additional type information, fill
986 in the struct pointed to by VECTYPE (if non-NULL).
987
988 This parser does not handle register lists. */
989
990 static int
991 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
992 aarch64_reg_type *rtype, struct neon_type_el *vectype)
993 {
994 struct neon_type_el atype;
995 char *str = *ccp;
996 int reg = parse_typed_reg (&str, type, rtype, &atype,
997 /*in_reg_list= */ FALSE);
998
999 if (reg == PARSE_FAIL)
1000 return PARSE_FAIL;
1001
1002 if (vectype)
1003 *vectype = atype;
1004
1005 *ccp = str;
1006
1007 return reg;
1008 }
1009
1010 static inline bfd_boolean
1011 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
1012 {
1013 return
1014 e1.type == e2.type
1015 && e1.defined == e2.defined
1016 && e1.width == e2.width && e1.index == e2.index;
1017 }
1018
1019 /* This function parses the NEON register list. On success, it returns
1020 the parsed register list information in the following encoded format:
1021
1022 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1023 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1024
1025 The information of the register shape and/or index is returned in
1026 *VECTYPE.
1027
1028 It returns PARSE_FAIL if the register list is invalid.
1029
1030 The list contains one to four registers.
1031 Each register can be one of:
1032 <Vt>.<T>[<index>]
1033 <Vt>.<T>
1034 All <T> should be identical.
1035 All <index> should be identical.
1036 There are restrictions on <Vt> numbers which are checked later
1037 (by reg_list_valid_p). */
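/* For example (illustrative), "{v2.4s, v3.4s}" gives nb_regs == 2 and
   ret_val == 2 | (3 << 5) == 0x62, so the function returns
   (0x62 << 2) | (2 - 1) == 0x189: bits 0-1 hold the register count minus
   one, bits 2-6 the first register number (2) and bits 7-11 the second
   (3).  */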
1038
1039 static int
1040 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1041 {
1042 char *str = *ccp;
1043 int nb_regs;
1044 struct neon_type_el typeinfo, typeinfo_first;
1045 int val, val_range;
1046 int in_range;
1047 int ret_val;
1048 int i;
1049 bfd_boolean error = FALSE;
1050 bfd_boolean expect_index = FALSE;
1051
1052 if (*str != '{')
1053 {
1054 set_syntax_error (_("expecting {"));
1055 return PARSE_FAIL;
1056 }
1057 str++;
1058
1059 nb_regs = 0;
1060 typeinfo_first.defined = 0;
1061 typeinfo_first.type = NT_invtype;
1062 typeinfo_first.width = -1;
1063 typeinfo_first.index = 0;
1064 ret_val = 0;
1065 val = -1;
1066 val_range = -1;
1067 in_range = 0;
1068 do
1069 {
1070 if (in_range)
1071 {
1072 str++; /* skip over '-' */
1073 val_range = val;
1074 }
1075 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1076 /*in_reg_list= */ TRUE);
1077 if (val == PARSE_FAIL)
1078 {
1079 set_first_syntax_error (_("invalid vector register in list"));
1080 error = TRUE;
1081 continue;
1082 }
1083 /* reject [bhsd]n */
1084 if (typeinfo.defined == 0)
1085 {
1086 set_first_syntax_error (_("invalid scalar register in list"));
1087 error = TRUE;
1088 continue;
1089 }
1090
1091 if (typeinfo.defined & NTA_HASINDEX)
1092 expect_index = TRUE;
1093
1094 if (in_range)
1095 {
1096 if (val < val_range)
1097 {
1098 set_first_syntax_error
1099 (_("invalid range in vector register list"));
1100 error = TRUE;
1101 }
1102 val_range++;
1103 }
1104 else
1105 {
1106 val_range = val;
1107 if (nb_regs == 0)
1108 typeinfo_first = typeinfo;
1109 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1110 {
1111 set_first_syntax_error
1112 (_("type mismatch in vector register list"));
1113 error = TRUE;
1114 }
1115 }
1116 if (! error)
1117 for (i = val_range; i <= val; i++)
1118 {
1119 ret_val |= i << (5 * nb_regs);
1120 nb_regs++;
1121 }
1122 in_range = 0;
1123 }
1124 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1125
1126 skip_whitespace (str);
1127 if (*str != '}')
1128 {
1129 set_first_syntax_error (_("end of vector register list not found"));
1130 error = TRUE;
1131 }
1132 str++;
1133
1134 skip_whitespace (str);
1135
1136 if (expect_index)
1137 {
1138 if (skip_past_char (&str, '['))
1139 {
1140 expressionS exp;
1141
1142 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1143 if (exp.X_op != O_constant)
1144 {
1145 set_first_syntax_error (_("constant expression required."));
1146 error = TRUE;
1147 }
1148 if (! skip_past_char (&str, ']'))
1149 error = TRUE;
1150 else
1151 typeinfo_first.index = exp.X_add_number;
1152 }
1153 else
1154 {
1155 set_first_syntax_error (_("expected index"));
1156 error = TRUE;
1157 }
1158 }
1159
1160 if (nb_regs > 4)
1161 {
1162 set_first_syntax_error (_("too many registers in vector register list"));
1163 error = TRUE;
1164 }
1165 else if (nb_regs == 0)
1166 {
1167 set_first_syntax_error (_("empty vector register list"));
1168 error = TRUE;
1169 }
1170
1171 *ccp = str;
1172 if (! error)
1173 *vectype = typeinfo_first;
1174
1175 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1176 }
1177
1178 /* Directives: register aliases. */
1179
1180 static reg_entry *
1181 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1182 {
1183 reg_entry *new;
1184 const char *name;
1185
1186 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1187 {
1188 if (new->builtin)
1189 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1190 str);
1191
1192 /* Only warn about a redefinition if it's not defined as the
1193 same register. */
1194 else if (new->number != number || new->type != type)
1195 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1196
1197 return NULL;
1198 }
1199
1200 name = xstrdup (str);
1201 new = xmalloc (sizeof (reg_entry));
1202
1203 new->name = name;
1204 new->number = number;
1205 new->type = type;
1206 new->builtin = FALSE;
1207
1208 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1209 abort ();
1210
1211 return new;
1212 }
1213
1214 /* Look for the .req directive. This is of the form:
1215
1216 new_register_name .req existing_register_name
1217
1218 If we find one, or if it looks sufficiently like one that we want to
1219 handle any error here, return TRUE. Otherwise return FALSE. */
1220
1221 static bfd_boolean
1222 create_register_alias (char *newname, char *p)
1223 {
1224 const reg_entry *old;
1225 char *oldname, *nbuf;
1226 size_t nlen;
1227
1228 /* The input scrubber ensures that whitespace after the mnemonic is
1229 collapsed to single spaces. */
1230 oldname = p;
1231 if (strncmp (oldname, " .req ", 6) != 0)
1232 return FALSE;
1233
1234 oldname += 6;
1235 if (*oldname == '\0')
1236 return FALSE;
1237
1238 old = hash_find (aarch64_reg_hsh, oldname);
1239 if (!old)
1240 {
1241 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1242 return TRUE;
1243 }
1244
1245 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1246 the desired alias name, and p points to its end. If not, then
1247 the desired alias name is in the global original_case_string. */
1248 #ifdef TC_CASE_SENSITIVE
1249 nlen = p - newname;
1250 #else
1251 newname = original_case_string;
1252 nlen = strlen (newname);
1253 #endif
1254
1255 nbuf = alloca (nlen + 1);
1256 memcpy (nbuf, newname, nlen);
1257 nbuf[nlen] = '\0';
1258
1259 /* Create aliases under the new name as stated; an all-lowercase
1260 version of the new name; and an all-uppercase version of the new
1261 name. */
1262 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1263 {
1264 for (p = nbuf; *p; p++)
1265 *p = TOUPPER (*p);
1266
1267 if (strncmp (nbuf, newname, nlen))
1268 {
1269 /* If this attempt to create an additional alias fails, do not bother
1270 trying to create the all-lower case alias. We will fail and issue
1271 a second, duplicate error message. This situation arises when the
1272 programmer does something like:
1273 foo .req r0
1274 Foo .req r1
1275 The second .req creates the "Foo" alias but then fails to create
1276 the artificial FOO alias because it has already been created by the
1277 first .req. */
1278 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1279 return TRUE;
1280 }
1281
1282 for (p = nbuf; *p; p++)
1283 *p = TOLOWER (*p);
1284
1285 if (strncmp (nbuf, newname, nlen))
1286 insert_reg_alias (nbuf, old->number, old->type);
1287 }
1288
1289 return TRUE;
1290 }
1291
1292 /* Should never be called, as .req goes between the alias and the
1293 register name, not at the beginning of the line. */
1294 static void
1295 s_req (int a ATTRIBUTE_UNUSED)
1296 {
1297 as_bad (_("invalid syntax for .req directive"));
1298 }
1299
1300 /* The .unreq directive deletes an alias which was previously defined
1301 by .req. For example:
1302
1303 my_alias .req r11
1304 .unreq my_alias */
1305
1306 static void
1307 s_unreq (int a ATTRIBUTE_UNUSED)
1308 {
1309 char *name;
1310 char saved_char;
1311
1312 name = input_line_pointer;
1313
1314 while (*input_line_pointer != 0
1315 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1316 ++input_line_pointer;
1317
1318 saved_char = *input_line_pointer;
1319 *input_line_pointer = 0;
1320
1321 if (!*name)
1322 as_bad (_("invalid syntax for .unreq directive"));
1323 else
1324 {
1325 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1326
1327 if (!reg)
1328 as_bad (_("unknown register alias '%s'"), name);
1329 else if (reg->builtin)
1330 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1331 name);
1332 else
1333 {
1334 char *p;
1335 char *nbuf;
1336
1337 hash_delete (aarch64_reg_hsh, name, FALSE);
1338 free ((char *) reg->name);
1339 free (reg);
1340
1341 /* Also locate the all upper case and all lower case versions.
1342 Do not complain if we cannot find one or the other as it
1343 was probably deleted above. */
1344
1345 nbuf = strdup (name);
1346 for (p = nbuf; *p; p++)
1347 *p = TOUPPER (*p);
1348 reg = hash_find (aarch64_reg_hsh, nbuf);
1349 if (reg)
1350 {
1351 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1352 free ((char *) reg->name);
1353 free (reg);
1354 }
1355
1356 for (p = nbuf; *p; p++)
1357 *p = TOLOWER (*p);
1358 reg = hash_find (aarch64_reg_hsh, nbuf);
1359 if (reg)
1360 {
1361 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1362 free ((char *) reg->name);
1363 free (reg);
1364 }
1365
1366 free (nbuf);
1367 }
1368 }
1369
1370 *input_line_pointer = saved_char;
1371 demand_empty_rest_of_line ();
1372 }
1373
1374 /* Directives: Instruction set selection. */
1375
1376 #ifdef OBJ_ELF
1377 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1378 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1379 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1380 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
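/* For example, a text section that starts with instructions and is later
   followed by literal data (e.g. from .ltorg) will carry a "$x" mapping
   symbol at offset 0 and a "$d" mapping symbol at the start of the data,
   so disassemblers and linkers can tell code from data.  */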
1381
1382 /* Create a new mapping symbol for the transition to STATE. */
1383
1384 static void
1385 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1386 {
1387 symbolS *symbolP;
1388 const char *symname;
1389 int type;
1390
1391 switch (state)
1392 {
1393 case MAP_DATA:
1394 symname = "$d";
1395 type = BSF_NO_FLAGS;
1396 break;
1397 case MAP_INSN:
1398 symname = "$x";
1399 type = BSF_NO_FLAGS;
1400 break;
1401 default:
1402 abort ();
1403 }
1404
1405 symbolP = symbol_new (symname, now_seg, value, frag);
1406 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1407
1408 /* Save the mapping symbols for future reference. Also check that
1409 we do not place two mapping symbols at the same offset within a
1410 frag. We'll handle overlap between frags in
1411 check_mapping_symbols.
1412
1413 If .fill or other data filling directive generates zero sized data,
1414 the mapping symbol for the following code will have the same value
1415 as the one generated for the data filling directive. In this case,
1416 we replace the old symbol with the new one at the same address. */
1417 if (value == 0)
1418 {
1419 if (frag->tc_frag_data.first_map != NULL)
1420 {
1421 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1422 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1423 &symbol_lastP);
1424 }
1425 frag->tc_frag_data.first_map = symbolP;
1426 }
1427 if (frag->tc_frag_data.last_map != NULL)
1428 {
1429 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1430 S_GET_VALUE (symbolP));
1431 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1432 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1433 &symbol_lastP);
1434 }
1435 frag->tc_frag_data.last_map = symbolP;
1436 }
1437
1438 /* We must sometimes convert a region marked as code to data during
1439 code alignment, if an odd number of bytes have to be padded. The
1440 code mapping symbol is pushed to an aligned address. */
1441
1442 static void
1443 insert_data_mapping_symbol (enum mstate state,
1444 valueT value, fragS * frag, offsetT bytes)
1445 {
1446 /* If there was already a mapping symbol, remove it. */
1447 if (frag->tc_frag_data.last_map != NULL
1448 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1449 frag->fr_address + value)
1450 {
1451 symbolS *symp = frag->tc_frag_data.last_map;
1452
1453 if (value == 0)
1454 {
1455 know (frag->tc_frag_data.first_map == symp);
1456 frag->tc_frag_data.first_map = NULL;
1457 }
1458 frag->tc_frag_data.last_map = NULL;
1459 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1460 }
1461
1462 make_mapping_symbol (MAP_DATA, value, frag);
1463 make_mapping_symbol (state, value + bytes, frag);
1464 }
1465
1466 static void mapping_state_2 (enum mstate state, int max_chars);
1467
1468 /* Set the mapping state to STATE. Only call this when about to
1469 emit some STATE bytes to the file. */
1470
1471 void
1472 mapping_state (enum mstate state)
1473 {
1474 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1475
1476 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1477
1478 if (mapstate == state)
1479 /* The mapping symbol has already been emitted.
1480 There is nothing else to do. */
1481 return;
1482 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1483 /* This case will be evaluated later in the next else. */
1484 return;
1485 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1486 {
1487 /* Only add the symbol if the offset is > 0:
1488 if we're at the first frag, check it's size > 0;
1489 if we're not at the first frag, then for sure
1490 the offset is > 0. */
1491 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1492 const int add_symbol = (frag_now != frag_first)
1493 || (frag_now_fix () > 0);
1494
1495 if (add_symbol)
1496 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1497 }
1498
1499 mapping_state_2 (state, 0);
1500 #undef TRANSITION
1501 }
1502
1503 /* Same as mapping_state, but MAX_CHARS bytes have already been
1504 allocated. Put the mapping symbol that far back. */
1505
1506 static void
1507 mapping_state_2 (enum mstate state, int max_chars)
1508 {
1509 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1510
1511 if (!SEG_NORMAL (now_seg))
1512 return;
1513
1514 if (mapstate == state)
1515 /* The mapping symbol has already been emitted.
1516 There is nothing else to do. */
1517 return;
1518
1519 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1520 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1521 }
1522 #else
1523 #define mapping_state(x) /* nothing */
1524 #define mapping_state_2(x, y) /* nothing */
1525 #endif
1526
1527 /* Directives: sectioning and alignment. */
1528
1529 static void
1530 s_bss (int ignore ATTRIBUTE_UNUSED)
1531 {
1532 /* We don't support putting frags in the BSS segment; we fake it by
1533 marking in_bss, then looking at s_skip for clues. */
1534 subseg_set (bss_section, 0);
1535 demand_empty_rest_of_line ();
1536 mapping_state (MAP_DATA);
1537 }
1538
1539 static void
1540 s_even (int ignore ATTRIBUTE_UNUSED)
1541 {
1542 /* Never make frag if expect extra pass. */
1543 if (!need_pass_2)
1544 frag_align (1, 0, 0);
1545
1546 record_alignment (now_seg, 1);
1547
1548 demand_empty_rest_of_line ();
1549 }
1550
1551 /* Directives: Literal pools. */
1552
1553 static literal_pool *
1554 find_literal_pool (int size)
1555 {
1556 literal_pool *pool;
1557
1558 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1559 {
1560 if (pool->section == now_seg
1561 && pool->sub_section == now_subseg && pool->size == size)
1562 break;
1563 }
1564
1565 return pool;
1566 }
1567
1568 static literal_pool *
1569 find_or_make_literal_pool (int size)
1570 {
1571 /* Next literal pool ID number. */
1572 static unsigned int latest_pool_num = 1;
1573 literal_pool *pool;
1574
1575 pool = find_literal_pool (size);
1576
1577 if (pool == NULL)
1578 {
1579 /* Create a new pool. */
1580 pool = xmalloc (sizeof (*pool));
1581 if (!pool)
1582 return NULL;
1583
1584 /* Currently we always put the literal pool in the current text
1585 section. If we were generating "small" model code where we
1586 knew that all code and initialised data was within 1MB then
1587 we could output literals to mergeable, read-only data
1588 sections. */
1589
1590 pool->next_free_entry = 0;
1591 pool->section = now_seg;
1592 pool->sub_section = now_subseg;
1593 pool->size = size;
1594 pool->next = list_of_pools;
1595 pool->symbol = NULL;
1596
1597 /* Add it to the list. */
1598 list_of_pools = pool;
1599 }
1600
1601 /* New pools, and emptied pools, will have a NULL symbol. */
1602 if (pool->symbol == NULL)
1603 {
1604 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1605 (valueT) 0, &zero_address_frag);
1606 pool->id = latest_pool_num++;
1607 }
1608
1609 /* Done. */
1610 return pool;
1611 }
1612
1613 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1614 Return TRUE on success, otherwise return FALSE. */
1615 static bfd_boolean
1616 add_to_lit_pool (expressionS *exp, int size)
1617 {
1618 literal_pool *pool;
1619 unsigned int entry;
1620
1621 pool = find_or_make_literal_pool (size);
1622
1623 /* Check if this literal value is already in the pool. */
1624 for (entry = 0; entry < pool->next_free_entry; entry++)
1625 {
1626 expressionS * litexp = & pool->literals[entry].exp;
1627
1628 if ((litexp->X_op == exp->X_op)
1629 && (exp->X_op == O_constant)
1630 && (litexp->X_add_number == exp->X_add_number)
1631 && (litexp->X_unsigned == exp->X_unsigned))
1632 break;
1633
1634 if ((litexp->X_op == exp->X_op)
1635 && (exp->X_op == O_symbol)
1636 && (litexp->X_add_number == exp->X_add_number)
1637 && (litexp->X_add_symbol == exp->X_add_symbol)
1638 && (litexp->X_op_symbol == exp->X_op_symbol))
1639 break;
1640 }
1641
1642 /* Do we need to create a new entry? */
1643 if (entry == pool->next_free_entry)
1644 {
1645 if (entry >= MAX_LITERAL_POOL_SIZE)
1646 {
1647 set_syntax_error (_("literal pool overflow"));
1648 return FALSE;
1649 }
1650
1651 pool->literals[entry].exp = *exp;
1652 pool->next_free_entry += 1;
1653 if (exp->X_op == O_big)
1654 {
1655 /* PR 16688: Bignums are held in a single global array. We must
1656 copy and preserve that value now, before it is overwritten. */
1657 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1658 memcpy (pool->literals[entry].bignum, generic_bignum,
1659 CHARS_PER_LITTLENUM * exp->X_add_number);
1660 }
1661 else
1662 pool->literals[entry].bignum = NULL;
1663 }
1664
1665 exp->X_op = O_symbol;
1666 exp->X_add_number = ((int) entry) * size;
1667 exp->X_add_symbol = pool->symbol;
1668
1669 return TRUE;
1670 }
1671
1672 /* Can't use symbol_new here, so have to create a symbol and then at
1673 a later date assign it a value. That's what these functions do. */
1674
1675 static void
1676 symbol_locate (symbolS * symbolP,
1677 const char *name,/* It is copied, the caller can modify. */
1678 segT segment, /* Segment identifier (SEG_<something>). */
1679 valueT valu, /* Symbol value. */
1680 fragS * frag) /* Associated fragment. */
1681 {
1682 unsigned int name_length;
1683 char *preserved_copy_of_name;
1684
1685 name_length = strlen (name) + 1; /* +1 for \0. */
1686 obstack_grow (&notes, name, name_length);
1687 preserved_copy_of_name = obstack_finish (&notes);
1688
1689 #ifdef tc_canonicalize_symbol_name
1690 preserved_copy_of_name =
1691 tc_canonicalize_symbol_name (preserved_copy_of_name);
1692 #endif
1693
1694 S_SET_NAME (symbolP, preserved_copy_of_name);
1695
1696 S_SET_SEGMENT (symbolP, segment);
1697 S_SET_VALUE (symbolP, valu);
1698 symbol_clear_list_pointers (symbolP);
1699
1700 symbol_set_frag (symbolP, frag);
1701
1702 /* Link to end of symbol chain. */
1703 {
1704 extern int symbol_table_frozen;
1705
1706 if (symbol_table_frozen)
1707 abort ();
1708 }
1709
1710 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1711
1712 obj_symbol_new_hook (symbolP);
1713
1714 #ifdef tc_symbol_new_hook
1715 tc_symbol_new_hook (symbolP);
1716 #endif
1717
1718 #ifdef DEBUG_SYMS
1719 verify_symbol_chain (symbol_rootP, symbol_lastP);
1720 #endif /* DEBUG_SYMS */
1721 }
1722
1723
1724 static void
1725 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1726 {
1727 unsigned int entry;
1728 literal_pool *pool;
1729 char sym_name[20];
1730 int align;
1731
1732 for (align = 2; align <= 4; align++)
1733 {
1734 int size = 1 << align;
1735
1736 pool = find_literal_pool (size);
1737 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1738 continue;
1739
1740 mapping_state (MAP_DATA);
1741
1742 /* Align the pool, as it will be accessed in word-sized units.
1743 Only make a frag if we have to. */
1744 if (!need_pass_2)
1745 frag_align (align, 0, 0);
1746
1747 record_alignment (now_seg, align);
1748
1749 sprintf (sym_name, "$$lit_\002%x", pool->id);
1750
1751 symbol_locate (pool->symbol, sym_name, now_seg,
1752 (valueT) frag_now_fix (), frag_now);
1753 symbol_table_insert (pool->symbol);
1754
1755 for (entry = 0; entry < pool->next_free_entry; entry++)
1756 {
1757 expressionS * exp = & pool->literals[entry].exp;
1758
1759 if (exp->X_op == O_big)
1760 {
1761 /* PR 16688: Restore the global bignum value. */
1762 gas_assert (pool->literals[entry].bignum != NULL);
1763 memcpy (generic_bignum, pool->literals[entry].bignum,
1764 CHARS_PER_LITTLENUM * exp->X_add_number);
1765 }
1766
1767 /* First output the expression in the instruction to the pool. */
1768 emit_expr (exp, size); /* .word|.xword */
1769
1770 if (exp->X_op == O_big)
1771 {
1772 free (pool->literals[entry].bignum);
1773 pool->literals[entry].bignum = NULL;
1774 }
1775 }
1776
1777 /* Mark the pool as empty. */
1778 pool->next_free_entry = 0;
1779 pool->symbol = NULL;
1780 }
1781 }
1782
1783 #ifdef OBJ_ELF
1784 /* Forward declarations for functions below, in the MD interface
1785 section. */
1786 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1787 static struct reloc_table_entry * find_reloc_table_entry (char **);
1788
1789 /* Directives: Data. */
1790 /* N.B. the support for relocation suffix in this directive needs to be
1791 implemented properly. */
1792
1793 static void
1794 s_aarch64_elf_cons (int nbytes)
1795 {
1796 expressionS exp;
1797
1798 #ifdef md_flush_pending_output
1799 md_flush_pending_output ();
1800 #endif
1801
1802 if (is_it_end_of_statement ())
1803 {
1804 demand_empty_rest_of_line ();
1805 return;
1806 }
1807
1808 #ifdef md_cons_align
1809 md_cons_align (nbytes);
1810 #endif
1811
1812 mapping_state (MAP_DATA);
1813 do
1814 {
1815 struct reloc_table_entry *reloc;
1816
1817 expression (&exp);
1818
1819 if (exp.X_op != O_symbol)
1820 emit_expr (&exp, (unsigned int) nbytes);
1821 else
1822 {
1823 skip_past_char (&input_line_pointer, '#');
1824 if (skip_past_char (&input_line_pointer, ':'))
1825 {
1826 reloc = find_reloc_table_entry (&input_line_pointer);
1827 if (reloc == NULL)
1828 as_bad (_("unrecognized relocation suffix"));
1829 else
1830 as_bad (_("unimplemented relocation suffix"));
1831 ignore_rest_of_line ();
1832 return;
1833 }
1834 else
1835 emit_expr (&exp, (unsigned int) nbytes);
1836 }
1837 }
1838 while (*input_line_pointer++ == ',');
1839
1840 /* Put terminator back into stream. */
1841 input_line_pointer--;
1842 demand_empty_rest_of_line ();
1843 }
1844
1845 #endif /* OBJ_ELF */
1846
1847 /* Output a 32-bit word, but mark as an instruction. */
1848
1849 static void
1850 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1851 {
1852 expressionS exp;
1853
1854 #ifdef md_flush_pending_output
1855 md_flush_pending_output ();
1856 #endif
1857
1858 if (is_it_end_of_statement ())
1859 {
1860 demand_empty_rest_of_line ();
1861 return;
1862 }
1863
1864 if (!need_pass_2)
1865 frag_align_code (2, 0);
1866 #ifdef OBJ_ELF
1867 mapping_state (MAP_INSN);
1868 #endif
1869
1870 do
1871 {
1872 expression (&exp);
1873 if (exp.X_op != O_constant)
1874 {
1875 as_bad (_("constant expression required"));
1876 ignore_rest_of_line ();
1877 return;
1878 }
1879
1880 if (target_big_endian)
1881 {
1882 unsigned int val = exp.X_add_number;
1883 exp.X_add_number = SWAP_32 (val);
1884 }
1885 emit_expr (&exp, 4);
1886 }
1887 while (*input_line_pointer++ == ',');
1888
1889 /* Put terminator back into stream. */
1890 input_line_pointer--;
1891 demand_empty_rest_of_line ();
1892 }
1893
1894 #ifdef OBJ_ELF
1895 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1896
1897 static void
1898 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1899 {
1900 expressionS exp;
1901
1902 /* Since we're just labelling the code, there's no need to define a
1903 mapping symbol. */
1904 expression (&exp);
1905 /* Make sure there is enough room in this frag for the following
1906 blr. This trick only works if the blr follows immediately after
1907 the .tlsdesc directive. */
1908 frag_grow (4);
1909 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1910 BFD_RELOC_AARCH64_TLSDESC_CALL);
1911
1912 demand_empty_rest_of_line ();
1913 }
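/* A typical (illustrative) TLS descriptor sequence, as emitted by
   compilers, looks like:

       adrp  x0, :tlsdesc:var
       ldr   x1, [x0, #:tlsdesc_lo12:var]
       add   x0, x0, :tlsdesc_lo12:var
       .tlsdesccall var
       blr   x1

   The directive attaches BFD_RELOC_AARCH64_TLSDESC_CALL to the following
   blr so the linker can recognize and relax the whole sequence.  */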
1914 #endif /* OBJ_ELF */
1915
1916 static void s_aarch64_arch (int);
1917 static void s_aarch64_cpu (int);
1918
1919 /* This table describes all the machine specific pseudo-ops the assembler
1920 has to support. The fields are:
1921 pseudo-op name without dot
1922 function to call to execute this pseudo-op
1923 Integer arg to pass to the function. */
1924
1925 const pseudo_typeS md_pseudo_table[] = {
1926 /* Never called because '.req' does not start a line. */
1927 {"req", s_req, 0},
1928 {"unreq", s_unreq, 0},
1929 {"bss", s_bss, 0},
1930 {"even", s_even, 0},
1931 {"ltorg", s_ltorg, 0},
1932 {"pool", s_ltorg, 0},
1933 {"cpu", s_aarch64_cpu, 0},
1934 {"arch", s_aarch64_arch, 0},
1935 {"inst", s_aarch64_inst, 0},
1936 #ifdef OBJ_ELF
1937 {"tlsdesccall", s_tlsdesccall, 0},
1938 {"word", s_aarch64_elf_cons, 4},
1939 {"long", s_aarch64_elf_cons, 4},
1940 {"xword", s_aarch64_elf_cons, 8},
1941 {"dword", s_aarch64_elf_cons, 8},
1942 #endif
1943 {0, 0, 0}
1944 };
1945 \f
1946
1947 /* Check whether STR points to a register name followed by a comma or the
1948 end of line; REG_TYPE indicates which register types are checked
1949 against. Return TRUE if STR is such a register name; otherwise return
1950 FALSE. The function does not intend to produce any diagnostics, but since
1951 the register parser aarch64_reg_parse, which is called by this function,
1952 does produce diagnostics, we call clear_error to clear any diagnostics
1953 that may be generated by aarch64_reg_parse.
1954 Also, the function returns FALSE directly if there is any user error
1955 present at the function entry. This prevents the existing diagnostics
1956 state from being spoiled.
1957 The function currently serves parse_constant_immediate and
1958 parse_big_immediate only. */
1959 static bfd_boolean
1960 reg_name_p (char *str, aarch64_reg_type reg_type)
1961 {
1962 int reg;
1963
1964 /* Prevent the diagnostics state from being spoiled. */
1965 if (error_p ())
1966 return FALSE;
1967
1968 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1969
1970 /* Clear the parsing error that may be set by the reg parser. */
1971 clear_error ();
1972
1973 if (reg == PARSE_FAIL)
1974 return FALSE;
1975
1976 skip_whitespace (str);
1977 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1978 return TRUE;
1979
1980 return FALSE;
1981 }
1982
1983 /* Parser functions used exclusively in instruction operands. */
1984
1985 /* Parse an immediate expression which may not be constant.
1986
1987 To prevent the expression parser from pushing a register name
1988 into the symbol table as an undefined symbol, a check is first
1989 made to find out whether STR is a valid register name followed
1990 by a comma or the end of the line. Return FALSE if STR is such a
1991 string. */
1992
1993 static bfd_boolean
1994 parse_immediate_expression (char **str, expressionS *exp)
1995 {
1996 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1997 {
1998 set_recoverable_error (_("immediate operand required"));
1999 return FALSE;
2000 }
2001
2002 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2003
2004 if (exp->X_op == O_absent)
2005 {
2006 set_fatal_syntax_error (_("missing immediate expression"));
2007 return FALSE;
2008 }
2009
2010 return TRUE;
2011 }
2012
2013 /* Constant immediate-value read function for use in insn parsing.
2014 STR points to the beginning of the immediate (with the optional
2015 leading #); *VAL receives the value.
2016
2017 Return TRUE on success; otherwise return FALSE. */
2018
2019 static bfd_boolean
2020 parse_constant_immediate (char **str, int64_t * val)
2021 {
2022 expressionS exp;
2023
2024 if (! parse_immediate_expression (str, &exp))
2025 return FALSE;
2026
2027 if (exp.X_op != O_constant)
2028 {
2029 set_syntax_error (_("constant expression required"));
2030 return FALSE;
2031 }
2032
2033 *val = exp.X_add_number;
2034 return TRUE;
2035 }
2036
2037 static uint32_t
2038 encode_imm_float_bits (uint32_t imm)
2039 {
2040 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2041 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2042 }
2043
2044 /* Return TRUE if the single-precision floating-point value encoded in IMM
2045 can be expressed in the AArch64 8-bit signed floating-point format with
2046 3-bit exponent and normalized 4 bits of precision; in other words, the
2047    floating-point value must be expressible as
2048      (+/-) n / 16 * power (2, r)
2049    where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2050
2051 static bfd_boolean
2052 aarch64_imm_float_p (uint32_t imm)
2053 {
2054 /* If a single-precision floating-point value has the following bit
2055 pattern, it can be expressed in the AArch64 8-bit floating-point
2056 format:
2057
2058 3 32222222 2221111111111
2059 1 09876543 21098765432109876543210
2060 n Eeeeeexx xxxx0000000000000000000
2061
2062 where n, e and each x are either 0 or 1 independently, with
2063 E == ~ e. */
2064
2065 uint32_t pattern;
2066
2067 /* Prepare the pattern for 'Eeeeee'. */
2068 if (((imm >> 30) & 0x1) == 0)
2069 pattern = 0x3e000000;
2070 else
2071 pattern = 0x40000000;
2072
2073 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2074 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2075 }
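/* A worked example for clarity (illustrative values): 1.0f is encoded as
   0x3f800000.  Bit 30 is 0, so the expected pattern for bits 30 - 25 is
   0x3e000000; 0x3f800000 & 0x7e000000 matches it and the lower 19 bits are
   zero, so aarch64_imm_float_p returns TRUE.  encode_imm_float_bits then
   packs bits [25:19] and bit [31] into the 8-bit immediate:
   ((0x3f800000 >> 19) & 0x7f) == 0x70 and the sign contribution is 0,
   giving imm8 == 0x70, the FMOV encoding of +1.0.  */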
2076
2077 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2078
2079 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2080 8-bit signed floating-point format with 3-bit exponent and normalized 4
2081 bits of precision (i.e. can be used in an FMOV instruction); return the
2082 equivalent single-precision encoding in *FPWORD.
2083
2084 Otherwise return FALSE. */
2085
2086 static bfd_boolean
2087 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2088 {
2089 /* If a double-precision floating-point value has the following bit
2090 pattern, it can be expressed in the AArch64 8-bit floating-point
2091 format:
2092
2093 6 66655555555 554444444...21111111111
2094 3 21098765432 109876543...098765432109876543210
2095 n Eeeeeeeeexx xxxx00000...000000000000000000000
2096
2097 where n, e and each x are either 0 or 1 independently, with
2098 E == ~ e. */
2099
2100 uint32_t pattern;
2101 uint32_t high32 = imm >> 32;
2102
2103 /* Lower 32 bits need to be 0s. */
2104 if ((imm & 0xffffffff) != 0)
2105 return FALSE;
2106
2107 /* Prepare the pattern for 'Eeeeeeeee'. */
2108 if (((high32 >> 30) & 0x1) == 0)
2109 pattern = 0x3fc00000;
2110 else
2111 pattern = 0x40000000;
2112
2113 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2114 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2115 {
2116 /* Convert to the single-precision encoding.
2117 i.e. convert
2118 n Eeeeeeeeexx xxxx00000...000000000000000000000
2119 to
2120 n Eeeeeexx xxxx0000000000000000000. */
2121 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2122 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2123 return TRUE;
2124 }
2125 else
2126 return FALSE;
2127 }
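/* Illustrative example: the double-precision value 1.0 is encoded as
   0x3ff0000000000000.  The lower 32 bits are zero and the high word
   0x3ff00000 matches the pattern above, so the function converts it to the
   single-precision encoding 0x3f800000, i.e. 1.0f, returned in *FPWORD.  */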
2128
2129 /* Parse a floating-point immediate. Return TRUE on success and return the
2130 value in *IMMED in the format of IEEE754 single-precision encoding.
2131 *CCP points to the start of the string; DP_P is TRUE when the immediate
2132 is expected to be in double-precision (N.B. this only matters when
2133 hexadecimal representation is involved).
2134
2135 N.B. 0.0 is accepted by this function. */
2136
2137 static bfd_boolean
2138 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2139 {
2140 char *str = *ccp;
2141 char *fpnum;
2142 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2143 int found_fpchar = 0;
2144 int64_t val = 0;
2145 unsigned fpword = 0;
2146 bfd_boolean hex_p = FALSE;
2147
2148 skip_past_char (&str, '#');
2149
2150 fpnum = str;
2151 skip_whitespace (fpnum);
2152
2153 if (strncmp (fpnum, "0x", 2) == 0)
2154 {
2155 /* Support the hexadecimal representation of the IEEE754 encoding.
2156 Double-precision is expected when DP_P is TRUE, otherwise the
2157 representation should be in single-precision. */
2158 if (! parse_constant_immediate (&str, &val))
2159 goto invalid_fp;
2160
2161 if (dp_p)
2162 {
2163 if (! aarch64_double_precision_fmovable (val, &fpword))
2164 goto invalid_fp;
2165 }
2166 else if ((uint64_t) val > 0xffffffff)
2167 goto invalid_fp;
2168 else
2169 fpword = val;
2170
2171 hex_p = TRUE;
2172 }
2173 else
2174 {
2175 /* We must not accidentally parse an integer as a floating-point number.
2176 Make sure that the value we parse is not an integer by checking for
2177 special characters '.' or 'e'. */
2178 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2179 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2180 {
2181 found_fpchar = 1;
2182 break;
2183 }
2184
2185 if (!found_fpchar)
2186 return FALSE;
2187 }
2188
2189 if (! hex_p)
2190 {
2191 int i;
2192
2193 if ((str = atof_ieee (str, 's', words)) == NULL)
2194 goto invalid_fp;
2195
2196 /* Our FP word must be 32 bits (single-precision FP). */
2197 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2198 {
2199 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2200 fpword |= words[i];
2201 }
2202 }
2203
2204 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2205 {
2206 *immed = fpword;
2207 *ccp = str;
2208 return TRUE;
2209 }
2210
2211 invalid_fp:
2212 set_fatal_syntax_error (_("invalid floating-point constant"));
2213 return FALSE;
2214 }
2215
2216 /* Less-generic immediate-value read function with the possibility of loading
2217 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2218 instructions.
2219
2220 To prevent the expression parser from pushing a register name into the
2221    symbol table as an undefined symbol, a check is first done to find
2222 out whether STR is a valid register name followed by a comma or the end
2223 of line. Return FALSE if STR is such a register. */
2224
2225 static bfd_boolean
2226 parse_big_immediate (char **str, int64_t *imm)
2227 {
2228 char *ptr = *str;
2229
2230 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2231 {
2232 set_syntax_error (_("immediate operand required"));
2233 return FALSE;
2234 }
2235
2236 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2237
2238 if (inst.reloc.exp.X_op == O_constant)
2239 *imm = inst.reloc.exp.X_add_number;
2240
2241 *str = ptr;
2242
2243 return TRUE;
2244 }
2245
2246 /* Flag in *RELOC that operand *OPERAND needs a GAS internal fixup.
2247    If NEED_LIBOPCODES_P is non-zero, the fixup will need
2248    assistance from libopcodes. */
2249
2250 static inline void
2251 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2252 const aarch64_opnd_info *operand,
2253 int need_libopcodes_p)
2254 {
2255 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2256 reloc->opnd = operand->type;
2257 if (need_libopcodes_p)
2258 reloc->need_libopcodes_p = 1;
2259 }
2260
2261 /* Return TRUE if the instruction needs to be fixed up later internally by
2262 the GAS; otherwise return FALSE. */
2263
2264 static inline bfd_boolean
2265 aarch64_gas_internal_fixup_p (void)
2266 {
2267 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2268 }
2269
2270 /* Assign the immediate value to the relevant field in *OPERAND if
2271 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2272 needs an internal fixup in a later stage.
2273 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2274 IMM.VALUE that may get assigned with the constant. */
2275 static inline void
2276 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2277 aarch64_opnd_info *operand,
2278 int addr_off_p,
2279 int need_libopcodes_p,
2280 int skip_p)
2281 {
2282 if (reloc->exp.X_op == O_constant)
2283 {
2284 if (addr_off_p)
2285 operand->addr.offset.imm = reloc->exp.X_add_number;
2286 else
2287 operand->imm.value = reloc->exp.X_add_number;
2288 reloc->type = BFD_RELOC_UNUSED;
2289 }
2290 else
2291 {
2292 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2293 /* Tell libopcodes to ignore this operand or not. This is helpful
2294 when one of the operands needs to be fixed up later but we need
2295 libopcodes to check the other operands. */
2296 operand->skip = skip_p;
2297 }
2298 }
2299
2300 /* Relocation modifiers. Each entry in the table contains the textual
2301 name for the relocation which may be placed before a symbol used as
2302 a load/store offset, or add immediate. It must be surrounded by a
2303 leading and trailing colon, for example:
2304
2305 ldr x0, [x1, #:rello:varsym]
2306 add x0, x1, #:rello:varsym */
2307
2308 struct reloc_table_entry
2309 {
2310 const char *name;
2311 int pc_rel;
2312 bfd_reloc_code_real_type adrp_type;
2313 bfd_reloc_code_real_type movw_type;
2314 bfd_reloc_code_real_type add_type;
2315 bfd_reloc_code_real_type ldst_type;
2316 };
2317
2318 static struct reloc_table_entry reloc_table[] = {
2319 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2320 {"lo12", 0,
2321 0,
2322 0,
2323 BFD_RELOC_AARCH64_ADD_LO12,
2324 BFD_RELOC_AARCH64_LDST_LO12},
2325
2326 /* Higher 21 bits of pc-relative page offset: ADRP */
2327 {"pg_hi21", 1,
2328 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2329 0,
2330 0,
2331 0},
2332
2333 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2334 {"pg_hi21_nc", 1,
2335 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2336 0,
2337 0,
2338 0},
2339
2340 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2341 {"abs_g0", 0,
2342 0,
2343 BFD_RELOC_AARCH64_MOVW_G0,
2344 0,
2345 0},
2346
2347 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2348 {"abs_g0_s", 0,
2349 0,
2350 BFD_RELOC_AARCH64_MOVW_G0_S,
2351 0,
2352 0},
2353
2354 /* Less significant bits 0-15 of address/value: MOVK, no check */
2355 {"abs_g0_nc", 0,
2356 0,
2357 BFD_RELOC_AARCH64_MOVW_G0_NC,
2358 0,
2359 0},
2360
2361 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2362 {"abs_g1", 0,
2363 0,
2364 BFD_RELOC_AARCH64_MOVW_G1,
2365 0,
2366 0},
2367
2368 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2369 {"abs_g1_s", 0,
2370 0,
2371 BFD_RELOC_AARCH64_MOVW_G1_S,
2372 0,
2373 0},
2374
2375 /* Less significant bits 16-31 of address/value: MOVK, no check */
2376 {"abs_g1_nc", 0,
2377 0,
2378 BFD_RELOC_AARCH64_MOVW_G1_NC,
2379 0,
2380 0},
2381
2382 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2383 {"abs_g2", 0,
2384 0,
2385 BFD_RELOC_AARCH64_MOVW_G2,
2386 0,
2387 0},
2388
2389 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2390 {"abs_g2_s", 0,
2391 0,
2392 BFD_RELOC_AARCH64_MOVW_G2_S,
2393 0,
2394 0},
2395
2396 /* Less significant bits 32-47 of address/value: MOVK, no check */
2397 {"abs_g2_nc", 0,
2398 0,
2399 BFD_RELOC_AARCH64_MOVW_G2_NC,
2400 0,
2401 0},
2402
2403 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2404 {"abs_g3", 0,
2405 0,
2406 BFD_RELOC_AARCH64_MOVW_G3,
2407 0,
2408 0},
2409
2410 /* Get to the page containing GOT entry for a symbol. */
2411 {"got", 1,
2412 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2413 0,
2414 0,
2415 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2416
2417 /* 12 bit offset into the page containing GOT entry for that symbol. */
2418 {"got_lo12", 0,
2419 0,
2420 0,
2421 0,
2422 BFD_RELOC_AARCH64_LD_GOT_LO12_NC},
2423
2424 /* Get to the page containing GOT TLS entry for a symbol */
2425 {"tlsgd", 0,
2426 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2427 0,
2428 0,
2429 0},
2430
2431 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2432 {"tlsgd_lo12", 0,
2433 0,
2434 0,
2435 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2436 0},
2437
2438 /* Get to the page containing GOT TLS entry for a symbol */
2439 {"tlsdesc", 0,
2440 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2441 0,
2442 0,
2443 0},
2444
2445 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2446 {"tlsdesc_lo12", 0,
2447 0,
2448 0,
2449 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2450 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC},
2451
2452 /* Get to the page containing GOT TLS entry for a symbol */
2453 {"gottprel", 0,
2454 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2455 0,
2456 0,
2457 0},
2458
2459 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2460 {"gottprel_lo12", 0,
2461 0,
2462 0,
2463 0,
2464 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC},
2465
2466 /* Get tp offset for a symbol. */
2467 {"tprel", 0,
2468 0,
2469 0,
2470 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2471 0},
2472
2473 /* Get tp offset for a symbol. */
2474 {"tprel_lo12", 0,
2475 0,
2476 0,
2477 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2478 0},
2479
2480 /* Get tp offset for a symbol. */
2481 {"tprel_hi12", 0,
2482 0,
2483 0,
2484 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2485 0},
2486
2487 /* Get tp offset for a symbol. */
2488 {"tprel_lo12_nc", 0,
2489 0,
2490 0,
2491 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2492 0},
2493
2494 /* Most significant bits 32-47 of address/value: MOVZ. */
2495 {"tprel_g2", 0,
2496 0,
2497 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2498 0,
2499 0},
2500
2501 /* Most significant bits 16-31 of address/value: MOVZ. */
2502 {"tprel_g1", 0,
2503 0,
2504 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2505 0,
2506 0},
2507
2508 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2509 {"tprel_g1_nc", 0,
2510 0,
2511 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2512 0,
2513 0},
2514
2515 /* Most significant bits 0-15 of address/value: MOVZ. */
2516 {"tprel_g0", 0,
2517 0,
2518 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2519 0,
2520 0},
2521
2522 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2523 {"tprel_g0_nc", 0,
2524 0,
2525 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2526 0,
2527 0},
2528 };
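/* For illustration, a typical use of these modifiers (symbol name "foo" is
   arbitrary) to materialize the address of a symbol:

	adrp	x0, foo			// or: adrp x0, :pg_hi21:foo
	add	x0, x0, :lo12:foo

   and, analogously, a GOT-indirect access:

	adrp	x0, :got:foo
	ldr	x0, [x0, :got_lo12:foo]  */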
2529
2530 /* Given the address of a pointer pointing to the textual name of a
2531 relocation as may appear in assembler source, attempt to find its
2532 details in reloc_table. The pointer will be updated to the character
2533 after the trailing colon. On failure, NULL will be returned;
2534 otherwise return the reloc_table_entry. */
2535
2536 static struct reloc_table_entry *
2537 find_reloc_table_entry (char **str)
2538 {
2539 unsigned int i;
2540 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2541 {
2542 int length = strlen (reloc_table[i].name);
2543
2544 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2545 && (*str)[length] == ':')
2546 {
2547 *str += (length + 1);
2548 return &reloc_table[i];
2549 }
2550 }
2551
2552 return NULL;
2553 }
2554
2555 /* Mode argument to parse_shift and parse_shifter_operand. */
2556 enum parse_shift_mode
2557 {
2558 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2559 "#imm{,lsl #n}" */
2560 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2561 "#imm" */
2562 SHIFTED_LSL, /* bare "lsl #n" */
2563 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2564 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2565 };
2566
2567 /* Parse a <shift> operator on an AArch64 data processing instruction.
2568 Return TRUE on success; otherwise return FALSE. */
2569 static bfd_boolean
2570 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2571 {
2572 const struct aarch64_name_value_pair *shift_op;
2573 enum aarch64_modifier_kind kind;
2574 expressionS exp;
2575 int exp_has_prefix;
2576 char *s = *str;
2577 char *p = s;
2578
2579 for (p = *str; ISALPHA (*p); p++)
2580 ;
2581
2582 if (p == *str)
2583 {
2584 set_syntax_error (_("shift expression expected"));
2585 return FALSE;
2586 }
2587
2588 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2589
2590 if (shift_op == NULL)
2591 {
2592 set_syntax_error (_("shift operator expected"));
2593 return FALSE;
2594 }
2595
2596 kind = aarch64_get_operand_modifier (shift_op);
2597
2598 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2599 {
2600 set_syntax_error (_("invalid use of 'MSL'"));
2601 return FALSE;
2602 }
2603
2604 switch (mode)
2605 {
2606 case SHIFTED_LOGIC_IMM:
2607 if (aarch64_extend_operator_p (kind) == TRUE)
2608 {
2609 set_syntax_error (_("extending shift is not permitted"));
2610 return FALSE;
2611 }
2612 break;
2613
2614 case SHIFTED_ARITH_IMM:
2615 if (kind == AARCH64_MOD_ROR)
2616 {
2617 set_syntax_error (_("'ROR' shift is not permitted"));
2618 return FALSE;
2619 }
2620 break;
2621
2622 case SHIFTED_LSL:
2623 if (kind != AARCH64_MOD_LSL)
2624 {
2625 set_syntax_error (_("only 'LSL' shift is permitted"));
2626 return FALSE;
2627 }
2628 break;
2629
2630 case SHIFTED_REG_OFFSET:
2631 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2632 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2633 {
2634 set_fatal_syntax_error
2635 (_("invalid shift for the register offset addressing mode"));
2636 return FALSE;
2637 }
2638 break;
2639
2640 case SHIFTED_LSL_MSL:
2641 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2642 {
2643 set_syntax_error (_("invalid shift operator"));
2644 return FALSE;
2645 }
2646 break;
2647
2648 default:
2649 abort ();
2650 }
2651
2652 /* Whitespace can appear here if the next thing is a bare digit. */
2653 skip_whitespace (p);
2654
2655 /* Parse shift amount. */
2656 exp_has_prefix = 0;
2657 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2658 exp.X_op = O_absent;
2659 else
2660 {
2661 if (is_immediate_prefix (*p))
2662 {
2663 p++;
2664 exp_has_prefix = 1;
2665 }
2666 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2667 }
2668 if (exp.X_op == O_absent)
2669 {
2670 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2671 {
2672 set_syntax_error (_("missing shift amount"));
2673 return FALSE;
2674 }
2675 operand->shifter.amount = 0;
2676 }
2677 else if (exp.X_op != O_constant)
2678 {
2679 set_syntax_error (_("constant shift amount required"));
2680 return FALSE;
2681 }
2682 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2683 {
2684 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2685 return FALSE;
2686 }
2687 else
2688 {
2689 operand->shifter.amount = exp.X_add_number;
2690 operand->shifter.amount_present = 1;
2691 }
2692
2693 operand->shifter.operator_present = 1;
2694 operand->shifter.kind = kind;
2695
2696 *str = p;
2697 return TRUE;
2698 }
2699
2700 /* Parse a <shifter_operand> for a data processing instruction:
2701
2702 #<immediate>
2703 #<immediate>, LSL #imm
2704
2705 Validation of immediate operands is deferred to md_apply_fix.
2706
2707 Return TRUE on success; otherwise return FALSE. */
2708
2709 static bfd_boolean
2710 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2711 enum parse_shift_mode mode)
2712 {
2713 char *p;
2714
2715 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2716 return FALSE;
2717
2718 p = *str;
2719
2720 /* Accept an immediate expression. */
2721 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2722 return FALSE;
2723
2724 /* Accept optional LSL for arithmetic immediate values. */
2725 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2726 if (! parse_shift (&p, operand, SHIFTED_LSL))
2727 return FALSE;
2728
2729   /* Do not accept any shifter for logical immediate values. */
2730 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2731 && parse_shift (&p, operand, mode))
2732 {
2733 set_syntax_error (_("unexpected shift operator"));
2734 return FALSE;
2735 }
2736
2737 *str = p;
2738 return TRUE;
2739 }
2740
2741 /* Parse a <shifter_operand> for a data processing instruction:
2742
2743 <Rm>
2744 <Rm>, <shift>
2745 #<immediate>
2746 #<immediate>, LSL #imm
2747
2748 where <shift> is handled by parse_shift above, and the last two
2749 cases are handled by the function above.
2750
2751 Validation of immediate operands is deferred to md_apply_fix.
2752
2753 Return TRUE on success; otherwise return FALSE. */
2754
2755 static bfd_boolean
2756 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2757 enum parse_shift_mode mode)
2758 {
2759 int reg;
2760 int isreg32, isregzero;
2761 enum aarch64_operand_class opd_class
2762 = aarch64_get_operand_class (operand->type);
2763
2764 if ((reg =
2765 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2766 {
2767 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2768 {
2769 set_syntax_error (_("unexpected register in the immediate operand"));
2770 return FALSE;
2771 }
2772
2773 if (!isregzero && reg == REG_SP)
2774 {
2775 set_syntax_error (BAD_SP);
2776 return FALSE;
2777 }
2778
2779 operand->reg.regno = reg;
2780 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2781
2782 /* Accept optional shift operation on register. */
2783 if (! skip_past_comma (str))
2784 return TRUE;
2785
2786 if (! parse_shift (str, operand, mode))
2787 return FALSE;
2788
2789 return TRUE;
2790 }
2791 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2792 {
2793 set_syntax_error
2794 (_("integer register expected in the extended/shifted operand "
2795 "register"));
2796 return FALSE;
2797 }
2798
2799 /* We have a shifted immediate variable. */
2800 return parse_shifter_operand_imm (str, operand, mode);
2801 }
2802
2803 /* Return TRUE on success; return FALSE otherwise. */
2804
2805 static bfd_boolean
2806 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2807 enum parse_shift_mode mode)
2808 {
2809 char *p = *str;
2810
2811 /* Determine if we have the sequence of characters #: or just :
2812 coming next. If we do, then we check for a :rello: relocation
2813 modifier. If we don't, punt the whole lot to
2814 parse_shifter_operand. */
2815
2816 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2817 {
2818 struct reloc_table_entry *entry;
2819
2820 if (p[0] == '#')
2821 p += 2;
2822 else
2823 p++;
2824 *str = p;
2825
2826 /* Try to parse a relocation. Anything else is an error. */
2827 if (!(entry = find_reloc_table_entry (str)))
2828 {
2829 set_syntax_error (_("unknown relocation modifier"));
2830 return FALSE;
2831 }
2832
2833 if (entry->add_type == 0)
2834 {
2835 set_syntax_error
2836 (_("this relocation modifier is not allowed on this instruction"));
2837 return FALSE;
2838 }
2839
2840 /* Save str before we decompose it. */
2841 p = *str;
2842
2843 /* Next, we parse the expression. */
2844 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2845 return FALSE;
2846
2847 /* Record the relocation type (use the ADD variant here). */
2848 inst.reloc.type = entry->add_type;
2849 inst.reloc.pc_rel = entry->pc_rel;
2850
2851 /* If str is empty, we've reached the end, stop here. */
2852 if (**str == '\0')
2853 return TRUE;
2854
2855 /* Otherwise, we have a shifted reloc modifier, so rewind to
2856 recover the variable name and continue parsing for the shifter. */
2857 *str = p;
2858 return parse_shifter_operand_imm (str, operand, mode);
2859 }
2860
2861 return parse_shifter_operand (str, operand, mode);
2862 }
2863
2864 /* Parse all forms of an address expression. Information is written
2865 to *OPERAND and/or inst.reloc.
2866
2867 The A64 instruction set has the following addressing modes:
2868
2869 Offset
2870 [base] // in SIMD ld/st structure
2871 [base{,#0}] // in ld/st exclusive
2872 [base{,#imm}]
2873 [base,Xm{,LSL #imm}]
2874 [base,Xm,SXTX {#imm}]
2875 [base,Wm,(S|U)XTW {#imm}]
2876 Pre-indexed
2877 [base,#imm]!
2878 Post-indexed
2879 [base],#imm
2880 [base],Xm // in SIMD ld/st structure
2881 PC-relative (literal)
2882 label
2883 =immediate
2884
2885 (As a convenience, the notation "=immediate" is permitted in conjunction
2886 with the pc-relative literal load instructions to automatically place an
2887 immediate value or symbolic address in a nearby literal pool and generate
2888 a hidden label which references it.)
2889
2890 Upon a successful parsing, the address structure in *OPERAND will be
2891 filled in the following way:
2892
2893 .base_regno = <base>
2894 .offset.is_reg // 1 if the offset is a register
2895 .offset.imm = <imm>
2896 .offset.regno = <Rm>
2897
2898 For different addressing modes defined in the A64 ISA:
2899
2900 Offset
2901 .pcrel=0; .preind=1; .postind=0; .writeback=0
2902 Pre-indexed
2903 .pcrel=0; .preind=1; .postind=0; .writeback=1
2904 Post-indexed
2905 .pcrel=0; .preind=0; .postind=1; .writeback=1
2906 PC-relative (literal)
2907 .pcrel=1; .preind=1; .postind=0; .writeback=0
2908
2909 The shift/extension information, if any, will be stored in .shifter.
2910
2911 It is the caller's responsibility to check for addressing modes not
2912 supported by the instruction, and to set inst.reloc.type. */
2913
2914 static bfd_boolean
2915 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2916 int accept_reg_post_index)
2917 {
2918 char *p = *str;
2919 int reg;
2920 int isreg32, isregzero;
2921 expressionS *exp = &inst.reloc.exp;
2922
2923 if (! skip_past_char (&p, '['))
2924 {
2925 /* =immediate or label. */
2926 operand->addr.pcrel = 1;
2927 operand->addr.preind = 1;
2928
2929 /* #:<reloc_op>:<symbol> */
2930 skip_past_char (&p, '#');
2931 if (reloc && skip_past_char (&p, ':'))
2932 {
2933 struct reloc_table_entry *entry;
2934
2935 /* Try to parse a relocation modifier. Anything else is
2936 an error. */
2937 entry = find_reloc_table_entry (&p);
2938 if (! entry)
2939 {
2940 set_syntax_error (_("unknown relocation modifier"));
2941 return FALSE;
2942 }
2943
2944 if (entry->ldst_type == 0)
2945 {
2946 set_syntax_error
2947 (_("this relocation modifier is not allowed on this "
2948 "instruction"));
2949 return FALSE;
2950 }
2951
2952 /* #:<reloc_op>: */
2953 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2954 {
2955 set_syntax_error (_("invalid relocation expression"));
2956 return FALSE;
2957 }
2958
2959 /* #:<reloc_op>:<expr> */
2960 /* Record the load/store relocation type. */
2961 inst.reloc.type = entry->ldst_type;
2962 inst.reloc.pc_rel = entry->pc_rel;
2963 }
2964 else
2965 {
2966
2967 if (skip_past_char (&p, '='))
2968 /* =immediate; need to generate the literal in the literal pool. */
2969 inst.gen_lit_pool = 1;
2970
2971 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2972 {
2973 set_syntax_error (_("invalid address"));
2974 return FALSE;
2975 }
2976 }
2977
2978 *str = p;
2979 return TRUE;
2980 }
2981
2982 /* [ */
2983
2984 /* Accept SP and reject ZR */
2985 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
2986 if (reg == PARSE_FAIL || isreg32)
2987 {
2988 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
2989 return FALSE;
2990 }
2991 operand->addr.base_regno = reg;
2992
2993 /* [Xn */
2994 if (skip_past_comma (&p))
2995 {
2996 /* [Xn, */
2997 operand->addr.preind = 1;
2998
2999 /* Reject SP and accept ZR */
3000 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3001 if (reg != PARSE_FAIL)
3002 {
3003 /* [Xn,Rm */
3004 operand->addr.offset.regno = reg;
3005 operand->addr.offset.is_reg = 1;
3006 /* Shifted index. */
3007 if (skip_past_comma (&p))
3008 {
3009 /* [Xn,Rm, */
3010 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3011 	      /* Use the diagnostics set in parse_shift, so do not set a new
3012 		 error message here. */
3013 return FALSE;
3014 }
3015 /* We only accept:
3016 [base,Xm{,LSL #imm}]
3017 [base,Xm,SXTX {#imm}]
3018 [base,Wm,(S|U)XTW {#imm}] */
3019 if (operand->shifter.kind == AARCH64_MOD_NONE
3020 || operand->shifter.kind == AARCH64_MOD_LSL
3021 || operand->shifter.kind == AARCH64_MOD_SXTX)
3022 {
3023 if (isreg32)
3024 {
3025 set_syntax_error (_("invalid use of 32-bit register offset"));
3026 return FALSE;
3027 }
3028 }
3029 else if (!isreg32)
3030 {
3031 set_syntax_error (_("invalid use of 64-bit register offset"));
3032 return FALSE;
3033 }
3034 }
3035 else
3036 {
3037 /* [Xn,#:<reloc_op>:<symbol> */
3038 skip_past_char (&p, '#');
3039 if (reloc && skip_past_char (&p, ':'))
3040 {
3041 struct reloc_table_entry *entry;
3042
3043 /* Try to parse a relocation modifier. Anything else is
3044 an error. */
3045 if (!(entry = find_reloc_table_entry (&p)))
3046 {
3047 set_syntax_error (_("unknown relocation modifier"));
3048 return FALSE;
3049 }
3050
3051 if (entry->ldst_type == 0)
3052 {
3053 set_syntax_error
3054 (_("this relocation modifier is not allowed on this "
3055 "instruction"));
3056 return FALSE;
3057 }
3058
3059 /* [Xn,#:<reloc_op>: */
3060 /* We now have the group relocation table entry corresponding to
3061 the name in the assembler source. Next, we parse the
3062 expression. */
3063 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3064 {
3065 set_syntax_error (_("invalid relocation expression"));
3066 return FALSE;
3067 }
3068
3069 /* [Xn,#:<reloc_op>:<expr> */
3070 /* Record the load/store relocation type. */
3071 inst.reloc.type = entry->ldst_type;
3072 inst.reloc.pc_rel = entry->pc_rel;
3073 }
3074 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3075 {
3076 set_syntax_error (_("invalid expression in the address"));
3077 return FALSE;
3078 }
3079 /* [Xn,<expr> */
3080 }
3081 }
3082
3083 if (! skip_past_char (&p, ']'))
3084 {
3085 set_syntax_error (_("']' expected"));
3086 return FALSE;
3087 }
3088
3089 if (skip_past_char (&p, '!'))
3090 {
3091 if (operand->addr.preind && operand->addr.offset.is_reg)
3092 {
3093 set_syntax_error (_("register offset not allowed in pre-indexed "
3094 "addressing mode"));
3095 return FALSE;
3096 }
3097 /* [Xn]! */
3098 operand->addr.writeback = 1;
3099 }
3100 else if (skip_past_comma (&p))
3101 {
3102 /* [Xn], */
3103 operand->addr.postind = 1;
3104 operand->addr.writeback = 1;
3105
3106 if (operand->addr.preind)
3107 {
3108 set_syntax_error (_("cannot combine pre- and post-indexing"));
3109 return FALSE;
3110 }
3111
3112 if (accept_reg_post_index
3113 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3114 &isregzero)) != PARSE_FAIL)
3115 {
3116 /* [Xn],Xm */
3117 if (isreg32)
3118 {
3119 set_syntax_error (_("invalid 32-bit register offset"));
3120 return FALSE;
3121 }
3122 operand->addr.offset.regno = reg;
3123 operand->addr.offset.is_reg = 1;
3124 }
3125 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3126 {
3127 /* [Xn],#expr */
3128 set_syntax_error (_("invalid expression in the address"));
3129 return FALSE;
3130 }
3131 }
3132
3133 /* If at this point neither .preind nor .postind is set, we have a
3134 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3135 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3136 {
3137 if (operand->addr.writeback)
3138 {
3139 /* Reject [Rn]! */
3140 set_syntax_error (_("missing offset in the pre-indexed address"));
3141 return FALSE;
3142 }
3143 operand->addr.preind = 1;
3144 inst.reloc.exp.X_op = O_constant;
3145 inst.reloc.exp.X_add_number = 0;
3146 }
3147
3148 *str = p;
3149 return TRUE;
3150 }
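/* Illustrative example (register numbers are arbitrary): for the operand
   "[x1, w2, sxtw #2]" the parser above sets base_regno = 1,
   offset.regno = 2, offset.is_reg = 1, shifter.kind = AARCH64_MOD_SXTW,
   shifter.amount = 2, preind = 1, postind = 0 and writeback = 0.  */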
3151
3152 /* Return TRUE on success; otherwise return FALSE. */
3153 static bfd_boolean
3154 parse_address (char **str, aarch64_opnd_info *operand,
3155 int accept_reg_post_index)
3156 {
3157 return parse_address_main (str, operand, 0, accept_reg_post_index);
3158 }
3159
3160 /* Return TRUE on success; otherwise return FALSE. */
3161 static bfd_boolean
3162 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3163 {
3164 return parse_address_main (str, operand, 1, 0);
3165 }
3166
3167 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3168 Return TRUE on success; otherwise return FALSE. */
3169 static bfd_boolean
3170 parse_half (char **str, int *internal_fixup_p)
3171 {
3172 char *p, *saved;
3173 int dummy;
3174
3175 p = *str;
3176 skip_past_char (&p, '#');
3177
3178 gas_assert (internal_fixup_p);
3179 *internal_fixup_p = 0;
3180
3181 if (*p == ':')
3182 {
3183 struct reloc_table_entry *entry;
3184
3185 /* Try to parse a relocation. Anything else is an error. */
3186 ++p;
3187 if (!(entry = find_reloc_table_entry (&p)))
3188 {
3189 set_syntax_error (_("unknown relocation modifier"));
3190 return FALSE;
3191 }
3192
3193 if (entry->movw_type == 0)
3194 {
3195 set_syntax_error
3196 (_("this relocation modifier is not allowed on this instruction"));
3197 return FALSE;
3198 }
3199
3200 inst.reloc.type = entry->movw_type;
3201 }
3202 else
3203 *internal_fixup_p = 1;
3204
3205 /* Avoid parsing a register as a general symbol. */
3206 saved = p;
3207 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3208 return FALSE;
3209 p = saved;
3210
3211 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3212 return FALSE;
3213
3214 *str = p;
3215 return TRUE;
3216 }
3217
3218 /* Parse an operand for an ADRP instruction:
3219 ADRP <Xd>, <label>
3220 Return TRUE on success; otherwise return FALSE. */
3221
3222 static bfd_boolean
3223 parse_adrp (char **str)
3224 {
3225 char *p;
3226
3227 p = *str;
3228 if (*p == ':')
3229 {
3230 struct reloc_table_entry *entry;
3231
3232 /* Try to parse a relocation. Anything else is an error. */
3233 ++p;
3234 if (!(entry = find_reloc_table_entry (&p)))
3235 {
3236 set_syntax_error (_("unknown relocation modifier"));
3237 return FALSE;
3238 }
3239
3240 if (entry->adrp_type == 0)
3241 {
3242 set_syntax_error
3243 (_("this relocation modifier is not allowed on this instruction"));
3244 return FALSE;
3245 }
3246
3247 inst.reloc.type = entry->adrp_type;
3248 }
3249 else
3250 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3251
3252 inst.reloc.pc_rel = 1;
3253
3254 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3255 return FALSE;
3256
3257 *str = p;
3258 return TRUE;
3259 }
3260
3261 /* Miscellaneous. */
3262
3263 /* Parse an option for a preload instruction. Returns the encoding for the
3264 option, or PARSE_FAIL. */
3265
3266 static int
3267 parse_pldop (char **str)
3268 {
3269 char *p, *q;
3270 const struct aarch64_name_value_pair *o;
3271
3272 p = q = *str;
3273 while (ISALNUM (*q))
3274 q++;
3275
3276 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3277 if (!o)
3278 return PARSE_FAIL;
3279
3280 *str = q;
3281 return o->value;
3282 }
3283
3284 /* Parse an option for a barrier instruction. Returns the encoding for the
3285 option, or PARSE_FAIL. */
3286
3287 static int
3288 parse_barrier (char **str)
3289 {
3290 char *p, *q;
3291 const asm_barrier_opt *o;
3292
3293 p = q = *str;
3294 while (ISALPHA (*q))
3295 q++;
3296
3297 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3298 if (!o)
3299 return PARSE_FAIL;
3300
3301 *str = q;
3302 return o->value;
3303 }
3304
3305 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3306 Returns the encoding for the option, or PARSE_FAIL.
3307
3308 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3309 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3310
3311 static int
3312 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3313 {
3314 char *p, *q;
3315 char buf[32];
3316 const aarch64_sys_reg *o;
3317 int value;
3318
3319 p = buf;
3320 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3321 if (p < buf + 31)
3322 *p++ = TOLOWER (*q);
3323 *p = '\0';
3324   /* Assert that BUF is large enough. */
3325 gas_assert (p - buf == q - *str);
3326
3327 o = hash_find (sys_regs, buf);
3328 if (!o)
3329 {
3330 if (!imple_defined_p)
3331 return PARSE_FAIL;
3332 else
3333 {
3334 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>, the implementation defined
3335 registers. */
3336 unsigned int op0, op1, cn, cm, op2;
3337 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2) != 5)
3338 return PARSE_FAIL;
3339 /* The architecture specifies the encoding space for implementation
3340 defined registers as:
3341 op0 op1 CRn CRm op2
3342 1x xxx 1x11 xxxx xxx
3343 For convenience GAS accepts a wider encoding space, as follows:
3344 op0 op1 CRn CRm op2
3345 1x xxx xxxx xxxx xxx */
3346 if ((op0 != 2 && op0 != 3) || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3347 return PARSE_FAIL;
3348 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3349 }
3350 }
3351 else
3352 {
3353 if (aarch64_sys_reg_deprecated_p (o))
3354 as_warn (_("system register name '%s' is deprecated and may be "
3355 "removed in a future release"), buf);
3356 value = o->value;
3357 }
3358
3359 *str = q;
3360 return value;
3361 }
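/* Illustrative example: the implementation defined name "s3_0_c15_c2_0"
   (an arbitrary example) is parsed as op0 = 3, op1 = 0, CRn = 15, CRm = 2,
   op2 = 0 and therefore encoded as
   (3 << 14) | (0 << 11) | (15 << 7) | (2 << 3) | 0, i.e. 0xc790.  */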
3362
3363 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3364 for the option, or NULL. */
3365
3366 static const aarch64_sys_ins_reg *
3367 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3368 {
3369 char *p, *q;
3370 char buf[32];
3371 const aarch64_sys_ins_reg *o;
3372
3373 p = buf;
3374 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3375 if (p < buf + 31)
3376 *p++ = TOLOWER (*q);
3377 *p = '\0';
3378
3379 o = hash_find (sys_ins_regs, buf);
3380 if (!o)
3381 return NULL;
3382
3383 *str = q;
3384 return o;
3385 }
3386 \f
3387 #define po_char_or_fail(chr) do { \
3388 if (! skip_past_char (&str, chr)) \
3389 goto failure; \
3390 } while (0)
3391
3392 #define po_reg_or_fail(regtype) do { \
3393 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3394 if (val == PARSE_FAIL) \
3395 { \
3396 set_default_error (); \
3397 goto failure; \
3398 } \
3399 } while (0)
3400
3401 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3402 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3403 &isreg32, &isregzero); \
3404 if (val == PARSE_FAIL) \
3405 { \
3406 set_default_error (); \
3407 goto failure; \
3408 } \
3409 info->reg.regno = val; \
3410 if (isreg32) \
3411 info->qualifier = AARCH64_OPND_QLF_W; \
3412 else \
3413 info->qualifier = AARCH64_OPND_QLF_X; \
3414 } while (0)
3415
3416 #define po_imm_nc_or_fail() do { \
3417 if (! parse_constant_immediate (&str, &val)) \
3418 goto failure; \
3419 } while (0)
3420
3421 #define po_imm_or_fail(min, max) do { \
3422 if (! parse_constant_immediate (&str, &val)) \
3423 goto failure; \
3424 if (val < min || val > max) \
3425 { \
3426 set_fatal_syntax_error (_("immediate value out of range "\
3427 #min " to "#max)); \
3428 goto failure; \
3429 } \
3430 } while (0)
3431
3432 #define po_misc_or_fail(expr) do { \
3433 if (!expr) \
3434 goto failure; \
3435 } while (0)
3436 \f
3437 /* encode the 12-bit imm field of Add/sub immediate */
3438 static inline uint32_t
3439 encode_addsub_imm (uint32_t imm)
3440 {
3441 return imm << 10;
3442 }
3443
3444 /* encode the shift amount field of Add/sub immediate */
3445 static inline uint32_t
3446 encode_addsub_imm_shift_amount (uint32_t cnt)
3447 {
3448 return cnt << 22;
3449 }
3450
3451
3452 /* encode the imm field of Adr instruction */
3453 static inline uint32_t
3454 encode_adr_imm (uint32_t imm)
3455 {
3456 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3457 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3458 }
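/* Illustrative example: for an ADR immediate of 0x5, the low two bits (0b01)
   are placed in bits [30:29], giving 0x20000000, and the remaining bits
   [20:2] (here 0b1 at bit 2) are moved to bits [23:5], giving 0x20; the
   encoded field is therefore 0x20000020.  */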
3459
3460 /* encode the immediate field of Move wide immediate */
3461 static inline uint32_t
3462 encode_movw_imm (uint32_t imm)
3463 {
3464 return imm << 5;
3465 }
3466
3467 /* encode the 26-bit offset of unconditional branch */
3468 static inline uint32_t
3469 encode_branch_ofs_26 (uint32_t ofs)
3470 {
3471 return ofs & ((1 << 26) - 1);
3472 }
3473
3474 /* encode the 19-bit offset of conditional branch and compare & branch */
3475 static inline uint32_t
3476 encode_cond_branch_ofs_19 (uint32_t ofs)
3477 {
3478 return (ofs & ((1 << 19) - 1)) << 5;
3479 }
3480
3481 /* encode the 19-bit offset of ld literal */
3482 static inline uint32_t
3483 encode_ld_lit_ofs_19 (uint32_t ofs)
3484 {
3485 return (ofs & ((1 << 19) - 1)) << 5;
3486 }
3487
3488 /* Encode the 14-bit offset of test & branch. */
3489 static inline uint32_t
3490 encode_tst_branch_ofs_14 (uint32_t ofs)
3491 {
3492 return (ofs & ((1 << 14) - 1)) << 5;
3493 }
3494
3495 /* Encode the 16-bit imm field of svc/hvc/smc. */
3496 static inline uint32_t
3497 encode_svc_imm (uint32_t imm)
3498 {
3499 return imm << 5;
3500 }
3501
3502 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3503 static inline uint32_t
3504 reencode_addsub_switch_add_sub (uint32_t opcode)
3505 {
3506 return opcode ^ (1 << 30);
3507 }
3508
3509 static inline uint32_t
3510 reencode_movzn_to_movz (uint32_t opcode)
3511 {
3512 return opcode | (1 << 30);
3513 }
3514
3515 static inline uint32_t
3516 reencode_movzn_to_movn (uint32_t opcode)
3517 {
3518 return opcode & ~(1 << 30);
3519 }
3520
3521 /* Overall per-instruction processing. */
3522
3523 /* We need to be able to fix up arbitrary expressions in some statements.
3524 This is so that we can handle symbols that are an arbitrary distance from
3525 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3526 which returns part of an address in a form which will be valid for
3527 a data instruction. We do this by pushing the expression into a symbol
3528 in the expr_section, and creating a fix for that. */
3529
3530 static fixS *
3531 fix_new_aarch64 (fragS * frag,
3532 int where,
3533 short int size, expressionS * exp, int pc_rel, int reloc)
3534 {
3535 fixS *new_fix;
3536
3537 switch (exp->X_op)
3538 {
3539 case O_constant:
3540 case O_symbol:
3541 case O_add:
3542 case O_subtract:
3543 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3544 break;
3545
3546 default:
3547 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3548 pc_rel, reloc);
3549 break;
3550 }
3551 return new_fix;
3552 }
3553 \f
3554 /* Diagnostics on operand errors. */
3555
3556 /* By default, output verbose error messages.
3557    Use -mno-verbose-error to disable them. */
3558 static int verbose_error_p = 1;
3559
3560 #ifdef DEBUG_AARCH64
3561 /* N.B. this is only for the purpose of debugging. */
3562 const char* operand_mismatch_kind_names[] =
3563 {
3564 "AARCH64_OPDE_NIL",
3565 "AARCH64_OPDE_RECOVERABLE",
3566 "AARCH64_OPDE_SYNTAX_ERROR",
3567 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3568 "AARCH64_OPDE_INVALID_VARIANT",
3569 "AARCH64_OPDE_OUT_OF_RANGE",
3570 "AARCH64_OPDE_UNALIGNED",
3571 "AARCH64_OPDE_REG_LIST",
3572 "AARCH64_OPDE_OTHER_ERROR",
3573 };
3574 #endif /* DEBUG_AARCH64 */
3575
3576 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3577
3578 When multiple errors of different kinds are found in the same assembly
3579 line, only the error of the highest severity will be picked up for
3580 issuing the diagnostics. */
3581
3582 static inline bfd_boolean
3583 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3584 enum aarch64_operand_error_kind rhs)
3585 {
3586 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3587 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3588 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3589 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3590 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3591 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3592 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3593 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3594 return lhs > rhs;
3595 }
3596
3597 /* Helper routine to get the mnemonic name from the assembly instruction
3598    line; should only be called for diagnostic purposes, as it involves a
3599    string copy operation, which may affect the runtime
3600    performance if used elsewhere. */
3601
3602 static const char*
3603 get_mnemonic_name (const char *str)
3604 {
3605 static char mnemonic[32];
3606 char *ptr;
3607
3608   /* Get the first 31 bytes and assume that the full name is included. */
3609 strncpy (mnemonic, str, 31);
3610 mnemonic[31] = '\0';
3611
3612 /* Scan up to the end of the mnemonic, which must end in white space,
3613 '.', or end of string. */
3614 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3615 ;
3616
3617 *ptr = '\0';
3618
3619 /* Append '...' to the truncated long name. */
3620 if (ptr - mnemonic == 31)
3621 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3622
3623 return mnemonic;
3624 }
3625
3626 static void
3627 reset_aarch64_instruction (aarch64_instruction *instruction)
3628 {
3629 memset (instruction, '\0', sizeof (aarch64_instruction));
3630 instruction->reloc.type = BFD_RELOC_UNUSED;
3631 }
3632
3633 /* Data structures for storing operand-related user errors found in the
3634    assembly code. */
3635
3636 struct operand_error_record
3637 {
3638 const aarch64_opcode *opcode;
3639 aarch64_operand_error detail;
3640 struct operand_error_record *next;
3641 };
3642
3643 typedef struct operand_error_record operand_error_record;
3644
3645 struct operand_errors
3646 {
3647 operand_error_record *head;
3648 operand_error_record *tail;
3649 };
3650
3651 typedef struct operand_errors operand_errors;
3652
3653 /* Top-level data structure reporting user errors for the current line of
3654 the assembly code.
3655 The way md_assemble works is that all opcodes sharing the same mnemonic
3656    name are iterated over to find a match to the assembly line. In this
3657    data structure, each such opcode will have one operand_error_record
3658    allocated and inserted. In other words, excessive errors related to
3659 a single opcode are disregarded. */
3660 operand_errors operand_error_report;
3661
3662 /* Free record nodes. */
3663 static operand_error_record *free_opnd_error_record_nodes = NULL;
3664
3665 /* Initialize the data structure that stores the operand mismatch
3666 information on assembling one line of the assembly code. */
3667 static void
3668 init_operand_error_report (void)
3669 {
3670 if (operand_error_report.head != NULL)
3671 {
3672 gas_assert (operand_error_report.tail != NULL);
3673 operand_error_report.tail->next = free_opnd_error_record_nodes;
3674 free_opnd_error_record_nodes = operand_error_report.head;
3675 operand_error_report.head = NULL;
3676 operand_error_report.tail = NULL;
3677 return;
3678 }
3679 gas_assert (operand_error_report.tail == NULL);
3680 }
3681
3682 /* Return TRUE if some operand error has been recorded during the
3683 parsing of the current assembly line using the opcode *OPCODE;
3684 otherwise return FALSE. */
3685 static inline bfd_boolean
3686 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3687 {
3688 operand_error_record *record = operand_error_report.head;
3689 return record && record->opcode == opcode;
3690 }
3691
3692 /* Add the error record *NEW_RECORD to operand_error_report.  The internal
3693    record's OPCODE field is initialized from NEW_RECORD->OPCODE.
3694    N.B. only one record is kept for each opcode, i.e. at most one error is
3695    recorded for each instruction template.  */
3696
3697 static void
3698 add_operand_error_record (const operand_error_record* new_record)
3699 {
3700 const aarch64_opcode *opcode = new_record->opcode;
3701 operand_error_record* record = operand_error_report.head;
3702
3703 /* The record may have been created for this opcode. If not, we need
3704 to prepare one. */
3705 if (! opcode_has_operand_error_p (opcode))
3706 {
3707 /* Get one empty record. */
3708 if (free_opnd_error_record_nodes == NULL)
3709 {
3710 record = xmalloc (sizeof (operand_error_record));
3711 if (record == NULL)
3712 abort ();
3713 }
3714 else
3715 {
3716 record = free_opnd_error_record_nodes;
3717 free_opnd_error_record_nodes = record->next;
3718 }
3719 record->opcode = opcode;
3720 /* Insert at the head. */
3721 record->next = operand_error_report.head;
3722 operand_error_report.head = record;
3723 if (operand_error_report.tail == NULL)
3724 operand_error_report.tail = record;
3725 }
3726 else if (record->detail.kind != AARCH64_OPDE_NIL
3727 && record->detail.index <= new_record->detail.index
3728 && operand_error_higher_severity_p (record->detail.kind,
3729 new_record->detail.kind))
3730 {
3731       /* In the case of multiple errors found on operands related to a
3732 	 single opcode, only record the error of the leftmost operand and
3733 	 only if the error is of higher severity. */
3734 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3735 " the existing error %s on operand %d",
3736 operand_mismatch_kind_names[new_record->detail.kind],
3737 new_record->detail.index,
3738 operand_mismatch_kind_names[record->detail.kind],
3739 record->detail.index);
3740 return;
3741 }
3742
3743 record->detail = new_record->detail;
3744 }
3745
3746 static inline void
3747 record_operand_error_info (const aarch64_opcode *opcode,
3748 aarch64_operand_error *error_info)
3749 {
3750 operand_error_record record;
3751 record.opcode = opcode;
3752 record.detail = *error_info;
3753 add_operand_error_record (&record);
3754 }
3755
3756 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3757 error message *ERROR, for operand IDX (count from 0). */
3758
3759 static void
3760 record_operand_error (const aarch64_opcode *opcode, int idx,
3761 enum aarch64_operand_error_kind kind,
3762 const char* error)
3763 {
3764 aarch64_operand_error info;
3765 memset(&info, 0, sizeof (info));
3766 info.index = idx;
3767 info.kind = kind;
3768 info.error = error;
3769 record_operand_error_info (opcode, &info);
3770 }
3771
3772 static void
3773 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3774 enum aarch64_operand_error_kind kind,
3775 const char* error, const int *extra_data)
3776 {
3777 aarch64_operand_error info;
3778 info.index = idx;
3779 info.kind = kind;
3780 info.error = error;
3781 info.data[0] = extra_data[0];
3782 info.data[1] = extra_data[1];
3783 info.data[2] = extra_data[2];
3784 record_operand_error_info (opcode, &info);
3785 }
3786
3787 static void
3788 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3789 const char* error, int lower_bound,
3790 int upper_bound)
3791 {
3792 int data[3] = {lower_bound, upper_bound, 0};
3793 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3794 error, data);
3795 }
3796
3797 /* Remove the operand error record for *OPCODE. */
3798 static void ATTRIBUTE_UNUSED
3799 remove_operand_error_record (const aarch64_opcode *opcode)
3800 {
3801 if (opcode_has_operand_error_p (opcode))
3802 {
3803 operand_error_record* record = operand_error_report.head;
3804 gas_assert (record != NULL && operand_error_report.tail != NULL);
3805 operand_error_report.head = record->next;
3806 record->next = free_opnd_error_record_nodes;
3807 free_opnd_error_record_nodes = record;
3808 if (operand_error_report.head == NULL)
3809 {
3810 gas_assert (operand_error_report.tail == record);
3811 operand_error_report.tail = NULL;
3812 }
3813 }
3814 }
3815
3816 /* Given the instruction in *INSTR, return the index of the best matched
3817 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3818
3819 Return -1 if there is no qualifier sequence; return the first match
3820    if multiple matches are found.  */
3821
3822 static int
3823 find_best_match (const aarch64_inst *instr,
3824 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3825 {
3826 int i, num_opnds, max_num_matched, idx;
3827
3828 num_opnds = aarch64_num_of_operands (instr->opcode);
3829 if (num_opnds == 0)
3830 {
3831 DEBUG_TRACE ("no operand");
3832 return -1;
3833 }
3834
3835 max_num_matched = 0;
3836 idx = -1;
3837
3838 /* For each pattern. */
3839 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3840 {
3841 int j, num_matched;
3842 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3843
3844       /* Most opcodes have far fewer patterns in the list. */
3845 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3846 {
3847 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3848 if (i != 0 && idx == -1)
3849 /* If nothing has been matched, return the 1st sequence. */
3850 idx = 0;
3851 break;
3852 }
3853
3854 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3855 if (*qualifiers == instr->operands[j].qualifier)
3856 ++num_matched;
3857
3858 if (num_matched > max_num_matched)
3859 {
3860 max_num_matched = num_matched;
3861 idx = i;
3862 }
3863 }
3864
3865 DEBUG_TRACE ("return with %d", idx);
3866 return idx;
3867 }
3868
3869 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3870 corresponding operands in *INSTR. */
3871
3872 static inline void
3873 assign_qualifier_sequence (aarch64_inst *instr,
3874 const aarch64_opnd_qualifier_t *qualifiers)
3875 {
3876 int i = 0;
3877 int num_opnds = aarch64_num_of_operands (instr->opcode);
3878 gas_assert (num_opnds);
3879 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3880 instr->operands[i].qualifier = *qualifiers;
3881 }
3882
3883 /* Print operands for diagnostic purposes. */
3884
3885 static void
3886 print_operands (char *buf, const aarch64_opcode *opcode,
3887 const aarch64_opnd_info *opnds)
3888 {
3889 int i;
3890
3891 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3892 {
3893 const size_t size = 128;
3894 char str[size];
3895
3896       /* We rely mainly on the opcode operand info; however, we also look
3897 	 into inst->operands to support the printing of an optional
3898 	 operand.
3899 	 The two operand codes should be the same in all cases, apart from
3900 	 when the operand can be optional. */
3901 if (opcode->operands[i] == AARCH64_OPND_NIL
3902 || opnds[i].type == AARCH64_OPND_NIL)
3903 break;
3904
3905 /* Generate the operand string in STR. */
3906 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3907
3908 /* Delimiter. */
3909 if (str[0] != '\0')
3910 strcat (buf, i == 0 ? " " : ",");
3911
3912 /* Append the operand string. */
3913 strcat (buf, str);
3914 }
3915 }
3916
3917 /* Output an informational message to stderr. */
3918
3919 static void
3920 output_info (const char *format, ...)
3921 {
3922 char *file;
3923 unsigned int line;
3924 va_list args;
3925
3926 as_where (&file, &line);
3927 if (file)
3928 {
3929 if (line != 0)
3930 fprintf (stderr, "%s:%u: ", file, line);
3931 else
3932 fprintf (stderr, "%s: ", file);
3933 }
3934 fprintf (stderr, _("Info: "));
3935 va_start (args, format);
3936 vfprintf (stderr, format, args);
3937 va_end (args);
3938 (void) putc ('\n', stderr);
3939 }
3940
3941 /* Output one operand error record. */
3942
3943 static void
3944 output_operand_error_record (const operand_error_record *record, char *str)
3945 {
3946 int idx = record->detail.index;
3947 const aarch64_opcode *opcode = record->opcode;
3948 enum aarch64_opnd opd_code = (idx != -1 ? opcode->operands[idx]
3949 : AARCH64_OPND_NIL);
3950 const aarch64_operand_error *detail = &record->detail;
3951
3952 switch (detail->kind)
3953 {
3954 case AARCH64_OPDE_NIL:
3955 gas_assert (0);
3956 break;
3957
3958 case AARCH64_OPDE_SYNTAX_ERROR:
3959 case AARCH64_OPDE_RECOVERABLE:
3960 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
3961 case AARCH64_OPDE_OTHER_ERROR:
3962 gas_assert (idx >= 0);
3963       /* Use the prepared error message if there is one; otherwise use the
3964 	 operand description string to describe the error. */
3965 if (detail->error != NULL)
3966 {
3967 if (detail->index == -1)
3968 as_bad (_("%s -- `%s'"), detail->error, str);
3969 else
3970 as_bad (_("%s at operand %d -- `%s'"),
3971 detail->error, detail->index + 1, str);
3972 }
3973 else
3974 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
3975 aarch64_get_operand_desc (opd_code), str);
3976 break;
3977
3978 case AARCH64_OPDE_INVALID_VARIANT:
3979 as_bad (_("operand mismatch -- `%s'"), str);
3980 if (verbose_error_p)
3981 {
3982 /* We will try to correct the erroneous instruction and also provide
3983 more information e.g. all other valid variants.
3984
3985 The string representation of the corrected instruction and other
3986 valid variants are generated by
3987
3988 1) obtaining the intermediate representation of the erroneous
3989 instruction;
3990 2) manipulating the IR, e.g. replacing the operand qualifier;
3991 3) printing out the instruction by calling the printer functions
3992 shared with the disassembler.
3993
3994 The limitation of this method is that the exact input assembly
3995 line cannot be accurately reproduced in some cases, for example an
3996 optional operand present in the actual assembly line will be
3997 omitted in the output; likewise for the optional syntax rules,
3998 e.g. the # before the immediate. Another limitation is that the
3999 assembly symbols and relocation operations in the assembly line
4000 currently cannot be printed out in the error report. Last but not
4001 	     least, when other errors co-exist with this error, the
4002 	     'corrected' instruction may still be incorrect, e.g. given
4003 'ldnp h0,h1,[x0,#6]!'
4004 this diagnosis will provide the version:
4005 'ldnp s0,s1,[x0,#6]!'
4006 which is still not right. */
4007 size_t len = strlen (get_mnemonic_name (str));
4008 int i, qlf_idx;
4009 bfd_boolean result;
4010 const size_t size = 2048;
4011 char buf[size];
4012 aarch64_inst *inst_base = &inst.base;
4013 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4014
4015 /* Init inst. */
4016 reset_aarch64_instruction (&inst);
4017 inst_base->opcode = opcode;
4018
4019 /* Reset the error report so that there is no side effect on the
4020 following operand parsing. */
4021 init_operand_error_report ();
4022
4023 /* Fill inst. */
4024 result = parse_operands (str + len, opcode)
4025 && programmer_friendly_fixup (&inst);
4026 gas_assert (result);
4027 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4028 NULL, NULL);
4029 gas_assert (!result);
4030
4031 	  /* Find the best-matching qualifier sequence. */
4032 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4033 gas_assert (qlf_idx > -1);
4034
4035 /* Assign the qualifiers. */
4036 assign_qualifier_sequence (inst_base,
4037 opcode->qualifiers_list[qlf_idx]);
4038
4039 /* Print the hint. */
4040 output_info (_(" did you mean this?"));
4041 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4042 print_operands (buf, opcode, inst_base->operands);
4043 output_info (_(" %s"), buf);
4044
4045 /* Print out other variant(s) if there is any. */
4046 if (qlf_idx != 0 ||
4047 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4048 output_info (_(" other valid variant(s):"));
4049
4050 /* For each pattern. */
4051 qualifiers_list = opcode->qualifiers_list;
4052 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4053 {
4054 	      /* Most opcodes have far fewer patterns in the list; the
4055 		 first NIL qualifier indicates the end of the list.  */
4056 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4057 break;
4058
4059 if (i != qlf_idx)
4060 {
4061 /* Mnemonics name. */
4062 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4063
4064 /* Assign the qualifiers. */
4065 assign_qualifier_sequence (inst_base, *qualifiers_list);
4066
4067 /* Print instruction. */
4068 print_operands (buf, opcode, inst_base->operands);
4069
4070 output_info (_(" %s"), buf);
4071 }
4072 }
4073 }
4074 break;
4075
4076 case AARCH64_OPDE_OUT_OF_RANGE:
4077 if (detail->data[0] != detail->data[1])
4078 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4079 detail->error ? detail->error : _("immediate value"),
4080 detail->data[0], detail->data[1], detail->index + 1, str);
4081 else
4082 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4083 detail->error ? detail->error : _("immediate value"),
4084 detail->data[0], detail->index + 1, str);
4085 break;
4086
4087 case AARCH64_OPDE_REG_LIST:
4088 if (detail->data[0] == 1)
4089 as_bad (_("invalid number of registers in the list; "
4090 "only 1 register is expected at operand %d -- `%s'"),
4091 detail->index + 1, str);
4092 else
4093 as_bad (_("invalid number of registers in the list; "
4094 "%d registers are expected at operand %d -- `%s'"),
4095 detail->data[0], detail->index + 1, str);
4096 break;
4097
4098 case AARCH64_OPDE_UNALIGNED:
4099 as_bad (_("immediate value should be a multiple of "
4100 "%d at operand %d -- `%s'"),
4101 detail->data[0], detail->index + 1, str);
4102 break;
4103
4104 default:
4105 gas_assert (0);
4106 break;
4107 }
4108 }
4109
4110 /* Process and output the error message about operand mismatching.
4111
4112    When this function is called, the operand error information has
4113    already been collected for an assembly line; there will be multiple
4114    errors in the case of multiple instruction templates.  Output the
4115    error message that most closely describes the problem.  */
4116
4117 static void
4118 output_operand_error_report (char *str)
4119 {
4120 int largest_error_pos;
4121 const char *msg = NULL;
4122 enum aarch64_operand_error_kind kind;
4123 operand_error_record *curr;
4124 operand_error_record *head = operand_error_report.head;
4125 operand_error_record *record = NULL;
4126
4127 /* No error to report. */
4128 if (head == NULL)
4129 return;
4130
4131 gas_assert (head != NULL && operand_error_report.tail != NULL);
4132
4133 /* Only one error. */
4134 if (head == operand_error_report.tail)
4135 {
4136 DEBUG_TRACE ("single opcode entry with error kind: %s",
4137 operand_mismatch_kind_names[head->detail.kind]);
4138 output_operand_error_record (head, str);
4139 return;
4140 }
4141
4142 /* Find the error kind of the highest severity. */
4143 DEBUG_TRACE ("multiple opcode entres with error kind");
4144 kind = AARCH64_OPDE_NIL;
4145 for (curr = head; curr != NULL; curr = curr->next)
4146 {
4147 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4148 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4149 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4150 kind = curr->detail.kind;
4151 }
4152 gas_assert (kind != AARCH64_OPDE_NIL);
4153
4154   /* Pick one of the errors of KIND to report.  */
4155 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4156 for (curr = head; curr != NULL; curr = curr->next)
4157 {
4158 if (curr->detail.kind != kind)
4159 continue;
4160       /* If there are multiple errors, pick the one with the highest
4161 	 mismatching operand index.  In the case of multiple errors with
4162 	 the same highest operand index, pick the first one, or the
4163 	 first one with a non-NULL error message.  */
4164 if (curr->detail.index > largest_error_pos
4165 || (curr->detail.index == largest_error_pos && msg == NULL
4166 && curr->detail.error != NULL))
4167 {
4168 largest_error_pos = curr->detail.index;
4169 record = curr;
4170 msg = record->detail.error;
4171 }
4172 }
4173
4174 gas_assert (largest_error_pos != -2 && record != NULL);
4175 DEBUG_TRACE ("Pick up error kind %s to report",
4176 operand_mismatch_kind_names[record->detail.kind]);
4177
4178 /* Output. */
4179 output_operand_error_record (record, str);
4180 }
4181 \f
4182 /* Write an AARCH64 instruction to buf - always little-endian. */
4183 static void
4184 put_aarch64_insn (char *buf, uint32_t insn)
4185 {
4186 unsigned char *where = (unsigned char *) buf;
4187 where[0] = insn;
4188 where[1] = insn >> 8;
4189 where[2] = insn >> 16;
4190 where[3] = insn >> 24;
4191 }
4192
4193 static uint32_t
4194 get_aarch64_insn (char *buf)
4195 {
4196 unsigned char *where = (unsigned char *) buf;
4197 uint32_t result;
4198 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4199 return result;
4200 }
4201
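/* Illustrative example, not part of the original code: instructions are
   stored little-endian regardless of the target's data endianness.  Using
   the NOP encoding 0xd503201f (the same value used for alignment padding
   later in this file):

     char buf[4];
     put_aarch64_insn (buf, 0xd503201f);
     => buf now holds { 0x1f, 0x20, 0x03, 0xd5 }
     get_aarch64_insn (buf) => 0xd503201f  */
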
4202 static void
4203 output_inst (struct aarch64_inst *new_inst)
4204 {
4205 char *to = NULL;
4206
4207 to = frag_more (INSN_SIZE);
4208
4209 frag_now->tc_frag_data.recorded = 1;
4210
4211 put_aarch64_insn (to, inst.base.value);
4212
4213 if (inst.reloc.type != BFD_RELOC_UNUSED)
4214 {
4215 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4216 INSN_SIZE, &inst.reloc.exp,
4217 inst.reloc.pc_rel,
4218 inst.reloc.type);
4219 DEBUG_TRACE ("Prepared relocation fix up");
4220 /* Don't check the addend value against the instruction size,
4221 that's the job of our code in md_apply_fix(). */
4222 fixp->fx_no_overflow = 1;
4223 if (new_inst != NULL)
4224 fixp->tc_fix_data.inst = new_inst;
4225 if (aarch64_gas_internal_fixup_p ())
4226 {
4227 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4228 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4229 fixp->fx_addnumber = inst.reloc.flags;
4230 }
4231 }
4232
4233 dwarf2_emit_insn (INSN_SIZE);
4234 }
4235
4236 /* Link together opcodes of the same name. */
4237
4238 struct templates
4239 {
4240 aarch64_opcode *opcode;
4241 struct templates *next;
4242 };
4243
4244 typedef struct templates templates;
4245
4246 static templates *
4247 lookup_mnemonic (const char *start, int len)
4248 {
4249 templates *templ = NULL;
4250
4251 templ = hash_find_n (aarch64_ops_hsh, start, len);
4252 return templ;
4253 }
4254
4255 /* Subroutine of md_assemble, responsible for looking up the primary
4256 opcode from the mnemonic the user wrote. STR points to the
4257 beginning of the mnemonic. */
4258
4259 static templates *
4260 opcode_lookup (char **str)
4261 {
4262 char *end, *base;
4263 const aarch64_cond *cond;
4264 char condname[16];
4265 int len;
4266
4267 /* Scan up to the end of the mnemonic, which must end in white space,
4268 '.', or end of string. */
4269 for (base = end = *str; is_part_of_name(*end); end++)
4270 if (*end == '.')
4271 break;
4272
4273 if (end == base)
4274 return 0;
4275
4276 inst.cond = COND_ALWAYS;
4277
4278 /* Handle a possible condition. */
4279 if (end[0] == '.')
4280 {
4281 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4282 if (cond)
4283 {
4284 inst.cond = cond->value;
4285 *str = end + 3;
4286 }
4287 else
4288 {
4289 *str = end;
4290 return 0;
4291 }
4292 }
4293 else
4294 *str = end;
4295
4296 len = end - base;
4297
4298 if (inst.cond == COND_ALWAYS)
4299 {
4300 /* Look for unaffixed mnemonic. */
4301 return lookup_mnemonic (base, len);
4302 }
4303 else if (len <= 13)
4304 {
4305 /* append ".c" to mnemonic if conditional */
4306 memcpy (condname, base, len);
4307 memcpy (condname + len, ".c", 2);
4308 base = condname;
4309 len += 2;
4310 return lookup_mnemonic (base, len);
4311 }
4312
4313 return NULL;
4314 }
4315
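/* Worked example (illustrative, not from the original sources): for the
   line "b.eq lab", BASE points at "b", the two characters after the dot
   are looked up in aarch64_cond_hsh giving the EQ condition, *STR is
   advanced past ".eq" (3 characters), and the key finally looked up in
   the opcode hash is "b.c" (the base name with ".c" appended) -- which is
   presumably how the conditional templates are keyed in the opcode table.
   An unconditional mnemonic such as "add" takes the COND_ALWAYS path and
   is looked up unmodified.  */
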
4316 /* Internal helper routine converting a vector neon_type_el structure
4317 *VECTYPE to a corresponding operand qualifier. */
4318
4319 static inline aarch64_opnd_qualifier_t
4320 vectype_to_qualifier (const struct neon_type_el *vectype)
4321 {
4322 /* Element size in bytes indexed by neon_el_type. */
4323 const unsigned char ele_size[5]
4324 = {1, 2, 4, 8, 16};
4325
4326 if (!vectype->defined || vectype->type == NT_invtype)
4327 goto vectype_conversion_fail;
4328
4329 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4330
4331 if (vectype->defined & NTA_HASINDEX)
4332 /* Vector element register. */
4333 return AARCH64_OPND_QLF_S_B + vectype->type;
4334 else
4335 {
4336 /* Vector register. */
4337 int reg_size = ele_size[vectype->type] * vectype->width;
4338 unsigned offset;
4339 if (reg_size != 16 && reg_size != 8)
4340 goto vectype_conversion_fail;
4341 /* The conversion is calculated based on the relation of the order of
4342 qualifiers to the vector element size and vector register size. */
4343 offset = (vectype->type == NT_q)
4344 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4345 gas_assert (offset <= 8);
4346 return AARCH64_OPND_QLF_V_8B + offset;
4347 }
4348
4349 vectype_conversion_fail:
4350 first_error (_("bad vector arrangement type"));
4351 return AARCH64_OPND_QLF_NIL;
4352 }
4353
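/* Worked example for the conversion above (illustrative, assuming the
   qualifier enumeration is ordered 8B, 16B, 4H, 8H, 2S, 4S, 1D, 2D, ...
   starting at AARCH64_OPND_QLF_V_8B, as the offset arithmetic requires):

     v0.16b : type NT_b (0), width 16, ele_size 1 -> reg_size 16
              offset = (0 << 1) + (16 >> 4) = 1 -> AARCH64_OPND_QLF_V_16B
     v2.4h  : type NT_h (1), width 4,  ele_size 2 -> reg_size 8
              offset = (1 << 1) + (8 >> 4)  = 2 -> AARCH64_OPND_QLF_V_4H
     v7.2d  : type NT_d (3), width 2,  ele_size 8 -> reg_size 16
              offset = (3 << 1) + (16 >> 4) = 7 -> AARCH64_OPND_QLF_V_2D  */
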
4354 /* Process an optional operand that has been omitted from the assembly line.
4355 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4356 instruction's opcode entry while IDX is the index of this omitted operand.
4357 */
4358
4359 static void
4360 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4361 int idx, aarch64_opnd_info *operand)
4362 {
4363 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4364 gas_assert (optional_operand_p (opcode, idx));
4365 gas_assert (!operand->present);
4366
4367 switch (type)
4368 {
4369 case AARCH64_OPND_Rd:
4370 case AARCH64_OPND_Rn:
4371 case AARCH64_OPND_Rm:
4372 case AARCH64_OPND_Rt:
4373 case AARCH64_OPND_Rt2:
4374 case AARCH64_OPND_Rs:
4375 case AARCH64_OPND_Ra:
4376 case AARCH64_OPND_Rt_SYS:
4377 case AARCH64_OPND_Rd_SP:
4378 case AARCH64_OPND_Rn_SP:
4379 case AARCH64_OPND_Fd:
4380 case AARCH64_OPND_Fn:
4381 case AARCH64_OPND_Fm:
4382 case AARCH64_OPND_Fa:
4383 case AARCH64_OPND_Ft:
4384 case AARCH64_OPND_Ft2:
4385 case AARCH64_OPND_Sd:
4386 case AARCH64_OPND_Sn:
4387 case AARCH64_OPND_Sm:
4388 case AARCH64_OPND_Vd:
4389 case AARCH64_OPND_Vn:
4390 case AARCH64_OPND_Vm:
4391 case AARCH64_OPND_VdD1:
4392 case AARCH64_OPND_VnD1:
4393 operand->reg.regno = default_value;
4394 break;
4395
4396 case AARCH64_OPND_Ed:
4397 case AARCH64_OPND_En:
4398 case AARCH64_OPND_Em:
4399 operand->reglane.regno = default_value;
4400 break;
4401
4402 case AARCH64_OPND_IDX:
4403 case AARCH64_OPND_BIT_NUM:
4404 case AARCH64_OPND_IMMR:
4405 case AARCH64_OPND_IMMS:
4406 case AARCH64_OPND_SHLL_IMM:
4407 case AARCH64_OPND_IMM_VLSL:
4408 case AARCH64_OPND_IMM_VLSR:
4409 case AARCH64_OPND_CCMP_IMM:
4410 case AARCH64_OPND_FBITS:
4411 case AARCH64_OPND_UIMM4:
4412 case AARCH64_OPND_UIMM3_OP1:
4413 case AARCH64_OPND_UIMM3_OP2:
4414 case AARCH64_OPND_IMM:
4415 case AARCH64_OPND_WIDTH:
4416 case AARCH64_OPND_UIMM7:
4417 case AARCH64_OPND_NZCV:
4418 operand->imm.value = default_value;
4419 break;
4420
4421 case AARCH64_OPND_EXCEPTION:
4422 inst.reloc.type = BFD_RELOC_UNUSED;
4423 break;
4424
4425 case AARCH64_OPND_BARRIER_ISB:
4426 operand->barrier = aarch64_barrier_options + default_value;
4427
4428 default:
4429 break;
4430 }
4431 }
4432
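/* Illustrative example, not part of the original code: for "ret" written
   with its optional register operand omitted, parsing of the operand
   fails, the backtrack logic in parse_operands calls this function, and
   (assuming the opcode table specifies 30 as the default value for RET's
   optional Rn operand) operand->reg.regno is filled in with 30, so the
   instruction assembles as if it had been written "ret x30".  */
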
4433 /* Process the relocation type for move wide instructions.
4434 Return TRUE on success; otherwise return FALSE. */
4435
4436 static bfd_boolean
4437 process_movw_reloc_info (void)
4438 {
4439 int is32;
4440 unsigned shift;
4441
4442 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4443
4444 if (inst.base.opcode->op == OP_MOVK)
4445 switch (inst.reloc.type)
4446 {
4447 case BFD_RELOC_AARCH64_MOVW_G0_S:
4448 case BFD_RELOC_AARCH64_MOVW_G1_S:
4449 case BFD_RELOC_AARCH64_MOVW_G2_S:
4450 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4451 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4452 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4453 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4454 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4455 set_syntax_error
4456 (_("the specified relocation type is not allowed for MOVK"));
4457 return FALSE;
4458 default:
4459 break;
4460 }
4461
4462 switch (inst.reloc.type)
4463 {
4464 case BFD_RELOC_AARCH64_MOVW_G0:
4465 case BFD_RELOC_AARCH64_MOVW_G0_S:
4466 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4467 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4468 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4469 shift = 0;
4470 break;
4471 case BFD_RELOC_AARCH64_MOVW_G1:
4472 case BFD_RELOC_AARCH64_MOVW_G1_S:
4473 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4474 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4475 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4476 shift = 16;
4477 break;
4478 case BFD_RELOC_AARCH64_MOVW_G2:
4479 case BFD_RELOC_AARCH64_MOVW_G2_S:
4480 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4481 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4482 if (is32)
4483 {
4484 set_fatal_syntax_error
4485 (_("the specified relocation type is not allowed for 32-bit "
4486 "register"));
4487 return FALSE;
4488 }
4489 shift = 32;
4490 break;
4491 case BFD_RELOC_AARCH64_MOVW_G3:
4492 if (is32)
4493 {
4494 set_fatal_syntax_error
4495 (_("the specified relocation type is not allowed for 32-bit "
4496 "register"));
4497 return FALSE;
4498 }
4499 shift = 48;
4500 break;
4501 default:
4502 /* More cases should be added when more MOVW-related relocation types
4503 are supported in GAS. */
4504 gas_assert (aarch64_gas_internal_fixup_p ());
4505 /* The shift amount should have already been set by the parser. */
4506 return TRUE;
4507 }
4508 inst.base.operands[1].shifter.amount = shift;
4509 return TRUE;
4510 }
4511
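/* Worked example (illustrative, assuming the usual ":abs_gN:" relocation
   operator spellings): "movz x0, #:abs_g2:sym" is parsed with
   inst.reloc.type == BFD_RELOC_AARCH64_MOVW_G2, so the function above sets
   operands[1].shifter.amount to 32; the same line with a W-register
   destination ("movz w0, #:abs_g2:sym") is rejected, since a 32-bit
   register cannot take the 32- or 48-bit shifted groups.  */
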
4512 /* A primitive log calculator.  */
4513
4514 static inline unsigned int
4515 get_logsz (unsigned int size)
4516 {
4517 const unsigned char ls[16] =
4518 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4519 if (size > 16)
4520 {
4521 gas_assert (0);
4522 return -1;
4523 }
4524 gas_assert (ls[size - 1] != (unsigned char)-1);
4525 return ls[size - 1];
4526 }
4527
4528 /* Determine and return the real reloc type code for an instruction
4529 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4530
4531 static inline bfd_reloc_code_real_type
4532 ldst_lo12_determine_real_reloc_type (void)
4533 {
4534 int logsz;
4535 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4536 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4537
4538 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4539 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4540 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4541 BFD_RELOC_AARCH64_LDST128_LO12
4542 };
4543
4544 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4545 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4546
4547 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4548 opd1_qlf =
4549 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4550 1, opd0_qlf, 0);
4551 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4552
4553 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4554 gas_assert (logsz >= 0 && logsz <= 4);
4555
4556 return reloc_ldst_lo12[logsz];
4557 }
4558
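/* Worked example (illustrative, assuming the usual ":lo12:" operator
   spelling): for "ldr x0, [x1, #:lo12:sym]" the transferred register is
   an X register, so opd1_qlf resolves (via aarch64_get_expected_qualifier)
   to a qualifier with an 8-byte element size, get_logsz (8) returns 3 and
   the pseudo relocation BFD_RELOC_AARCH64_LDST_LO12 is narrowed to
   BFD_RELOC_AARCH64_LDST64_LO12.  "ldrb w0, [x1, #:lo12:sym]" would
   instead end up with BFD_RELOC_AARCH64_LDST8_LO12.  */
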
4559 /* Check whether a register list REGINFO is valid. The registers must be
4560 numbered in increasing order (modulo 32), in increments of one or two.
4561
4562 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4563 increments of two.
4564
4565 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4566
4567 static bfd_boolean
4568 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4569 {
4570 uint32_t i, nb_regs, prev_regno, incr;
4571
4572 nb_regs = 1 + (reginfo & 0x3);
4573 reginfo >>= 2;
4574 prev_regno = reginfo & 0x1f;
4575 incr = accept_alternate ? 2 : 1;
4576
4577 for (i = 1; i < nb_regs; ++i)
4578 {
4579 uint32_t curr_regno;
4580 reginfo >>= 5;
4581 curr_regno = reginfo & 0x1f;
4582 if (curr_regno != ((prev_regno + incr) & 0x1f))
4583 return FALSE;
4584 prev_regno = curr_regno;
4585 }
4586
4587 return TRUE;
4588 }
4589
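/* Worked example (illustrative): REGINFO packs the register list as
   produced by parse_neon_reg_list: bits [1:0] hold the number of
   registers minus one, bits [6:2] the first register number, and each
   subsequent register number occupies the next five bits.  So the list
   { v4, v5, v6 } is encoded as 2 | (4 << 2) | (5 << 7) | (6 << 12) and is
   accepted, as is the wrap-around list { v31, v0 }, since the comparison
   is done modulo 32; { v4, v6 } is only accepted when ACCEPT_ALTERNATE
   requests increments of two.  */
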
4590 /* Generic instruction operand parser. This does no encoding and no
4591 semantic validation; it merely squirrels values away in the inst
4592 structure. Returns TRUE or FALSE depending on whether the
4593 specified grammar matched. */
4594
4595 static bfd_boolean
4596 parse_operands (char *str, const aarch64_opcode *opcode)
4597 {
4598 int i;
4599 char *backtrack_pos = 0;
4600 const enum aarch64_opnd *operands = opcode->operands;
4601
4602 clear_error ();
4603 skip_whitespace (str);
4604
4605 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4606 {
4607 int64_t val;
4608 int isreg32, isregzero;
4609 int comma_skipped_p = 0;
4610 aarch64_reg_type rtype;
4611 struct neon_type_el vectype;
4612 aarch64_opnd_info *info = &inst.base.operands[i];
4613
4614 DEBUG_TRACE ("parse operand %d", i);
4615
4616 /* Assign the operand code. */
4617 info->type = operands[i];
4618
4619 if (optional_operand_p (opcode, i))
4620 {
4621 /* Remember where we are in case we need to backtrack. */
4622 gas_assert (!backtrack_pos);
4623 backtrack_pos = str;
4624 }
4625
4626       /* Expect a comma between operands; the backtrack mechanism will take
4627 	 care of the case of an omitted optional operand.  */
4628 if (i > 0 && ! skip_past_char (&str, ','))
4629 {
4630 set_syntax_error (_("comma expected between operands"));
4631 goto failure;
4632 }
4633 else
4634 comma_skipped_p = 1;
4635
4636 switch (operands[i])
4637 {
4638 case AARCH64_OPND_Rd:
4639 case AARCH64_OPND_Rn:
4640 case AARCH64_OPND_Rm:
4641 case AARCH64_OPND_Rt:
4642 case AARCH64_OPND_Rt2:
4643 case AARCH64_OPND_Rs:
4644 case AARCH64_OPND_Ra:
4645 case AARCH64_OPND_Rt_SYS:
4646 po_int_reg_or_fail (1, 0);
4647 break;
4648
4649 case AARCH64_OPND_Rd_SP:
4650 case AARCH64_OPND_Rn_SP:
4651 po_int_reg_or_fail (0, 1);
4652 break;
4653
4654 case AARCH64_OPND_Rm_EXT:
4655 case AARCH64_OPND_Rm_SFT:
4656 po_misc_or_fail (parse_shifter_operand
4657 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4658 ? SHIFTED_ARITH_IMM
4659 : SHIFTED_LOGIC_IMM)));
4660 if (!info->shifter.operator_present)
4661 {
4662 /* Default to LSL if not present. Libopcodes prefers shifter
4663 kind to be explicit. */
4664 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4665 info->shifter.kind = AARCH64_MOD_LSL;
4666 	      /* For Rm_EXT, libopcodes will carry out a further check on whether
4667 		 or not the stack pointer is used in the instruction (Recall that
4668 "the extend operator is not optional unless at least one of
4669 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4670 }
4671 break;
4672
4673 case AARCH64_OPND_Fd:
4674 case AARCH64_OPND_Fn:
4675 case AARCH64_OPND_Fm:
4676 case AARCH64_OPND_Fa:
4677 case AARCH64_OPND_Ft:
4678 case AARCH64_OPND_Ft2:
4679 case AARCH64_OPND_Sd:
4680 case AARCH64_OPND_Sn:
4681 case AARCH64_OPND_Sm:
4682 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4683 if (val == PARSE_FAIL)
4684 {
4685 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4686 goto failure;
4687 }
4688 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4689
4690 info->reg.regno = val;
4691 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4692 break;
4693
4694 case AARCH64_OPND_Vd:
4695 case AARCH64_OPND_Vn:
4696 case AARCH64_OPND_Vm:
4697 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4698 if (val == PARSE_FAIL)
4699 {
4700 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4701 goto failure;
4702 }
4703 if (vectype.defined & NTA_HASINDEX)
4704 goto failure;
4705
4706 info->reg.regno = val;
4707 info->qualifier = vectype_to_qualifier (&vectype);
4708 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4709 goto failure;
4710 break;
4711
4712 case AARCH64_OPND_VdD1:
4713 case AARCH64_OPND_VnD1:
4714 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4715 if (val == PARSE_FAIL)
4716 {
4717 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4718 goto failure;
4719 }
4720 if (vectype.type != NT_d || vectype.index != 1)
4721 {
4722 set_fatal_syntax_error
4723 (_("the top half of a 128-bit FP/SIMD register is expected"));
4724 goto failure;
4725 }
4726 info->reg.regno = val;
4727 	  /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
4728 	     here; this is correct for the purpose of encoding/decoding since
4729 only the register number is explicitly encoded in the related
4730 instructions, although this appears a bit hacky. */
4731 info->qualifier = AARCH64_OPND_QLF_S_D;
4732 break;
4733
4734 case AARCH64_OPND_Ed:
4735 case AARCH64_OPND_En:
4736 case AARCH64_OPND_Em:
4737 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4738 if (val == PARSE_FAIL)
4739 {
4740 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4741 goto failure;
4742 }
4743 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4744 goto failure;
4745
4746 info->reglane.regno = val;
4747 info->reglane.index = vectype.index;
4748 info->qualifier = vectype_to_qualifier (&vectype);
4749 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4750 goto failure;
4751 break;
4752
4753 case AARCH64_OPND_LVn:
4754 case AARCH64_OPND_LVt:
4755 case AARCH64_OPND_LVt_AL:
4756 case AARCH64_OPND_LEt:
4757 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4758 goto failure;
4759 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4760 {
4761 set_fatal_syntax_error (_("invalid register list"));
4762 goto failure;
4763 }
4764 info->reglist.first_regno = (val >> 2) & 0x1f;
4765 info->reglist.num_regs = (val & 0x3) + 1;
4766 if (operands[i] == AARCH64_OPND_LEt)
4767 {
4768 if (!(vectype.defined & NTA_HASINDEX))
4769 goto failure;
4770 info->reglist.has_index = 1;
4771 info->reglist.index = vectype.index;
4772 }
4773 else if (!(vectype.defined & NTA_HASTYPE))
4774 goto failure;
4775 info->qualifier = vectype_to_qualifier (&vectype);
4776 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4777 goto failure;
4778 break;
4779
4780 case AARCH64_OPND_Cn:
4781 case AARCH64_OPND_Cm:
4782 po_reg_or_fail (REG_TYPE_CN);
4783 if (val > 15)
4784 {
4785 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4786 goto failure;
4787 }
4788 inst.base.operands[i].reg.regno = val;
4789 break;
4790
4791 case AARCH64_OPND_SHLL_IMM:
4792 case AARCH64_OPND_IMM_VLSR:
4793 po_imm_or_fail (1, 64);
4794 info->imm.value = val;
4795 break;
4796
4797 case AARCH64_OPND_CCMP_IMM:
4798 case AARCH64_OPND_FBITS:
4799 case AARCH64_OPND_UIMM4:
4800 case AARCH64_OPND_UIMM3_OP1:
4801 case AARCH64_OPND_UIMM3_OP2:
4802 case AARCH64_OPND_IMM_VLSL:
4803 case AARCH64_OPND_IMM:
4804 case AARCH64_OPND_WIDTH:
4805 po_imm_nc_or_fail ();
4806 info->imm.value = val;
4807 break;
4808
4809 case AARCH64_OPND_UIMM7:
4810 po_imm_or_fail (0, 127);
4811 info->imm.value = val;
4812 break;
4813
4814 case AARCH64_OPND_IDX:
4815 case AARCH64_OPND_BIT_NUM:
4816 case AARCH64_OPND_IMMR:
4817 case AARCH64_OPND_IMMS:
4818 po_imm_or_fail (0, 63);
4819 info->imm.value = val;
4820 break;
4821
4822 case AARCH64_OPND_IMM0:
4823 po_imm_nc_or_fail ();
4824 if (val != 0)
4825 {
4826 set_fatal_syntax_error (_("immediate zero expected"));
4827 goto failure;
4828 }
4829 info->imm.value = 0;
4830 break;
4831
4832 case AARCH64_OPND_FPIMM0:
4833 {
4834 int qfloat;
4835 bfd_boolean res1 = FALSE, res2 = FALSE;
4836 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4837 it is probably not worth the effort to support it. */
4838 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4839 && !(res2 = parse_constant_immediate (&str, &val)))
4840 goto failure;
4841 if ((res1 && qfloat == 0) || (res2 && val == 0))
4842 {
4843 info->imm.value = 0;
4844 info->imm.is_fp = 1;
4845 break;
4846 }
4847 set_fatal_syntax_error (_("immediate zero expected"));
4848 goto failure;
4849 }
4850
4851 case AARCH64_OPND_IMM_MOV:
4852 {
4853 char *saved = str;
4854 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
4855 reg_name_p (str, REG_TYPE_VN))
4856 goto failure;
4857 str = saved;
4858 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4859 GE_OPT_PREFIX, 1));
4860 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4861 later. fix_mov_imm_insn will try to determine a machine
4862 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4863 message if the immediate cannot be moved by a single
4864 instruction. */
4865 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4866 inst.base.operands[i].skip = 1;
4867 }
4868 break;
4869
4870 case AARCH64_OPND_SIMD_IMM:
4871 case AARCH64_OPND_SIMD_IMM_SFT:
4872 if (! parse_big_immediate (&str, &val))
4873 goto failure;
4874 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4875 /* addr_off_p */ 0,
4876 /* need_libopcodes_p */ 1,
4877 /* skip_p */ 1);
4878 /* Parse shift.
4879 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4880 shift, we don't check it here; we leave the checking to
4881 the libopcodes (operand_general_constraint_met_p). By
4882 doing this, we achieve better diagnostics. */
4883 if (skip_past_comma (&str)
4884 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4885 goto failure;
4886 if (!info->shifter.operator_present
4887 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4888 {
4889 /* Default to LSL if not present. Libopcodes prefers shifter
4890 kind to be explicit. */
4891 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4892 info->shifter.kind = AARCH64_MOD_LSL;
4893 }
4894 break;
4895
4896 case AARCH64_OPND_FPIMM:
4897 case AARCH64_OPND_SIMD_FPIMM:
4898 {
4899 int qfloat;
4900 bfd_boolean dp_p
4901 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
4902 == 8);
4903 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
4904 goto failure;
4905 if (qfloat == 0)
4906 {
4907 set_fatal_syntax_error (_("invalid floating-point constant"));
4908 goto failure;
4909 }
4910 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4911 inst.base.operands[i].imm.is_fp = 1;
4912 }
4913 break;
4914
4915 case AARCH64_OPND_LIMM:
4916 po_misc_or_fail (parse_shifter_operand (&str, info,
4917 SHIFTED_LOGIC_IMM));
4918 if (info->shifter.operator_present)
4919 {
4920 set_fatal_syntax_error
4921 (_("shift not allowed for bitmask immediate"));
4922 goto failure;
4923 }
4924 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4925 /* addr_off_p */ 0,
4926 /* need_libopcodes_p */ 1,
4927 /* skip_p */ 1);
4928 break;
4929
4930 case AARCH64_OPND_AIMM:
4931 if (opcode->op == OP_ADD)
4932 /* ADD may have relocation types. */
4933 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
4934 SHIFTED_ARITH_IMM));
4935 else
4936 po_misc_or_fail (parse_shifter_operand (&str, info,
4937 SHIFTED_ARITH_IMM));
4938 switch (inst.reloc.type)
4939 {
4940 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4941 info->shifter.amount = 12;
4942 break;
4943 case BFD_RELOC_UNUSED:
4944 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4945 if (info->shifter.kind != AARCH64_MOD_NONE)
4946 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
4947 inst.reloc.pc_rel = 0;
4948 break;
4949 default:
4950 break;
4951 }
4952 info->imm.value = 0;
4953 if (!info->shifter.operator_present)
4954 {
4955 /* Default to LSL if not present. Libopcodes prefers shifter
4956 kind to be explicit. */
4957 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4958 info->shifter.kind = AARCH64_MOD_LSL;
4959 }
4960 break;
4961
4962 case AARCH64_OPND_HALF:
4963 {
4964 /* #<imm16> or relocation. */
4965 int internal_fixup_p;
4966 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
4967 if (internal_fixup_p)
4968 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4969 skip_whitespace (str);
4970 if (skip_past_comma (&str))
4971 {
4972 /* {, LSL #<shift>} */
4973 if (! aarch64_gas_internal_fixup_p ())
4974 {
4975 set_fatal_syntax_error (_("can't mix relocation modifier "
4976 "with explicit shift"));
4977 goto failure;
4978 }
4979 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
4980 }
4981 else
4982 inst.base.operands[i].shifter.amount = 0;
4983 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
4984 inst.base.operands[i].imm.value = 0;
4985 if (! process_movw_reloc_info ())
4986 goto failure;
4987 }
4988 break;
4989
4990 case AARCH64_OPND_EXCEPTION:
4991 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
4992 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4993 /* addr_off_p */ 0,
4994 /* need_libopcodes_p */ 0,
4995 /* skip_p */ 1);
4996 break;
4997
4998 case AARCH64_OPND_NZCV:
4999 {
5000 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5001 if (nzcv != NULL)
5002 {
5003 str += 4;
5004 info->imm.value = nzcv->value;
5005 break;
5006 }
5007 po_imm_or_fail (0, 15);
5008 info->imm.value = val;
5009 }
5010 break;
5011
5012 case AARCH64_OPND_COND:
5013 case AARCH64_OPND_COND1:
5014 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5015 str += 2;
5016 if (info->cond == NULL)
5017 {
5018 set_syntax_error (_("invalid condition"));
5019 goto failure;
5020 }
5021 else if (operands[i] == AARCH64_OPND_COND1
5022 && (info->cond->value & 0xe) == 0xe)
5023 {
5024 	      /* Do not allow AL or NV.  */
5025 set_default_error ();
5026 goto failure;
5027 }
5028 break;
5029
5030 case AARCH64_OPND_ADDR_ADRP:
5031 po_misc_or_fail (parse_adrp (&str));
5032 /* Clear the value as operand needs to be relocated. */
5033 info->imm.value = 0;
5034 break;
5035
5036 case AARCH64_OPND_ADDR_PCREL14:
5037 case AARCH64_OPND_ADDR_PCREL19:
5038 case AARCH64_OPND_ADDR_PCREL21:
5039 case AARCH64_OPND_ADDR_PCREL26:
5040 po_misc_or_fail (parse_address_reloc (&str, info));
5041 if (!info->addr.pcrel)
5042 {
5043 set_syntax_error (_("invalid pc-relative address"));
5044 goto failure;
5045 }
5046 if (inst.gen_lit_pool
5047 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5048 {
5049 /* Only permit "=value" in the literal load instructions.
5050 The literal will be generated by programmer_friendly_fixup. */
5051 set_syntax_error (_("invalid use of \"=immediate\""));
5052 goto failure;
5053 }
5054 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5055 {
5056 set_syntax_error (_("unrecognized relocation suffix"));
5057 goto failure;
5058 }
5059 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5060 {
5061 info->imm.value = inst.reloc.exp.X_add_number;
5062 inst.reloc.type = BFD_RELOC_UNUSED;
5063 }
5064 else
5065 {
5066 info->imm.value = 0;
5067 if (inst.reloc.type == BFD_RELOC_UNUSED)
5068 switch (opcode->iclass)
5069 {
5070 case compbranch:
5071 case condbranch:
5072 /* e.g. CBZ or B.COND */
5073 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5074 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5075 break;
5076 case testbranch:
5077 /* e.g. TBZ */
5078 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5079 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5080 break;
5081 case branch_imm:
5082 /* e.g. B or BL */
5083 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5084 inst.reloc.type =
5085 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5086 : BFD_RELOC_AARCH64_JUMP26;
5087 break;
5088 case loadlit:
5089 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5090 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5091 break;
5092 case pcreladdr:
5093 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5094 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5095 break;
5096 default:
5097 gas_assert (0);
5098 abort ();
5099 }
5100 inst.reloc.pc_rel = 1;
5101 }
5102 break;
5103
5104 case AARCH64_OPND_ADDR_SIMPLE:
5105 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5106 /* [<Xn|SP>{, #<simm>}] */
5107 po_char_or_fail ('[');
5108 po_reg_or_fail (REG_TYPE_R64_SP);
5109 /* Accept optional ", #0". */
5110 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5111 && skip_past_char (&str, ','))
5112 {
5113 skip_past_char (&str, '#');
5114 if (! skip_past_char (&str, '0'))
5115 {
5116 set_fatal_syntax_error
5117 (_("the optional immediate offset can only be 0"));
5118 goto failure;
5119 }
5120 }
5121 po_char_or_fail (']');
5122 info->addr.base_regno = val;
5123 break;
5124
5125 case AARCH64_OPND_ADDR_REGOFF:
5126 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5127 po_misc_or_fail (parse_address (&str, info, 0));
5128 if (info->addr.pcrel || !info->addr.offset.is_reg
5129 || !info->addr.preind || info->addr.postind
5130 || info->addr.writeback)
5131 {
5132 set_syntax_error (_("invalid addressing mode"));
5133 goto failure;
5134 }
5135 if (!info->shifter.operator_present)
5136 {
5137 /* Default to LSL if not present. Libopcodes prefers shifter
5138 kind to be explicit. */
5139 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5140 info->shifter.kind = AARCH64_MOD_LSL;
5141 }
5142 /* Qualifier to be deduced by libopcodes. */
5143 break;
5144
5145 case AARCH64_OPND_ADDR_SIMM7:
5146 po_misc_or_fail (parse_address (&str, info, 0));
5147 if (info->addr.pcrel || info->addr.offset.is_reg
5148 || (!info->addr.preind && !info->addr.postind))
5149 {
5150 set_syntax_error (_("invalid addressing mode"));
5151 goto failure;
5152 }
5153 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5154 /* addr_off_p */ 1,
5155 /* need_libopcodes_p */ 1,
5156 /* skip_p */ 0);
5157 break;
5158
5159 case AARCH64_OPND_ADDR_SIMM9:
5160 case AARCH64_OPND_ADDR_SIMM9_2:
5161 po_misc_or_fail (parse_address_reloc (&str, info));
5162 if (info->addr.pcrel || info->addr.offset.is_reg
5163 || (!info->addr.preind && !info->addr.postind)
5164 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5165 && info->addr.writeback))
5166 {
5167 set_syntax_error (_("invalid addressing mode"));
5168 goto failure;
5169 }
5170 if (inst.reloc.type != BFD_RELOC_UNUSED)
5171 {
5172 set_syntax_error (_("relocation not allowed"));
5173 goto failure;
5174 }
5175 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5176 /* addr_off_p */ 1,
5177 /* need_libopcodes_p */ 1,
5178 /* skip_p */ 0);
5179 break;
5180
5181 case AARCH64_OPND_ADDR_UIMM12:
5182 po_misc_or_fail (parse_address_reloc (&str, info));
5183 if (info->addr.pcrel || info->addr.offset.is_reg
5184 || !info->addr.preind || info->addr.writeback)
5185 {
5186 set_syntax_error (_("invalid addressing mode"));
5187 goto failure;
5188 }
5189 if (inst.reloc.type == BFD_RELOC_UNUSED)
5190 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5191 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5192 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5193 /* Leave qualifier to be determined by libopcodes. */
5194 break;
5195
5196 case AARCH64_OPND_SIMD_ADDR_POST:
5197 /* [<Xn|SP>], <Xm|#<amount>> */
5198 po_misc_or_fail (parse_address (&str, info, 1));
5199 if (!info->addr.postind || !info->addr.writeback)
5200 {
5201 set_syntax_error (_("invalid addressing mode"));
5202 goto failure;
5203 }
5204 if (!info->addr.offset.is_reg)
5205 {
5206 if (inst.reloc.exp.X_op == O_constant)
5207 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5208 else
5209 {
5210 set_fatal_syntax_error
5211 (_("writeback value should be an immediate constant"));
5212 goto failure;
5213 }
5214 }
5215 /* No qualifier. */
5216 break;
5217
5218 case AARCH64_OPND_SYSREG:
5219 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5220 == PARSE_FAIL)
5221 {
5222 set_syntax_error (_("unknown or missing system register name"));
5223 goto failure;
5224 }
5225 inst.base.operands[i].sysreg = val;
5226 break;
5227
5228 case AARCH64_OPND_PSTATEFIELD:
5229 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5230 == PARSE_FAIL)
5231 {
5232 set_syntax_error (_("unknown or missing PSTATE field name"));
5233 goto failure;
5234 }
5235 inst.base.operands[i].pstatefield = val;
5236 break;
5237
5238 case AARCH64_OPND_SYSREG_IC:
5239 inst.base.operands[i].sysins_op =
5240 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5241 goto sys_reg_ins;
5242 case AARCH64_OPND_SYSREG_DC:
5243 inst.base.operands[i].sysins_op =
5244 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5245 goto sys_reg_ins;
5246 case AARCH64_OPND_SYSREG_AT:
5247 inst.base.operands[i].sysins_op =
5248 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5249 goto sys_reg_ins;
5250 case AARCH64_OPND_SYSREG_TLBI:
5251 inst.base.operands[i].sysins_op =
5252 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5253 sys_reg_ins:
5254 if (inst.base.operands[i].sysins_op == NULL)
5255 {
5256 set_fatal_syntax_error ( _("unknown or missing operation name"));
5257 goto failure;
5258 }
5259 break;
5260
5261 case AARCH64_OPND_BARRIER:
5262 case AARCH64_OPND_BARRIER_ISB:
5263 val = parse_barrier (&str);
5264 if (val != PARSE_FAIL
5265 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5266 {
5267 	      /* ISB only accepts the option name 'sy'.  */
5268 set_syntax_error
5269 (_("the specified option is not accepted in ISB"));
5270 /* Turn off backtrack as this optional operand is present. */
5271 backtrack_pos = 0;
5272 goto failure;
5273 }
5274 /* This is an extension to accept a 0..15 immediate. */
5275 if (val == PARSE_FAIL)
5276 po_imm_or_fail (0, 15);
5277 info->barrier = aarch64_barrier_options + val;
5278 break;
5279
5280 case AARCH64_OPND_PRFOP:
5281 val = parse_pldop (&str);
5282 /* This is an extension to accept a 0..31 immediate. */
5283 if (val == PARSE_FAIL)
5284 po_imm_or_fail (0, 31);
5285 inst.base.operands[i].prfop = aarch64_prfops + val;
5286 break;
5287
5288 default:
5289 as_fatal (_("unhandled operand code %d"), operands[i]);
5290 }
5291
5292 /* If we get here, this operand was successfully parsed. */
5293 inst.base.operands[i].present = 1;
5294 continue;
5295
5296 failure:
5297 /* The parse routine should already have set the error, but in case
5298 not, set a default one here. */
5299 if (! error_p ())
5300 set_default_error ();
5301
5302 if (! backtrack_pos)
5303 goto parse_operands_return;
5304
5305 /* Reaching here means we are dealing with an optional operand that is
5306 omitted from the assembly line. */
5307 gas_assert (optional_operand_p (opcode, i));
5308 info->present = 0;
5309 process_omitted_operand (operands[i], opcode, i, info);
5310
5311 /* Try again, skipping the optional operand at backtrack_pos. */
5312 str = backtrack_pos;
5313 backtrack_pos = 0;
5314
5315       /* If this is the last operand, which is optional and omitted, yet a
5316 	 comma was parsed before it, that comma is unexpected.  */
5317 if (i && comma_skipped_p && i == aarch64_num_of_operands (opcode) - 1)
5318 {
5319 set_fatal_syntax_error
5320 (_("unexpected comma before the omitted optional operand"));
5321 goto parse_operands_return;
5322 }
5323
5324 /* Clear any error record after the omitted optional operand has been
5325 successfully handled. */
5326 clear_error ();
5327 }
5328
5329 /* Check if we have parsed all the operands. */
5330 if (*str != '\0' && ! error_p ())
5331 {
5332 /* Set I to the index of the last present operand; this is
5333 for the purpose of diagnostics. */
5334 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5335 ;
5336 set_fatal_syntax_error
5337 (_("unexpected characters following instruction"));
5338 }
5339
5340 parse_operands_return:
5341
5342 if (error_p ())
5343 {
5344 DEBUG_TRACE ("parsing FAIL: %s - %s",
5345 operand_mismatch_kind_names[get_error_kind ()],
5346 get_error_message ());
5347 /* Record the operand error properly; this is useful when there
5348 are multiple instruction templates for a mnemonic name, so that
5349 later on, we can select the error that most closely describes
5350 the problem. */
5351 record_operand_error (opcode, i, get_error_kind (),
5352 get_error_message ());
5353 return FALSE;
5354 }
5355 else
5356 {
5357 DEBUG_TRACE ("parsing SUCCESS");
5358 return TRUE;
5359 }
5360 }
5361
5362 /* Apply some fix-ups to provide programmer-friendly features while
5363    keeping libopcodes happy, i.e. libopcodes only accepts
5364 the preferred architectural syntax.
5365 Return FALSE if there is any failure; otherwise return TRUE. */
5366
5367 static bfd_boolean
5368 programmer_friendly_fixup (aarch64_instruction *instr)
5369 {
5370 aarch64_inst *base = &instr->base;
5371 const aarch64_opcode *opcode = base->opcode;
5372 enum aarch64_op op = opcode->op;
5373 aarch64_opnd_info *operands = base->operands;
5374
5375 DEBUG_TRACE ("enter");
5376
5377 switch (opcode->iclass)
5378 {
5379 case testbranch:
5380 /* TBNZ Xn|Wn, #uimm6, label
5381 Test and Branch Not Zero: conditionally jumps to label if bit number
5382 uimm6 in register Xn is not zero. The bit number implies the width of
5383 the register, which may be written and should be disassembled as Wn if
5384 uimm is less than 32. */
5385 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5386 {
5387 if (operands[1].imm.value >= 32)
5388 {
5389 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5390 0, 31);
5391 return FALSE;
5392 }
5393 operands[0].qualifier = AARCH64_OPND_QLF_X;
5394 }
5395 break;
5396 case loadlit:
5397 /* LDR Wt, label | =value
5398 	 As a convenience, assemblers will typically permit the notation
5399 "=value" in conjunction with the pc-relative literal load instructions
5400 to automatically place an immediate value or symbolic address in a
5401 nearby literal pool and generate a hidden label which references it.
5402 ISREG has been set to 0 in the case of =value. */
5403 if (instr->gen_lit_pool
5404 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5405 {
5406 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5407 if (op == OP_LDRSW_LIT)
5408 size = 4;
5409 if (instr->reloc.exp.X_op != O_constant
5410 && instr->reloc.exp.X_op != O_big
5411 && instr->reloc.exp.X_op != O_symbol)
5412 {
5413 record_operand_error (opcode, 1,
5414 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5415 _("constant expression expected"));
5416 return FALSE;
5417 }
5418 if (! add_to_lit_pool (&instr->reloc.exp, size))
5419 {
5420 record_operand_error (opcode, 1,
5421 AARCH64_OPDE_OTHER_ERROR,
5422 _("literal pool insertion failed"));
5423 return FALSE;
5424 }
5425 }
5426 break;
5427 case log_shift:
5428 case bitfield:
5429 /* UXT[BHW] Wd, Wn
5430 	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5431 	 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5432 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5433 A programmer-friendly assembler should accept a destination Xd in
5434 place of Wd, however that is not the preferred form for disassembly.
5435 */
5436 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5437 && operands[1].qualifier == AARCH64_OPND_QLF_W
5438 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5439 operands[0].qualifier = AARCH64_OPND_QLF_W;
5440 break;
5441
5442 case addsub_ext:
5443 {
5444 /* In the 64-bit form, the final register operand is written as Wm
5445 for all but the (possibly omitted) UXTX/LSL and SXTX
5446 operators.
5447 As a programmer-friendly assembler, we accept e.g.
5448 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5449 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5450 int idx = aarch64_operand_index (opcode->operands,
5451 AARCH64_OPND_Rm_EXT);
5452 gas_assert (idx == 1 || idx == 2);
5453 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5454 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5455 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5456 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5457 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5458 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5459 }
5460 break;
5461
5462 default:
5463 break;
5464 }
5465
5466 DEBUG_TRACE ("exit with SUCCESS");
5467 return TRUE;
5468 }
5469
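/* Illustrative examples of the fix-ups above (not part of the original
   code):

     tbz w3, #5, lab      bit number < 32, so the register qualifier is
                          promoted to the X form used for encoding;
     tbz w3, #37, lab     rejected with an out-of-range error (0 to 31);
     uxtb x0, w1          destination re-qualified from Xd to Wd, giving
                          the preferred form "uxtb w0, w1";
     adds x0, sp, x1, uxtb #2
                          the extended register operand is re-qualified
                          from Xm to Wm ("adds x0, sp, w1, uxtb #2").  */
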
5470 /* A wrapper function to interface with libopcodes on encoding and
5471    to record the error message if there is any.
5472
5473 Return TRUE on success; otherwise return FALSE. */
5474
5475 static bfd_boolean
5476 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5477 aarch64_insn *code)
5478 {
5479 aarch64_operand_error error_info;
5480 error_info.kind = AARCH64_OPDE_NIL;
5481 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5482 return TRUE;
5483 else
5484 {
5485 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5486 record_operand_error_info (opcode, &error_info);
5487 return FALSE;
5488 }
5489 }
5490
5491 #ifdef DEBUG_AARCH64
5492 static inline void
5493 dump_opcode_operands (const aarch64_opcode *opcode)
5494 {
5495 int i = 0;
5496 while (opcode->operands[i] != AARCH64_OPND_NIL)
5497 {
5498 aarch64_verbose ("\t\t opnd%d: %s", i,
5499 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5500 ? aarch64_get_operand_name (opcode->operands[i])
5501 : aarch64_get_operand_desc (opcode->operands[i]));
5502 ++i;
5503 }
5504 }
5505 #endif /* DEBUG_AARCH64 */
5506
5507 /* This is the guts of the machine-dependent assembler. STR points to a
5508 machine dependent instruction. This function is supposed to emit
5509 the frags/bytes it assembles to. */
5510
5511 void
5512 md_assemble (char *str)
5513 {
5514 char *p = str;
5515 templates *template;
5516 aarch64_opcode *opcode;
5517 aarch64_inst *inst_base;
5518 unsigned saved_cond;
5519
5520 /* Align the previous label if needed. */
5521 if (last_label_seen != NULL)
5522 {
5523 symbol_set_frag (last_label_seen, frag_now);
5524 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5525 S_SET_SEGMENT (last_label_seen, now_seg);
5526 }
5527
5528 inst.reloc.type = BFD_RELOC_UNUSED;
5529
5530 DEBUG_TRACE ("\n\n");
5531 DEBUG_TRACE ("==============================");
5532 DEBUG_TRACE ("Enter md_assemble with %s", str);
5533
5534 template = opcode_lookup (&p);
5535 if (!template)
5536 {
5537       /* It wasn't an instruction, but it might be a register alias created
5538 	 by a directive of the form "alias .req reg".  */
5539 if (!create_register_alias (str, p))
5540 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5541 str);
5542 return;
5543 }
5544
5545 skip_whitespace (p);
5546 if (*p == ',')
5547 {
5548 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5549 get_mnemonic_name (str), str);
5550 return;
5551 }
5552
5553 init_operand_error_report ();
5554
5555 saved_cond = inst.cond;
5556 reset_aarch64_instruction (&inst);
5557 inst.cond = saved_cond;
5558
5559 /* Iterate through all opcode entries with the same mnemonic name. */
5560 do
5561 {
5562 opcode = template->opcode;
5563
5564 DEBUG_TRACE ("opcode %s found", opcode->name);
5565 #ifdef DEBUG_AARCH64
5566 if (debug_dump)
5567 dump_opcode_operands (opcode);
5568 #endif /* DEBUG_AARCH64 */
5569
5570 mapping_state (MAP_INSN);
5571
5572 inst_base = &inst.base;
5573 inst_base->opcode = opcode;
5574
5575 /* Truly conditionally executed instructions, e.g. b.cond. */
5576 if (opcode->flags & F_COND)
5577 {
5578 gas_assert (inst.cond != COND_ALWAYS);
5579 inst_base->cond = get_cond_from_value (inst.cond);
5580 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5581 }
5582 else if (inst.cond != COND_ALWAYS)
5583 {
5584 	  /* We shouldn't arrive here, where the assembly looks like a
5585 	     conditional instruction but the opcode found is unconditional.  */
5586 gas_assert (0);
5587 continue;
5588 }
5589
5590 if (parse_operands (p, opcode)
5591 && programmer_friendly_fixup (&inst)
5592 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5593 {
5594 /* Check that this instruction is supported for this CPU. */
5595 if (!opcode->avariant
5596 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5597 {
5598 as_bad (_("selected processor does not support `%s'"), str);
5599 return;
5600 }
5601
5602 if (inst.reloc.type == BFD_RELOC_UNUSED
5603 || !inst.reloc.need_libopcodes_p)
5604 output_inst (NULL);
5605 else
5606 {
5607 /* If there is relocation generated for the instruction,
5608 store the instruction information for the future fix-up. */
5609 struct aarch64_inst *copy;
5610 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5611 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5612 abort ();
5613 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5614 output_inst (copy);
5615 }
5616 return;
5617 }
5618
5619 template = template->next;
5620 if (template != NULL)
5621 {
5622 reset_aarch64_instruction (&inst);
5623 inst.cond = saved_cond;
5624 }
5625 }
5626 while (template != NULL);
5627
5628 /* Issue the error messages if any. */
5629 output_operand_error_report (str);
5630 }
5631
5632 /* Various frobbings of labels and their addresses. */
5633
5634 void
5635 aarch64_start_line_hook (void)
5636 {
5637 last_label_seen = NULL;
5638 }
5639
5640 void
5641 aarch64_frob_label (symbolS * sym)
5642 {
5643 last_label_seen = sym;
5644
5645 dwarf2_emit_label (sym);
5646 }
5647
5648 int
5649 aarch64_data_in_code (void)
5650 {
5651 if (!strncmp (input_line_pointer + 1, "data:", 5))
5652 {
5653 *input_line_pointer = '/';
5654 input_line_pointer += 5;
5655 *input_line_pointer = 0;
5656 return 1;
5657 }
5658
5659 return 0;
5660 }
5661
5662 char *
5663 aarch64_canonicalize_symbol_name (char *name)
5664 {
5665 int len;
5666
5667 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5668 *(name + len - 5) = 0;
5669
5670 return name;
5671 }
5672 \f
5673 /* Table of all register names defined by default. The user can
5674 define additional names with .req. Note that all register names
5675 should appear in both upper and lowercase variants. Some registers
5676 also have mixed-case names. */
5677
5678 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5679 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5680 #define REGSET31(p,t) \
5681 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5682 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5683 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5684 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5685 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5686 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5687 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5688 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5689 #define REGSET(p,t) \
5690 REGSET31(p,t), REGNUM(p,31,t)
5691
5692 /* These go into aarch64_reg_hsh hash-table. */
5693 static const reg_entry reg_names[] = {
5694 /* Integer registers. */
5695 REGSET31 (x, R_64), REGSET31 (X, R_64),
5696 REGSET31 (w, R_32), REGSET31 (W, R_32),
5697
5698 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5699 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5700
5701 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5702 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5703
5704 /* Coprocessor register numbers. */
5705 REGSET (c, CN), REGSET (C, CN),
5706
5707 /* Floating-point single precision registers. */
5708 REGSET (s, FP_S), REGSET (S, FP_S),
5709
5710 /* Floating-point double precision registers. */
5711 REGSET (d, FP_D), REGSET (D, FP_D),
5712
5713 /* Floating-point half precision registers. */
5714 REGSET (h, FP_H), REGSET (H, FP_H),
5715
5716 /* Floating-point byte precision registers. */
5717 REGSET (b, FP_B), REGSET (B, FP_B),
5718
5719 /* Floating-point quad precision registers. */
5720 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5721
5722 /* FP/SIMD registers. */
5723 REGSET (v, VN), REGSET (V, VN),
5724 };
5725
5726 #undef REGDEF
5727 #undef REGNUM
5728 #undef REGSET
5729
5730 #define N 1
5731 #define n 0
5732 #define Z 1
5733 #define z 0
5734 #define C 1
5735 #define c 0
5736 #define V 1
5737 #define v 0
5738 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5739 static const asm_nzcv nzcv_names[] = {
5740 {"nzcv", B (n, z, c, v)},
5741 {"nzcV", B (n, z, c, V)},
5742 {"nzCv", B (n, z, C, v)},
5743 {"nzCV", B (n, z, C, V)},
5744 {"nZcv", B (n, Z, c, v)},
5745 {"nZcV", B (n, Z, c, V)},
5746 {"nZCv", B (n, Z, C, v)},
5747 {"nZCV", B (n, Z, C, V)},
5748 {"Nzcv", B (N, z, c, v)},
5749 {"NzcV", B (N, z, c, V)},
5750 {"NzCv", B (N, z, C, v)},
5751 {"NzCV", B (N, z, C, V)},
5752 {"NZcv", B (N, Z, c, v)},
5753 {"NZcV", B (N, Z, c, V)},
5754 {"NZCv", B (N, Z, C, v)},
5755 {"NZCV", B (N, Z, C, V)}
5756 };
5757
5758 #undef N
5759 #undef n
5760 #undef Z
5761 #undef z
5762 #undef C
5763 #undef c
5764 #undef V
5765 #undef v
5766 #undef B
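
/* Worked example (illustrative): the flag-name strings above map each
   letter to one bit via the B macro, N being the most significant.  For
   instance "NzCv" encodes to B (1, 0, 1, 0) = 0b1010 = 10, the 4-bit
   immediate used for the NZCV operand of the conditional-compare
   instructions (e.g. "ccmp x1, x2, NzCv, ge", using the named form
   accepted by the AARCH64_OPND_NZCV case in parse_operands).  */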
5767 \f
5768 /* MD interface: bits in the object file. */
5769
5770 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5771    for use in the a.out file, and store them in the array pointed to by buf.
5772    This knows about the endianness of the target machine and does
5773    THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
5774    2 (short) and 4 (long).  Floating-point numbers are put out as a series of
5775 LITTLENUMS (shorts, here at least). */
5776
5777 void
5778 md_number_to_chars (char *buf, valueT val, int n)
5779 {
5780 if (target_big_endian)
5781 number_to_chars_bigendian (buf, val, n);
5782 else
5783 number_to_chars_littleendian (buf, val, n);
5784 }
5785
5786 /* MD interface: Sections. */
5787
5788 /* Estimate the size of a frag before relaxing. Assume everything fits in
5789 4 bytes. */
5790
5791 int
5792 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5793 {
5794 fragp->fr_var = 4;
5795 return 4;
5796 }
5797
5798 /* Round up a section size to the appropriate boundary. */
5799
5800 valueT
5801 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5802 {
5803 return size;
5804 }
5805
5806 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5807 of an rs_align_code fragment. */
5808
5809 void
5810 aarch64_handle_align (fragS * fragP)
5811 {
5812 /* NOP = d503201f */
5813 /* AArch64 instructions are always little-endian. */
5814 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5815
5816 int bytes, fix, noop_size;
5817 char *p;
5818 const char *noop;
5819
5820 if (fragP->fr_type != rs_align_code)
5821 return;
5822
5823 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5824 p = fragP->fr_literal + fragP->fr_fix;
5825 fix = 0;
5826
5827 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
5828 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
5829
5830 #ifdef OBJ_ELF
5831 gas_assert (fragP->tc_frag_data.recorded);
5832 #endif
5833
5834 noop = aarch64_noop;
5835 noop_size = sizeof (aarch64_noop);
5836 fragP->fr_var = noop_size;
5837
5838 if (bytes & (noop_size - 1))
5839 {
5840 fix = bytes & (noop_size - 1);
5841 #ifdef OBJ_ELF
5842 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5843 #endif
5844 memset (p, 0, fix);
5845 p += fix;
5846 bytes -= fix;
5847 }
5848
5849 while (bytes >= noop_size)
5850 {
5851 memcpy (p, noop, noop_size);
5852 p += noop_size;
5853 bytes -= noop_size;
5854 fix += noop_size;
5855 }
5856
5857 fragP->fr_fix += fix;
5858 }
5859
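/* Illustrative example, not part of the original code: if an alignment
   directive leaves 6 bytes to pad in a code section, the code above first
   emits 2 zero bytes (marked with a data mapping symbol under ELF) to
   reach 4-byte alignment and then one 4-byte NOP (d503201f), for a total
   fix of 6 bytes.  */
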
5860 /* Called from md_do_align. Used to create an alignment
5861 frag in a code section. */
5862
5863 void
5864 aarch64_frag_align_code (int n, int max)
5865 {
5866 char *p;
5867
5868   /* We assume that there will never be a requirement to support
5869      alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
5870 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
5871 as_fatal (_
5872 ("alignments greater than %d bytes not supported in .text sections"),
5873 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
5874
5875 p = frag_var (rs_align_code,
5876 MAX_MEM_FOR_RS_ALIGN_CODE,
5877 1,
5878 (relax_substateT) max,
5879 (symbolS *) NULL, (offsetT) n, (char *) NULL);
5880 *p = 0;
5881 }
5882
5883 /* Perform target specific initialisation of a frag.
5884 Note - despite the name this initialisation is not done when the frag
5885 is created, but only when its type is assigned. A frag can be created
5886 and used a long time before its type is set, so beware of assuming that
5887    this initialisation is performed first.  */
5888
5889 #ifndef OBJ_ELF
5890 void
5891 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
5892 int max_chars ATTRIBUTE_UNUSED)
5893 {
5894 }
5895
5896 #else /* OBJ_ELF is defined. */
5897 void
5898 aarch64_init_frag (fragS * fragP, int max_chars)
5899 {
5900 /* Record a mapping symbol for alignment frags. We will delete this
5901 later if the alignment ends up empty. */
5902 if (!fragP->tc_frag_data.recorded)
5903 {
5904 fragP->tc_frag_data.recorded = 1;
5905 switch (fragP->fr_type)
5906 {
5907 case rs_align:
5908 case rs_align_test:
5909 case rs_fill:
5910 mapping_state_2 (MAP_DATA, max_chars);
5911 break;
5912 case rs_align_code:
5913 mapping_state_2 (MAP_INSN, max_chars);
5914 break;
5915 default:
5916 break;
5917 }
5918 }
5919 }
5920 \f
5921 /* Initialize the DWARF-2 unwind information for this procedure. */
5922
5923 void
5924 tc_aarch64_frame_initial_instructions (void)
5925 {
5926 cfi_add_CFA_def_cfa (REG_SP, 0);
5927 }
5928 #endif /* OBJ_ELF */
5929
5930 /* Convert REGNAME to a DWARF-2 register number. */
5931
5932 int
5933 tc_aarch64_regname_to_dw2regnum (char *regname)
5934 {
5935 const reg_entry *reg = parse_reg (&regname);
5936 if (reg == NULL)
5937 return -1;
5938
5939 switch (reg->type)
5940 {
5941 case REG_TYPE_SP_32:
5942 case REG_TYPE_SP_64:
5943 case REG_TYPE_R_32:
5944 case REG_TYPE_R_64:
5945 case REG_TYPE_FP_B:
5946 case REG_TYPE_FP_H:
5947 case REG_TYPE_FP_S:
5948 case REG_TYPE_FP_D:
5949 case REG_TYPE_FP_Q:
5950 return reg->number;
5951 default:
5952 break;
5953 }
5954 return -1;
5955 }
5956
5957 /* Implement DWARF2_ADDR_SIZE. */
5958
5959 int
5960 aarch64_dwarf2_addr_size (void)
5961 {
5962 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
5963 if (ilp32_p)
5964 return 4;
5965 #endif
5966 return bfd_arch_bits_per_address (stdoutput) / 8;
5967 }
5968
5969 /* MD interface: Symbol and relocation handling. */
5970
5971 /* Return the address within the segment that a PC-relative fixup is
5972 relative to. For AArch64 PC-relative fixups applied to instructions
5973 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
5974
5975 long
5976 md_pcrel_from_section (fixS * fixP, segT seg)
5977 {
5978 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
5979
5980 /* If this is pc-relative and we are going to emit a relocation
5981 then we just want to put out any pipeline compensation that the linker
5982 will need. Otherwise we want to use the calculated base. */
5983 if (fixP->fx_pcrel
5984 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
5985 || aarch64_force_relocation (fixP)))
5986 base = 0;
5987
5988 /* AArch64 should be consistent for all pc-relative relocations. */
5989 return base + AARCH64_PCREL_OFFSET;
5990 }
5991
5992 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
5993 Otherwise we have no need to give default values to symbols. */
5994
5995 symbolS *
5996 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
5997 {
5998 #ifdef OBJ_ELF
5999 if (name[0] == '_' && name[1] == 'G'
6000 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6001 {
6002 if (!GOT_symbol)
6003 {
6004 if (symbol_find (name))
6005 as_bad (_("GOT already in the symbol table"));
6006
6007 GOT_symbol = symbol_new (name, undefined_section,
6008 (valueT) 0, &zero_address_frag);
6009 }
6010
6011 return GOT_symbol;
6012 }
6013 #endif
6014
6015 return 0;
6016 }
6017
6018 /* Return non-zero if the indicated VALUE has overflowed the maximum
6019 range expressible by an unsigned number with the indicated number of
6020 BITS. */
6021
6022 static bfd_boolean
6023 unsigned_overflow (valueT value, unsigned bits)
6024 {
6025 valueT lim;
6026 if (bits >= sizeof (valueT) * 8)
6027 return FALSE;
6028 lim = (valueT) 1 << bits;
6029 return (value >= lim);
6030 }
6031
6032
6033 /* Return non-zero if the indicated VALUE has overflowed the maximum
6034 range expressible by a signed number with the indicated number of
6035 BITS. */
6036
6037 static bfd_boolean
6038 signed_overflow (offsetT value, unsigned bits)
6039 {
6040 offsetT lim;
6041 if (bits >= sizeof (offsetT) * 8)
6042 return FALSE;
6043 lim = (offsetT) 1 << (bits - 1);
6044 return (value < -lim || value >= lim);
6045 }
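/* Worked examples (illustrative only): unsigned_overflow (0x10000, 16)
returns TRUE, since the limit is 1 << 16 == 0x10000 and the value is not
below it, while unsigned_overflow (0xffff, 16) returns FALSE. Likewise
signed_overflow (-257, 9) returns TRUE because the signed 9-bit range is
[-256, 255], while signed_overflow (-256, 9) returns FALSE. */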
6046
6047 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6048 unsigned immediate offset load/store instruction, try to encode it as
6049 an unscaled, 9-bit, signed immediate offset load/store instruction.
6050 Return TRUE if it is successful; otherwise return FALSE.
6051
6052 As a programmer-friendly assembler, we generate LDUR/STUR instructions
6053 in response to the standard LDR/STR mnemonics when the immediate offset is
6054 unambiguous, i.e. when it is negative or unaligned. */
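/* Illustrative example (not from the original sources): if the offset in
"ldr x0, [x1, #off]" resolves to -8, the scaled, unsigned-offset LDR form
cannot encode it, so the instruction is re-encoded as the unscaled LDUR
form with a 9-bit signed offset; an offset of 3 on a 32-bit access would
likewise be re-encoded because it is unaligned. */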
6055
6056 static bfd_boolean
6057 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6058 {
6059 int idx;
6060 enum aarch64_op new_op;
6061 const aarch64_opcode *new_opcode;
6062
6063 gas_assert (instr->opcode->iclass == ldst_pos);
6064
6065 switch (instr->opcode->op)
6066 {
6067 case OP_LDRB_POS:new_op = OP_LDURB; break;
6068 case OP_STRB_POS: new_op = OP_STURB; break;
6069 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6070 case OP_LDRH_POS: new_op = OP_LDURH; break;
6071 case OP_STRH_POS: new_op = OP_STURH; break;
6072 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6073 case OP_LDR_POS: new_op = OP_LDUR; break;
6074 case OP_STR_POS: new_op = OP_STUR; break;
6075 case OP_LDRF_POS: new_op = OP_LDURV; break;
6076 case OP_STRF_POS: new_op = OP_STURV; break;
6077 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6078 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6079 default: new_op = OP_NIL; break;
6080 }
6081
6082 if (new_op == OP_NIL)
6083 return FALSE;
6084
6085 new_opcode = aarch64_get_opcode (new_op);
6086 gas_assert (new_opcode != NULL);
6087
6088 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6089 instr->opcode->op, new_opcode->op);
6090
6091 aarch64_replace_opcode (instr, new_opcode);
6092
6093 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6094 qualifier matching may fail because the out-of-date qualifier will
6095 prevent the operand being updated with a new and correct qualifier. */
6096 idx = aarch64_operand_index (instr->opcode->operands,
6097 AARCH64_OPND_ADDR_SIMM9);
6098 gas_assert (idx == 1);
6099 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6100
6101 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6102
6103 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6104 return FALSE;
6105
6106 return TRUE;
6107 }
6108
6109 /* Called by fix_insn to fix a MOV immediate alias instruction.
6110
6111 Operand for a generic move immediate instruction, which is an alias
6112 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6113 a 32-bit/64-bit immediate value into a general register. An assembler error
6114 shall result if the immediate cannot be created by a single one of these
6115 instructions. If there is a choice, then to ensure reversibility an
6116 assembler must prefer MOVZ to MOVN, and MOVZ or MOVN to ORR. */
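/* Illustrative examples (assumed, not taken from the sources):
mov x0, #0x10000 becomes movz x0, #0x1, lsl #16
mov x0, #-1 becomes movn x0, #0x0
mov x0, #0x5555555555555555 becomes orr x0, xzr, #0x5555555555555555
whereas an immediate such as 0x12345 cannot be produced by a single MOVZ,
MOVN or ORR and is rejected with an error. */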
6117
6118 static void
6119 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6120 {
6121 const aarch64_opcode *opcode;
6122
6123 /* Need to check if the destination is SP/ZR. The check has to be done
6124 before any aarch64_replace_opcode. */
6125 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6126 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6127
6128 instr->operands[1].imm.value = value;
6129 instr->operands[1].skip = 0;
6130
6131 if (try_mov_wide_p)
6132 {
6133 /* Try the MOVZ alias. */
6134 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6135 aarch64_replace_opcode (instr, opcode);
6136 if (aarch64_opcode_encode (instr->opcode, instr,
6137 &instr->value, NULL, NULL))
6138 {
6139 put_aarch64_insn (buf, instr->value);
6140 return;
6141 }
6142 /* Try the MOVN alias. */
6143 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6144 aarch64_replace_opcode (instr, opcode);
6145 if (aarch64_opcode_encode (instr->opcode, instr,
6146 &instr->value, NULL, NULL))
6147 {
6148 put_aarch64_insn (buf, instr->value);
6149 return;
6150 }
6151 }
6152
6153 if (try_mov_bitmask_p)
6154 {
6155 /* Try the ORR alias. */
6156 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6157 aarch64_replace_opcode (instr, opcode);
6158 if (aarch64_opcode_encode (instr->opcode, instr,
6159 &instr->value, NULL, NULL))
6160 {
6161 put_aarch64_insn (buf, instr->value);
6162 return;
6163 }
6164 }
6165
6166 as_bad_where (fixP->fx_file, fixP->fx_line,
6167 _("immediate cannot be moved by a single instruction"));
6168 }
6169
6170 /* An immediate-related instruction operand may have a symbol used
6171 in the assembly, e.g.
6172
6173 mov w0, u32
6174 .set u32, 0x00ffff00
6175
6176 At the time when the assembly instruction is parsed, a referenced symbol,
6177 like 'u32' in the above example may not have been seen; a fixS is created
6178 in such a case and is handled here after symbols have been resolved.
6179 Instruction is fixed up with VALUE using the information in *FIXP plus
6180 extra information in FLAGS.
6181
6182 This function is called by md_apply_fix to fix up instructions that need
6183 a fix-up described above but does not involve any linker-time relocation. */
6184
6185 static void
6186 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6187 {
6188 int idx;
6189 uint32_t insn;
6190 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6191 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6192 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6193
6194 if (new_inst)
6195 {
6196 /* Now the instruction is about to be fixed-up, so the operand that
6197 was previously marked as 'ignored' needs to be unmarked in order
6198 to get the encoding done properly. */
6199 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6200 new_inst->operands[idx].skip = 0;
6201 }
6202
6203 gas_assert (opnd != AARCH64_OPND_NIL);
6204
6205 switch (opnd)
6206 {
6207 case AARCH64_OPND_EXCEPTION:
6208 if (unsigned_overflow (value, 16))
6209 as_bad_where (fixP->fx_file, fixP->fx_line,
6210 _("immediate out of range"));
6211 insn = get_aarch64_insn (buf);
6212 insn |= encode_svc_imm (value);
6213 put_aarch64_insn (buf, insn);
6214 break;
6215
6216 case AARCH64_OPND_AIMM:
6217 /* ADD or SUB with immediate.
6218 NOTE this assumes we come here with an add/sub shifted reg encoding
6219 3 322|2222|2 2 2 21111 111111
6220 1 098|7654|3 2 1 09876 543210 98765 43210
6221 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6222 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6223 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6224 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6225 ->
6226 3 322|2222|2 2 221111111111
6227 1 098|7654|3 2 109876543210 98765 43210
6228 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6229 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6230 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6231 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6232 Fields sf Rn Rd are already set. */
6233 insn = get_aarch64_insn (buf);
6234 if (value < 0)
6235 {
6236 /* Add <-> sub. */
6237 insn = reencode_addsub_switch_add_sub (insn);
6238 value = -value;
6239 }
6240
6241 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6242 && unsigned_overflow (value, 12))
6243 {
6244 /* Try to shift the value by 12 to make it fit. */
6245 if (((value >> 12) << 12) == value
6246 && ! unsigned_overflow (value, 12 + 12))
6247 {
6248 value >>= 12;
6249 insn |= encode_addsub_imm_shift_amount (1);
6250 }
6251 }
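/* Illustrative example: if the resolved value is 0x3000 and no explicit
shift was given, it does not fit in 12 bits, but (0x3000 >> 12) << 12 is
still 0x3000 and 0x3 fits in 12 bits, so the value becomes 0x3 with the
LSL #12 shift bit set in the encoding. */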
6252
6253 if (unsigned_overflow (value, 12))
6254 as_bad_where (fixP->fx_file, fixP->fx_line,
6255 _("immediate out of range"));
6256
6257 insn |= encode_addsub_imm (value);
6258
6259 put_aarch64_insn (buf, insn);
6260 break;
6261
6262 case AARCH64_OPND_SIMD_IMM:
6263 case AARCH64_OPND_SIMD_IMM_SFT:
6264 case AARCH64_OPND_LIMM:
6265 /* Bit mask immediate. */
6266 gas_assert (new_inst != NULL);
6267 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6268 new_inst->operands[idx].imm.value = value;
6269 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6270 &new_inst->value, NULL, NULL))
6271 put_aarch64_insn (buf, new_inst->value);
6272 else
6273 as_bad_where (fixP->fx_file, fixP->fx_line,
6274 _("invalid immediate"));
6275 break;
6276
6277 case AARCH64_OPND_HALF:
6278 /* 16-bit unsigned immediate. */
6279 if (unsigned_overflow (value, 16))
6280 as_bad_where (fixP->fx_file, fixP->fx_line,
6281 _("immediate out of range"));
6282 insn = get_aarch64_insn (buf);
6283 insn |= encode_movw_imm (value & 0xffff);
6284 put_aarch64_insn (buf, insn);
6285 break;
6286
6287 case AARCH64_OPND_IMM_MOV:
6288 /* Operand for a generic move immediate instruction, which is
6289 an alias instruction that generates a single MOVZ, MOVN or ORR
6290 instruction to load a 32-bit/64-bit immediate value into a general
6291 register. An assembler error shall result if the immediate cannot be
6292 created by a single one of these instructions. If there is a choice,
6293 then to ensure reversibility an assembler must prefer MOVZ to MOVN,
6294 and MOVZ or MOVN to ORR. */
6295 gas_assert (new_inst != NULL);
6296 fix_mov_imm_insn (fixP, buf, new_inst, value);
6297 break;
6298
6299 case AARCH64_OPND_ADDR_SIMM7:
6300 case AARCH64_OPND_ADDR_SIMM9:
6301 case AARCH64_OPND_ADDR_SIMM9_2:
6302 case AARCH64_OPND_ADDR_UIMM12:
6303 /* Immediate offset in an address. */
6304 insn = get_aarch64_insn (buf);
6305
6306 gas_assert (new_inst != NULL && new_inst->value == insn);
6307 gas_assert (new_inst->opcode->operands[1] == opnd
6308 || new_inst->opcode->operands[2] == opnd);
6309
6310 /* Get the index of the address operand. */
6311 if (new_inst->opcode->operands[1] == opnd)
6312 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6313 idx = 1;
6314 else
6315 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6316 idx = 2;
6317
6318 /* Update the resolved offset value. */
6319 new_inst->operands[idx].addr.offset.imm = value;
6320
6321 /* Encode/fix-up. */
6322 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6323 &new_inst->value, NULL, NULL))
6324 {
6325 put_aarch64_insn (buf, new_inst->value);
6326 break;
6327 }
6328 else if (new_inst->opcode->iclass == ldst_pos
6329 && try_to_encode_as_unscaled_ldst (new_inst))
6330 {
6331 put_aarch64_insn (buf, new_inst->value);
6332 break;
6333 }
6334
6335 as_bad_where (fixP->fx_file, fixP->fx_line,
6336 _("immediate offset out of range"));
6337 break;
6338
6339 default:
6340 gas_assert (0);
6341 as_fatal (_("unhandled operand code %d"), opnd);
6342 }
6343 }
6344
6345 /* Apply a fixup (fixP) to segment data, once it has been determined
6346 by our caller that we have all the info we need to fix it up.
6347
6348 Parameter valP is the pointer to the value of the bits. */
6349
6350 void
6351 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6352 {
6353 offsetT value = *valP;
6354 uint32_t insn;
6355 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6356 int scale;
6357 unsigned flags = fixP->fx_addnumber;
6358
6359 DEBUG_TRACE ("\n\n");
6360 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6361 DEBUG_TRACE ("Enter md_apply_fix");
6362
6363 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6364
6365 /* Note whether this will delete the relocation. */
6366
6367 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6368 fixP->fx_done = 1;
6369
6370 /* Process the relocations. */
6371 switch (fixP->fx_r_type)
6372 {
6373 case BFD_RELOC_NONE:
6374 /* This will need to go in the object file. */
6375 fixP->fx_done = 0;
6376 break;
6377
6378 case BFD_RELOC_8:
6379 case BFD_RELOC_8_PCREL:
6380 if (fixP->fx_done || !seg->use_rela_p)
6381 md_number_to_chars (buf, value, 1);
6382 break;
6383
6384 case BFD_RELOC_16:
6385 case BFD_RELOC_16_PCREL:
6386 if (fixP->fx_done || !seg->use_rela_p)
6387 md_number_to_chars (buf, value, 2);
6388 break;
6389
6390 case BFD_RELOC_32:
6391 case BFD_RELOC_32_PCREL:
6392 if (fixP->fx_done || !seg->use_rela_p)
6393 md_number_to_chars (buf, value, 4);
6394 break;
6395
6396 case BFD_RELOC_64:
6397 case BFD_RELOC_64_PCREL:
6398 if (fixP->fx_done || !seg->use_rela_p)
6399 md_number_to_chars (buf, value, 8);
6400 break;
6401
6402 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6403 /* We claim that these fixups have been processed here, even if
6404 in fact we generate an error because we do not have a reloc
6405 for them, so tc_gen_reloc() will reject them. */
6406 fixP->fx_done = 1;
6407 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6408 {
6409 as_bad_where (fixP->fx_file, fixP->fx_line,
6410 _("undefined symbol %s used as an immediate value"),
6411 S_GET_NAME (fixP->fx_addsy));
6412 goto apply_fix_return;
6413 }
6414 fix_insn (fixP, flags, value);
6415 break;
6416
6417 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6418 if (fixP->fx_done || !seg->use_rela_p)
6419 {
6420 if (value & 3)
6421 as_bad_where (fixP->fx_file, fixP->fx_line,
6422 _("pc-relative load offset not word aligned"));
6423 if (signed_overflow (value, 21))
6424 as_bad_where (fixP->fx_file, fixP->fx_line,
6425 _("pc-relative load offset out of range"));
6426 insn = get_aarch64_insn (buf);
6427 insn |= encode_ld_lit_ofs_19 (value >> 2);
6428 put_aarch64_insn (buf, insn);
6429 }
6430 break;
6431
6432 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6433 if (fixP->fx_done || !seg->use_rela_p)
6434 {
6435 if (signed_overflow (value, 21))
6436 as_bad_where (fixP->fx_file, fixP->fx_line,
6437 _("pc-relative address offset out of range"));
6438 insn = get_aarch64_insn (buf);
6439 insn |= encode_adr_imm (value);
6440 put_aarch64_insn (buf, insn);
6441 }
6442 break;
6443
6444 case BFD_RELOC_AARCH64_BRANCH19:
6445 if (fixP->fx_done || !seg->use_rela_p)
6446 {
6447 if (value & 3)
6448 as_bad_where (fixP->fx_file, fixP->fx_line,
6449 _("conditional branch target not word aligned"));
6450 if (signed_overflow (value, 21))
6451 as_bad_where (fixP->fx_file, fixP->fx_line,
6452 _("conditional branch out of range"));
6453 insn = get_aarch64_insn (buf);
6454 insn |= encode_cond_branch_ofs_19 (value >> 2);
6455 put_aarch64_insn (buf, insn);
6456 }
6457 break;
6458
6459 case BFD_RELOC_AARCH64_TSTBR14:
6460 if (fixP->fx_done || !seg->use_rela_p)
6461 {
6462 if (value & 3)
6463 as_bad_where (fixP->fx_file, fixP->fx_line,
6464 _("conditional branch target not word aligned"));
6465 if (signed_overflow (value, 16))
6466 as_bad_where (fixP->fx_file, fixP->fx_line,
6467 _("conditional branch out of range"));
6468 insn = get_aarch64_insn (buf);
6469 insn |= encode_tst_branch_ofs_14 (value >> 2);
6470 put_aarch64_insn (buf, insn);
6471 }
6472 break;
6473
6474 case BFD_RELOC_AARCH64_JUMP26:
6475 case BFD_RELOC_AARCH64_CALL26:
6476 if (fixP->fx_done || !seg->use_rela_p)
6477 {
6478 if (value & 3)
6479 as_bad_where (fixP->fx_file, fixP->fx_line,
6480 _("branch target not word aligned"));
6481 if (signed_overflow (value, 28))
6482 as_bad_where (fixP->fx_file, fixP->fx_line,
6483 _("branch out of range"));
6484 insn = get_aarch64_insn (buf);
6485 insn |= encode_branch_ofs_26 (value >> 2);
6486 put_aarch64_insn (buf, insn);
6487 }
6488 break;
6489
6490 case BFD_RELOC_AARCH64_MOVW_G0:
6491 case BFD_RELOC_AARCH64_MOVW_G0_S:
6492 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6493 scale = 0;
6494 goto movw_common;
6495 case BFD_RELOC_AARCH64_MOVW_G1:
6496 case BFD_RELOC_AARCH64_MOVW_G1_S:
6497 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6498 scale = 16;
6499 goto movw_common;
6500 case BFD_RELOC_AARCH64_MOVW_G2:
6501 case BFD_RELOC_AARCH64_MOVW_G2_S:
6502 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6503 scale = 32;
6504 goto movw_common;
6505 case BFD_RELOC_AARCH64_MOVW_G3:
6506 scale = 48;
6507 movw_common:
6508 if (fixP->fx_done || !seg->use_rela_p)
6509 {
6510 insn = get_aarch64_insn (buf);
6511
6512 if (!fixP->fx_done)
6513 {
6514 /* REL signed addend must fit in 16 bits */
6515 if (signed_overflow (value, 16))
6516 as_bad_where (fixP->fx_file, fixP->fx_line,
6517 _("offset out of range"));
6518 }
6519 else
6520 {
6521 /* Check for overflow and scale. */
6522 switch (fixP->fx_r_type)
6523 {
6524 case BFD_RELOC_AARCH64_MOVW_G0:
6525 case BFD_RELOC_AARCH64_MOVW_G1:
6526 case BFD_RELOC_AARCH64_MOVW_G2:
6527 case BFD_RELOC_AARCH64_MOVW_G3:
6528 if (unsigned_overflow (value, scale + 16))
6529 as_bad_where (fixP->fx_file, fixP->fx_line,
6530 _("unsigned value out of range"));
6531 break;
6532 case BFD_RELOC_AARCH64_MOVW_G0_S:
6533 case BFD_RELOC_AARCH64_MOVW_G1_S:
6534 case BFD_RELOC_AARCH64_MOVW_G2_S:
6535 /* NOTE: We can only come here with movz or movn. */
6536 if (signed_overflow (value, scale + 16))
6537 as_bad_where (fixP->fx_file, fixP->fx_line,
6538 _("signed value out of range"));
6539 if (value < 0)
6540 {
6541 /* Force use of MOVN. */
6542 value = ~value;
6543 insn = reencode_movzn_to_movn (insn);
6544 }
6545 else
6546 {
6547 /* Force use of MOVZ. */
6548 insn = reencode_movzn_to_movz (insn);
6549 }
6550 break;
6551 default:
6552 /* Unchecked relocations. */
6553 break;
6554 }
6555 value >>= scale;
6556 }
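/* Illustrative example: for a resolved BFD_RELOC_AARCH64_MOVW_G1 fixup
with value 0x12340000, scale is 16; the value fits in scale + 16 = 32
bits, so after "value >>= scale" the remaining 0x1234 is inserted below
as the 16-bit immediate of the MOVN/MOVZ/MOVK instruction. */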
6557
6558 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6559 insn |= encode_movw_imm (value & 0xffff);
6560
6561 put_aarch64_insn (buf, insn);
6562 }
6563 break;
6564
6565 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6566 fixP->fx_r_type = (ilp32_p
6567 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6568 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6569 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6570 /* Should always be exported to object file, see
6571 aarch64_force_relocation(). */
6572 gas_assert (!fixP->fx_done);
6573 gas_assert (seg->use_rela_p);
6574 break;
6575
6576 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6577 fixP->fx_r_type = (ilp32_p
6578 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6579 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6580 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6581 /* Should always be exported to object file, see
6582 aarch64_force_relocation(). */
6583 gas_assert (!fixP->fx_done);
6584 gas_assert (seg->use_rela_p);
6585 break;
6586
6587 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6588 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6589 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6590 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6591 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6592 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6593 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6594 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6595 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6596 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6597 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6598 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6599 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6600 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6601 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6602 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6603 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6604 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6605 /* Should always be exported to object file, see
6606 aarch64_force_relocation(). */
6607 gas_assert (!fixP->fx_done);
6608 gas_assert (seg->use_rela_p);
6609 break;
6610
6611 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6612 /* Should always be exported to object file, see
6613 aarch64_force_relocation(). */
6614 fixP->fx_r_type = (ilp32_p
6615 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6616 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6617 gas_assert (!fixP->fx_done);
6618 gas_assert (seg->use_rela_p);
6619 break;
6620
6621 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6622 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6623 case BFD_RELOC_AARCH64_ADD_LO12:
6624 case BFD_RELOC_AARCH64_LDST8_LO12:
6625 case BFD_RELOC_AARCH64_LDST16_LO12:
6626 case BFD_RELOC_AARCH64_LDST32_LO12:
6627 case BFD_RELOC_AARCH64_LDST64_LO12:
6628 case BFD_RELOC_AARCH64_LDST128_LO12:
6629 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6630 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6631 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6632 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6633 /* Should always be exported to object file, see
6634 aarch64_force_relocation(). */
6635 gas_assert (!fixP->fx_done);
6636 gas_assert (seg->use_rela_p);
6637 break;
6638
6639 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6640 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6641 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6642 break;
6643
6644 default:
6645 as_bad_where (fixP->fx_file, fixP->fx_line,
6646 _("unexpected %s fixup"),
6647 bfd_get_reloc_code_name (fixP->fx_r_type));
6648 break;
6649 }
6650
6651 apply_fix_return:
6652 /* Free the allocated struct aarch64_inst.
6653 N.B. currently only a limited number of fix-up types actually use
6654 this field, so the impact on performance should be minimal. */
6655 if (fixP->tc_fix_data.inst != NULL)
6656 free (fixP->tc_fix_data.inst);
6657
6658 return;
6659 }
6660
6661 /* Translate internal representation of relocation info to BFD target
6662 format. */
6663
6664 arelent *
6665 tc_gen_reloc (asection * section, fixS * fixp)
6666 {
6667 arelent *reloc;
6668 bfd_reloc_code_real_type code;
6669
6670 reloc = xmalloc (sizeof (arelent));
6671
6672 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6673 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6674 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6675
6676 if (fixp->fx_pcrel)
6677 {
6678 if (section->use_rela_p)
6679 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6680 else
6681 fixp->fx_offset = reloc->address;
6682 }
6683 reloc->addend = fixp->fx_offset;
6684
6685 code = fixp->fx_r_type;
6686 switch (code)
6687 {
6688 case BFD_RELOC_16:
6689 if (fixp->fx_pcrel)
6690 code = BFD_RELOC_16_PCREL;
6691 break;
6692
6693 case BFD_RELOC_32:
6694 if (fixp->fx_pcrel)
6695 code = BFD_RELOC_32_PCREL;
6696 break;
6697
6698 case BFD_RELOC_64:
6699 if (fixp->fx_pcrel)
6700 code = BFD_RELOC_64_PCREL;
6701 break;
6702
6703 default:
6704 break;
6705 }
6706
6707 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6708 if (reloc->howto == NULL)
6709 {
6710 as_bad_where (fixp->fx_file, fixp->fx_line,
6711 _
6712 ("cannot represent %s relocation in this object file format"),
6713 bfd_get_reloc_code_name (code));
6714 return NULL;
6715 }
6716
6717 return reloc;
6718 }
6719
6720 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6721
6722 void
6723 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6724 {
6725 bfd_reloc_code_real_type type;
6726 int pcrel = 0;
6727
6728 /* Pick a reloc.
6729 FIXME: @@ Should look at CPU word size. */
6730 switch (size)
6731 {
6732 case 1:
6733 type = BFD_RELOC_8;
6734 break;
6735 case 2:
6736 type = BFD_RELOC_16;
6737 break;
6738 case 4:
6739 type = BFD_RELOC_32;
6740 break;
6741 case 8:
6742 type = BFD_RELOC_64;
6743 break;
6744 default:
6745 as_bad (_("cannot do %u-byte relocation"), size);
6746 type = BFD_RELOC_UNUSED;
6747 break;
6748 }
6749
6750 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6751 }
6752
6753 int
6754 aarch64_force_relocation (struct fix *fixp)
6755 {
6756 switch (fixp->fx_r_type)
6757 {
6758 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6759 /* Perform these "immediate" internal relocations
6760 even if the symbol is extern or weak. */
6761 return 0;
6762
6763 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6764 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6765 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6766 /* Pseudo relocs that need to be fixed up according to
6767 ilp32_p. */
6768 return 0;
6769
6770 case BFD_RELOC_AARCH64_ADD_LO12:
6771 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6772 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6773 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6774 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6775 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6776 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6777 case BFD_RELOC_AARCH64_LDST128_LO12:
6778 case BFD_RELOC_AARCH64_LDST16_LO12:
6779 case BFD_RELOC_AARCH64_LDST32_LO12:
6780 case BFD_RELOC_AARCH64_LDST64_LO12:
6781 case BFD_RELOC_AARCH64_LDST8_LO12:
6782 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6783 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6784 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6785 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6786 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6787 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6788 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6789 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6790 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6791 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6792 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6793 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6794 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6795 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6796 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6797 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6798 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6799 /* Always leave these relocations for the linker. */
6800 return 1;
6801
6802 default:
6803 break;
6804 }
6805
6806 return generic_force_reloc (fixp);
6807 }
6808
6809 #ifdef OBJ_ELF
6810
6811 const char *
6812 elf64_aarch64_target_format (void)
6813 {
6814 if (target_big_endian)
6815 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
6816 else
6817 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
6818 }
6819
6820 void
6821 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6822 {
6823 elf_frob_symbol (symp, puntp);
6824 }
6825 #endif
6826
6827 /* MD interface: Finalization. */
6828
6829 /* A good place to do this, although this was probably not intended
6830 for this kind of use. We need to dump the literal pool before
6831 references are made to a null symbol pointer. */
6832
6833 void
6834 aarch64_cleanup (void)
6835 {
6836 literal_pool *pool;
6837
6838 for (pool = list_of_pools; pool; pool = pool->next)
6839 {
6840 /* Put it at the end of the relevant section. */
6841 subseg_set (pool->section, pool->sub_section);
6842 s_ltorg (0);
6843 }
6844 }
6845
6846 #ifdef OBJ_ELF
6847 /* Remove any excess mapping symbols generated for alignment frags in
6848 SEC. We may have created a mapping symbol before a zero byte
6849 alignment; remove it if there's a mapping symbol after the
6850 alignment. */
6851 static void
6852 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6853 void *dummy ATTRIBUTE_UNUSED)
6854 {
6855 segment_info_type *seginfo = seg_info (sec);
6856 fragS *fragp;
6857
6858 if (seginfo == NULL || seginfo->frchainP == NULL)
6859 return;
6860
6861 for (fragp = seginfo->frchainP->frch_root;
6862 fragp != NULL; fragp = fragp->fr_next)
6863 {
6864 symbolS *sym = fragp->tc_frag_data.last_map;
6865 fragS *next = fragp->fr_next;
6866
6867 /* Variable-sized frags have been converted to fixed size by
6868 this point. But if this was variable-sized to start with,
6869 there will be a fixed-size frag after it. So don't handle
6870 next == NULL. */
6871 if (sym == NULL || next == NULL)
6872 continue;
6873
6874 if (S_GET_VALUE (sym) < next->fr_address)
6875 /* Not at the end of this frag. */
6876 continue;
6877 know (S_GET_VALUE (sym) == next->fr_address);
6878
6879 do
6880 {
6881 if (next->tc_frag_data.first_map != NULL)
6882 {
6883 /* Next frag starts with a mapping symbol. Discard this
6884 one. */
6885 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6886 break;
6887 }
6888
6889 if (next->fr_next == NULL)
6890 {
6891 /* This mapping symbol is at the end of the section. Discard
6892 it. */
6893 know (next->fr_fix == 0 && next->fr_var == 0);
6894 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6895 break;
6896 }
6897
6898 /* As long as we have empty frags without any mapping symbols,
6899 keep looking. */
6900 /* If the next frag is non-empty and does not start with a
6901 mapping symbol, then this mapping symbol is required. */
6902 if (next->fr_address != next->fr_next->fr_address)
6903 break;
6904
6905 next = next->fr_next;
6906 }
6907 while (next != NULL);
6908 }
6909 }
6910 #endif
6911
6912 /* Adjust the symbol table. */
6913
6914 void
6915 aarch64_adjust_symtab (void)
6916 {
6917 #ifdef OBJ_ELF
6918 /* Remove any overlapping mapping symbols generated by alignment frags. */
6919 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
6920 /* Now do generic ELF adjustments. */
6921 elf_adjust_symtab ();
6922 #endif
6923 }
6924
6925 static void
6926 checked_hash_insert (struct hash_control *table, const char *key, void *value)
6927 {
6928 const char *hash_err;
6929
6930 hash_err = hash_insert (table, key, value);
6931 if (hash_err)
6932 printf ("Internal Error: Can't hash %s\n", key);
6933 }
6934
6935 static void
6936 fill_instruction_hash_table (void)
6937 {
6938 aarch64_opcode *opcode = aarch64_opcode_table;
6939
6940 while (opcode->name != NULL)
6941 {
6942 templates *templ, *new_templ;
6943 templ = hash_find (aarch64_ops_hsh, opcode->name);
6944
6945 new_templ = (templates *) xmalloc (sizeof (templates));
6946 new_templ->opcode = opcode;
6947 new_templ->next = NULL;
6948
6949 if (!templ)
6950 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
6951 else
6952 {
6953 new_templ->next = templ->next;
6954 templ->next = new_templ;
6955 }
6956 ++opcode;
6957 }
6958 }
6959
6960 static inline void
6961 convert_to_upper (char *dst, const char *src, size_t num)
6962 {
6963 unsigned int i;
6964 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
6965 *dst = TOUPPER (*src);
6966 *dst = '\0';
6967 }
6968
6969 /* Assume STR points to a lower-case string; allocate, convert and return
6970 the corresponding upper-case string. */
6971 static inline const char*
6972 get_upper_str (const char *str)
6973 {
6974 char *ret;
6975 size_t len = strlen (str);
6976 if ((ret = xmalloc (len + 1)) == NULL)
6977 abort ();
6978 convert_to_upper (ret, str, len);
6979 return ret;
6980 }
6981
6982 /* MD interface: Initialization. */
6983
6984 void
6985 md_begin (void)
6986 {
6987 unsigned mach;
6988 unsigned int i;
6989
6990 if ((aarch64_ops_hsh = hash_new ()) == NULL
6991 || (aarch64_cond_hsh = hash_new ()) == NULL
6992 || (aarch64_shift_hsh = hash_new ()) == NULL
6993 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
6994 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
6995 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
6996 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
6997 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
6998 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
6999 || (aarch64_reg_hsh = hash_new ()) == NULL
7000 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7001 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7002 || (aarch64_pldop_hsh = hash_new ()) == NULL)
7003 as_fatal (_("virtual memory exhausted"));
7004
7005 fill_instruction_hash_table ();
7006
7007 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7008 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7009 (void *) (aarch64_sys_regs + i));
7010
7011 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7012 checked_hash_insert (aarch64_pstatefield_hsh,
7013 aarch64_pstatefields[i].name,
7014 (void *) (aarch64_pstatefields + i));
7015
7016 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
7017 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7018 aarch64_sys_regs_ic[i].template,
7019 (void *) (aarch64_sys_regs_ic + i));
7020
7021 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
7022 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7023 aarch64_sys_regs_dc[i].template,
7024 (void *) (aarch64_sys_regs_dc + i));
7025
7026 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
7027 checked_hash_insert (aarch64_sys_regs_at_hsh,
7028 aarch64_sys_regs_at[i].template,
7029 (void *) (aarch64_sys_regs_at + i));
7030
7031 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
7032 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7033 aarch64_sys_regs_tlbi[i].template,
7034 (void *) (aarch64_sys_regs_tlbi + i));
7035
7036 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7037 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7038 (void *) (reg_names + i));
7039
7040 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7041 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7042 (void *) (nzcv_names + i));
7043
7044 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7045 {
7046 const char *name = aarch64_operand_modifiers[i].name;
7047 checked_hash_insert (aarch64_shift_hsh, name,
7048 (void *) (aarch64_operand_modifiers + i));
7049 /* Also hash the name in the upper case. */
7050 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7051 (void *) (aarch64_operand_modifiers + i));
7052 }
7053
7054 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7055 {
7056 unsigned int j;
7057 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7058 the same condition code. */
7059 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7060 {
7061 const char *name = aarch64_conds[i].names[j];
7062 if (name == NULL)
7063 break;
7064 checked_hash_insert (aarch64_cond_hsh, name,
7065 (void *) (aarch64_conds + i));
7066 /* Also hash the name in the upper case. */
7067 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7068 (void *) (aarch64_conds + i));
7069 }
7070 }
7071
7072 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7073 {
7074 const char *name = aarch64_barrier_options[i].name;
7075 /* Skip xx00 - the unallocated values of option. */
7076 if ((i & 0x3) == 0)
7077 continue;
7078 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7079 (void *) (aarch64_barrier_options + i));
7080 /* Also hash the name in the upper case. */
7081 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7082 (void *) (aarch64_barrier_options + i));
7083 }
7084
7085 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7086 {
7087 const char* name = aarch64_prfops[i].name;
7088 /* Skip the unallocated hint encodings. */
7089 if (name == NULL)
7090 continue;
7091 checked_hash_insert (aarch64_pldop_hsh, name,
7092 (void *) (aarch64_prfops + i));
7093 /* Also hash the name in the upper case. */
7094 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7095 (void *) (aarch64_prfops + i));
7096 }
7097
7098 /* Set the cpu variant based on the command-line options. */
7099 if (!mcpu_cpu_opt)
7100 mcpu_cpu_opt = march_cpu_opt;
7101
7102 if (!mcpu_cpu_opt)
7103 mcpu_cpu_opt = &cpu_default;
7104
7105 cpu_variant = *mcpu_cpu_opt;
7106
7107 /* Record the CPU type. */
7108 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7109
7110 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7111 }
7112
7113 /* Command line processing. */
7114
7115 const char *md_shortopts = "m:";
7116
7117 #ifdef AARCH64_BI_ENDIAN
7118 #define OPTION_EB (OPTION_MD_BASE + 0)
7119 #define OPTION_EL (OPTION_MD_BASE + 1)
7120 #else
7121 #if TARGET_BYTES_BIG_ENDIAN
7122 #define OPTION_EB (OPTION_MD_BASE + 0)
7123 #else
7124 #define OPTION_EL (OPTION_MD_BASE + 1)
7125 #endif
7126 #endif
7127
7128 struct option md_longopts[] = {
7129 #ifdef OPTION_EB
7130 {"EB", no_argument, NULL, OPTION_EB},
7131 #endif
7132 #ifdef OPTION_EL
7133 {"EL", no_argument, NULL, OPTION_EL},
7134 #endif
7135 {NULL, no_argument, NULL, 0}
7136 };
7137
7138 size_t md_longopts_size = sizeof (md_longopts);
7139
7140 struct aarch64_option_table
7141 {
7142 char *option; /* Option name to match. */
7143 char *help; /* Help information. */
7144 int *var; /* Variable to change. */
7145 int value; /* What to change it to. */
7146 char *deprecated; /* If non-null, print this message. */
7147 };
7148
7149 static struct aarch64_option_table aarch64_opts[] = {
7150 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7151 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7152 NULL},
7153 #ifdef DEBUG_AARCH64
7154 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7155 #endif /* DEBUG_AARCH64 */
7156 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7157 NULL},
7158 {"mno-verbose-error", N_("do not output verbose error messages"),
7159 &verbose_error_p, 0, NULL},
7160 {NULL, NULL, NULL, 0, NULL}
7161 };
7162
7163 struct aarch64_cpu_option_table
7164 {
7165 char *name;
7166 const aarch64_feature_set value;
7167 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7168 case. */
7169 const char *canonical_name;
7170 };
7171
7172 /* This list should, at a minimum, contain all the cpu names
7173 recognized by GCC. */
7174 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7175 {"all", AARCH64_ANY, NULL},
7176 {"cortex-a53", AARCH64_ARCH_V8, "Cortex-A53"},
7177 {"cortex-a57", AARCH64_ARCH_V8, "Cortex-A57"},
7178 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7179 {"generic", AARCH64_ARCH_V8, NULL},
7180
7181 /* These two are example CPUs supported in GCC, once we have real
7182 CPUs they will be removed. */
7183 {"example-1", AARCH64_ARCH_V8, NULL},
7184 {"example-2", AARCH64_ARCH_V8, NULL},
7185
7186 {NULL, AARCH64_ARCH_NONE, NULL}
7187 };
7188
7189 struct aarch64_arch_option_table
7190 {
7191 char *name;
7192 const aarch64_feature_set value;
7193 };
7194
7195 /* This list should, at a minimum, contain all the architecture names
7196 recognized by GCC. */
7197 static const struct aarch64_arch_option_table aarch64_archs[] = {
7198 {"all", AARCH64_ANY},
7199 {"armv8-a", AARCH64_ARCH_V8},
7200 {NULL, AARCH64_ARCH_NONE}
7201 };
7202
7203 /* ISA extensions. */
7204 struct aarch64_option_cpu_value_table
7205 {
7206 char *name;
7207 const aarch64_feature_set value;
7208 };
7209
7210 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7211 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7212 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7213 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7214 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7215 {NULL, AARCH64_ARCH_NONE}
7216 };
7217
7218 struct aarch64_long_option_table
7219 {
7220 char *option; /* Substring to match. */
7221 char *help; /* Help information. */
7222 int (*func) (char *subopt); /* Function to decode sub-option. */
7223 char *deprecated; /* If non-null, print this message. */
7224 };
7225
7226 static int
7227 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p)
7228 {
7229 /* We insist on extensions being added before being removed. We achieve
7230 this by using the ADDING_VALUE variable to indicate whether we are
7231 adding an extension (1) or removing it (0) and only allowing it to
7232 change in the order -1 -> 1 -> 0. */
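/* For example (illustrative): "-mcpu=cortex-a53+crc+nocrypto" is accepted
because the extension being added comes before the one being removed,
whereas "-mcpu=cortex-a53+nocrypto+crc" is rejected with an error. */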
7233 int adding_value = -1;
7234 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7235
7236 /* Copy the feature set, so that we can modify it. */
7237 *ext_set = **opt_p;
7238 *opt_p = ext_set;
7239
7240 while (str != NULL && *str != 0)
7241 {
7242 const struct aarch64_option_cpu_value_table *opt;
7243 char *ext;
7244 int optlen;
7245
7246 if (*str != '+')
7247 {
7248 as_bad (_("invalid architectural extension"));
7249 return 0;
7250 }
7251
7252 str++;
7253 ext = strchr (str, '+');
7254
7255 if (ext != NULL)
7256 optlen = ext - str;
7257 else
7258 optlen = strlen (str);
7259
7260 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7261 {
7262 if (adding_value != 0)
7263 adding_value = 0;
7264 optlen -= 2;
7265 str += 2;
7266 }
7267 else if (optlen > 0)
7268 {
7269 if (adding_value == -1)
7270 adding_value = 1;
7271 else if (adding_value != 1)
7272 {
7273 as_bad (_("must specify extensions to add before specifying "
7274 "those to remove"));
7275 return FALSE;
7276 }
7277 }
7278
7279 if (optlen == 0)
7280 {
7281 as_bad (_("missing architectural extension"));
7282 return 0;
7283 }
7284
7285 gas_assert (adding_value != -1);
7286
7287 for (opt = aarch64_features; opt->name != NULL; opt++)
7288 if (strncmp (opt->name, str, optlen) == 0)
7289 {
7290 /* Add or remove the extension. */
7291 if (adding_value)
7292 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7293 else
7294 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7295 break;
7296 }
7297
7298 if (opt->name == NULL)
7299 {
7300 as_bad (_("unknown architectural extension `%s'"), str);
7301 return 0;
7302 }
7303
7304 str = ext;
7305 };
7306
7307 return 1;
7308 }
7309
7310 static int
7311 aarch64_parse_cpu (char *str)
7312 {
7313 const struct aarch64_cpu_option_table *opt;
7314 char *ext = strchr (str, '+');
7315 size_t optlen;
7316
7317 if (ext != NULL)
7318 optlen = ext - str;
7319 else
7320 optlen = strlen (str);
7321
7322 if (optlen == 0)
7323 {
7324 as_bad (_("missing cpu name `%s'"), str);
7325 return 0;
7326 }
7327
7328 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7329 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7330 {
7331 mcpu_cpu_opt = &opt->value;
7332 if (ext != NULL)
7333 return aarch64_parse_features (ext, &mcpu_cpu_opt);
7334
7335 return 1;
7336 }
7337
7338 as_bad (_("unknown cpu `%s'"), str);
7339 return 0;
7340 }
7341
7342 static int
7343 aarch64_parse_arch (char *str)
7344 {
7345 const struct aarch64_arch_option_table *opt;
7346 char *ext = strchr (str, '+');
7347 size_t optlen;
7348
7349 if (ext != NULL)
7350 optlen = ext - str;
7351 else
7352 optlen = strlen (str);
7353
7354 if (optlen == 0)
7355 {
7356 as_bad (_("missing architecture name `%s'"), str);
7357 return 0;
7358 }
7359
7360 for (opt = aarch64_archs; opt->name != NULL; opt++)
7361 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7362 {
7363 march_cpu_opt = &opt->value;
7364 if (ext != NULL)
7365 return aarch64_parse_features (ext, &march_cpu_opt);
7366
7367 return 1;
7368 }
7369
7370 as_bad (_("unknown architecture `%s'\n"), str);
7371 return 0;
7372 }
7373
7374 /* ABIs. */
7375 struct aarch64_option_abi_value_table
7376 {
7377 char *name;
7378 enum aarch64_abi_type value;
7379 };
7380
7381 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7382 {"ilp32", AARCH64_ABI_ILP32},
7383 {"lp64", AARCH64_ABI_LP64},
7384 {NULL, 0}
7385 };
7386
7387 static int
7388 aarch64_parse_abi (char *str)
7389 {
7390 const struct aarch64_option_abi_value_table *opt;
7391 size_t optlen = strlen (str);
7392
7393 if (optlen == 0)
7394 {
7395 as_bad (_("missing abi name `%s'"), str);
7396 return 0;
7397 }
7398
7399 for (opt = aarch64_abis; opt->name != NULL; opt++)
7400 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7401 {
7402 aarch64_abi = opt->value;
7403 return 1;
7404 }
7405
7406 as_bad (_("unknown abi `%s'\n"), str);
7407 return 0;
7408 }
7409
7410 static struct aarch64_long_option_table aarch64_long_opts[] = {
7411 #ifdef OBJ_ELF
7412 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
7413 aarch64_parse_abi, NULL},
7414 #endif /* OBJ_ELF */
7415 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7416 aarch64_parse_cpu, NULL},
7417 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7418 aarch64_parse_arch, NULL},
7419 {NULL, NULL, 0, NULL}
7420 };
7421
7422 int
7423 md_parse_option (int c, char *arg)
7424 {
7425 struct aarch64_option_table *opt;
7426 struct aarch64_long_option_table *lopt;
7427
7428 switch (c)
7429 {
7430 #ifdef OPTION_EB
7431 case OPTION_EB:
7432 target_big_endian = 1;
7433 break;
7434 #endif
7435
7436 #ifdef OPTION_EL
7437 case OPTION_EL:
7438 target_big_endian = 0;
7439 break;
7440 #endif
7441
7442 case 'a':
7443 /* Listing option. Just ignore these, we don't support additional
7444 ones. */
7445 return 0;
7446
7447 default:
7448 for (opt = aarch64_opts; opt->option != NULL; opt++)
7449 {
7450 if (c == opt->option[0]
7451 && ((arg == NULL && opt->option[1] == 0)
7452 || streq (arg, opt->option + 1)))
7453 {
7454 /* If the option is deprecated, tell the user. */
7455 if (opt->deprecated != NULL)
7456 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7457 arg ? arg : "", _(opt->deprecated));
7458
7459 if (opt->var != NULL)
7460 *opt->var = opt->value;
7461
7462 return 1;
7463 }
7464 }
7465
7466 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7467 {
7468 /* These options are expected to have an argument. */
7469 if (c == lopt->option[0]
7470 && arg != NULL
7471 && strncmp (arg, lopt->option + 1,
7472 strlen (lopt->option + 1)) == 0)
7473 {
7474 /* If the option is deprecated, tell the user. */
7475 if (lopt->deprecated != NULL)
7476 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7477 _(lopt->deprecated));
7478
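/* Illustrative example: for "-mcpu=cortex-a53+crc", C is 'm' and ARG is
"cpu=cortex-a53+crc"; LOPT->option is "mcpu=", so the expression below
passes "cortex-a53+crc" to aarch64_parse_cpu. */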
7479 /* Call the sub-option parser. */
7480 return lopt->func (arg + strlen (lopt->option) - 1);
7481 }
7482 }
7483
7484 return 0;
7485 }
7486
7487 return 1;
7488 }
7489
7490 void
7491 md_show_usage (FILE * fp)
7492 {
7493 struct aarch64_option_table *opt;
7494 struct aarch64_long_option_table *lopt;
7495
7496 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7497
7498 for (opt = aarch64_opts; opt->option != NULL; opt++)
7499 if (opt->help != NULL)
7500 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7501
7502 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7503 if (lopt->help != NULL)
7504 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7505
7506 #ifdef OPTION_EB
7507 fprintf (fp, _("\
7508 -EB assemble code for a big-endian cpu\n"));
7509 #endif
7510
7511 #ifdef OPTION_EL
7512 fprintf (fp, _("\
7513 -EL assemble code for a little-endian cpu\n"));
7514 #endif
7515 }
7516
7517 /* Parse a .cpu directive. */
7518
7519 static void
7520 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7521 {
7522 const struct aarch64_cpu_option_table *opt;
7523 char saved_char;
7524 char *name;
7525 char *ext;
7526 size_t optlen;
7527
7528 name = input_line_pointer;
7529 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7530 input_line_pointer++;
7531 saved_char = *input_line_pointer;
7532 *input_line_pointer = 0;
7533
7534 ext = strchr (name, '+');
7535
7536 if (ext != NULL)
7537 optlen = ext - name;
7538 else
7539 optlen = strlen (name);
7540
7541 /* Skip the first "all" entry. */
7542 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7543 if (strlen (opt->name) == optlen
7544 && strncmp (name, opt->name, optlen) == 0)
7545 {
7546 mcpu_cpu_opt = &opt->value;
7547 if (ext != NULL)
7548 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7549 return;
7550
7551 cpu_variant = *mcpu_cpu_opt;
7552
7553 *input_line_pointer = saved_char;
7554 demand_empty_rest_of_line ();
7555 return;
7556 }
7557 as_bad (_("unknown cpu `%s'"), name);
7558 *input_line_pointer = saved_char;
7559 ignore_rest_of_line ();
7560 }
7561
7562
7563 /* Parse a .arch directive. */
7564
7565 static void
7566 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7567 {
7568 const struct aarch64_arch_option_table *opt;
7569 char saved_char;
7570 char *name;
7571 char *ext;
7572 size_t optlen;
7573
7574 name = input_line_pointer;
7575 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7576 input_line_pointer++;
7577 saved_char = *input_line_pointer;
7578 *input_line_pointer = 0;
7579
7580 ext = strchr (name, '+');
7581
7582 if (ext != NULL)
7583 optlen = ext - name;
7584 else
7585 optlen = strlen (name);
7586
7587 /* Skip the first "all" entry. */
7588 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7589 if (strlen (opt->name) == optlen
7590 && strncmp (name, opt->name, optlen) == 0)
7591 {
7592 mcpu_cpu_opt = &opt->value;
7593 if (ext != NULL)
7594 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7595 return;
7596
7597 cpu_variant = *mcpu_cpu_opt;
7598
7599 *input_line_pointer = saved_char;
7600 demand_empty_rest_of_line ();
7601 return;
7602 }
7603
7604 as_bad (_("unknown architecture `%s'\n"), name);
7605 *input_line_pointer = saved_char;
7606 ignore_rest_of_line ();
7607 }
7608
7609 /* Copy symbol information. */
7610
7611 void
7612 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7613 {
7614 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7615 }