Add support for 64-bit ARM architecture: AArch64
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 static aarch64_feature_set cpu_variant;
46
47 /* Variables that we set while parsing command-line options. Once all
48 options have been read we re-process these values to set the real
49 assembly flags. */
50 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
51 static const aarch64_feature_set *march_cpu_opt = NULL;
52
53 /* Constants for known architecture features. */
54 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
55
56 static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY;
57 static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE;
58
59 #ifdef OBJ_ELF
60 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
61 static symbolS *GOT_symbol;
62 #endif
63
64 enum neon_el_type
65 {
66 NT_invtype = -1,
67 NT_b,
68 NT_h,
69 NT_s,
70 NT_d,
71 NT_q
72 };
73
74 /* Bits for DEFINED field in neon_type_el. */
75 #define NTA_HASTYPE 1
76 #define NTA_HASINDEX 2
77
78 struct neon_type_el
79 {
80 enum neon_el_type type;
81 unsigned char defined;
82 unsigned width;
83 int64_t index;
84 };
85
86 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
87
88 struct reloc
89 {
90 bfd_reloc_code_real_type type;
91 expressionS exp;
92 int pc_rel;
93 enum aarch64_opnd opnd;
94 uint32_t flags;
95 unsigned need_libopcodes_p : 1;
96 };
97
98 struct aarch64_instruction
99 {
100 /* libopcodes structure for instruction intermediate representation. */
101 aarch64_inst base;
102 /* Record assembly errors found during the parsing. */
103 struct
104 {
105 enum aarch64_operand_error_kind kind;
106 const char *error;
107 } parsing_error;
108 /* The condition that appears in the assembly line. */
109 int cond;
110 /* Relocation information (including the GAS internal fixup). */
111 struct reloc reloc;
112 /* Need to generate an immediate in the literal pool. */
113 unsigned gen_lit_pool : 1;
114 };
115
116 typedef struct aarch64_instruction aarch64_instruction;
117
118 static aarch64_instruction inst;
119
120 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
121 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
122
123 /* Diagnostics inline function utilities.
124
125 These are lightweight utilities which should only be called by parse_operands
126 and other parsers. GAS processes each assembly line by parsing it against
127 instruction template(s); in the case of multiple templates (for the same
128 mnemonic name), those templates are tried one by one until one succeeds or
129 all fail. An assembly line may fail a few templates before being
130 successfully parsed; an error saved here in most cases is not a user error
131 but an error indicating that the current template is not the right one.
132 Therefore it is very important that errors can be saved at a low cost during
133 parsing; we don't want to slow down the whole process by recording
134 non-user errors in detail.
135
136 Remember that the objective is to help GAS pick the most appropriate
137 error message in the case of multiple templates, e.g. FMOV, which has 8
138 templates. */
139
140 static inline void
141 clear_error (void)
142 {
143 inst.parsing_error.kind = AARCH64_OPDE_NIL;
144 inst.parsing_error.error = NULL;
145 }
146
147 static inline bfd_boolean
148 error_p (void)
149 {
150 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
151 }
152
153 static inline const char *
154 get_error_message (void)
155 {
156 return inst.parsing_error.error;
157 }
158
159 static inline void
160 set_error_message (const char *error)
161 {
162 inst.parsing_error.error = error;
163 }
164
165 static inline enum aarch64_operand_error_kind
166 get_error_kind (void)
167 {
168 return inst.parsing_error.kind;
169 }
170
171 static inline void
172 set_error_kind (enum aarch64_operand_error_kind kind)
173 {
174 inst.parsing_error.kind = kind;
175 }
176
177 static inline void
178 set_error (enum aarch64_operand_error_kind kind, const char *error)
179 {
180 inst.parsing_error.kind = kind;
181 inst.parsing_error.error = error;
182 }
183
184 static inline void
185 set_recoverable_error (const char *error)
186 {
187 set_error (AARCH64_OPDE_RECOVERABLE, error);
188 }
189
190 /* Use the DESC field of the corresponding aarch64_operand entry to compose
191 the error message. */
192 static inline void
193 set_default_error (void)
194 {
195 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
196 }
197
198 static inline void
199 set_syntax_error (const char *error)
200 {
201 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
202 }
203
204 static inline void
205 set_first_syntax_error (const char *error)
206 {
207 if (! error_p ())
208 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
209 }
210
211 static inline void
212 set_fatal_syntax_error (const char *error)
213 {
214 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
215 }
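
/* Illustrative sketch (added for clarity; not part of the original source).
   A typical operand parser records at most one diagnostic, cheaply, through
   the helpers above and leaves the reporting decision to the caller:

       if (reg == PARSE_FAIL)
         {
           set_default_error ();     // message composed later from DESC
           return FALSE;
         }
       ...
       clear_error ();               // before retrying the next template

   Here "reg" is a hypothetical local; the helpers (and PARSE_FAIL, defined
   just below) are real.  */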
216 \f
217 /* Number of littlenums required to hold an extended precision number. */
218 #define MAX_LITTLENUMS 6
219
220 /* Return value for certain parsers when the parsing fails; on success those
221 parsers return information about the parsed result, e.g. a register
222 number. */
223 #define PARSE_FAIL -1
224
225 /* This is an invalid condition code that means no conditional field is
226 present. */
227 #define COND_ALWAYS 0x10
228
229 typedef struct
230 {
231 const char *template;
232 unsigned long value;
233 } asm_barrier_opt;
234
235 typedef struct
236 {
237 const char *template;
238 uint32_t value;
239 } asm_nzcv;
240
241 struct reloc_entry
242 {
243 char *name;
244 bfd_reloc_code_real_type reloc;
245 };
246
247 /* Structure for a hash table entry for a register. */
248 typedef struct
249 {
250 const char *name;
251 unsigned char number;
252 unsigned char type;
253 unsigned char builtin;
254 } reg_entry;
255
256 /* Macros to define the register types and masks for the purpose
257 of parsing. */
258
259 #undef AARCH64_REG_TYPES
260 #define AARCH64_REG_TYPES \
261 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
262 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
263 BASIC_REG_TYPE(SP_32) /* wsp */ \
264 BASIC_REG_TYPE(SP_64) /* sp */ \
265 BASIC_REG_TYPE(Z_32) /* wzr */ \
266 BASIC_REG_TYPE(Z_64) /* xzr */ \
267 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
268 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
269 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
270 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
271 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
272 BASIC_REG_TYPE(CN) /* c[0-7] */ \
273 BASIC_REG_TYPE(VN) /* v[0-31] */ \
274 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
275 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
276 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
277 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
278 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
279 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
280 /* Typecheck: any [BHSDQ]P FP. */ \
281 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
282 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
283 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
284 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
285 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
286 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
287 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
288 /* Any integer register; used for error messages only. */ \
289 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
290 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
291 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
292 /* Pseudo type to mark the end of the enumerator sequence. */ \
293 BASIC_REG_TYPE(MAX)
294
295 #undef BASIC_REG_TYPE
296 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
297 #undef MULTI_REG_TYPE
298 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
299
300 /* Register type enumerators. */
301 typedef enum
302 {
303 /* A list of REG_TYPE_*. */
304 AARCH64_REG_TYPES
305 } aarch64_reg_type;
306
307 #undef BASIC_REG_TYPE
308 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
309 #undef REG_TYPE
310 #define REG_TYPE(T) (1 << REG_TYPE_##T)
311 #undef MULTI_REG_TYPE
312 #define MULTI_REG_TYPE(T,V) V,
313
314 /* Values indexed by aarch64_reg_type to assist the type checking. */
315 static const unsigned reg_type_masks[] =
316 {
317 AARCH64_REG_TYPES
318 };
319
320 #undef BASIC_REG_TYPE
321 #undef REG_TYPE
322 #undef MULTI_REG_TYPE
323 #undef AARCH64_REG_TYPES
324
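
/* For illustration (added; not in the original source): the X-macro above
   expands so that, e.g., BASIC_REG_TYPE(R_32) contributes the enumerator
   REG_TYPE_R_32 and the mask entry (1 << REG_TYPE_R_32), while
   MULTI_REG_TYPE(R_Z_SP, ...) contributes REG_TYPE_R_Z_SP and a mask that is
   the OR of the R_32, R_64, SP_32, SP_64, Z_32 and Z_64 bits.  This is what
   allows aarch64_check_reg_type below to test membership with a single mask
   comparison.  */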
325 /* Diagnostics used when we don't get a register of the expected type.
326 Note: this has to be kept synchronized with the aarch64_reg_type definitions
327 above. */
328 static const char *
329 get_reg_expected_msg (aarch64_reg_type reg_type)
330 {
331 const char *msg;
332
333 switch (reg_type)
334 {
335 case REG_TYPE_R_32:
336 msg = N_("integer 32-bit register expected");
337 break;
338 case REG_TYPE_R_64:
339 msg = N_("integer 64-bit register expected");
340 break;
341 case REG_TYPE_R_N:
342 msg = N_("integer register expected");
343 break;
344 case REG_TYPE_R_Z_SP:
345 msg = N_("integer, zero or SP register expected");
346 break;
347 case REG_TYPE_FP_B:
348 msg = N_("8-bit SIMD scalar register expected");
349 break;
350 case REG_TYPE_FP_H:
351 msg = N_("16-bit SIMD scalar or floating-point half precision "
352 "register expected");
353 break;
354 case REG_TYPE_FP_S:
355 msg = N_("32-bit SIMD scalar or floating-point single precision "
356 "register expected");
357 break;
358 case REG_TYPE_FP_D:
359 msg = N_("64-bit SIMD scalar or floating-point double precision "
360 "register expected");
361 break;
362 case REG_TYPE_FP_Q:
363 msg = N_("128-bit SIMD scalar or floating-point quad precision "
364 "register expected");
365 break;
366 case REG_TYPE_CN:
367 msg = N_("C0 - C15 expected");
368 break;
369 case REG_TYPE_R_Z_BHSDQ_V:
370 msg = N_("register expected");
371 break;
372 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
373 msg = N_("SIMD scalar or floating-point register expected");
374 break;
375 case REG_TYPE_VN: /* any V reg */
376 msg = N_("vector register expected");
377 break;
378 default:
379 as_fatal (_("invalid register type %d"), reg_type);
380 }
381 return msg;
382 }
383
384 /* Some well known registers that we refer to directly elsewhere. */
385 #define REG_SP 31
386
387 /* Instructions take 4 bytes in the object file. */
388 #define INSN_SIZE 4
389
390 /* Define some common error messages. */
391 #define BAD_SP _("SP not allowed here")
392
393 static struct hash_control *aarch64_ops_hsh;
394 static struct hash_control *aarch64_cond_hsh;
395 static struct hash_control *aarch64_shift_hsh;
396 static struct hash_control *aarch64_sys_regs_hsh;
397 static struct hash_control *aarch64_pstatefield_hsh;
398 static struct hash_control *aarch64_sys_regs_ic_hsh;
399 static struct hash_control *aarch64_sys_regs_dc_hsh;
400 static struct hash_control *aarch64_sys_regs_at_hsh;
401 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
402 static struct hash_control *aarch64_reg_hsh;
403 static struct hash_control *aarch64_barrier_opt_hsh;
404 static struct hash_control *aarch64_nzcv_hsh;
405 static struct hash_control *aarch64_pldop_hsh;
406
407 /* Stuff needed to resolve the label ambiguity.
408 As in:
409 ...
410 label: <insn>
411 may differ from:
412 ...
413 label:
414 <insn> */
415
416 static symbolS *last_label_seen;
417
418 /* Literal pool structure. Held on a per-section
419 and per-sub-section basis. */
420
421 #define MAX_LITERAL_POOL_SIZE 1024
422 typedef struct literal_pool
423 {
424 expressionS literals[MAX_LITERAL_POOL_SIZE];
425 unsigned int next_free_entry;
426 unsigned int id;
427 symbolS *symbol;
428 segT section;
429 subsegT sub_section;
430 int size;
431 struct literal_pool *next;
432 } literal_pool;
433
434 /* Pointer to a linked list of literal pools. */
435 static literal_pool *list_of_pools = NULL;
436 \f
437 /* Pure syntax. */
438
439 /* This array holds the chars that always start a comment. If the
440 pre-processor is disabled, these aren't very useful. */
441 const char comment_chars[] = "";
442
443 /* This array holds the chars that only start a comment at the beginning of
444 a line. If the line seems to have the form '# 123 filename'
445 .line and .file directives will appear in the pre-processed output. */
446 /* Note that input_file.c hand checks for '#' at the beginning of the
447 first line of the input file. This is because the compiler outputs
448 #NO_APP at the beginning of its output. */
449 /* Also note that comments like this one will always work. */
450 const char line_comment_chars[] = "#";
451
452 const char line_separator_chars[] = ";";
453
454 /* Chars that can be used to separate the mantissa
455 from the exponent in floating point numbers. */
456 const char EXP_CHARS[] = "eE";
457
458 /* Chars that mean this number is a floating point constant. */
459 /* As in 0f12.456 */
460 /* or 0d1.2345e12 */
461
462 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
463
464 /* Prefix character that indicates the start of an immediate value. */
465 #define is_immediate_prefix(C) ((C) == '#')
466
467 /* Separator character handling. */
468
469 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
470
471 static inline bfd_boolean
472 skip_past_char (char **str, char c)
473 {
474 if (**str == c)
475 {
476 (*str)++;
477 return TRUE;
478 }
479 else
480 return FALSE;
481 }
482
483 #define skip_past_comma(str) skip_past_char (str, ',')
484
485 /* Arithmetic expressions (possibly involving symbols). */
486
487 /* Return TRUE if anything in the expression *SP is a bignum. */
488
489 static bfd_boolean
490 exp_has_bignum_p (symbolS * sp)
491 {
492 if (symbol_get_value_expression (sp)->X_op == O_big)
493 return TRUE;
494
495 if (symbol_get_value_expression (sp)->X_add_symbol)
496 {
497 return (exp_has_bignum_p (symbol_get_value_expression (sp)->X_add_symbol)
498 || (symbol_get_value_expression (sp)->X_op_symbol
499 && exp_has_bignum_p (symbol_get_value_expression (sp)->
500 X_op_symbol)));
501 }
502
503 return FALSE;
504 }
505
506 static bfd_boolean in_my_get_expression_p = FALSE;
507
508 /* Third argument to my_get_expression. */
509 #define GE_NO_PREFIX 0
510 #define GE_OPT_PREFIX 1
511
512 /* Return TRUE if the string pointed to by *STR is successfully parsed
513 as a valid expression; *EP will be filled with the information of
514 such an expression. Otherwise return FALSE. */
515
516 static bfd_boolean
517 my_get_expression (expressionS * ep, char **str, int prefix_mode,
518 int reject_absent)
519 {
520 char *save_in;
521 segT seg;
522 int prefix_present_p = 0;
523
524 switch (prefix_mode)
525 {
526 case GE_NO_PREFIX:
527 break;
528 case GE_OPT_PREFIX:
529 if (is_immediate_prefix (**str))
530 {
531 (*str)++;
532 prefix_present_p = 1;
533 }
534 break;
535 default:
536 abort ();
537 }
538
539 memset (ep, 0, sizeof (expressionS));
540
541 save_in = input_line_pointer;
542 input_line_pointer = *str;
543 in_my_get_expression_p = TRUE;
544 seg = expression (ep);
545 in_my_get_expression_p = FALSE;
546
547 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
548 {
549 /* We found a bad expression in md_operand(). */
550 *str = input_line_pointer;
551 input_line_pointer = save_in;
552 if (prefix_present_p && ! error_p ())
553 set_fatal_syntax_error (_("bad expression"));
554 else
555 set_first_syntax_error (_("bad expression"));
556 return FALSE;
557 }
558
559 #ifdef OBJ_AOUT
560 if (seg != absolute_section
561 && seg != text_section
562 && seg != data_section
563 && seg != bss_section && seg != undefined_section)
564 {
565 set_syntax_error (_("bad segment"));
566 *str = input_line_pointer;
567 input_line_pointer = save_in;
568 return FALSE;
569 }
570 #else
571 (void) seg;
572 #endif
573
574 /* Get rid of any bignums now, so that we don't generate an error for which
575 we can't establish a line number later on. Big numbers are never valid
576 in instructions, which is where this routine is always called. */
577 if (ep->X_op == O_big
578 || (ep->X_add_symbol
579 && (exp_has_bignum_p (ep->X_add_symbol)
580 || (ep->X_op_symbol && exp_has_bignum_p (ep->X_op_symbol)))))
581 {
582 if (prefix_present_p && error_p ())
583 set_fatal_syntax_error (_("invalid constant"));
584 else
585 set_first_syntax_error (_("invalid constant"));
586 *str = input_line_pointer;
587 input_line_pointer = save_in;
588 return FALSE;
589 }
590
591 *str = input_line_pointer;
592 input_line_pointer = save_in;
593 return TRUE;
594 }
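
/* Illustrative note (added; not in the original source): callers pass
   GE_OPT_PREFIX when the '#' immediate prefix is optional, so that, for
   example, both

       add  x0, x1, #16
       add  x0, x1, 16

   reach the same expression parser; with GE_NO_PREFIX no prefix is consumed
   here.  */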
595
596 /* Turn a string in input_line_pointer into a floating point constant
597 of type TYPE, and store the appropriate bytes in *LITP. The number
598 of LITTLENUMS emitted is stored in *SIZEP. An error message is
599 returned, or NULL on OK. */
600
601 char *
602 md_atof (int type, char *litP, int *sizeP)
603 {
604 return ieee_md_atof (type, litP, sizeP, target_big_endian);
605 }
606
607 /* We handle all bad expressions here, so that we can report the faulty
608 instruction in the error message. */
609 void
610 md_operand (expressionS * exp)
611 {
612 if (in_my_get_expression_p)
613 exp->X_op = O_illegal;
614 }
615
616 /* Immediate values. */
617
618 /* Errors may be set multiple times during parsing or bit encoding
619 (particularly in the Neon bits), but usually the earliest error which is set
620 will be the most meaningful. Avoid overwriting it with later (cascading)
621 errors by calling this function. */
622
623 static void
624 first_error (const char *error)
625 {
626 if (! error_p ())
627 set_syntax_error (error);
628 }
629
630 /* Similar to first_error, but this function accepts a formatted error
631 message. */
632 static void
633 first_error_fmt (const char *format, ...)
634 {
635 va_list args;
636 enum
637 { size = 100 };
638 /* N.B. this single buffer will not cause error messages for different
639 instructions to pollute each other; this is because at the end of
640 processing each assembly line, the error message, if any, will be
641 collected by as_bad. */
642 static char buffer[size];
643
644 if (! error_p ())
645 {
646 int ret;
647 va_start (args, format);
648 ret = vsnprintf (buffer, size, format, args);
649 know (ret <= size - 1 && ret >= 0);
650 va_end (args);
651 set_syntax_error (buffer);
652 }
653 }
654
655 /* Register parsing. */
656
657 /* Generic register parser which is called by other specialized
658 register parsers.
659 CCP points to what should be the beginning of a register name.
660 If it is indeed a valid register name, advance CCP over it and
661 return the reg_entry structure; otherwise return NULL.
662 It does not issue diagnostics. */
663
664 static reg_entry *
665 parse_reg (char **ccp)
666 {
667 char *start = *ccp;
668 char *p;
669 reg_entry *reg;
670
671 #ifdef REGISTER_PREFIX
672 if (*start != REGISTER_PREFIX)
673 return NULL;
674 start++;
675 #endif
676
677 p = start;
678 if (!ISALPHA (*p) || !is_name_beginner (*p))
679 return NULL;
680
681 do
682 p++;
683 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
684
685 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
686
687 if (!reg)
688 return NULL;
689
690 *ccp = p;
691 return reg;
692 }
693
694 /* Return TRUE if REG->TYPE is of, or compatible with, the required type
695 TYPE; otherwise return FALSE. */
696 static bfd_boolean
697 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
698 {
699 if (reg->type == type)
700 return TRUE;
701
702 switch (type)
703 {
704 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
705 case REG_TYPE_R_Z_SP: /* Integer reg (inc {W}SP inc [WX]ZR). */
706 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
707 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
708 case REG_TYPE_VN: /* Vector register. */
709 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
710 return ((reg_type_masks[reg->type] & reg_type_masks[type])
711 == reg_type_masks[reg->type]);
712 default:
713 as_fatal ("unhandled type %d", type);
714 abort ();
715 }
716 }
717
718 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
719 Return the register number otherwise. *ISREG32 is set to one if the
720 register is 32-bit wide; *ISREGZERO is set to one if the register is
721 of type Z_32 or Z_64.
722 Note that this function does not issue any diagnostics. */
723
724 static int
725 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
726 int *isreg32, int *isregzero)
727 {
728 char *str = *ccp;
729 const reg_entry *reg = parse_reg (&str);
730
731 if (reg == NULL)
732 return PARSE_FAIL;
733
734 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
735 return PARSE_FAIL;
736
737 switch (reg->type)
738 {
739 case REG_TYPE_SP_32:
740 case REG_TYPE_SP_64:
741 if (reject_sp)
742 return PARSE_FAIL;
743 *isreg32 = reg->type == REG_TYPE_SP_32;
744 *isregzero = 0;
745 break;
746 case REG_TYPE_R_32:
747 case REG_TYPE_R_64:
748 *isreg32 = reg->type == REG_TYPE_R_32;
749 *isregzero = 0;
750 break;
751 case REG_TYPE_Z_32:
752 case REG_TYPE_Z_64:
753 if (reject_rz)
754 return PARSE_FAIL;
755 *isreg32 = reg->type == REG_TYPE_Z_32;
756 *isregzero = 1;
757 break;
758 default:
759 return PARSE_FAIL;
760 }
761
762 *ccp = str;
763
764 return reg->number;
765 }
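
/* Worked example (added; not in the original source): parsing "w3" returns 3
   with *ISREG32 = 1 and *ISREGZERO = 0; "wzr" sets *ISREG32 = 1 and
   *ISREGZERO = 1 (or fails if REJECT_RZ is set); "sp" and "wsp" fail when
   REJECT_SP is set.  */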
766
767 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
768 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
769 otherwise return FALSE.
770
771 Accept only one occurrence of:
772 8b 16b 4h 8h 2s 4s 1d 2d
773 b h s d q */
774 static bfd_boolean
775 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
776 {
777 char *ptr = *str;
778 unsigned width;
779 unsigned element_size;
780 enum neon_el_type type;
781
782 /* skip '.' */
783 ptr++;
784
785 if (!ISDIGIT (*ptr))
786 {
787 width = 0;
788 goto elt_size;
789 }
790 width = strtoul (ptr, &ptr, 10);
791 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
792 {
793 first_error_fmt (_("bad size %d in vector width specifier"), width);
794 return FALSE;
795 }
796
797 elt_size:
798 switch (TOLOWER (*ptr))
799 {
800 case 'b':
801 type = NT_b;
802 element_size = 8;
803 break;
804 case 'h':
805 type = NT_h;
806 element_size = 16;
807 break;
808 case 's':
809 type = NT_s;
810 element_size = 32;
811 break;
812 case 'd':
813 type = NT_d;
814 element_size = 64;
815 break;
816 case 'q':
817 if (width == 1)
818 {
819 type = NT_q;
820 element_size = 128;
821 break;
822 }
823 /* fall through. */
824 default:
825 if (*ptr != '\0')
826 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
827 else
828 first_error (_("missing element size"));
829 return FALSE;
830 }
831 if (width != 0 && width * element_size != 64 && width * element_size != 128)
832 {
833 first_error_fmt (_
834 ("invalid element size %d and vector size combination %c"),
835 width, *ptr);
836 return FALSE;
837 }
838 ptr++;
839
840 parsed_type->type = type;
841 parsed_type->width = width;
842
843 *str = ptr;
844
845 return TRUE;
846 }
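
/* Worked examples (added; not in the original source), following the checks
   above: ".4s" gives type NT_s, width 4 (4 * 32 = 128 bits, accepted);
   ".1d" gives NT_d, width 1 (64 bits, accepted); ".2h" is rejected because
   2 * 16 is neither 64 nor 128; a bare ".s" gives width 0, which the caller
   treats as an element qualifier that must be followed by an index.  */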
847
848 /* Parse a single type, e.g. ".8b", leading period included.
849 Only applicable to Vn registers.
850
851 Return TRUE on success; otherwise return FALSE. */
852 static bfd_boolean
853 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
854 {
855 char *str = *ccp;
856
857 if (*str == '.')
858 {
859 if (! parse_neon_type_for_operand (vectype, &str))
860 {
861 first_error (_("vector type expected"));
862 return FALSE;
863 }
864 }
865 else
866 return FALSE;
867
868 *ccp = str;
869
870 return TRUE;
871 }
872
873 /* Parse a register of the type TYPE.
874
875 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
876 name or the parsed register is not of TYPE.
877
878 Otherwise return the register number, and optionally fill in the actual
879 type of the register in *RTYPE when multiple alternatives were given, and
880 return the register shape and element index information in *TYPEINFO.
881
882 IN_REG_LIST should be set with TRUE if the caller is parsing a register
883 list. */
884
885 static int
886 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
887 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
888 {
889 char *str = *ccp;
890 const reg_entry *reg = parse_reg (&str);
891 struct neon_type_el atype;
892 struct neon_type_el parsetype;
893 bfd_boolean is_typed_vecreg = FALSE;
894
895 atype.defined = 0;
896 atype.type = NT_invtype;
897 atype.width = -1;
898 atype.index = 0;
899
900 if (reg == NULL)
901 {
902 if (typeinfo)
903 *typeinfo = atype;
904 set_default_error ();
905 return PARSE_FAIL;
906 }
907
908 if (! aarch64_check_reg_type (reg, type))
909 {
910 DEBUG_TRACE ("reg type check failed");
911 set_default_error ();
912 return PARSE_FAIL;
913 }
914 type = reg->type;
915
916 if (type == REG_TYPE_VN
917 && parse_neon_operand_type (&parsetype, &str))
918 {
919 /* Register is of the form Vn.[bhsdq]. */
920 is_typed_vecreg = TRUE;
921
922 if (parsetype.width == 0)
923 /* Expect index. In the new scheme we cannot have
924 Vn.[bhsdq] represent a scalar. Therefore any
925 Vn.[bhsdq] should have an index following it.
926 Except in reglists, of course. */
927 atype.defined |= NTA_HASINDEX;
928 else
929 atype.defined |= NTA_HASTYPE;
930
931 atype.type = parsetype.type;
932 atype.width = parsetype.width;
933 }
934
935 if (skip_past_char (&str, '['))
936 {
937 expressionS exp;
938
939 /* Reject Sn[index] syntax. */
940 if (!is_typed_vecreg)
941 {
942 first_error (_("this type of register can't be indexed"));
943 return PARSE_FAIL;
944 }
945
946 if (in_reg_list == TRUE)
947 {
948 first_error (_("index not allowed inside register list"));
949 return PARSE_FAIL;
950 }
951
952 atype.defined |= NTA_HASINDEX;
953
954 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
955
956 if (exp.X_op != O_constant)
957 {
958 first_error (_("constant expression required"));
959 return PARSE_FAIL;
960 }
961
962 if (! skip_past_char (&str, ']'))
963 return PARSE_FAIL;
964
965 atype.index = exp.X_add_number;
966 }
967 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
968 {
969 /* Indexed vector register expected. */
970 first_error (_("indexed vector register expected"));
971 return PARSE_FAIL;
972 }
973
974 /* A vector reg Vn should be typed or indexed. */
975 if (type == REG_TYPE_VN && atype.defined == 0)
976 {
977 first_error (_("invalid use of vector register"));
978 }
979
980 if (typeinfo)
981 *typeinfo = atype;
982
983 if (rtype)
984 *rtype = type;
985
986 *ccp = str;
987
988 return reg->number;
989 }
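
/* Illustrative examples (added; not in the original source) of operands this
   function accepts: "x5" (plain integer register, when TYPE allows it),
   "v3.4s" (typed vector register, NTA_HASTYPE), and "v3.s[2]" (vector
   element, NTA_HASINDEX with index 2).  "s1[0]" is rejected because only
   typed V registers may be indexed.  */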
990
991 /* Parse register.
992
993 Return the register number on success; return PARSE_FAIL otherwise.
994
995 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
996 the register (e.g. NEON double or quad reg when either has been requested).
997
998 If this is a NEON vector register with additional type information, fill
999 in the struct pointed to by VECTYPE (if non-NULL).
1000
1001 This parser does not handle register lists. */
1002
1003 static int
1004 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1005 aarch64_reg_type *rtype, struct neon_type_el *vectype)
1006 {
1007 struct neon_type_el atype;
1008 char *str = *ccp;
1009 int reg = parse_typed_reg (&str, type, rtype, &atype,
1010 /*in_reg_list= */ FALSE);
1011
1012 if (reg == PARSE_FAIL)
1013 return PARSE_FAIL;
1014
1015 if (vectype)
1016 *vectype = atype;
1017
1018 *ccp = str;
1019
1020 return reg;
1021 }
1022
1023 static inline bfd_boolean
1024 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
1025 {
1026 return
1027 e1.type == e2.type
1028 && e1.defined == e2.defined
1029 && e1.width == e2.width && e1.index == e2.index;
1030 }
1031
1032 /* This function parses the NEON register list. On success, it returns
1033 the parsed register list information in the following encoded format:
1034
1035 bits 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1036 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1037
1038 The information of the register shape and/or index is returned in
1039 *VECTYPE.
1040
1041 It returns PARSE_FAIL if the register list is invalid.
1042
1043 The list contains one to four registers.
1044 Each register can be one of:
1045 <Vt>.<T>[<index>]
1046 <Vt>.<T>
1047 All <T> should be identical.
1048 All <index> should be identical.
1049 There are restrictions on <Vt> numbers which are checked later
1050 (by reg_list_valid_p). */
1051
1052 static int
1053 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1054 {
1055 char *str = *ccp;
1056 int nb_regs;
1057 struct neon_type_el typeinfo, typeinfo_first;
1058 int val, val_range;
1059 int in_range;
1060 int ret_val;
1061 int i;
1062 bfd_boolean error = FALSE;
1063 bfd_boolean expect_index = FALSE;
1064
1065 if (*str != '{')
1066 {
1067 set_syntax_error (_("expecting {"));
1068 return PARSE_FAIL;
1069 }
1070 str++;
1071
1072 nb_regs = 0;
1073 typeinfo_first.defined = 0;
1074 typeinfo_first.type = NT_invtype;
1075 typeinfo_first.width = -1;
1076 typeinfo_first.index = 0;
1077 ret_val = 0;
1078 val = -1;
1079 val_range = -1;
1080 in_range = 0;
1081 do
1082 {
1083 if (in_range)
1084 {
1085 str++; /* skip over '-' */
1086 val_range = val;
1087 }
1088 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1089 /*in_reg_list= */ TRUE);
1090 if (val == PARSE_FAIL)
1091 {
1092 set_first_syntax_error (_("invalid vector register in list"));
1093 error = TRUE;
1094 continue;
1095 }
1096 /* reject [bhsd]n */
1097 if (typeinfo.defined == 0)
1098 {
1099 set_first_syntax_error (_("invalid scalar register in list"));
1100 error = TRUE;
1101 continue;
1102 }
1103
1104 if (typeinfo.defined & NTA_HASINDEX)
1105 expect_index = TRUE;
1106
1107 if (in_range)
1108 {
1109 if (val < val_range)
1110 {
1111 set_first_syntax_error
1112 (_("invalid range in vector register list"));
1113 error = TRUE;
1114 }
1115 val_range++;
1116 }
1117 else
1118 {
1119 val_range = val;
1120 if (nb_regs == 0)
1121 typeinfo_first = typeinfo;
1122 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1123 {
1124 set_first_syntax_error
1125 (_("type mismatch in vector register list"));
1126 error = TRUE;
1127 }
1128 }
1129 if (! error)
1130 for (i = val_range; i <= val; i++)
1131 {
1132 ret_val |= i << (5 * nb_regs);
1133 nb_regs++;
1134 }
1135 in_range = 0;
1136 }
1137 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1138
1139 skip_whitespace (str);
1140 if (*str != '}')
1141 {
1142 set_first_syntax_error (_("end of vector register list not found"));
1143 error = TRUE;
1144 }
1145 str++;
1146
1147 skip_whitespace (str);
1148
1149 if (expect_index)
1150 {
1151 if (skip_past_char (&str, '['))
1152 {
1153 expressionS exp;
1154
1155 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1156 if (exp.X_op != O_constant)
1157 {
1158 set_first_syntax_error (_("constant expression required."));
1159 error = TRUE;
1160 }
1161 if (! skip_past_char (&str, ']'))
1162 error = TRUE;
1163 else
1164 typeinfo_first.index = exp.X_add_number;
1165 }
1166 else
1167 {
1168 set_first_syntax_error (_("expected index"));
1169 error = TRUE;
1170 }
1171 }
1172
1173 if (nb_regs > 4)
1174 {
1175 set_first_syntax_error (_("too many registers in vector register list"));
1176 error = TRUE;
1177 }
1178 else if (nb_regs == 0)
1179 {
1180 set_first_syntax_error (_("empty vector register list"));
1181 error = TRUE;
1182 }
1183
1184 *ccp = str;
1185 if (! error)
1186 *vectype = typeinfo_first;
1187
1188 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1189 }
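
/* Worked example (added; not in the original source) of the encoding
   described above: "{v2.4s, v3.4s}" gives nb_regs = 2 and
   ret_val = 2 | (3 << 5) = 0x62, so the function returns
   (0x62 << 2) | (2 - 1) = 0x189: two registers, first regno 2 in bits 2-6,
   second regno 3 in bits 7-11.  */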
1190
1191 /* Directives: register aliases. */
1192
1193 static reg_entry *
1194 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1195 {
1196 reg_entry *new;
1197 const char *name;
1198
1199 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1200 {
1201 if (new->builtin)
1202 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1203 str);
1204
1205 /* Only warn about a redefinition if it's not defined as the
1206 same register. */
1207 else if (new->number != number || new->type != type)
1208 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1209
1210 return NULL;
1211 }
1212
1213 name = xstrdup (str);
1214 new = xmalloc (sizeof (reg_entry));
1215
1216 new->name = name;
1217 new->number = number;
1218 new->type = type;
1219 new->builtin = FALSE;
1220
1221 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1222 abort ();
1223
1224 return new;
1225 }
1226
1227 /* Look for the .req directive. This is of the form:
1228
1229 new_register_name .req existing_register_name
1230
1231 If we find one, or if it looks sufficiently like one that we want to
1232 handle any error here, return TRUE. Otherwise return FALSE. */
1233
1234 static bfd_boolean
1235 create_register_alias (char *newname, char *p)
1236 {
1237 const reg_entry *old;
1238 char *oldname, *nbuf;
1239 size_t nlen;
1240
1241 /* The input scrubber ensures that whitespace after the mnemonic is
1242 collapsed to single spaces. */
1243 oldname = p;
1244 if (strncmp (oldname, " .req ", 6) != 0)
1245 return FALSE;
1246
1247 oldname += 6;
1248 if (*oldname == '\0')
1249 return FALSE;
1250
1251 old = hash_find (aarch64_reg_hsh, oldname);
1252 if (!old)
1253 {
1254 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1255 return TRUE;
1256 }
1257
1258 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1259 the desired alias name, and p points to its end. If not, then
1260 the desired alias name is in the global original_case_string. */
1261 #ifdef TC_CASE_SENSITIVE
1262 nlen = p - newname;
1263 #else
1264 newname = original_case_string;
1265 nlen = strlen (newname);
1266 #endif
1267
1268 nbuf = alloca (nlen + 1);
1269 memcpy (nbuf, newname, nlen);
1270 nbuf[nlen] = '\0';
1271
1272 /* Create aliases under the new name as stated; an all-lowercase
1273 version of the new name; and an all-uppercase version of the new
1274 name. */
1275 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1276 {
1277 for (p = nbuf; *p; p++)
1278 *p = TOUPPER (*p);
1279
1280 if (strncmp (nbuf, newname, nlen))
1281 {
1282 /* If this attempt to create an additional alias fails, do not bother
1283 trying to create the all-lower case alias. We will fail and issue
1284 a second, duplicate error message. This situation arises when the
1285 programmer does something like:
1286 foo .req x0
1287 Foo .req x1
1288 The second .req creates the "Foo" alias but then fails to create
1289 the artificial FOO alias because it has already been created by the
1290 first .req. */
1291 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1292 return TRUE;
1293 }
1294
1295 for (p = nbuf; *p; p++)
1296 *p = TOLOWER (*p);
1297
1298 if (strncmp (nbuf, newname, nlen))
1299 insert_reg_alias (nbuf, old->number, old->type);
1300 }
1301
1302 return TRUE;
1303 }
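
/* Usage sketch (added; not in the original source): a typical alias
   definition and use might look like

       base  .req  x19
       mov   base, sp

   The code above also creates the all-uppercase ("BASE") and all-lowercase
   variants of the alias name.  */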
1304
1305 /* Should never be called, as .req goes between the alias and the
1306 register name, not at the beginning of the line. */
1307 static void
1308 s_req (int a ATTRIBUTE_UNUSED)
1309 {
1310 as_bad (_("invalid syntax for .req directive"));
1311 }
1312
1313 /* The .unreq directive deletes an alias which was previously defined
1314 by .req. For example:
1315
1316 my_alias .req x11
1317 .unreq my_alias */
1318
1319 static void
1320 s_unreq (int a ATTRIBUTE_UNUSED)
1321 {
1322 char *name;
1323 char saved_char;
1324
1325 name = input_line_pointer;
1326
1327 while (*input_line_pointer != 0
1328 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1329 ++input_line_pointer;
1330
1331 saved_char = *input_line_pointer;
1332 *input_line_pointer = 0;
1333
1334 if (!*name)
1335 as_bad (_("invalid syntax for .unreq directive"));
1336 else
1337 {
1338 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1339
1340 if (!reg)
1341 as_bad (_("unknown register alias '%s'"), name);
1342 else if (reg->builtin)
1343 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1344 name);
1345 else
1346 {
1347 char *p;
1348 char *nbuf;
1349
1350 hash_delete (aarch64_reg_hsh, name, FALSE);
1351 free ((char *) reg->name);
1352 free (reg);
1353
1354 /* Also locate the all upper case and all lower case versions.
1355 Do not complain if we cannot find one or the other as it
1356 was probably deleted above. */
1357
1358 nbuf = strdup (name);
1359 for (p = nbuf; *p; p++)
1360 *p = TOUPPER (*p);
1361 reg = hash_find (aarch64_reg_hsh, nbuf);
1362 if (reg)
1363 {
1364 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1365 free ((char *) reg->name);
1366 free (reg);
1367 }
1368
1369 for (p = nbuf; *p; p++)
1370 *p = TOLOWER (*p);
1371 reg = hash_find (aarch64_reg_hsh, nbuf);
1372 if (reg)
1373 {
1374 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1375 free ((char *) reg->name);
1376 free (reg);
1377 }
1378
1379 free (nbuf);
1380 }
1381 }
1382
1383 *input_line_pointer = saved_char;
1384 demand_empty_rest_of_line ();
1385 }
1386
1387 /* Directives: Instruction set selection. */
1388
1389 #ifdef OBJ_ELF
1390 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1391 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1392 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1393 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1394
1395 /* Create a new mapping symbol for the transition to STATE. */
1396
1397 static void
1398 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1399 {
1400 symbolS *symbolP;
1401 const char *symname;
1402 int type;
1403
1404 switch (state)
1405 {
1406 case MAP_DATA:
1407 symname = "$d";
1408 type = BSF_NO_FLAGS;
1409 break;
1410 case MAP_INSN:
1411 symname = "$x";
1412 type = BSF_NO_FLAGS;
1413 break;
1414 default:
1415 abort ();
1416 }
1417
1418 symbolP = symbol_new (symname, now_seg, value, frag);
1419 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1420
1421 /* Save the mapping symbols for future reference. Also check that
1422 we do not place two mapping symbols at the same offset within a
1423 frag. We'll handle overlap between frags in
1424 check_mapping_symbols.
1425
1426 If .fill or other data filling directive generates zero sized data,
1427 the mapping symbol for the following code will have the same value
1428 as the one generated for the data filling directive. In this case,
1429 we replace the old symbol with the new one at the same address. */
1430 if (value == 0)
1431 {
1432 if (frag->tc_frag_data.first_map != NULL)
1433 {
1434 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1435 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1436 &symbol_lastP);
1437 }
1438 frag->tc_frag_data.first_map = symbolP;
1439 }
1440 if (frag->tc_frag_data.last_map != NULL)
1441 {
1442 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1443 S_GET_VALUE (symbolP));
1444 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1445 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1446 &symbol_lastP);
1447 }
1448 frag->tc_frag_data.last_map = symbolP;
1449 }
1450
1451 /* We must sometimes convert a region marked as code to data during
1452 code alignment, if an odd number of bytes have to be padded. The
1453 code mapping symbol is pushed to an aligned address. */
1454
1455 static void
1456 insert_data_mapping_symbol (enum mstate state,
1457 valueT value, fragS * frag, offsetT bytes)
1458 {
1459 /* If there was already a mapping symbol, remove it. */
1460 if (frag->tc_frag_data.last_map != NULL
1461 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1462 frag->fr_address + value)
1463 {
1464 symbolS *symp = frag->tc_frag_data.last_map;
1465
1466 if (value == 0)
1467 {
1468 know (frag->tc_frag_data.first_map == symp);
1469 frag->tc_frag_data.first_map = NULL;
1470 }
1471 frag->tc_frag_data.last_map = NULL;
1472 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1473 }
1474
1475 make_mapping_symbol (MAP_DATA, value, frag);
1476 make_mapping_symbol (state, value + bytes, frag);
1477 }
1478
1479 static void mapping_state_2 (enum mstate state, int max_chars);
1480
1481 /* Set the mapping state to STATE. Only call this when about to
1482 emit some STATE bytes to the file. */
1483
1484 void
1485 mapping_state (enum mstate state)
1486 {
1487 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1488
1489 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1490
1491 if (mapstate == state)
1492 /* The mapping symbol has already been emitted.
1493 There is nothing else to do. */
1494 return;
1495 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1496 /* This case will be evaluated later in the next else. */
1497 return;
1498 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1499 {
1500 /* Only add the symbol if the offset is > 0:
1501 if we're at the first frag, check its size > 0;
1502 if we're not at the first frag, then for sure
1503 the offset is > 0. */
1504 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1505 const int add_symbol = (frag_now != frag_first)
1506 || (frag_now_fix () > 0);
1507
1508 if (add_symbol)
1509 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1510 }
1511
1512 mapping_state_2 (state, 0);
1513 #undef TRANSITION
1514 }
1515
1516 /* Same as mapping_state, but MAX_CHARS bytes have already been
1517 allocated. Put the mapping symbol that far back. */
1518
1519 static void
1520 mapping_state_2 (enum mstate state, int max_chars)
1521 {
1522 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1523
1524 if (!SEG_NORMAL (now_seg))
1525 return;
1526
1527 if (mapstate == state)
1528 /* The mapping symbol has already been emitted.
1529 There is nothing else to do. */
1530 return;
1531
1532 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1533 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1534 }
1535 #else
1536 #define mapping_state(x) /* nothing */
1537 #define mapping_state_2(x, y) /* nothing */
1538 #endif
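
/* Illustrative example (added; not in the original source): for input such as

       .text
       add   x0, x0, #1
       .word 0x12345678

   the state machine above emits a "$x" mapping symbol before the instruction
   and a "$d" mapping symbol before the literal data, following the AArch64
   ELF mapping-symbol rules referenced earlier.  */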
1539
1540 /* Directives: sectioning and alignment. */
1541
1542 static void
1543 s_bss (int ignore ATTRIBUTE_UNUSED)
1544 {
1545 /* We don't support putting frags in the BSS segment; we fake it by
1546 marking in_bss, then looking at s_skip for clues. */
1547 subseg_set (bss_section, 0);
1548 demand_empty_rest_of_line ();
1549 mapping_state (MAP_DATA);
1550 }
1551
1552 static void
1553 s_even (int ignore ATTRIBUTE_UNUSED)
1554 {
1555 /* Never make a frag if we expect an extra pass. */
1556 if (!need_pass_2)
1557 frag_align (1, 0, 0);
1558
1559 record_alignment (now_seg, 1);
1560
1561 demand_empty_rest_of_line ();
1562 }
1563
1564 /* Directives: Literal pools. */
1565
1566 static literal_pool *
1567 find_literal_pool (int size)
1568 {
1569 literal_pool *pool;
1570
1571 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1572 {
1573 if (pool->section == now_seg
1574 && pool->sub_section == now_subseg && pool->size == size)
1575 break;
1576 }
1577
1578 return pool;
1579 }
1580
1581 static literal_pool *
1582 find_or_make_literal_pool (int size)
1583 {
1584 /* Next literal pool ID number. */
1585 static unsigned int latest_pool_num = 1;
1586 literal_pool *pool;
1587
1588 pool = find_literal_pool (size);
1589
1590 if (pool == NULL)
1591 {
1592 /* Create a new pool. */
1593 pool = xmalloc (sizeof (*pool));
1594 if (!pool)
1595 return NULL;
1596
1597 /* Currently we always put the literal pool in the current text
1598 section. If we were generating "small" model code where we
1599 knew that all code and initialised data was within 1MB then
1600 we could output literals to mergeable, read-only data
1601 sections. */
1602
1603 pool->next_free_entry = 0;
1604 pool->section = now_seg;
1605 pool->sub_section = now_subseg;
1606 pool->size = size;
1607 pool->next = list_of_pools;
1608 pool->symbol = NULL;
1609
1610 /* Add it to the list. */
1611 list_of_pools = pool;
1612 }
1613
1614 /* New pools, and emptied pools, will have a NULL symbol. */
1615 if (pool->symbol == NULL)
1616 {
1617 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1618 (valueT) 0, &zero_address_frag);
1619 pool->id = latest_pool_num++;
1620 }
1621
1622 /* Done. */
1623 return pool;
1624 }
1625
1626 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1627 Return TRUE on success, otherwise return FALSE. */
1628 static bfd_boolean
1629 add_to_lit_pool (expressionS *exp, int size)
1630 {
1631 literal_pool *pool;
1632 unsigned int entry;
1633
1634 pool = find_or_make_literal_pool (size);
1635
1636 /* Check if this literal value is already in the pool. */
1637 for (entry = 0; entry < pool->next_free_entry; entry++)
1638 {
1639 if ((pool->literals[entry].X_op == exp->X_op)
1640 && (exp->X_op == O_constant)
1641 && (pool->literals[entry].X_add_number == exp->X_add_number)
1642 && (pool->literals[entry].X_unsigned == exp->X_unsigned))
1643 break;
1644
1645 if ((pool->literals[entry].X_op == exp->X_op)
1646 && (exp->X_op == O_symbol)
1647 && (pool->literals[entry].X_add_number == exp->X_add_number)
1648 && (pool->literals[entry].X_add_symbol == exp->X_add_symbol)
1649 && (pool->literals[entry].X_op_symbol == exp->X_op_symbol))
1650 break;
1651 }
1652
1653 /* Do we need to create a new entry? */
1654 if (entry == pool->next_free_entry)
1655 {
1656 if (entry >= MAX_LITERAL_POOL_SIZE)
1657 {
1658 set_syntax_error (_("literal pool overflow"));
1659 return FALSE;
1660 }
1661
1662 pool->literals[entry] = *exp;
1663 pool->next_free_entry += 1;
1664 }
1665
1666 exp->X_op = O_symbol;
1667 exp->X_add_number = ((int) entry) * size;
1668 exp->X_add_symbol = pool->symbol;
1669
1670 return TRUE;
1671 }
1672
1673 /* Can't use symbol_new here, so have to create a symbol and then at
1674 a later date assign it a value. That's what these functions do. */
1675
1676 static void
1677 symbol_locate (symbolS * symbolP,
1678 const char *name,/* It is copied, the caller can modify. */
1679 segT segment, /* Segment identifier (SEG_<something>). */
1680 valueT valu, /* Symbol value. */
1681 fragS * frag) /* Associated fragment. */
1682 {
1683 unsigned int name_length;
1684 char *preserved_copy_of_name;
1685
1686 name_length = strlen (name) + 1; /* +1 for \0. */
1687 obstack_grow (&notes, name, name_length);
1688 preserved_copy_of_name = obstack_finish (&notes);
1689
1690 #ifdef tc_canonicalize_symbol_name
1691 preserved_copy_of_name =
1692 tc_canonicalize_symbol_name (preserved_copy_of_name);
1693 #endif
1694
1695 S_SET_NAME (symbolP, preserved_copy_of_name);
1696
1697 S_SET_SEGMENT (symbolP, segment);
1698 S_SET_VALUE (symbolP, valu);
1699 symbol_clear_list_pointers (symbolP);
1700
1701 symbol_set_frag (symbolP, frag);
1702
1703 /* Link to end of symbol chain. */
1704 {
1705 extern int symbol_table_frozen;
1706
1707 if (symbol_table_frozen)
1708 abort ();
1709 }
1710
1711 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1712
1713 obj_symbol_new_hook (symbolP);
1714
1715 #ifdef tc_symbol_new_hook
1716 tc_symbol_new_hook (symbolP);
1717 #endif
1718
1719 #ifdef DEBUG_SYMS
1720 verify_symbol_chain (symbol_rootP, symbol_lastP);
1721 #endif /* DEBUG_SYMS */
1722 }
1723
1724
1725 static void
1726 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1727 {
1728 unsigned int entry;
1729 literal_pool *pool;
1730 char sym_name[20];
1731 int align;
1732
1733 for (align = 2; align < 4; align++)
1734 {
1735 int size = 1 << align;
1736
1737 pool = find_literal_pool (size);
1738 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1739 continue;
1740
1741 mapping_state (MAP_DATA);
1742
1743 /* Align the pool, as it will be accessed with word/xword loads.
1744 Only make a frag if we have to. */
1745 if (!need_pass_2)
1746 frag_align (align, 0, 0);
1747
1748 record_alignment (now_seg, align);
1749
1750 sprintf (sym_name, "$$lit_\002%x", pool->id);
1751
1752 symbol_locate (pool->symbol, sym_name, now_seg,
1753 (valueT) frag_now_fix (), frag_now);
1754 symbol_table_insert (pool->symbol);
1755
1756 for (entry = 0; entry < pool->next_free_entry; entry++)
1757 /* First output the expression in the instruction to the pool. */
1758 emit_expr (&(pool->literals[entry]), size); /* .word|.xword */
1759
1760 /* Mark the pool as empty. */
1761 pool->next_free_entry = 0;
1762 pool->symbol = NULL;
1763 }
1764 }
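
/* Usage sketch (added; not in the original source): literal pools are
   typically flushed with the ".ltorg" (or ".pool") directive, e.g.

       ldr   x0, =0x123456789abcdef0    // constant goes into the pool
       ret
       .ltorg                           // pool emitted, aligned, here

   The "ldr reg, =constant" form is handled elsewhere in this file and sets
   the gen_lit_pool flag on the instruction.  */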
1765
1766 #ifdef OBJ_ELF
1767 /* Forward declarations for functions below, in the MD interface
1768 section. */
1769 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1770 static struct reloc_table_entry * find_reloc_table_entry (char **);
1771
1772 /* Directives: Data. */
1773 /* N.B. the support for relocation suffix in this directive needs to be
1774 implemented properly. */
1775
1776 static void
1777 s_aarch64_elf_cons (int nbytes)
1778 {
1779 expressionS exp;
1780
1781 #ifdef md_flush_pending_output
1782 md_flush_pending_output ();
1783 #endif
1784
1785 if (is_it_end_of_statement ())
1786 {
1787 demand_empty_rest_of_line ();
1788 return;
1789 }
1790
1791 #ifdef md_cons_align
1792 md_cons_align (nbytes);
1793 #endif
1794
1795 mapping_state (MAP_DATA);
1796 do
1797 {
1798 struct reloc_table_entry *reloc;
1799
1800 expression (&exp);
1801
1802 if (exp.X_op != O_symbol)
1803 emit_expr (&exp, (unsigned int) nbytes);
1804 else
1805 {
1806 skip_past_char (&input_line_pointer, '#');
1807 if (skip_past_char (&input_line_pointer, ':'))
1808 {
1809 reloc = find_reloc_table_entry (&input_line_pointer);
1810 if (reloc == NULL)
1811 as_bad (_("unrecognized relocation suffix"));
1812 else
1813 as_bad (_("unimplemented relocation suffix"));
1814 ignore_rest_of_line ();
1815 return;
1816 }
1817 else
1818 emit_expr (&exp, (unsigned int) nbytes);
1819 }
1820 }
1821 while (*input_line_pointer++ == ',');
1822
1823 /* Put terminator back into stream. */
1824 input_line_pointer--;
1825 demand_empty_rest_of_line ();
1826 }
1827
1828 #endif /* OBJ_ELF */
1829
1830 /* Output a 32-bit word, but mark as an instruction. */
1831
1832 static void
1833 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1834 {
1835 expressionS exp;
1836
1837 #ifdef md_flush_pending_output
1838 md_flush_pending_output ();
1839 #endif
1840
1841 if (is_it_end_of_statement ())
1842 {
1843 demand_empty_rest_of_line ();
1844 return;
1845 }
1846
1847 if (!need_pass_2)
1848 frag_align_code (2, 0);
1849 #ifdef OBJ_ELF
1850 mapping_state (MAP_INSN);
1851 #endif
1852
1853 do
1854 {
1855 expression (&exp);
1856 if (exp.X_op != O_constant)
1857 {
1858 as_bad (_("constant expression required"));
1859 ignore_rest_of_line ();
1860 return;
1861 }
1862
1863 if (target_big_endian)
1864 {
1865 unsigned int val = exp.X_add_number;
1866 exp.X_add_number = SWAP_32 (val);
1867 }
1868 emit_expr (&exp, 4);
1869 }
1870 while (*input_line_pointer++ == ',');
1871
1872 /* Put terminator back into stream. */
1873 input_line_pointer--;
1874 demand_empty_rest_of_line ();
1875 }
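
/* Usage sketch (added; not in the original source): the ".inst" directive
   emits its operand as code rather than data, e.g.

       .inst 0xd503201f    // the encoding of NOP

   so that a "$x" mapping symbol (rather than "$d") covers the emitted
   word.  */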
1876
1877 #ifdef OBJ_ELF
1878 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1879
1880 static void
1881 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1882 {
1883 expressionS exp;
1884
1885 /* Since we're just labelling the code, there's no need to define a
1886 mapping symbol. */
1887 expression (&exp);
1888 /* Make sure there is enough room in this frag for the following
1889 blr. This trick only works if the blr follows immediately after
1890 the .tlsdesccall directive. */
1891 frag_grow (4);
1892 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1893 BFD_RELOC_AARCH64_TLSDESC_CALL);
1894
1895 demand_empty_rest_of_line ();
1896 }
1897 #endif /* OBJ_ELF */
1898
1899 static void s_aarch64_arch (int);
1900 static void s_aarch64_cpu (int);
1901
1902 /* This table describes all the machine specific pseudo-ops the assembler
1903 has to support. The fields are:
1904 pseudo-op name without dot
1905 function to call to execute this pseudo-op
1906 Integer arg to pass to the function. */
1907
1908 const pseudo_typeS md_pseudo_table[] = {
1909 /* Never called because '.req' does not start a line. */
1910 {"req", s_req, 0},
1911 {"unreq", s_unreq, 0},
1912 {"bss", s_bss, 0},
1913 {"even", s_even, 0},
1914 {"ltorg", s_ltorg, 0},
1915 {"pool", s_ltorg, 0},
1916 {"cpu", s_aarch64_cpu, 0},
1917 {"arch", s_aarch64_arch, 0},
1918 {"inst", s_aarch64_inst, 0},
1919 #ifdef OBJ_ELF
1920 {"tlsdesccall", s_tlsdesccall, 0},
1921 {"word", s_aarch64_elf_cons, 4},
1922 {"long", s_aarch64_elf_cons, 4},
1923 {"xword", s_aarch64_elf_cons, 8},
1924 {"dword", s_aarch64_elf_cons, 8},
1925 #endif
1926 {0, 0, 0}
1927 };
1928 \f
1929
1930 /* Check whether STR points to a register name followed by a comma or the
1931 end of line; REG_TYPE indicates which register types are checked
1932 against. Return TRUE if STR is such a register name; otherwise return
1933 FALSE. The function does not intend to produce any diagnostics, but since
1934 the register parser aarch64_reg_parse, which is called by this function,
1935 does produce diagnostics, we call clear_error to clear any diagnostics
1936 that may be generated by aarch64_reg_parse.
1937 Also, the function returns FALSE directly if there is any user error
1938 present at the function entry. This prevents the existing diagnostics
1939 state from being spoiled.
1940 The function currently serves parse_constant_immediate and
1941 parse_big_immediate only. */
1942 static bfd_boolean
1943 reg_name_p (char *str, aarch64_reg_type reg_type)
1944 {
1945 int reg;
1946
1947 /* Prevent the diagnostics state from being spoiled. */
1948 if (error_p ())
1949 return FALSE;
1950
1951 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1952
1953 /* Clear the parsing error that may be set by the reg parser. */
1954 clear_error ();
1955
1956 if (reg == PARSE_FAIL)
1957 return FALSE;
1958
1959 skip_whitespace (str);
1960 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1961 return TRUE;
1962
1963 return FALSE;
1964 }
1965
1966 /* Parser functions used exclusively in instruction operands. */
1967
1968 /* Parse an immediate expression which may not be constant.
1969
1970 To prevent the expression parser from pushing a register name
1971 into the symbol table as an undefined symbol, firstly a check is
1972 done to find out whether STR is a valid register name followed
1973 by a comma or the end of line. Return FALSE if STR is such a
1974 string. */
1975
1976 static bfd_boolean
1977 parse_immediate_expression (char **str, expressionS *exp)
1978 {
1979 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1980 {
1981 set_recoverable_error (_("immediate operand required"));
1982 return FALSE;
1983 }
1984
1985 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
1986
1987 if (exp->X_op == O_absent)
1988 {
1989 set_fatal_syntax_error (_("missing immediate expression"));
1990 return FALSE;
1991 }
1992
1993 return TRUE;
1994 }
1995
1996 /* Constant immediate-value read function for use in insn parsing.
1997 STR points to the beginning of the immediate (with the optional
1998 leading #); *VAL receives the value.
1999
2000 Return TRUE on success; otherwise return FALSE. */
2001
2002 static bfd_boolean
2003 parse_constant_immediate (char **str, int64_t * val)
2004 {
2005 expressionS exp;
2006
2007 if (! parse_immediate_expression (str, &exp))
2008 return FALSE;
2009
2010 if (exp.X_op != O_constant)
2011 {
2012 set_syntax_error (_("constant expression required"));
2013 return FALSE;
2014 }
2015
2016 *val = exp.X_add_number;
2017 return TRUE;
2018 }
2019
2020 static uint32_t
2021 encode_imm_float_bits (uint32_t imm)
2022 {
2023 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2024 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2025 }
2026
2027 /* Return TRUE if IMM is a valid floating-point immediate; return FALSE
2028 otherwise. */
2029 static bfd_boolean
2030 aarch64_imm_float_p (uint32_t imm)
2031 {
2032 /* 3 32222222 2221111111111
2033 1 09876543 21098765432109876543210
2034 n Eeeeeexx xxxx0000000000000000000 */
2035 uint32_t e;
2036
2037 e = (imm >> 30) & 0x1;
2038 if (e == 0)
2039 e = 0x3e000000;
2040 else
2041 e = 0x40000000;
2042 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0 */
2043 && ((imm & 0x7e000000) == e); /* bits 25-29 = ~ bit 30 */
2044 }
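
/* As a worked example of the two helpers above (an illustrative, stand-alone
   sketch that is not part of the assembler; the example_* name is made up):
   1.0 has the IEEE single-precision bit pattern 0x3f800000, which satisfies
   aarch64_imm_float_p and encodes as the 8-bit immediate 0x70.  Compile the
   snippet separately to experiment with other values.  */
#if 0
#include <stdint.h>
#include <assert.h>

/* Stand-alone copy of encode_imm_float_bits.  */
static uint32_t
example_encode_imm_float_bits (uint32_t imm)
{
  return ((imm >> 19) & 0x7f)		/* b[25:19] -> b[6:0] */
    | ((imm >> (31 - 7)) & 0x80);	/* b[31] -> b[7] */
}

int
main (void)
{
  uint32_t one = 0x3f800000;	/* IEEE single-precision 1.0.  */

  /* The low 19 bits are zero and bits 29-25 are the complement of bit 30,
     so 1.0 is representable; its 8-bit encoding is 0x70.  */
  assert ((one & 0x7ffff) == 0);
  assert (example_encode_imm_float_bits (one) == 0x70);
  return 0;
}
#endif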
2045
2046 /* Note: this accepts the floating-point 0 constant. */
2047 static bfd_boolean
2048 parse_aarch64_imm_float (char **ccp, int *immed)
2049 {
2050 char *str = *ccp;
2051 char *fpnum;
2052 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2053 int found_fpchar = 0;
2054
2055 skip_past_char (&str, '#');
2056
2057 /* We must not accidentally parse an integer as a floating-point number. Make
2058 sure that the value we parse is not an integer by checking for special
2059 characters '.' or 'e'.
2060 FIXME: This is a hack that is not very efficient, but doing better is
2061 tricky because type information isn't in a very usable state at parse
2062 time. */
2063 fpnum = str;
2064 skip_whitespace (fpnum);
2065
2066 if (strncmp (fpnum, "0x", 2) == 0)
2067 return FALSE;
2068 else
2069 {
2070 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2071 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2072 {
2073 found_fpchar = 1;
2074 break;
2075 }
2076
2077 if (!found_fpchar)
2078 return FALSE;
2079 }
2080
2081 if ((str = atof_ieee (str, 's', words)) != NULL)
2082 {
2083 unsigned fpword = 0;
2084 int i;
2085
2086 /* Our FP word must be 32 bits (single-precision FP). */
2087 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2088 {
2089 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2090 fpword |= words[i];
2091 }
2092
2093 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2094 *immed = fpword;
2095 else
2096 goto invalid_fp;
2097
2098 *ccp = str;
2099
2100 return TRUE;
2101 }
2102
2103 invalid_fp:
2104 set_fatal_syntax_error (_("invalid floating-point constant"));
2105 return FALSE;
2106 }
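
/* The loop above assembles the 32-bit single-precision word from the
   LITTLENUMs returned by atof_ieee, most significant chunk first.  Below is a
   minimal stand-alone sketch of the same accumulation, assuming 16-bit
   LITTLENUMs and using a hard-coded pair that corresponds to 2.5
   (0x40200000); it is an illustration only.  */
#if 0
#include <stdint.h>
#include <assert.h>

int
main (void)
{
  /* Halves of the IEEE single-precision pattern for 2.5, most significant
     half first, as atof_ieee is assumed to deliver them.  */
  uint16_t words[2] = { 0x4020, 0x0000 };
  uint32_t fpword = 0;
  int i;

  for (i = 0; i < 2; i++)
    {
      fpword <<= 16;
      fpword |= words[i];
    }

  assert (fpword == 0x40200000);
  return 0;
}
#endif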
2107
2108 /* Less-generic immediate-value read function with the possibility of loading
2109 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2110 instructions.
2111
2112 To prevent the expression parser from pushing a register name into the
2113 symbol table as an undefined symbol, a check is first made to find
2114 out whether STR is a valid register name followed by a comma or the end
2115 of line. Return FALSE if STR is such a register. */
2116
2117 static bfd_boolean
2118 parse_big_immediate (char **str, int64_t *imm)
2119 {
2120 char *ptr = *str;
2121
2122 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2123 {
2124 set_syntax_error (_("immediate operand required"));
2125 return FALSE;
2126 }
2127
2128 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2129
2130 if (inst.reloc.exp.X_op == O_constant)
2131 *imm = inst.reloc.exp.X_add_number;
2132
2133 *str = ptr;
2134
2135 return TRUE;
2136 }
2137
2138 /* Record in *RELOC that the operand described by *OPERAND needs a GAS
2139 internal fixup. If NEED_LIBOPCODES_P is non-zero, the fixup will need
2140 assistance from libopcodes. */
2141
2142 static inline void
2143 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2144 const aarch64_opnd_info *operand,
2145 int need_libopcodes_p)
2146 {
2147 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2148 reloc->opnd = operand->type;
2149 if (need_libopcodes_p)
2150 reloc->need_libopcodes_p = 1;
2151 }
2152
2153 /* Return TRUE if the instruction needs to be fixed up later internally by
2154 the GAS; otherwise return FALSE. */
2155
2156 static inline bfd_boolean
2157 aarch64_gas_internal_fixup_p (void)
2158 {
2159 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2160 }
2161
2162 /* Assign the immediate value to the relevant field in *OPERAND if
2163 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2164 needs an internal fixup in a later stage.
2165 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2166 IMM.VALUE that may get assigned with the constant. */
2167 static inline void
2168 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2169 aarch64_opnd_info *operand,
2170 int addr_off_p,
2171 int need_libopcodes_p,
2172 int skip_p)
2173 {
2174 if (reloc->exp.X_op == O_constant)
2175 {
2176 if (addr_off_p)
2177 operand->addr.offset.imm = reloc->exp.X_add_number;
2178 else
2179 operand->imm.value = reloc->exp.X_add_number;
2180 reloc->type = BFD_RELOC_UNUSED;
2181 }
2182 else
2183 {
2184 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2185 /* Tell libopcodes to ignore this operand or not. This is helpful
2186 when one of the operands needs to be fixed up later but we need
2187 libopcodes to check the other operands. */
2188 operand->skip = skip_p;
2189 }
2190 }
2191
2192 /* Relocation modifiers. Each entry in the table contains the textual
2193 name for the relocation which may be placed before a symbol used as
2194 a load/store offset, or add immediate. It must be surrounded by a
2195 leading and trailing colon, for example:
2196
2197 ldr x0, [x1, #:rello:varsym]
2198 add x0, x1, #:rello:varsym */
2199
2200 struct reloc_table_entry
2201 {
2202 const char *name;
2203 int pc_rel;
2204 bfd_reloc_code_real_type adrp_type;
2205 bfd_reloc_code_real_type movw_type;
2206 bfd_reloc_code_real_type add_type;
2207 bfd_reloc_code_real_type ldst_type;
2208 };
2209
2210 static struct reloc_table_entry reloc_table[] = {
2211 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2212 {"lo12", 0,
2213 0,
2214 0,
2215 BFD_RELOC_AARCH64_ADD_LO12,
2216 BFD_RELOC_AARCH64_LDST_LO12},
2217
2218 /* Higher 21 bits of pc-relative page offset: ADRP */
2219 {"pg_hi21", 1,
2220 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2221 0,
2222 0,
2223 0},
2224
2225 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2226 {"pg_hi21_nc", 1,
2227 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2228 0,
2229 0,
2230 0},
2231
2232 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2233 {"abs_g0", 0,
2234 0,
2235 BFD_RELOC_AARCH64_MOVW_G0,
2236 0,
2237 0},
2238
2239 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2240 {"abs_g0_s", 0,
2241 0,
2242 BFD_RELOC_AARCH64_MOVW_G0_S,
2243 0,
2244 0},
2245
2246 /* Less significant bits 0-15 of address/value: MOVK, no check */
2247 {"abs_g0_nc", 0,
2248 0,
2249 BFD_RELOC_AARCH64_MOVW_G0_NC,
2250 0,
2251 0},
2252
2253 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2254 {"abs_g1", 0,
2255 0,
2256 BFD_RELOC_AARCH64_MOVW_G1,
2257 0,
2258 0},
2259
2260 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2261 {"abs_g1_s", 0,
2262 0,
2263 BFD_RELOC_AARCH64_MOVW_G1_S,
2264 0,
2265 0},
2266
2267 /* Less significant bits 16-31 of address/value: MOVK, no check */
2268 {"abs_g1_nc", 0,
2269 0,
2270 BFD_RELOC_AARCH64_MOVW_G1_NC,
2271 0,
2272 0},
2273
2274 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2275 {"abs_g2", 0,
2276 0,
2277 BFD_RELOC_AARCH64_MOVW_G2,
2278 0,
2279 0},
2280
2281 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2282 {"abs_g2_s", 0,
2283 0,
2284 BFD_RELOC_AARCH64_MOVW_G2_S,
2285 0,
2286 0},
2287
2288 /* Less significant bits 32-47 of address/value: MOVK, no check */
2289 {"abs_g2_nc", 0,
2290 0,
2291 BFD_RELOC_AARCH64_MOVW_G2_NC,
2292 0,
2293 0},
2294
2295 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2296 {"abs_g3", 0,
2297 0,
2298 BFD_RELOC_AARCH64_MOVW_G3,
2299 0,
2300 0},
2301 /* Get to the page containing GOT entry for a symbol. */
2302 {"got", 1,
2303 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2304 0,
2305 0,
2306 0},
2307 /* 12 bit offset into the page containing GOT entry for that symbol. */
2308 {"got_lo12", 0,
2309 0,
2310 0,
2311 0,
2312 BFD_RELOC_AARCH64_LD64_GOT_LO12_NC},
2313
2314 /* Get to the page containing GOT TLS entry for a symbol */
2315 {"tlsgd", 0,
2316 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2317 0,
2318 0,
2319 0},
2320
2321 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2322 {"tlsgd_lo12", 0,
2323 0,
2324 0,
2325 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2326 0},
2327
2328 /* Get to the page containing GOT TLS entry for a symbol */
2329 {"tlsdesc", 0,
2330 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE,
2331 0,
2332 0,
2333 0},
2334
2335 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2336 {"tlsdesc_lo12", 0,
2337 0,
2338 0,
2339 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2340 BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC},
2341
2342 /* Get to the page containing GOT TLS entry for a symbol */
2343 {"gottprel", 0,
2344 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2345 0,
2346 0,
2347 0},
2348
2349 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2350 {"gottprel_lo12", 0,
2351 0,
2352 0,
2353 0,
2354 BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
2355
2356 /* Get tp offset for a symbol. */
2357 {"tprel", 0,
2358 0,
2359 0,
2360 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2361 0},
2362
2363 /* Get tp offset for a symbol. */
2364 {"tprel_lo12", 0,
2365 0,
2366 0,
2367 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2368 0},
2369
2370 /* Get tp offset for a symbol. */
2371 {"tprel_hi12", 0,
2372 0,
2373 0,
2374 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2375 0},
2376
2377 /* Get tp offset for a symbol. */
2378 {"tprel_lo12_nc", 0,
2379 0,
2380 0,
2381 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2382 0},
2383
2384 /* Most significant bits 32-47 of address/value: MOVZ. */
2385 {"tprel_g2", 0,
2386 0,
2387 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2388 0,
2389 0},
2390
2391 /* Most significant bits 16-31 of address/value: MOVZ. */
2392 {"tprel_g1", 0,
2393 0,
2394 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2395 0,
2396 0},
2397
2398 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2399 {"tprel_g1_nc", 0,
2400 0,
2401 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2402 0,
2403 0},
2404
2405 /* Most significant bits 0-15 of address/value: MOVZ. */
2406 {"tprel_g0", 0,
2407 0,
2408 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2409 0,
2410 0},
2411
2412 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2413 {"tprel_g0_nc", 0,
2414 0,
2415 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2416 0,
2417 0},
2418 };
2419
2420 /* Given the address of a pointer pointing to the textual name of a
2421 relocation as may appear in assembler source, attempt to find its
2422 details in reloc_table. The pointer will be updated to the character
2423 after the trailing colon. On failure, NULL will be returned;
2424 otherwise return the reloc_table_entry. */
2425
2426 static struct reloc_table_entry *
2427 find_reloc_table_entry (char **str)
2428 {
2429 unsigned int i;
2430 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2431 {
2432 int length = strlen (reloc_table[i].name);
2433
2434 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2435 && (*str)[length] == ':')
2436 {
2437 *str += (length + 1);
2438 return &reloc_table[i];
2439 }
2440 }
2441
2442 return NULL;
2443 }
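
/* The lookup above is a case-insensitive prefix match that also requires the
   trailing colon, after which *STR is advanced past that colon.  Below is a
   minimal stand-alone sketch of the same idea (the callers have already
   stripped the leading "#:" or ":"; the names are made up for illustration
   and the snippet is not used by the assembler).  */
#if 0
#include <assert.h>
#include <string.h>
#include <strings.h>		/* strncasecmp */

int
main (void)
{
  const char *name = "lo12";	/* one table entry name */
  char input[] = "LO12:var + 4";
  char *str = input;
  size_t length = strlen (name);

  /* Match the modifier name case-insensitively, insist on the trailing ':',
     then step past both, leaving STR at the symbol expression.  */
  if (strncasecmp (name, str, length) == 0 && str[length] == ':')
    str += length + 1;

  assert (strcmp (str, "var + 4") == 0);
  return 0;
}
#endif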
2444
2445 /* Mode argument to parse_shift and parse_shifter_operand. */
2446 enum parse_shift_mode
2447 {
2448 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2449 "#imm{,lsl #n}" */
2450 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2451 "#imm" */
2452 SHIFTED_LSL, /* bare "lsl #n" */
2453 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2454 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2455 };
2456
2457 /* Parse a <shift> operator on an AArch64 data processing instruction.
2458 Return TRUE on success; otherwise return FALSE. */
2459 static bfd_boolean
2460 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2461 {
2462 const struct aarch64_name_value_pair *shift_op;
2463 enum aarch64_modifier_kind kind;
2464 expressionS exp;
2465 int exp_has_prefix;
2466 char *s = *str;
2467 char *p = s;
2468
2469 for (p = *str; ISALPHA (*p); p++)
2470 ;
2471
2472 if (p == *str)
2473 {
2474 set_syntax_error (_("shift expression expected"));
2475 return FALSE;
2476 }
2477
2478 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2479
2480 if (shift_op == NULL)
2481 {
2482 set_syntax_error (_("shift operator expected"));
2483 return FALSE;
2484 }
2485
2486 kind = aarch64_get_operand_modifier (shift_op);
2487
2488 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2489 {
2490 set_syntax_error (_("invalid use of 'MSL'"));
2491 return FALSE;
2492 }
2493
2494 switch (mode)
2495 {
2496 case SHIFTED_LOGIC_IMM:
2497 if (aarch64_extend_operator_p (kind) == TRUE)
2498 {
2499 set_syntax_error (_("extending shift is not permitted"));
2500 return FALSE;
2501 }
2502 break;
2503
2504 case SHIFTED_ARITH_IMM:
2505 if (kind == AARCH64_MOD_ROR)
2506 {
2507 set_syntax_error (_("'ROR' shift is not permitted"));
2508 return FALSE;
2509 }
2510 break;
2511
2512 case SHIFTED_LSL:
2513 if (kind != AARCH64_MOD_LSL)
2514 {
2515 set_syntax_error (_("only 'LSL' shift is permitted"));
2516 return FALSE;
2517 }
2518 break;
2519
2520 case SHIFTED_REG_OFFSET:
2521 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2522 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2523 {
2524 set_fatal_syntax_error
2525 (_("invalid shift for the register offset addressing mode"));
2526 return FALSE;
2527 }
2528 break;
2529
2530 case SHIFTED_LSL_MSL:
2531 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2532 {
2533 set_syntax_error (_("invalid shift operator"));
2534 return FALSE;
2535 }
2536 break;
2537
2538 default:
2539 abort ();
2540 }
2541
2542 /* Whitespace can appear here if the next thing is a bare digit. */
2543 skip_whitespace (p);
2544
2545 /* Parse shift amount. */
2546 exp_has_prefix = 0;
2547 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2548 exp.X_op = O_absent;
2549 else
2550 {
2551 if (is_immediate_prefix (*p))
2552 {
2553 p++;
2554 exp_has_prefix = 1;
2555 }
2556 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2557 }
2558 if (exp.X_op == O_absent)
2559 {
2560 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2561 {
2562 set_syntax_error (_("missing shift amount"));
2563 return FALSE;
2564 }
2565 operand->shifter.amount = 0;
2566 }
2567 else if (exp.X_op != O_constant)
2568 {
2569 set_syntax_error (_("constant shift amount required"));
2570 return FALSE;
2571 }
2572 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2573 {
2574 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2575 return FALSE;
2576 }
2577 else
2578 {
2579 operand->shifter.amount = exp.X_add_number;
2580 operand->shifter.amount_present = 1;
2581 }
2582
2583 operand->shifter.operator_present = 1;
2584 operand->shifter.kind = kind;
2585
2586 *str = p;
2587 return TRUE;
2588 }
2589
2590 /* Parse a <shifter_operand> for a data processing instruction:
2591
2592 #<immediate>
2593 #<immediate>, LSL #imm
2594
2595 Validation of immediate operands is deferred to md_apply_fix.
2596
2597 Return TRUE on success; otherwise return FALSE. */
2598
2599 static bfd_boolean
2600 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2601 enum parse_shift_mode mode)
2602 {
2603 char *p;
2604
2605 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2606 return FALSE;
2607
2608 p = *str;
2609
2610 /* Accept an immediate expression. */
2611 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2612 return FALSE;
2613
2614 /* Accept optional LSL for arithmetic immediate values. */
2615 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2616 if (! parse_shift (&p, operand, SHIFTED_LSL))
2617 return FALSE;
2618
2619 /* Do not accept any shift operator for logical immediate values. */
2620 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2621 && parse_shift (&p, operand, mode))
2622 {
2623 set_syntax_error (_("unexpected shift operator"));
2624 return FALSE;
2625 }
2626
2627 *str = p;
2628 return TRUE;
2629 }
2630
2631 /* Parse a <shifter_operand> for a data processing instruction:
2632
2633 <Rm>
2634 <Rm>, <shift>
2635 #<immediate>
2636 #<immediate>, LSL #imm
2637
2638 where <shift> is handled by parse_shift above, and the last two
2639 cases are handled by the function above.
2640
2641 Validation of immediate operands is deferred to md_apply_fix.
2642
2643 Return TRUE on success; otherwise return FALSE. */
2644
2645 static bfd_boolean
2646 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2647 enum parse_shift_mode mode)
2648 {
2649 int reg;
2650 int isreg32, isregzero;
2651 enum aarch64_operand_class opd_class
2652 = aarch64_get_operand_class (operand->type);
2653
2654 if ((reg =
2655 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2656 {
2657 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2658 {
2659 set_syntax_error (_("unexpected register in the immediate operand"));
2660 return FALSE;
2661 }
2662
2663 if (!isregzero && reg == REG_SP)
2664 {
2665 set_syntax_error (BAD_SP);
2666 return FALSE;
2667 }
2668
2669 operand->reg.regno = reg;
2670 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2671
2672 /* Accept optional shift operation on register. */
2673 if (! skip_past_comma (str))
2674 return TRUE;
2675
2676 if (! parse_shift (str, operand, mode))
2677 return FALSE;
2678
2679 return TRUE;
2680 }
2681 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2682 {
2683 set_syntax_error
2684 (_("integer register expected in the extended/shifted operand "
2685 "register"));
2686 return FALSE;
2687 }
2688
2689 /* We have the shifted-immediate variant. */
2690 return parse_shifter_operand_imm (str, operand, mode);
2691 }
2692
2693 /* Return TRUE on success; return FALSE otherwise. */
2694
2695 static bfd_boolean
2696 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2697 enum parse_shift_mode mode)
2698 {
2699 char *p = *str;
2700
2701 /* Determine if we have the sequence of characters #: or just :
2702 coming next. If we do, then we check for a :rello: relocation
2703 modifier. If we don't, punt the whole lot to
2704 parse_shifter_operand. */
2705
2706 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2707 {
2708 struct reloc_table_entry *entry;
2709
2710 if (p[0] == '#')
2711 p += 2;
2712 else
2713 p++;
2714 *str = p;
2715
2716 /* Try to parse a relocation. Anything else is an error. */
2717 if (!(entry = find_reloc_table_entry (str)))
2718 {
2719 set_syntax_error (_("unknown relocation modifier"));
2720 return FALSE;
2721 }
2722
2723 if (entry->add_type == 0)
2724 {
2725 set_syntax_error
2726 (_("this relocation modifier is not allowed on this instruction"));
2727 return FALSE;
2728 }
2729
2730 /* Save str before we decompose it. */
2731 p = *str;
2732
2733 /* Next, we parse the expression. */
2734 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2735 return FALSE;
2736
2737 /* Record the relocation type (use the ADD variant here). */
2738 inst.reloc.type = entry->add_type;
2739 inst.reloc.pc_rel = entry->pc_rel;
2740
2741 /* If str is empty, we've reached the end, stop here. */
2742 if (**str == '\0')
2743 return TRUE;
2744
2745 /* Otherwise, we have a shifted reloc modifier, so rewind to
2746 recover the variable name and continue parsing for the shifter. */
2747 *str = p;
2748 return parse_shifter_operand_imm (str, operand, mode);
2749 }
2750
2751 return parse_shifter_operand (str, operand, mode);
2752 }
2753
2754 /* Parse all forms of an address expression. Information is written
2755 to *OPERAND and/or inst.reloc.
2756
2757 The A64 instruction set has the following addressing modes:
2758
2759 Offset
2760 [base] // in SIMD ld/st structure
2761 [base{,#0}] // in ld/st exclusive
2762 [base{,#imm}]
2763 [base,Xm{,LSL #imm}]
2764 [base,Xm,SXTX {#imm}]
2765 [base,Wm,(S|U)XTW {#imm}]
2766 Pre-indexed
2767 [base,#imm]!
2768 Post-indexed
2769 [base],#imm
2770 [base],Xm // in SIMD ld/st structure
2771 PC-relative (literal)
2772 label
2773 =immediate
2774
2775 (As a convenience, the notation "=immediate" is permitted in conjunction
2776 with the pc-relative literal load instructions to automatically place an
2777 immediate value or symbolic address in a nearby literal pool and generate
2778 a hidden label which references it.)
2779
2780 Upon a successful parsing, the address structure in *OPERAND will be
2781 filled in the following way:
2782
2783 .base_regno = <base>
2784 .offset.is_reg // 1 if the offset is a register
2785 .offset.imm = <imm>
2786 .offset.regno = <Rm>
2787
2788 For different addressing modes defined in the A64 ISA:
2789
2790 Offset
2791 .pcrel=0; .preind=1; .postind=0; .writeback=0
2792 Pre-indexed
2793 .pcrel=0; .preind=1; .postind=0; .writeback=1
2794 Post-indexed
2795 .pcrel=0; .preind=0; .postind=1; .writeback=1
2796 PC-relative (literal)
2797 .pcrel=1; .preind=1; .postind=0; .writeback=0
2798
2799 The shift/extension information, if any, will be stored in .shifter.
2800
2801 It is the caller's responsibility to check for addressing modes not
2802 supported by the instruction, and to set inst.reloc.type. */
2803
2804 static bfd_boolean
2805 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2806 int accept_reg_post_index)
2807 {
2808 char *p = *str;
2809 int reg;
2810 int isreg32, isregzero;
2811 expressionS *exp = &inst.reloc.exp;
2812
2813 if (! skip_past_char (&p, '['))
2814 {
2815 /* =immediate or label. */
2816 operand->addr.pcrel = 1;
2817 operand->addr.preind = 1;
2818
2819 if (skip_past_char (&p, '='))
2820 /* =immediate; need to generate the literal in the literal pool. */
2821 inst.gen_lit_pool = 1;
2822
2823 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2824 {
2825 set_syntax_error (_("invalid address"));
2826 return FALSE;
2827 }
2828
2829 *str = p;
2830 return TRUE;
2831 }
2832
2833 /* [ */
2834
2835 /* Accept SP and reject ZR */
2836 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
2837 if (reg == PARSE_FAIL || isreg32)
2838 {
2839 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
2840 return FALSE;
2841 }
2842 operand->addr.base_regno = reg;
2843
2844 /* [Xn */
2845 if (skip_past_comma (&p))
2846 {
2847 /* [Xn, */
2848 operand->addr.preind = 1;
2849
2850 /* Reject SP and accept ZR */
2851 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
2852 if (reg != PARSE_FAIL)
2853 {
2854 /* [Xn,Rm */
2855 operand->addr.offset.regno = reg;
2856 operand->addr.offset.is_reg = 1;
2857 /* Shifted index. */
2858 if (skip_past_comma (&p))
2859 {
2860 /* [Xn,Rm, */
2861 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
2862 /* Use the diagnostics set in parse_shift, so do not set a new
2863 error message here. */
2864 return FALSE;
2865 }
2866 /* We only accept:
2867 [base,Xm{,LSL #imm}]
2868 [base,Xm,SXTX {#imm}]
2869 [base,Wm,(S|U)XTW {#imm}] */
2870 if (operand->shifter.kind == AARCH64_MOD_NONE
2871 || operand->shifter.kind == AARCH64_MOD_LSL
2872 || operand->shifter.kind == AARCH64_MOD_SXTX)
2873 {
2874 if (isreg32)
2875 {
2876 set_syntax_error (_("invalid use of 32-bit register offset"));
2877 return FALSE;
2878 }
2879 }
2880 else if (!isreg32)
2881 {
2882 set_syntax_error (_("invalid use of 64-bit register offset"));
2883 return FALSE;
2884 }
2885 }
2886 else
2887 {
2888 /* [Xn,#:<reloc_op>:<symbol> */
2889 skip_past_char (&p, '#');
2890 if (reloc && skip_past_char (&p, ':'))
2891 {
2892 struct reloc_table_entry *entry;
2893
2894 /* Try to parse a relocation modifier. Anything else is
2895 an error. */
2896 if (!(entry = find_reloc_table_entry (&p)))
2897 {
2898 set_syntax_error (_("unknown relocation modifier"));
2899 return FALSE;
2900 }
2901
2902 if (entry->ldst_type == 0)
2903 {
2904 set_syntax_error
2905 (_("this relocation modifier is not allowed on this "
2906 "instruction"));
2907 return FALSE;
2908 }
2909
2910 /* [Xn,#:<reloc_op>: */
2911 /* We now have the group relocation table entry corresponding to
2912 the name in the assembler source. Next, we parse the
2913 expression. */
2914 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2915 {
2916 set_syntax_error (_("invalid relocation expression"));
2917 return FALSE;
2918 }
2919
2920 /* [Xn,#:<reloc_op>:<expr> */
2921 /* Record the load/store relocation type. */
2922 inst.reloc.type = entry->ldst_type;
2923 inst.reloc.pc_rel = entry->pc_rel;
2924 }
2925 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
2926 {
2927 set_syntax_error (_("invalid expression in the address"));
2928 return FALSE;
2929 }
2930 /* [Xn,<expr> */
2931 }
2932 }
2933
2934 if (! skip_past_char (&p, ']'))
2935 {
2936 set_syntax_error (_("']' expected"));
2937 return FALSE;
2938 }
2939
2940 if (skip_past_char (&p, '!'))
2941 {
2942 if (operand->addr.preind && operand->addr.offset.is_reg)
2943 {
2944 set_syntax_error (_("register offset not allowed in pre-indexed "
2945 "addressing mode"));
2946 return FALSE;
2947 }
2948 /* [Xn]! */
2949 operand->addr.writeback = 1;
2950 }
2951 else if (skip_past_comma (&p))
2952 {
2953 /* [Xn], */
2954 operand->addr.postind = 1;
2955 operand->addr.writeback = 1;
2956
2957 if (operand->addr.preind)
2958 {
2959 set_syntax_error (_("cannot combine pre- and post-indexing"));
2960 return FALSE;
2961 }
2962
2963 if (accept_reg_post_index
2964 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
2965 &isregzero)) != PARSE_FAIL)
2966 {
2967 /* [Xn],Xm */
2968 if (isreg32)
2969 {
2970 set_syntax_error (_("invalid 32-bit register offset"));
2971 return FALSE;
2972 }
2973 operand->addr.offset.regno = reg;
2974 operand->addr.offset.is_reg = 1;
2975 }
2976 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
2977 {
2978 /* [Xn],#expr */
2979 set_syntax_error (_("invalid expression in the address"));
2980 return FALSE;
2981 }
2982 }
2983
2984 /* If at this point neither .preind nor .postind is set, we have a
2985 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
2986 if (operand->addr.preind == 0 && operand->addr.postind == 0)
2987 {
2988 if (operand->addr.writeback)
2989 {
2990 /* Reject [Rn]! */
2991 set_syntax_error (_("missing offset in the pre-indexed address"));
2992 return FALSE;
2993 }
2994 operand->addr.preind = 1;
2995 inst.reloc.exp.X_op = O_constant;
2996 inst.reloc.exp.X_add_number = 0;
2997 }
2998
2999 *str = p;
3000 return TRUE;
3001 }
3002
3003 /* Return TRUE on success; otherwise return FALSE. */
3004 static bfd_boolean
3005 parse_address (char **str, aarch64_opnd_info *operand,
3006 int accept_reg_post_index)
3007 {
3008 return parse_address_main (str, operand, 0, accept_reg_post_index);
3009 }
3010
3011 /* Return TRUE on success; otherwise return FALSE. */
3012 static bfd_boolean
3013 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3014 {
3015 return parse_address_main (str, operand, 1, 0);
3016 }
3017
3018 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3019 Return TRUE on success; otherwise return FALSE. */
3020 static bfd_boolean
3021 parse_half (char **str, int *internal_fixup_p)
3022 {
3023 char *p, *saved;
3024 int dummy;
3025
3026 p = *str;
3027 skip_past_char (&p, '#');
3028
3029 gas_assert (internal_fixup_p);
3030 *internal_fixup_p = 0;
3031
3032 if (*p == ':')
3033 {
3034 struct reloc_table_entry *entry;
3035
3036 /* Try to parse a relocation. Anything else is an error. */
3037 ++p;
3038 if (!(entry = find_reloc_table_entry (&p)))
3039 {
3040 set_syntax_error (_("unknown relocation modifier"));
3041 return FALSE;
3042 }
3043
3044 if (entry->movw_type == 0)
3045 {
3046 set_syntax_error
3047 (_("this relocation modifier is not allowed on this instruction"));
3048 return FALSE;
3049 }
3050
3051 inst.reloc.type = entry->movw_type;
3052 }
3053 else
3054 *internal_fixup_p = 1;
3055
3056 /* Avoid parsing a register as a general symbol. */
3057 saved = p;
3058 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3059 return FALSE;
3060 p = saved;
3061
3062 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3063 return FALSE;
3064
3065 *str = p;
3066 return TRUE;
3067 }
3068
3069 /* Parse an operand for an ADRP instruction:
3070 ADRP <Xd>, <label>
3071 Return TRUE on success; otherwise return FALSE. */
3072
3073 static bfd_boolean
3074 parse_adrp (char **str)
3075 {
3076 char *p;
3077
3078 p = *str;
3079 if (*p == ':')
3080 {
3081 struct reloc_table_entry *entry;
3082
3083 /* Try to parse a relocation. Anything else is an error. */
3084 ++p;
3085 if (!(entry = find_reloc_table_entry (&p)))
3086 {
3087 set_syntax_error (_("unknown relocation modifier"));
3088 return FALSE;
3089 }
3090
3091 if (entry->adrp_type == 0)
3092 {
3093 set_syntax_error
3094 (_("this relocation modifier is not allowed on this instruction"));
3095 return FALSE;
3096 }
3097
3098 inst.reloc.type = entry->adrp_type;
3099 }
3100 else
3101 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3102
3103 inst.reloc.pc_rel = 1;
3104
3105 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3106 return FALSE;
3107
3108 *str = p;
3109 return TRUE;
3110 }
3111
3112 /* Miscellaneous. */
3113
3114 /* Parse an option for a preload instruction. Returns the encoding for the
3115 option, or PARSE_FAIL. */
3116
3117 static int
3118 parse_pldop (char **str)
3119 {
3120 char *p, *q;
3121 const struct aarch64_name_value_pair *o;
3122
3123 p = q = *str;
3124 while (ISALNUM (*q))
3125 q++;
3126
3127 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3128 if (!o)
3129 return PARSE_FAIL;
3130
3131 *str = q;
3132 return o->value;
3133 }
3134
3135 /* Parse an option for a barrier instruction. Returns the encoding for the
3136 option, or PARSE_FAIL. */
3137
3138 static int
3139 parse_barrier (char **str)
3140 {
3141 char *p, *q;
3142 const asm_barrier_opt *o;
3143
3144 p = q = *str;
3145 while (ISALPHA (*q))
3146 q++;
3147
3148 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3149 if (!o)
3150 return PARSE_FAIL;
3151
3152 *str = q;
3153 return o->value;
3154 }
3155
3156 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3157 Returns the encoding for the option, or PARSE_FAIL.
3158
3159 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3160 implementation defined system register name S3_<op1>_<Cn>_<Cm>_<op2>. */
3161
3162 static int
3163 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3164 {
3165 char *p, *q;
3166 char buf[32];
3167 const struct aarch64_name_value_pair *o;
3168 int value;
3169
3170 p = buf;
3171 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3172 if (p < buf + 31)
3173 *p++ = TOLOWER (*q);
3174 *p = '\0';
3175 /* Assert that BUF is large enough. */
3176 gas_assert (p - buf == q - *str);
3177
3178 o = hash_find (sys_regs, buf);
3179 if (!o)
3180 {
3181 if (!imple_defined_p)
3182 return PARSE_FAIL;
3183 else
3184 {
3185 /* Parse S3_<op1>_<Cn>_<Cm>_<op2>, the implementation defined
3186 registers. */
3187 unsigned int op0, op1, cn, cm, op2;
3188 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2) != 5)
3189 return PARSE_FAIL;
3190 /* Register access is encoded as follows:
3191 op0 op1 CRn CRm op2
3192 11 xxx 1x11 xxxx xxx. */
3193 if (op0 != 3 || op1 > 7 || (cn | 0x4) != 0xf || cm > 15 || op2 > 7)
3194 return PARSE_FAIL;
3195 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3196 }
3197 }
3198 else
3199 value = o->value;
3200
3201 *str = q;
3202 return value;
3203 }
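
/* For the implementation-defined form, the five fields are packed as
   op0:op1:CRn:CRm:op2, exactly as in the expression above.  Below is a
   stand-alone sketch with a made-up register name, "s3_0_c15_c2_0", which
   packs to 0xc790 (op0=3, op1=0, CRn=15, CRm=2, op2=0); it is illustration
   only.  */
#if 0
#include <assert.h>
#include <stdio.h>

int
main (void)
{
  unsigned int op0, op1, cn, cm, op2, value;

  if (sscanf ("s3_0_c15_c2_0", "s%u_%u_c%u_c%u_%u",
	      &op0, &op1, &cn, &cm, &op2) != 5)
    return 1;

  /* Same constraint and packing as parse_sys_reg above.  */
  assert (op0 == 3 && op1 <= 7 && (cn | 0x4) == 0xf && cm <= 15 && op2 <= 7);
  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
  assert (value == 0xc790);
  return 0;
}
#endif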
3204
3205 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3206 for the option, or NULL. */
3207
3208 static const aarch64_sys_ins_reg *
3209 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3210 {
3211 char *p, *q;
3212 char buf[32];
3213 const aarch64_sys_ins_reg *o;
3214
3215 p = buf;
3216 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3217 if (p < buf + 31)
3218 *p++ = TOLOWER (*q);
3219 *p = '\0';
3220
3221 o = hash_find (sys_ins_regs, buf);
3222 if (!o)
3223 return NULL;
3224
3225 *str = q;
3226 return o;
3227 }
3228 \f
3229 #define po_char_or_fail(chr) do { \
3230 if (! skip_past_char (&str, chr)) \
3231 goto failure; \
3232 } while (0)
3233
3234 #define po_reg_or_fail(regtype) do { \
3235 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3236 if (val == PARSE_FAIL) \
3237 { \
3238 set_default_error (); \
3239 goto failure; \
3240 } \
3241 } while (0)
3242
3243 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3244 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3245 &isreg32, &isregzero); \
3246 if (val == PARSE_FAIL) \
3247 { \
3248 set_default_error (); \
3249 goto failure; \
3250 } \
3251 info->reg.regno = val; \
3252 if (isreg32) \
3253 info->qualifier = AARCH64_OPND_QLF_W; \
3254 else \
3255 info->qualifier = AARCH64_OPND_QLF_X; \
3256 } while (0)
3257
3258 #define po_imm_nc_or_fail() do { \
3259 if (! parse_constant_immediate (&str, &val)) \
3260 goto failure; \
3261 } while (0)
3262
3263 #define po_imm_or_fail(min, max) do { \
3264 if (! parse_constant_immediate (&str, &val)) \
3265 goto failure; \
3266 if (val < min || val > max) \
3267 { \
3268 set_fatal_syntax_error (_("immediate value out of range "\
3269 #min " to "#max)); \
3270 goto failure; \
3271 } \
3272 } while (0)
3273
3274 #define po_misc_or_fail(expr) do { \
3275 if (!expr) \
3276 goto failure; \
3277 } while (0)
3278 \f
3279 /* encode the 12-bit imm field of Add/sub immediate */
3280 static inline uint32_t
3281 encode_addsub_imm (uint32_t imm)
3282 {
3283 return imm << 10;
3284 }
3285
3286 /* encode the shift amount field of Add/sub immediate */
3287 static inline uint32_t
3288 encode_addsub_imm_shift_amount (uint32_t cnt)
3289 {
3290 return cnt << 22;
3291 }
3292
3293
3294 /* encode the imm field of Adr instruction */
3295 static inline uint32_t
3296 encode_adr_imm (uint32_t imm)
3297 {
3298 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3299 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3300 }
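
/* The ADR/ADRP immediate is split into immlo (bits 30:29) and immhi
   (bits 23:5).  As a worked example (stand-alone sketch; the example_* name
   is made up): an immediate of 5 puts 0b01 in immlo and 0b1 in immhi,
   giving the mask 0x20000020.  */
#if 0
#include <stdint.h>
#include <assert.h>

/* Stand-alone copy of encode_adr_imm.  */
static uint32_t
example_encode_adr_imm (uint32_t imm)
{
  return (((imm & 0x3) << 29)			/* [1:0] -> [30:29] */
	  | ((imm & (0x7ffff << 2)) << 3));	/* [20:2] -> [23:5] */
}

int
main (void)
{
  assert (example_encode_adr_imm (5) == 0x20000020);
  return 0;
}
#endif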
3301
3302 /* encode the immediate field of Move wide immediate */
3303 static inline uint32_t
3304 encode_movw_imm (uint32_t imm)
3305 {
3306 return imm << 5;
3307 }
3308
3309 /* encode the 26-bit offset of unconditional branch */
3310 static inline uint32_t
3311 encode_branch_ofs_26 (uint32_t ofs)
3312 {
3313 return ofs & ((1 << 26) - 1);
3314 }
3315
3316 /* encode the 19-bit offset of conditional branch and compare & branch */
3317 static inline uint32_t
3318 encode_cond_branch_ofs_19 (uint32_t ofs)
3319 {
3320 return (ofs & ((1 << 19) - 1)) << 5;
3321 }
3322
3323 /* encode the 19-bit offset of ld literal */
3324 static inline uint32_t
3325 encode_ld_lit_ofs_19 (uint32_t ofs)
3326 {
3327 return (ofs & ((1 << 19) - 1)) << 5;
3328 }
3329
3330 /* Encode the 14-bit offset of test & branch. */
3331 static inline uint32_t
3332 encode_tst_branch_ofs_14 (uint32_t ofs)
3333 {
3334 return (ofs & ((1 << 14) - 1)) << 5;
3335 }
3336
3337 /* Encode the 16-bit imm field of svc/hvc/smc. */
3338 static inline uint32_t
3339 encode_svc_imm (uint32_t imm)
3340 {
3341 return imm << 5;
3342 }
3343
3344 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3345 static inline uint32_t
3346 reencode_addsub_switch_add_sub (uint32_t opcode)
3347 {
3348 return opcode ^ (1 << 30);
3349 }
3350
3351 static inline uint32_t
3352 reencode_movzn_to_movz (uint32_t opcode)
3353 {
3354 return opcode | (1 << 30);
3355 }
3356
3357 static inline uint32_t
3358 reencode_movzn_to_movn (uint32_t opcode)
3359 {
3360 return opcode & ~(1 << 30);
3361 }
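
/* These reencoders rely on bit 30 being the bit that distinguishes ADD from
   SUB (immediate) and MOVZ from MOVN in the base A64 encodings, so switching
   between the forms is a single-bit flip.  Below is a stand-alone sketch
   using the encodings of "add x0, x1, #1" and "sub x0, x1, #1"; it is an
   illustration, not something the assembler executes.  */
#if 0
#include <stdint.h>
#include <assert.h>

int
main (void)
{
  uint32_t add_insn = 0x91000420;	/* add x0, x1, #1 */

  /* Flipping bit 30 turns ADD (immediate) into SUB (immediate).  */
  assert ((add_insn ^ (1u << 30)) == 0xd1000420);	/* sub x0, x1, #1 */
  return 0;
}
#endif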
3362
3363 /* Overall per-instruction processing. */
3364
3365 /* We need to be able to fix up arbitrary expressions in some statements.
3366 This is so that we can handle symbols that are an arbitrary distance from
3367 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3368 which returns part of an address in a form which will be valid for
3369 a data instruction. We do this by pushing the expression into a symbol
3370 in the expr_section, and creating a fix for that. */
3371
3372 static fixS *
3373 fix_new_aarch64 (fragS * frag,
3374 int where,
3375 short int size, expressionS * exp, int pc_rel, int reloc)
3376 {
3377 fixS *new_fix;
3378
3379 switch (exp->X_op)
3380 {
3381 case O_constant:
3382 case O_symbol:
3383 case O_add:
3384 case O_subtract:
3385 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3386 break;
3387
3388 default:
3389 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3390 pc_rel, reloc);
3391 break;
3392 }
3393 return new_fix;
3394 }
3395 \f
3396 /* Diagnostics on operands errors. */
3397
3398 /* By default, output a one-line error message only.
3399 Verbose error messages can be enabled with -merror-verbose. */
3400 static int verbose_error_p = 0;
3401
3402 #ifdef DEBUG_AARCH64
3403 /* N.B. this is only for the purpose of debugging. */
3404 const char* operand_mismatch_kind_names[] =
3405 {
3406 "AARCH64_OPDE_NIL",
3407 "AARCH64_OPDE_RECOVERABLE",
3408 "AARCH64_OPDE_SYNTAX_ERROR",
3409 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3410 "AARCH64_OPDE_INVALID_VARIANT",
3411 "AARCH64_OPDE_OUT_OF_RANGE",
3412 "AARCH64_OPDE_UNALIGNED",
3413 "AARCH64_OPDE_REG_LIST",
3414 "AARCH64_OPDE_OTHER_ERROR",
3415 };
3416 #endif /* DEBUG_AARCH64 */
3417
3418 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3419
3420 When multiple errors of different kinds are found in the same assembly
3421 line, only the error of the highest severity will be picked up for
3422 issuing the diagnostics. */
3423
3424 static inline bfd_boolean
3425 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3426 enum aarch64_operand_error_kind rhs)
3427 {
3428 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3429 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3430 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3431 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3432 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3433 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3434 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3435 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3436 return lhs > rhs;
3437 }
3438
3439 /* Helper routine to get the mnemonic name from the assembly instruction
3440 line; it should only be called for diagnostic purposes, as a
3441 string copy operation is involved, which may affect the runtime
3442 performance if used elsewhere. */
3443
3444 static const char*
3445 get_mnemonic_name (const char *str)
3446 {
3447 static char mnemonic[32];
3448 char *ptr;
3449
3450 /* Get the first 31 characters and assume that the full name is included. */
3451 strncpy (mnemonic, str, 31);
3452 mnemonic[31] = '\0';
3453
3454 /* Scan up to the end of the mnemonic, which must end in white space,
3455 '.', or end of string. */
3456 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3457 ;
3458
3459 *ptr = '\0';
3460
3461 /* Append '...' to the truncated long name. */
3462 if (ptr - mnemonic == 31)
3463 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3464
3465 return mnemonic;
3466 }
3467
3468 static void
3469 reset_aarch64_instruction (aarch64_instruction *instruction)
3470 {
3471 memset (instruction, '\0', sizeof (aarch64_instruction));
3472 instruction->reloc.type = BFD_RELOC_UNUSED;
3473 }
3474
3475 /* Data structures storing one user error in the assembly code related to
3476 operands. */
3477
3478 struct operand_error_record
3479 {
3480 const aarch64_opcode *opcode;
3481 aarch64_operand_error detail;
3482 struct operand_error_record *next;
3483 };
3484
3485 typedef struct operand_error_record operand_error_record;
3486
3487 struct operand_errors
3488 {
3489 operand_error_record *head;
3490 operand_error_record *tail;
3491 };
3492
3493 typedef struct operand_errors operand_errors;
3494
3495 /* Top-level data structure reporting user errors for the current line of
3496 the assembly code.
3497 The way md_assemble works is that all opcodes sharing the same mnemonic
3498 name are iterated to find a match to the assembly line. In this data
3499 structure, each such opcode will have one operand_error_record
3500 allocated and inserted. In other words, excessive errors related to
3501 a single opcode are disregarded. */
3502 operand_errors operand_error_report;
3503
3504 /* Free record nodes. */
3505 static operand_error_record *free_opnd_error_record_nodes = NULL;
3506
3507 /* Initialize the data structure that stores the operand mismatch
3508 information on assembling one line of the assembly code. */
3509 static void
3510 init_operand_error_report (void)
3511 {
3512 if (operand_error_report.head != NULL)
3513 {
3514 gas_assert (operand_error_report.tail != NULL);
3515 operand_error_report.tail->next = free_opnd_error_record_nodes;
3516 free_opnd_error_record_nodes = operand_error_report.head;
3517 operand_error_report.head = NULL;
3518 operand_error_report.tail = NULL;
3519 return;
3520 }
3521 gas_assert (operand_error_report.tail == NULL);
3522 }
3523
3524 /* Return TRUE if some operand error has been recorded during the
3525 parsing of the current assembly line using the opcode *OPCODE;
3526 otherwise return FALSE. */
3527 static inline bfd_boolean
3528 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3529 {
3530 operand_error_record *record = operand_error_report.head;
3531 return record && record->opcode == opcode;
3532 }
3533
3534 /* Add the error record *NEW_RECORD to operand_error_report, keyed by
3535 the record's OPCODE field.
3536 N.B. only one record is kept for each opcode, i.e. at most one error is
3537 recorded for each instruction template. */
3538
3539 static void
3540 add_operand_error_record (const operand_error_record* new_record)
3541 {
3542 const aarch64_opcode *opcode = new_record->opcode;
3543 operand_error_record* record = operand_error_report.head;
3544
3545 /* The record may have been created for this opcode. If not, we need
3546 to prepare one. */
3547 if (! opcode_has_operand_error_p (opcode))
3548 {
3549 /* Get one empty record. */
3550 if (free_opnd_error_record_nodes == NULL)
3551 {
3552 record = xmalloc (sizeof (operand_error_record));
3553 if (record == NULL)
3554 abort ();
3555 }
3556 else
3557 {
3558 record = free_opnd_error_record_nodes;
3559 free_opnd_error_record_nodes = record->next;
3560 }
3561 record->opcode = opcode;
3562 /* Insert at the head. */
3563 record->next = operand_error_report.head;
3564 operand_error_report.head = record;
3565 if (operand_error_report.tail == NULL)
3566 operand_error_report.tail = record;
3567 }
3568 else if (record->detail.kind != AARCH64_OPDE_NIL
3569 && record->detail.index <= new_record->detail.index
3570 && operand_error_higher_severity_p (record->detail.kind,
3571 new_record->detail.kind))
3572 {
3573 /* In the case of multiple errors found on operands related to a
3574 single opcode, only record the error of the leftmost operand and
3575 only if the error is of higher severity. */
3576 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3577 " the existing error %s on operand %d",
3578 operand_mismatch_kind_names[new_record->detail.kind],
3579 new_record->detail.index,
3580 operand_mismatch_kind_names[record->detail.kind],
3581 record->detail.index);
3582 return;
3583 }
3584
3585 record->detail = new_record->detail;
3586 }
3587
3588 static inline void
3589 record_operand_error_info (const aarch64_opcode *opcode,
3590 aarch64_operand_error *error_info)
3591 {
3592 operand_error_record record;
3593 record.opcode = opcode;
3594 record.detail = *error_info;
3595 add_operand_error_record (&record);
3596 }
3597
3598 /* Record an error of kind KIND, with the detailed error message ERROR if
3599 it is not NULL, for operand IDX (counting from 0). */
3600
3601 static void
3602 record_operand_error (const aarch64_opcode *opcode, int idx,
3603 enum aarch64_operand_error_kind kind,
3604 const char* error)
3605 {
3606 aarch64_operand_error info;
3607 memset(&info, 0, sizeof (info));
3608 info.index = idx;
3609 info.kind = kind;
3610 info.error = error;
3611 record_operand_error_info (opcode, &info);
3612 }
3613
3614 static void
3615 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3616 enum aarch64_operand_error_kind kind,
3617 const char* error, const int *extra_data)
3618 {
3619 aarch64_operand_error info;
3620 info.index = idx;
3621 info.kind = kind;
3622 info.error = error;
3623 info.data[0] = extra_data[0];
3624 info.data[1] = extra_data[1];
3625 info.data[2] = extra_data[2];
3626 record_operand_error_info (opcode, &info);
3627 }
3628
3629 static void
3630 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3631 const char* error, int lower_bound,
3632 int upper_bound)
3633 {
3634 int data[3] = {lower_bound, upper_bound, 0};
3635 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3636 error, data);
3637 }
3638
3639 /* Remove the operand error record for *OPCODE. */
3640 static void ATTRIBUTE_UNUSED
3641 remove_operand_error_record (const aarch64_opcode *opcode)
3642 {
3643 if (opcode_has_operand_error_p (opcode))
3644 {
3645 operand_error_record* record = operand_error_report.head;
3646 gas_assert (record != NULL && operand_error_report.tail != NULL);
3647 operand_error_report.head = record->next;
3648 record->next = free_opnd_error_record_nodes;
3649 free_opnd_error_record_nodes = record;
3650 if (operand_error_report.head == NULL)
3651 {
3652 gas_assert (operand_error_report.tail == record);
3653 operand_error_report.tail = NULL;
3654 }
3655 }
3656 }
3657
3658 /* Given the instruction in *INSTR, return the index of the best matched
3659 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3660
3661 Return -1 if there is no qualifier sequence; return the first match
3662 if multiple matches are found. */
3663
3664 static int
3665 find_best_match (const aarch64_inst *instr,
3666 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3667 {
3668 int i, num_opnds, max_num_matched, idx;
3669
3670 num_opnds = aarch64_num_of_operands (instr->opcode);
3671 if (num_opnds == 0)
3672 {
3673 DEBUG_TRACE ("no operand");
3674 return -1;
3675 }
3676
3677 max_num_matched = 0;
3678 idx = -1;
3679
3680 /* For each pattern. */
3681 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3682 {
3683 int j, num_matched;
3684 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3685
3686 /* Most opcodes have far fewer patterns in the list. */
3687 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3688 {
3689 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3690 if (i != 0 && idx == -1)
3691 /* If nothing has been matched, return the 1st sequence. */
3692 idx = 0;
3693 break;
3694 }
3695
3696 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3697 if (*qualifiers == instr->operands[j].qualifier)
3698 ++num_matched;
3699
3700 if (num_matched > max_num_matched)
3701 {
3702 max_num_matched = num_matched;
3703 idx = i;
3704 }
3705 }
3706
3707 DEBUG_TRACE ("return with %d", idx);
3708 return idx;
3709 }
3710
3711 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3712 corresponding operands in *INSTR. */
3713
3714 static inline void
3715 assign_qualifier_sequence (aarch64_inst *instr,
3716 const aarch64_opnd_qualifier_t *qualifiers)
3717 {
3718 int i = 0;
3719 int num_opnds = aarch64_num_of_operands (instr->opcode);
3720 gas_assert (num_opnds);
3721 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3722 instr->operands[i].qualifier = *qualifiers;
3723 }
3724
3725 /* Print operands for diagnostic purposes. */
3726
3727 static void
3728 print_operands (char *buf, const aarch64_opcode *opcode,
3729 const aarch64_opnd_info *opnds)
3730 {
3731 int i;
3732
3733 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3734 {
3735 const size_t size = 128;
3736 char str[size];
3737
3738 /* We mainly consult the opcode's operand info; however, we also look into
3739 inst->operands to support the printing of an optional
3740 operand.
3741 The two operand codes should be the same in all cases, apart from
3742 when the operand can be optional. */
3743 if (opcode->operands[i] == AARCH64_OPND_NIL
3744 || opnds[i].type == AARCH64_OPND_NIL)
3745 break;
3746
3747 /* Generate the operand string in STR. */
3748 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3749
3750 /* Delimiter. */
3751 if (str[0] != '\0')
3752 strcat (buf, i == 0 ? " " : ",");
3753
3754 /* Append the operand string. */
3755 strcat (buf, str);
3756 }
3757 }
3758
3759 /* Output an informational message to stderr. */
3760
3761 static void
3762 output_info (const char *format, ...)
3763 {
3764 char *file;
3765 unsigned int line;
3766 va_list args;
3767
3768 as_where (&file, &line);
3769 if (file)
3770 {
3771 if (line != 0)
3772 fprintf (stderr, "%s:%u: ", file, line);
3773 else
3774 fprintf (stderr, "%s: ", file);
3775 }
3776 fprintf (stderr, _("Info: "));
3777 va_start (args, format);
3778 vfprintf (stderr, format, args);
3779 va_end (args);
3780 (void) putc ('\n', stderr);
3781 }
3782
3783 /* Output one operand error record. */
3784
3785 static void
3786 output_operand_error_record (const operand_error_record *record, char *str)
3787 {
3788 int idx = record->detail.index;
3789 const aarch64_opcode *opcode = record->opcode;
3790 enum aarch64_opnd opd_code = (idx != -1 ? opcode->operands[idx]
3791 : AARCH64_OPND_NIL);
3792 const aarch64_operand_error *detail = &record->detail;
3793
3794 switch (detail->kind)
3795 {
3796 case AARCH64_OPDE_NIL:
3797 gas_assert (0);
3798 break;
3799
3800 case AARCH64_OPDE_SYNTAX_ERROR:
3801 case AARCH64_OPDE_RECOVERABLE:
3802 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
3803 case AARCH64_OPDE_OTHER_ERROR:
3804 gas_assert (idx >= 0);
3805 /* Use the prepared error message if there is one; otherwise use the
3806 operand description string to describe the error. */
3807 if (detail->error != NULL)
3808 {
3809 if (detail->index == -1)
3810 as_bad (_("%s -- `%s'"), detail->error, str);
3811 else
3812 as_bad (_("%s at operand %d -- `%s'"),
3813 detail->error, detail->index + 1, str);
3814 }
3815 else
3816 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
3817 aarch64_get_operand_desc (opd_code), str);
3818 break;
3819
3820 case AARCH64_OPDE_INVALID_VARIANT:
3821 as_bad (_("operand mismatch -- `%s'"), str);
3822 if (verbose_error_p)
3823 {
3824 /* We will try to correct the erroneous instruction and also provide
3825 more information e.g. all other valid variants.
3826
3827 The string representation of the corrected instruction and other
3828 valid variants are generated by
3829
3830 1) obtaining the intermediate representation of the erroneous
3831 instruction;
3832 2) manipulating the IR, e.g. replacing the operand qualifier;
3833 3) printing out the instruction by calling the printer functions
3834 shared with the disassembler.
3835
3836 The limitation of this method is that the exact input assembly
3837 line cannot be accurately reproduced in some cases, for example an
3838 optional operand present in the actual assembly line will be
3839 omitted in the output; likewise for the optional syntax rules,
3840 e.g. the # before the immediate. Another limitation is that the
3841 assembly symbols and relocation operations in the assembly line
3842 currently cannot be printed out in the error report. Last but not
3843 least, when other errors co-exist with this error, the
3844 'corrected' instruction may still be incorrect, e.g. given
3845 'ldnp h0,h1,[x0,#6]!'
3846 this diagnosis will provide the version:
3847 'ldnp s0,s1,[x0,#6]!'
3848 which is still not right. */
3849 size_t len = strlen (get_mnemonic_name (str));
3850 int i, qlf_idx;
3851 bfd_boolean result;
3852 const size_t size = 2048;
3853 char buf[size];
3854 aarch64_inst *inst_base = &inst.base;
3855 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
3856
3857 /* Init inst. */
3858 reset_aarch64_instruction (&inst);
3859 inst_base->opcode = opcode;
3860
3861 /* Reset the error report so that there is no side effect on the
3862 following operand parsing. */
3863 init_operand_error_report ();
3864
3865 /* Fill inst. */
3866 result = parse_operands (str + len, opcode)
3867 && programmer_friendly_fixup (&inst);
3868 gas_assert (result);
3869 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
3870 NULL, NULL);
3871 gas_assert (!result);
3872
3873 /* Find the most matched qualifier sequence. */
3874 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
3875 gas_assert (qlf_idx > -1);
3876
3877 /* Assign the qualifiers. */
3878 assign_qualifier_sequence (inst_base,
3879 opcode->qualifiers_list[qlf_idx]);
3880
3881 /* Print the hint. */
3882 output_info (_(" did you mean this?"));
3883 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
3884 print_operands (buf, opcode, inst_base->operands);
3885 output_info (_(" %s"), buf);
3886
3887 /* Print out other variant(s) if there is any. */
3888 if (qlf_idx != 0 ||
3889 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
3890 output_info (_(" other valid variant(s):"));
3891
3892 /* For each pattern. */
3893 qualifiers_list = opcode->qualifiers_list;
3894 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3895 {
3896 /* Most opcodes have far fewer patterns in the list; the
3897 first NIL qualifier indicates the end of the list. */
3898 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
3899 break;
3900
3901 if (i != qlf_idx)
3902 {
3903 /* Mnemonic name. */
3904 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
3905
3906 /* Assign the qualifiers. */
3907 assign_qualifier_sequence (inst_base, *qualifiers_list);
3908
3909 /* Print instruction. */
3910 print_operands (buf, opcode, inst_base->operands);
3911
3912 output_info (_(" %s"), buf);
3913 }
3914 }
3915 }
3916 break;
3917
3918 case AARCH64_OPDE_OUT_OF_RANGE:
3919 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
3920 detail->error ? detail->error : _("immediate value"),
3921 detail->data[0], detail->data[1], detail->index + 1, str);
3922 break;
3923
3924 case AARCH64_OPDE_REG_LIST:
3925 if (detail->data[0] == 1)
3926 as_bad (_("invalid number of registers in the list; "
3927 "only 1 register is expected at operand %d -- `%s'"),
3928 detail->index + 1, str);
3929 else
3930 as_bad (_("invalid number of registers in the list; "
3931 "%d registers are expected at operand %d -- `%s'"),
3932 detail->data[0], detail->index + 1, str);
3933 break;
3934
3935 case AARCH64_OPDE_UNALIGNED:
3936 as_bad (_("immediate value should be a multiple of "
3937 "%d at operand %d -- `%s'"),
3938 detail->data[0], detail->index + 1, str);
3939 break;
3940
3941 default:
3942 gas_assert (0);
3943 break;
3944 }
3945 }
3946
3947 /* Process and output the error message about the operand mismatching.
3948
3949 When this function is called, the operand error information has
3950 been collected for an assembly line and there will be multiple
3951 errors in the case of multiple instruction templates; output the
3952 error message that most closely describes the problem. */
3953
3954 static void
3955 output_operand_error_report (char *str)
3956 {
3957 int largest_error_pos;
3958 const char *msg = NULL;
3959 enum aarch64_operand_error_kind kind;
3960 operand_error_record *curr;
3961 operand_error_record *head = operand_error_report.head;
3962 operand_error_record *record = NULL;
3963
3964 /* No error to report. */
3965 if (head == NULL)
3966 return;
3967
3968 gas_assert (head != NULL && operand_error_report.tail != NULL);
3969
3970 /* Only one error. */
3971 if (head == operand_error_report.tail)
3972 {
3973 DEBUG_TRACE ("single opcode entry with error kind: %s",
3974 operand_mismatch_kind_names[head->detail.kind]);
3975 output_operand_error_record (head, str);
3976 return;
3977 }
3978
3979 /* Find the error kind of the highest severity. */
3980 DEBUG_TRACE ("multiple opcode entres with error kind");
3981 kind = AARCH64_OPDE_NIL;
3982 for (curr = head; curr != NULL; curr = curr->next)
3983 {
3984 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
3985 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
3986 if (operand_error_higher_severity_p (curr->detail.kind, kind))
3987 kind = curr->detail.kind;
3988 }
3989 gas_assert (kind != AARCH64_OPDE_NIL);
3990
3991 /* Pick one of the errors of KIND to report. */
3992 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
3993 for (curr = head; curr != NULL; curr = curr->next)
3994 {
3995 if (curr->detail.kind != kind)
3996 continue;
3997 /* If there are multiple errors, pick up the one with the highest
3998 mismatching operand index. In the case of multiple errors with
3999 equally high operand indexes, pick the first one, or the first
4000 one with a non-NULL error message. */
4001 if (curr->detail.index > largest_error_pos
4002 || (curr->detail.index == largest_error_pos && msg == NULL
4003 && curr->detail.error != NULL))
4004 {
4005 largest_error_pos = curr->detail.index;
4006 record = curr;
4007 msg = record->detail.error;
4008 }
4009 }
4010
4011 gas_assert (largest_error_pos != -2 && record != NULL);
4012 DEBUG_TRACE ("Pick up error kind %s to report",
4013 operand_mismatch_kind_names[record->detail.kind]);
4014
4015 /* Output. */
4016 output_operand_error_record (record, str);
4017 }
4018 \f
4019 /* Write an AARCH64 instruction to buf - always little-endian. */
4020 static void
4021 put_aarch64_insn (char *buf, uint32_t insn)
4022 {
4023 unsigned char *where = (unsigned char *) buf;
4024 where[0] = insn;
4025 where[1] = insn >> 8;
4026 where[2] = insn >> 16;
4027 where[3] = insn >> 24;
4028 }
4029
4030 static uint32_t
4031 get_aarch64_insn (char *buf)
4032 {
4033 unsigned char *where = (unsigned char *) buf;
4034 uint32_t result;
4035 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4036 return result;
4037 }
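/* For illustration: put_aarch64_insn (buf, 0xd503201f) stores the NOP
   encoding as the byte sequence 1f 20 03 d5 regardless of host byte order,
   and get_aarch64_insn on the same buffer returns 0xd503201f again; this
   matches the little-endian aarch64_noop pattern used for code alignment
   further down in this file.  */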
4038
4039 static void
4040 output_inst (struct aarch64_inst *new_inst)
4041 {
4042 char *to = NULL;
4043
4044 to = frag_more (INSN_SIZE);
4045
4046 frag_now->tc_frag_data.recorded = 1;
4047
4048 put_aarch64_insn (to, inst.base.value);
4049
4050 if (inst.reloc.type != BFD_RELOC_UNUSED)
4051 {
4052 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4053 INSN_SIZE, &inst.reloc.exp,
4054 inst.reloc.pc_rel,
4055 inst.reloc.type);
4056 DEBUG_TRACE ("Prepared relocation fix up");
4057 /* Don't check the addend value against the instruction size,
4058 that's the job of our code in md_apply_fix(). */
4059 fixp->fx_no_overflow = 1;
4060 if (new_inst != NULL)
4061 fixp->tc_fix_data.inst = new_inst;
4062 if (aarch64_gas_internal_fixup_p ())
4063 {
4064 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4065 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4066 fixp->fx_addnumber = inst.reloc.flags;
4067 }
4068 }
4069
4070 dwarf2_emit_insn (INSN_SIZE);
4071 }
4072
4073 /* Link together opcodes of the same name. */
4074
4075 struct templates
4076 {
4077 aarch64_opcode *opcode;
4078 struct templates *next;
4079 };
4080
4081 typedef struct templates templates;
4082
4083 static templates *
4084 lookup_mnemonic (const char *start, int len)
4085 {
4086 templates *templ = NULL;
4087
4088 templ = hash_find_n (aarch64_ops_hsh, start, len);
4089 return templ;
4090 }
4091
4092 /* Subroutine of md_assemble, responsible for looking up the primary
4093 opcode from the mnemonic the user wrote. STR points to the
4094 beginning of the mnemonic. */
4095
4096 static templates *
4097 opcode_lookup (char **str)
4098 {
4099 char *end, *base;
4100 const aarch64_cond *cond;
4101 char condname[16];
4102 int len;
4103
4104 /* Scan up to the end of the mnemonic, which must end in white space,
4105 '.', or end of string. */
4106 for (base = end = *str; is_part_of_name(*end); end++)
4107 if (*end == '.')
4108 break;
4109
4110 if (end == base)
4111 return 0;
4112
4113 inst.cond = COND_ALWAYS;
4114
4115 /* Handle a possible condition. */
4116 if (end[0] == '.')
4117 {
4118 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4119 if (cond)
4120 {
4121 inst.cond = cond->value;
4122 *str = end + 3;
4123 }
4124 else
4125 {
4126 *str = end;
4127 return 0;
4128 }
4129 }
4130 else
4131 *str = end;
4132
4133 len = end - base;
4134
4135 if (inst.cond == COND_ALWAYS)
4136 {
4137 /* Look for unaffixed mnemonic. */
4138 return lookup_mnemonic (base, len);
4139 }
4140 else if (len <= 13)
4141 {
4142 /* append ".c" to mnemonic if conditional */
4143 memcpy (condname, base, len);
4144 memcpy (condname + len, ".c", 2);
4145 base = condname;
4146 len += 2;
4147 return lookup_mnemonic (base, len);
4148 }
4149
4150 return NULL;
4151 }
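/* For illustration: for "b.eq lbl" the scan above stops at the '.', the two
   characters "eq" are looked up in aarch64_cond_hsh and recorded in
   inst.cond, *str is advanced past ".eq", and the opcode is then looked up
   under the template name "b.c"; an unconditional mnemonic such as "ret" is
   simply looked up under its own name.  */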
4152
4153 /* Internal helper routine converting a vector neon_type_el structure
4154 *VECTYPE to a corresponding operand qualifier. */
4155
4156 static inline aarch64_opnd_qualifier_t
4157 vectype_to_qualifier (const struct neon_type_el *vectype)
4158 {
4159 /* Element size in bytes indexed by neon_el_type. */
4160 const unsigned char ele_size[5]
4161 = {1, 2, 4, 8, 16};
4162
4163 if (!vectype->defined || vectype->type == NT_invtype)
4164 goto vectype_conversion_fail;
4165
4166 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4167
4168 if (vectype->defined & NTA_HASINDEX)
4169 /* Vector element register. */
4170 return AARCH64_OPND_QLF_S_B + vectype->type;
4171 else
4172 {
4173 /* Vector register. */
4174 int reg_size = ele_size[vectype->type] * vectype->width;
4175 unsigned offset;
4176 if (reg_size != 16 && reg_size != 8)
4177 goto vectype_conversion_fail;
4178 /* The conversion is calculated based on the relation of the order of
4179 qualifiers to the vector element size and vector register size. */
4180 offset = (vectype->type == NT_q)
4181 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4182 gas_assert (offset <= 8);
4183 return AARCH64_OPND_QLF_V_8B + offset;
4184 }
4185
4186 vectype_conversion_fail:
4187 first_error (_("bad vector arrangement type"));
4188 return AARCH64_OPND_QLF_NIL;
4189 }
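/* A worked example of the mapping above, assuming the usual libopcodes
   ordering of the vector qualifiers (8B, 16B, 4H, 8H, 2S, 4S, 1D, 2D, 1Q
   starting at AARCH64_OPND_QLF_V_8B): ".4h" gives type NT_h (1) and width 4,
   so reg_size = 2 * 4 = 8 and offset = (1 << 1) + (8 >> 4) = 2, i.e.
   AARCH64_OPND_QLF_V_4H; ".2d" gives reg_size 16 and offset 7, i.e.
   AARCH64_OPND_QLF_V_2D; "v0.b[1]" has NTA_HASINDEX set and maps straight
   to AARCH64_OPND_QLF_S_B.  */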
4190
4191 /* Process an optional operand that has been omitted from the assembly line.
4192 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4193 instruction's opcode entry while IDX is the index of this omitted operand.
4194 */
4195
4196 static void
4197 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4198 int idx, aarch64_opnd_info *operand)
4199 {
4200 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4201 gas_assert (optional_operand_p (opcode, idx));
4202 gas_assert (!operand->present);
4203
4204 switch (type)
4205 {
4206 case AARCH64_OPND_Rd:
4207 case AARCH64_OPND_Rn:
4208 case AARCH64_OPND_Rm:
4209 case AARCH64_OPND_Rt:
4210 case AARCH64_OPND_Rt2:
4211 case AARCH64_OPND_Rs:
4212 case AARCH64_OPND_Ra:
4213 case AARCH64_OPND_Rt_SYS:
4214 case AARCH64_OPND_Rd_SP:
4215 case AARCH64_OPND_Rn_SP:
4216 case AARCH64_OPND_Fd:
4217 case AARCH64_OPND_Fn:
4218 case AARCH64_OPND_Fm:
4219 case AARCH64_OPND_Fa:
4220 case AARCH64_OPND_Ft:
4221 case AARCH64_OPND_Ft2:
4222 case AARCH64_OPND_Sd:
4223 case AARCH64_OPND_Sn:
4224 case AARCH64_OPND_Sm:
4225 case AARCH64_OPND_Vd:
4226 case AARCH64_OPND_Vn:
4227 case AARCH64_OPND_Vm:
4228 case AARCH64_OPND_VdD1:
4229 case AARCH64_OPND_VnD1:
4230 operand->reg.regno = default_value;
4231 break;
4232
4233 case AARCH64_OPND_Ed:
4234 case AARCH64_OPND_En:
4235 case AARCH64_OPND_Em:
4236 operand->reglane.regno = default_value;
4237 break;
4238
4239 case AARCH64_OPND_IDX:
4240 case AARCH64_OPND_BIT_NUM:
4241 case AARCH64_OPND_IMMR:
4242 case AARCH64_OPND_IMMS:
4243 case AARCH64_OPND_SHLL_IMM:
4244 case AARCH64_OPND_IMM_VLSL:
4245 case AARCH64_OPND_IMM_VLSR:
4246 case AARCH64_OPND_CCMP_IMM:
4247 case AARCH64_OPND_FBITS:
4248 case AARCH64_OPND_UIMM4:
4249 case AARCH64_OPND_UIMM3_OP1:
4250 case AARCH64_OPND_UIMM3_OP2:
4251 case AARCH64_OPND_IMM:
4252 case AARCH64_OPND_WIDTH:
4253 case AARCH64_OPND_UIMM7:
4254 case AARCH64_OPND_NZCV:
4255 operand->imm.value = default_value;
4256 break;
4257
4258 case AARCH64_OPND_EXCEPTION:
4259 inst.reloc.type = BFD_RELOC_UNUSED;
4260 break;
4261
4262 case AARCH64_OPND_BARRIER_ISB:
4263 operand->barrier = aarch64_barrier_options + default_value;
4264
4265 default:
4266 break;
4267 }
4268 }
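/* For example, "ret" omits its optional Rn operand; parse_operands
   backtracks, calls the function above, and the register number is filled
   in from the opcode table default via get_optional_operand_default_value
   (architecturally RET defaults to X30, the link register), so "ret"
   assembles exactly like "ret x30".  */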
4269
4270 /* Process the relocation type for move wide instructions.
4271 Return TRUE on success; otherwise return FALSE. */
4272
4273 static bfd_boolean
4274 process_movw_reloc_info (void)
4275 {
4276 int is32;
4277 unsigned shift;
4278
4279 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4280
4281 if (inst.base.opcode->op == OP_MOVK)
4282 switch (inst.reloc.type)
4283 {
4284 case BFD_RELOC_AARCH64_MOVW_G0_S:
4285 case BFD_RELOC_AARCH64_MOVW_G1_S:
4286 case BFD_RELOC_AARCH64_MOVW_G2_S:
4287 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4288 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4289 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4290 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4291 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4292 set_syntax_error
4293 (_("the specified relocation type is not allowed for MOVK"));
4294 return FALSE;
4295 default:
4296 break;
4297 }
4298
4299 switch (inst.reloc.type)
4300 {
4301 case BFD_RELOC_AARCH64_MOVW_G0:
4302 case BFD_RELOC_AARCH64_MOVW_G0_S:
4303 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4304 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4305 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4306 shift = 0;
4307 break;
4308 case BFD_RELOC_AARCH64_MOVW_G1:
4309 case BFD_RELOC_AARCH64_MOVW_G1_S:
4310 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4311 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4312 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4313 shift = 16;
4314 break;
4315 case BFD_RELOC_AARCH64_MOVW_G2:
4316 case BFD_RELOC_AARCH64_MOVW_G2_S:
4317 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4318 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4319 if (is32)
4320 {
4321 set_fatal_syntax_error
4322 (_("the specified relocation type is not allowed for 32-bit "
4323 "register"));
4324 return FALSE;
4325 }
4326 shift = 32;
4327 break;
4328 case BFD_RELOC_AARCH64_MOVW_G3:
4329 if (is32)
4330 {
4331 set_fatal_syntax_error
4332 (_("the specified relocation type is not allowed for 32-bit "
4333 "register"));
4334 return FALSE;
4335 }
4336 shift = 48;
4337 break;
4338 default:
4339 /* More cases should be added when more MOVW-related relocation types
4340 are supported in GAS. */
4341 gas_assert (aarch64_gas_internal_fixup_p ());
4342 /* The shift amount should have already been set by the parser. */
4343 return TRUE;
4344 }
4345 inst.base.operands[1].shifter.amount = shift;
4346 return TRUE;
4347 }
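/* For illustration: "movz x0, #:abs_g1:sym" comes in with relocation type
   BFD_RELOC_AARCH64_MOVW_G1, so the code above records an implicit LSL #16
   in operands[1].shifter.amount; the same source text using ":abs_g2:" on a
   W-register destination is rejected, since only the G0/G1 groups fit in a
   32-bit register.  */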
4348
4349 /* A primitive base-2 log calculator. */
4350
4351 static inline unsigned int
4352 get_logsz (unsigned int size)
4353 {
4354 const unsigned char ls[16] =
4355 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4356 if (size > 16)
4357 {
4358 gas_assert (0);
4359 return -1;
4360 }
4361 gas_assert (ls[size - 1] != (unsigned char)-1);
4362 return ls[size - 1];
4363 }
4364
4365 /* Determine and return the real reloc type code for an instruction
4366 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4367
4368 static inline bfd_reloc_code_real_type
4369 ldst_lo12_determine_real_reloc_type (void)
4370 {
4371 int logsz;
4372 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4373 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4374
4375 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4376 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4377 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4378 BFD_RELOC_AARCH64_LDST128_LO12
4379 };
4380
4381 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4382 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4383
4384 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4385 opd1_qlf =
4386 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4387 1, opd0_qlf, 0);
4388 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4389
4390 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4391 gas_assert (logsz >= 0 && logsz <= 4);
4392
4393 return reloc_ldst_lo12[logsz];
4394 }
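/* A worked example of the two helpers above: a 32-bit access size gives
   get_logsz (4) == 2 (via ls[3]), which selects
   BFD_RELOC_AARCH64_LDST32_LO12 from reloc_ldst_lo12; similarly a 16-byte
   (Q-register) access gives get_logsz (16) == 4 and hence
   BFD_RELOC_AARCH64_LDST128_LO12.  */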
4395
4396 /* Check whether a register list REGINFO is valid. The registers must be
4397 numbered in increasing order (modulo 32), in increments of one or two.
4398
4399 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4400 increments of two.
4401
4402 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4403
4404 static bfd_boolean
4405 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4406 {
4407 uint32_t i, nb_regs, prev_regno, incr;
4408
4409 nb_regs = 1 + (reginfo & 0x3);
4410 reginfo >>= 2;
4411 prev_regno = reginfo & 0x1f;
4412 incr = accept_alternate ? 2 : 1;
4413
4414 for (i = 1; i < nb_regs; ++i)
4415 {
4416 uint32_t curr_regno;
4417 reginfo >>= 5;
4418 curr_regno = reginfo & 0x1f;
4419 if (curr_regno != ((prev_regno + incr) & 0x1f))
4420 return FALSE;
4421 prev_regno = curr_regno;
4422 }
4423
4424 return TRUE;
4425 }
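/* For illustration, using the packing the register-list parser produces
   (the low two bits hold the register count minus one, followed by
   successive 5-bit register numbers): the list "{v1.8b, v2.8b, v3.8b}" is
   encoded as (3 << 12) | (2 << 7) | (1 << 2) | 2 == 0x3106, which the check
   above accepts with incr == 1; "{v30.8b, v31.8b, v0.8b}" is also accepted
   because the comparison is done modulo 32.  */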
4426
4427 /* Generic instruction operand parser. This does no encoding and no
4428 semantic validation; it merely squirrels values away in the inst
4429 structure. Returns TRUE or FALSE depending on whether the
4430 specified grammar matched. */
4431
4432 static bfd_boolean
4433 parse_operands (char *str, const aarch64_opcode *opcode)
4434 {
4435 int i;
4436 char *backtrack_pos = 0;
4437 const enum aarch64_opnd *operands = opcode->operands;
4438
4439 clear_error ();
4440 skip_whitespace (str);
4441
4442 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4443 {
4444 int64_t val;
4445 int isreg32, isregzero;
4446 int comma_skipped_p = 0;
4447 aarch64_reg_type rtype;
4448 struct neon_type_el vectype;
4449 aarch64_opnd_info *info = &inst.base.operands[i];
4450
4451 DEBUG_TRACE ("parse operand %d", i);
4452
4453 /* Assign the operand code. */
4454 info->type = operands[i];
4455
4456 if (optional_operand_p (opcode, i))
4457 {
4458 /* Remember where we are in case we need to backtrack. */
4459 gas_assert (!backtrack_pos);
4460 backtrack_pos = str;
4461 }
4462
4463 /* Expect a comma between operands; the backtrack mechanism will take
4464 care of the case of an omitted optional operand. */
4465 if (i > 0 && ! skip_past_char (&str, ','))
4466 {
4467 set_syntax_error (_("comma expected between operands"));
4468 goto failure;
4469 }
4470 else
4471 comma_skipped_p = 1;
4472
4473 switch (operands[i])
4474 {
4475 case AARCH64_OPND_Rd:
4476 case AARCH64_OPND_Rn:
4477 case AARCH64_OPND_Rm:
4478 case AARCH64_OPND_Rt:
4479 case AARCH64_OPND_Rt2:
4480 case AARCH64_OPND_Rs:
4481 case AARCH64_OPND_Ra:
4482 case AARCH64_OPND_Rt_SYS:
4483 po_int_reg_or_fail (1, 0);
4484 break;
4485
4486 case AARCH64_OPND_Rd_SP:
4487 case AARCH64_OPND_Rn_SP:
4488 po_int_reg_or_fail (0, 1);
4489 break;
4490
4491 case AARCH64_OPND_Rm_EXT:
4492 case AARCH64_OPND_Rm_SFT:
4493 po_misc_or_fail (parse_shifter_operand
4494 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4495 ? SHIFTED_ARITH_IMM
4496 : SHIFTED_LOGIC_IMM)));
4497 if (!info->shifter.operator_present)
4498 {
4499 /* Default to LSL if not present. Libopcodes prefers shifter
4500 kind to be explicit. */
4501 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4502 info->shifter.kind = AARCH64_MOD_LSL;
4503 /* For Rm_EXT, libopcodes will carry out a further check on whether
4504 or not the stack pointer is used in the instruction (recall that
4505 "the extend operator is not optional unless at least one of
4506 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4507 }
4508 break;
4509
4510 case AARCH64_OPND_Fd:
4511 case AARCH64_OPND_Fn:
4512 case AARCH64_OPND_Fm:
4513 case AARCH64_OPND_Fa:
4514 case AARCH64_OPND_Ft:
4515 case AARCH64_OPND_Ft2:
4516 case AARCH64_OPND_Sd:
4517 case AARCH64_OPND_Sn:
4518 case AARCH64_OPND_Sm:
4519 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4520 if (val == PARSE_FAIL)
4521 {
4522 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4523 goto failure;
4524 }
4525 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4526
4527 info->reg.regno = val;
4528 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4529 break;
4530
4531 case AARCH64_OPND_Vd:
4532 case AARCH64_OPND_Vn:
4533 case AARCH64_OPND_Vm:
4534 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4535 if (val == PARSE_FAIL)
4536 {
4537 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4538 goto failure;
4539 }
4540 if (vectype.defined & NTA_HASINDEX)
4541 goto failure;
4542
4543 info->reg.regno = val;
4544 info->qualifier = vectype_to_qualifier (&vectype);
4545 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4546 goto failure;
4547 break;
4548
4549 case AARCH64_OPND_VdD1:
4550 case AARCH64_OPND_VnD1:
4551 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4552 if (val == PARSE_FAIL)
4553 {
4554 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4555 goto failure;
4556 }
4557 if (vectype.type != NT_d || vectype.index != 1)
4558 {
4559 set_fatal_syntax_error
4560 (_("the top half of a 128-bit FP/SIMD register is expected"));
4561 goto failure;
4562 }
4563 info->reg.regno = val;
4564 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
4565 here; it is correct for the purpose of encoding/decoding since
4566 only the register number is explicitly encoded in the related
4567 instructions, although this appears a bit hacky. */
4568 info->qualifier = AARCH64_OPND_QLF_S_D;
4569 break;
4570
4571 case AARCH64_OPND_Ed:
4572 case AARCH64_OPND_En:
4573 case AARCH64_OPND_Em:
4574 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4575 if (val == PARSE_FAIL)
4576 {
4577 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4578 goto failure;
4579 }
4580 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4581 goto failure;
4582
4583 info->reglane.regno = val;
4584 info->reglane.index = vectype.index;
4585 info->qualifier = vectype_to_qualifier (&vectype);
4586 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4587 goto failure;
4588 break;
4589
4590 case AARCH64_OPND_LVn:
4591 case AARCH64_OPND_LVt:
4592 case AARCH64_OPND_LVt_AL:
4593 case AARCH64_OPND_LEt:
4594 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4595 goto failure;
4596 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4597 {
4598 set_fatal_syntax_error (_("invalid register list"));
4599 goto failure;
4600 }
4601 info->reglist.first_regno = (val >> 2) & 0x1f;
4602 info->reglist.num_regs = (val & 0x3) + 1;
4603 if (operands[i] == AARCH64_OPND_LEt)
4604 {
4605 if (!(vectype.defined & NTA_HASINDEX))
4606 goto failure;
4607 info->reglist.has_index = 1;
4608 info->reglist.index = vectype.index;
4609 }
4610 else if (!(vectype.defined & NTA_HASTYPE))
4611 goto failure;
4612 info->qualifier = vectype_to_qualifier (&vectype);
4613 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4614 goto failure;
4615 break;
4616
4617 case AARCH64_OPND_Cn:
4618 case AARCH64_OPND_Cm:
4619 po_reg_or_fail (REG_TYPE_CN);
4620 if (val > 15)
4621 {
4622 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4623 goto failure;
4624 }
4625 inst.base.operands[i].reg.regno = val;
4626 break;
4627
4628 case AARCH64_OPND_SHLL_IMM:
4629 case AARCH64_OPND_IMM_VLSR:
4630 po_imm_or_fail (1, 64);
4631 info->imm.value = val;
4632 break;
4633
4634 case AARCH64_OPND_CCMP_IMM:
4635 case AARCH64_OPND_FBITS:
4636 case AARCH64_OPND_UIMM4:
4637 case AARCH64_OPND_UIMM3_OP1:
4638 case AARCH64_OPND_UIMM3_OP2:
4639 case AARCH64_OPND_IMM_VLSL:
4640 case AARCH64_OPND_IMM:
4641 case AARCH64_OPND_WIDTH:
4642 po_imm_nc_or_fail ();
4643 info->imm.value = val;
4644 break;
4645
4646 case AARCH64_OPND_UIMM7:
4647 po_imm_or_fail (0, 127);
4648 info->imm.value = val;
4649 break;
4650
4651 case AARCH64_OPND_IDX:
4652 case AARCH64_OPND_BIT_NUM:
4653 case AARCH64_OPND_IMMR:
4654 case AARCH64_OPND_IMMS:
4655 po_imm_or_fail (0, 63);
4656 info->imm.value = val;
4657 break;
4658
4659 case AARCH64_OPND_IMM0:
4660 po_imm_nc_or_fail ();
4661 if (val != 0)
4662 {
4663 set_fatal_syntax_error (_("immediate zero expected"));
4664 goto failure;
4665 }
4666 info->imm.value = 0;
4667 break;
4668
4669 case AARCH64_OPND_FPIMM0:
4670 {
4671 int qfloat;
4672 bfd_boolean res1 = FALSE, res2 = FALSE;
4673 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4674 it is probably not worth the effort to support it. */
4675 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat))
4676 && !(res2 = parse_constant_immediate (&str, &val)))
4677 goto failure;
4678 if ((res1 && qfloat == 0) || (res2 && val == 0))
4679 {
4680 info->imm.value = 0;
4681 info->imm.is_fp = 1;
4682 break;
4683 }
4684 set_fatal_syntax_error (_("immediate zero expected"));
4685 goto failure;
4686 }
4687
4688 case AARCH64_OPND_IMM_MOV:
4689 {
4690 char *saved = str;
4691 if (reg_name_p (str, REG_TYPE_R_Z_SP))
4692 goto failure;
4693 str = saved;
4694 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4695 GE_OPT_PREFIX, 1));
4696 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4697 later. fix_mov_imm_insn will try to determine a machine
4698 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4699 message if the immediate cannot be moved by a single
4700 instruction. */
4701 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4702 inst.base.operands[i].skip = 1;
4703 }
4704 break;
4705
4706 case AARCH64_OPND_SIMD_IMM:
4707 case AARCH64_OPND_SIMD_IMM_SFT:
4708 if (! parse_big_immediate (&str, &val))
4709 goto failure;
4710 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4711 /* addr_off_p */ 0,
4712 /* need_libopcodes_p */ 1,
4713 /* skip_p */ 1);
4714 /* Parse shift.
4715 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4716 shift, we don't check it here; we leave the checking to
4717 the libopcodes (operand_general_constraint_met_p). By
4718 doing this, we achieve better diagnostics. */
4719 if (skip_past_comma (&str)
4720 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4721 goto failure;
4722 if (!info->shifter.operator_present
4723 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4724 {
4725 /* Default to LSL if not present. Libopcodes prefers shifter
4726 kind to be explicit. */
4727 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4728 info->shifter.kind = AARCH64_MOD_LSL;
4729 }
4730 break;
4731
4732 case AARCH64_OPND_FPIMM:
4733 case AARCH64_OPND_SIMD_FPIMM:
4734 {
4735 int qfloat;
4736 if (! parse_aarch64_imm_float (&str, &qfloat))
4737 goto failure;
4738 if (qfloat == 0)
4739 {
4740 set_fatal_syntax_error (_("invalid floating-point constant"));
4741 goto failure;
4742 }
4743 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4744 inst.base.operands[i].imm.is_fp = 1;
4745 }
4746 break;
4747
4748 case AARCH64_OPND_LIMM:
4749 po_misc_or_fail (parse_shifter_operand (&str, info,
4750 SHIFTED_LOGIC_IMM));
4751 if (info->shifter.operator_present)
4752 {
4753 set_fatal_syntax_error
4754 (_("shift not allowed for bitmask immediate"));
4755 goto failure;
4756 }
4757 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4758 /* addr_off_p */ 0,
4759 /* need_libopcodes_p */ 1,
4760 /* skip_p */ 1);
4761 break;
4762
4763 case AARCH64_OPND_AIMM:
4764 if (opcode->op == OP_ADD)
4765 /* ADD may have relocation types. */
4766 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
4767 SHIFTED_ARITH_IMM));
4768 else
4769 po_misc_or_fail (parse_shifter_operand (&str, info,
4770 SHIFTED_ARITH_IMM));
4771 switch (inst.reloc.type)
4772 {
4773 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4774 info->shifter.amount = 12;
4775 break;
4776 case BFD_RELOC_UNUSED:
4777 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4778 if (info->shifter.kind != AARCH64_MOD_NONE)
4779 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
4780 inst.reloc.pc_rel = 0;
4781 break;
4782 default:
4783 break;
4784 }
4785 info->imm.value = 0;
4786 if (!info->shifter.operator_present)
4787 {
4788 /* Default to LSL if not present. Libopcodes prefers shifter
4789 kind to be explicit. */
4790 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4791 info->shifter.kind = AARCH64_MOD_LSL;
4792 }
4793 break;
4794
4795 case AARCH64_OPND_HALF:
4796 {
4797 /* #<imm16> or relocation. */
4798 int internal_fixup_p;
4799 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
4800 if (internal_fixup_p)
4801 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4802 skip_whitespace (str);
4803 if (skip_past_comma (&str))
4804 {
4805 /* {, LSL #<shift>} */
4806 if (! aarch64_gas_internal_fixup_p ())
4807 {
4808 set_fatal_syntax_error (_("can't mix relocation modifier "
4809 "with explicit shift"));
4810 goto failure;
4811 }
4812 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
4813 }
4814 else
4815 inst.base.operands[i].shifter.amount = 0;
4816 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
4817 inst.base.operands[i].imm.value = 0;
4818 if (! process_movw_reloc_info ())
4819 goto failure;
4820 }
4821 break;
4822
4823 case AARCH64_OPND_EXCEPTION:
4824 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
4825 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4826 /* addr_off_p */ 0,
4827 /* need_libopcodes_p */ 0,
4828 /* skip_p */ 1);
4829 break;
4830
4831 case AARCH64_OPND_NZCV:
4832 {
4833 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
4834 if (nzcv != NULL)
4835 {
4836 str += 4;
4837 info->imm.value = nzcv->value;
4838 break;
4839 }
4840 po_imm_or_fail (0, 15);
4841 info->imm.value = val;
4842 }
4843 break;
4844
4845 case AARCH64_OPND_COND:
4846 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
4847 str += 2;
4848 if (info->cond == NULL)
4849 {
4850 set_syntax_error (_("invalid condition"));
4851 goto failure;
4852 }
4853 break;
4854
4855 case AARCH64_OPND_ADDR_ADRP:
4856 po_misc_or_fail (parse_adrp (&str));
4857 /* Clear the value as operand needs to be relocated. */
4858 info->imm.value = 0;
4859 break;
4860
4861 case AARCH64_OPND_ADDR_PCREL14:
4862 case AARCH64_OPND_ADDR_PCREL19:
4863 case AARCH64_OPND_ADDR_PCREL21:
4864 case AARCH64_OPND_ADDR_PCREL26:
4865 po_misc_or_fail (parse_address_reloc (&str, info));
4866 if (!info->addr.pcrel)
4867 {
4868 set_syntax_error (_("invalid pc-relative address"));
4869 goto failure;
4870 }
4871 if (inst.gen_lit_pool
4872 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
4873 {
4874 /* Only permit "=value" in the literal load instructions.
4875 The literal will be generated by programmer_friendly_fixup. */
4876 set_syntax_error (_("invalid use of \"=immediate\""));
4877 goto failure;
4878 }
4879 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
4880 {
4881 set_syntax_error (_("unrecognized relocation suffix"));
4882 goto failure;
4883 }
4884 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
4885 {
4886 info->imm.value = inst.reloc.exp.X_add_number;
4887 inst.reloc.type = BFD_RELOC_UNUSED;
4888 }
4889 else
4890 {
4891 info->imm.value = 0;
4892 switch (opcode->iclass)
4893 {
4894 case compbranch:
4895 case condbranch:
4896 /* e.g. CBZ or B.COND */
4897 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
4898 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
4899 break;
4900 case testbranch:
4901 /* e.g. TBZ */
4902 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
4903 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
4904 break;
4905 case branch_imm:
4906 /* e.g. B or BL */
4907 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
4908 inst.reloc.type = (opcode->op == OP_BL)
4909 ? BFD_RELOC_AARCH64_CALL26 : BFD_RELOC_AARCH64_JUMP26;
4910 break;
4911 case loadlit:
4912 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
4913 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
4914 break;
4915 case pcreladdr:
4916 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
4917 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
4918 break;
4919 default:
4920 gas_assert (0);
4921 abort ();
4922 }
4923 inst.reloc.pc_rel = 1;
4924 }
4925 break;
4926
4927 case AARCH64_OPND_ADDR_SIMPLE:
4928 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
4929 /* [<Xn|SP>{, #<simm>}] */
4930 po_char_or_fail ('[');
4931 po_reg_or_fail (REG_TYPE_R64_SP);
4932 /* Accept optional ", #0". */
4933 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
4934 && skip_past_char (&str, ','))
4935 {
4936 skip_past_char (&str, '#');
4937 if (! skip_past_char (&str, '0'))
4938 {
4939 set_fatal_syntax_error
4940 (_("the optional immediate offset can only be 0"));
4941 goto failure;
4942 }
4943 }
4944 po_char_or_fail (']');
4945 info->addr.base_regno = val;
4946 break;
4947
4948 case AARCH64_OPND_ADDR_REGOFF:
4949 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
4950 po_misc_or_fail (parse_address (&str, info, 0));
4951 if (info->addr.pcrel || !info->addr.offset.is_reg
4952 || !info->addr.preind || info->addr.postind
4953 || info->addr.writeback)
4954 {
4955 set_syntax_error (_("invalid addressing mode"));
4956 goto failure;
4957 }
4958 if (!info->shifter.operator_present)
4959 {
4960 /* Default to LSL if not present. Libopcodes prefers shifter
4961 kind to be explicit. */
4962 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4963 info->shifter.kind = AARCH64_MOD_LSL;
4964 }
4965 /* Qualifier to be deduced by libopcodes. */
4966 break;
4967
4968 case AARCH64_OPND_ADDR_SIMM7:
4969 po_misc_or_fail (parse_address (&str, info, 0));
4970 if (info->addr.pcrel || info->addr.offset.is_reg
4971 || (!info->addr.preind && !info->addr.postind))
4972 {
4973 set_syntax_error (_("invalid addressing mode"));
4974 goto failure;
4975 }
4976 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4977 /* addr_off_p */ 1,
4978 /* need_libopcodes_p */ 1,
4979 /* skip_p */ 0);
4980 break;
4981
4982 case AARCH64_OPND_ADDR_SIMM9:
4983 case AARCH64_OPND_ADDR_SIMM9_2:
4984 po_misc_or_fail (parse_address_reloc (&str, info));
4985 if (info->addr.pcrel || info->addr.offset.is_reg
4986 || (!info->addr.preind && !info->addr.postind)
4987 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
4988 && info->addr.writeback))
4989 {
4990 set_syntax_error (_("invalid addressing mode"));
4991 goto failure;
4992 }
4993 if (inst.reloc.type != BFD_RELOC_UNUSED)
4994 {
4995 set_syntax_error (_("relocation not allowed"));
4996 goto failure;
4997 }
4998 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4999 /* addr_off_p */ 1,
5000 /* need_libopcodes_p */ 1,
5001 /* skip_p */ 0);
5002 break;
5003
5004 case AARCH64_OPND_ADDR_UIMM12:
5005 po_misc_or_fail (parse_address_reloc (&str, info));
5006 if (info->addr.pcrel || info->addr.offset.is_reg
5007 || !info->addr.preind || info->addr.writeback)
5008 {
5009 set_syntax_error (_("invalid addressing mode"));
5010 goto failure;
5011 }
5012 if (inst.reloc.type == BFD_RELOC_UNUSED)
5013 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5014 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5015 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5016 /* Leave qualifier to be determined by libopcodes. */
5017 break;
5018
5019 case AARCH64_OPND_SIMD_ADDR_POST:
5020 /* [<Xn|SP>], <Xm|#<amount>> */
5021 po_misc_or_fail (parse_address (&str, info, 1));
5022 if (!info->addr.postind || !info->addr.writeback)
5023 {
5024 set_syntax_error (_("invalid addressing mode"));
5025 goto failure;
5026 }
5027 if (!info->addr.offset.is_reg)
5028 {
5029 if (inst.reloc.exp.X_op == O_constant)
5030 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5031 else
5032 {
5033 set_fatal_syntax_error
5034 (_("writeback value should be an immediate constant"));
5035 goto failure;
5036 }
5037 }
5038 /* No qualifier. */
5039 break;
5040
5041 case AARCH64_OPND_SYSREG:
5042 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1)) == PARSE_FAIL)
5043 {
5044 set_syntax_error (_("unknown or missing system register name"));
5045 goto failure;
5046 }
5047 inst.base.operands[i].sysreg = val;
5048 break;
5049
5050 case AARCH64_OPND_PSTATEFIELD:
5051 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0)) == PARSE_FAIL)
5052 {
5053 set_syntax_error (_("unknown or missing PSTATE field name"));
5054 goto failure;
5055 }
5056 inst.base.operands[i].pstatefield = val;
5057 break;
5058
5059 case AARCH64_OPND_SYSREG_IC:
5060 inst.base.operands[i].sysins_op =
5061 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5062 goto sys_reg_ins;
5063 case AARCH64_OPND_SYSREG_DC:
5064 inst.base.operands[i].sysins_op =
5065 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5066 goto sys_reg_ins;
5067 case AARCH64_OPND_SYSREG_AT:
5068 inst.base.operands[i].sysins_op =
5069 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5070 goto sys_reg_ins;
5071 case AARCH64_OPND_SYSREG_TLBI:
5072 inst.base.operands[i].sysins_op =
5073 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5074 sys_reg_ins:
5075 if (inst.base.operands[i].sysins_op == NULL)
5076 {
5077 set_fatal_syntax_error ( _("unknown or missing operation name"));
5078 goto failure;
5079 }
5080 break;
5081
5082 case AARCH64_OPND_BARRIER:
5083 case AARCH64_OPND_BARRIER_ISB:
5084 val = parse_barrier (&str);
5085 if (val != PARSE_FAIL
5086 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5087 {
5088 /* ISB only accepts the option name 'sy'. */
5089 set_syntax_error
5090 (_("the specified option is not accepted in ISB"));
5091 /* Turn off backtrack as this optional operand is present. */
5092 backtrack_pos = 0;
5093 goto failure;
5094 }
5095 /* This is an extension to accept a 0..15 immediate. */
5096 if (val == PARSE_FAIL)
5097 po_imm_or_fail (0, 15);
5098 info->barrier = aarch64_barrier_options + val;
5099 break;
5100
5101 case AARCH64_OPND_PRFOP:
5102 val = parse_pldop (&str);
5103 /* This is an extension to accept a 0..31 immediate. */
5104 if (val == PARSE_FAIL)
5105 po_imm_or_fail (0, 31);
5106 inst.base.operands[i].prfop = aarch64_prfops + val;
5107 break;
5108
5109 default:
5110 as_fatal (_("unhandled operand code %d"), operands[i]);
5111 }
5112
5113 /* If we get here, this operand was successfully parsed. */
5114 inst.base.operands[i].present = 1;
5115 continue;
5116
5117 failure:
5118 /* The parse routine should already have set the error, but in case
5119 not, set a default one here. */
5120 if (! error_p ())
5121 set_default_error ();
5122
5123 if (! backtrack_pos)
5124 goto parse_operands_return;
5125
5126 /* Reaching here means we are dealing with an optional operand that is
5127 omitted from the assembly line. */
5128 gas_assert (optional_operand_p (opcode, i));
5129 info->present = 0;
5130 process_omitted_operand (operands[i], opcode, i, info);
5131
5132 /* Try again, skipping the optional operand at backtrack_pos. */
5133 str = backtrack_pos;
5134 backtrack_pos = 0;
5135
5136 /* If this is the last operand, which is optional and has been omitted,
5137 yet a comma was seen before it, reject the stray comma. */
5138 if (i && comma_skipped_p && i == aarch64_num_of_operands (opcode) - 1)
5139 {
5140 set_fatal_syntax_error
5141 (_("unexpected comma before the omitted optional operand"));
5142 goto parse_operands_return;
5143 }
5144
5145 /* Clear any error record after the omitted optional operand has been
5146 successfully handled. */
5147 clear_error ();
5148 }
5149
5150 /* Check if we have parsed all the operands. */
5151 if (*str != '\0' && ! error_p ())
5152 {
5153 /* Set I to the index of the last present operand; this is
5154 for the purpose of diagnostics. */
5155 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5156 ;
5157 set_fatal_syntax_error
5158 (_("unexpected characters following instruction"));
5159 }
5160
5161 parse_operands_return:
5162
5163 if (error_p ())
5164 {
5165 DEBUG_TRACE ("parsing FAIL: %s - %s",
5166 operand_mismatch_kind_names[get_error_kind ()],
5167 get_error_message ());
5168 /* Record the operand error properly; this is useful when there
5169 are multiple instruction templates for a mnemonic name, so that
5170 later on, we can select the error that most closely describes
5171 the problem. */
5172 record_operand_error (opcode, i, get_error_kind (),
5173 get_error_message ());
5174 return FALSE;
5175 }
5176 else
5177 {
5178 DEBUG_TRACE ("parsing SUCCESS");
5179 return TRUE;
5180 }
5181 }
5182
5183 /* Carry out some fix-ups to provide programmer-friendly features while
5184 keeping libopcodes happy, i.e. libopcodes only accepts
5185 the preferred architectural syntax.
5186 Return FALSE if there is any failure; otherwise return TRUE. */
5187
5188 static bfd_boolean
5189 programmer_friendly_fixup (aarch64_instruction *instr)
5190 {
5191 aarch64_inst *base = &instr->base;
5192 const aarch64_opcode *opcode = base->opcode;
5193 enum aarch64_op op = opcode->op;
5194 aarch64_opnd_info *operands = base->operands;
5195
5196 DEBUG_TRACE ("enter");
5197
5198 switch (opcode->iclass)
5199 {
5200 case testbranch:
5201 /* TBNZ Xn|Wn, #uimm6, label
5202 Test and Branch Not Zero: conditionally jumps to label if bit number
5203 uimm6 in register Xn is not zero. The bit number implies the width of
5204 the register, which may be written and should be disassembled as Wn if
5205 uimm is less than 32. */
5206 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5207 {
5208 if (operands[1].imm.value >= 32)
5209 {
5210 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5211 0, 31);
5212 return FALSE;
5213 }
5214 operands[0].qualifier = AARCH64_OPND_QLF_X;
5215 }
5216 break;
5217 case loadlit:
5218 /* LDR Wt, label | =value
5219 As a convenience, assemblers will typically permit the notation
5220 "=value" in conjunction with the pc-relative literal load instructions
5221 to automatically place an immediate value or symbolic address in a
5222 nearby literal pool and generate a hidden label which references it.
5223 ISREG has been set to 0 in the case of =value. */
5224 if (instr->gen_lit_pool
5225 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5226 {
5227 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5228 if (op == OP_LDRSW_LIT)
5229 size = 4;
5230 if (instr->reloc.exp.X_op != O_constant
5231 && instr->reloc.exp.X_op != O_symbol)
5232 {
5233 record_operand_error (opcode, 1,
5234 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5235 _("constant expression expected"));
5236 return FALSE;
5237 }
5238 if (! add_to_lit_pool (&instr->reloc.exp, size))
5239 {
5240 record_operand_error (opcode, 1,
5241 AARCH64_OPDE_OTHER_ERROR,
5242 _("literal pool insertion failed"));
5243 return FALSE;
5244 }
5245 }
5246 break;
5247 case asimdimm:
5248 /* Allow MOVI V0.16B, 97, LSL 0, although the preferred architectural
5249 syntax requires that the LSL shifter can only be used when the
5250 destination register has the shape of 4H, 8H, 2S or 4S. */
5251 if (op == OP_V_MOVI_B && operands[1].shifter.kind == AARCH64_MOD_LSL
5252 && (operands[0].qualifier == AARCH64_OPND_QLF_V_8B
5253 || operands[0].qualifier == AARCH64_OPND_QLF_V_16B))
5254 {
5255 if (operands[1].shifter.amount != 0)
5256 {
5257 record_operand_error (opcode, 1,
5258 AARCH64_OPDE_OTHER_ERROR,
5259 _("shift amount non-zero"));
5260 return FALSE;
5261 }
5262 operands[1].shifter.kind = AARCH64_MOD_NONE;
5263 operands[1].qualifier = AARCH64_OPND_QLF_NIL;
5264 }
5265 break;
5266 case log_shift:
5267 case bitfield:
5268 /* UXT[BHW] Wd, Wn
5269 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5270 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5271 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5272 A programmer-friendly assembler should accept a destination Xd in
5273 place of Wd, however that is not the preferred form for disassembly.
5274 */
5275 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5276 && operands[1].qualifier == AARCH64_OPND_QLF_W
5277 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5278 operands[0].qualifier = AARCH64_OPND_QLF_W;
5279 break;
5280
5281 case addsub_ext:
5282 {
5283 /* In the 64-bit form, the final register operand is written as Wm
5284 for all but the (possibly omitted) UXTX/LSL and SXTX
5285 operators.
5286 As a programmer-friendly assembler, we accept e.g.
5287 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5288 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5289 int idx = aarch64_operand_index (opcode->operands,
5290 AARCH64_OPND_Rm_EXT);
5291 gas_assert (idx == 1 || idx == 2);
5292 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5293 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5294 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5295 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5296 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5297 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5298 }
5299 break;
5300
5301 default:
5302 break;
5303 }
5304
5305 DEBUG_TRACE ("exit with SUCCESS");
5306 return TRUE;
5307 }
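/* Two concrete examples of the fix-ups above: "uxtb x0, w1" is accepted and
   the destination qualifier is silently narrowed to W, matching the
   preferred "uxtb w0, w1" form; "add x0, x1, x2, uxtb" has its final
   operand re-qualified from X to W so that libopcodes sees the
   architectural "add x0, x1, w2, uxtb" form.  */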
5308
5309 /* A wrapper function to interface with libopcodes on encoding and
5310 record the error message if there is any.
5311
5312 Return TRUE on success; otherwise return FALSE. */
5313
5314 static bfd_boolean
5315 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5316 aarch64_insn *code)
5317 {
5318 aarch64_operand_error error_info;
5319 error_info.kind = AARCH64_OPDE_NIL;
5320 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5321 return TRUE;
5322 else
5323 {
5324 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5325 record_operand_error_info (opcode, &error_info);
5326 return FALSE;
5327 }
5328 }
5329
5330 #ifdef DEBUG_AARCH64
5331 static inline void
5332 dump_opcode_operands (const aarch64_opcode *opcode)
5333 {
5334 int i = 0;
5335 while (opcode->operands[i] != AARCH64_OPND_NIL)
5336 {
5337 aarch64_verbose ("\t\t opnd%d: %s", i,
5338 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5339 ? aarch64_get_operand_name (opcode->operands[i])
5340 : aarch64_get_operand_desc (opcode->operands[i]));
5341 ++i;
5342 }
5343 }
5344 #endif /* DEBUG_AARCH64 */
5345
5346 /* This is the guts of the machine-dependent assembler. STR points to a
5347 machine dependent instruction. This function is supposed to emit
5348 the frags/bytes it assembles to. */
5349
5350 void
5351 md_assemble (char *str)
5352 {
5353 char *p = str;
5354 templates *template;
5355 aarch64_opcode *opcode;
5356 aarch64_inst *inst_base;
5357 unsigned saved_cond;
5358
5359 /* Align the previous label if needed. */
5360 if (last_label_seen != NULL)
5361 {
5362 symbol_set_frag (last_label_seen, frag_now);
5363 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5364 S_SET_SEGMENT (last_label_seen, now_seg);
5365 }
5366
5367 inst.reloc.type = BFD_RELOC_UNUSED;
5368
5369 DEBUG_TRACE ("\n\n");
5370 DEBUG_TRACE ("==============================");
5371 DEBUG_TRACE ("Enter md_assemble with %s", str);
5372
5373 template = opcode_lookup (&p);
5374 if (!template)
5375 {
5376 /* It wasn't an instruction, but it might be a register alias of
5377 the form alias .req reg directive. */
5378 if (!create_register_alias (str, p))
5379 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5380 str);
5381 return;
5382 }
5383
5384 skip_whitespace (p);
5385 if (*p == ',')
5386 {
5387 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5388 get_mnemonic_name (str), str);
5389 return;
5390 }
5391
5392 init_operand_error_report ();
5393
5394 saved_cond = inst.cond;
5395 reset_aarch64_instruction (&inst);
5396 inst.cond = saved_cond;
5397
5398 /* Iterate through all opcode entries with the same mnemonic name. */
5399 do
5400 {
5401 opcode = template->opcode;
5402
5403 DEBUG_TRACE ("opcode %s found", opcode->name);
5404 #ifdef DEBUG_AARCH64
5405 if (debug_dump)
5406 dump_opcode_operands (opcode);
5407 #endif /* DEBUG_AARCH64 */
5408
5409 /* Check that this instruction is supported for this CPU. */
5410 if (!opcode->avariant
5411 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5412 {
5413 as_bad (_("selected processor does not support `%s'"), str);
5414 return;
5415 }
5416
5417 mapping_state (MAP_INSN);
5418
5419 inst_base = &inst.base;
5420 inst_base->opcode = opcode;
5421
5422 /* Truly conditionally executed instructions, e.g. b.cond. */
5423 if (opcode->flags & F_COND)
5424 {
5425 gas_assert (inst.cond != COND_ALWAYS);
5426 inst_base->cond = get_cond_from_value (inst.cond);
5427 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5428 }
5429 else if (inst.cond != COND_ALWAYS)
5430 {
5431 /* It shouldn't arrive here, where the assembly looks like a
5432 conditional instruction but the found opcode is unconditional. */
5433 gas_assert (0);
5434 continue;
5435 }
5436
5437 if (parse_operands (p, opcode)
5438 && programmer_friendly_fixup (&inst)
5439 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5440 {
5441 if (inst.reloc.type == BFD_RELOC_UNUSED
5442 || !inst.reloc.need_libopcodes_p)
5443 output_inst (NULL);
5444 else
5445 {
5446 /* If there is relocation generated for the instruction,
5447 store the instruction information for the future fix-up. */
5448 struct aarch64_inst *copy;
5449 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5450 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5451 abort ();
5452 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5453 output_inst (copy);
5454 }
5455 return;
5456 }
5457
5458 template = template->next;
5459 if (template != NULL)
5460 {
5461 reset_aarch64_instruction (&inst);
5462 inst.cond = saved_cond;
5463 }
5464 }
5465 while (template != NULL);
5466
5467 /* Issue the error messages if any. */
5468 output_operand_error_report (str);
5469 }
5470
5471 /* Various frobbings of labels and their addresses. */
5472
5473 void
5474 aarch64_start_line_hook (void)
5475 {
5476 last_label_seen = NULL;
5477 }
5478
5479 void
5480 aarch64_frob_label (symbolS * sym)
5481 {
5482 last_label_seen = sym;
5483
5484 dwarf2_emit_label (sym);
5485 }
5486
5487 int
5488 aarch64_data_in_code (void)
5489 {
5490 if (!strncmp (input_line_pointer + 1, "data:", 5))
5491 {
5492 *input_line_pointer = '/';
5493 input_line_pointer += 5;
5494 *input_line_pointer = 0;
5495 return 1;
5496 }
5497
5498 return 0;
5499 }
5500
5501 char *
5502 aarch64_canonicalize_symbol_name (char *name)
5503 {
5504 int len;
5505
5506 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5507 *(name + len - 5) = 0;
5508
5509 return name;
5510 }
5511 \f
5512 /* Table of all register names defined by default. The user can
5513 define additional names with .req. Note that all register names
5514 should appear in both upper and lowercase variants. Some registers
5515 also have mixed-case names. */
5516
5517 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5518 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5519 #define REGSET31(p,t) \
5520 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5521 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5522 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5523 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5524 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5525 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5526 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5527 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5528 #define REGSET(p,t) \
5529 REGSET31(p,t), REGNUM(p,31,t)
5530
5531 /* These go into aarch64_reg_hsh hash-table. */
5532 static const reg_entry reg_names[] = {
5533 /* Integer registers. */
5534 REGSET31 (x, R_64), REGSET31 (X, R_64),
5535 REGSET31 (w, R_32), REGSET31 (W, R_32),
5536
5537 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5538 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5539
5540 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5541 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5542
5543 /* Coprocessor register numbers. */
5544 REGSET (c, CN), REGSET (C, CN),
5545
5546 /* Floating-point single precision registers. */
5547 REGSET (s, FP_S), REGSET (S, FP_S),
5548
5549 /* Floating-point double precision registers. */
5550 REGSET (d, FP_D), REGSET (D, FP_D),
5551
5552 /* Floating-point half precision registers. */
5553 REGSET (h, FP_H), REGSET (H, FP_H),
5554
5555 /* Floating-point byte precision registers. */
5556 REGSET (b, FP_B), REGSET (B, FP_B),
5557
5558 /* Floating-point quad precision registers. */
5559 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5560
5561 /* FP/SIMD registers. */
5562 REGSET (v, VN), REGSET (V, VN),
5563 };
5564
5565 #undef REGDEF
5566 #undef REGNUM
5567 #undef REGSET
5568
5569 #define N 1
5570 #define n 0
5571 #define Z 1
5572 #define z 0
5573 #define C 1
5574 #define c 0
5575 #define V 1
5576 #define v 0
5577 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5578 static const asm_nzcv nzcv_names[] = {
5579 {"nzcv", B (n, z, c, v)},
5580 {"nzcV", B (n, z, c, V)},
5581 {"nzCv", B (n, z, C, v)},
5582 {"nzCV", B (n, z, C, V)},
5583 {"nZcv", B (n, Z, c, v)},
5584 {"nZcV", B (n, Z, c, V)},
5585 {"nZCv", B (n, Z, C, v)},
5586 {"nZCV", B (n, Z, C, V)},
5587 {"Nzcv", B (N, z, c, v)},
5588 {"NzcV", B (N, z, c, V)},
5589 {"NzCv", B (N, z, C, v)},
5590 {"NzCV", B (N, z, C, V)},
5591 {"NZcv", B (N, Z, c, v)},
5592 {"NZcV", B (N, Z, c, V)},
5593 {"NZCv", B (N, Z, C, v)},
5594 {"NZCV", B (N, Z, C, V)}
5595 };
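/* For example, B (N, z, C, V) packs to 0b1011, i.e. the "NzCV" entry
   describes N, C and V set with Z clear, as supplied in the flags
   immediate of CCMP/CCMN.  */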
5596
5597 #undef N
5598 #undef n
5599 #undef Z
5600 #undef z
5601 #undef C
5602 #undef c
5603 #undef V
5604 #undef v
5605 #undef B
5606 \f
5607 /* MD interface: bits in the object file. */
5608
5609 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5610 for use in the object file, and store them in the array pointed to by buf.
5611 This knows about the endian-ness of the target machine and does
5612 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
5613 2 (short) and 4 (long). Floating-point numbers are put out as a series of
5614 LITTLENUMS (shorts, here at least). */
5615
5616 void
5617 md_number_to_chars (char *buf, valueT val, int n)
5618 {
5619 if (target_big_endian)
5620 number_to_chars_bigendian (buf, val, n);
5621 else
5622 number_to_chars_littleendian (buf, val, n);
5623 }
5624
5625 /* MD interface: Sections. */
5626
5627 /* Estimate the size of a frag before relaxing. Assume everything fits in
5628 4 bytes. */
5629
5630 int
5631 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5632 {
5633 fragp->fr_var = 4;
5634 return 4;
5635 }
5636
5637 /* Round up a section size to the appropriate boundary. */
5638
5639 valueT
5640 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5641 {
5642 return size;
5643 }
5644
5645 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5646 of an rs_align_code fragment. */
5647
5648 void
5649 aarch64_handle_align (fragS * fragP)
5650 {
5651 /* NOP = d503201f */
5652 /* AArch64 instructions are always little-endian. */
5653 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5654
5655 int bytes, fix, noop_size;
5656 char *p;
5657 const char *noop;
5658
5659 if (fragP->fr_type != rs_align_code)
5660 return;
5661
5662 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5663 p = fragP->fr_literal + fragP->fr_fix;
5664 fix = 0;
5665
5666 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
5667 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
5668
5669 #ifdef OBJ_ELF
5670 gas_assert (fragP->tc_frag_data.recorded);
5671 #endif
5672
5673 noop = aarch64_noop;
5674 noop_size = sizeof (aarch64_noop);
5675 fragP->fr_var = noop_size;
5676
5677 if (bytes & (noop_size - 1))
5678 {
5679 fix = bytes & (noop_size - 1);
5680 #ifdef OBJ_ELF
5681 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5682 #endif
5683 memset (p, 0, fix);
5684 p += fix;
5685 bytes -= fix;
5686 }
5687
5688 while (bytes >= noop_size)
5689 {
5690 memcpy (p, noop, noop_size);
5691 p += noop_size;
5692 bytes -= noop_size;
5693 fix += noop_size;
5694 }
5695
5696 fragP->fr_fix += fix;
5697 }
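/* A worked example of the fill logic above: if 6 bytes are needed to reach
   the alignment boundary, fix starts as 6 & 3 == 2, so two zero padding
   bytes are emitted (with a data mapping symbol inserted under OBJ_ELF),
   followed by one 4-byte NOP, and fr_fix grows by 6 in total.  */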
5698
5699 /* Called from md_do_align. Used to create an alignment
5700 frag in a code section. */
5701
5702 void
5703 aarch64_frag_align_code (int n, int max)
5704 {
5705 char *p;
5706
5707 /* We assume that there will never be a requirement
5708 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
5709 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
5710 as_fatal (_
5711 ("alignments greater than %d bytes not supported in .text sections"),
5712 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
5713
5714 p = frag_var (rs_align_code,
5715 MAX_MEM_FOR_RS_ALIGN_CODE,
5716 1,
5717 (relax_substateT) max,
5718 (symbolS *) NULL, (offsetT) n, (char *) NULL);
5719 *p = 0;
5720 }
5721
5722 /* Perform target specific initialisation of a frag.
5723 Note - despite the name this initialisation is not done when the frag
5724 is created, but only when its type is assigned. A frag can be created
5725 and used a long time before its type is set, so beware of assuming that
5726 this initialisation is performed first. */
5727
5728 #ifndef OBJ_ELF
5729 void
5730 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
5731 int max_chars ATTRIBUTE_UNUSED)
5732 {
5733 }
5734
5735 #else /* OBJ_ELF is defined. */
5736 void
5737 aarch64_init_frag (fragS * fragP, int max_chars)
5738 {
5739 /* Record a mapping symbol for alignment frags. We will delete this
5740 later if the alignment ends up empty. */
5741 if (!fragP->tc_frag_data.recorded)
5742 {
5743 fragP->tc_frag_data.recorded = 1;
5744 switch (fragP->fr_type)
5745 {
5746 case rs_align:
5747 case rs_align_test:
5748 case rs_fill:
5749 mapping_state_2 (MAP_DATA, max_chars);
5750 break;
5751 case rs_align_code:
5752 mapping_state_2 (MAP_INSN, max_chars);
5753 break;
5754 default:
5755 break;
5756 }
5757 }
5758 }
5759 \f
5760 /* Initialize the DWARF-2 unwind information for this procedure. */
5761
5762 void
5763 tc_aarch64_frame_initial_instructions (void)
5764 {
5765 cfi_add_CFA_def_cfa (REG_SP, 0);
5766 }
5767 #endif /* OBJ_ELF */
5768
5769 /* Convert REGNAME to a DWARF-2 register number. */
5770
5771 int
5772 tc_aarch64_regname_to_dw2regnum (char *regname)
5773 {
5774 const reg_entry *reg = parse_reg (&regname);
5775 if (reg == NULL)
5776 return -1;
5777
5778 switch (reg->type)
5779 {
5780 case REG_TYPE_SP_32:
5781 case REG_TYPE_SP_64:
5782 case REG_TYPE_R_32:
5783 case REG_TYPE_R_64:
5784 case REG_TYPE_FP_B:
5785 case REG_TYPE_FP_H:
5786 case REG_TYPE_FP_S:
5787 case REG_TYPE_FP_D:
5788 case REG_TYPE_FP_Q:
5789 return reg->number;
5790 default:
5791 break;
5792 }
5793 return -1;
5794 }
5795
5796 /* MD interface: Symbol and relocation handling. */
5797
5798 /* Return the address within the segment that a PC-relative fixup is
5799 relative to. For AArch64, PC-relative fixups applied to instructions
5800 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
5801
5802 long
5803 md_pcrel_from_section (fixS * fixP, segT seg)
5804 {
5805 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
5806
5807 /* If this is pc-relative and we are going to emit a relocation
5808 then we just want to put out any pipeline compensation that the linker
5809 will need. Otherwise we want to use the calculated base. */
5810 if (fixP->fx_pcrel
5811 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
5812 || aarch64_force_relocation (fixP)))
5813 base = 0;
5814
5815 /* AArch64 should be consistent for all pc-relative relocations. */
5816 return base + AARCH64_PCREL_OFFSET;
5817 }
5818
5819 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
5820 Otherwise we have no need to default the values of symbols. */
5821
5822 symbolS *
5823 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
5824 {
5825 #ifdef OBJ_ELF
5826 if (name[0] == '_' && name[1] == 'G'
5827 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
5828 {
5829 if (!GOT_symbol)
5830 {
5831 if (symbol_find (name))
5832 as_bad (_("GOT already in the symbol table"));
5833
5834 GOT_symbol = symbol_new (name, undefined_section,
5835 (valueT) 0, &zero_address_frag);
5836 }
5837
5838 return GOT_symbol;
5839 }
5840 #endif
5841
5842 return 0;
5843 }
5844
5845 /* Return non-zero if the indicated VALUE has overflowed the maximum
5846 range expressible by an unsigned number with the indicated number of
5847 BITS. */
5848
5849 static bfd_boolean
5850 unsigned_overflow (valueT value, unsigned bits)
5851 {
5852 valueT lim;
5853 if (bits >= sizeof (valueT) * 8)
5854 return FALSE;
5855 lim = (valueT) 1 << bits;
5856 return (value >= lim);
5857 }
5858
5859
5860 /* Return non-zero if the indicated VALUE has overflowed the maximum
5861 range expressible by a signed number with the indicated number of
5862 BITS. */
5863
5864 static bfd_boolean
5865 signed_overflow (offsetT value, unsigned bits)
5866 {
5867 offsetT lim;
5868 if (bits >= sizeof (offsetT) * 8)
5869 return FALSE;
5870 lim = (offsetT) 1 << (bits - 1);
5871 return (value < -lim || value >= lim);
5872 }
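/* For example, unsigned_overflow (256, 8) and signed_overflow (128, 8) both
   return TRUE, while unsigned_overflow (255, 8) and signed_overflow (-128, 8)
   return FALSE; a BITS value no smaller than the width of valueT/offsetT can
   never overflow and always yields FALSE.  */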
5873
5874 /* Given an instruction in *INSTR, which is expected to be a scaled, 12-bit,
5875 unsigned immediate offset load/store instruction, try to encode it as
5876 an unscaled, 9-bit, signed immediate offset load/store instruction.
5877 Return TRUE if it is successful; otherwise return FALSE.
5878
5879 As a programmer-friendly feature, the assembler generates LDUR/STUR
5880 instructions in response to the standard LDR/STR mnemonics when the
5881 immediate offset is unambiguous, i.e. when it is negative or unaligned. */
5882
5883 static bfd_boolean
5884 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
5885 {
5886 int idx;
5887 enum aarch64_op new_op;
5888 const aarch64_opcode *new_opcode;
5889
5890 gas_assert (instr->opcode->iclass == ldst_pos);
5891
5892 switch (instr->opcode->op)
5893 {
5894 case OP_LDRB_POS:new_op = OP_LDURB; break;
5895 case OP_STRB_POS: new_op = OP_STURB; break;
5896 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
5897 case OP_LDRH_POS: new_op = OP_LDURH; break;
5898 case OP_STRH_POS: new_op = OP_STURH; break;
5899 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
5900 case OP_LDR_POS: new_op = OP_LDUR; break;
5901 case OP_STR_POS: new_op = OP_STUR; break;
5902 case OP_LDRF_POS: new_op = OP_LDURV; break;
5903 case OP_STRF_POS: new_op = OP_STURV; break;
5904 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
5905 case OP_PRFM_POS: new_op = OP_PRFUM; break;
5906 default: new_op = OP_NIL; break;
5907 }
5908
5909 if (new_op == OP_NIL)
5910 return FALSE;
5911
5912 new_opcode = aarch64_get_opcode (new_op);
5913 gas_assert (new_opcode != NULL);
5914
5915 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
5916 instr->opcode->op, new_opcode->op);
5917
5918 aarch64_replace_opcode (instr, new_opcode);
5919
5920 /* Clear the ADDR_SIMM9 operand's qualifier; otherwise the
5921 qualifier matching may fail because the out-of-date qualifier will
5922 prevent the operand from being updated with the new, correct qualifier. */
5923 idx = aarch64_operand_index (instr->opcode->operands,
5924 AARCH64_OPND_ADDR_SIMM9);
5925 gas_assert (idx == 1);
5926 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
5927
5928 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
5929
5930 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
5931 return FALSE;
5932
5933 return TRUE;
5934 }
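/* For illustration (register names are only examples): a source line such as
   "ldr x0, [x1, #-8]" cannot use the scaled, unsigned 12-bit offset form
   because the offset is negative, so the conversion above retargets it to
   LDUR, i.e. the same encoding as writing "ldur x0, [x1, #-8]" directly;
   an unaligned offset such as "ldr x0, [x1, #1]" is handled the same way. */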
5935
5936 /* Called by fix_insn to fix a MOV immediate alias instruction.
5937
5938 The operand is for a generic move immediate instruction: an alias
5939 that generates a single MOVZ, MOVN or ORR instruction to load a
5940 32-bit/64-bit immediate value into a general register. An assembler
5941 error shall result if the immediate cannot be created by a single one
5942 of these instructions. If there is a choice, then to ensure
5943 reversibility an assembler must prefer MOVZ to MOVN, and MOVZ or MOVN to ORR. */
5944
5945 static void
5946 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
5947 {
5948 const aarch64_opcode *opcode;
5949
5950 /* Need to check if the destination is SP/ZR. The check has to be done
5951 before any aarch64_replace_opcode. */
5952 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
5953 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
5954
5955 instr->operands[1].imm.value = value;
5956 instr->operands[1].skip = 0;
5957
5958 if (try_mov_wide_p)
5959 {
5960 /* Try the MOVZ alias. */
5961 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
5962 aarch64_replace_opcode (instr, opcode);
5963 if (aarch64_opcode_encode (instr->opcode, instr,
5964 &instr->value, NULL, NULL))
5965 {
5966 put_aarch64_insn (buf, instr->value);
5967 return;
5968 }
5969 /* Try the MOVN alias. */
5970 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
5971 aarch64_replace_opcode (instr, opcode);
5972 if (aarch64_opcode_encode (instr->opcode, instr,
5973 &instr->value, NULL, NULL))
5974 {
5975 put_aarch64_insn (buf, instr->value);
5976 return;
5977 }
5978 }
5979
5980 if (try_mov_bitmask_p)
5981 {
5982 /* Try the ORR alias. */
5983 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
5984 aarch64_replace_opcode (instr, opcode);
5985 if (aarch64_opcode_encode (instr->opcode, instr,
5986 &instr->value, NULL, NULL))
5987 {
5988 put_aarch64_insn (buf, instr->value);
5989 return;
5990 }
5991 }
5992
5993 as_bad_where (fixP->fx_file, fixP->fx_line,
5994 _("immediate cannot be moved by a single instruction"));
5995 }
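/* A few examples of the preference order implemented above (register names
   are only illustrative):
     mov x0, #0x10000            -> MOVZ x0, #0x1, LSL #16
     mov x0, #-1                 -> MOVN x0, #0
     mov x0, #0x5555555555555555 -> ORR x0, xzr, #0x5555555555555555
   If none of the three aliases can represent the value, the error above
   ("immediate cannot be moved by a single instruction") is reported. */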
5996
5997 /* An immediate-related instruction operand may refer to a symbol used
5998 in the assembly, e.g.
5999
6000 mov w0, u32
6001 .set u32, 0x00ffff00
6002
6003 At the time when the assembly instruction is parsed, a referenced symbol,
6004 like 'u32' in the above example, may not have been seen; a fixS is created
6005 in such a case and is handled here after symbols have been resolved.
6006 Instruction is fixed up with VALUE using the information in *FIXP plus
6007 extra information in FLAGS.
6008
6009 This function is called by md_apply_fix to fix up instructions that need
6010 a fix-up described above but does not involve any linker-time relocation. */
6011
6012 static void
6013 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6014 {
6015 int idx;
6016 uint32_t insn;
6017 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6018 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6019 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6020
6021 if (new_inst)
6022 {
6023 /* Now the instruction is about to be fixed-up, so the operand that
6024 was previously marked as 'ignored' needs to be unmarked in order
6025 to get the encoding done properly. */
6026 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6027 new_inst->operands[idx].skip = 0;
6028 }
6029
6030 gas_assert (opnd != AARCH64_OPND_NIL);
6031
6032 switch (opnd)
6033 {
6034 case AARCH64_OPND_EXCEPTION:
6035 if (unsigned_overflow (value, 16))
6036 as_bad_where (fixP->fx_file, fixP->fx_line,
6037 _("immediate out of range"));
6038 insn = get_aarch64_insn (buf);
6039 insn |= encode_svc_imm (value);
6040 put_aarch64_insn (buf, insn);
6041 break;
6042
6043 case AARCH64_OPND_AIMM:
6044 /* ADD or SUB with immediate.
6045 NOTE this assumes we come here with an add/sub shifted reg encoding
6046 3 322|2222|2 2 2 21111 111111
6047 1 098|7654|3 2 1 09876 543210 98765 43210
6048 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6049 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6050 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6051 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6052 ->
6053 3 322|2222|2 2 221111111111
6054 1 098|7654|3 2 109876543210 98765 43210
6055 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6056 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6057 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6058 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6059 Fields sf Rn Rd are already set. */
6060 insn = get_aarch64_insn (buf);
6061 if (value < 0)
6062 {
6063 /* Add <-> sub. */
6064 insn = reencode_addsub_switch_add_sub (insn);
6065 value = -value;
6066 }
6067
6068 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6069 && unsigned_overflow (value, 12))
6070 {
6071 /* Try to shift the value by 12 to make it fit. */
6072 if (((value >> 12) << 12) == value
6073 && ! unsigned_overflow (value, 12 + 12))
6074 {
6075 value >>= 12;
6076 insn |= encode_addsub_imm_shift_amount (1);
6077 }
6078 }
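/* For example, a fix-up value of 0x12000 does not fit in 12 bits, but it
   is a multiple of 0x1000, so it is encoded as imm12 = 0x12 with LSL #12,
   just as if the programmer had written the shifted form explicitly. */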
6079
6080 if (unsigned_overflow (value, 12))
6081 as_bad_where (fixP->fx_file, fixP->fx_line,
6082 _("immediate out of range"));
6083
6084 insn |= encode_addsub_imm (value);
6085
6086 put_aarch64_insn (buf, insn);
6087 break;
6088
6089 case AARCH64_OPND_SIMD_IMM:
6090 case AARCH64_OPND_SIMD_IMM_SFT:
6091 case AARCH64_OPND_LIMM:
6092 /* Bit mask immediate. */
6093 gas_assert (new_inst != NULL);
6094 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6095 new_inst->operands[idx].imm.value = value;
6096 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6097 &new_inst->value, NULL, NULL))
6098 put_aarch64_insn (buf, new_inst->value);
6099 else
6100 as_bad_where (fixP->fx_file, fixP->fx_line,
6101 _("invalid immediate"));
6102 break;
6103
6104 case AARCH64_OPND_HALF:
6105 /* 16-bit unsigned immediate. */
6106 if (unsigned_overflow (value, 16))
6107 as_bad_where (fixP->fx_file, fixP->fx_line,
6108 _("immediate out of range"));
6109 insn = get_aarch64_insn (buf);
6110 insn |= encode_movw_imm (value & 0xffff);
6111 put_aarch64_insn (buf, insn);
6112 break;
6113
6114 case AARCH64_OPND_IMM_MOV:
6115 /* Operand for a generic move immediate instruction, which is
6116 an alias instruction that generates a single MOVZ, MOVN or ORR
6117 instruction to load a 32-bit/64-bit immediate value into a general
6118 register. An assembler error shall result if the immediate cannot be
6119 created by a single one of these instructions. If there is a choice,
6120 then to ensure reversibility an assembler must prefer MOVZ to MOVN,
6121 and MOVZ or MOVN to ORR. */
6122 gas_assert (new_inst != NULL);
6123 fix_mov_imm_insn (fixP, buf, new_inst, value);
6124 break;
6125
6126 case AARCH64_OPND_ADDR_SIMM7:
6127 case AARCH64_OPND_ADDR_SIMM9:
6128 case AARCH64_OPND_ADDR_SIMM9_2:
6129 case AARCH64_OPND_ADDR_UIMM12:
6130 /* Immediate offset in an address. */
6131 insn = get_aarch64_insn (buf);
6132
6133 gas_assert (new_inst != NULL && new_inst->value == insn);
6134 gas_assert (new_inst->opcode->operands[1] == opnd
6135 || new_inst->opcode->operands[2] == opnd);
6136
6137 /* Get the index of the address operand. */
6138 if (new_inst->opcode->operands[1] == opnd)
6139 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6140 idx = 1;
6141 else
6142 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6143 idx = 2;
6144
6145 /* Update the resolved offset value. */
6146 new_inst->operands[idx].addr.offset.imm = value;
6147
6148 /* Encode/fix-up. */
6149 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6150 &new_inst->value, NULL, NULL))
6151 {
6152 put_aarch64_insn (buf, new_inst->value);
6153 break;
6154 }
6155 else if (new_inst->opcode->iclass == ldst_pos
6156 && try_to_encode_as_unscaled_ldst (new_inst))
6157 {
6158 put_aarch64_insn (buf, new_inst->value);
6159 break;
6160 }
6161
6162 as_bad_where (fixP->fx_file, fixP->fx_line,
6163 _("immediate offset out of range"));
6164 break;
6165
6166 default:
6167 gas_assert (0);
6168 as_fatal (_("unhandled operand code %d"), opnd);
6169 }
6170 }
6171
6172 /* Apply a fixup (fixP) to segment data, once it has been determined
6173 by our caller that we have all the info we need to fix it up.
6174
6175 Parameter valP is the pointer to the value of the bits. */
6176
6177 void
6178 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6179 {
6180 offsetT value = *valP;
6181 uint32_t insn;
6182 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6183 int scale;
6184 unsigned flags = fixP->fx_addnumber;
6185
6186 DEBUG_TRACE ("\n\n");
6187 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6188 DEBUG_TRACE ("Enter md_apply_fix");
6189
6190 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6191
6192 /* Note whether this will delete the relocation. */
6193
6194 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6195 fixP->fx_done = 1;
6196
6197 /* Process the relocations. */
6198 switch (fixP->fx_r_type)
6199 {
6200 case BFD_RELOC_NONE:
6201 /* This will need to go in the object file. */
6202 fixP->fx_done = 0;
6203 break;
6204
6205 case BFD_RELOC_8:
6206 case BFD_RELOC_8_PCREL:
6207 if (fixP->fx_done || !seg->use_rela_p)
6208 md_number_to_chars (buf, value, 1);
6209 break;
6210
6211 case BFD_RELOC_16:
6212 case BFD_RELOC_16_PCREL:
6213 if (fixP->fx_done || !seg->use_rela_p)
6214 md_number_to_chars (buf, value, 2);
6215 break;
6216
6217 case BFD_RELOC_32:
6218 case BFD_RELOC_32_PCREL:
6219 if (fixP->fx_done || !seg->use_rela_p)
6220 md_number_to_chars (buf, value, 4);
6221 break;
6222
6223 case BFD_RELOC_64:
6224 case BFD_RELOC_64_PCREL:
6225 if (fixP->fx_done || !seg->use_rela_p)
6226 md_number_to_chars (buf, value, 8);
6227 break;
6228
6229 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6230 /* We claim that these fixups have been processed here, even if
6231 in fact we generate an error because we do not have a reloc
6232 for them, so tc_gen_reloc() will reject them. */
6233 fixP->fx_done = 1;
6234 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6235 {
6236 as_bad_where (fixP->fx_file, fixP->fx_line,
6237 _("undefined symbol %s used as an immediate value"),
6238 S_GET_NAME (fixP->fx_addsy));
6239 goto apply_fix_return;
6240 }
6241 fix_insn (fixP, flags, value);
6242 break;
6243
6244 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6245 if (value & 3)
6246 as_bad_where (fixP->fx_file, fixP->fx_line,
6247 _("pc-relative load offset not word aligned"));
6248 if (signed_overflow (value, 21))
6249 as_bad_where (fixP->fx_file, fixP->fx_line,
6250 _("pc-relative load offset out of range"));
6251 if (fixP->fx_done || !seg->use_rela_p)
6252 {
6253 insn = get_aarch64_insn (buf);
6254 insn |= encode_ld_lit_ofs_19 (value >> 2);
6255 put_aarch64_insn (buf, insn);
6256 }
6257 break;
6258
6259 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6260 if (signed_overflow (value, 21))
6261 as_bad_where (fixP->fx_file, fixP->fx_line,
6262 _("pc-relative address offset out of range"));
6263 if (fixP->fx_done || !seg->use_rela_p)
6264 {
6265 insn = get_aarch64_insn (buf);
6266 insn |= encode_adr_imm (value);
6267 put_aarch64_insn (buf, insn);
6268 }
6269 break;
6270
6271 case BFD_RELOC_AARCH64_BRANCH19:
6272 if (value & 3)
6273 as_bad_where (fixP->fx_file, fixP->fx_line,
6274 _("conditional branch target not word aligned"));
6275 if (signed_overflow (value, 21))
6276 as_bad_where (fixP->fx_file, fixP->fx_line,
6277 _("conditional branch out of range"));
6278 if (fixP->fx_done || !seg->use_rela_p)
6279 {
6280 insn = get_aarch64_insn (buf);
6281 insn |= encode_cond_branch_ofs_19 (value >> 2);
6282 put_aarch64_insn (buf, insn);
6283 }
6284 break;
6285
6286 case BFD_RELOC_AARCH64_TSTBR14:
6287 if (value & 3)
6288 as_bad_where (fixP->fx_file, fixP->fx_line,
6289 _("conditional branch target not word aligned"));
6290 if (signed_overflow (value, 16))
6291 as_bad_where (fixP->fx_file, fixP->fx_line,
6292 _("conditional branch out of range"));
6293 if (fixP->fx_done || !seg->use_rela_p)
6294 {
6295 insn = get_aarch64_insn (buf);
6296 insn |= encode_tst_branch_ofs_14 (value >> 2);
6297 put_aarch64_insn (buf, insn);
6298 }
6299 break;
6300
6301 case BFD_RELOC_AARCH64_JUMP26:
6302 case BFD_RELOC_AARCH64_CALL26:
6303 if (value & 3)
6304 as_bad_where (fixP->fx_file, fixP->fx_line,
6305 _("branch target not word aligned"));
6306 if (signed_overflow (value, 28))
6307 as_bad_where (fixP->fx_file, fixP->fx_line, _("branch out of range"));
6308 if (fixP->fx_done || !seg->use_rela_p)
6309 {
6310 insn = get_aarch64_insn (buf);
6311 insn |= encode_branch_ofs_26 (value >> 2);
6312 put_aarch64_insn (buf, insn);
6313 }
6314 break;
6315
6316 case BFD_RELOC_AARCH64_MOVW_G0:
6317 case BFD_RELOC_AARCH64_MOVW_G0_S:
6318 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6319 scale = 0;
6320 goto movw_common;
6321 case BFD_RELOC_AARCH64_MOVW_G1:
6322 case BFD_RELOC_AARCH64_MOVW_G1_S:
6323 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6324 scale = 16;
6325 goto movw_common;
6326 case BFD_RELOC_AARCH64_MOVW_G2:
6327 case BFD_RELOC_AARCH64_MOVW_G2_S:
6328 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6329 scale = 32;
6330 goto movw_common;
6331 case BFD_RELOC_AARCH64_MOVW_G3:
6332 scale = 48;
6333 movw_common:
6334 if (fixP->fx_done || !seg->use_rela_p)
6335 {
6336 insn = get_aarch64_insn (buf);
6337
6338 if (!fixP->fx_done)
6339 {
6340 /* REL signed addend must fit in 16 bits */
6341 if (signed_overflow (value, 16))
6342 as_bad_where (fixP->fx_file, fixP->fx_line,
6343 _("offset out of range"));
6344 }
6345 else
6346 {
6347 /* Check for overflow and scale. */
6348 switch (fixP->fx_r_type)
6349 {
6350 case BFD_RELOC_AARCH64_MOVW_G0:
6351 case BFD_RELOC_AARCH64_MOVW_G1:
6352 case BFD_RELOC_AARCH64_MOVW_G2:
6353 case BFD_RELOC_AARCH64_MOVW_G3:
6354 if (unsigned_overflow (value, scale + 16))
6355 as_bad_where (fixP->fx_file, fixP->fx_line,
6356 _("unsigned value out of range"));
6357 break;
6358 case BFD_RELOC_AARCH64_MOVW_G0_S:
6359 case BFD_RELOC_AARCH64_MOVW_G1_S:
6360 case BFD_RELOC_AARCH64_MOVW_G2_S:
6361 /* NOTE: We can only come here with movz or movn. */
6362 if (signed_overflow (value, scale + 16))
6363 as_bad_where (fixP->fx_file, fixP->fx_line,
6364 _("signed value out of range"));
6365 if (value < 0)
6366 {
6367 /* Force use of MOVN. */
6368 value = ~value;
6369 insn = reencode_movzn_to_movn (insn);
6370 }
6371 else
6372 {
6373 /* Force use of MOVZ. */
6374 insn = reencode_movzn_to_movz (insn);
6375 }
6376 break;
6377 default:
6378 /* Unchecked relocations. */
6379 break;
6380 }
6381 value >>= scale;
6382 }
6383
6384 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6385 insn |= encode_movw_imm (value & 0xffff);
6386
6387 put_aarch64_insn (buf, insn);
6388 }
6389 break;
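/* As an example of the scaling above: for BFD_RELOC_AARCH64_MOVW_G1 with a
   resolved (fx_done) value of 0x12345678, the 32-bit overflow check passes,
   the value is shifted right by 16, and 0x1234 is inserted into the
   MOVZ/MOVN/MOVK immediate field. */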
6390
6391 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6392 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6393 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6394 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6395 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6396 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6397 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6398 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6399 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6400 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6401 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6402 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6403 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE:
6404 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6405 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6406 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6407 /* Should always be exported to object file, see
6408 aarch64_force_relocation(). */
6409 gas_assert (!fixP->fx_done);
6410 gas_assert (seg->use_rela_p);
6411 break;
6412
6413 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6414 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6415 case BFD_RELOC_AARCH64_ADD_LO12:
6416 case BFD_RELOC_AARCH64_LDST8_LO12:
6417 case BFD_RELOC_AARCH64_LDST16_LO12:
6418 case BFD_RELOC_AARCH64_LDST32_LO12:
6419 case BFD_RELOC_AARCH64_LDST64_LO12:
6420 case BFD_RELOC_AARCH64_LDST128_LO12:
6421 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6422 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6423 /* Should always be exported to object file, see
6424 aarch64_force_relocation(). */
6425 gas_assert (!fixP->fx_done);
6426 gas_assert (seg->use_rela_p);
6427 break;
6428
6429 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6430 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6431 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6432 break;
6433
6434 default:
6435 as_bad_where (fixP->fx_file, fixP->fx_line,
6436 _("unexpected %s fixup"),
6437 bfd_get_reloc_code_name (fixP->fx_r_type));
6438 break;
6439 }
6440
6441 apply_fix_return:
6442 /* Free the allocated struct aarch64_inst.
6443 N.B. currently only a very limited number of fix-up types actually use
6444 this field, so the impact on performance should be minimal. */
6445 if (fixP->tc_fix_data.inst != NULL)
6446 free (fixP->tc_fix_data.inst);
6447
6448 return;
6449 }
6450
6451 /* Translate internal representation of relocation info to BFD target
6452 format. */
6453
6454 arelent *
6455 tc_gen_reloc (asection * section, fixS * fixp)
6456 {
6457 arelent *reloc;
6458 bfd_reloc_code_real_type code;
6459
6460 reloc = xmalloc (sizeof (arelent));
6461
6462 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6463 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6464 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6465
6466 if (fixp->fx_pcrel)
6467 {
6468 if (section->use_rela_p)
6469 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6470 else
6471 fixp->fx_offset = reloc->address;
6472 }
6473 reloc->addend = fixp->fx_offset;
6474
6475 code = fixp->fx_r_type;
6476 switch (code)
6477 {
6478 case BFD_RELOC_16:
6479 if (fixp->fx_pcrel)
6480 code = BFD_RELOC_16_PCREL;
6481 break;
6482
6483 case BFD_RELOC_32:
6484 if (fixp->fx_pcrel)
6485 code = BFD_RELOC_32_PCREL;
6486 break;
6487
6488 case BFD_RELOC_64:
6489 if (fixp->fx_pcrel)
6490 code = BFD_RELOC_64_PCREL;
6491 break;
6492
6493 default:
6494 break;
6495 }
6496
6497 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6498 if (reloc->howto == NULL)
6499 {
6500 as_bad_where (fixp->fx_file, fixp->fx_line,
6501 _
6502 ("cannot represent %s relocation in this object file format"),
6503 bfd_get_reloc_code_name (code));
6504 return NULL;
6505 }
6506
6507 return reloc;
6508 }
6509
6510 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6511
6512 void
6513 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6514 {
6515 bfd_reloc_code_real_type type;
6516 int pcrel = 0;
6517
6518 /* Pick a reloc.
6519 FIXME: @@ Should look at CPU word size. */
6520 switch (size)
6521 {
6522 case 1:
6523 type = BFD_RELOC_8;
6524 break;
6525 case 2:
6526 type = BFD_RELOC_16;
6527 break;
6528 case 4:
6529 type = BFD_RELOC_32;
6530 break;
6531 case 8:
6532 type = BFD_RELOC_64;
6533 break;
6534 default:
6535 as_bad (_("cannot do %u-byte relocation"), size);
6536 type = BFD_RELOC_UNUSED;
6537 break;
6538 }
6539
6540 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6541 }
6542
6543 int
6544 aarch64_force_relocation (struct fix *fixp)
6545 {
6546 switch (fixp->fx_r_type)
6547 {
6548 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6549 /* Perform these "immediate" internal relocations
6550 even if the symbol is extern or weak. */
6551 return 0;
6552
6553 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6554 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6555 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6556 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6557 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6558 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6559 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6560 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6561 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6562 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6563 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6564 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6565 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE:
6566 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6567 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6568 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6569 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6570 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6571 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6572 case BFD_RELOC_AARCH64_ADD_LO12:
6573 case BFD_RELOC_AARCH64_LDST8_LO12:
6574 case BFD_RELOC_AARCH64_LDST16_LO12:
6575 case BFD_RELOC_AARCH64_LDST32_LO12:
6576 case BFD_RELOC_AARCH64_LDST64_LO12:
6577 case BFD_RELOC_AARCH64_LDST128_LO12:
6578 /* Always leave these relocations for the linker. */
6579 return 1;
6580
6581 default:
6582 break;
6583 }
6584
6585 return generic_force_reloc (fixp);
6586 }
6587
6588 #ifdef OBJ_ELF
6589
6590 const char *
6591 elf64_aarch64_target_format (void)
6592 {
6593 if (target_big_endian)
6594 return "elf64-bigaarch64";
6595 else
6596 return "elf64-littleaarch64";
6597 }
6598
6599 void
6600 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6601 {
6602 elf_frob_symbol (symp, puntp);
6603 }
6604 #endif
6605
6606 /* MD interface: Finalization. */
6607
6608 /* A good place to do this, although this was probably not intended
6609 for this kind of use. We need to dump the literal pool before
6610 references are made to a null symbol pointer. */
6611
6612 void
6613 aarch64_cleanup (void)
6614 {
6615 literal_pool *pool;
6616
6617 for (pool = list_of_pools; pool; pool = pool->next)
6618 {
6619 /* Put it at the end of the relevant section. */
6620 subseg_set (pool->section, pool->sub_section);
6621 s_ltorg (0);
6622 }
6623 }
6624
6625 #ifdef OBJ_ELF
6626 /* Remove any excess mapping symbols generated for alignment frags in
6627 SEC. We may have created a mapping symbol before a zero byte
6628 alignment; remove it if there's a mapping symbol after the
6629 alignment. */
6630 static void
6631 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6632 void *dummy ATTRIBUTE_UNUSED)
6633 {
6634 segment_info_type *seginfo = seg_info (sec);
6635 fragS *fragp;
6636
6637 if (seginfo == NULL || seginfo->frchainP == NULL)
6638 return;
6639
6640 for (fragp = seginfo->frchainP->frch_root;
6641 fragp != NULL; fragp = fragp->fr_next)
6642 {
6643 symbolS *sym = fragp->tc_frag_data.last_map;
6644 fragS *next = fragp->fr_next;
6645
6646 /* Variable-sized frags have been converted to fixed size by
6647 this point. But if this was variable-sized to start with,
6648 there will be a fixed-size frag after it. So don't handle
6649 next == NULL. */
6650 if (sym == NULL || next == NULL)
6651 continue;
6652
6653 if (S_GET_VALUE (sym) < next->fr_address)
6654 /* Not at the end of this frag. */
6655 continue;
6656 know (S_GET_VALUE (sym) == next->fr_address);
6657
6658 do
6659 {
6660 if (next->tc_frag_data.first_map != NULL)
6661 {
6662 /* Next frag starts with a mapping symbol. Discard this
6663 one. */
6664 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6665 break;
6666 }
6667
6668 if (next->fr_next == NULL)
6669 {
6670 /* This mapping symbol is at the end of the section. Discard
6671 it. */
6672 know (next->fr_fix == 0 && next->fr_var == 0);
6673 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6674 break;
6675 }
6676
6677 /* As long as we have empty frags without any mapping symbols,
6678 keep looking. */
6679 /* If the next frag is non-empty and does not start with a
6680 mapping symbol, then this mapping symbol is required. */
6681 if (next->fr_address != next->fr_next->fr_address)
6682 break;
6683
6684 next = next->fr_next;
6685 }
6686 while (next != NULL);
6687 }
6688 }
6689 #endif
6690
6691 /* Adjust the symbol table. */
6692
6693 void
6694 aarch64_adjust_symtab (void)
6695 {
6696 #ifdef OBJ_ELF
6697 /* Remove any overlapping mapping symbols generated by alignment frags. */
6698 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
6699 /* Now do generic ELF adjustments. */
6700 elf_adjust_symtab ();
6701 #endif
6702 }
6703
6704 static void
6705 checked_hash_insert (struct hash_control *table, const char *key, void *value)
6706 {
6707 const char *hash_err;
6708
6709 hash_err = hash_insert (table, key, value);
6710 if (hash_err)
6711 printf ("Internal Error: Can't hash %s\n", key);
6712 }
6713
6714 static void
6715 fill_instruction_hash_table (void)
6716 {
6717 aarch64_opcode *opcode = aarch64_opcode_table;
6718
6719 while (opcode->name != NULL)
6720 {
6721 templates *templ, *new_templ;
6722 templ = hash_find (aarch64_ops_hsh, opcode->name);
6723
6724 new_templ = (templates *) xmalloc (sizeof (templates));
6725 new_templ->opcode = opcode;
6726 new_templ->next = NULL;
6727
6728 if (!templ)
6729 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
6730 else
6731 {
6732 new_templ->next = templ->next;
6733 templ->next = new_templ;
6734 }
6735 ++opcode;
6736 }
6737 }
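/* Note: the opcode table can contain several entries with the same mnemonic
   (e.g. different operand forms of "add"), so each hash bucket above holds a
   linked list of templates; each additional entry is spliced in immediately
   after the first template found for that name. */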
6738
6739 static inline void
6740 convert_to_upper (char *dst, const char *src, size_t num)
6741 {
6742 unsigned int i;
6743 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
6744 *dst = TOUPPER (*src);
6745 *dst = '\0';
6746 }
6747
6748 /* Assume STR points to a lower-case string; allocate, convert and return
6749 the corresponding upper-case string. */
6750 static inline const char*
6751 get_upper_str (const char *str)
6752 {
6753 char *ret;
6754 size_t len = strlen (str);
6755 if ((ret = xmalloc (len + 1)) == NULL)
6756 abort ();
6757 convert_to_upper (ret, str, len);
6758 return ret;
6759 }
6760
6761 /* MD interface: Initialization. */
6762
6763 void
6764 md_begin (void)
6765 {
6766 unsigned mach;
6767 unsigned int i;
6768
6769 if ((aarch64_ops_hsh = hash_new ()) == NULL
6770 || (aarch64_cond_hsh = hash_new ()) == NULL
6771 || (aarch64_shift_hsh = hash_new ()) == NULL
6772 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
6773 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
6774 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
6775 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
6776 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
6777 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
6778 || (aarch64_reg_hsh = hash_new ()) == NULL
6779 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
6780 || (aarch64_nzcv_hsh = hash_new ()) == NULL
6781 || (aarch64_pldop_hsh = hash_new ()) == NULL)
6782 as_fatal (_("virtual memory exhausted"));
6783
6784 fill_instruction_hash_table ();
6785
6786 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
6787 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
6788 (void *) (aarch64_sys_regs + i));
6789
6790 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
6791 checked_hash_insert (aarch64_pstatefield_hsh,
6792 aarch64_pstatefields[i].name,
6793 (void *) (aarch64_pstatefields + i));
6794
6795 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
6796 checked_hash_insert (aarch64_sys_regs_ic_hsh,
6797 aarch64_sys_regs_ic[i].template,
6798 (void *) (aarch64_sys_regs_ic + i));
6799
6800 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
6801 checked_hash_insert (aarch64_sys_regs_dc_hsh,
6802 aarch64_sys_regs_dc[i].template,
6803 (void *) (aarch64_sys_regs_dc + i));
6804
6805 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
6806 checked_hash_insert (aarch64_sys_regs_at_hsh,
6807 aarch64_sys_regs_at[i].template,
6808 (void *) (aarch64_sys_regs_at + i));
6809
6810 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
6811 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
6812 aarch64_sys_regs_tlbi[i].template,
6813 (void *) (aarch64_sys_regs_tlbi + i));
6814
6815 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
6816 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
6817 (void *) (reg_names + i));
6818
6819 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
6820 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
6821 (void *) (nzcv_names + i));
6822
6823 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
6824 {
6825 const char *name = aarch64_operand_modifiers[i].name;
6826 checked_hash_insert (aarch64_shift_hsh, name,
6827 (void *) (aarch64_operand_modifiers + i));
6828 /* Also hash the name in the upper case. */
6829 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
6830 (void *) (aarch64_operand_modifiers + i));
6831 }
6832
6833 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
6834 {
6835 unsigned int j;
6836 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
6837 the same condition code. */
6838 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
6839 {
6840 const char *name = aarch64_conds[i].names[j];
6841 if (name == NULL)
6842 break;
6843 checked_hash_insert (aarch64_cond_hsh, name,
6844 (void *) (aarch64_conds + i));
6845 /* Also hash the name in the upper case. */
6846 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
6847 (void *) (aarch64_conds + i));
6848 }
6849 }
6850
6851 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
6852 {
6853 const char *name = aarch64_barrier_options[i].name;
6854 /* Skip xx00 - the unallocated values of the barrier option field. */
6855 if ((i & 0x3) == 0)
6856 continue;
6857 checked_hash_insert (aarch64_barrier_opt_hsh, name,
6858 (void *) (aarch64_barrier_options + i));
6859 /* Also hash the name in the upper case. */
6860 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
6861 (void *) (aarch64_barrier_options + i));
6862 }
6863
6864 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
6865 {
6866 const char* name = aarch64_prfops[i].name;
6867 /* Skip 0011x, 01xxx, 1011x and 11xxx - the unallocated hint encodings
6868 as a 5-bit immediate #uimm5. */
6869 if ((i & 0xf) >= 6)
6870 continue;
6871 checked_hash_insert (aarch64_pldop_hsh, name,
6872 (void *) (aarch64_prfops + i));
6873 /* Also hash the name in the upper case. */
6874 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
6875 (void *) (aarch64_prfops + i));
6876 }
6877
6878 /* Set the cpu variant based on the command-line options. */
6879 if (!mcpu_cpu_opt)
6880 mcpu_cpu_opt = march_cpu_opt;
6881
6882 if (!mcpu_cpu_opt)
6883 mcpu_cpu_opt = &cpu_default;
6884
6885 cpu_variant = *mcpu_cpu_opt;
6886
6887 /* Record the CPU type. */
6888 mach = bfd_mach_aarch64;
6889
6890 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
6891 }
6892
6893 /* Command line processing. */
6894
6895 const char *md_shortopts = "m:";
6896
6897 #ifdef AARCH64_BI_ENDIAN
6898 #define OPTION_EB (OPTION_MD_BASE + 0)
6899 #define OPTION_EL (OPTION_MD_BASE + 1)
6900 #else
6901 #if TARGET_BYTES_BIG_ENDIAN
6902 #define OPTION_EB (OPTION_MD_BASE + 0)
6903 #else
6904 #define OPTION_EL (OPTION_MD_BASE + 1)
6905 #endif
6906 #endif
6907
6908 struct option md_longopts[] = {
6909 #ifdef OPTION_EB
6910 {"EB", no_argument, NULL, OPTION_EB},
6911 #endif
6912 #ifdef OPTION_EL
6913 {"EL", no_argument, NULL, OPTION_EL},
6914 #endif
6915 {NULL, no_argument, NULL, 0}
6916 };
6917
6918 size_t md_longopts_size = sizeof (md_longopts);
6919
6920 struct aarch64_option_table
6921 {
6922 char *option; /* Option name to match. */
6923 char *help; /* Help information. */
6924 int *var; /* Variable to change. */
6925 int value; /* What to change it to. */
6926 char *deprecated; /* If non-null, print this message. */
6927 };
6928
6929 static struct aarch64_option_table aarch64_opts[] = {
6930 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
6931 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
6932 NULL},
6933 #ifdef DEBUG_AARCH64
6934 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
6935 #endif /* DEBUG_AARCH64 */
6936 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
6937 NULL},
6938 {NULL, NULL, NULL, 0, NULL}
6939 };
6940
6941 struct aarch64_cpu_option_table
6942 {
6943 char *name;
6944 const aarch64_feature_set value;
6945 /* The canonical name of the CPU, or NULL to use NAME converted to upper
6946 case. */
6947 const char *canonical_name;
6948 };
6949
6950 /* This list should, at a minimum, contain all the cpu names
6951 recognized by GCC. */
6952 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
6953 {"all", AARCH64_ANY, NULL},
6954 {"generic", AARCH64_ARCH_V8, NULL},
6955
6956 /* These two are example CPUs supported in GCC; once we have real
6957 CPUs they will be removed. */
6958 {"example-1", AARCH64_ARCH_V8, NULL},
6959 {"example-2", AARCH64_ARCH_V8, NULL},
6960
6961 {NULL, AARCH64_ARCH_NONE, NULL}
6962 };
6963
6964 struct aarch64_arch_option_table
6965 {
6966 char *name;
6967 const aarch64_feature_set value;
6968 };
6969
6970 /* This list should, at a minimum, contain all the architecture names
6971 recognized by GCC. */
6972 static const struct aarch64_arch_option_table aarch64_archs[] = {
6973 {"all", AARCH64_ANY},
6974 {"armv8", AARCH64_ARCH_V8},
6975 {NULL, AARCH64_ARCH_NONE}
6976 };
6977
6978 /* ISA extensions. */
6979 struct aarch64_option_cpu_value_table
6980 {
6981 char *name;
6982 const aarch64_feature_set value;
6983 };
6984
6985 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
6986 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
6987 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
6988 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
6989 {NULL, AARCH64_ARCH_NONE}
6990 };
6991
6992 struct aarch64_long_option_table
6993 {
6994 char *option; /* Substring to match. */
6995 char *help; /* Help information. */
6996 int (*func) (char *subopt); /* Function to decode sub-option. */
6997 char *deprecated; /* If non-null, print this message. */
6998 };
6999
7000 static int
7001 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p)
7002 {
7003 /* We insist on extensions being added before being removed. We achieve
7004 this by using the ADDING_VALUE variable to indicate whether we are
7005 adding an extension (1) or removing it (0) and only allowing it to
7006 change in the order -1 -> 1 -> 0. */
7007 int adding_value = -1;
7008 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7009
7010 /* Copy the feature set, so that we can modify it. */
7011 *ext_set = **opt_p;
7012 *opt_p = ext_set;
7013
7014 while (str != NULL && *str != 0)
7015 {
7016 const struct aarch64_option_cpu_value_table *opt;
7017 char *ext;
7018 int optlen;
7019
7020 if (*str != '+')
7021 {
7022 as_bad (_("invalid architectural extension"));
7023 return 0;
7024 }
7025
7026 str++;
7027 ext = strchr (str, '+');
7028
7029 if (ext != NULL)
7030 optlen = ext - str;
7031 else
7032 optlen = strlen (str);
7033
7034 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7035 {
7036 if (adding_value != 0)
7037 adding_value = 0;
7038 optlen -= 2;
7039 str += 2;
7040 }
7041 else if (optlen > 0)
7042 {
7043 if (adding_value == -1)
7044 adding_value = 1;
7045 else if (adding_value != 1)
7046 {
7047 as_bad (_("must specify extensions to add before specifying "
7048 "those to remove"));
7049 return FALSE;
7050 }
7051 }
7052
7053 if (optlen == 0)
7054 {
7055 as_bad (_("missing architectural extension"));
7056 return 0;
7057 }
7058
7059 gas_assert (adding_value != -1);
7060
7061 for (opt = aarch64_features; opt->name != NULL; opt++)
7062 if (strncmp (opt->name, str, optlen) == 0)
7063 {
7064 /* Add or remove the extension. */
7065 if (adding_value)
7066 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7067 else
7068 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7069 break;
7070 }
7071
7072 if (opt->name == NULL)
7073 {
7074 as_bad (_("unknown architectural extension `%s'"), str);
7075 return 0;
7076 }
7077
7078 str = ext;
7079 };
7080
7081 return 1;
7082 }
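/* For example, a feature string of "+fp+simd+nocrypto" (as might follow
   -mcpu=generic or a .arch directive) first merges the "fp" and "simd"
   feature bits into the copied set and then clears the "crypto" bits; a
   string such as "+nocrypto+fp" is rejected because additions must precede
   removals. */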
7083
7084 static int
7085 aarch64_parse_cpu (char *str)
7086 {
7087 const struct aarch64_cpu_option_table *opt;
7088 char *ext = strchr (str, '+');
7089 size_t optlen;
7090
7091 if (ext != NULL)
7092 optlen = ext - str;
7093 else
7094 optlen = strlen (str);
7095
7096 if (optlen == 0)
7097 {
7098 as_bad (_("missing cpu name `%s'"), str);
7099 return 0;
7100 }
7101
7102 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7103 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7104 {
7105 mcpu_cpu_opt = &opt->value;
7106 if (ext != NULL)
7107 return aarch64_parse_features (ext, &mcpu_cpu_opt);
7108
7109 return 1;
7110 }
7111
7112 as_bad (_("unknown cpu `%s'"), str);
7113 return 0;
7114 }
7115
7116 static int
7117 aarch64_parse_arch (char *str)
7118 {
7119 const struct aarch64_arch_option_table *opt;
7120 char *ext = strchr (str, '+');
7121 size_t optlen;
7122
7123 if (ext != NULL)
7124 optlen = ext - str;
7125 else
7126 optlen = strlen (str);
7127
7128 if (optlen == 0)
7129 {
7130 as_bad (_("missing architecture name `%s'"), str);
7131 return 0;
7132 }
7133
7134 for (opt = aarch64_archs; opt->name != NULL; opt++)
7135 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7136 {
7137 march_cpu_opt = &opt->value;
7138 if (ext != NULL)
7139 return aarch64_parse_features (ext, &march_cpu_opt);
7140
7141 return 1;
7142 }
7143
7144 as_bad (_("unknown architecture `%s'\n"), str);
7145 return 0;
7146 }
7147
7148 static struct aarch64_long_option_table aarch64_long_opts[] = {
7149 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7150 aarch64_parse_cpu, NULL},
7151 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7152 aarch64_parse_arch, NULL},
7153 {NULL, NULL, 0, NULL}
7154 };
7155
7156 int
7157 md_parse_option (int c, char *arg)
7158 {
7159 struct aarch64_option_table *opt;
7160 struct aarch64_long_option_table *lopt;
7161
7162 switch (c)
7163 {
7164 #ifdef OPTION_EB
7165 case OPTION_EB:
7166 target_big_endian = 1;
7167 break;
7168 #endif
7169
7170 #ifdef OPTION_EL
7171 case OPTION_EL:
7172 target_big_endian = 0;
7173 break;
7174 #endif
7175
7176 case 'a':
7177 /* Listing option. Just ignore these; we don't support additional
7178 ones. */
7179 return 0;
7180
7181 default:
7182 for (opt = aarch64_opts; opt->option != NULL; opt++)
7183 {
7184 if (c == opt->option[0]
7185 && ((arg == NULL && opt->option[1] == 0)
7186 || streq (arg, opt->option + 1)))
7187 {
7188 /* If the option is deprecated, tell the user. */
7189 if (opt->deprecated != NULL)
7190 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7191 arg ? arg : "", _(opt->deprecated));
7192
7193 if (opt->var != NULL)
7194 *opt->var = opt->value;
7195
7196 return 1;
7197 }
7198 }
7199
7200 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7201 {
7202 /* These options are expected to have an argument. */
7203 if (c == lopt->option[0]
7204 && arg != NULL
7205 && strncmp (arg, lopt->option + 1,
7206 strlen (lopt->option + 1)) == 0)
7207 {
7208 /* If the option is deprecated, tell the user. */
7209 if (lopt->deprecated != NULL)
7210 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7211 _(lopt->deprecated));
7212
7213 /* Call the sub-option parser. */
7214 return lopt->func (arg + strlen (lopt->option) - 1);
7215 }
7216 }
7217
7218 return 0;
7219 }
7220
7221 return 1;
7222 }
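/* As an example of the long-option dispatch above: for "-mcpu=generic+fp",
   getopt hands us c == 'm' and arg == "cpu=generic+fp"; this matches the
   "mcpu=" entry and aarch64_parse_cpu is called with "generic+fp". */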
7223
7224 void
7225 md_show_usage (FILE * fp)
7226 {
7227 struct aarch64_option_table *opt;
7228 struct aarch64_long_option_table *lopt;
7229
7230 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7231
7232 for (opt = aarch64_opts; opt->option != NULL; opt++)
7233 if (opt->help != NULL)
7234 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7235
7236 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7237 if (lopt->help != NULL)
7238 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7239
7240 #ifdef OPTION_EB
7241 fprintf (fp, _("\
7242 -EB assemble code for a big-endian cpu\n"));
7243 #endif
7244
7245 #ifdef OPTION_EL
7246 fprintf (fp, _("\
7247 -EL assemble code for a little-endian cpu\n"));
7248 #endif
7249 }
7250
7251 /* Parse a .cpu directive. */
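/* For example, ".cpu generic+crypto" selects the generic CPU and enables the
   crypto extension from that point in the source onwards; the "all" entry is
   deliberately not accepted here. */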
7252
7253 static void
7254 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7255 {
7256 const struct aarch64_cpu_option_table *opt;
7257 char saved_char;
7258 char *name;
7259 char *ext;
7260 size_t optlen;
7261
7262 name = input_line_pointer;
7263 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7264 input_line_pointer++;
7265 saved_char = *input_line_pointer;
7266 *input_line_pointer = 0;
7267
7268 ext = strchr (name, '+');
7269
7270 if (ext != NULL)
7271 optlen = ext - name;
7272 else
7273 optlen = strlen (name);
7274
7275 /* Skip the first "all" entry. */
7276 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7277 if (strlen (opt->name) == optlen
7278 && strncmp (name, opt->name, optlen) == 0)
7279 {
7280 mcpu_cpu_opt = &opt->value;
7281 if (ext != NULL)
7282 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7283 return;
7284
7285 cpu_variant = *mcpu_cpu_opt;
7286
7287 *input_line_pointer = saved_char;
7288 demand_empty_rest_of_line ();
7289 return;
7290 }
7291 as_bad (_("unknown cpu `%s'"), name);
7292 *input_line_pointer = saved_char;
7293 ignore_rest_of_line ();
7294 }
7295
7296
7297 /* Parse a .arch directive. */
7298
7299 static void
7300 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7301 {
7302 const struct aarch64_arch_option_table *opt;
7303 char saved_char;
7304 char *name;
7305 char *ext;
7306 size_t optlen;
7307
7308 name = input_line_pointer;
7309 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7310 input_line_pointer++;
7311 saved_char = *input_line_pointer;
7312 *input_line_pointer = 0;
7313
7314 ext = strchr (name, '+');
7315
7316 if (ext != NULL)
7317 optlen = ext - name;
7318 else
7319 optlen = strlen (name);
7320
7321 /* Skip the first "all" entry. */
7322 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7323 if (strlen (opt->name) == optlen
7324 && strncmp (name, opt->name, optlen) == 0)
7325 {
7326 mcpu_cpu_opt = &opt->value;
7327 if (ext != NULL)
7328 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7329 return;
7330
7331 cpu_variant = *mcpu_cpu_opt;
7332
7333 *input_line_pointer = saved_char;
7334 demand_empty_rest_of_line ();
7335 return;
7336 }
7337
7338 as_bad (_("unknown architecture `%s'\n"), name);
7339 *input_line_pointer = saved_char;
7340 ignore_rest_of_line ();
7341 }
7342
7343 /* Copy symbol information. */
7344
7345 void
7346 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7347 {
7348 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7349 }