1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright 2009, 2010, 2011, 2012, 2013
4 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GAS.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the license, or
12 (at your option) any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING3. If not,
21 see <http://www.gnu.org/licenses/>. */
22
23 #include "as.h"
24 #include <limits.h>
25 #include <stdarg.h>
26 #include "bfd_stdint.h"
27 #define NO_RELOC 0
28 #include "safe-ctype.h"
29 #include "subsegs.h"
30 #include "obstack.h"
31
32 #ifdef OBJ_ELF
33 #include "elf/aarch64.h"
34 #include "dw2gencfi.h"
35 #endif
36
37 #include "dwarf2dbg.h"
38
39 /* Types of processor to assemble for. */
40 #ifndef CPU_DEFAULT
41 #define CPU_DEFAULT AARCH64_ARCH_V8
42 #endif
43
44 #define streq(a, b) (strcmp (a, b) == 0)
45
46 static aarch64_feature_set cpu_variant;
47
48 /* Variables that we set while parsing command-line options. Once all
49 options have been read we re-process these values to set the real
50 assembly flags. */
51 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
52 static const aarch64_feature_set *march_cpu_opt = NULL;
53
54 /* Constants for known architecture features. */
55 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
56
57 static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY;
58 static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE;
59
60 #ifdef OBJ_ELF
61 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
62 static symbolS *GOT_symbol;
63 #endif
64
65 enum neon_el_type
66 {
67 NT_invtype = -1,
68 NT_b,
69 NT_h,
70 NT_s,
71 NT_d,
72 NT_q
73 };
74
75 /* Bits for DEFINED field in neon_type_el. */
76 #define NTA_HASTYPE 1
77 #define NTA_HASINDEX 2
78
79 struct neon_type_el
80 {
81 enum neon_el_type type;
82 unsigned char defined;
83 unsigned width;
84 int64_t index;
85 };
86
87 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
88
89 struct reloc
90 {
91 bfd_reloc_code_real_type type;
92 expressionS exp;
93 int pc_rel;
94 enum aarch64_opnd opnd;
95 uint32_t flags;
96 unsigned need_libopcodes_p : 1;
97 };
98
99 struct aarch64_instruction
100 {
101 /* libopcodes structure for instruction intermediate representation. */
102 aarch64_inst base;
103 /* Record assembly errors found during the parsing. */
104 struct
105 {
106 enum aarch64_operand_error_kind kind;
107 const char *error;
108 } parsing_error;
109 /* The condition that appears in the assembly line. */
110 int cond;
111 /* Relocation information (including the GAS internal fixup). */
112 struct reloc reloc;
113 /* Need to generate an immediate in the literal pool. */
114 unsigned gen_lit_pool : 1;
115 };
116
117 typedef struct aarch64_instruction aarch64_instruction;
118
119 static aarch64_instruction inst;
120
121 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
122 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
123
124 /* Diagnostics inline function utilities.
125 
126 These are lightweight utilities which should only be called by parse_operands
127 and other parsers. GAS processes each assembly line by parsing it against
128 instruction template(s); in the case of multiple templates (for the same
129 mnemonic name), those templates are tried one by one until one succeeds or
130 all fail. An assembly line may fail a few templates before being
131 successfully parsed; an error saved here in most cases is not a user error
132 but an error indicating that the current template is not the right template.
133 Therefore it is very important that errors can be saved at a low cost during
134 the parsing; we don't want to slow down the whole parsing by recording
135 non-user errors in detail.
136 
137 Remember that the objective is to help GAS pick up the most appropriate
138 error message in the case of multiple templates, e.g. FMOV which has 8
139 templates. */
140
141 static inline void
142 clear_error (void)
143 {
144 inst.parsing_error.kind = AARCH64_OPDE_NIL;
145 inst.parsing_error.error = NULL;
146 }
147
148 static inline bfd_boolean
149 error_p (void)
150 {
151 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
152 }
153
154 static inline const char *
155 get_error_message (void)
156 {
157 return inst.parsing_error.error;
158 }
159
160 static inline void
161 set_error_message (const char *error)
162 {
163 inst.parsing_error.error = error;
164 }
165
166 static inline enum aarch64_operand_error_kind
167 get_error_kind (void)
168 {
169 return inst.parsing_error.kind;
170 }
171
172 static inline void
173 set_error_kind (enum aarch64_operand_error_kind kind)
174 {
175 inst.parsing_error.kind = kind;
176 }
177
178 static inline void
179 set_error (enum aarch64_operand_error_kind kind, const char *error)
180 {
181 inst.parsing_error.kind = kind;
182 inst.parsing_error.error = error;
183 }
184
185 static inline void
186 set_recoverable_error (const char *error)
187 {
188 set_error (AARCH64_OPDE_RECOVERABLE, error);
189 }
190
191 /* Use the DESC field of the corresponding aarch64_operand entry to compose
192 the error message. */
193 static inline void
194 set_default_error (void)
195 {
196 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
197 }
198
199 static inline void
200 set_syntax_error (const char *error)
201 {
202 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
203 }
204
205 static inline void
206 set_first_syntax_error (const char *error)
207 {
208 if (! error_p ())
209 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
210 }
211
212 static inline void
213 set_fatal_syntax_error (const char *error)
214 {
215 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
216 }
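
/* Illustrative usage sketch (not from the original sources): a typical
   operand parser records a cheap, template-relative error with one of
   the helpers above and bails out, leaving the caller to try the next
   template or report the saved message, e.g.

     if (reg == NULL)
       {
         set_default_error ();
         return PARSE_FAIL;
       }

   See parse_typed_reg further below for a real instance of this pattern;
   PARSE_FAIL is defined just below.  */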
217 \f
218 /* Number of littlenums required to hold an extended precision number. */
219 #define MAX_LITTLENUMS 6
220
221 /* Return value for certain parsers when the parsing fails; those parsers
222 return the information of the parsed result, e.g. register number, on
223 success. */
224 #define PARSE_FAIL -1
225
226 /* This is an invalid condition code that means no conditional field is
227 present. */
228 #define COND_ALWAYS 0x10
229
230 typedef struct
231 {
232 const char *template;
233 unsigned long value;
234 } asm_barrier_opt;
235
236 typedef struct
237 {
238 const char *template;
239 uint32_t value;
240 } asm_nzcv;
241
242 struct reloc_entry
243 {
244 char *name;
245 bfd_reloc_code_real_type reloc;
246 };
247
248 /* Structure for a hash table entry for a register. */
249 typedef struct
250 {
251 const char *name;
252 unsigned char number;
253 unsigned char type;
254 unsigned char builtin;
255 } reg_entry;
256
257 /* Macros to define the register types and masks for the purpose
258 of parsing. */
259
260 #undef AARCH64_REG_TYPES
261 #define AARCH64_REG_TYPES \
262 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
263 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
264 BASIC_REG_TYPE(SP_32) /* wsp */ \
265 BASIC_REG_TYPE(SP_64) /* sp */ \
266 BASIC_REG_TYPE(Z_32) /* wzr */ \
267 BASIC_REG_TYPE(Z_64) /* xzr */ \
268 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
269 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
270 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
271 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
272 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
273 BASIC_REG_TYPE(CN) /* c[0-7] */ \
274 BASIC_REG_TYPE(VN) /* v[0-31] */ \
275 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
276 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
277 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
278 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
279 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
280 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
281 /* Typecheck: any [BHSDQ]P FP. */ \
282 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
283 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
284 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
285 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
286 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
287 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
288 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
289 /* Any integer register; used for error messages only. */ \
290 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
291 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
292 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
293 /* Pseudo type to mark the end of the enumerator sequence. */ \
294 BASIC_REG_TYPE(MAX)
295
296 #undef BASIC_REG_TYPE
297 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
298 #undef MULTI_REG_TYPE
299 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
300
301 /* Register type enumerators. */
302 typedef enum
303 {
304 /* A list of REG_TYPE_*. */
305 AARCH64_REG_TYPES
306 } aarch64_reg_type;
307
308 #undef BASIC_REG_TYPE
309 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
310 #undef REG_TYPE
311 #define REG_TYPE(T) (1 << REG_TYPE_##T)
312 #undef MULTI_REG_TYPE
313 #define MULTI_REG_TYPE(T,V) V,
314
315 /* Values indexed by aarch64_reg_type to assist the type checking. */
316 static const unsigned reg_type_masks[] =
317 {
318 AARCH64_REG_TYPES
319 };
320
321 #undef BASIC_REG_TYPE
322 #undef REG_TYPE
323 #undef MULTI_REG_TYPE
324 #undef AARCH64_REG_TYPES
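
/* For example, with the macros above the mask of the R_Z_SP multi-type
   expands to the union of its basic-type bits:

     reg_type_masks[REG_TYPE_R_Z_SP]
       == (1 << REG_TYPE_R_32) | (1 << REG_TYPE_R_64)
          | (1 << REG_TYPE_SP_32) | (1 << REG_TYPE_SP_64)
          | (1 << REG_TYPE_Z_32) | (1 << REG_TYPE_Z_64)

   while a basic type such as R_32 maps to its own single bit:
   reg_type_masks[REG_TYPE_R_32] == (1 << REG_TYPE_R_32).  */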
325
326 /* Diagnostics used when we don't get a register of the expected type.
327 Note: this has to be synchronized with the aarch64_reg_type definitions
328 above. */
329 static const char *
330 get_reg_expected_msg (aarch64_reg_type reg_type)
331 {
332 const char *msg;
333
334 switch (reg_type)
335 {
336 case REG_TYPE_R_32:
337 msg = N_("integer 32-bit register expected");
338 break;
339 case REG_TYPE_R_64:
340 msg = N_("integer 64-bit register expected");
341 break;
342 case REG_TYPE_R_N:
343 msg = N_("integer register expected");
344 break;
345 case REG_TYPE_R_Z_SP:
346 msg = N_("integer, zero or SP register expected");
347 break;
348 case REG_TYPE_FP_B:
349 msg = N_("8-bit SIMD scalar register expected");
350 break;
351 case REG_TYPE_FP_H:
352 msg = N_("16-bit SIMD scalar or floating-point half precision "
353 "register expected");
354 break;
355 case REG_TYPE_FP_S:
356 msg = N_("32-bit SIMD scalar or floating-point single precision "
357 "register expected");
358 break;
359 case REG_TYPE_FP_D:
360 msg = N_("64-bit SIMD scalar or floating-point double precision "
361 "register expected");
362 break;
363 case REG_TYPE_FP_Q:
364 msg = N_("128-bit SIMD scalar or floating-point quad precision "
365 "register expected");
366 break;
367 case REG_TYPE_CN:
368 msg = N_("C0 - C15 expected");
369 break;
370 case REG_TYPE_R_Z_BHSDQ_V:
371 msg = N_("register expected");
372 break;
373 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
374 msg = N_("SIMD scalar or floating-point register expected");
375 break;
376 case REG_TYPE_VN: /* any V reg */
377 msg = N_("vector register expected");
378 break;
379 default:
380 as_fatal (_("invalid register type %d"), reg_type);
381 }
382 return msg;
383 }
384
385 /* Some well known registers that we refer to directly elsewhere. */
386 #define REG_SP 31
387
388 /* Instructions take 4 bytes in the object file. */
389 #define INSN_SIZE 4
390
391 /* Define some common error messages. */
392 #define BAD_SP _("SP not allowed here")
393
394 static struct hash_control *aarch64_ops_hsh;
395 static struct hash_control *aarch64_cond_hsh;
396 static struct hash_control *aarch64_shift_hsh;
397 static struct hash_control *aarch64_sys_regs_hsh;
398 static struct hash_control *aarch64_pstatefield_hsh;
399 static struct hash_control *aarch64_sys_regs_ic_hsh;
400 static struct hash_control *aarch64_sys_regs_dc_hsh;
401 static struct hash_control *aarch64_sys_regs_at_hsh;
402 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
403 static struct hash_control *aarch64_reg_hsh;
404 static struct hash_control *aarch64_barrier_opt_hsh;
405 static struct hash_control *aarch64_nzcv_hsh;
406 static struct hash_control *aarch64_pldop_hsh;
407
408 /* Stuff needed to resolve the label ambiguity
409 As:
410 ...
411 label: <insn>
412 may differ from:
413 ...
414 label:
415 <insn> */
416
417 static symbolS *last_label_seen;
418
419 /* Literal pool structure. Held on a per-section
420 and per-sub-section basis. */
421
422 #define MAX_LITERAL_POOL_SIZE 1024
423 typedef struct literal_pool
424 {
425 expressionS literals[MAX_LITERAL_POOL_SIZE];
426 unsigned int next_free_entry;
427 unsigned int id;
428 symbolS *symbol;
429 segT section;
430 subsegT sub_section;
431 int size;
432 struct literal_pool *next;
433 } literal_pool;
434
435 /* Pointer to a linked list of literal pools. */
436 static literal_pool *list_of_pools = NULL;
437 \f
438 /* Pure syntax. */
439
440 /* This array holds the chars that always start a comment. If the
441 pre-processor is disabled, these aren't very useful. */
442 const char comment_chars[] = "";
443
444 /* This array holds the chars that only start a comment at the beginning of
445 a line. If the line seems to have the form '# 123 filename'
446 .line and .file directives will appear in the pre-processed output. */
447 /* Note that input_file.c hand checks for '#' at the beginning of the
448 first line of the input file. This is because the compiler outputs
449 #NO_APP at the beginning of its output. */
450 /* Also note that comments like this one will always work. */
451 const char line_comment_chars[] = "#";
452
453 const char line_separator_chars[] = ";";
454
455 /* Chars that can be used to separate the mantissa
456 from the exponent in floating point numbers. */
457 const char EXP_CHARS[] = "eE";
458
459 /* Chars that mean this number is a floating point constant. */
460 /* As in 0f12.456 */
461 /* or 0d1.2345e12 */
462
463 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
464
465 /* Prefix character that indicates the start of an immediate value. */
466 #define is_immediate_prefix(C) ((C) == '#')
467
468 /* Separator character handling. */
469
470 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
471
472 static inline bfd_boolean
473 skip_past_char (char **str, char c)
474 {
475 if (**str == c)
476 {
477 (*str)++;
478 return TRUE;
479 }
480 else
481 return FALSE;
482 }
483
484 #define skip_past_comma(str) skip_past_char (str, ',')
485
486 /* Arithmetic expressions (possibly involving symbols). */
487
488 static bfd_boolean in_my_get_expression_p = FALSE;
489
490 /* Third argument to my_get_expression. */
491 #define GE_NO_PREFIX 0
492 #define GE_OPT_PREFIX 1
493
494 /* Return TRUE if the string pointed to by *STR is successfully parsed
495 as a valid expression; *EP will be filled with the information of
496 such an expression. Otherwise return FALSE. */
497
498 static bfd_boolean
499 my_get_expression (expressionS * ep, char **str, int prefix_mode,
500 int reject_absent)
501 {
502 char *save_in;
503 segT seg;
504 int prefix_present_p = 0;
505
506 switch (prefix_mode)
507 {
508 case GE_NO_PREFIX:
509 break;
510 case GE_OPT_PREFIX:
511 if (is_immediate_prefix (**str))
512 {
513 (*str)++;
514 prefix_present_p = 1;
515 }
516 break;
517 default:
518 abort ();
519 }
520
521 memset (ep, 0, sizeof (expressionS));
522
523 save_in = input_line_pointer;
524 input_line_pointer = *str;
525 in_my_get_expression_p = TRUE;
526 seg = expression (ep);
527 in_my_get_expression_p = FALSE;
528
529 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
530 {
531 /* We found a bad expression in md_operand(). */
532 *str = input_line_pointer;
533 input_line_pointer = save_in;
534 if (prefix_present_p && ! error_p ())
535 set_fatal_syntax_error (_("bad expression"));
536 else
537 set_first_syntax_error (_("bad expression"));
538 return FALSE;
539 }
540
541 #ifdef OBJ_AOUT
542 if (seg != absolute_section
543 && seg != text_section
544 && seg != data_section
545 && seg != bss_section && seg != undefined_section)
546 {
547 set_syntax_error (_("bad segment"));
548 *str = input_line_pointer;
549 input_line_pointer = save_in;
550 return FALSE;
551 }
552 #else
553 (void) seg;
554 #endif
555
556 *str = input_line_pointer;
557 input_line_pointer = save_in;
558 return TRUE;
559 }
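
/* For example, with GE_OPT_PREFIX both "#16" and "16" are accepted and
   parse to the same expression, since the optional '#' is skipped before
   expression () is called; with GE_NO_PREFIX no prefix character is
   consumed.  The only other effect of a '#' prefix is that a subsequent
   parse failure is recorded as a fatal syntax error instead of a first
   syntax error.  */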
560
561 /* Turn a string in input_line_pointer into a floating point constant
562 of type TYPE, and store the appropriate bytes in *LITP. The number
563 of LITTLENUMS emitted is stored in *SIZEP. An error message is
564 returned, or NULL on OK. */
565
566 char *
567 md_atof (int type, char *litP, int *sizeP)
568 {
569 return ieee_md_atof (type, litP, sizeP, target_big_endian);
570 }
571
572 /* We handle all bad expressions here, so that we can report the faulty
573 instruction in the error message. */
574 void
575 md_operand (expressionS * exp)
576 {
577 if (in_my_get_expression_p)
578 exp->X_op = O_illegal;
579 }
580
581 /* Immediate values. */
582
583 /* Errors may be set multiple times during parsing or bit encoding
584 (particularly in the Neon bits), but usually the earliest error which is set
585 will be the most meaningful. Avoid overwriting it with later (cascading)
586 errors by calling this function. */
587
588 static void
589 first_error (const char *error)
590 {
591 if (! error_p ())
592 set_syntax_error (error);
593 }
594
595 /* Similar to first_error, but this function accepts a formatted error
596 message. */
597 static void
598 first_error_fmt (const char *format, ...)
599 {
600 va_list args;
601 enum
602 { size = 100 };
603 /* N.B. this single buffer will not cause error messages for different
604 instructions to pollute each other; this is because at the end of
605 processing of each assembly line, any error message will be
606 collected by as_bad. */
607 static char buffer[size];
608
609 if (! error_p ())
610 {
611 int ret ATTRIBUTE_UNUSED;
612 va_start (args, format);
613 ret = vsnprintf (buffer, size, format, args);
614 know (ret <= size - 1 && ret >= 0);
615 va_end (args);
616 set_syntax_error (buffer);
617 }
618 }
619
620 /* Register parsing. */
621
622 /* Generic register parser which is called by other specialized
623 register parsers.
624 CCP points to what should be the beginning of a register name.
625 If it is indeed a valid register name, advance CCP over it and
626 return the reg_entry structure; otherwise return NULL.
627 It does not issue diagnostics. */
628
629 static reg_entry *
630 parse_reg (char **ccp)
631 {
632 char *start = *ccp;
633 char *p;
634 reg_entry *reg;
635
636 #ifdef REGISTER_PREFIX
637 if (*start != REGISTER_PREFIX)
638 return NULL;
639 start++;
640 #endif
641
642 p = start;
643 if (!ISALPHA (*p) || !is_name_beginner (*p))
644 return NULL;
645
646 do
647 p++;
648 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
649
650 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
651
652 if (!reg)
653 return NULL;
654
655 *ccp = p;
656 return reg;
657 }
658
659 /* Return TRUE if REG->TYPE is compatible with the required type TYPE;
660 otherwise return FALSE. */
661 static bfd_boolean
662 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
663 {
664 if (reg->type == type)
665 return TRUE;
666
667 switch (type)
668 {
669 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
670 case REG_TYPE_R_Z_SP: /* Integer reg (inc {W}SP inc [WX]ZR). */
671 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
672 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
673 case REG_TYPE_VN: /* Vector register. */
674 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
675 return ((reg_type_masks[reg->type] & reg_type_masks[type])
676 == reg_type_masks[reg->type]);
677 default:
678 as_fatal ("unhandled type %d", type);
679 abort ();
680 }
681 }
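
/* Worked example based on the masks above: a w register (REG_TYPE_R_32)
   passes a check against REG_TYPE_R_Z_SP because the R_32 bit is part of
   that type's mask, but fails a check against REG_TYPE_R64_SP, whose
   mask only covers R_64 and SP_64.  */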
682
683 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
684 Return the register number otherwise. *ISREG32 is set to one if the
685 register is 32-bit wide; *ISREGZERO is set to one if the register is
686 of type Z_32 or Z_64.
687 Note that this function does not issue any diagnostics. */
688
689 static int
690 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
691 int *isreg32, int *isregzero)
692 {
693 char *str = *ccp;
694 const reg_entry *reg = parse_reg (&str);
695
696 if (reg == NULL)
697 return PARSE_FAIL;
698
699 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
700 return PARSE_FAIL;
701
702 switch (reg->type)
703 {
704 case REG_TYPE_SP_32:
705 case REG_TYPE_SP_64:
706 if (reject_sp)
707 return PARSE_FAIL;
708 *isreg32 = reg->type == REG_TYPE_SP_32;
709 *isregzero = 0;
710 break;
711 case REG_TYPE_R_32:
712 case REG_TYPE_R_64:
713 *isreg32 = reg->type == REG_TYPE_R_32;
714 *isregzero = 0;
715 break;
716 case REG_TYPE_Z_32:
717 case REG_TYPE_Z_64:
718 if (reject_rz)
719 return PARSE_FAIL;
720 *isreg32 = reg->type == REG_TYPE_Z_32;
721 *isregzero = 1;
722 break;
723 default:
724 return PARSE_FAIL;
725 }
726
727 *ccp = str;
728
729 return reg->number;
730 }
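
/* Example behaviour of the parser above: "wzr" with REJECT_RZ clear sets
   *ISREG32 = 1 and *ISREGZERO = 1 and returns the number recorded in the
   register table (31 for the zero and stack-pointer registers defined
   later in this file), whereas "sp" with REJECT_SP set yields
   PARSE_FAIL.  */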
731
732 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
733 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
734 otherwise return FALSE.
735
736 Accept only one occurrence of:
737 8b 16b 4h 8h 2s 4s 1d 2d
738 b h s d q */
739 static bfd_boolean
740 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
741 {
742 char *ptr = *str;
743 unsigned width;
744 unsigned element_size;
745 enum neon_el_type type;
746
747 /* skip '.' */
748 ptr++;
749
750 if (!ISDIGIT (*ptr))
751 {
752 width = 0;
753 goto elt_size;
754 }
755 width = strtoul (ptr, &ptr, 10);
756 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
757 {
758 first_error_fmt (_("bad size %d in vector width specifier"), width);
759 return FALSE;
760 }
761
762 elt_size:
763 switch (TOLOWER (*ptr))
764 {
765 case 'b':
766 type = NT_b;
767 element_size = 8;
768 break;
769 case 'h':
770 type = NT_h;
771 element_size = 16;
772 break;
773 case 's':
774 type = NT_s;
775 element_size = 32;
776 break;
777 case 'd':
778 type = NT_d;
779 element_size = 64;
780 break;
781 case 'q':
782 if (width == 1)
783 {
784 type = NT_q;
785 element_size = 128;
786 break;
787 }
788 /* fall through. */
789 default:
790 if (*ptr != '\0')
791 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
792 else
793 first_error (_("missing element size"));
794 return FALSE;
795 }
796 if (width != 0 && width * element_size != 64 && width * element_size != 128)
797 {
798 first_error_fmt (_
799 ("invalid element size %d and vector size combination %c"),
800 width, *ptr);
801 return FALSE;
802 }
803 ptr++;
804
805 parsed_type->type = type;
806 parsed_type->width = width;
807
808 *str = ptr;
809
810 return TRUE;
811 }
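
/* Examples for the qualifier parser above: ".8b" (8 x 8 = 64 bits) and
   ".4s" (4 x 32 = 128 bits) are accepted, as are the bare element sizes
   ".b", ".h", ".s", ".d" with WIDTH left as 0 and ".1q" for the 128-bit
   quad; ".3s" is rejected by the width check and ".2h" (2 x 16 = 32
   bits) fails the final 64/128-bit total-size check.  */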
812
813 /* Parse a single type, e.g. ".8b", leading period included.
814 Only applicable to Vn registers.
815
816 Return TRUE on success; otherwise return FALSE. */
817 static bfd_boolean
818 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
819 {
820 char *str = *ccp;
821
822 if (*str == '.')
823 {
824 if (! parse_neon_type_for_operand (vectype, &str))
825 {
826 first_error (_("vector type expected"));
827 return FALSE;
828 }
829 }
830 else
831 return FALSE;
832
833 *ccp = str;
834
835 return TRUE;
836 }
837
838 /* Parse a register of the type TYPE.
839
840 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
841 name or the parsed register is not of TYPE.
842
843 Otherwise return the register number, and optionally fill in the actual
844 type of the register in *RTYPE when multiple alternatives were given, and
845 return the register shape and element index information in *TYPEINFO.
846
847 IN_REG_LIST should be set with TRUE if the caller is parsing a register
848 list. */
849
850 static int
851 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
852 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
853 {
854 char *str = *ccp;
855 const reg_entry *reg = parse_reg (&str);
856 struct neon_type_el atype;
857 struct neon_type_el parsetype;
858 bfd_boolean is_typed_vecreg = FALSE;
859
860 atype.defined = 0;
861 atype.type = NT_invtype;
862 atype.width = -1;
863 atype.index = 0;
864
865 if (reg == NULL)
866 {
867 if (typeinfo)
868 *typeinfo = atype;
869 set_default_error ();
870 return PARSE_FAIL;
871 }
872
873 if (! aarch64_check_reg_type (reg, type))
874 {
875 DEBUG_TRACE ("reg type check failed");
876 set_default_error ();
877 return PARSE_FAIL;
878 }
879 type = reg->type;
880
881 if (type == REG_TYPE_VN
882 && parse_neon_operand_type (&parsetype, &str))
883 {
884 /* Register is of the form Vn.[bhsdq]. */
885 is_typed_vecreg = TRUE;
886
887 if (parsetype.width == 0)
888 /* Expect index. In the new scheme we cannot have
889 Vn.[bhsdq] represent a scalar. Therefore any
890 Vn.[bhsdq] should have an index following it.
891 Except in register lists, of course. */
892 atype.defined |= NTA_HASINDEX;
893 else
894 atype.defined |= NTA_HASTYPE;
895
896 atype.type = parsetype.type;
897 atype.width = parsetype.width;
898 }
899
900 if (skip_past_char (&str, '['))
901 {
902 expressionS exp;
903
904 /* Reject Sn[index] syntax. */
905 if (!is_typed_vecreg)
906 {
907 first_error (_("this type of register can't be indexed"));
908 return PARSE_FAIL;
909 }
910
911 if (in_reg_list == TRUE)
912 {
913 first_error (_("index not allowed inside register list"));
914 return PARSE_FAIL;
915 }
916
917 atype.defined |= NTA_HASINDEX;
918
919 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
920
921 if (exp.X_op != O_constant)
922 {
923 first_error (_("constant expression required"));
924 return PARSE_FAIL;
925 }
926
927 if (! skip_past_char (&str, ']'))
928 return PARSE_FAIL;
929
930 atype.index = exp.X_add_number;
931 }
932 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
933 {
934 /* Indexed vector register expected. */
935 first_error (_("indexed vector register expected"));
936 return PARSE_FAIL;
937 }
938
939 /* A vector reg Vn should be typed or indexed. */
940 if (type == REG_TYPE_VN && atype.defined == 0)
941 {
942 first_error (_("invalid use of vector register"));
943 }
944
945 if (typeinfo)
946 *typeinfo = atype;
947
948 if (rtype)
949 *rtype = type;
950
951 *ccp = str;
952
953 return reg->number;
954 }
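
/* Worked examples for parse_typed_reg outside a register list: "v2.4s"
   yields register number 2 with NTA_HASTYPE, type NT_s and width 4;
   "v3.s[1]" yields register number 3 with NTA_HASINDEX, width 0 and
   index 1; a bare "v4.s" is rejected because a Vn.[bhsdq] scalar form
   must be followed by an index.  */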
955
956 /* Parse register.
957
958 Return the register number on success; return PARSE_FAIL otherwise.
959
960 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
961 the register (e.g. NEON double or quad reg when either has been requested).
962
963 If this is a NEON vector register with additional type information, fill
964 in the struct pointed to by VECTYPE (if non-NULL).
965
966 This parser does not handle register lists. */
967
968 static int
969 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
970 aarch64_reg_type *rtype, struct neon_type_el *vectype)
971 {
972 struct neon_type_el atype;
973 char *str = *ccp;
974 int reg = parse_typed_reg (&str, type, rtype, &atype,
975 /*in_reg_list= */ FALSE);
976
977 if (reg == PARSE_FAIL)
978 return PARSE_FAIL;
979
980 if (vectype)
981 *vectype = atype;
982
983 *ccp = str;
984
985 return reg;
986 }
987
988 static inline bfd_boolean
989 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
990 {
991 return
992 e1.type == e2.type
993 && e1.defined == e2.defined
994 && e1.width == e2.width && e1.index == e2.index;
995 }
996
997 /* This function parses the NEON register list. On success, it returns
998 the parsed register list information in the following encoded format:
999
1000 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1001 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg - 1
1002
1003 The information of the register shape and/or index is returned in
1004 *VECTYPE.
1005
1006 It returns PARSE_FAIL if the register list is invalid.
1007
1008 The list contains one to four registers.
1009 Each register can be one of:
1010 <Vt>.<T>[<index>]
1011 <Vt>.<T>
1012 All <T> should be identical.
1013 All <index> should be identical.
1014 There are restrictions on <Vt> numbers which are checked later
1015 (by reg_list_valid_p). */
1016
1017 static int
1018 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1019 {
1020 char *str = *ccp;
1021 int nb_regs;
1022 struct neon_type_el typeinfo, typeinfo_first;
1023 int val, val_range;
1024 int in_range;
1025 int ret_val;
1026 int i;
1027 bfd_boolean error = FALSE;
1028 bfd_boolean expect_index = FALSE;
1029
1030 if (*str != '{')
1031 {
1032 set_syntax_error (_("expecting {"));
1033 return PARSE_FAIL;
1034 }
1035 str++;
1036
1037 nb_regs = 0;
1038 typeinfo_first.defined = 0;
1039 typeinfo_first.type = NT_invtype;
1040 typeinfo_first.width = -1;
1041 typeinfo_first.index = 0;
1042 ret_val = 0;
1043 val = -1;
1044 val_range = -1;
1045 in_range = 0;
1046 do
1047 {
1048 if (in_range)
1049 {
1050 str++; /* skip over '-' */
1051 val_range = val;
1052 }
1053 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1054 /*in_reg_list= */ TRUE);
1055 if (val == PARSE_FAIL)
1056 {
1057 set_first_syntax_error (_("invalid vector register in list"));
1058 error = TRUE;
1059 continue;
1060 }
1061 /* reject [bhsd]n */
1062 if (typeinfo.defined == 0)
1063 {
1064 set_first_syntax_error (_("invalid scalar register in list"));
1065 error = TRUE;
1066 continue;
1067 }
1068
1069 if (typeinfo.defined & NTA_HASINDEX)
1070 expect_index = TRUE;
1071
1072 if (in_range)
1073 {
1074 if (val < val_range)
1075 {
1076 set_first_syntax_error
1077 (_("invalid range in vector register list"));
1078 error = TRUE;
1079 }
1080 val_range++;
1081 }
1082 else
1083 {
1084 val_range = val;
1085 if (nb_regs == 0)
1086 typeinfo_first = typeinfo;
1087 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1088 {
1089 set_first_syntax_error
1090 (_("type mismatch in vector register list"));
1091 error = TRUE;
1092 }
1093 }
1094 if (! error)
1095 for (i = val_range; i <= val; i++)
1096 {
1097 ret_val |= i << (5 * nb_regs);
1098 nb_regs++;
1099 }
1100 in_range = 0;
1101 }
1102 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1103
1104 skip_whitespace (str);
1105 if (*str != '}')
1106 {
1107 set_first_syntax_error (_("end of vector register list not found"));
1108 error = TRUE;
1109 }
1110 str++;
1111
1112 skip_whitespace (str);
1113
1114 if (expect_index)
1115 {
1116 if (skip_past_char (&str, '['))
1117 {
1118 expressionS exp;
1119
1120 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1121 if (exp.X_op != O_constant)
1122 {
1123 set_first_syntax_error (_("constant expression required."));
1124 error = TRUE;
1125 }
1126 if (! skip_past_char (&str, ']'))
1127 error = TRUE;
1128 else
1129 typeinfo_first.index = exp.X_add_number;
1130 }
1131 else
1132 {
1133 set_first_syntax_error (_("expected index"));
1134 error = TRUE;
1135 }
1136 }
1137
1138 if (nb_regs > 4)
1139 {
1140 set_first_syntax_error (_("too many registers in vector register list"));
1141 error = TRUE;
1142 }
1143 else if (nb_regs == 0)
1144 {
1145 set_first_syntax_error (_("empty vector register list"));
1146 error = TRUE;
1147 }
1148
1149 *ccp = str;
1150 if (! error)
1151 *vectype = typeinfo_first;
1152
1153 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1154 }
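
/* Worked example of the encoding returned above: "{v2.8b - v5.8b}"
   parses as registers 2, 3, 4 and 5, so the function returns
   (2 << 2) | (3 << 7) | (4 << 12) | (5 << 17) | (4 - 1) == 0xa418b,
   i.e. the four register numbers in consecutive 5-bit fields starting
   at bit 2 and the register count minus one in bits 0-1.  */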
1155
1156 /* Directives: register aliases. */
1157
1158 static reg_entry *
1159 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1160 {
1161 reg_entry *new;
1162 const char *name;
1163
1164 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1165 {
1166 if (new->builtin)
1167 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1168 str);
1169
1170 /* Only warn about a redefinition if it's not defined as the
1171 same register. */
1172 else if (new->number != number || new->type != type)
1173 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1174
1175 return NULL;
1176 }
1177
1178 name = xstrdup (str);
1179 new = xmalloc (sizeof (reg_entry));
1180
1181 new->name = name;
1182 new->number = number;
1183 new->type = type;
1184 new->builtin = FALSE;
1185
1186 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1187 abort ();
1188
1189 return new;
1190 }
1191
1192 /* Look for the .req directive. This is of the form:
1193
1194 new_register_name .req existing_register_name
1195
1196 If we find one, or if it looks sufficiently like one that we want to
1197 handle any error here, return TRUE. Otherwise return FALSE. */
1198
1199 static bfd_boolean
1200 create_register_alias (char *newname, char *p)
1201 {
1202 const reg_entry *old;
1203 char *oldname, *nbuf;
1204 size_t nlen;
1205
1206 /* The input scrubber ensures that whitespace after the mnemonic is
1207 collapsed to single spaces. */
1208 oldname = p;
1209 if (strncmp (oldname, " .req ", 6) != 0)
1210 return FALSE;
1211
1212 oldname += 6;
1213 if (*oldname == '\0')
1214 return FALSE;
1215
1216 old = hash_find (aarch64_reg_hsh, oldname);
1217 if (!old)
1218 {
1219 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1220 return TRUE;
1221 }
1222
1223 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1224 the desired alias name, and p points to its end. If not, then
1225 the desired alias name is in the global original_case_string. */
1226 #ifdef TC_CASE_SENSITIVE
1227 nlen = p - newname;
1228 #else
1229 newname = original_case_string;
1230 nlen = strlen (newname);
1231 #endif
1232
1233 nbuf = alloca (nlen + 1);
1234 memcpy (nbuf, newname, nlen);
1235 nbuf[nlen] = '\0';
1236
1237 /* Create aliases under the new name as stated; an all-lowercase
1238 version of the new name; and an all-uppercase version of the new
1239 name. */
1240 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1241 {
1242 for (p = nbuf; *p; p++)
1243 *p = TOUPPER (*p);
1244
1245 if (strncmp (nbuf, newname, nlen))
1246 {
1247 /* If this attempt to create an additional alias fails, do not bother
1248 trying to create the all-lower case alias. We will fail and issue
1249 a second, duplicate error message. This situation arises when the
1250 programmer does something like:
1251 foo .req r0
1252 Foo .req r1
1253 The second .req creates the "Foo" alias but then fails to create
1254 the artificial FOO alias because it has already been created by the
1255 first .req. */
1256 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1257 return TRUE;
1258 }
1259
1260 for (p = nbuf; *p; p++)
1261 *p = TOLOWER (*p);
1262
1263 if (strncmp (nbuf, newname, nlen))
1264 insert_reg_alias (nbuf, old->number, old->type);
1265 }
1266
1267 return TRUE;
1268 }
1269
1270 /* Should never be called, as .req goes between the alias and the
1271 register name, not at the beginning of the line. */
1272 static void
1273 s_req (int a ATTRIBUTE_UNUSED)
1274 {
1275 as_bad (_("invalid syntax for .req directive"));
1276 }
1277
1278 /* The .unreq directive deletes an alias which was previously defined
1279 by .req. For example:
1280
1281 my_alias .req r11
1282 .unreq my_alias */
1283
1284 static void
1285 s_unreq (int a ATTRIBUTE_UNUSED)
1286 {
1287 char *name;
1288 char saved_char;
1289
1290 name = input_line_pointer;
1291
1292 while (*input_line_pointer != 0
1293 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1294 ++input_line_pointer;
1295
1296 saved_char = *input_line_pointer;
1297 *input_line_pointer = 0;
1298
1299 if (!*name)
1300 as_bad (_("invalid syntax for .unreq directive"));
1301 else
1302 {
1303 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1304
1305 if (!reg)
1306 as_bad (_("unknown register alias '%s'"), name);
1307 else if (reg->builtin)
1308 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1309 name);
1310 else
1311 {
1312 char *p;
1313 char *nbuf;
1314
1315 hash_delete (aarch64_reg_hsh, name, FALSE);
1316 free ((char *) reg->name);
1317 free (reg);
1318
1319 /* Also locate the all upper case and all lower case versions.
1320 Do not complain if we cannot find one or the other as it
1321 was probably deleted above. */
1322
1323 nbuf = strdup (name);
1324 for (p = nbuf; *p; p++)
1325 *p = TOUPPER (*p);
1326 reg = hash_find (aarch64_reg_hsh, nbuf);
1327 if (reg)
1328 {
1329 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1330 free ((char *) reg->name);
1331 free (reg);
1332 }
1333
1334 for (p = nbuf; *p; p++)
1335 *p = TOLOWER (*p);
1336 reg = hash_find (aarch64_reg_hsh, nbuf);
1337 if (reg)
1338 {
1339 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1340 free ((char *) reg->name);
1341 free (reg);
1342 }
1343
1344 free (nbuf);
1345 }
1346 }
1347
1348 *input_line_pointer = saved_char;
1349 demand_empty_rest_of_line ();
1350 }
1351
1352 /* Directives: Instruction set selection. */
1353
1354 #ifdef OBJ_ELF
1355 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1356 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1357 Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
1358 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1359
1360 /* Create a new mapping symbol for the transition to STATE. */
1361
1362 static void
1363 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1364 {
1365 symbolS *symbolP;
1366 const char *symname;
1367 int type;
1368
1369 switch (state)
1370 {
1371 case MAP_DATA:
1372 symname = "$d";
1373 type = BSF_NO_FLAGS;
1374 break;
1375 case MAP_INSN:
1376 symname = "$x";
1377 type = BSF_NO_FLAGS;
1378 break;
1379 default:
1380 abort ();
1381 }
1382
1383 symbolP = symbol_new (symname, now_seg, value, frag);
1384 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1385
1386 /* Save the mapping symbols for future reference. Also check that
1387 we do not place two mapping symbols at the same offset within a
1388 frag. We'll handle overlap between frags in
1389 check_mapping_symbols.
1390
1391 If .fill or another data-filling directive generates zero-sized data,
1392 the mapping symbol for the following code will have the same value
1393 as the one generated for the data filling directive. In this case,
1394 we replace the old symbol with the new one at the same address. */
1395 if (value == 0)
1396 {
1397 if (frag->tc_frag_data.first_map != NULL)
1398 {
1399 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1400 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1401 &symbol_lastP);
1402 }
1403 frag->tc_frag_data.first_map = symbolP;
1404 }
1405 if (frag->tc_frag_data.last_map != NULL)
1406 {
1407 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1408 S_GET_VALUE (symbolP));
1409 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1410 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1411 &symbol_lastP);
1412 }
1413 frag->tc_frag_data.last_map = symbolP;
1414 }
1415
1416 /* We must sometimes convert a region marked as code to data during
1417 code alignment, if an odd number of bytes have to be padded. The
1418 code mapping symbol is pushed to an aligned address. */
1419
1420 static void
1421 insert_data_mapping_symbol (enum mstate state,
1422 valueT value, fragS * frag, offsetT bytes)
1423 {
1424 /* If there was already a mapping symbol, remove it. */
1425 if (frag->tc_frag_data.last_map != NULL
1426 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1427 frag->fr_address + value)
1428 {
1429 symbolS *symp = frag->tc_frag_data.last_map;
1430
1431 if (value == 0)
1432 {
1433 know (frag->tc_frag_data.first_map == symp);
1434 frag->tc_frag_data.first_map = NULL;
1435 }
1436 frag->tc_frag_data.last_map = NULL;
1437 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1438 }
1439
1440 make_mapping_symbol (MAP_DATA, value, frag);
1441 make_mapping_symbol (state, value + bytes, frag);
1442 }
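
/* For example, when 2 bytes of padding are emitted at offset VALUE to
   align code, the routine above places a "$d" mapping symbol at the
   start of the padding and a STATE ("$x") symbol at VALUE + 2, so that
   disassemblers treat the pad as data and resume instruction decoding
   at the aligned address.  */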
1443
1444 static void mapping_state_2 (enum mstate state, int max_chars);
1445
1446 /* Set the mapping state to STATE. Only call this when about to
1447 emit some STATE bytes to the file. */
1448
1449 void
1450 mapping_state (enum mstate state)
1451 {
1452 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1453
1454 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1455
1456 if (mapstate == state)
1457 /* The mapping symbol has already been emitted.
1458 There is nothing else to do. */
1459 return;
1460 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1461 /* This case will be evaluated later in the next else. */
1462 return;
1463 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1464 {
1465 /* Only add the symbol if the offset is > 0:
1466 if we're at the first frag, check its size > 0;
1467 if we're not at the first frag, then for sure
1468 the offset is > 0. */
1469 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1470 const int add_symbol = (frag_now != frag_first)
1471 || (frag_now_fix () > 0);
1472
1473 if (add_symbol)
1474 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1475 }
1476
1477 mapping_state_2 (state, 0);
1478 #undef TRANSITION
1479 }
1480
1481 /* Same as mapping_state, but MAX_CHARS bytes have already been
1482 allocated. Put the mapping symbol that far back. */
1483
1484 static void
1485 mapping_state_2 (enum mstate state, int max_chars)
1486 {
1487 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1488
1489 if (!SEG_NORMAL (now_seg))
1490 return;
1491
1492 if (mapstate == state)
1493 /* The mapping symbol has already been emitted.
1494 There is nothing else to do. */
1495 return;
1496
1497 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1498 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1499 }
1500 #else
1501 #define mapping_state(x) /* nothing */
1502 #define mapping_state_2(x, y) /* nothing */
1503 #endif
1504
1505 /* Directives: sectioning and alignment. */
1506
1507 static void
1508 s_bss (int ignore ATTRIBUTE_UNUSED)
1509 {
1510 /* We don't support putting frags in the BSS segment; we fake it by
1511 marking in_bss, then looking at s_skip for clues. */
1512 subseg_set (bss_section, 0);
1513 demand_empty_rest_of_line ();
1514 mapping_state (MAP_DATA);
1515 }
1516
1517 static void
1518 s_even (int ignore ATTRIBUTE_UNUSED)
1519 {
1520 /* Never make frag if expect extra pass. */
1521 if (!need_pass_2)
1522 frag_align (1, 0, 0);
1523
1524 record_alignment (now_seg, 1);
1525
1526 demand_empty_rest_of_line ();
1527 }
1528
1529 /* Directives: Literal pools. */
1530
1531 static literal_pool *
1532 find_literal_pool (int size)
1533 {
1534 literal_pool *pool;
1535
1536 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1537 {
1538 if (pool->section == now_seg
1539 && pool->sub_section == now_subseg && pool->size == size)
1540 break;
1541 }
1542
1543 return pool;
1544 }
1545
1546 static literal_pool *
1547 find_or_make_literal_pool (int size)
1548 {
1549 /* Next literal pool ID number. */
1550 static unsigned int latest_pool_num = 1;
1551 literal_pool *pool;
1552
1553 pool = find_literal_pool (size);
1554
1555 if (pool == NULL)
1556 {
1557 /* Create a new pool. */
1558 pool = xmalloc (sizeof (*pool));
1559 if (!pool)
1560 return NULL;
1561
1562 /* Currently we always put the literal pool in the current text
1563 section. If we were generating "small" model code where we
1564 knew that all code and initialised data was within 1MB then
1565 we could output literals to mergeable, read-only data
1566 sections. */
1567
1568 pool->next_free_entry = 0;
1569 pool->section = now_seg;
1570 pool->sub_section = now_subseg;
1571 pool->size = size;
1572 pool->next = list_of_pools;
1573 pool->symbol = NULL;
1574
1575 /* Add it to the list. */
1576 list_of_pools = pool;
1577 }
1578
1579 /* New pools, and emptied pools, will have a NULL symbol. */
1580 if (pool->symbol == NULL)
1581 {
1582 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1583 (valueT) 0, &zero_address_frag);
1584 pool->id = latest_pool_num++;
1585 }
1586
1587 /* Done. */
1588 return pool;
1589 }
1590
1591 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1592 Return TRUE on success, otherwise return FALSE. */
1593 static bfd_boolean
1594 add_to_lit_pool (expressionS *exp, int size)
1595 {
1596 literal_pool *pool;
1597 unsigned int entry;
1598
1599 pool = find_or_make_literal_pool (size);
1600
1601 /* Check if this literal value is already in the pool. */
1602 for (entry = 0; entry < pool->next_free_entry; entry++)
1603 {
1604 if ((pool->literals[entry].X_op == exp->X_op)
1605 && (exp->X_op == O_constant)
1606 && (pool->literals[entry].X_add_number == exp->X_add_number)
1607 && (pool->literals[entry].X_unsigned == exp->X_unsigned))
1608 break;
1609
1610 if ((pool->literals[entry].X_op == exp->X_op)
1611 && (exp->X_op == O_symbol)
1612 && (pool->literals[entry].X_add_number == exp->X_add_number)
1613 && (pool->literals[entry].X_add_symbol == exp->X_add_symbol)
1614 && (pool->literals[entry].X_op_symbol == exp->X_op_symbol))
1615 break;
1616 }
1617
1618 /* Do we need to create a new entry? */
1619 if (entry == pool->next_free_entry)
1620 {
1621 if (entry >= MAX_LITERAL_POOL_SIZE)
1622 {
1623 set_syntax_error (_("literal pool overflow"));
1624 return FALSE;
1625 }
1626
1627 pool->literals[entry] = *exp;
1628 pool->next_free_entry += 1;
1629 }
1630
1631 exp->X_op = O_symbol;
1632 exp->X_add_number = ((int) entry) * size;
1633 exp->X_add_symbol = pool->symbol;
1634
1635 return TRUE;
1636 }
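
/* For example, adding a constant to the 8-byte pool as entry 2 rewrites
   *EXP as pool->symbol + 16; the instruction that referenced the literal
   is then assembled against the pool label, which s_ltorg below places
   and fills in when the pool is dumped.  */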
1637
1638 /* Can't use symbol_new here, so have to create a symbol and then at
1639 a later date assign it a value. That's what these functions do. */
1640
1641 static void
1642 symbol_locate (symbolS * symbolP,
1643 const char *name,/* It is copied, the caller can modify. */
1644 segT segment, /* Segment identifier (SEG_<something>). */
1645 valueT valu, /* Symbol value. */
1646 fragS * frag) /* Associated fragment. */
1647 {
1648 unsigned int name_length;
1649 char *preserved_copy_of_name;
1650
1651 name_length = strlen (name) + 1; /* +1 for \0. */
1652 obstack_grow (&notes, name, name_length);
1653 preserved_copy_of_name = obstack_finish (&notes);
1654
1655 #ifdef tc_canonicalize_symbol_name
1656 preserved_copy_of_name =
1657 tc_canonicalize_symbol_name (preserved_copy_of_name);
1658 #endif
1659
1660 S_SET_NAME (symbolP, preserved_copy_of_name);
1661
1662 S_SET_SEGMENT (symbolP, segment);
1663 S_SET_VALUE (symbolP, valu);
1664 symbol_clear_list_pointers (symbolP);
1665
1666 symbol_set_frag (symbolP, frag);
1667
1668 /* Link to end of symbol chain. */
1669 {
1670 extern int symbol_table_frozen;
1671
1672 if (symbol_table_frozen)
1673 abort ();
1674 }
1675
1676 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1677
1678 obj_symbol_new_hook (symbolP);
1679
1680 #ifdef tc_symbol_new_hook
1681 tc_symbol_new_hook (symbolP);
1682 #endif
1683
1684 #ifdef DEBUG_SYMS
1685 verify_symbol_chain (symbol_rootP, symbol_lastP);
1686 #endif /* DEBUG_SYMS */
1687 }
1688
1689
1690 static void
1691 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1692 {
1693 unsigned int entry;
1694 literal_pool *pool;
1695 char sym_name[20];
1696 int align;
1697
1698 for (align = 2; align <= 4; align++)
1699 {
1700 int size = 1 << align;
1701
1702 pool = find_literal_pool (size);
1703 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1704 continue;
1705
1706 mapping_state (MAP_DATA);
1707
1708 /* Align pool as you have word accesses.
1709 Only make a frag if we have to. */
1710 if (!need_pass_2)
1711 frag_align (align, 0, 0);
1712
1713 record_alignment (now_seg, align);
1714
1715 sprintf (sym_name, "$$lit_\002%x", pool->id);
1716
1717 symbol_locate (pool->symbol, sym_name, now_seg,
1718 (valueT) frag_now_fix (), frag_now);
1719 symbol_table_insert (pool->symbol);
1720
1721 for (entry = 0; entry < pool->next_free_entry; entry++)
1722 /* First output the expression in the instruction to the pool. */
1723 emit_expr (&(pool->literals[entry]), size); /* .word|.xword */
1724
1725 /* Mark the pool as empty. */
1726 pool->next_free_entry = 0;
1727 pool->symbol = NULL;
1728 }
1729 }
1730
1731 #ifdef OBJ_ELF
1732 /* Forward declarations for functions below, in the MD interface
1733 section. */
1734 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1735 static struct reloc_table_entry * find_reloc_table_entry (char **);
1736
1737 /* Directives: Data. */
1738 /* N.B. the support for relocation suffix in this directive needs to be
1739 implemented properly. */
1740
1741 static void
1742 s_aarch64_elf_cons (int nbytes)
1743 {
1744 expressionS exp;
1745
1746 #ifdef md_flush_pending_output
1747 md_flush_pending_output ();
1748 #endif
1749
1750 if (is_it_end_of_statement ())
1751 {
1752 demand_empty_rest_of_line ();
1753 return;
1754 }
1755
1756 #ifdef md_cons_align
1757 md_cons_align (nbytes);
1758 #endif
1759
1760 mapping_state (MAP_DATA);
1761 do
1762 {
1763 struct reloc_table_entry *reloc;
1764
1765 expression (&exp);
1766
1767 if (exp.X_op != O_symbol)
1768 emit_expr (&exp, (unsigned int) nbytes);
1769 else
1770 {
1771 skip_past_char (&input_line_pointer, '#');
1772 if (skip_past_char (&input_line_pointer, ':'))
1773 {
1774 reloc = find_reloc_table_entry (&input_line_pointer);
1775 if (reloc == NULL)
1776 as_bad (_("unrecognized relocation suffix"));
1777 else
1778 as_bad (_("unimplemented relocation suffix"));
1779 ignore_rest_of_line ();
1780 return;
1781 }
1782 else
1783 emit_expr (&exp, (unsigned int) nbytes);
1784 }
1785 }
1786 while (*input_line_pointer++ == ',');
1787
1788 /* Put terminator back into stream. */
1789 input_line_pointer--;
1790 demand_empty_rest_of_line ();
1791 }
1792
1793 #endif /* OBJ_ELF */
1794
1795 /* Output a 32-bit word, but mark as an instruction. */
1796
1797 static void
1798 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1799 {
1800 expressionS exp;
1801
1802 #ifdef md_flush_pending_output
1803 md_flush_pending_output ();
1804 #endif
1805
1806 if (is_it_end_of_statement ())
1807 {
1808 demand_empty_rest_of_line ();
1809 return;
1810 }
1811
1812 if (!need_pass_2)
1813 frag_align_code (2, 0);
1814 #ifdef OBJ_ELF
1815 mapping_state (MAP_INSN);
1816 #endif
1817
1818 do
1819 {
1820 expression (&exp);
1821 if (exp.X_op != O_constant)
1822 {
1823 as_bad (_("constant expression required"));
1824 ignore_rest_of_line ();
1825 return;
1826 }
1827
1828 if (target_big_endian)
1829 {
1830 unsigned int val = exp.X_add_number;
1831 exp.X_add_number = SWAP_32 (val);
1832 }
1833 emit_expr (&exp, 4);
1834 }
1835 while (*input_line_pointer++ == ',');
1836
1837 /* Put terminator back into stream. */
1838 input_line_pointer--;
1839 demand_empty_rest_of_line ();
1840 }
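
/* For example, ".inst 0xd503201f" emits the 32-bit NOP encoding as an
   instruction word (marked with an instruction mapping symbol under
   ELF), byte-swapping the value first when assembling for a big-endian
   target.  */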
1841
1842 #ifdef OBJ_ELF
1843 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1844
1845 static void
1846 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1847 {
1848 expressionS exp;
1849
1850 /* Since we're just labelling the code, there's no need to define a
1851 mapping symbol. */
1852 expression (&exp);
1853 /* Make sure there is enough room in this frag for the following
1854 blr. This trick only works if the blr follows immediately after
1855 the .tlsdesccall directive. */
1856 frag_grow (4);
1857 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1858 BFD_RELOC_AARCH64_TLSDESC_CALL);
1859
1860 demand_empty_rest_of_line ();
1861 }
1862 #endif /* OBJ_ELF */
1863
1864 static void s_aarch64_arch (int);
1865 static void s_aarch64_cpu (int);
1866
1867 /* This table describes all the machine specific pseudo-ops the assembler
1868 has to support. The fields are:
1869 pseudo-op name without dot
1870 function to call to execute this pseudo-op
1871 Integer arg to pass to the function. */
1872
1873 const pseudo_typeS md_pseudo_table[] = {
1874 /* Never called because '.req' does not start a line. */
1875 {"req", s_req, 0},
1876 {"unreq", s_unreq, 0},
1877 {"bss", s_bss, 0},
1878 {"even", s_even, 0},
1879 {"ltorg", s_ltorg, 0},
1880 {"pool", s_ltorg, 0},
1881 {"cpu", s_aarch64_cpu, 0},
1882 {"arch", s_aarch64_arch, 0},
1883 {"inst", s_aarch64_inst, 0},
1884 #ifdef OBJ_ELF
1885 {"tlsdesccall", s_tlsdesccall, 0},
1886 {"word", s_aarch64_elf_cons, 4},
1887 {"long", s_aarch64_elf_cons, 4},
1888 {"xword", s_aarch64_elf_cons, 8},
1889 {"dword", s_aarch64_elf_cons, 8},
1890 #endif
1891 {0, 0, 0}
1892 };
1893 \f
1894
1895 /* Check whether STR points to a register name followed by a comma or the
1896 end of line; REG_TYPE indicates which register types are checked
1897 against. Return TRUE if STR is such a register name; otherwise return
1898 FALSE. The function is not intended to produce any diagnostics, but since
1899 the register parser aarch64_reg_parse, which is called by this function,
1900 does produce diagnostics, so we call clear_error to clear any diagnostics
1901 that may be generated by aarch64_reg_parse.
1902 Also, the function returns FALSE directly if there is any user error
1903 present at the function entry. This prevents the existing diagnostics
1904 state from being spoiled.
1905 The function currently serves parse_constant_immediate and
1906 parse_big_immediate only. */
1907 static bfd_boolean
1908 reg_name_p (char *str, aarch64_reg_type reg_type)
1909 {
1910 int reg;
1911
1912 /* Prevent the diagnostics state from being spoiled. */
1913 if (error_p ())
1914 return FALSE;
1915
1916 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1917
1918 /* Clear the parsing error that may be set by the reg parser. */
1919 clear_error ();
1920
1921 if (reg == PARSE_FAIL)
1922 return FALSE;
1923
1924 skip_whitespace (str);
1925 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1926 return TRUE;
1927
1928 return FALSE;
1929 }
1930
1931 /* Parser functions used exclusively in instruction operands. */
1932
1933 /* Parse an immediate expression which need not be constant.
1934
1935 To prevent the expression parser from pushing a register name
1936 into the symbol table as an undefined symbol, firstly a check is
1937 done to find out whether STR is a valid register name followed
1938 by a comma or the end of line. Return FALSE if STR is such a
1939 string. */
1940
1941 static bfd_boolean
1942 parse_immediate_expression (char **str, expressionS *exp)
1943 {
1944 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1945 {
1946 set_recoverable_error (_("immediate operand required"));
1947 return FALSE;
1948 }
1949
1950 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
1951
1952 if (exp->X_op == O_absent)
1953 {
1954 set_fatal_syntax_error (_("missing immediate expression"));
1955 return FALSE;
1956 }
1957
1958 return TRUE;
1959 }
1960
1961 /* Constant immediate-value read function for use in insn parsing.
1962 STR points to the beginning of the immediate (with the optional
1963 leading #); *VAL receives the value.
1964
1965 Return TRUE on success; otherwise return FALSE. */
1966
1967 static bfd_boolean
1968 parse_constant_immediate (char **str, int64_t * val)
1969 {
1970 expressionS exp;
1971
1972 if (! parse_immediate_expression (str, &exp))
1973 return FALSE;
1974
1975 if (exp.X_op != O_constant)
1976 {
1977 set_syntax_error (_("constant expression required"));
1978 return FALSE;
1979 }
1980
1981 *val = exp.X_add_number;
1982 return TRUE;
1983 }
1984
1985 static uint32_t
1986 encode_imm_float_bits (uint32_t imm)
1987 {
1988 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
1989 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
1990 }
1991
1992 /* Return TRUE if IMM is a valid floating-point immediate; return FALSE
1993 otherwise. */
1994 static bfd_boolean
1995 aarch64_imm_float_p (uint32_t imm)
1996 {
1997 /* 3 32222222 22211111111110000000000
1998 1 09876543 21098765432109876543210
1999 n Eeeeeexx xxxx0000000000000000000 */
2000 uint32_t e;
2001
2002 e = (imm >> 30) & 0x1;
2003 if (e == 0)
2004 e = 0x3e000000;
2005 else
2006 e = 0x40000000;
2007 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0 */
2008 && ((imm & 0x7e000000) == e); /* bits 25-29 = ~ bit 30 */
2009 }
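
/* Worked example for the two helpers above: 1.0f is 0x3f800000, whose
   low 19 bits are zero and whose bits 25-29 are the complement of bit
   30, so aarch64_imm_float_p accepts it and encode_imm_float_bits
   returns the 8-bit immediate 0x70; likewise 2.0f (0x40000000) encodes
   to 0x00.  */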
2010
2011 /* Note: this accepts the floating-point 0 constant. */
2012 static bfd_boolean
2013 parse_aarch64_imm_float (char **ccp, int *immed)
2014 {
2015 char *str = *ccp;
2016 char *fpnum;
2017 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2018 int found_fpchar = 0;
2019
2020 skip_past_char (&str, '#');
2021
2022 /* We must not accidentally parse an integer as a floating-point number. Make
2023 sure that the value we parse is not an integer by checking for special
2024 characters '.' or 'e'.
2025 FIXME: This is a hack that is not very efficient, but doing better is
2026 tricky because type information isn't in a very usable state at parse
2027 time. */
2028 fpnum = str;
2029 skip_whitespace (fpnum);
2030
2031 if (strncmp (fpnum, "0x", 2) == 0)
2032 return FALSE;
2033 else
2034 {
2035 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2036 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2037 {
2038 found_fpchar = 1;
2039 break;
2040 }
2041
2042 if (!found_fpchar)
2043 return FALSE;
2044 }
2045
2046 if ((str = atof_ieee (str, 's', words)) != NULL)
2047 {
2048 unsigned fpword = 0;
2049 int i;
2050
2051 /* Our FP word must be 32 bits (single-precision FP). */
2052 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2053 {
2054 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2055 fpword |= words[i];
2056 }
2057
2058 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2059 *immed = fpword;
2060 else
2061 goto invalid_fp;
2062
2063 *ccp = str;
2064
2065 return TRUE;
2066 }
2067
2068 invalid_fp:
2069 set_fatal_syntax_error (_("invalid floating-point constant"));
2070 return FALSE;
2071 }
2072
2073 /* Less-generic immediate-value read function with the possibility of loading
2074 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2075 instructions.
2076
2077 To prevent the expression parser from pushing a register name into the
2078 symbol table as an undefined symbol, a check is first made to find
2079 out whether STR is a valid register name followed by a comma or the end
2080 of line. Return FALSE if STR is such a register. */
2081
2082 static bfd_boolean
2083 parse_big_immediate (char **str, int64_t *imm)
2084 {
2085 char *ptr = *str;
2086
2087 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2088 {
2089 set_syntax_error (_("immediate operand required"));
2090 return FALSE;
2091 }
2092
2093 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2094
2095 if (inst.reloc.exp.X_op == O_constant)
2096 *imm = inst.reloc.exp.X_add_number;
2097
2098 *str = ptr;
2099
2100 return TRUE;
2101 }
2102
2103 /* Flag RELOC as a GAS internal fixup for the given OPERAND.
2104 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2105 assistance from libopcodes.  */
2106
2107 static inline void
2108 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2109 const aarch64_opnd_info *operand,
2110 int need_libopcodes_p)
2111 {
2112 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2113 reloc->opnd = operand->type;
2114 if (need_libopcodes_p)
2115 reloc->need_libopcodes_p = 1;
2116 }
2117
2118 /* Return TRUE if the instruction needs to be fixed up later internally by
2119 GAS; otherwise return FALSE.  */
2120
2121 static inline bfd_boolean
2122 aarch64_gas_internal_fixup_p (void)
2123 {
2124 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2125 }
2126
2127 /* Assign the immediate value to the relevant field in *OPERAND if
2128 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2129 needs an internal fixup in a later stage.
2130 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2131 IMM.VALUE that may get assigned with the constant. */
2132 static inline void
2133 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2134 aarch64_opnd_info *operand,
2135 int addr_off_p,
2136 int need_libopcodes_p,
2137 int skip_p)
2138 {
2139 if (reloc->exp.X_op == O_constant)
2140 {
2141 if (addr_off_p)
2142 operand->addr.offset.imm = reloc->exp.X_add_number;
2143 else
2144 operand->imm.value = reloc->exp.X_add_number;
2145 reloc->type = BFD_RELOC_UNUSED;
2146 }
2147 else
2148 {
2149 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2150 /* Tell libopcodes whether or not to ignore this operand.  This is helpful
2151 when one of the operands needs to be fixed up later but we need
2152 libopcodes to check the other operands. */
2153 operand->skip = skip_p;
2154 }
2155 }
2156
2157 /* Relocation modifiers. Each entry in the table contains the textual
2158 name for the relocation which may be placed before a symbol used as
2159 a load/store offset, or add immediate. It must be surrounded by a
2160 leading and trailing colon, for example:
2161
2162 ldr x0, [x1, #:rello:varsym]
2163 add x0, x1, #:rello:varsym */
2164
2165 struct reloc_table_entry
2166 {
2167 const char *name;
2168 int pc_rel;
2169 bfd_reloc_code_real_type adrp_type;
2170 bfd_reloc_code_real_type movw_type;
2171 bfd_reloc_code_real_type add_type;
2172 bfd_reloc_code_real_type ldst_type;
2173 };
2174
2175 static struct reloc_table_entry reloc_table[] = {
2176 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2177 {"lo12", 0,
2178 0,
2179 0,
2180 BFD_RELOC_AARCH64_ADD_LO12,
2181 BFD_RELOC_AARCH64_LDST_LO12},
2182
2183 /* Higher 21 bits of pc-relative page offset: ADRP */
2184 {"pg_hi21", 1,
2185 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2186 0,
2187 0,
2188 0},
2189
2190 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2191 {"pg_hi21_nc", 1,
2192 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2193 0,
2194 0,
2195 0},
2196
2197 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2198 {"abs_g0", 0,
2199 0,
2200 BFD_RELOC_AARCH64_MOVW_G0,
2201 0,
2202 0},
2203
2204 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2205 {"abs_g0_s", 0,
2206 0,
2207 BFD_RELOC_AARCH64_MOVW_G0_S,
2208 0,
2209 0},
2210
2211 /* Less significant bits 0-15 of address/value: MOVK, no check */
2212 {"abs_g0_nc", 0,
2213 0,
2214 BFD_RELOC_AARCH64_MOVW_G0_NC,
2215 0,
2216 0},
2217
2218 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2219 {"abs_g1", 0,
2220 0,
2221 BFD_RELOC_AARCH64_MOVW_G1,
2222 0,
2223 0},
2224
2225 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2226 {"abs_g1_s", 0,
2227 0,
2228 BFD_RELOC_AARCH64_MOVW_G1_S,
2229 0,
2230 0},
2231
2232 /* Less significant bits 16-31 of address/value: MOVK, no check */
2233 {"abs_g1_nc", 0,
2234 0,
2235 BFD_RELOC_AARCH64_MOVW_G1_NC,
2236 0,
2237 0},
2238
2239 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2240 {"abs_g2", 0,
2241 0,
2242 BFD_RELOC_AARCH64_MOVW_G2,
2243 0,
2244 0},
2245
2246 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2247 {"abs_g2_s", 0,
2248 0,
2249 BFD_RELOC_AARCH64_MOVW_G2_S,
2250 0,
2251 0},
2252
2253 /* Less significant bits 32-47 of address/value: MOVK, no check */
2254 {"abs_g2_nc", 0,
2255 0,
2256 BFD_RELOC_AARCH64_MOVW_G2_NC,
2257 0,
2258 0},
2259
2260 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2261 {"abs_g3", 0,
2262 0,
2263 BFD_RELOC_AARCH64_MOVW_G3,
2264 0,
2265 0},
2266 /* Get to the GOT entry for a symbol. */
2267 {"got_prel19", 0,
2268 0,
2269 0,
2270 0,
2271 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2272 /* Get to the page containing GOT entry for a symbol. */
2273 {"got", 1,
2274 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2275 0,
2276 0,
2277 0},
2278 /* 12 bit offset into the page containing GOT entry for that symbol. */
2279 {"got_lo12", 0,
2280 0,
2281 0,
2282 0,
2283 BFD_RELOC_AARCH64_LD64_GOT_LO12_NC},
2284
2285 /* Get to the page containing GOT TLS entry for a symbol */
2286 {"tlsgd", 0,
2287 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2288 0,
2289 0,
2290 0},
2291
2292 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2293 {"tlsgd_lo12", 0,
2294 0,
2295 0,
2296 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2297 0},
2298
2299 /* Get to the page containing GOT TLS entry for a symbol */
2300 {"tlsdesc", 0,
2301 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE,
2302 0,
2303 0,
2304 0},
2305
2306 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2307 {"tlsdesc_lo12", 0,
2308 0,
2309 0,
2310 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2311 BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC},
2312
2313 /* Get to the page containing GOT TLS entry for a symbol */
2314 {"gottprel", 0,
2315 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2316 0,
2317 0,
2318 0},
2319
2320 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2321 {"gottprel_lo12", 0,
2322 0,
2323 0,
2324 0,
2325 BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
2326
2327 /* Get tp offset for a symbol. */
2328 {"tprel", 0,
2329 0,
2330 0,
2331 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2332 0},
2333
2334 /* Get tp offset for a symbol. */
2335 {"tprel_lo12", 0,
2336 0,
2337 0,
2338 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2339 0},
2340
2341 /* Get tp offset for a symbol. */
2342 {"tprel_hi12", 0,
2343 0,
2344 0,
2345 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2346 0},
2347
2348 /* Get tp offset for a symbol. */
2349 {"tprel_lo12_nc", 0,
2350 0,
2351 0,
2352 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2353 0},
2354
2355 /* Most significant bits 32-47 of address/value: MOVZ. */
2356 {"tprel_g2", 0,
2357 0,
2358 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2359 0,
2360 0},
2361
2362 /* Most significant bits 16-31 of address/value: MOVZ. */
2363 {"tprel_g1", 0,
2364 0,
2365 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2366 0,
2367 0},
2368
2369 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2370 {"tprel_g1_nc", 0,
2371 0,
2372 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2373 0,
2374 0},
2375
2376 /* Most significant bits 0-15 of address/value: MOVZ. */
2377 {"tprel_g0", 0,
2378 0,
2379 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2380 0,
2381 0},
2382
2383 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2384 {"tprel_g0_nc", 0,
2385 0,
2386 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2387 0,
2388 0},
2389 };
2390
2391 /* Given the address of a pointer pointing to the textual name of a
2392 relocation as may appear in assembler source, attempt to find its
2393 details in reloc_table. The pointer will be updated to the character
2394 after the trailing colon. On failure, NULL will be returned;
2395 otherwise return the reloc_table_entry. */
2396
2397 static struct reloc_table_entry *
2398 find_reloc_table_entry (char **str)
2399 {
2400 unsigned int i;
2401 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2402 {
2403 int length = strlen (reloc_table[i].name);
2404
2405 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2406 && (*str)[length] == ':')
2407 {
2408 *str += (length + 1);
2409 return &reloc_table[i];
2410 }
2411 }
2412
2413 return NULL;
2414 }
2415
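/* Editor's illustration, not part of the assembler: for the source operand
   "#:lo12:var" the caller strips "#:" and passes "lo12:var" here; the "lo12"
   entry matches, *STR is left pointing at "var", and the caller then picks
   the flavour it needs (e.g. the ADD variant, BFD_RELOC_AARCH64_ADD_LO12).
   A minimal standalone model of the per-entry match (the function name is
   made up):  */

static int
reloc_modifier_matches_sketch (const char **str, const char *name)
{
  size_t len = strlen (name);

  if (strncasecmp (name, *str, len) == 0 && (*str)[len] == ':')
    {
      /* Skip the modifier name and the trailing colon.  */
      *str += len + 1;
      return 1;
    }
  return 0;
}
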
2416 /* Mode argument to parse_shift and parser_shifter_operand. */
2417 enum parse_shift_mode
2418 {
2419 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2420 "#imm{,lsl #n}" */
2421 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2422 "#imm" */
2423 SHIFTED_LSL, /* bare "lsl #n" */
2424 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2425 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2426 };
2427
2428 /* Parse a <shift> operator on an AArch64 data processing instruction.
2429 Return TRUE on success; otherwise return FALSE. */
2430 static bfd_boolean
2431 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2432 {
2433 const struct aarch64_name_value_pair *shift_op;
2434 enum aarch64_modifier_kind kind;
2435 expressionS exp;
2436 int exp_has_prefix;
2437 char *s = *str;
2438 char *p = s;
2439
2440 for (p = *str; ISALPHA (*p); p++)
2441 ;
2442
2443 if (p == *str)
2444 {
2445 set_syntax_error (_("shift expression expected"));
2446 return FALSE;
2447 }
2448
2449 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2450
2451 if (shift_op == NULL)
2452 {
2453 set_syntax_error (_("shift operator expected"));
2454 return FALSE;
2455 }
2456
2457 kind = aarch64_get_operand_modifier (shift_op);
2458
2459 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2460 {
2461 set_syntax_error (_("invalid use of 'MSL'"));
2462 return FALSE;
2463 }
2464
2465 switch (mode)
2466 {
2467 case SHIFTED_LOGIC_IMM:
2468 if (aarch64_extend_operator_p (kind) == TRUE)
2469 {
2470 set_syntax_error (_("extending shift is not permitted"));
2471 return FALSE;
2472 }
2473 break;
2474
2475 case SHIFTED_ARITH_IMM:
2476 if (kind == AARCH64_MOD_ROR)
2477 {
2478 set_syntax_error (_("'ROR' shift is not permitted"));
2479 return FALSE;
2480 }
2481 break;
2482
2483 case SHIFTED_LSL:
2484 if (kind != AARCH64_MOD_LSL)
2485 {
2486 set_syntax_error (_("only 'LSL' shift is permitted"));
2487 return FALSE;
2488 }
2489 break;
2490
2491 case SHIFTED_REG_OFFSET:
2492 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2493 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2494 {
2495 set_fatal_syntax_error
2496 (_("invalid shift for the register offset addressing mode"));
2497 return FALSE;
2498 }
2499 break;
2500
2501 case SHIFTED_LSL_MSL:
2502 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2503 {
2504 set_syntax_error (_("invalid shift operator"));
2505 return FALSE;
2506 }
2507 break;
2508
2509 default:
2510 abort ();
2511 }
2512
2513 /* Whitespace can appear here if the next thing is a bare digit. */
2514 skip_whitespace (p);
2515
2516 /* Parse shift amount. */
2517 exp_has_prefix = 0;
2518 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2519 exp.X_op = O_absent;
2520 else
2521 {
2522 if (is_immediate_prefix (*p))
2523 {
2524 p++;
2525 exp_has_prefix = 1;
2526 }
2527 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2528 }
2529 if (exp.X_op == O_absent)
2530 {
2531 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2532 {
2533 set_syntax_error (_("missing shift amount"));
2534 return FALSE;
2535 }
2536 operand->shifter.amount = 0;
2537 }
2538 else if (exp.X_op != O_constant)
2539 {
2540 set_syntax_error (_("constant shift amount required"));
2541 return FALSE;
2542 }
2543 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2544 {
2545 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2546 return FALSE;
2547 }
2548 else
2549 {
2550 operand->shifter.amount = exp.X_add_number;
2551 operand->shifter.amount_present = 1;
2552 }
2553
2554 operand->shifter.operator_present = 1;
2555 operand->shifter.kind = kind;
2556
2557 *str = p;
2558 return TRUE;
2559 }
2560
2561 /* Parse a <shifter_operand> for a data processing instruction:
2562
2563 #<immediate>
2564 #<immediate>, LSL #imm
2565
2566 Validation of immediate operands is deferred to md_apply_fix.
2567
2568 Return TRUE on success; otherwise return FALSE. */
2569
2570 static bfd_boolean
2571 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2572 enum parse_shift_mode mode)
2573 {
2574 char *p;
2575
2576 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2577 return FALSE;
2578
2579 p = *str;
2580
2581 /* Accept an immediate expression. */
2582 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2583 return FALSE;
2584
2585 /* Accept optional LSL for arithmetic immediate values. */
2586 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2587 if (! parse_shift (&p, operand, SHIFTED_LSL))
2588 return FALSE;
2589
2590 /* Do not accept any shift operator for logical immediate values.  */
2591 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2592 && parse_shift (&p, operand, mode))
2593 {
2594 set_syntax_error (_("unexpected shift operator"));
2595 return FALSE;
2596 }
2597
2598 *str = p;
2599 return TRUE;
2600 }
2601
2602 /* Parse a <shifter_operand> for a data processing instruction:
2603
2604 <Rm>
2605 <Rm>, <shift>
2606 #<immediate>
2607 #<immediate>, LSL #imm
2608
2609 where <shift> is handled by parse_shift above, and the last two
2610 cases are handled by parse_shifter_operand_imm above.
2611
2612 Validation of immediate operands is deferred to md_apply_fix.
2613
2614 Return TRUE on success; otherwise return FALSE. */
2615
2616 static bfd_boolean
2617 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2618 enum parse_shift_mode mode)
2619 {
2620 int reg;
2621 int isreg32, isregzero;
2622 enum aarch64_operand_class opd_class
2623 = aarch64_get_operand_class (operand->type);
2624
2625 if ((reg =
2626 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2627 {
2628 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2629 {
2630 set_syntax_error (_("unexpected register in the immediate operand"));
2631 return FALSE;
2632 }
2633
2634 if (!isregzero && reg == REG_SP)
2635 {
2636 set_syntax_error (BAD_SP);
2637 return FALSE;
2638 }
2639
2640 operand->reg.regno = reg;
2641 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2642
2643 /* Accept optional shift operation on register. */
2644 if (! skip_past_comma (str))
2645 return TRUE;
2646
2647 if (! parse_shift (str, operand, mode))
2648 return FALSE;
2649
2650 return TRUE;
2651 }
2652 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2653 {
2654 set_syntax_error
2655 (_("integer register expected in the extended/shifted operand "
2656 "register"));
2657 return FALSE;
2658 }
2659
2660 /* We have a shifted immediate variable. */
2661 return parse_shifter_operand_imm (str, operand, mode);
2662 }
2663
2664 /* Return TRUE on success; return FALSE otherwise. */
2665
2666 static bfd_boolean
2667 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2668 enum parse_shift_mode mode)
2669 {
2670 char *p = *str;
2671
2672 /* Determine if we have the sequence of characters #: or just :
2673 coming next. If we do, then we check for a :rello: relocation
2674 modifier. If we don't, punt the whole lot to
2675 parse_shifter_operand. */
2676
2677 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2678 {
2679 struct reloc_table_entry *entry;
2680
2681 if (p[0] == '#')
2682 p += 2;
2683 else
2684 p++;
2685 *str = p;
2686
2687 /* Try to parse a relocation. Anything else is an error. */
2688 if (!(entry = find_reloc_table_entry (str)))
2689 {
2690 set_syntax_error (_("unknown relocation modifier"));
2691 return FALSE;
2692 }
2693
2694 if (entry->add_type == 0)
2695 {
2696 set_syntax_error
2697 (_("this relocation modifier is not allowed on this instruction"));
2698 return FALSE;
2699 }
2700
2701 /* Save str before we decompose it. */
2702 p = *str;
2703
2704 /* Next, we parse the expression. */
2705 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2706 return FALSE;
2707
2708 /* Record the relocation type (use the ADD variant here). */
2709 inst.reloc.type = entry->add_type;
2710 inst.reloc.pc_rel = entry->pc_rel;
2711
2712 /* If str is empty, we've reached the end; stop here.  */
2713 if (**str == '\0')
2714 return TRUE;
2715
2716 /* Otherwise, we have a shifted reloc modifier, so rewind to
2717 recover the variable name and continue parsing for the shifter. */
2718 *str = p;
2719 return parse_shifter_operand_imm (str, operand, mode);
2720 }
2721
2722 return parse_shifter_operand (str, operand, mode);
2723 }
2724
2725 /* Parse all forms of an address expression. Information is written
2726 to *OPERAND and/or inst.reloc.
2727
2728 The A64 instruction set has the following addressing modes:
2729
2730 Offset
2731 [base] // in SIMD ld/st structure
2732 [base{,#0}] // in ld/st exclusive
2733 [base{,#imm}]
2734 [base,Xm{,LSL #imm}]
2735 [base,Xm,SXTX {#imm}]
2736 [base,Wm,(S|U)XTW {#imm}]
2737 Pre-indexed
2738 [base,#imm]!
2739 Post-indexed
2740 [base],#imm
2741 [base],Xm // in SIMD ld/st structure
2742 PC-relative (literal)
2743 label
2744 =immediate
2745
2746 (As a convenience, the notation "=immediate" is permitted in conjunction
2747 with the pc-relative literal load instructions to automatically place an
2748 immediate value or symbolic address in a nearby literal pool and generate
2749 a hidden label which references it.)
2750
2751 Upon successful parsing, the address structure in *OPERAND will be
2752 filled in the following way:
2753
2754 .base_regno = <base>
2755 .offset.is_reg // 1 if the offset is a register
2756 .offset.imm = <imm>
2757 .offset.regno = <Rm>
2758
2759 For different addressing modes defined in the A64 ISA:
2760
2761 Offset
2762 .pcrel=0; .preind=1; .postind=0; .writeback=0
2763 Pre-indexed
2764 .pcrel=0; .preind=1; .postind=0; .writeback=1
2765 Post-indexed
2766 .pcrel=0; .preind=0; .postind=1; .writeback=1
2767 PC-relative (literal)
2768 .pcrel=1; .preind=1; .postind=0; .writeback=0
2769
2770 The shift/extension information, if any, will be stored in .shifter.
2771
2772 It is the caller's responsibility to check for addressing modes not
2773 supported by the instruction, and to set inst.reloc.type. */
2774
2775 static bfd_boolean
2776 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2777 int accept_reg_post_index)
2778 {
2779 char *p = *str;
2780 int reg;
2781 int isreg32, isregzero;
2782 expressionS *exp = &inst.reloc.exp;
2783
2784 if (! skip_past_char (&p, '['))
2785 {
2786 /* =immediate or label. */
2787 operand->addr.pcrel = 1;
2788 operand->addr.preind = 1;
2789
2790 /* #:<reloc_op>:<symbol> */
2791 skip_past_char (&p, '#');
2792 if (reloc && skip_past_char (&p, ':'))
2793 {
2794 struct reloc_table_entry *entry;
2795
2796 /* Try to parse a relocation modifier. Anything else is
2797 an error. */
2798 entry = find_reloc_table_entry (&p);
2799 if (! entry)
2800 {
2801 set_syntax_error (_("unknown relocation modifier"));
2802 return FALSE;
2803 }
2804
2805 if (entry->ldst_type == 0)
2806 {
2807 set_syntax_error
2808 (_("this relocation modifier is not allowed on this "
2809 "instruction"));
2810 return FALSE;
2811 }
2812
2813 /* #:<reloc_op>: */
2814 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2815 {
2816 set_syntax_error (_("invalid relocation expression"));
2817 return FALSE;
2818 }
2819
2820 /* #:<reloc_op>:<expr> */
2821 /* Record the load/store relocation type. */
2822 inst.reloc.type = entry->ldst_type;
2823 inst.reloc.pc_rel = entry->pc_rel;
2824 }
2825 else
2826 {
2827
2828 if (skip_past_char (&p, '='))
2829 /* =immediate; need to generate the literal in the literal pool. */
2830 inst.gen_lit_pool = 1;
2831
2832 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2833 {
2834 set_syntax_error (_("invalid address"));
2835 return FALSE;
2836 }
2837 }
2838
2839 *str = p;
2840 return TRUE;
2841 }
2842
2843 /* [ */
2844
2845 /* Accept SP and reject ZR */
2846 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
2847 if (reg == PARSE_FAIL || isreg32)
2848 {
2849 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
2850 return FALSE;
2851 }
2852 operand->addr.base_regno = reg;
2853
2854 /* [Xn */
2855 if (skip_past_comma (&p))
2856 {
2857 /* [Xn, */
2858 operand->addr.preind = 1;
2859
2860 /* Reject SP and accept ZR */
2861 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
2862 if (reg != PARSE_FAIL)
2863 {
2864 /* [Xn,Rm */
2865 operand->addr.offset.regno = reg;
2866 operand->addr.offset.is_reg = 1;
2867 /* Shifted index. */
2868 if (skip_past_comma (&p))
2869 {
2870 /* [Xn,Rm, */
2871 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
2872 /* Use the diagnostics set in parse_shift, so do not set a new
2873 error message here.  */
2874 return FALSE;
2875 }
2876 /* We only accept:
2877 [base,Xm{,LSL #imm}]
2878 [base,Xm,SXTX {#imm}]
2879 [base,Wm,(S|U)XTW {#imm}] */
2880 if (operand->shifter.kind == AARCH64_MOD_NONE
2881 || operand->shifter.kind == AARCH64_MOD_LSL
2882 || operand->shifter.kind == AARCH64_MOD_SXTX)
2883 {
2884 if (isreg32)
2885 {
2886 set_syntax_error (_("invalid use of 32-bit register offset"));
2887 return FALSE;
2888 }
2889 }
2890 else if (!isreg32)
2891 {
2892 set_syntax_error (_("invalid use of 64-bit register offset"));
2893 return FALSE;
2894 }
2895 }
2896 else
2897 {
2898 /* [Xn,#:<reloc_op>:<symbol> */
2899 skip_past_char (&p, '#');
2900 if (reloc && skip_past_char (&p, ':'))
2901 {
2902 struct reloc_table_entry *entry;
2903
2904 /* Try to parse a relocation modifier. Anything else is
2905 an error. */
2906 if (!(entry = find_reloc_table_entry (&p)))
2907 {
2908 set_syntax_error (_("unknown relocation modifier"));
2909 return FALSE;
2910 }
2911
2912 if (entry->ldst_type == 0)
2913 {
2914 set_syntax_error
2915 (_("this relocation modifier is not allowed on this "
2916 "instruction"));
2917 return FALSE;
2918 }
2919
2920 /* [Xn,#:<reloc_op>: */
2921 /* We now have the group relocation table entry corresponding to
2922 the name in the assembler source. Next, we parse the
2923 expression. */
2924 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2925 {
2926 set_syntax_error (_("invalid relocation expression"));
2927 return FALSE;
2928 }
2929
2930 /* [Xn,#:<reloc_op>:<expr> */
2931 /* Record the load/store relocation type. */
2932 inst.reloc.type = entry->ldst_type;
2933 inst.reloc.pc_rel = entry->pc_rel;
2934 }
2935 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
2936 {
2937 set_syntax_error (_("invalid expression in the address"));
2938 return FALSE;
2939 }
2940 /* [Xn,<expr> */
2941 }
2942 }
2943
2944 if (! skip_past_char (&p, ']'))
2945 {
2946 set_syntax_error (_("']' expected"));
2947 return FALSE;
2948 }
2949
2950 if (skip_past_char (&p, '!'))
2951 {
2952 if (operand->addr.preind && operand->addr.offset.is_reg)
2953 {
2954 set_syntax_error (_("register offset not allowed in pre-indexed "
2955 "addressing mode"));
2956 return FALSE;
2957 }
2958 /* [Xn]! */
2959 operand->addr.writeback = 1;
2960 }
2961 else if (skip_past_comma (&p))
2962 {
2963 /* [Xn], */
2964 operand->addr.postind = 1;
2965 operand->addr.writeback = 1;
2966
2967 if (operand->addr.preind)
2968 {
2969 set_syntax_error (_("cannot combine pre- and post-indexing"));
2970 return FALSE;
2971 }
2972
2973 if (accept_reg_post_index
2974 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
2975 &isregzero)) != PARSE_FAIL)
2976 {
2977 /* [Xn],Xm */
2978 if (isreg32)
2979 {
2980 set_syntax_error (_("invalid 32-bit register offset"));
2981 return FALSE;
2982 }
2983 operand->addr.offset.regno = reg;
2984 operand->addr.offset.is_reg = 1;
2985 }
2986 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
2987 {
2988 /* [Xn],#expr */
2989 set_syntax_error (_("invalid expression in the address"));
2990 return FALSE;
2991 }
2992 }
2993
2994 /* If at this point neither .preind nor .postind is set, we have a
2995 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
2996 if (operand->addr.preind == 0 && operand->addr.postind == 0)
2997 {
2998 if (operand->addr.writeback)
2999 {
3000 /* Reject [Rn]! */
3001 set_syntax_error (_("missing offset in the pre-indexed address"));
3002 return FALSE;
3003 }
3004 operand->addr.preind = 1;
3005 inst.reloc.exp.X_op = O_constant;
3006 inst.reloc.exp.X_add_number = 0;
3007 }
3008
3009 *str = p;
3010 return TRUE;
3011 }
3012
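/* Editor's illustration, not part of the assembler: the pcrel/preind/postind/
   writeback combinations documented before parse_address_main, written out
   as plain data for a few concrete operands.  The struct and table are made
   up for this example.  */

struct addr_flags_sketch
{
  const char *example;
  int pcrel, preind, postind, writeback;
};

static const struct addr_flags_sketch addr_flags_examples[] =
{
  { "[x1, #8]",  0, 1, 0, 0 },	/* Offset.  */
  { "[x1, #8]!", 0, 1, 0, 1 },	/* Pre-indexed.  */
  { "[x1], #8",  0, 0, 1, 1 },	/* Post-indexed.  */
  { "label",     1, 1, 0, 0 },	/* PC-relative (literal).  */
};
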
3013 /* Return TRUE on success; otherwise return FALSE. */
3014 static bfd_boolean
3015 parse_address (char **str, aarch64_opnd_info *operand,
3016 int accept_reg_post_index)
3017 {
3018 return parse_address_main (str, operand, 0, accept_reg_post_index);
3019 }
3020
3021 /* Return TRUE on success; otherwise return FALSE. */
3022 static bfd_boolean
3023 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3024 {
3025 return parse_address_main (str, operand, 1, 0);
3026 }
3027
3028 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3029 Return TRUE on success; otherwise return FALSE. */
3030 static bfd_boolean
3031 parse_half (char **str, int *internal_fixup_p)
3032 {
3033 char *p, *saved;
3034 int dummy;
3035
3036 p = *str;
3037 skip_past_char (&p, '#');
3038
3039 gas_assert (internal_fixup_p);
3040 *internal_fixup_p = 0;
3041
3042 if (*p == ':')
3043 {
3044 struct reloc_table_entry *entry;
3045
3046 /* Try to parse a relocation. Anything else is an error. */
3047 ++p;
3048 if (!(entry = find_reloc_table_entry (&p)))
3049 {
3050 set_syntax_error (_("unknown relocation modifier"));
3051 return FALSE;
3052 }
3053
3054 if (entry->movw_type == 0)
3055 {
3056 set_syntax_error
3057 (_("this relocation modifier is not allowed on this instruction"));
3058 return FALSE;
3059 }
3060
3061 inst.reloc.type = entry->movw_type;
3062 }
3063 else
3064 *internal_fixup_p = 1;
3065
3066 /* Avoid parsing a register as a general symbol. */
3067 saved = p;
3068 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3069 return FALSE;
3070 p = saved;
3071
3072 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3073 return FALSE;
3074
3075 *str = p;
3076 return TRUE;
3077 }
3078
3079 /* Parse an operand for an ADRP instruction:
3080 ADRP <Xd>, <label>
3081 Return TRUE on success; otherwise return FALSE. */
3082
3083 static bfd_boolean
3084 parse_adrp (char **str)
3085 {
3086 char *p;
3087
3088 p = *str;
3089 if (*p == ':')
3090 {
3091 struct reloc_table_entry *entry;
3092
3093 /* Try to parse a relocation. Anything else is an error. */
3094 ++p;
3095 if (!(entry = find_reloc_table_entry (&p)))
3096 {
3097 set_syntax_error (_("unknown relocation modifier"));
3098 return FALSE;
3099 }
3100
3101 if (entry->adrp_type == 0)
3102 {
3103 set_syntax_error
3104 (_("this relocation modifier is not allowed on this instruction"));
3105 return FALSE;
3106 }
3107
3108 inst.reloc.type = entry->adrp_type;
3109 }
3110 else
3111 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3112
3113 inst.reloc.pc_rel = 1;
3114
3115 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3116 return FALSE;
3117
3118 *str = p;
3119 return TRUE;
3120 }
3121
3122 /* Miscellaneous. */
3123
3124 /* Parse an option for a preload instruction. Returns the encoding for the
3125 option, or PARSE_FAIL. */
3126
3127 static int
3128 parse_pldop (char **str)
3129 {
3130 char *p, *q;
3131 const struct aarch64_name_value_pair *o;
3132
3133 p = q = *str;
3134 while (ISALNUM (*q))
3135 q++;
3136
3137 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3138 if (!o)
3139 return PARSE_FAIL;
3140
3141 *str = q;
3142 return o->value;
3143 }
3144
3145 /* Parse an option for a barrier instruction. Returns the encoding for the
3146 option, or PARSE_FAIL. */
3147
3148 static int
3149 parse_barrier (char **str)
3150 {
3151 char *p, *q;
3152 const asm_barrier_opt *o;
3153
3154 p = q = *str;
3155 while (ISALPHA (*q))
3156 q++;
3157
3158 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3159 if (!o)
3160 return PARSE_FAIL;
3161
3162 *str = q;
3163 return o->value;
3164 }
3165
3166 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3167 Returns the encoding for the option, or PARSE_FAIL.
3168
3169 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3170 implementation defined system register name S3_<op1>_<Cn>_<Cm>_<op2>. */
3171
3172 static int
3173 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3174 {
3175 char *p, *q;
3176 char buf[32];
3177 const struct aarch64_name_value_pair *o;
3178 int value;
3179
3180 p = buf;
3181 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3182 if (p < buf + 31)
3183 *p++ = TOLOWER (*q);
3184 *p = '\0';
3185 /* Assert that BUF is large enough.  */
3186 gas_assert (p - buf == q - *str);
3187
3188 o = hash_find (sys_regs, buf);
3189 if (!o)
3190 {
3191 if (!imple_defined_p)
3192 return PARSE_FAIL;
3193 else
3194 {
3195 /* Parse S3_<op1>_<Cn>_<Cm>_<op2>, the implementation defined
3196 registers. */
3197 unsigned int op0, op1, cn, cm, op2;
3198 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2) != 5)
3199 return PARSE_FAIL;
3200 /* Register access is encoded as follows:
3201 op0 op1 CRn CRm op2
3202 11 xxx 1x11 xxxx xxx. */
3203 if (op0 != 3 || op1 > 7 || (cn | 0x4) != 0xf || cm > 15 || op2 > 7)
3204 return PARSE_FAIL;
3205 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3206 }
3207 }
3208 else
3209 value = o->value;
3210
3211 *str = q;
3212 return value;
3213 }
3214
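/* Editor's illustration, not part of the assembler: the implementation
   defined system register encoding computed above, applied to a made-up
   name "s3_0_c15_c2_0" (op0=3, op1=0, CRn=15, CRm=2, op2=0).  Note that
   the (cn | 0x4) != 0xf check restricts CRn to 11 or 15.  */

static unsigned int
imple_defined_sysreg_value_sketch (unsigned int op0, unsigned int op1,
				   unsigned int cn, unsigned int cm,
				   unsigned int op2)
{
  return (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
}

/* imple_defined_sysreg_value_sketch (3, 0, 15, 2, 0) == 0xc790.  */
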
3215 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3216 for the option, or NULL. */
3217
3218 static const aarch64_sys_ins_reg *
3219 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3220 {
3221 char *p, *q;
3222 char buf[32];
3223 const aarch64_sys_ins_reg *o;
3224
3225 p = buf;
3226 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3227 if (p < buf + 31)
3228 *p++ = TOLOWER (*q);
3229 *p = '\0';
3230
3231 o = hash_find (sys_ins_regs, buf);
3232 if (!o)
3233 return NULL;
3234
3235 *str = q;
3236 return o;
3237 }
3238 \f
3239 #define po_char_or_fail(chr) do { \
3240 if (! skip_past_char (&str, chr)) \
3241 goto failure; \
3242 } while (0)
3243
3244 #define po_reg_or_fail(regtype) do { \
3245 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3246 if (val == PARSE_FAIL) \
3247 { \
3248 set_default_error (); \
3249 goto failure; \
3250 } \
3251 } while (0)
3252
3253 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3254 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3255 &isreg32, &isregzero); \
3256 if (val == PARSE_FAIL) \
3257 { \
3258 set_default_error (); \
3259 goto failure; \
3260 } \
3261 info->reg.regno = val; \
3262 if (isreg32) \
3263 info->qualifier = AARCH64_OPND_QLF_W; \
3264 else \
3265 info->qualifier = AARCH64_OPND_QLF_X; \
3266 } while (0)
3267
3268 #define po_imm_nc_or_fail() do { \
3269 if (! parse_constant_immediate (&str, &val)) \
3270 goto failure; \
3271 } while (0)
3272
3273 #define po_imm_or_fail(min, max) do { \
3274 if (! parse_constant_immediate (&str, &val)) \
3275 goto failure; \
3276 if (val < min || val > max) \
3277 { \
3278 set_fatal_syntax_error (_("immediate value out of range "\
3279 #min " to "#max)); \
3280 goto failure; \
3281 } \
3282 } while (0)
3283
3284 #define po_misc_or_fail(expr) do { \
3285 if (!expr) \
3286 goto failure; \
3287 } while (0)
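
/* Editor's illustration, not part of the assembler: inside parse_operands,
   po_imm_or_fail (0, 31) expands to roughly the following, jumping to the
   local `failure' label on any error:

     if (! parse_constant_immediate (&str, &val))
       goto failure;
     if (val < 0 || val > 31)
       {
	 set_fatal_syntax_error (_("immediate value out of range 0 to 31"));
	 goto failure;
       }
*/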
3288 \f
3289 /* Encode the 12-bit imm field of Add/sub immediate.  */
3290 static inline uint32_t
3291 encode_addsub_imm (uint32_t imm)
3292 {
3293 return imm << 10;
3294 }
3295
3296 /* Encode the shift amount field of Add/sub immediate.  */
3297 static inline uint32_t
3298 encode_addsub_imm_shift_amount (uint32_t cnt)
3299 {
3300 return cnt << 22;
3301 }
3302
3303
3304 /* Encode the imm field of Adr instruction.  */
3305 static inline uint32_t
3306 encode_adr_imm (uint32_t imm)
3307 {
3308 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3309 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3310 }
3311
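/* Editor's illustration, not part of the assembler: a decode counterpart of
   encode_adr_imm, showing that the 21-bit ADR immediate is split into immlo
   (instruction bits 30:29) and immhi (bits 23:5).  The function is made up
   for this example; decode_adr_imm_sketch (encode_adr_imm (imm)) == imm for
   any 21-bit IMM.  */

static inline uint32_t
decode_adr_imm_sketch (uint32_t insn_bits)
{
  uint32_t immlo = (insn_bits >> 29) & 0x3;
  uint32_t immhi = (insn_bits >> 5) & 0x7ffff;
  return (immhi << 2) | immlo;
}
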
3312 /* Encode the immediate field of Move wide immediate.  */
3313 static inline uint32_t
3314 encode_movw_imm (uint32_t imm)
3315 {
3316 return imm << 5;
3317 }
3318
3319 /* Encode the 26-bit offset of unconditional branch.  */
3320 static inline uint32_t
3321 encode_branch_ofs_26 (uint32_t ofs)
3322 {
3323 return ofs & ((1 << 26) - 1);
3324 }
3325
3326 /* Encode the 19-bit offset of conditional branch and compare & branch.  */
3327 static inline uint32_t
3328 encode_cond_branch_ofs_19 (uint32_t ofs)
3329 {
3330 return (ofs & ((1 << 19) - 1)) << 5;
3331 }
3332
3333 /* Encode the 19-bit offset of ld literal.  */
3334 static inline uint32_t
3335 encode_ld_lit_ofs_19 (uint32_t ofs)
3336 {
3337 return (ofs & ((1 << 19) - 1)) << 5;
3338 }
3339
3340 /* Encode the 14-bit offset of test & branch. */
3341 static inline uint32_t
3342 encode_tst_branch_ofs_14 (uint32_t ofs)
3343 {
3344 return (ofs & ((1 << 14) - 1)) << 5;
3345 }
3346
3347 /* Encode the 16-bit imm field of svc/hvc/smc. */
3348 static inline uint32_t
3349 encode_svc_imm (uint32_t imm)
3350 {
3351 return imm << 5;
3352 }
3353
3354 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3355 static inline uint32_t
3356 reencode_addsub_switch_add_sub (uint32_t opcode)
3357 {
3358 return opcode ^ (1 << 30);
3359 }
3360
3361 static inline uint32_t
3362 reencode_movzn_to_movz (uint32_t opcode)
3363 {
3364 return opcode | (1 << 30);
3365 }
3366
3367 static inline uint32_t
3368 reencode_movzn_to_movn (uint32_t opcode)
3369 {
3370 return opcode & ~(1 << 30);
3371 }
3372
3373 /* Overall per-instruction processing. */
3374
3375 /* We need to be able to fix up arbitrary expressions in some statements.
3376 This is so that we can handle symbols that are an arbitrary distance from
3377 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3378 which returns part of an address in a form which will be valid for
3379 a data instruction. We do this by pushing the expression into a symbol
3380 in the expr_section, and creating a fix for that. */
3381
3382 static fixS *
3383 fix_new_aarch64 (fragS * frag,
3384 int where,
3385 short int size, expressionS * exp, int pc_rel, int reloc)
3386 {
3387 fixS *new_fix;
3388
3389 switch (exp->X_op)
3390 {
3391 case O_constant:
3392 case O_symbol:
3393 case O_add:
3394 case O_subtract:
3395 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3396 break;
3397
3398 default:
3399 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3400 pc_rel, reloc);
3401 break;
3402 }
3403 return new_fix;
3404 }
3405 \f
3406 /* Diagnostics on operands errors. */
3407
3408 /* By default, output a one-line error message only.
3409 Enable the verbose error message with -merror-verbose.  */
3410 static int verbose_error_p = 0;
3411
3412 #ifdef DEBUG_AARCH64
3413 /* N.B. this is only for the purpose of debugging. */
3414 const char* operand_mismatch_kind_names[] =
3415 {
3416 "AARCH64_OPDE_NIL",
3417 "AARCH64_OPDE_RECOVERABLE",
3418 "AARCH64_OPDE_SYNTAX_ERROR",
3419 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3420 "AARCH64_OPDE_INVALID_VARIANT",
3421 "AARCH64_OPDE_OUT_OF_RANGE",
3422 "AARCH64_OPDE_UNALIGNED",
3423 "AARCH64_OPDE_REG_LIST",
3424 "AARCH64_OPDE_OTHER_ERROR",
3425 };
3426 #endif /* DEBUG_AARCH64 */
3427
3428 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3429
3430 When multiple errors of different kinds are found in the same assembly
3431 line, only the error of the highest severity will be picked up for
3432 issuing the diagnostics. */
3433
3434 static inline bfd_boolean
3435 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3436 enum aarch64_operand_error_kind rhs)
3437 {
3438 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3439 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3440 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3441 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3442 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3443 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3444 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3445 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3446 return lhs > rhs;
3447 }
3448
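/* Editor's note: for example, given the ordering asserted above,
   operand_error_higher_severity_p (AARCH64_OPDE_OUT_OF_RANGE,
				    AARCH64_OPDE_SYNTAX_ERROR)
   returns TRUE, so an out-of-range diagnostic is preferred over a plain
   syntax error when both are recorded for the same assembly line.  */
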
3449 /* Helper routine to get the mnemonic name from the assembly instruction
3450 line; should only be called for diagnostic purposes, as a string
3451 copy operation is involved, which may affect the runtime
3452 performance if used elsewhere.  */
3453
3454 static const char*
3455 get_mnemonic_name (const char *str)
3456 {
3457 static char mnemonic[32];
3458 char *ptr;
3459
3460 /* Get the first 31 bytes and assume that the full name is included.  */
3461 strncpy (mnemonic, str, 31);
3462 mnemonic[31] = '\0';
3463
3464 /* Scan up to the end of the mnemonic, which must end in white space,
3465 '.', or end of string. */
3466 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3467 ;
3468
3469 *ptr = '\0';
3470
3471 /* Append '...' to the truncated long name. */
3472 if (ptr - mnemonic == 31)
3473 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3474
3475 return mnemonic;
3476 }
3477
3478 static void
3479 reset_aarch64_instruction (aarch64_instruction *instruction)
3480 {
3481 memset (instruction, '\0', sizeof (aarch64_instruction));
3482 instruction->reloc.type = BFD_RELOC_UNUSED;
3483 }
3484
3485 /* Data structures storing one user error in the assembly code related to
3486 operands. */
3487
3488 struct operand_error_record
3489 {
3490 const aarch64_opcode *opcode;
3491 aarch64_operand_error detail;
3492 struct operand_error_record *next;
3493 };
3494
3495 typedef struct operand_error_record operand_error_record;
3496
3497 struct operand_errors
3498 {
3499 operand_error_record *head;
3500 operand_error_record *tail;
3501 };
3502
3503 typedef struct operand_errors operand_errors;
3504
3505 /* Top-level data structure reporting user errors for the current line of
3506 the assembly code.
3507 The way md_assemble works is that all opcodes sharing the same mnemonic
3508 name are iterated to find a match to the assembly line. In this data
3509 structure, each such opcode will have one operand_error_record
3510 allocated and inserted.  In other words, excessive errors related to
3511 a single opcode are disregarded.  */
3512 operand_errors operand_error_report;
3513
3514 /* Free record nodes. */
3515 static operand_error_record *free_opnd_error_record_nodes = NULL;
3516
3517 /* Initialize the data structure that stores the operand mismatch
3518 information on assembling one line of the assembly code. */
3519 static void
3520 init_operand_error_report (void)
3521 {
3522 if (operand_error_report.head != NULL)
3523 {
3524 gas_assert (operand_error_report.tail != NULL);
3525 operand_error_report.tail->next = free_opnd_error_record_nodes;
3526 free_opnd_error_record_nodes = operand_error_report.head;
3527 operand_error_report.head = NULL;
3528 operand_error_report.tail = NULL;
3529 return;
3530 }
3531 gas_assert (operand_error_report.tail == NULL);
3532 }
3533
3534 /* Return TRUE if some operand error has been recorded during the
3535 parsing of the current assembly line using the opcode *OPCODE;
3536 otherwise return FALSE. */
3537 static inline bfd_boolean
3538 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3539 {
3540 operand_error_record *record = operand_error_report.head;
3541 return record && record->opcode == opcode;
3542 }
3543
3544 /* Add the error record *NEW_RECORD to operand_error_report.  A record is
3545 allocated for NEW_RECORD->OPCODE if one does not already exist.
3546 N.B. there is only one record for each opcode, i.e. at most one error is
3547 recorded for each instruction template.  */
3548
3549 static void
3550 add_operand_error_record (const operand_error_record* new_record)
3551 {
3552 const aarch64_opcode *opcode = new_record->opcode;
3553 operand_error_record* record = operand_error_report.head;
3554
3555 /* The record may have been created for this opcode. If not, we need
3556 to prepare one. */
3557 if (! opcode_has_operand_error_p (opcode))
3558 {
3559 /* Get one empty record. */
3560 if (free_opnd_error_record_nodes == NULL)
3561 {
3562 record = xmalloc (sizeof (operand_error_record));
3563 if (record == NULL)
3564 abort ();
3565 }
3566 else
3567 {
3568 record = free_opnd_error_record_nodes;
3569 free_opnd_error_record_nodes = record->next;
3570 }
3571 record->opcode = opcode;
3572 /* Insert at the head. */
3573 record->next = operand_error_report.head;
3574 operand_error_report.head = record;
3575 if (operand_error_report.tail == NULL)
3576 operand_error_report.tail = record;
3577 }
3578 else if (record->detail.kind != AARCH64_OPDE_NIL
3579 && record->detail.index <= new_record->detail.index
3580 && operand_error_higher_severity_p (record->detail.kind,
3581 new_record->detail.kind))
3582 {
3583 /* In the case of multiple errors found on operands related to a
3584 single opcode, only record the error of the leftmost operand and
3585 only if the error is of higher severity. */
3586 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3587 " the existing error %s on operand %d",
3588 operand_mismatch_kind_names[new_record->detail.kind],
3589 new_record->detail.index,
3590 operand_mismatch_kind_names[record->detail.kind],
3591 record->detail.index);
3592 return;
3593 }
3594
3595 record->detail = new_record->detail;
3596 }
3597
3598 static inline void
3599 record_operand_error_info (const aarch64_opcode *opcode,
3600 aarch64_operand_error *error_info)
3601 {
3602 operand_error_record record;
3603 record.opcode = opcode;
3604 record.detail = *error_info;
3605 add_operand_error_record (&record);
3606 }
3607
3608 /* Record an error of kind KIND for operand IDX (counting from 0), using
3609 the detailed error message *ERROR if it is not NULL.  */
3610
3611 static void
3612 record_operand_error (const aarch64_opcode *opcode, int idx,
3613 enum aarch64_operand_error_kind kind,
3614 const char* error)
3615 {
3616 aarch64_operand_error info;
3617 memset(&info, 0, sizeof (info));
3618 info.index = idx;
3619 info.kind = kind;
3620 info.error = error;
3621 record_operand_error_info (opcode, &info);
3622 }
3623
3624 static void
3625 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3626 enum aarch64_operand_error_kind kind,
3627 const char* error, const int *extra_data)
3628 {
3629 aarch64_operand_error info;
3630 info.index = idx;
3631 info.kind = kind;
3632 info.error = error;
3633 info.data[0] = extra_data[0];
3634 info.data[1] = extra_data[1];
3635 info.data[2] = extra_data[2];
3636 record_operand_error_info (opcode, &info);
3637 }
3638
3639 static void
3640 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3641 const char* error, int lower_bound,
3642 int upper_bound)
3643 {
3644 int data[3] = {lower_bound, upper_bound, 0};
3645 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3646 error, data);
3647 }
3648
3649 /* Remove the operand error record for *OPCODE. */
3650 static void ATTRIBUTE_UNUSED
3651 remove_operand_error_record (const aarch64_opcode *opcode)
3652 {
3653 if (opcode_has_operand_error_p (opcode))
3654 {
3655 operand_error_record* record = operand_error_report.head;
3656 gas_assert (record != NULL && operand_error_report.tail != NULL);
3657 operand_error_report.head = record->next;
3658 record->next = free_opnd_error_record_nodes;
3659 free_opnd_error_record_nodes = record;
3660 if (operand_error_report.head == NULL)
3661 {
3662 gas_assert (operand_error_report.tail == record);
3663 operand_error_report.tail = NULL;
3664 }
3665 }
3666 }
3667
3668 /* Given the instruction in *INSTR, return the index of the best matched
3669 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3670
3671 Return -1 if there is no qualifier sequence; return the first match
3672 if multiple matches are found.  */
3673
3674 static int
3675 find_best_match (const aarch64_inst *instr,
3676 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3677 {
3678 int i, num_opnds, max_num_matched, idx;
3679
3680 num_opnds = aarch64_num_of_operands (instr->opcode);
3681 if (num_opnds == 0)
3682 {
3683 DEBUG_TRACE ("no operand");
3684 return -1;
3685 }
3686
3687 max_num_matched = 0;
3688 idx = -1;
3689
3690 /* For each pattern. */
3691 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3692 {
3693 int j, num_matched;
3694 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3695
3696 /* Most opcodes have far fewer patterns in the list.  */
3697 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3698 {
3699 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3700 if (i != 0 && idx == -1)
3701 /* If nothing has been matched, return the 1st sequence. */
3702 idx = 0;
3703 break;
3704 }
3705
3706 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3707 if (*qualifiers == instr->operands[j].qualifier)
3708 ++num_matched;
3709
3710 if (num_matched > max_num_matched)
3711 {
3712 max_num_matched = num_matched;
3713 idx = i;
3714 }
3715 }
3716
3717 DEBUG_TRACE ("return with %d", idx);
3718 return idx;
3719 }
3720
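/* Editor's illustration, not part of the assembler: the scoring idea used by
   find_best_match, reduced to plain integer sequences.  Each candidate row is
   compared element-wise against the wanted sequence and the row with the most
   matching elements wins; all names here are made up.  */

static int
best_match_row_sketch (const int *wanted, const int *rows,
		       int num_rows, int num_cols)
{
  int best_idx = -1, best_score = 0, i, j;

  for (i = 0; i < num_rows; i++)
    {
      int score = 0;
      for (j = 0; j < num_cols; j++)
	if (rows[i * num_cols + j] == wanted[j])
	  score++;
      if (score > best_score)
	{
	  best_score = score;
	  best_idx = i;
	}
    }
  return best_idx;
}
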
3721 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3722 corresponding operands in *INSTR. */
3723
3724 static inline void
3725 assign_qualifier_sequence (aarch64_inst *instr,
3726 const aarch64_opnd_qualifier_t *qualifiers)
3727 {
3728 int i = 0;
3729 int num_opnds = aarch64_num_of_operands (instr->opcode);
3730 gas_assert (num_opnds);
3731 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3732 instr->operands[i].qualifier = *qualifiers;
3733 }
3734
3735 /* Print operands for diagnostic purposes.  */
3736
3737 static void
3738 print_operands (char *buf, const aarch64_opcode *opcode,
3739 const aarch64_opnd_info *opnds)
3740 {
3741 int i;
3742
3743 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3744 {
3745 const size_t size = 128;
3746 char str[size];
3747
3748 /* We rely mainly on the opcode's operand info; however, we also
3749 look into inst->operands to support the printing of the optional
3750 operand.
3751 The two operand codes should be the same in all cases, apart from
3752 when the operand can be optional.  */
3753 if (opcode->operands[i] == AARCH64_OPND_NIL
3754 || opnds[i].type == AARCH64_OPND_NIL)
3755 break;
3756
3757 /* Generate the operand string in STR. */
3758 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3759
3760 /* Delimiter. */
3761 if (str[0] != '\0')
3762 strcat (buf, i == 0 ? " " : ",");
3763
3764 /* Append the operand string. */
3765 strcat (buf, str);
3766 }
3767 }
3768
3769 /* Output an informational message to stderr.  */
3770
3771 static void
3772 output_info (const char *format, ...)
3773 {
3774 char *file;
3775 unsigned int line;
3776 va_list args;
3777
3778 as_where (&file, &line);
3779 if (file)
3780 {
3781 if (line != 0)
3782 fprintf (stderr, "%s:%u: ", file, line);
3783 else
3784 fprintf (stderr, "%s: ", file);
3785 }
3786 fprintf (stderr, _("Info: "));
3787 va_start (args, format);
3788 vfprintf (stderr, format, args);
3789 va_end (args);
3790 (void) putc ('\n', stderr);
3791 }
3792
3793 /* Output one operand error record. */
3794
3795 static void
3796 output_operand_error_record (const operand_error_record *record, char *str)
3797 {
3798 int idx = record->detail.index;
3799 const aarch64_opcode *opcode = record->opcode;
3800 enum aarch64_opnd opd_code = (idx != -1 ? opcode->operands[idx]
3801 : AARCH64_OPND_NIL);
3802 const aarch64_operand_error *detail = &record->detail;
3803
3804 switch (detail->kind)
3805 {
3806 case AARCH64_OPDE_NIL:
3807 gas_assert (0);
3808 break;
3809
3810 case AARCH64_OPDE_SYNTAX_ERROR:
3811 case AARCH64_OPDE_RECOVERABLE:
3812 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
3813 case AARCH64_OPDE_OTHER_ERROR:
3814 gas_assert (idx >= 0);
3815 /* Use the prepared error message if there is one; otherwise use the
3816 operand description string to describe the error. */
3817 if (detail->error != NULL)
3818 {
3819 if (detail->index == -1)
3820 as_bad (_("%s -- `%s'"), detail->error, str);
3821 else
3822 as_bad (_("%s at operand %d -- `%s'"),
3823 detail->error, detail->index + 1, str);
3824 }
3825 else
3826 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
3827 aarch64_get_operand_desc (opd_code), str);
3828 break;
3829
3830 case AARCH64_OPDE_INVALID_VARIANT:
3831 as_bad (_("operand mismatch -- `%s'"), str);
3832 if (verbose_error_p)
3833 {
3834 /* We will try to correct the erroneous instruction and also provide
3835 more information e.g. all other valid variants.
3836
3837 The string representation of the corrected instruction and other
3838 valid variants are generated by
3839
3840 1) obtaining the intermediate representation of the erroneous
3841 instruction;
3842 2) manipulating the IR, e.g. replacing the operand qualifier;
3843 3) printing out the instruction by calling the printer functions
3844 shared with the disassembler.
3845
3846 The limitation of this method is that the exact input assembly
3847 line cannot be accurately reproduced in some cases, for example an
3848 optional operand present in the actual assembly line will be
3849 omitted in the output; likewise for the optional syntax rules,
3850 e.g. the # before the immediate. Another limitation is that the
3851 assembly symbols and relocation operations in the assembly line
3852 currently cannot be printed out in the error report. Last but not
3853 least, when other errors co-exist with this error, the
3854 'corrected' instruction may still be incorrect, e.g. given
3855 'ldnp h0,h1,[x0,#6]!'
3856 this diagnosis will provide the version:
3857 'ldnp s0,s1,[x0,#6]!'
3858 which is still not right. */
3859 size_t len = strlen (get_mnemonic_name (str));
3860 int i, qlf_idx;
3861 bfd_boolean result;
3862 const size_t size = 2048;
3863 char buf[size];
3864 aarch64_inst *inst_base = &inst.base;
3865 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
3866
3867 /* Init inst. */
3868 reset_aarch64_instruction (&inst);
3869 inst_base->opcode = opcode;
3870
3871 /* Reset the error report so that there is no side effect on the
3872 following operand parsing. */
3873 init_operand_error_report ();
3874
3875 /* Fill inst. */
3876 result = parse_operands (str + len, opcode)
3877 && programmer_friendly_fixup (&inst);
3878 gas_assert (result);
3879 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
3880 NULL, NULL);
3881 gas_assert (!result);
3882
3883 /* Find the most matched qualifier sequence. */
3884 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
3885 gas_assert (qlf_idx > -1);
3886
3887 /* Assign the qualifiers. */
3888 assign_qualifier_sequence (inst_base,
3889 opcode->qualifiers_list[qlf_idx]);
3890
3891 /* Print the hint. */
3892 output_info (_(" did you mean this?"));
3893 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
3894 print_operands (buf, opcode, inst_base->operands);
3895 output_info (_(" %s"), buf);
3896
3897 /* Print out other variant(s) if there is any. */
3898 if (qlf_idx != 0 ||
3899 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
3900 output_info (_(" other valid variant(s):"));
3901
3902 /* For each pattern. */
3903 qualifiers_list = opcode->qualifiers_list;
3904 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3905 {
3906 /* Most opcodes have far fewer patterns in the list.
3907 The first NIL qualifier indicates the end of the list.  */
3908 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
3909 break;
3910
3911 if (i != qlf_idx)
3912 {
3913 /* Mnemonic name.  */
3914 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
3915
3916 /* Assign the qualifiers. */
3917 assign_qualifier_sequence (inst_base, *qualifiers_list);
3918
3919 /* Print instruction. */
3920 print_operands (buf, opcode, inst_base->operands);
3921
3922 output_info (_(" %s"), buf);
3923 }
3924 }
3925 }
3926 break;
3927
3928 case AARCH64_OPDE_OUT_OF_RANGE:
3929 if (detail->data[0] != detail->data[1])
3930 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
3931 detail->error ? detail->error : _("immediate value"),
3932 detail->data[0], detail->data[1], detail->index + 1, str);
3933 else
3934 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
3935 detail->error ? detail->error : _("immediate value"),
3936 detail->data[0], detail->index + 1, str);
3937 break;
3938
3939 case AARCH64_OPDE_REG_LIST:
3940 if (detail->data[0] == 1)
3941 as_bad (_("invalid number of registers in the list; "
3942 "only 1 register is expected at operand %d -- `%s'"),
3943 detail->index + 1, str);
3944 else
3945 as_bad (_("invalid number of registers in the list; "
3946 "%d registers are expected at operand %d -- `%s'"),
3947 detail->data[0], detail->index + 1, str);
3948 break;
3949
3950 case AARCH64_OPDE_UNALIGNED:
3951 as_bad (_("immediate value should be a multiple of "
3952 "%d at operand %d -- `%s'"),
3953 detail->data[0], detail->index + 1, str);
3954 break;
3955
3956 default:
3957 gas_assert (0);
3958 break;
3959 }
3960 }
3961
3962 /* Process and output the error message about the operand mismatch.
3963
3964 When this function is called, the operand error information has
3965 been collected for an assembly line and there will be multiple
3966 errors in the case of multiple instruction templates; output the
3967 error message that most closely describes the problem. */
3968
3969 static void
3970 output_operand_error_report (char *str)
3971 {
3972 int largest_error_pos;
3973 const char *msg = NULL;
3974 enum aarch64_operand_error_kind kind;
3975 operand_error_record *curr;
3976 operand_error_record *head = operand_error_report.head;
3977 operand_error_record *record = NULL;
3978
3979 /* No error to report. */
3980 if (head == NULL)
3981 return;
3982
3983 gas_assert (head != NULL && operand_error_report.tail != NULL);
3984
3985 /* Only one error. */
3986 if (head == operand_error_report.tail)
3987 {
3988 DEBUG_TRACE ("single opcode entry with error kind: %s",
3989 operand_mismatch_kind_names[head->detail.kind]);
3990 output_operand_error_record (head, str);
3991 return;
3992 }
3993
3994 /* Find the error kind of the highest severity. */
3995 DEBUG_TRACE ("multiple opcode entries with error kind");
3996 kind = AARCH64_OPDE_NIL;
3997 for (curr = head; curr != NULL; curr = curr->next)
3998 {
3999 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4000 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4001 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4002 kind = curr->detail.kind;
4003 }
4004 gas_assert (kind != AARCH64_OPDE_NIL);
4005
4006 /* Pick one of the errors of KIND to report.  */
4007 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4008 for (curr = head; curr != NULL; curr = curr->next)
4009 {
4010 if (curr->detail.kind != kind)
4011 continue;
4012 /* If there are multiple errors, pick the one with the highest
4013 mismatching operand index.  In the case of multiple errors with
4014 the same highest operand index, pick the first one, or, if that
4015 one has no error message, the first one with a non-NULL error message.  */
4016 if (curr->detail.index > largest_error_pos
4017 || (curr->detail.index == largest_error_pos && msg == NULL
4018 && curr->detail.error != NULL))
4019 {
4020 largest_error_pos = curr->detail.index;
4021 record = curr;
4022 msg = record->detail.error;
4023 }
4024 }
4025
4026 gas_assert (largest_error_pos != -2 && record != NULL);
4027 DEBUG_TRACE ("Pick up error kind %s to report",
4028 operand_mismatch_kind_names[record->detail.kind]);
4029
4030 /* Output. */
4031 output_operand_error_record (record, str);
4032 }
4033 \f
4034 /* Write an AARCH64 instruction to buf - always little-endian. */
4035 static void
4036 put_aarch64_insn (char *buf, uint32_t insn)
4037 {
4038 unsigned char *where = (unsigned char *) buf;
4039 where[0] = insn;
4040 where[1] = insn >> 8;
4041 where[2] = insn >> 16;
4042 where[3] = insn >> 24;
4043 }
4044
4045 static uint32_t
4046 get_aarch64_insn (char *buf)
4047 {
4048 unsigned char *where = (unsigned char *) buf;
4049 uint32_t result;
4050 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4051 return result;
4052 }
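/* Worked example (illustration only): put_aarch64_insn writes the least
   significant byte first, so the NOP encoding 0xd503201f is stored as the
   byte sequence 1f 20 03 d5 (the same pattern used by aarch64_noop later
   in this file), and get_aarch64_insn reassembles the identical 32-bit
   value from those bytes.  */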
4053
4054 static void
4055 output_inst (struct aarch64_inst *new_inst)
4056 {
4057 char *to = NULL;
4058
4059 to = frag_more (INSN_SIZE);
4060
4061 frag_now->tc_frag_data.recorded = 1;
4062
4063 put_aarch64_insn (to, inst.base.value);
4064
4065 if (inst.reloc.type != BFD_RELOC_UNUSED)
4066 {
4067 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4068 INSN_SIZE, &inst.reloc.exp,
4069 inst.reloc.pc_rel,
4070 inst.reloc.type);
4071 DEBUG_TRACE ("Prepared relocation fix up");
4072 /* Don't check the addend value against the instruction size,
4073 that's the job of our code in md_apply_fix(). */
4074 fixp->fx_no_overflow = 1;
4075 if (new_inst != NULL)
4076 fixp->tc_fix_data.inst = new_inst;
4077 if (aarch64_gas_internal_fixup_p ())
4078 {
4079 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4080 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4081 fixp->fx_addnumber = inst.reloc.flags;
4082 }
4083 }
4084
4085 dwarf2_emit_insn (INSN_SIZE);
4086 }
4087
4088 /* Link together opcodes of the same name. */
4089
4090 struct templates
4091 {
4092 aarch64_opcode *opcode;
4093 struct templates *next;
4094 };
4095
4096 typedef struct templates templates;
4097
4098 static templates *
4099 lookup_mnemonic (const char *start, int len)
4100 {
4101 templates *templ = NULL;
4102
4103 templ = hash_find_n (aarch64_ops_hsh, start, len);
4104 return templ;
4105 }
4106
4107 /* Subroutine of md_assemble, responsible for looking up the primary
4108 opcode from the mnemonic the user wrote. STR points to the
4109 beginning of the mnemonic. */
4110
4111 static templates *
4112 opcode_lookup (char **str)
4113 {
4114 char *end, *base;
4115 const aarch64_cond *cond;
4116 char condname[16];
4117 int len;
4118
4119 /* Scan up to the end of the mnemonic, which must end in white space,
4120 '.', or end of string. */
4121   for (base = end = *str; is_part_of_name (*end); end++)
4122 if (*end == '.')
4123 break;
4124
4125 if (end == base)
4126 return 0;
4127
4128 inst.cond = COND_ALWAYS;
4129
4130 /* Handle a possible condition. */
4131 if (end[0] == '.')
4132 {
4133 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4134 if (cond)
4135 {
4136 inst.cond = cond->value;
4137 *str = end + 3;
4138 }
4139 else
4140 {
4141 *str = end;
4142 return 0;
4143 }
4144 }
4145 else
4146 *str = end;
4147
4148 len = end - base;
4149
4150 if (inst.cond == COND_ALWAYS)
4151 {
4152 /* Look for unaffixed mnemonic. */
4153 return lookup_mnemonic (base, len);
4154 }
4155 else if (len <= 13)
4156 {
4157       /* Append ".c" to the mnemonic if conditional.  */
4158 memcpy (condname, base, len);
4159 memcpy (condname + len, ".c", 2);
4160 base = condname;
4161 len += 2;
4162 return lookup_mnemonic (base, len);
4163 }
4164
4165 return NULL;
4166 }
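/* Rough illustration of the lookup above (the example mnemonic is assumed,
   not taken from this file): for "b.eq label" the scan stops at the '.',
   the two characters after it are looked up in aarch64_cond_hsh to set
   inst.cond, and the opcode itself is then searched as "b.c", i.e. the
   base mnemonic with ".c" appended.  */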
4167
4168 /* Internal helper routine converting a vector neon_type_el structure
4169 *VECTYPE to a corresponding operand qualifier. */
4170
4171 static inline aarch64_opnd_qualifier_t
4172 vectype_to_qualifier (const struct neon_type_el *vectype)
4173 {
4174 /* Element size in bytes indexed by neon_el_type. */
4175 const unsigned char ele_size[5]
4176 = {1, 2, 4, 8, 16};
4177
4178 if (!vectype->defined || vectype->type == NT_invtype)
4179 goto vectype_conversion_fail;
4180
4181 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4182
4183 if (vectype->defined & NTA_HASINDEX)
4184 /* Vector element register. */
4185 return AARCH64_OPND_QLF_S_B + vectype->type;
4186 else
4187 {
4188 /* Vector register. */
4189 int reg_size = ele_size[vectype->type] * vectype->width;
4190 unsigned offset;
4191 if (reg_size != 16 && reg_size != 8)
4192 goto vectype_conversion_fail;
4193       /* The conversion relies on the order of the qualifiers matching the
4194          combinations of vector element size and vector register size.  */
4195 offset = (vectype->type == NT_q)
4196 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4197 gas_assert (offset <= 8);
4198 return AARCH64_OPND_QLF_V_8B + offset;
4199 }
4200
4201 vectype_conversion_fail:
4202 first_error (_("bad vector arrangement type"));
4203 return AARCH64_OPND_QLF_NIL;
4204 }
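/* Worked example for the mapping above (a sketch; it assumes the vector
   qualifiers are enumerated in the usual 8B, 16B, 4H, 8H, 2S, 4S, 1D, 2D,
   1Q order): a ".4s" arrangement has type NT_s and width 4, so
   reg_size = 4 * 4 = 16 and offset = (NT_s << 1) + (16 >> 4) = 5, which
   selects AARCH64_OPND_QLF_V_4S.  */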
4205
4206 /* Process an optional operand that has been omitted from the assembly line.
4207 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4208 instruction's opcode entry while IDX is the index of this omitted operand.
4209 */
4210
4211 static void
4212 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4213 int idx, aarch64_opnd_info *operand)
4214 {
4215 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4216 gas_assert (optional_operand_p (opcode, idx));
4217 gas_assert (!operand->present);
4218
4219 switch (type)
4220 {
4221 case AARCH64_OPND_Rd:
4222 case AARCH64_OPND_Rn:
4223 case AARCH64_OPND_Rm:
4224 case AARCH64_OPND_Rt:
4225 case AARCH64_OPND_Rt2:
4226 case AARCH64_OPND_Rs:
4227 case AARCH64_OPND_Ra:
4228 case AARCH64_OPND_Rt_SYS:
4229 case AARCH64_OPND_Rd_SP:
4230 case AARCH64_OPND_Rn_SP:
4231 case AARCH64_OPND_Fd:
4232 case AARCH64_OPND_Fn:
4233 case AARCH64_OPND_Fm:
4234 case AARCH64_OPND_Fa:
4235 case AARCH64_OPND_Ft:
4236 case AARCH64_OPND_Ft2:
4237 case AARCH64_OPND_Sd:
4238 case AARCH64_OPND_Sn:
4239 case AARCH64_OPND_Sm:
4240 case AARCH64_OPND_Vd:
4241 case AARCH64_OPND_Vn:
4242 case AARCH64_OPND_Vm:
4243 case AARCH64_OPND_VdD1:
4244 case AARCH64_OPND_VnD1:
4245 operand->reg.regno = default_value;
4246 break;
4247
4248 case AARCH64_OPND_Ed:
4249 case AARCH64_OPND_En:
4250 case AARCH64_OPND_Em:
4251 operand->reglane.regno = default_value;
4252 break;
4253
4254 case AARCH64_OPND_IDX:
4255 case AARCH64_OPND_BIT_NUM:
4256 case AARCH64_OPND_IMMR:
4257 case AARCH64_OPND_IMMS:
4258 case AARCH64_OPND_SHLL_IMM:
4259 case AARCH64_OPND_IMM_VLSL:
4260 case AARCH64_OPND_IMM_VLSR:
4261 case AARCH64_OPND_CCMP_IMM:
4262 case AARCH64_OPND_FBITS:
4263 case AARCH64_OPND_UIMM4:
4264 case AARCH64_OPND_UIMM3_OP1:
4265 case AARCH64_OPND_UIMM3_OP2:
4266 case AARCH64_OPND_IMM:
4267 case AARCH64_OPND_WIDTH:
4268 case AARCH64_OPND_UIMM7:
4269 case AARCH64_OPND_NZCV:
4270 operand->imm.value = default_value;
4271 break;
4272
4273 case AARCH64_OPND_EXCEPTION:
4274 inst.reloc.type = BFD_RELOC_UNUSED;
4275 break;
4276
4277 case AARCH64_OPND_BARRIER_ISB:
4278       operand->barrier = aarch64_barrier_options + default_value;
4279       break;
4280 default:
4281 break;
4282 }
4283 }
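/* For instance (an assumed example): "ret" omits its optional Rn operand,
   so the backtrack path in parse_operands ends up here and the operand is
   given the opcode's default value (X30 in the case of RET) obtained from
   get_optional_operand_default_value.  */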
4284
4285 /* Process the relocation type for move wide instructions.
4286 Return TRUE on success; otherwise return FALSE. */
4287
4288 static bfd_boolean
4289 process_movw_reloc_info (void)
4290 {
4291 int is32;
4292 unsigned shift;
4293
4294 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4295
4296 if (inst.base.opcode->op == OP_MOVK)
4297 switch (inst.reloc.type)
4298 {
4299 case BFD_RELOC_AARCH64_MOVW_G0_S:
4300 case BFD_RELOC_AARCH64_MOVW_G1_S:
4301 case BFD_RELOC_AARCH64_MOVW_G2_S:
4302 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4303 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4304 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4305 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4306 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4307 set_syntax_error
4308 (_("the specified relocation type is not allowed for MOVK"));
4309 return FALSE;
4310 default:
4311 break;
4312 }
4313
4314 switch (inst.reloc.type)
4315 {
4316 case BFD_RELOC_AARCH64_MOVW_G0:
4317 case BFD_RELOC_AARCH64_MOVW_G0_S:
4318 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4319 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4320 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4321 shift = 0;
4322 break;
4323 case BFD_RELOC_AARCH64_MOVW_G1:
4324 case BFD_RELOC_AARCH64_MOVW_G1_S:
4325 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4326 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4327 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4328 shift = 16;
4329 break;
4330 case BFD_RELOC_AARCH64_MOVW_G2:
4331 case BFD_RELOC_AARCH64_MOVW_G2_S:
4332 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4333 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4334 if (is32)
4335 {
4336 set_fatal_syntax_error
4337 (_("the specified relocation type is not allowed for 32-bit "
4338 "register"));
4339 return FALSE;
4340 }
4341 shift = 32;
4342 break;
4343 case BFD_RELOC_AARCH64_MOVW_G3:
4344 if (is32)
4345 {
4346 set_fatal_syntax_error
4347 (_("the specified relocation type is not allowed for 32-bit "
4348 "register"));
4349 return FALSE;
4350 }
4351 shift = 48;
4352 break;
4353 default:
4354 /* More cases should be added when more MOVW-related relocation types
4355 are supported in GAS. */
4356 gas_assert (aarch64_gas_internal_fixup_p ());
4357 /* The shift amount should have already been set by the parser. */
4358 return TRUE;
4359 }
4360 inst.base.operands[1].shifter.amount = shift;
4361 return TRUE;
4362 }
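/* Example of the shift selection above (a sketch; the ":abs_gN:" modifiers
   are the usual GAS spellings and are assumed here rather than shown in
   this excerpt): "movz x0, #:abs_g1:sym" parses to
   BFD_RELOC_AARCH64_MOVW_G1 and so gets shifter.amount = 16, whereas
   "movz w0, #:abs_g3:sym" is rejected above as not allowed for a 32-bit
   register.  */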
4363
4364 /* A primitive log calculator.  */
4365
4366 static inline unsigned int
4367 get_logsz (unsigned int size)
4368 {
4369 const unsigned char ls[16] =
4370 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4371 if (size > 16)
4372 {
4373 gas_assert (0);
4374 return -1;
4375 }
4376 gas_assert (ls[size - 1] != (unsigned char)-1);
4377 return ls[size - 1];
4378 }
4379
4380 /* Determine and return the real reloc type code for an instruction
4381 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4382
4383 static inline bfd_reloc_code_real_type
4384 ldst_lo12_determine_real_reloc_type (void)
4385 {
4386 int logsz;
4387 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4388 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4389
4390 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4391 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4392 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4393 BFD_RELOC_AARCH64_LDST128_LO12
4394 };
4395
4396 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4397 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4398
4399 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4400 opd1_qlf =
4401 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4402 1, opd0_qlf, 0);
4403 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4404
4405 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4406 gas_assert (logsz >= 0 && logsz <= 4);
4407
4408 return reloc_ldst_lo12[logsz];
4409 }
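/* Illustration (assumed source syntax, not taken from this file): for
   "ldr x0, [x1, #:lo12:sym]" the expected qualifier of the address operand
   is derived from the X-register destination, giving an element size of 8;
   get_logsz returns 3 and the pseudo reloc becomes
   BFD_RELOC_AARCH64_LDST64_LO12.  The B/H/W/Q register forms select the
   LDST8/16/32/128 variants in the same way.  */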
4410
4411 /* Check whether a register list REGINFO is valid. The registers must be
4412 numbered in increasing order (modulo 32), in increments of one or two.
4413
4414 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4415 increments of two.
4416
4417 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4418
4419 static bfd_boolean
4420 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4421 {
4422 uint32_t i, nb_regs, prev_regno, incr;
4423
4424 nb_regs = 1 + (reginfo & 0x3);
4425 reginfo >>= 2;
4426 prev_regno = reginfo & 0x1f;
4427 incr = accept_alternate ? 2 : 1;
4428
4429 for (i = 1; i < nb_regs; ++i)
4430 {
4431 uint32_t curr_regno;
4432 reginfo >>= 5;
4433 curr_regno = reginfo & 0x1f;
4434 if (curr_regno != ((prev_regno + incr) & 0x1f))
4435 return FALSE;
4436 prev_regno = curr_regno;
4437 }
4438
4439 return TRUE;
4440 }
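/* Worked example of the REGINFO layout checked above (illustration only;
   the packing is produced by parse_neon_reg_list): bits [1:0] hold the
   register count minus one and each following 5-bit field holds a register
   number, so a list covering v30, v31, v0, v1 has nb_regs = 4 and every
   step satisfies (prev_regno + incr) & 0x1f, i.e. wrap-around past
   register 31 is accepted.  */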
4441
4442 /* Generic instruction operand parser. This does no encoding and no
4443 semantic validation; it merely squirrels values away in the inst
4444 structure. Returns TRUE or FALSE depending on whether the
4445 specified grammar matched. */
4446
4447 static bfd_boolean
4448 parse_operands (char *str, const aarch64_opcode *opcode)
4449 {
4450 int i;
4451 char *backtrack_pos = 0;
4452 const enum aarch64_opnd *operands = opcode->operands;
4453
4454 clear_error ();
4455 skip_whitespace (str);
4456
4457 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4458 {
4459 int64_t val;
4460 int isreg32, isregzero;
4461 int comma_skipped_p = 0;
4462 aarch64_reg_type rtype;
4463 struct neon_type_el vectype;
4464 aarch64_opnd_info *info = &inst.base.operands[i];
4465
4466 DEBUG_TRACE ("parse operand %d", i);
4467
4468 /* Assign the operand code. */
4469 info->type = operands[i];
4470
4471 if (optional_operand_p (opcode, i))
4472 {
4473 /* Remember where we are in case we need to backtrack. */
4474 gas_assert (!backtrack_pos);
4475 backtrack_pos = str;
4476 }
4477
4478       /* Expect a comma between operands; the backtrack mechanism will take
4479          care of cases of an omitted optional operand.  */
4480 if (i > 0 && ! skip_past_char (&str, ','))
4481 {
4482 set_syntax_error (_("comma expected between operands"));
4483 goto failure;
4484 }
4485 else
4486 comma_skipped_p = 1;
4487
4488 switch (operands[i])
4489 {
4490 case AARCH64_OPND_Rd:
4491 case AARCH64_OPND_Rn:
4492 case AARCH64_OPND_Rm:
4493 case AARCH64_OPND_Rt:
4494 case AARCH64_OPND_Rt2:
4495 case AARCH64_OPND_Rs:
4496 case AARCH64_OPND_Ra:
4497 case AARCH64_OPND_Rt_SYS:
4498 po_int_reg_or_fail (1, 0);
4499 break;
4500
4501 case AARCH64_OPND_Rd_SP:
4502 case AARCH64_OPND_Rn_SP:
4503 po_int_reg_or_fail (0, 1);
4504 break;
4505
4506 case AARCH64_OPND_Rm_EXT:
4507 case AARCH64_OPND_Rm_SFT:
4508 po_misc_or_fail (parse_shifter_operand
4509 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4510 ? SHIFTED_ARITH_IMM
4511 : SHIFTED_LOGIC_IMM)));
4512 if (!info->shifter.operator_present)
4513 {
4514 /* Default to LSL if not present. Libopcodes prefers shifter
4515 kind to be explicit. */
4516 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4517 info->shifter.kind = AARCH64_MOD_LSL;
4518              /* For Rm_EXT, libopcodes will carry out a further check on whether
4519                 or not the stack pointer is used in the instruction (recall that
4520                 "the extend operator is not optional unless at least one of
4521                 Rd or Rn is '11111' (i.e. WSP)").  */
4522 }
4523 break;
4524
4525 case AARCH64_OPND_Fd:
4526 case AARCH64_OPND_Fn:
4527 case AARCH64_OPND_Fm:
4528 case AARCH64_OPND_Fa:
4529 case AARCH64_OPND_Ft:
4530 case AARCH64_OPND_Ft2:
4531 case AARCH64_OPND_Sd:
4532 case AARCH64_OPND_Sn:
4533 case AARCH64_OPND_Sm:
4534 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4535 if (val == PARSE_FAIL)
4536 {
4537 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4538 goto failure;
4539 }
4540 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4541
4542 info->reg.regno = val;
4543 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4544 break;
4545
4546 case AARCH64_OPND_Vd:
4547 case AARCH64_OPND_Vn:
4548 case AARCH64_OPND_Vm:
4549 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4550 if (val == PARSE_FAIL)
4551 {
4552 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4553 goto failure;
4554 }
4555 if (vectype.defined & NTA_HASINDEX)
4556 goto failure;
4557
4558 info->reg.regno = val;
4559 info->qualifier = vectype_to_qualifier (&vectype);
4560 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4561 goto failure;
4562 break;
4563
4564 case AARCH64_OPND_VdD1:
4565 case AARCH64_OPND_VnD1:
4566 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4567 if (val == PARSE_FAIL)
4568 {
4569 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4570 goto failure;
4571 }
4572 if (vectype.type != NT_d || vectype.index != 1)
4573 {
4574 set_fatal_syntax_error
4575 (_("the top half of a 128-bit FP/SIMD register is expected"));
4576 goto failure;
4577 }
4578 info->reg.regno = val;
4579 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
4580 here; it is correct for the purpose of encoding/decoding since
4581 only the register number is explicitly encoded in the related
4582 instructions, although this appears a bit hacky. */
4583 info->qualifier = AARCH64_OPND_QLF_S_D;
4584 break;
4585
4586 case AARCH64_OPND_Ed:
4587 case AARCH64_OPND_En:
4588 case AARCH64_OPND_Em:
4589 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4590 if (val == PARSE_FAIL)
4591 {
4592 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4593 goto failure;
4594 }
4595 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4596 goto failure;
4597
4598 info->reglane.regno = val;
4599 info->reglane.index = vectype.index;
4600 info->qualifier = vectype_to_qualifier (&vectype);
4601 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4602 goto failure;
4603 break;
4604
4605 case AARCH64_OPND_LVn:
4606 case AARCH64_OPND_LVt:
4607 case AARCH64_OPND_LVt_AL:
4608 case AARCH64_OPND_LEt:
4609 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4610 goto failure;
4611 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4612 {
4613 set_fatal_syntax_error (_("invalid register list"));
4614 goto failure;
4615 }
4616 info->reglist.first_regno = (val >> 2) & 0x1f;
4617 info->reglist.num_regs = (val & 0x3) + 1;
4618 if (operands[i] == AARCH64_OPND_LEt)
4619 {
4620 if (!(vectype.defined & NTA_HASINDEX))
4621 goto failure;
4622 info->reglist.has_index = 1;
4623 info->reglist.index = vectype.index;
4624 }
4625 else if (!(vectype.defined & NTA_HASTYPE))
4626 goto failure;
4627 info->qualifier = vectype_to_qualifier (&vectype);
4628 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4629 goto failure;
4630 break;
4631
4632 case AARCH64_OPND_Cn:
4633 case AARCH64_OPND_Cm:
4634 po_reg_or_fail (REG_TYPE_CN);
4635 if (val > 15)
4636 {
4637 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4638 goto failure;
4639 }
4640 inst.base.operands[i].reg.regno = val;
4641 break;
4642
4643 case AARCH64_OPND_SHLL_IMM:
4644 case AARCH64_OPND_IMM_VLSR:
4645 po_imm_or_fail (1, 64);
4646 info->imm.value = val;
4647 break;
4648
4649 case AARCH64_OPND_CCMP_IMM:
4650 case AARCH64_OPND_FBITS:
4651 case AARCH64_OPND_UIMM4:
4652 case AARCH64_OPND_UIMM3_OP1:
4653 case AARCH64_OPND_UIMM3_OP2:
4654 case AARCH64_OPND_IMM_VLSL:
4655 case AARCH64_OPND_IMM:
4656 case AARCH64_OPND_WIDTH:
4657 po_imm_nc_or_fail ();
4658 info->imm.value = val;
4659 break;
4660
4661 case AARCH64_OPND_UIMM7:
4662 po_imm_or_fail (0, 127);
4663 info->imm.value = val;
4664 break;
4665
4666 case AARCH64_OPND_IDX:
4667 case AARCH64_OPND_BIT_NUM:
4668 case AARCH64_OPND_IMMR:
4669 case AARCH64_OPND_IMMS:
4670 po_imm_or_fail (0, 63);
4671 info->imm.value = val;
4672 break;
4673
4674 case AARCH64_OPND_IMM0:
4675 po_imm_nc_or_fail ();
4676 if (val != 0)
4677 {
4678 set_fatal_syntax_error (_("immediate zero expected"));
4679 goto failure;
4680 }
4681 info->imm.value = 0;
4682 break;
4683
4684 case AARCH64_OPND_FPIMM0:
4685 {
4686 int qfloat;
4687 bfd_boolean res1 = FALSE, res2 = FALSE;
4688 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4689 it is probably not worth the effort to support it. */
4690 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat))
4691 && !(res2 = parse_constant_immediate (&str, &val)))
4692 goto failure;
4693 if ((res1 && qfloat == 0) || (res2 && val == 0))
4694 {
4695 info->imm.value = 0;
4696 info->imm.is_fp = 1;
4697 break;
4698 }
4699 set_fatal_syntax_error (_("immediate zero expected"));
4700 goto failure;
4701 }
4702
4703 case AARCH64_OPND_IMM_MOV:
4704 {
4705 char *saved = str;
4706 if (reg_name_p (str, REG_TYPE_R_Z_SP))
4707 goto failure;
4708 str = saved;
4709 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4710 GE_OPT_PREFIX, 1));
4711 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4712 later. fix_mov_imm_insn will try to determine a machine
4713 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4714 message if the immediate cannot be moved by a single
4715 instruction. */
4716 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4717 inst.base.operands[i].skip = 1;
4718 }
4719 break;
4720
4721 case AARCH64_OPND_SIMD_IMM:
4722 case AARCH64_OPND_SIMD_IMM_SFT:
4723 if (! parse_big_immediate (&str, &val))
4724 goto failure;
4725 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4726 /* addr_off_p */ 0,
4727 /* need_libopcodes_p */ 1,
4728 /* skip_p */ 1);
4729 /* Parse shift.
4730 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4731 shift, we don't check it here; we leave the checking to
4732 the libopcodes (operand_general_constraint_met_p). By
4733 doing this, we achieve better diagnostics. */
4734 if (skip_past_comma (&str)
4735 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4736 goto failure;
4737 if (!info->shifter.operator_present
4738 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4739 {
4740 /* Default to LSL if not present. Libopcodes prefers shifter
4741 kind to be explicit. */
4742 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4743 info->shifter.kind = AARCH64_MOD_LSL;
4744 }
4745 break;
4746
4747 case AARCH64_OPND_FPIMM:
4748 case AARCH64_OPND_SIMD_FPIMM:
4749 {
4750 int qfloat;
4751 if (! parse_aarch64_imm_float (&str, &qfloat))
4752 goto failure;
4753 if (qfloat == 0)
4754 {
4755 set_fatal_syntax_error (_("invalid floating-point constant"));
4756 goto failure;
4757 }
4758 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4759 inst.base.operands[i].imm.is_fp = 1;
4760 }
4761 break;
4762
4763 case AARCH64_OPND_LIMM:
4764 po_misc_or_fail (parse_shifter_operand (&str, info,
4765 SHIFTED_LOGIC_IMM));
4766 if (info->shifter.operator_present)
4767 {
4768 set_fatal_syntax_error
4769 (_("shift not allowed for bitmask immediate"));
4770 goto failure;
4771 }
4772 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4773 /* addr_off_p */ 0,
4774 /* need_libopcodes_p */ 1,
4775 /* skip_p */ 1);
4776 break;
4777
4778 case AARCH64_OPND_AIMM:
4779 if (opcode->op == OP_ADD)
4780 /* ADD may have relocation types. */
4781 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
4782 SHIFTED_ARITH_IMM));
4783 else
4784 po_misc_or_fail (parse_shifter_operand (&str, info,
4785 SHIFTED_ARITH_IMM));
4786 switch (inst.reloc.type)
4787 {
4788 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4789 info->shifter.amount = 12;
4790 break;
4791 case BFD_RELOC_UNUSED:
4792 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4793 if (info->shifter.kind != AARCH64_MOD_NONE)
4794 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
4795 inst.reloc.pc_rel = 0;
4796 break;
4797 default:
4798 break;
4799 }
4800 info->imm.value = 0;
4801 if (!info->shifter.operator_present)
4802 {
4803 /* Default to LSL if not present. Libopcodes prefers shifter
4804 kind to be explicit. */
4805 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4806 info->shifter.kind = AARCH64_MOD_LSL;
4807 }
4808 break;
4809
4810 case AARCH64_OPND_HALF:
4811 {
4812 /* #<imm16> or relocation. */
4813 int internal_fixup_p;
4814 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
4815 if (internal_fixup_p)
4816 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4817 skip_whitespace (str);
4818 if (skip_past_comma (&str))
4819 {
4820 /* {, LSL #<shift>} */
4821 if (! aarch64_gas_internal_fixup_p ())
4822 {
4823 set_fatal_syntax_error (_("can't mix relocation modifier "
4824 "with explicit shift"));
4825 goto failure;
4826 }
4827 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
4828 }
4829 else
4830 inst.base.operands[i].shifter.amount = 0;
4831 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
4832 inst.base.operands[i].imm.value = 0;
4833 if (! process_movw_reloc_info ())
4834 goto failure;
4835 }
4836 break;
4837
4838 case AARCH64_OPND_EXCEPTION:
4839 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
4840 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4841 /* addr_off_p */ 0,
4842 /* need_libopcodes_p */ 0,
4843 /* skip_p */ 1);
4844 break;
4845
4846 case AARCH64_OPND_NZCV:
4847 {
4848 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
4849 if (nzcv != NULL)
4850 {
4851 str += 4;
4852 info->imm.value = nzcv->value;
4853 break;
4854 }
4855 po_imm_or_fail (0, 15);
4856 info->imm.value = val;
4857 }
4858 break;
4859
4860 case AARCH64_OPND_COND:
4861 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
4862 str += 2;
4863 if (info->cond == NULL)
4864 {
4865 set_syntax_error (_("invalid condition"));
4866 goto failure;
4867 }
4868 break;
4869
4870 case AARCH64_OPND_ADDR_ADRP:
4871 po_misc_or_fail (parse_adrp (&str));
4872 /* Clear the value as operand needs to be relocated. */
4873 info->imm.value = 0;
4874 break;
4875
4876 case AARCH64_OPND_ADDR_PCREL14:
4877 case AARCH64_OPND_ADDR_PCREL19:
4878 case AARCH64_OPND_ADDR_PCREL21:
4879 case AARCH64_OPND_ADDR_PCREL26:
4880 po_misc_or_fail (parse_address_reloc (&str, info));
4881 if (!info->addr.pcrel)
4882 {
4883 set_syntax_error (_("invalid pc-relative address"));
4884 goto failure;
4885 }
4886 if (inst.gen_lit_pool
4887 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
4888 {
4889 /* Only permit "=value" in the literal load instructions.
4890 The literal will be generated by programmer_friendly_fixup. */
4891 set_syntax_error (_("invalid use of \"=immediate\""));
4892 goto failure;
4893 }
4894 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
4895 {
4896 set_syntax_error (_("unrecognized relocation suffix"));
4897 goto failure;
4898 }
4899 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
4900 {
4901 info->imm.value = inst.reloc.exp.X_add_number;
4902 inst.reloc.type = BFD_RELOC_UNUSED;
4903 }
4904 else
4905 {
4906 info->imm.value = 0;
4907 if (inst.reloc.type == BFD_RELOC_UNUSED)
4908 switch (opcode->iclass)
4909 {
4910 case compbranch:
4911 case condbranch:
4912 /* e.g. CBZ or B.COND */
4913 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
4914 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
4915 break;
4916 case testbranch:
4917 /* e.g. TBZ */
4918 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
4919 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
4920 break;
4921 case branch_imm:
4922 /* e.g. B or BL */
4923 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
4924 inst.reloc.type =
4925 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
4926 : BFD_RELOC_AARCH64_JUMP26;
4927 break;
4928 case loadlit:
4929 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
4930 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
4931 break;
4932 case pcreladdr:
4933 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
4934 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
4935 break;
4936 default:
4937 gas_assert (0);
4938 abort ();
4939 }
4940 inst.reloc.pc_rel = 1;
4941 }
4942 break;
4943
4944 case AARCH64_OPND_ADDR_SIMPLE:
4945 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
4946 /* [<Xn|SP>{, #<simm>}] */
4947 po_char_or_fail ('[');
4948 po_reg_or_fail (REG_TYPE_R64_SP);
4949 /* Accept optional ", #0". */
4950 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
4951 && skip_past_char (&str, ','))
4952 {
4953 skip_past_char (&str, '#');
4954 if (! skip_past_char (&str, '0'))
4955 {
4956 set_fatal_syntax_error
4957 (_("the optional immediate offset can only be 0"));
4958 goto failure;
4959 }
4960 }
4961 po_char_or_fail (']');
4962 info->addr.base_regno = val;
4963 break;
4964
4965 case AARCH64_OPND_ADDR_REGOFF:
4966 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
4967 po_misc_or_fail (parse_address (&str, info, 0));
4968 if (info->addr.pcrel || !info->addr.offset.is_reg
4969 || !info->addr.preind || info->addr.postind
4970 || info->addr.writeback)
4971 {
4972 set_syntax_error (_("invalid addressing mode"));
4973 goto failure;
4974 }
4975 if (!info->shifter.operator_present)
4976 {
4977 /* Default to LSL if not present. Libopcodes prefers shifter
4978 kind to be explicit. */
4979 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4980 info->shifter.kind = AARCH64_MOD_LSL;
4981 }
4982 /* Qualifier to be deduced by libopcodes. */
4983 break;
4984
4985 case AARCH64_OPND_ADDR_SIMM7:
4986 po_misc_or_fail (parse_address (&str, info, 0));
4987 if (info->addr.pcrel || info->addr.offset.is_reg
4988 || (!info->addr.preind && !info->addr.postind))
4989 {
4990 set_syntax_error (_("invalid addressing mode"));
4991 goto failure;
4992 }
4993 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4994 /* addr_off_p */ 1,
4995 /* need_libopcodes_p */ 1,
4996 /* skip_p */ 0);
4997 break;
4998
4999 case AARCH64_OPND_ADDR_SIMM9:
5000 case AARCH64_OPND_ADDR_SIMM9_2:
5001 po_misc_or_fail (parse_address_reloc (&str, info));
5002 if (info->addr.pcrel || info->addr.offset.is_reg
5003 || (!info->addr.preind && !info->addr.postind)
5004 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5005 && info->addr.writeback))
5006 {
5007 set_syntax_error (_("invalid addressing mode"));
5008 goto failure;
5009 }
5010 if (inst.reloc.type != BFD_RELOC_UNUSED)
5011 {
5012 set_syntax_error (_("relocation not allowed"));
5013 goto failure;
5014 }
5015 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5016 /* addr_off_p */ 1,
5017 /* need_libopcodes_p */ 1,
5018 /* skip_p */ 0);
5019 break;
5020
5021 case AARCH64_OPND_ADDR_UIMM12:
5022 po_misc_or_fail (parse_address_reloc (&str, info));
5023 if (info->addr.pcrel || info->addr.offset.is_reg
5024 || !info->addr.preind || info->addr.writeback)
5025 {
5026 set_syntax_error (_("invalid addressing mode"));
5027 goto failure;
5028 }
5029 if (inst.reloc.type == BFD_RELOC_UNUSED)
5030 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5031 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5032 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5033 /* Leave qualifier to be determined by libopcodes. */
5034 break;
5035
5036 case AARCH64_OPND_SIMD_ADDR_POST:
5037 /* [<Xn|SP>], <Xm|#<amount>> */
5038 po_misc_or_fail (parse_address (&str, info, 1));
5039 if (!info->addr.postind || !info->addr.writeback)
5040 {
5041 set_syntax_error (_("invalid addressing mode"));
5042 goto failure;
5043 }
5044 if (!info->addr.offset.is_reg)
5045 {
5046 if (inst.reloc.exp.X_op == O_constant)
5047 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5048 else
5049 {
5050 set_fatal_syntax_error
5051 (_("writeback value should be an immediate constant"));
5052 goto failure;
5053 }
5054 }
5055 /* No qualifier. */
5056 break;
5057
5058 case AARCH64_OPND_SYSREG:
5059 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5060 == PARSE_FAIL)
5061 {
5062 set_syntax_error (_("unknown or missing system register name"));
5063 goto failure;
5064 }
5065 inst.base.operands[i].sysreg = val;
5066 break;
5067
5068 case AARCH64_OPND_PSTATEFIELD:
5069 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5070 == PARSE_FAIL)
5071 {
5072 set_syntax_error (_("unknown or missing PSTATE field name"));
5073 goto failure;
5074 }
5075 inst.base.operands[i].pstatefield = val;
5076 break;
5077
5078 case AARCH64_OPND_SYSREG_IC:
5079 inst.base.operands[i].sysins_op =
5080 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5081 goto sys_reg_ins;
5082 case AARCH64_OPND_SYSREG_DC:
5083 inst.base.operands[i].sysins_op =
5084 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5085 goto sys_reg_ins;
5086 case AARCH64_OPND_SYSREG_AT:
5087 inst.base.operands[i].sysins_op =
5088 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5089 goto sys_reg_ins;
5090 case AARCH64_OPND_SYSREG_TLBI:
5091 inst.base.operands[i].sysins_op =
5092 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5093 sys_reg_ins:
5094 if (inst.base.operands[i].sysins_op == NULL)
5095 {
5096 set_fatal_syntax_error ( _("unknown or missing operation name"));
5097 goto failure;
5098 }
5099 break;
5100
5101 case AARCH64_OPND_BARRIER:
5102 case AARCH64_OPND_BARRIER_ISB:
5103 val = parse_barrier (&str);
5104 if (val != PARSE_FAIL
5105 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5106 {
5107              /* ISB only accepts the option name 'sy'.  */
5108 set_syntax_error
5109 (_("the specified option is not accepted in ISB"));
5110 /* Turn off backtrack as this optional operand is present. */
5111 backtrack_pos = 0;
5112 goto failure;
5113 }
5114 /* This is an extension to accept a 0..15 immediate. */
5115 if (val == PARSE_FAIL)
5116 po_imm_or_fail (0, 15);
5117 info->barrier = aarch64_barrier_options + val;
5118 break;
5119
5120 case AARCH64_OPND_PRFOP:
5121 val = parse_pldop (&str);
5122 /* This is an extension to accept a 0..31 immediate. */
5123 if (val == PARSE_FAIL)
5124 po_imm_or_fail (0, 31);
5125 inst.base.operands[i].prfop = aarch64_prfops + val;
5126 break;
5127
5128 default:
5129 as_fatal (_("unhandled operand code %d"), operands[i]);
5130 }
5131
5132 /* If we get here, this operand was successfully parsed. */
5133 inst.base.operands[i].present = 1;
5134 continue;
5135
5136 failure:
5137 /* The parse routine should already have set the error, but in case
5138 not, set a default one here. */
5139 if (! error_p ())
5140 set_default_error ();
5141
5142 if (! backtrack_pos)
5143 goto parse_operands_return;
5144
5145 /* Reaching here means we are dealing with an optional operand that is
5146 omitted from the assembly line. */
5147 gas_assert (optional_operand_p (opcode, i));
5148 info->present = 0;
5149 process_omitted_operand (operands[i], opcode, i, info);
5150
5151 /* Try again, skipping the optional operand at backtrack_pos. */
5152 str = backtrack_pos;
5153 backtrack_pos = 0;
5154
5155       /* If this is the last operand, which is optional and omitted, but a
5156          comma was already consumed before it, that comma is unexpected.  */
5157 if (i && comma_skipped_p && i == aarch64_num_of_operands (opcode) - 1)
5158 {
5159 set_fatal_syntax_error
5160 (_("unexpected comma before the omitted optional operand"));
5161 goto parse_operands_return;
5162 }
5163
5164 /* Clear any error record after the omitted optional operand has been
5165 successfully handled. */
5166 clear_error ();
5167 }
5168
5169 /* Check if we have parsed all the operands. */
5170 if (*str != '\0' && ! error_p ())
5171 {
5172 /* Set I to the index of the last present operand; this is
5173 for the purpose of diagnostics. */
5174 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5175 ;
5176 set_fatal_syntax_error
5177 (_("unexpected characters following instruction"));
5178 }
5179
5180 parse_operands_return:
5181
5182 if (error_p ())
5183 {
5184 DEBUG_TRACE ("parsing FAIL: %s - %s",
5185 operand_mismatch_kind_names[get_error_kind ()],
5186 get_error_message ());
5187 /* Record the operand error properly; this is useful when there
5188 are multiple instruction templates for a mnemonic name, so that
5189 later on, we can select the error that most closely describes
5190 the problem. */
5191 record_operand_error (opcode, i, get_error_kind (),
5192 get_error_message ());
5193 return FALSE;
5194 }
5195 else
5196 {
5197 DEBUG_TRACE ("parsing SUCCESS");
5198 return TRUE;
5199 }
5200 }
5201
5202 /* Perform some fix-ups to provide programmer-friendly features while
5203    keeping libopcodes happy, i.e. libopcodes only accepts
5204    the preferred architectural syntax.
5205 Return FALSE if there is any failure; otherwise return TRUE. */
5206
5207 static bfd_boolean
5208 programmer_friendly_fixup (aarch64_instruction *instr)
5209 {
5210 aarch64_inst *base = &instr->base;
5211 const aarch64_opcode *opcode = base->opcode;
5212 enum aarch64_op op = opcode->op;
5213 aarch64_opnd_info *operands = base->operands;
5214
5215 DEBUG_TRACE ("enter");
5216
5217 switch (opcode->iclass)
5218 {
5219 case testbranch:
5220 /* TBNZ Xn|Wn, #uimm6, label
5221 Test and Branch Not Zero: conditionally jumps to label if bit number
5222 uimm6 in register Xn is not zero. The bit number implies the width of
5223 the register, which may be written and should be disassembled as Wn if
5224          uimm6 is less than 32.  */
5225 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5226 {
5227 if (operands[1].imm.value >= 32)
5228 {
5229 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5230 0, 31);
5231 return FALSE;
5232 }
5233 operands[0].qualifier = AARCH64_OPND_QLF_X;
5234 }
5235 break;
5236 case loadlit:
5237 /* LDR Wt, label | =value
5238 As a convenience assemblers will typically permit the notation
5239 "=value" in conjunction with the pc-relative literal load instructions
5240 to automatically place an immediate value or symbolic address in a
5241 nearby literal pool and generate a hidden label which references it.
5242 ISREG has been set to 0 in the case of =value. */
5243 if (instr->gen_lit_pool
5244 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5245 {
5246 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5247 if (op == OP_LDRSW_LIT)
5248 size = 4;
5249 if (instr->reloc.exp.X_op != O_constant
5250 && instr->reloc.exp.X_op != O_big
5251 && instr->reloc.exp.X_op != O_symbol)
5252 {
5253 record_operand_error (opcode, 1,
5254 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5255 _("constant expression expected"));
5256 return FALSE;
5257 }
5258 if (! add_to_lit_pool (&instr->reloc.exp, size))
5259 {
5260 record_operand_error (opcode, 1,
5261 AARCH64_OPDE_OTHER_ERROR,
5262 _("literal pool insertion failed"));
5263 return FALSE;
5264 }
5265 }
5266 break;
5267 case log_shift:
5268 case bitfield:
5269 /* UXT[BHW] Wd, Wn
5270          Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5271          for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5272 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5273 A programmer-friendly assembler should accept a destination Xd in
5274 place of Wd, however that is not the preferred form for disassembly.
5275 */
5276 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5277 && operands[1].qualifier == AARCH64_OPND_QLF_W
5278 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5279 operands[0].qualifier = AARCH64_OPND_QLF_W;
5280 break;
5281
5282 case addsub_ext:
5283 {
5284 /* In the 64-bit form, the final register operand is written as Wm
5285 for all but the (possibly omitted) UXTX/LSL and SXTX
5286 operators.
5287 As a programmer-friendly assembler, we accept e.g.
5288 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5289 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5290 int idx = aarch64_operand_index (opcode->operands,
5291 AARCH64_OPND_Rm_EXT);
5292 gas_assert (idx == 1 || idx == 2);
5293 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5294 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5295 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5296 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5297 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5298 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5299 }
5300 break;
5301
5302 default:
5303 break;
5304 }
5305
5306 DEBUG_TRACE ("exit with SUCCESS");
5307 return TRUE;
5308 }
5309
5310 /* A wrapper function to interface with libopcodes on encoding and
5311 record the error message if there is any.
5312
5313 Return TRUE on success; otherwise return FALSE. */
5314
5315 static bfd_boolean
5316 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5317 aarch64_insn *code)
5318 {
5319 aarch64_operand_error error_info;
5320 error_info.kind = AARCH64_OPDE_NIL;
5321 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5322 return TRUE;
5323 else
5324 {
5325 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5326 record_operand_error_info (opcode, &error_info);
5327 return FALSE;
5328 }
5329 }
5330
5331 #ifdef DEBUG_AARCH64
5332 static inline void
5333 dump_opcode_operands (const aarch64_opcode *opcode)
5334 {
5335 int i = 0;
5336 while (opcode->operands[i] != AARCH64_OPND_NIL)
5337 {
5338 aarch64_verbose ("\t\t opnd%d: %s", i,
5339 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5340 ? aarch64_get_operand_name (opcode->operands[i])
5341 : aarch64_get_operand_desc (opcode->operands[i]));
5342 ++i;
5343 }
5344 }
5345 #endif /* DEBUG_AARCH64 */
5346
5347 /* This is the guts of the machine-dependent assembler. STR points to a
5348 machine dependent instruction. This function is supposed to emit
5349 the frags/bytes it assembles to. */
5350
5351 void
5352 md_assemble (char *str)
5353 {
5354 char *p = str;
5355 templates *template;
5356 aarch64_opcode *opcode;
5357 aarch64_inst *inst_base;
5358 unsigned saved_cond;
5359
5360 /* Align the previous label if needed. */
5361 if (last_label_seen != NULL)
5362 {
5363 symbol_set_frag (last_label_seen, frag_now);
5364 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5365 S_SET_SEGMENT (last_label_seen, now_seg);
5366 }
5367
5368 inst.reloc.type = BFD_RELOC_UNUSED;
5369
5370 DEBUG_TRACE ("\n\n");
5371 DEBUG_TRACE ("==============================");
5372 DEBUG_TRACE ("Enter md_assemble with %s", str);
5373
5374 template = opcode_lookup (&p);
5375 if (!template)
5376 {
5377       /* It wasn't an instruction, but it might be a register alias of
5378          the form "alias .req reg".  */
5379 if (!create_register_alias (str, p))
5380 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5381 str);
5382 return;
5383 }
5384
5385 skip_whitespace (p);
5386 if (*p == ',')
5387 {
5388 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5389 get_mnemonic_name (str), str);
5390 return;
5391 }
5392
5393 init_operand_error_report ();
5394
5395 saved_cond = inst.cond;
5396 reset_aarch64_instruction (&inst);
5397 inst.cond = saved_cond;
5398
5399 /* Iterate through all opcode entries with the same mnemonic name. */
5400 do
5401 {
5402 opcode = template->opcode;
5403
5404 DEBUG_TRACE ("opcode %s found", opcode->name);
5405 #ifdef DEBUG_AARCH64
5406 if (debug_dump)
5407 dump_opcode_operands (opcode);
5408 #endif /* DEBUG_AARCH64 */
5409
5410 /* Check that this instruction is supported for this CPU. */
5411 if (!opcode->avariant
5412 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5413 {
5414 as_bad (_("selected processor does not support `%s'"), str);
5415 return;
5416 }
5417
5418 mapping_state (MAP_INSN);
5419
5420 inst_base = &inst.base;
5421 inst_base->opcode = opcode;
5422
5423 /* Truly conditionally executed instructions, e.g. b.cond. */
5424 if (opcode->flags & F_COND)
5425 {
5426 gas_assert (inst.cond != COND_ALWAYS);
5427 inst_base->cond = get_cond_from_value (inst.cond);
5428 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5429 }
5430 else if (inst.cond != COND_ALWAYS)
5431 {
5432          /* Control should not reach here: the assembly line looks like a
5433             conditional instruction but the opcode found is unconditional.  */
5434 gas_assert (0);
5435 continue;
5436 }
5437
5438 if (parse_operands (p, opcode)
5439 && programmer_friendly_fixup (&inst)
5440 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5441 {
5442 if (inst.reloc.type == BFD_RELOC_UNUSED
5443 || !inst.reloc.need_libopcodes_p)
5444 output_inst (NULL);
5445 else
5446 {
5447 /* If there is relocation generated for the instruction,
5448 store the instruction information for the future fix-up. */
5449 struct aarch64_inst *copy;
5450 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5451 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5452 abort ();
5453 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5454 output_inst (copy);
5455 }
5456 return;
5457 }
5458
5459 template = template->next;
5460 if (template != NULL)
5461 {
5462 reset_aarch64_instruction (&inst);
5463 inst.cond = saved_cond;
5464 }
5465 }
5466 while (template != NULL);
5467
5468 /* Issue the error messages if any. */
5469 output_operand_error_report (str);
5470 }
5471
5472 /* Various frobbings of labels and their addresses. */
5473
5474 void
5475 aarch64_start_line_hook (void)
5476 {
5477 last_label_seen = NULL;
5478 }
5479
5480 void
5481 aarch64_frob_label (symbolS * sym)
5482 {
5483 last_label_seen = sym;
5484
5485 dwarf2_emit_label (sym);
5486 }
5487
5488 int
5489 aarch64_data_in_code (void)
5490 {
5491 if (!strncmp (input_line_pointer + 1, "data:", 5))
5492 {
5493 *input_line_pointer = '/';
5494 input_line_pointer += 5;
5495 *input_line_pointer = 0;
5496 return 1;
5497 }
5498
5499 return 0;
5500 }
5501
5502 char *
5503 aarch64_canonicalize_symbol_name (char *name)
5504 {
5505 int len;
5506
5507 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5508 *(name + len - 5) = 0;
5509
5510 return name;
5511 }
5512 \f
5513 /* Table of all register names defined by default. The user can
5514 define additional names with .req. Note that all register names
5515 should appear in both upper and lowercase variants. Some registers
5516 also have mixed-case names. */
5517
5518 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5519 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5520 #define REGSET31(p,t) \
5521 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5522 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5523 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5524 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5525 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5526 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5527 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5528 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5529 #define REGSET(p,t) \
5530 REGSET31(p,t), REGNUM(p,31,t)
5531
5532 /* These go into aarch64_reg_hsh hash-table. */
5533 static const reg_entry reg_names[] = {
5534 /* Integer registers. */
5535 REGSET31 (x, R_64), REGSET31 (X, R_64),
5536 REGSET31 (w, R_32), REGSET31 (W, R_32),
5537
5538 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5539 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5540
5541 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5542 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5543
5544 /* Coprocessor register numbers. */
5545 REGSET (c, CN), REGSET (C, CN),
5546
5547 /* Floating-point single precision registers. */
5548 REGSET (s, FP_S), REGSET (S, FP_S),
5549
5550 /* Floating-point double precision registers. */
5551 REGSET (d, FP_D), REGSET (D, FP_D),
5552
5553 /* Floating-point half precision registers. */
5554 REGSET (h, FP_H), REGSET (H, FP_H),
5555
5556 /* Floating-point byte precision registers. */
5557 REGSET (b, FP_B), REGSET (B, FP_B),
5558
5559 /* Floating-point quad precision registers. */
5560 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5561
5562 /* FP/SIMD registers. */
5563 REGSET (v, VN), REGSET (V, VN),
5564 };
5565
5566 #undef REGDEF
5567 #undef REGNUM
5568 #undef REGSET
5569
5570 #define N 1
5571 #define n 0
5572 #define Z 1
5573 #define z 0
5574 #define C 1
5575 #define c 0
5576 #define V 1
5577 #define v 0
5578 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5579 static const asm_nzcv nzcv_names[] = {
5580 {"nzcv", B (n, z, c, v)},
5581 {"nzcV", B (n, z, c, V)},
5582 {"nzCv", B (n, z, C, v)},
5583 {"nzCV", B (n, z, C, V)},
5584 {"nZcv", B (n, Z, c, v)},
5585 {"nZcV", B (n, Z, c, V)},
5586 {"nZCv", B (n, Z, C, v)},
5587 {"nZCV", B (n, Z, C, V)},
5588 {"Nzcv", B (N, z, c, v)},
5589 {"NzcV", B (N, z, c, V)},
5590 {"NzCv", B (N, z, C, v)},
5591 {"NzCV", B (N, z, C, V)},
5592 {"NZcv", B (N, Z, c, v)},
5593 {"NZcV", B (N, Z, c, V)},
5594 {"NZCv", B (N, Z, C, v)},
5595 {"NZCV", B (N, Z, C, V)}
5596 };
5597
5598 #undef N
5599 #undef n
5600 #undef Z
5601 #undef z
5602 #undef C
5603 #undef c
5604 #undef V
5605 #undef v
5606 #undef B
5607 \f
5608 /* MD interface: bits in the object file. */
5609
5610 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5611    for use in the a.out file, and store them in the array pointed to by buf.
5612    This knows about the endianness of the target machine and does
5613    THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
5614    2 (short) and 4 (long).  Floating numbers are put out as a series of
5615 LITTLENUMS (shorts, here at least). */
5616
5617 void
5618 md_number_to_chars (char *buf, valueT val, int n)
5619 {
5620 if (target_big_endian)
5621 number_to_chars_bigendian (buf, val, n);
5622 else
5623 number_to_chars_littleendian (buf, val, n);
5624 }
5625
5626 /* MD interface: Sections. */
5627
5628 /* Estimate the size of a frag before relaxing. Assume everything fits in
5629 4 bytes. */
5630
5631 int
5632 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5633 {
5634 fragp->fr_var = 4;
5635 return 4;
5636 }
5637
5638 /* Round up a section size to the appropriate boundary. */
5639
5640 valueT
5641 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5642 {
5643 return size;
5644 }
5645
5646 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5647 of an rs_align_code fragment. */
5648
5649 void
5650 aarch64_handle_align (fragS * fragP)
5651 {
5652 /* NOP = d503201f */
5653 /* AArch64 instructions are always little-endian. */
5654 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5655
5656 int bytes, fix, noop_size;
5657 char *p;
5658 const char *noop;
5659
5660 if (fragP->fr_type != rs_align_code)
5661 return;
5662
5663 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5664 p = fragP->fr_literal + fragP->fr_fix;
5665 fix = 0;
5666
5667 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
5668 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
5669
5670 #ifdef OBJ_ELF
5671 gas_assert (fragP->tc_frag_data.recorded);
5672 #endif
5673
5674 noop = aarch64_noop;
5675 noop_size = sizeof (aarch64_noop);
5676 fragP->fr_var = noop_size;
5677
5678 if (bytes & (noop_size - 1))
5679 {
5680 fix = bytes & (noop_size - 1);
5681 #ifdef OBJ_ELF
5682 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5683 #endif
5684 memset (p, 0, fix);
5685 p += fix;
5686 bytes -= fix;
5687 }
5688
5689 while (bytes >= noop_size)
5690 {
5691 memcpy (p, noop, noop_size);
5692 p += noop_size;
5693 bytes -= noop_size;
5694 fix += noop_size;
5695 }
5696
5697 fragP->fr_fix += fix;
5698 }
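/* Example of the padding logic above (a sketch): if an alignment leaves 6
   bytes to fill in a code section, fix starts as 6 & 3 = 2, so two zero
   bytes are emitted first (with a data mapping symbol under ELF) and a
   single 4-byte NOP (d503201f) fills the remaining space.  */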
5699
5700 /* Called from md_do_align. Used to create an alignment
5701 frag in a code section. */
5702
5703 void
5704 aarch64_frag_align_code (int n, int max)
5705 {
5706 char *p;
5707
5708 /* We assume that there will never be a requirement
5709      to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
5710 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
5711 as_fatal (_
5712 ("alignments greater than %d bytes not supported in .text sections"),
5713 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
5714
5715 p = frag_var (rs_align_code,
5716 MAX_MEM_FOR_RS_ALIGN_CODE,
5717 1,
5718 (relax_substateT) max,
5719 (symbolS *) NULL, (offsetT) n, (char *) NULL);
5720 *p = 0;
5721 }
5722
5723 /* Perform target specific initialisation of a frag.
5724 Note - despite the name this initialisation is not done when the frag
5725 is created, but only when its type is assigned. A frag can be created
5726 and used a long time before its type is set, so beware of assuming that
5727    this initialisation is performed first.  */
5728
5729 #ifndef OBJ_ELF
5730 void
5731 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
5732 int max_chars ATTRIBUTE_UNUSED)
5733 {
5734 }
5735
5736 #else /* OBJ_ELF is defined. */
5737 void
5738 aarch64_init_frag (fragS * fragP, int max_chars)
5739 {
5740 /* Record a mapping symbol for alignment frags. We will delete this
5741 later if the alignment ends up empty. */
5742 if (!fragP->tc_frag_data.recorded)
5743 {
5744 fragP->tc_frag_data.recorded = 1;
5745 switch (fragP->fr_type)
5746 {
5747 case rs_align:
5748 case rs_align_test:
5749 case rs_fill:
5750 mapping_state_2 (MAP_DATA, max_chars);
5751 break;
5752 case rs_align_code:
5753 mapping_state_2 (MAP_INSN, max_chars);
5754 break;
5755 default:
5756 break;
5757 }
5758 }
5759 }
5760 \f
5761 /* Initialize the DWARF-2 unwind information for this procedure. */
5762
5763 void
5764 tc_aarch64_frame_initial_instructions (void)
5765 {
5766 cfi_add_CFA_def_cfa (REG_SP, 0);
5767 }
5768 #endif /* OBJ_ELF */
5769
5770 /* Convert REGNAME to a DWARF-2 register number. */
5771
5772 int
5773 tc_aarch64_regname_to_dw2regnum (char *regname)
5774 {
5775 const reg_entry *reg = parse_reg (&regname);
5776 if (reg == NULL)
5777 return -1;
5778
5779 switch (reg->type)
5780 {
5781 case REG_TYPE_SP_32:
5782 case REG_TYPE_SP_64:
5783 case REG_TYPE_R_32:
5784 case REG_TYPE_R_64:
5785 case REG_TYPE_FP_B:
5786 case REG_TYPE_FP_H:
5787 case REG_TYPE_FP_S:
5788 case REG_TYPE_FP_D:
5789 case REG_TYPE_FP_Q:
5790 return reg->number;
5791 default:
5792 break;
5793 }
5794 return -1;
5795 }
5796
5797 /* MD interface: Symbol and relocation handling. */
5798
5799 /* Return the address within the segment that a PC-relative fixup is
5800    relative to.  For AArch64, PC-relative fixups applied to instructions
5801 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
5802
5803 long
5804 md_pcrel_from_section (fixS * fixP, segT seg)
5805 {
5806 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
5807
5808 /* If this is pc-relative and we are going to emit a relocation
5809 then we just want to put out any pipeline compensation that the linker
5810 will need. Otherwise we want to use the calculated base. */
5811 if (fixP->fx_pcrel
5812 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
5813 || aarch64_force_relocation (fixP)))
5814 base = 0;
5815
5816 /* AArch64 should be consistent for all pc-relative relocations. */
5817 return base + AARCH64_PCREL_OFFSET;
5818 }
5819
5820 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
5821    Otherwise we have no need to provide default values for symbols.  */
5822
5823 symbolS *
5824 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
5825 {
5826 #ifdef OBJ_ELF
5827 if (name[0] == '_' && name[1] == 'G'
5828 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
5829 {
5830 if (!GOT_symbol)
5831 {
5832 if (symbol_find (name))
5833 as_bad (_("GOT already in the symbol table"));
5834
5835 GOT_symbol = symbol_new (name, undefined_section,
5836 (valueT) 0, &zero_address_frag);
5837 }
5838
5839 return GOT_symbol;
5840 }
5841 #endif
5842
5843 return 0;
5844 }
5845
5846 /* Return non-zero if the indicated VALUE has overflowed the maximum
5847    range expressible by an unsigned number with the indicated number of
5848 BITS. */
5849
5850 static bfd_boolean
5851 unsigned_overflow (valueT value, unsigned bits)
5852 {
5853 valueT lim;
5854 if (bits >= sizeof (valueT) * 8)
5855 return FALSE;
5856 lim = (valueT) 1 << bits;
5857 return (value >= lim);
5858 }
5859
5860
5861 /* Return non-zero if the indicated VALUE has overflowed the maximum
5862    range expressible by a signed number with the indicated number of
5863 BITS. */
5864
5865 static bfd_boolean
5866 signed_overflow (offsetT value, unsigned bits)
5867 {
5868 offsetT lim;
5869 if (bits >= sizeof (offsetT) * 8)
5870 return FALSE;
5871 lim = (offsetT) 1 << (bits - 1);
5872 return (value < -lim || value >= lim);
5873 }
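/* For example, signed_overflow (value, 9) is FALSE exactly when value lies
   in [-256, 255], the range of the 9-bit signed immediates used by the
   unscaled load/store forms handled below.  */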
5874
5875 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
5876 unsigned immediate offset load/store instruction, try to encode it as
5877 an unscaled, 9-bit, signed immediate offset load/store instruction.
5878 Return TRUE if it is successful; otherwise return FALSE.
5879
5880 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
5881 in response to the standard LDR/STR mnemonics when the immediate offset is
5882 unambiguous, i.e. when it is negative or unaligned. */
5883
5884 static bfd_boolean
5885 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
5886 {
5887 int idx;
5888 enum aarch64_op new_op;
5889 const aarch64_opcode *new_opcode;
5890
5891 gas_assert (instr->opcode->iclass == ldst_pos);
5892
5893 switch (instr->opcode->op)
5894 {
5895 case OP_LDRB_POS:new_op = OP_LDURB; break;
5896 case OP_STRB_POS: new_op = OP_STURB; break;
5897 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
5898 case OP_LDRH_POS: new_op = OP_LDURH; break;
5899 case OP_STRH_POS: new_op = OP_STURH; break;
5900 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
5901 case OP_LDR_POS: new_op = OP_LDUR; break;
5902 case OP_STR_POS: new_op = OP_STUR; break;
5903 case OP_LDRF_POS: new_op = OP_LDURV; break;
5904 case OP_STRF_POS: new_op = OP_STURV; break;
5905 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
5906 case OP_PRFM_POS: new_op = OP_PRFUM; break;
5907 default: new_op = OP_NIL; break;
5908 }
5909
5910 if (new_op == OP_NIL)
5911 return FALSE;
5912
5913 new_opcode = aarch64_get_opcode (new_op);
5914 gas_assert (new_opcode != NULL);
5915
5916 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
5917 instr->opcode->op, new_opcode->op);
5918
5919 aarch64_replace_opcode (instr, new_opcode);
5920
5921   /* Clear the ADDR_SIMM9 operand's qualifier; otherwise the
5922      qualifier matching may fail because the out-of-date qualifier will
5923      prevent the operand from being updated with a new and correct qualifier.  */
5924 idx = aarch64_operand_index (instr->opcode->operands,
5925 AARCH64_OPND_ADDR_SIMM9);
5926 gas_assert (idx == 1);
5927 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
5928
5929 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
5930
5931 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
5932 return FALSE;
5933
5934 return TRUE;
5935 }
5936
5937 /* Called by fix_insn to fix a MOV immediate alias instruction.
5938
5939    Operand for a generic move immediate instruction, which is an alias
5940    instruction that generates a single MOVZ, MOVN or ORR instruction to load
5941    a 32-bit/64-bit immediate value into a general register.  An assembler
5942    error shall result if the immediate cannot be created by a single one of
5943    these instructions.  If there is a choice, then to ensure reversibility an
5944    assembler must prefer MOVZ to MOVN, and MOVZ or MOVN to ORR.  */
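/* For instance:

     mov	x0, #0x12340000		// representable by MOVZ (#0x1234, LSL #16)
     mov	x0, #-1			// representable by MOVN
     mov	x0, #0x00ff00ff00ff00ff	// representable by ORR with a bitmask immediate
     mov	x0, #0x12345		// none of the three fit -> the error below  */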
5945
5946 static void
5947 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
5948 {
5949 const aarch64_opcode *opcode;
5950
5951 /* Need to check if the destination is SP/ZR. The check has to be done
5952 before any aarch64_replace_opcode. */
5953 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
5954 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
5955
5956 instr->operands[1].imm.value = value;
5957 instr->operands[1].skip = 0;
5958
5959 if (try_mov_wide_p)
5960 {
5961 /* Try the MOVZ alias. */
5962 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
5963 aarch64_replace_opcode (instr, opcode);
5964 if (aarch64_opcode_encode (instr->opcode, instr,
5965 &instr->value, NULL, NULL))
5966 {
5967 put_aarch64_insn (buf, instr->value);
5968 return;
5969 }
5970       /* Try the MOVN alias.  */
5971 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
5972 aarch64_replace_opcode (instr, opcode);
5973 if (aarch64_opcode_encode (instr->opcode, instr,
5974 &instr->value, NULL, NULL))
5975 {
5976 put_aarch64_insn (buf, instr->value);
5977 return;
5978 }
5979 }
5980
5981 if (try_mov_bitmask_p)
5982 {
5983 /* Try the ORR alias. */
5984 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
5985 aarch64_replace_opcode (instr, opcode);
5986 if (aarch64_opcode_encode (instr->opcode, instr,
5987 &instr->value, NULL, NULL))
5988 {
5989 put_aarch64_insn (buf, instr->value);
5990 return;
5991 }
5992 }
5993
5994 as_bad_where (fixP->fx_file, fixP->fx_line,
5995 _("immediate cannot be moved by a single instruction"));
5996 }
5997
5998 /* An instruction operand which is immediate-related may have a symbol used
5999 in the assembly, e.g.
6000
6001 mov w0, u32
6002 .set u32, 0x00ffff00
6003
6004 At the time when the assembly instruction is parsed, a referenced symbol,
6005    like 'u32' in the above example, may not have been seen; a fixS is created
6006 in such a case and is handled here after symbols have been resolved.
6007    The instruction is fixed up with VALUE using the information in *FIXP plus
6008 extra information in FLAGS.
6009
6010 This function is called by md_apply_fix to fix up instructions that need
6011 a fix-up described above but does not involve any linker-time relocation. */
6012
6013 static void
6014 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6015 {
6016 int idx;
6017 uint32_t insn;
6018 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6019 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6020 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6021
6022 if (new_inst)
6023 {
6024 /* Now the instruction is about to be fixed-up, so the operand that
6025 was previously marked as 'ignored' needs to be unmarked in order
6026 to get the encoding done properly. */
6027 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6028 new_inst->operands[idx].skip = 0;
6029 }
6030
6031 gas_assert (opnd != AARCH64_OPND_NIL);
6032
6033 switch (opnd)
6034 {
6035 case AARCH64_OPND_EXCEPTION:
6036 if (unsigned_overflow (value, 16))
6037 as_bad_where (fixP->fx_file, fixP->fx_line,
6038 _("immediate out of range"));
6039 insn = get_aarch64_insn (buf);
6040 insn |= encode_svc_imm (value);
6041 put_aarch64_insn (buf, insn);
6042 break;
6043
6044 case AARCH64_OPND_AIMM:
6045 /* ADD or SUB with immediate.
6046 	 NOTE this assumes we come here with an add/sub shifted reg encoding
6047 3 322|2222|2 2 2 21111 111111
6048 1 098|7654|3 2 1 09876 543210 98765 43210
6049 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6050 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6051 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6052 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6053 ->
6054 3 322|2222|2 2 221111111111
6055 1 098|7654|3 2 109876543210 98765 43210
6056 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6057 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6058 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6059 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6060 Fields sf Rn Rd are already set. */
6061 insn = get_aarch64_insn (buf);
6062 if (value < 0)
6063 {
6064 /* Add <-> sub. */
6065 insn = reencode_addsub_switch_add_sub (insn);
6066 value = -value;
6067 }
6068
6069 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6070 && unsigned_overflow (value, 12))
6071 {
6072 /* Try to shift the value by 12 to make it fit. */
6073 if (((value >> 12) << 12) == value
6074 && ! unsigned_overflow (value, 12 + 12))
6075 {
6076 value >>= 12;
6077 insn |= encode_addsub_imm_shift_amount (1);
6078 }
6079 }
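	      /* For instance, a resolved value of 0x1000 does not fit in the
		 12-bit field directly, but (0x1000 >> 12) == 1 does, so it is
		 encoded as imm12 == 1 with the shift field set to LSL #12.
		 A negative value such as -1 has already been converted to +1
		 with the ADD/SUB opcode flipped above.  */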
6080
6081 if (unsigned_overflow (value, 12))
6082 as_bad_where (fixP->fx_file, fixP->fx_line,
6083 _("immediate out of range"));
6084
6085 insn |= encode_addsub_imm (value);
6086
6087 put_aarch64_insn (buf, insn);
6088 break;
6089
6090 case AARCH64_OPND_SIMD_IMM:
6091 case AARCH64_OPND_SIMD_IMM_SFT:
6092 case AARCH64_OPND_LIMM:
6093 /* Bit mask immediate. */
6094 gas_assert (new_inst != NULL);
6095 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6096 new_inst->operands[idx].imm.value = value;
6097 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6098 &new_inst->value, NULL, NULL))
6099 put_aarch64_insn (buf, new_inst->value);
6100 else
6101 as_bad_where (fixP->fx_file, fixP->fx_line,
6102 _("invalid immediate"));
6103 break;
6104
6105 case AARCH64_OPND_HALF:
6106 /* 16-bit unsigned immediate. */
6107 if (unsigned_overflow (value, 16))
6108 as_bad_where (fixP->fx_file, fixP->fx_line,
6109 _("immediate out of range"));
6110 insn = get_aarch64_insn (buf);
6111 insn |= encode_movw_imm (value & 0xffff);
6112 put_aarch64_insn (buf, insn);
6113 break;
6114
6115 case AARCH64_OPND_IMM_MOV:
6116       /* Operand for a generic move immediate instruction, which is
6117 	 an alias instruction that generates a single MOVZ, MOVN or ORR
6118 	 instruction to load a 32-bit/64-bit immediate value into a general
6119 	 register.  An assembler error shall result if the immediate cannot be
6120 	 created by a single one of these instructions.  If there is a choice,
6121 	 then to ensure reversibility an assembler must prefer MOVZ to MOVN,
6122 	 and MOVZ or MOVN to ORR.  */
6123 gas_assert (new_inst != NULL);
6124 fix_mov_imm_insn (fixP, buf, new_inst, value);
6125 break;
6126
6127 case AARCH64_OPND_ADDR_SIMM7:
6128 case AARCH64_OPND_ADDR_SIMM9:
6129 case AARCH64_OPND_ADDR_SIMM9_2:
6130 case AARCH64_OPND_ADDR_UIMM12:
6131 /* Immediate offset in an address. */
6132 insn = get_aarch64_insn (buf);
6133
6134 gas_assert (new_inst != NULL && new_inst->value == insn);
6135 gas_assert (new_inst->opcode->operands[1] == opnd
6136 || new_inst->opcode->operands[2] == opnd);
6137
6138 /* Get the index of the address operand. */
6139 if (new_inst->opcode->operands[1] == opnd)
6140 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6141 idx = 1;
6142 else
6143 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6144 idx = 2;
6145
6146 /* Update the resolved offset value. */
6147 new_inst->operands[idx].addr.offset.imm = value;
6148
6149 /* Encode/fix-up. */
6150 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6151 &new_inst->value, NULL, NULL))
6152 {
6153 put_aarch64_insn (buf, new_inst->value);
6154 break;
6155 }
6156 else if (new_inst->opcode->iclass == ldst_pos
6157 && try_to_encode_as_unscaled_ldst (new_inst))
6158 {
6159 put_aarch64_insn (buf, new_inst->value);
6160 break;
6161 }
6162
6163 as_bad_where (fixP->fx_file, fixP->fx_line,
6164 _("immediate offset out of range"));
6165 break;
6166
6167 default:
6168 gas_assert (0);
6169 as_fatal (_("unhandled operand code %d"), opnd);
6170 }
6171 }
6172
6173 /* Apply a fixup (fixP) to segment data, once it has been determined
6174 by our caller that we have all the info we need to fix it up.
6175
6176 Parameter valP is the pointer to the value of the bits. */
6177
6178 void
6179 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6180 {
6181 offsetT value = *valP;
6182 uint32_t insn;
6183 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6184 int scale;
6185 unsigned flags = fixP->fx_addnumber;
6186
6187 DEBUG_TRACE ("\n\n");
6188 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6189 DEBUG_TRACE ("Enter md_apply_fix");
6190
6191 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6192
6193 /* Note whether this will delete the relocation. */
6194
6195 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6196 fixP->fx_done = 1;
6197
6198 /* Process the relocations. */
6199 switch (fixP->fx_r_type)
6200 {
6201 case BFD_RELOC_NONE:
6202 /* This will need to go in the object file. */
6203 fixP->fx_done = 0;
6204 break;
6205
6206 case BFD_RELOC_8:
6207 case BFD_RELOC_8_PCREL:
6208 if (fixP->fx_done || !seg->use_rela_p)
6209 md_number_to_chars (buf, value, 1);
6210 break;
6211
6212 case BFD_RELOC_16:
6213 case BFD_RELOC_16_PCREL:
6214 if (fixP->fx_done || !seg->use_rela_p)
6215 md_number_to_chars (buf, value, 2);
6216 break;
6217
6218 case BFD_RELOC_32:
6219 case BFD_RELOC_32_PCREL:
6220 if (fixP->fx_done || !seg->use_rela_p)
6221 md_number_to_chars (buf, value, 4);
6222 break;
6223
6224 case BFD_RELOC_64:
6225 case BFD_RELOC_64_PCREL:
6226 if (fixP->fx_done || !seg->use_rela_p)
6227 md_number_to_chars (buf, value, 8);
6228 break;
6229
6230 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6231 /* We claim that these fixups have been processed here, even if
6232 in fact we generate an error because we do not have a reloc
6233 for them, so tc_gen_reloc() will reject them. */
6234 fixP->fx_done = 1;
6235 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6236 {
6237 as_bad_where (fixP->fx_file, fixP->fx_line,
6238 _("undefined symbol %s used as an immediate value"),
6239 S_GET_NAME (fixP->fx_addsy));
6240 goto apply_fix_return;
6241 }
6242 fix_insn (fixP, flags, value);
6243 break;
6244
6245 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6246 if (value & 3)
6247 as_bad_where (fixP->fx_file, fixP->fx_line,
6248 _("pc-relative load offset not word aligned"));
6249 if (signed_overflow (value, 21))
6250 as_bad_where (fixP->fx_file, fixP->fx_line,
6251 _("pc-relative load offset out of range"));
6252 if (fixP->fx_done || !seg->use_rela_p)
6253 {
6254 insn = get_aarch64_insn (buf);
6255 insn |= encode_ld_lit_ofs_19 (value >> 2);
6256 put_aarch64_insn (buf, insn);
6257 }
6258 break;
6259
6260 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6261 if (signed_overflow (value, 21))
6262 as_bad_where (fixP->fx_file, fixP->fx_line,
6263 _("pc-relative address offset out of range"));
6264 if (fixP->fx_done || !seg->use_rela_p)
6265 {
6266 insn = get_aarch64_insn (buf);
6267 insn |= encode_adr_imm (value);
6268 put_aarch64_insn (buf, insn);
6269 }
6270 break;
6271
6272 case BFD_RELOC_AARCH64_BRANCH19:
6273 if (value & 3)
6274 as_bad_where (fixP->fx_file, fixP->fx_line,
6275 _("conditional branch target not word aligned"));
6276 if (signed_overflow (value, 21))
6277 as_bad_where (fixP->fx_file, fixP->fx_line,
6278 _("conditional branch out of range"));
6279 if (fixP->fx_done || !seg->use_rela_p)
6280 {
6281 insn = get_aarch64_insn (buf);
6282 insn |= encode_cond_branch_ofs_19 (value >> 2);
6283 put_aarch64_insn (buf, insn);
6284 }
6285 break;
6286
6287 case BFD_RELOC_AARCH64_TSTBR14:
6288 if (value & 3)
6289 as_bad_where (fixP->fx_file, fixP->fx_line,
6290 _("conditional branch target not word aligned"));
6291 if (signed_overflow (value, 16))
6292 as_bad_where (fixP->fx_file, fixP->fx_line,
6293 _("conditional branch out of range"));
6294 if (fixP->fx_done || !seg->use_rela_p)
6295 {
6296 insn = get_aarch64_insn (buf);
6297 insn |= encode_tst_branch_ofs_14 (value >> 2);
6298 put_aarch64_insn (buf, insn);
6299 }
6300 break;
6301
6302 case BFD_RELOC_AARCH64_JUMP26:
6303 case BFD_RELOC_AARCH64_CALL26:
6304 if (value & 3)
6305 as_bad_where (fixP->fx_file, fixP->fx_line,
6306 _("branch target not word aligned"));
6307 if (signed_overflow (value, 28))
6308 as_bad_where (fixP->fx_file, fixP->fx_line, _("branch out of range"));
6309 if (fixP->fx_done || !seg->use_rela_p)
6310 {
6311 insn = get_aarch64_insn (buf);
6312 insn |= encode_branch_ofs_26 (value >> 2);
6313 put_aarch64_insn (buf, insn);
6314 }
6315 break;
6316
6317 case BFD_RELOC_AARCH64_MOVW_G0:
6318 case BFD_RELOC_AARCH64_MOVW_G0_S:
6319 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6320 scale = 0;
6321 goto movw_common;
6322 case BFD_RELOC_AARCH64_MOVW_G1:
6323 case BFD_RELOC_AARCH64_MOVW_G1_S:
6324 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6325 scale = 16;
6326 goto movw_common;
6327 case BFD_RELOC_AARCH64_MOVW_G2:
6328 case BFD_RELOC_AARCH64_MOVW_G2_S:
6329 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6330 scale = 32;
6331 goto movw_common;
6332 case BFD_RELOC_AARCH64_MOVW_G3:
6333 scale = 48;
6334 movw_common:
6335 if (fixP->fx_done || !seg->use_rela_p)
6336 {
6337 insn = get_aarch64_insn (buf);
6338
6339 if (!fixP->fx_done)
6340 {
6341 /* REL signed addend must fit in 16 bits */
6342 if (signed_overflow (value, 16))
6343 as_bad_where (fixP->fx_file, fixP->fx_line,
6344 _("offset out of range"));
6345 }
6346 else
6347 {
6348 /* Check for overflow and scale. */
6349 switch (fixP->fx_r_type)
6350 {
6351 case BFD_RELOC_AARCH64_MOVW_G0:
6352 case BFD_RELOC_AARCH64_MOVW_G1:
6353 case BFD_RELOC_AARCH64_MOVW_G2:
6354 case BFD_RELOC_AARCH64_MOVW_G3:
6355 if (unsigned_overflow (value, scale + 16))
6356 as_bad_where (fixP->fx_file, fixP->fx_line,
6357 _("unsigned value out of range"));
6358 break;
6359 case BFD_RELOC_AARCH64_MOVW_G0_S:
6360 case BFD_RELOC_AARCH64_MOVW_G1_S:
6361 case BFD_RELOC_AARCH64_MOVW_G2_S:
6362 /* NOTE: We can only come here with movz or movn. */
6363 if (signed_overflow (value, scale + 16))
6364 as_bad_where (fixP->fx_file, fixP->fx_line,
6365 _("signed value out of range"));
6366 if (value < 0)
6367 {
6368 /* Force use of MOVN. */
6369 value = ~value;
6370 insn = reencode_movzn_to_movn (insn);
6371 }
6372 else
6373 {
6374 /* Force use of MOVZ. */
6375 insn = reencode_movzn_to_movz (insn);
6376 }
6377 break;
6378 default:
6379 /* Unchecked relocations. */
6380 break;
6381 }
6382 value >>= scale;
6383 }
6384
6385 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6386 insn |= encode_movw_imm (value & 0xffff);
6387
6388 put_aarch64_insn (buf, insn);
6389 }
6390 break;
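	  /* Worked example for the resolved (fx_done) case: a
	     BFD_RELOC_AARCH64_MOVW_G1 fixup whose value resolves to
	     0x12340000 passes the unsigned_overflow (value, 32) check,
	     value >>= 16 then leaves 0x1234, and that 16-bit chunk is
	     inserted into the MOVN/MOVZ/MOVK immediate field by
	     encode_movw_imm.  */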
6391
6392 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6393 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6394 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6395 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6396 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6397 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6398 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6399 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6400 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6401 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6402 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6403 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6404 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE:
6405 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6406 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6407 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6408 /* Should always be exported to object file, see
6409 aarch64_force_relocation(). */
6410 gas_assert (!fixP->fx_done);
6411 gas_assert (seg->use_rela_p);
6412 break;
6413
6414 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6415 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6416 case BFD_RELOC_AARCH64_ADD_LO12:
6417 case BFD_RELOC_AARCH64_LDST8_LO12:
6418 case BFD_RELOC_AARCH64_LDST16_LO12:
6419 case BFD_RELOC_AARCH64_LDST32_LO12:
6420 case BFD_RELOC_AARCH64_LDST64_LO12:
6421 case BFD_RELOC_AARCH64_LDST128_LO12:
6422 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6423 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6424 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6425 /* Should always be exported to object file, see
6426 aarch64_force_relocation(). */
6427 gas_assert (!fixP->fx_done);
6428 gas_assert (seg->use_rela_p);
6429 break;
6430
6431 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6432 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6433 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6434 break;
6435
6436 default:
6437 as_bad_where (fixP->fx_file, fixP->fx_line,
6438 _("unexpected %s fixup"),
6439 bfd_get_reloc_code_name (fixP->fx_r_type));
6440 break;
6441 }
6442
6443 apply_fix_return:
6444   /* Free the allocated struct aarch64_inst.
6445      N.B. currently only a very limited number of fix-up types actually use
6446      this field, so the impact on performance should be minimal.  */
6447 if (fixP->tc_fix_data.inst != NULL)
6448 free (fixP->tc_fix_data.inst);
6449
6450 return;
6451 }
6452
6453 /* Translate internal representation of relocation info to BFD target
6454 format. */
6455
6456 arelent *
6457 tc_gen_reloc (asection * section, fixS * fixp)
6458 {
6459 arelent *reloc;
6460 bfd_reloc_code_real_type code;
6461
6462 reloc = xmalloc (sizeof (arelent));
6463
6464 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6465 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6466 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6467
6468 if (fixp->fx_pcrel)
6469 {
6470 if (section->use_rela_p)
6471 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6472 else
6473 fixp->fx_offset = reloc->address;
6474 }
6475 reloc->addend = fixp->fx_offset;
6476
6477 code = fixp->fx_r_type;
6478 switch (code)
6479 {
6480 case BFD_RELOC_16:
6481 if (fixp->fx_pcrel)
6482 code = BFD_RELOC_16_PCREL;
6483 break;
6484
6485 case BFD_RELOC_32:
6486 if (fixp->fx_pcrel)
6487 code = BFD_RELOC_32_PCREL;
6488 break;
6489
6490 case BFD_RELOC_64:
6491 if (fixp->fx_pcrel)
6492 code = BFD_RELOC_64_PCREL;
6493 break;
6494
6495 default:
6496 break;
6497 }
6498
6499 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6500 if (reloc->howto == NULL)
6501 {
6502 as_bad_where (fixp->fx_file, fixp->fx_line,
6503 _
6504 ("cannot represent %s relocation in this object file format"),
6505 bfd_get_reloc_code_name (code));
6506 return NULL;
6507 }
6508
6509 return reloc;
6510 }
6511
6512 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
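/* For example, a 4-byte data directive (e.g. ".word some_symbol" on this
   target) reaches this function with SIZE == 4 and is given a BFD_RELOC_32
   fixup; an 8-byte directive gets BFD_RELOC_64 instead.  */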
6513
6514 void
6515 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6516 {
6517 bfd_reloc_code_real_type type;
6518 int pcrel = 0;
6519
6520 /* Pick a reloc.
6521 FIXME: @@ Should look at CPU word size. */
6522 switch (size)
6523 {
6524 case 1:
6525 type = BFD_RELOC_8;
6526 break;
6527 case 2:
6528 type = BFD_RELOC_16;
6529 break;
6530 case 4:
6531 type = BFD_RELOC_32;
6532 break;
6533 case 8:
6534 type = BFD_RELOC_64;
6535 break;
6536 default:
6537 as_bad (_("cannot do %u-byte relocation"), size);
6538 type = BFD_RELOC_UNUSED;
6539 break;
6540 }
6541
6542 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6543 }
6544
6545 int
6546 aarch64_force_relocation (struct fix *fixp)
6547 {
6548 switch (fixp->fx_r_type)
6549 {
6550 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6551 /* Perform these "immediate" internal relocations
6552 even if the symbol is extern or weak. */
6553 return 0;
6554
6555 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6556 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6557 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6558 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6559 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6560 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6561 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6562 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6563 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6564 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6565 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6566 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6567 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE:
6568 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6569 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6570 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6571 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6572 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6573 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6574 case BFD_RELOC_AARCH64_ADD_LO12:
6575 case BFD_RELOC_AARCH64_LDST8_LO12:
6576 case BFD_RELOC_AARCH64_LDST16_LO12:
6577 case BFD_RELOC_AARCH64_LDST32_LO12:
6578 case BFD_RELOC_AARCH64_LDST64_LO12:
6579 case BFD_RELOC_AARCH64_LDST128_LO12:
6580 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6581 /* Always leave these relocations for the linker. */
6582 return 1;
6583
6584 default:
6585 break;
6586 }
6587
6588 return generic_force_reloc (fixp);
6589 }
6590
6591 #ifdef OBJ_ELF
6592
6593 const char *
6594 elf64_aarch64_target_format (void)
6595 {
6596 if (target_big_endian)
6597 return "elf64-bigaarch64";
6598 else
6599 return "elf64-littleaarch64";
6600 }
6601
6602 void
6603 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6604 {
6605 elf_frob_symbol (symp, puntp);
6606 }
6607 #endif
6608
6609 /* MD interface: Finalization. */
6610
6611 /* A good place to do this, although this was probably not intended
6612 for this kind of use. We need to dump the literal pool before
6613 references are made to a null symbol pointer. */
6614
6615 void
6616 aarch64_cleanup (void)
6617 {
6618 literal_pool *pool;
6619
6620 for (pool = list_of_pools; pool; pool = pool->next)
6621 {
6622 /* Put it at the end of the relevant section. */
6623 subseg_set (pool->section, pool->sub_section);
6624 s_ltorg (0);
6625 }
6626 }
6627
6628 #ifdef OBJ_ELF
6629 /* Remove any excess mapping symbols generated for alignment frags in
6630 SEC. We may have created a mapping symbol before a zero byte
6631 alignment; remove it if there's a mapping symbol after the
6632 alignment. */
6633 static void
6634 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6635 void *dummy ATTRIBUTE_UNUSED)
6636 {
6637 segment_info_type *seginfo = seg_info (sec);
6638 fragS *fragp;
6639
6640 if (seginfo == NULL || seginfo->frchainP == NULL)
6641 return;
6642
6643 for (fragp = seginfo->frchainP->frch_root;
6644 fragp != NULL; fragp = fragp->fr_next)
6645 {
6646 symbolS *sym = fragp->tc_frag_data.last_map;
6647 fragS *next = fragp->fr_next;
6648
6649 /* Variable-sized frags have been converted to fixed size by
6650 this point. But if this was variable-sized to start with,
6651 there will be a fixed-size frag after it. So don't handle
6652 next == NULL. */
6653 if (sym == NULL || next == NULL)
6654 continue;
6655
6656 if (S_GET_VALUE (sym) < next->fr_address)
6657 /* Not at the end of this frag. */
6658 continue;
6659 know (S_GET_VALUE (sym) == next->fr_address);
6660
6661 do
6662 {
6663 if (next->tc_frag_data.first_map != NULL)
6664 {
6665 /* Next frag starts with a mapping symbol. Discard this
6666 one. */
6667 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6668 break;
6669 }
6670
6671 if (next->fr_next == NULL)
6672 {
6673 /* This mapping symbol is at the end of the section. Discard
6674 it. */
6675 know (next->fr_fix == 0 && next->fr_var == 0);
6676 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6677 break;
6678 }
6679
6680 /* As long as we have empty frags without any mapping symbols,
6681 keep looking. */
6682 /* If the next frag is non-empty and does not start with a
6683 mapping symbol, then this mapping symbol is required. */
6684 if (next->fr_address != next->fr_next->fr_address)
6685 break;
6686
6687 next = next->fr_next;
6688 }
6689 while (next != NULL);
6690 }
6691 }
6692 #endif
6693
6694 /* Adjust the symbol table. */
6695
6696 void
6697 aarch64_adjust_symtab (void)
6698 {
6699 #ifdef OBJ_ELF
6700 /* Remove any overlapping mapping symbols generated by alignment frags. */
6701 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
6702 /* Now do generic ELF adjustments. */
6703 elf_adjust_symtab ();
6704 #endif
6705 }
6706
6707 static void
6708 checked_hash_insert (struct hash_control *table, const char *key, void *value)
6709 {
6710 const char *hash_err;
6711
6712 hash_err = hash_insert (table, key, value);
6713 if (hash_err)
6714 printf ("Internal Error: Can't hash %s\n", key);
6715 }
6716
6717 static void
6718 fill_instruction_hash_table (void)
6719 {
6720 aarch64_opcode *opcode = aarch64_opcode_table;
6721
6722 while (opcode->name != NULL)
6723 {
6724 templates *templ, *new_templ;
6725 templ = hash_find (aarch64_ops_hsh, opcode->name);
6726
6727 new_templ = (templates *) xmalloc (sizeof (templates));
6728 new_templ->opcode = opcode;
6729 new_templ->next = NULL;
6730
6731 if (!templ)
6732 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
6733 else
6734 {
6735 new_templ->next = templ->next;
6736 templ->next = new_templ;
6737 }
6738 ++opcode;
6739 }
6740 }
6741
6742 static inline void
6743 convert_to_upper (char *dst, const char *src, size_t num)
6744 {
6745 unsigned int i;
6746 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
6747 *dst = TOUPPER (*src);
6748 *dst = '\0';
6749 }
6750
6751 /* Assume STR points to a lower-case string; allocate, convert and return
6752 the corresponding upper-case string. */
6753 static inline const char*
6754 get_upper_str (const char *str)
6755 {
6756 char *ret;
6757 size_t len = strlen (str);
6758 if ((ret = xmalloc (len + 1)) == NULL)
6759 abort ();
6760 convert_to_upper (ret, str, len);
6761 return ret;
6762 }
6763
6764 /* MD interface: Initialization. */
6765
6766 void
6767 md_begin (void)
6768 {
6769 unsigned mach;
6770 unsigned int i;
6771
6772 if ((aarch64_ops_hsh = hash_new ()) == NULL
6773 || (aarch64_cond_hsh = hash_new ()) == NULL
6774 || (aarch64_shift_hsh = hash_new ()) == NULL
6775 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
6776 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
6777 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
6778 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
6779 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
6780 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
6781 || (aarch64_reg_hsh = hash_new ()) == NULL
6782 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
6783 || (aarch64_nzcv_hsh = hash_new ()) == NULL
6784 || (aarch64_pldop_hsh = hash_new ()) == NULL)
6785 as_fatal (_("virtual memory exhausted"));
6786
6787 fill_instruction_hash_table ();
6788
6789 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
6790 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
6791 (void *) (aarch64_sys_regs + i));
6792
6793 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
6794 checked_hash_insert (aarch64_pstatefield_hsh,
6795 aarch64_pstatefields[i].name,
6796 (void *) (aarch64_pstatefields + i));
6797
6798 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
6799 checked_hash_insert (aarch64_sys_regs_ic_hsh,
6800 aarch64_sys_regs_ic[i].template,
6801 (void *) (aarch64_sys_regs_ic + i));
6802
6803 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
6804 checked_hash_insert (aarch64_sys_regs_dc_hsh,
6805 aarch64_sys_regs_dc[i].template,
6806 (void *) (aarch64_sys_regs_dc + i));
6807
6808 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
6809 checked_hash_insert (aarch64_sys_regs_at_hsh,
6810 aarch64_sys_regs_at[i].template,
6811 (void *) (aarch64_sys_regs_at + i));
6812
6813 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
6814 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
6815 aarch64_sys_regs_tlbi[i].template,
6816 (void *) (aarch64_sys_regs_tlbi + i));
6817
6818 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
6819 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
6820 (void *) (reg_names + i));
6821
6822 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
6823 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
6824 (void *) (nzcv_names + i));
6825
6826 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
6827 {
6828 const char *name = aarch64_operand_modifiers[i].name;
6829 checked_hash_insert (aarch64_shift_hsh, name,
6830 (void *) (aarch64_operand_modifiers + i));
6831 /* Also hash the name in the upper case. */
6832 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
6833 (void *) (aarch64_operand_modifiers + i));
6834 }
6835
6836 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
6837 {
6838 unsigned int j;
6839 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
6840 the same condition code. */
6841 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
6842 {
6843 const char *name = aarch64_conds[i].names[j];
6844 if (name == NULL)
6845 break;
6846 checked_hash_insert (aarch64_cond_hsh, name,
6847 (void *) (aarch64_conds + i));
6848 /* Also hash the name in the upper case. */
6849 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
6850 (void *) (aarch64_conds + i));
6851 }
6852 }
6853
6854 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
6855 {
6856 const char *name = aarch64_barrier_options[i].name;
6857       /* Skip xx00 - the unallocated values of the option field.  */
6858 if ((i & 0x3) == 0)
6859 continue;
6860 checked_hash_insert (aarch64_barrier_opt_hsh, name,
6861 (void *) (aarch64_barrier_options + i));
6862 /* Also hash the name in the upper case. */
6863 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
6864 (void *) (aarch64_barrier_options + i));
6865 }
6866
6867 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
6868 {
6869 const char* name = aarch64_prfops[i].name;
6870       /* Skip 0011x, 01xxx, 1011x and 11xxx - the unallocated hint encodings;
6871 	 they can still be specified numerically as a 5-bit immediate #uimm5.  */
6872 if ((i & 0xf) >= 6)
6873 continue;
6874 checked_hash_insert (aarch64_pldop_hsh, name,
6875 (void *) (aarch64_prfops + i));
6876 /* Also hash the name in the upper case. */
6877 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
6878 (void *) (aarch64_prfops + i));
6879 }
6880
6881 /* Set the cpu variant based on the command-line options. */
6882 if (!mcpu_cpu_opt)
6883 mcpu_cpu_opt = march_cpu_opt;
6884
6885 if (!mcpu_cpu_opt)
6886 mcpu_cpu_opt = &cpu_default;
6887
6888 cpu_variant = *mcpu_cpu_opt;
6889
6890 /* Record the CPU type. */
6891 mach = bfd_mach_aarch64;
6892
6893 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
6894 }
6895
6896 /* Command line processing. */
6897
6898 const char *md_shortopts = "m:";
6899
6900 #ifdef AARCH64_BI_ENDIAN
6901 #define OPTION_EB (OPTION_MD_BASE + 0)
6902 #define OPTION_EL (OPTION_MD_BASE + 1)
6903 #else
6904 #if TARGET_BYTES_BIG_ENDIAN
6905 #define OPTION_EB (OPTION_MD_BASE + 0)
6906 #else
6907 #define OPTION_EL (OPTION_MD_BASE + 1)
6908 #endif
6909 #endif
6910
6911 struct option md_longopts[] = {
6912 #ifdef OPTION_EB
6913 {"EB", no_argument, NULL, OPTION_EB},
6914 #endif
6915 #ifdef OPTION_EL
6916 {"EL", no_argument, NULL, OPTION_EL},
6917 #endif
6918 {NULL, no_argument, NULL, 0}
6919 };
6920
6921 size_t md_longopts_size = sizeof (md_longopts);
6922
6923 struct aarch64_option_table
6924 {
6925 char *option; /* Option name to match. */
6926 char *help; /* Help information. */
6927 int *var; /* Variable to change. */
6928 int value; /* What to change it to. */
6929 char *deprecated; /* If non-null, print this message. */
6930 };
6931
6932 static struct aarch64_option_table aarch64_opts[] = {
6933 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
6934 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
6935 NULL},
6936 #ifdef DEBUG_AARCH64
6937 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
6938 #endif /* DEBUG_AARCH64 */
6939 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
6940 NULL},
6941 {NULL, NULL, NULL, 0, NULL}
6942 };
6943
6944 struct aarch64_cpu_option_table
6945 {
6946 char *name;
6947 const aarch64_feature_set value;
6948 /* The canonical name of the CPU, or NULL to use NAME converted to upper
6949 case. */
6950 const char *canonical_name;
6951 };
6952
6953 /* This list should, at a minimum, contain all the cpu names
6954 recognized by GCC. */
6955 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
6956 {"all", AARCH64_ANY, NULL},
6957 {"cortex-a53", AARCH64_ARCH_V8, "Cortex-A53"},
6958 {"cortex-a57", AARCH64_ARCH_V8, "Cortex-A57"},
6959 {"generic", AARCH64_ARCH_V8, NULL},
6960
6961   /* These two are example CPUs supported in GCC; once we have real
6962      CPUs they will be removed.  */
6963 {"example-1", AARCH64_ARCH_V8, NULL},
6964 {"example-2", AARCH64_ARCH_V8, NULL},
6965
6966 {NULL, AARCH64_ARCH_NONE, NULL}
6967 };
6968
6969 struct aarch64_arch_option_table
6970 {
6971 char *name;
6972 const aarch64_feature_set value;
6973 };
6974
6975 /* This list should, at a minimum, contain all the architecture names
6976 recognized by GCC. */
6977 static const struct aarch64_arch_option_table aarch64_archs[] = {
6978 {"all", AARCH64_ANY},
6979 {"armv8-a", AARCH64_ARCH_V8},
6980 {NULL, AARCH64_ARCH_NONE}
6981 };
6982
6983 /* ISA extensions. */
6984 struct aarch64_option_cpu_value_table
6985 {
6986 char *name;
6987 const aarch64_feature_set value;
6988 };
6989
6990 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
6991 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
6992 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
6993 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
6994 {NULL, AARCH64_ARCH_NONE}
6995 };
6996
6997 struct aarch64_long_option_table
6998 {
6999 char *option; /* Substring to match. */
7000 char *help; /* Help information. */
7001 int (*func) (char *subopt); /* Function to decode sub-option. */
7002 char *deprecated; /* If non-null, print this message. */
7003 };
7004
7005 static int
7006 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p)
7007 {
7008 /* We insist on extensions being added before being removed. We achieve
7009 this by using the ADDING_VALUE variable to indicate whether we are
7010 adding an extension (1) or removing it (0) and only allowing it to
7011 change in the order -1 -> 1 -> 0. */
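  /* For example, given "+crypto+nosimd" this loop first merges the crypto
     feature bits into *ext_set and then clears the simd bits; the reverse
     order ("+nosimd+crypto") is rejected with the "must specify extensions
     to add before specifying those to remove" error.  */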
7012 int adding_value = -1;
7013 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7014
7015 /* Copy the feature set, so that we can modify it. */
7016 *ext_set = **opt_p;
7017 *opt_p = ext_set;
7018
7019 while (str != NULL && *str != 0)
7020 {
7021 const struct aarch64_option_cpu_value_table *opt;
7022 char *ext;
7023 int optlen;
7024
7025 if (*str != '+')
7026 {
7027 as_bad (_("invalid architectural extension"));
7028 return 0;
7029 }
7030
7031 str++;
7032 ext = strchr (str, '+');
7033
7034 if (ext != NULL)
7035 optlen = ext - str;
7036 else
7037 optlen = strlen (str);
7038
7039 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7040 {
7041 if (adding_value != 0)
7042 adding_value = 0;
7043 optlen -= 2;
7044 str += 2;
7045 }
7046 else if (optlen > 0)
7047 {
7048 if (adding_value == -1)
7049 adding_value = 1;
7050 else if (adding_value != 1)
7051 {
7052 as_bad (_("must specify extensions to add before specifying "
7053 "those to remove"));
7054 	      return 0;
7055 }
7056 }
7057
7058 if (optlen == 0)
7059 {
7060 as_bad (_("missing architectural extension"));
7061 return 0;
7062 }
7063
7064 gas_assert (adding_value != -1);
7065
7066 for (opt = aarch64_features; opt->name != NULL; opt++)
7067 if (strncmp (opt->name, str, optlen) == 0)
7068 {
7069 /* Add or remove the extension. */
7070 if (adding_value)
7071 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7072 else
7073 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7074 break;
7075 }
7076
7077 if (opt->name == NULL)
7078 {
7079 as_bad (_("unknown architectural extension `%s'"), str);
7080 return 0;
7081 }
7082
7083 str = ext;
7084     }
7085
7086 return 1;
7087 }
7088
7089 static int
7090 aarch64_parse_cpu (char *str)
7091 {
7092 const struct aarch64_cpu_option_table *opt;
7093 char *ext = strchr (str, '+');
7094 size_t optlen;
7095
7096 if (ext != NULL)
7097 optlen = ext - str;
7098 else
7099 optlen = strlen (str);
7100
7101 if (optlen == 0)
7102 {
7103 as_bad (_("missing cpu name `%s'"), str);
7104 return 0;
7105 }
7106
7107 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7108 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7109 {
7110 mcpu_cpu_opt = &opt->value;
7111 if (ext != NULL)
7112 return aarch64_parse_features (ext, &mcpu_cpu_opt);
7113
7114 return 1;
7115 }
7116
7117 as_bad (_("unknown cpu `%s'"), str);
7118 return 0;
7119 }
7120
7121 static int
7122 aarch64_parse_arch (char *str)
7123 {
7124 const struct aarch64_arch_option_table *opt;
7125 char *ext = strchr (str, '+');
7126 size_t optlen;
7127
7128 if (ext != NULL)
7129 optlen = ext - str;
7130 else
7131 optlen = strlen (str);
7132
7133 if (optlen == 0)
7134 {
7135 as_bad (_("missing architecture name `%s'"), str);
7136 return 0;
7137 }
7138
7139 for (opt = aarch64_archs; opt->name != NULL; opt++)
7140 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7141 {
7142 march_cpu_opt = &opt->value;
7143 if (ext != NULL)
7144 return aarch64_parse_features (ext, &march_cpu_opt);
7145
7146 return 1;
7147 }
7148
7149 as_bad (_("unknown architecture `%s'\n"), str);
7150 return 0;
7151 }
7152
7153 static struct aarch64_long_option_table aarch64_long_opts[] = {
7154 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7155 aarch64_parse_cpu, NULL},
7156 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7157 aarch64_parse_arch, NULL},
7158 {NULL, NULL, 0, NULL}
7159 };
7160
7161 int
7162 md_parse_option (int c, char *arg)
7163 {
7164 struct aarch64_option_table *opt;
7165 struct aarch64_long_option_table *lopt;
7166
7167 switch (c)
7168 {
7169 #ifdef OPTION_EB
7170 case OPTION_EB:
7171 target_big_endian = 1;
7172 break;
7173 #endif
7174
7175 #ifdef OPTION_EL
7176 case OPTION_EL:
7177 target_big_endian = 0;
7178 break;
7179 #endif
7180
7181 case 'a':
7182       /* Listing option.  Just ignore these; we don't support additional
7183          ones.  */
7184 return 0;
7185
7186 default:
7187 for (opt = aarch64_opts; opt->option != NULL; opt++)
7188 {
7189 if (c == opt->option[0]
7190 && ((arg == NULL && opt->option[1] == 0)
7191 || streq (arg, opt->option + 1)))
7192 {
7193 /* If the option is deprecated, tell the user. */
7194 if (opt->deprecated != NULL)
7195 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7196 arg ? arg : "", _(opt->deprecated));
7197
7198 if (opt->var != NULL)
7199 *opt->var = opt->value;
7200
7201 return 1;
7202 }
7203 }
7204
7205 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7206 {
7207 /* These options are expected to have an argument. */
7208 if (c == lopt->option[0]
7209 && arg != NULL
7210 && strncmp (arg, lopt->option + 1,
7211 strlen (lopt->option + 1)) == 0)
7212 {
7213 /* If the option is deprecated, tell the user. */
7214 if (lopt->deprecated != NULL)
7215 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7216 _(lopt->deprecated));
7217
7218 	      /* Call the sub-option parser.  */
7219 return lopt->func (arg + strlen (lopt->option) - 1);
7220 }
7221 }
7222
7223 return 0;
7224 }
7225
7226 return 1;
7227 }
7228
7229 void
7230 md_show_usage (FILE * fp)
7231 {
7232 struct aarch64_option_table *opt;
7233 struct aarch64_long_option_table *lopt;
7234
7235 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7236
7237 for (opt = aarch64_opts; opt->option != NULL; opt++)
7238 if (opt->help != NULL)
7239 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7240
7241 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7242 if (lopt->help != NULL)
7243 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7244
7245 #ifdef OPTION_EB
7246 fprintf (fp, _("\
7247 -EB assemble code for a big-endian cpu\n"));
7248 #endif
7249
7250 #ifdef OPTION_EL
7251 fprintf (fp, _("\
7252 -EL assemble code for a little-endian cpu\n"));
7253 #endif
7254 }
7255
7256 /* Parse a .cpu directive. */
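/* For example:

     .cpu cortex-a53+crypto

   selects the Cortex-A53 feature set from the table above and then enables
   the crypto extension via aarch64_parse_features.  */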
7257
7258 static void
7259 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7260 {
7261 const struct aarch64_cpu_option_table *opt;
7262 char saved_char;
7263 char *name;
7264 char *ext;
7265 size_t optlen;
7266
7267 name = input_line_pointer;
7268 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7269 input_line_pointer++;
7270 saved_char = *input_line_pointer;
7271 *input_line_pointer = 0;
7272
7273 ext = strchr (name, '+');
7274
7275 if (ext != NULL)
7276 optlen = ext - name;
7277 else
7278 optlen = strlen (name);
7279
7280 /* Skip the first "all" entry. */
7281 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7282 if (strlen (opt->name) == optlen
7283 && strncmp (name, opt->name, optlen) == 0)
7284 {
7285 mcpu_cpu_opt = &opt->value;
7286 if (ext != NULL)
7287 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7288 return;
7289
7290 cpu_variant = *mcpu_cpu_opt;
7291
7292 *input_line_pointer = saved_char;
7293 demand_empty_rest_of_line ();
7294 return;
7295 }
7296 as_bad (_("unknown cpu `%s'"), name);
7297 *input_line_pointer = saved_char;
7298 ignore_rest_of_line ();
7299 }
7300
7301
7302 /* Parse a .arch directive. */
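/* For example, ".arch armv8-a+crypto" is handled like the .cpu directive
   above, but the name is matched against the architecture table instead of
   the CPU table.  */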
7303
7304 static void
7305 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7306 {
7307 const struct aarch64_arch_option_table *opt;
7308 char saved_char;
7309 char *name;
7310 char *ext;
7311 size_t optlen;
7312
7313 name = input_line_pointer;
7314 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7315 input_line_pointer++;
7316 saved_char = *input_line_pointer;
7317 *input_line_pointer = 0;
7318
7319 ext = strchr (name, '+');
7320
7321 if (ext != NULL)
7322 optlen = ext - name;
7323 else
7324 optlen = strlen (name);
7325
7326 /* Skip the first "all" entry. */
7327 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7328 if (strlen (opt->name) == optlen
7329 && strncmp (name, opt->name, optlen) == 0)
7330 {
7331 mcpu_cpu_opt = &opt->value;
7332 if (ext != NULL)
7333 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7334 return;
7335
7336 cpu_variant = *mcpu_cpu_opt;
7337
7338 *input_line_pointer = saved_char;
7339 demand_empty_rest_of_line ();
7340 return;
7341 }
7342
7343 as_bad (_("unknown architecture `%s'\n"), name);
7344 *input_line_pointer = saved_char;
7345 ignore_rest_of_line ();
7346 }
7347
7348 /* Copy symbol information. */
7349
7350 void
7351 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7352 {
7353 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7354 }