Adding adr_type and prevent adr :got:
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum neon_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q
87 };
88
89 /* Bits for DEFINED field in neon_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92
93 struct neon_type_el
94 {
95 enum neon_el_type type;
96 unsigned char defined;
97 unsigned width;
98 int64_t index;
99 };
100
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
102
103 struct reloc
104 {
105 bfd_reloc_code_real_type type;
106 expressionS exp;
107 int pc_rel;
108 enum aarch64_opnd opnd;
109 uint32_t flags;
110 unsigned need_libopcodes_p : 1;
111 };
112
113 struct aarch64_instruction
114 {
115 /* libopcodes structure for instruction intermediate representation. */
116 aarch64_inst base;
117 /* Record assembly errors found during the parsing. */
118 struct
119 {
120 enum aarch64_operand_error_kind kind;
121 const char *error;
122 } parsing_error;
123 /* The condition that appears in the assembly line. */
124 int cond;
125 /* Relocation information (including the GAS internal fixup). */
126 struct reloc reloc;
127 /* Need to generate an immediate in the literal pool. */
128 unsigned gen_lit_pool : 1;
129 };
130
131 typedef struct aarch64_instruction aarch64_instruction;
132
133 static aarch64_instruction inst;
134
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
138 /* Diagnostics inline function utilities.
139
140 These are lightweight utilities which should only be called by parse_operands
141 and other parsers. GAS processes each assembly line by parsing it against
142 instruction template(s); in the case of multiple templates (for the same
143 mnemonic name), those templates are tried one by one until one succeeds or
144 all fail. An assembly line may fail a few templates before being
145 successfully parsed; an error saved here in most cases is not a user error
146 but an error indicating the current template is not the right template.
147 Therefore it is very important that errors can be saved at a low cost during
148 the parsing; we don't want to slow down the whole parsing by recording
149 non-user errors in detail.
150
151 Remember that the objective is to help GAS pick up the most appropriate
152 error message in the case of multiple templates, e.g. FMOV which has 8
153 templates. */
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline void
175 set_error_message (const char *error)
176 {
177 inst.parsing_error.error = error;
178 }
179
180 static inline enum aarch64_operand_error_kind
181 get_error_kind (void)
182 {
183 return inst.parsing_error.kind;
184 }
185
186 static inline void
187 set_error_kind (enum aarch64_operand_error_kind kind)
188 {
189 inst.parsing_error.kind = kind;
190 }
191
192 static inline void
193 set_error (enum aarch64_operand_error_kind kind, const char *error)
194 {
195 inst.parsing_error.kind = kind;
196 inst.parsing_error.error = error;
197 }
198
199 static inline void
200 set_recoverable_error (const char *error)
201 {
202 set_error (AARCH64_OPDE_RECOVERABLE, error);
203 }
204
205 /* Use the DESC field of the corresponding aarch64_operand entry to compose
206 the error message. */
207 static inline void
208 set_default_error (void)
209 {
210 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
211 }
212
213 static inline void
214 set_syntax_error (const char *error)
215 {
216 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
217 }
218
219 static inline void
220 set_first_syntax_error (const char *error)
221 {
222 if (! error_p ())
223 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
224 }
225
226 static inline void
227 set_fatal_syntax_error (const char *error)
228 {
229 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
230 }
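/* Illustrative sketch, not part of the original source: a typical operand
   parser records a cheap diagnostic with the helpers above and returns
   failure, leaving it to the caller to report the message only if every
   template for the mnemonic fails.  The function name is hypothetical.  */

static bfd_boolean
example_parse_hash_prefix (char **str)
{
  if (**str != '#')
    {
      /* Cheap to record; usually this only means the current template
         does not match, not that the user made a mistake.  */
      set_default_error ();
      return FALSE;
    }
  (*str)++;
  return TRUE;
}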
231 \f
232 /* Number of littlenums required to hold an extended precision number. */
233 #define MAX_LITTLENUMS 6
234
235 /* Return value for certain parsers when the parsing fails; those parsers
236 return the information of the parsed result, e.g. register number, on
237 success. */
238 #define PARSE_FAIL -1
239
240 /* This is an invalid condition code that means no conditional field is
241 present. */
242 #define COND_ALWAYS 0x10
243
244 typedef struct
245 {
246 const char *template;
247 unsigned long value;
248 } asm_barrier_opt;
249
250 typedef struct
251 {
252 const char *template;
253 uint32_t value;
254 } asm_nzcv;
255
256 struct reloc_entry
257 {
258 char *name;
259 bfd_reloc_code_real_type reloc;
260 };
261
262 /* Structure for a hash table entry for a register. */
263 typedef struct
264 {
265 const char *name;
266 unsigned char number;
267 unsigned char type;
268 unsigned char builtin;
269 } reg_entry;
270
271 /* Macros to define the register types and masks for the purpose
272 of parsing. */
273
274 #undef AARCH64_REG_TYPES
275 #define AARCH64_REG_TYPES \
276 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
277 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
278 BASIC_REG_TYPE(SP_32) /* wsp */ \
279 BASIC_REG_TYPE(SP_64) /* sp */ \
280 BASIC_REG_TYPE(Z_32) /* wzr */ \
281 BASIC_REG_TYPE(Z_64) /* xzr */ \
282 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
283 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
284 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
285 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
286 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
287 BASIC_REG_TYPE(CN) /* c[0-7] */ \
288 BASIC_REG_TYPE(VN) /* v[0-31] */ \
289 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
290 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
291 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
292 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
293 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Typecheck: any [BHSDQ]P FP. */ \
296 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
297 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
298 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
299 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
300 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
301 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
302 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
303 /* Any integer register; used for error messages only. */ \
304 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
306 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
307 /* Pseudo type to mark the end of the enumerator sequence. */ \
308 BASIC_REG_TYPE(MAX)
309
310 #undef BASIC_REG_TYPE
311 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
312 #undef MULTI_REG_TYPE
313 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
314
315 /* Register type enumerators. */
316 typedef enum
317 {
318 /* A list of REG_TYPE_*. */
319 AARCH64_REG_TYPES
320 } aarch64_reg_type;
321
322 #undef BASIC_REG_TYPE
323 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
324 #undef REG_TYPE
325 #define REG_TYPE(T) (1 << REG_TYPE_##T)
326 #undef MULTI_REG_TYPE
327 #define MULTI_REG_TYPE(T,V) V,
328
329 /* Values indexed by aarch64_reg_type to assist the type checking. */
330 static const unsigned reg_type_masks[] =
331 {
332 AARCH64_REG_TYPES
333 };
334
335 #undef BASIC_REG_TYPE
336 #undef REG_TYPE
337 #undef MULTI_REG_TYPE
338 #undef AARCH64_REG_TYPES
339
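/* Illustrative expansion, not part of the original source: with the macro
   definitions above, the enumeration and the mask table expand roughly to

     typedef enum
     {
       REG_TYPE_R_32, REG_TYPE_R_64, ..., REG_TYPE_R_N, REG_TYPE_MAX
     } aarch64_reg_type;

     static const unsigned reg_type_masks[] =
     {
       1 << REG_TYPE_R_32,
       1 << REG_TYPE_R_64,
       ...
       (1 << REG_TYPE_R_32) | (1 << REG_TYPE_R_64)
         | (1 << REG_TYPE_SP_32) | (1 << REG_TYPE_SP_64)
         | (1 << REG_TYPE_Z_32) | (1 << REG_TYPE_Z_64),
       ...
     };

   so reg_type_masks[T] is the bitmask of basic register types acceptable
   where a register of type T is expected.  */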
340 /* Diagnostics used when we don't get a register of the expected type.
341 Note: this has to be synchronized with the aarch64_reg_type definitions
342 above. */
343 static const char *
344 get_reg_expected_msg (aarch64_reg_type reg_type)
345 {
346 const char *msg;
347
348 switch (reg_type)
349 {
350 case REG_TYPE_R_32:
351 msg = N_("integer 32-bit register expected");
352 break;
353 case REG_TYPE_R_64:
354 msg = N_("integer 64-bit register expected");
355 break;
356 case REG_TYPE_R_N:
357 msg = N_("integer register expected");
358 break;
359 case REG_TYPE_R_Z_SP:
360 msg = N_("integer, zero or SP register expected");
361 break;
362 case REG_TYPE_FP_B:
363 msg = N_("8-bit SIMD scalar register expected");
364 break;
365 case REG_TYPE_FP_H:
366 msg = N_("16-bit SIMD scalar or floating-point half precision "
367 "register expected");
368 break;
369 case REG_TYPE_FP_S:
370 msg = N_("32-bit SIMD scalar or floating-point single precision "
371 "register expected");
372 break;
373 case REG_TYPE_FP_D:
374 msg = N_("64-bit SIMD scalar or floating-point double precision "
375 "register expected");
376 break;
377 case REG_TYPE_FP_Q:
378 msg = N_("128-bit SIMD scalar or floating-point quad precision "
379 "register expected");
380 break;
381 case REG_TYPE_CN:
382 msg = N_("C0 - C15 expected");
383 break;
384 case REG_TYPE_R_Z_BHSDQ_V:
385 msg = N_("register expected");
386 break;
387 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
388 msg = N_("SIMD scalar or floating-point register expected");
389 break;
390 case REG_TYPE_VN: /* any V reg */
391 msg = N_("vector register expected");
392 break;
393 default:
394 as_fatal (_("invalid register type %d"), reg_type);
395 }
396 return msg;
397 }
398
399 /* Some well known registers that we refer to directly elsewhere. */
400 #define REG_SP 31
401
402 /* Instructions take 4 bytes in the object file. */
403 #define INSN_SIZE 4
404
405 /* Define some common error messages. */
406 #define BAD_SP _("SP not allowed here")
407
408 static struct hash_control *aarch64_ops_hsh;
409 static struct hash_control *aarch64_cond_hsh;
410 static struct hash_control *aarch64_shift_hsh;
411 static struct hash_control *aarch64_sys_regs_hsh;
412 static struct hash_control *aarch64_pstatefield_hsh;
413 static struct hash_control *aarch64_sys_regs_ic_hsh;
414 static struct hash_control *aarch64_sys_regs_dc_hsh;
415 static struct hash_control *aarch64_sys_regs_at_hsh;
416 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
417 static struct hash_control *aarch64_reg_hsh;
418 static struct hash_control *aarch64_barrier_opt_hsh;
419 static struct hash_control *aarch64_nzcv_hsh;
420 static struct hash_control *aarch64_pldop_hsh;
421
422 /* Stuff needed to resolve the label ambiguity
423 As:
424 ...
425 label: <insn>
426 may differ from:
427 ...
428 label:
429 <insn> */
430
431 static symbolS *last_label_seen;
432
433 /* Literal pool structure. Held on a per-section
434 and per-sub-section basis. */
435
436 #define MAX_LITERAL_POOL_SIZE 1024
437 typedef struct literal_expression
438 {
439 expressionS exp;
440 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
441 LITTLENUM_TYPE * bignum;
442 } literal_expression;
443
444 typedef struct literal_pool
445 {
446 literal_expression literals[MAX_LITERAL_POOL_SIZE];
447 unsigned int next_free_entry;
448 unsigned int id;
449 symbolS *symbol;
450 segT section;
451 subsegT sub_section;
452 int size;
453 struct literal_pool *next;
454 } literal_pool;
455
456 /* Pointer to a linked list of literal pools. */
457 static literal_pool *list_of_pools = NULL;
458 \f
459 /* Pure syntax. */
460
461 /* This array holds the chars that always start a comment. If the
462 pre-processor is disabled, these aren't very useful. */
463 const char comment_chars[] = "";
464
465 /* This array holds the chars that only start a comment at the beginning of
466 a line. If the line seems to have the form '# 123 filename'
467 .line and .file directives will appear in the pre-processed output. */
468 /* Note that input_file.c hand checks for '#' at the beginning of the
469 first line of the input file. This is because the compiler outputs
470 #NO_APP at the beginning of its output. */
471 /* Also note that comments like this one will always work. */
472 const char line_comment_chars[] = "#";
473
474 const char line_separator_chars[] = ";";
475
476 /* Chars that can be used to separate the mantissa
477 from the exponent in floating point numbers. */
478 const char EXP_CHARS[] = "eE";
479
480 /* Chars that mean this number is a floating point constant. */
481 /* As in 0f12.456 */
482 /* or 0d1.2345e12 */
483
484 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
485
486 /* Prefix character that indicates the start of an immediate value. */
487 #define is_immediate_prefix(C) ((C) == '#')
488
489 /* Separator character handling. */
490
491 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
492
493 static inline bfd_boolean
494 skip_past_char (char **str, char c)
495 {
496 if (**str == c)
497 {
498 (*str)++;
499 return TRUE;
500 }
501 else
502 return FALSE;
503 }
504
505 #define skip_past_comma(str) skip_past_char (str, ',')
506
507 /* Arithmetic expressions (possibly involving symbols). */
508
509 static bfd_boolean in_my_get_expression_p = FALSE;
510
511 /* Third argument to my_get_expression. */
512 #define GE_NO_PREFIX 0
513 #define GE_OPT_PREFIX 1
514
515 /* Return TRUE if the string pointed to by *STR is successfully parsed
516 as a valid expression; *EP will be filled with the information of
517 such an expression. Otherwise return FALSE. */
518
519 static bfd_boolean
520 my_get_expression (expressionS * ep, char **str, int prefix_mode,
521 int reject_absent)
522 {
523 char *save_in;
524 segT seg;
525 int prefix_present_p = 0;
526
527 switch (prefix_mode)
528 {
529 case GE_NO_PREFIX:
530 break;
531 case GE_OPT_PREFIX:
532 if (is_immediate_prefix (**str))
533 {
534 (*str)++;
535 prefix_present_p = 1;
536 }
537 break;
538 default:
539 abort ();
540 }
541
542 memset (ep, 0, sizeof (expressionS));
543
544 save_in = input_line_pointer;
545 input_line_pointer = *str;
546 in_my_get_expression_p = TRUE;
547 seg = expression (ep);
548 in_my_get_expression_p = FALSE;
549
550 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
551 {
552 /* We found a bad expression in md_operand(). */
553 *str = input_line_pointer;
554 input_line_pointer = save_in;
555 if (prefix_present_p && ! error_p ())
556 set_fatal_syntax_error (_("bad expression"));
557 else
558 set_first_syntax_error (_("bad expression"));
559 return FALSE;
560 }
561
562 #ifdef OBJ_AOUT
563 if (seg != absolute_section
564 && seg != text_section
565 && seg != data_section
566 && seg != bss_section && seg != undefined_section)
567 {
568 set_syntax_error (_("bad segment"));
569 *str = input_line_pointer;
570 input_line_pointer = save_in;
571 return FALSE;
572 }
573 #else
574 (void) seg;
575 #endif
576
577 *str = input_line_pointer;
578 input_line_pointer = save_in;
579 return TRUE;
580 }
581
582 /* Turn a string in input_line_pointer into a floating point constant
583 of type TYPE, and store the appropriate bytes in *LITP. The number
584 of LITTLENUMS emitted is stored in *SIZEP. An error message is
585 returned, or NULL on OK. */
586
587 char *
588 md_atof (int type, char *litP, int *sizeP)
589 {
590 return ieee_md_atof (type, litP, sizeP, target_big_endian);
591 }
592
593 /* We handle all bad expressions here, so that we can report the faulty
594 instruction in the error message. */
595 void
596 md_operand (expressionS * exp)
597 {
598 if (in_my_get_expression_p)
599 exp->X_op = O_illegal;
600 }
601
602 /* Immediate values. */
603
604 /* Errors may be set multiple times during parsing or bit encoding
605 (particularly in the Neon bits), but usually the earliest error which is set
606 will be the most meaningful. Avoid overwriting it with later (cascading)
607 errors by calling this function. */
608
609 static void
610 first_error (const char *error)
611 {
612 if (! error_p ())
613 set_syntax_error (error);
614 }
615
616 /* Similar to first_error, but this function accepts a formatted error
617 message. */
618 static void
619 first_error_fmt (const char *format, ...)
620 {
621 va_list args;
622 enum
623 { size = 100 };
624 /* N.B. this single buffer will not cause error messages for different
625 instructions to pollute each other; this is because at the end of
626 processing of each assembly line, any error message will be
627 collected by as_bad. */
628 static char buffer[size];
629
630 if (! error_p ())
631 {
632 int ret ATTRIBUTE_UNUSED;
633 va_start (args, format);
634 ret = vsnprintf (buffer, size, format, args);
635 know (ret <= size - 1 && ret >= 0);
636 va_end (args);
637 set_syntax_error (buffer);
638 }
639 }
640
641 /* Register parsing. */
642
643 /* Generic register parser which is called by other specialized
644 register parsers.
645 CCP points to what should be the beginning of a register name.
646 If it is indeed a valid register name, advance CCP over it and
647 return the reg_entry structure; otherwise return NULL.
648 It does not issue diagnostics. */
649
650 static reg_entry *
651 parse_reg (char **ccp)
652 {
653 char *start = *ccp;
654 char *p;
655 reg_entry *reg;
656
657 #ifdef REGISTER_PREFIX
658 if (*start != REGISTER_PREFIX)
659 return NULL;
660 start++;
661 #endif
662
663 p = start;
664 if (!ISALPHA (*p) || !is_name_beginner (*p))
665 return NULL;
666
667 do
668 p++;
669 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
670
671 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
672
673 if (!reg)
674 return NULL;
675
676 *ccp = p;
677 return reg;
678 }
679
680 /* Return TRUE if REG->TYPE is a valid type for TYPE; otherwise
681 return FALSE. */
682 static bfd_boolean
683 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
684 {
685 if (reg->type == type)
686 return TRUE;
687
688 switch (type)
689 {
690 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
691 case REG_TYPE_R_Z_SP: /* Integer reg (inc {W}SP inc [WX]ZR). */
692 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
693 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
694 case REG_TYPE_VN: /* Vector register. */
695 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
696 return ((reg_type_masks[reg->type] & reg_type_masks[type])
697 == reg_type_masks[reg->type]);
698 default:
699 as_fatal ("unhandled type %d", type);
700 abort ();
701 }
702 }
703
704 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
705 Return the register number otherwise. *ISREG32 is set to one if the
706 register is 32-bit wide; *ISREGZERO is set to one if the register is
707 of type Z_32 or Z_64.
708 Note that this function does not issue any diagnostics. */
709
710 static int
711 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
712 int *isreg32, int *isregzero)
713 {
714 char *str = *ccp;
715 const reg_entry *reg = parse_reg (&str);
716
717 if (reg == NULL)
718 return PARSE_FAIL;
719
720 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
721 return PARSE_FAIL;
722
723 switch (reg->type)
724 {
725 case REG_TYPE_SP_32:
726 case REG_TYPE_SP_64:
727 if (reject_sp)
728 return PARSE_FAIL;
729 *isreg32 = reg->type == REG_TYPE_SP_32;
730 *isregzero = 0;
731 break;
732 case REG_TYPE_R_32:
733 case REG_TYPE_R_64:
734 *isreg32 = reg->type == REG_TYPE_R_32;
735 *isregzero = 0;
736 break;
737 case REG_TYPE_Z_32:
738 case REG_TYPE_Z_64:
739 if (reject_rz)
740 return PARSE_FAIL;
741 *isreg32 = reg->type == REG_TYPE_Z_32;
742 *isregzero = 1;
743 break;
744 default:
745 return PARSE_FAIL;
746 }
747
748 *ccp = str;
749
750 return reg->number;
751 }
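/* Illustrative sketch, not part of the original source: how a caller might
   use aarch64_reg_parse_32_64.  The function name is hypothetical.  */

static bfd_boolean
example_parse_gp_reg (char **str, int *regno)
{
  int isreg32, isregzero;

  *regno = aarch64_reg_parse_32_64 (str, /*reject_sp=*/0, /*reject_rz=*/0,
                                    &isreg32, &isregzero);
  if (*regno == PARSE_FAIL)
    {
      set_default_error ();
      return FALSE;
    }
  /* For "wzr" this yields *regno == 31, isreg32 == 1, isregzero == 1;
     for "sp" it yields *regno == 31, isreg32 == 0, isregzero == 0.  */
  return TRUE;
}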
752
753 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
754 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
755 otherwise return FALSE.
756
757 Accept only one occurrence of:
758 8b 16b 4h 8h 2s 4s 1d 2d
759 b h s d q */
760 static bfd_boolean
761 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
762 {
763 char *ptr = *str;
764 unsigned width;
765 unsigned element_size;
766 enum neon_el_type type;
767
768 /* skip '.' */
769 ptr++;
770
771 if (!ISDIGIT (*ptr))
772 {
773 width = 0;
774 goto elt_size;
775 }
776 width = strtoul (ptr, &ptr, 10);
777 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
778 {
779 first_error_fmt (_("bad size %d in vector width specifier"), width);
780 return FALSE;
781 }
782
783 elt_size:
784 switch (TOLOWER (*ptr))
785 {
786 case 'b':
787 type = NT_b;
788 element_size = 8;
789 break;
790 case 'h':
791 type = NT_h;
792 element_size = 16;
793 break;
794 case 's':
795 type = NT_s;
796 element_size = 32;
797 break;
798 case 'd':
799 type = NT_d;
800 element_size = 64;
801 break;
802 case 'q':
803 if (width == 1)
804 {
805 type = NT_q;
806 element_size = 128;
807 break;
808 }
809 /* fall through. */
810 default:
811 if (*ptr != '\0')
812 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
813 else
814 first_error (_("missing element size"));
815 return FALSE;
816 }
817 if (width != 0 && width * element_size != 64 && width * element_size != 128)
818 {
819 first_error_fmt (_
820 ("invalid element size %d and vector size combination %c"),
821 width, *ptr);
822 return FALSE;
823 }
824 ptr++;
825
826 parsed_type->type = type;
827 parsed_type->width = width;
828
829 *str = ptr;
830
831 return TRUE;
832 }
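/* Illustrative examples, not part of the original source: ".4s" parses to
   type == NT_s, width == 4 (a 128-bit arrangement, 4 * 32 bits); ".8b"
   parses to type == NT_b, width == 8 (64 bits); a bare ".s" with no width
   digit parses to width == 0, which the caller later treats as the
   element-selecting form, e.g. "v1.s[2]".  */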
833
834 /* Parse a single type, e.g. ".8b", leading period included.
835 Only applicable to Vn registers.
836
837 Return TRUE on success; otherwise return FALSE. */
838 static bfd_boolean
839 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
840 {
841 char *str = *ccp;
842
843 if (*str == '.')
844 {
845 if (! parse_neon_type_for_operand (vectype, &str))
846 {
847 first_error (_("vector type expected"));
848 return FALSE;
849 }
850 }
851 else
852 return FALSE;
853
854 *ccp = str;
855
856 return TRUE;
857 }
858
859 /* Parse a register of the type TYPE.
860
861 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
862 name or the parsed register is not of TYPE.
863
864 Otherwise return the register number, and optionally fill in the actual
865 type of the register in *RTYPE when multiple alternatives were given, and
866 return the register shape and element index information in *TYPEINFO.
867
868 IN_REG_LIST should be set to TRUE if the caller is parsing a register
869 list. */
870
871 static int
872 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
873 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
874 {
875 char *str = *ccp;
876 const reg_entry *reg = parse_reg (&str);
877 struct neon_type_el atype;
878 struct neon_type_el parsetype;
879 bfd_boolean is_typed_vecreg = FALSE;
880
881 atype.defined = 0;
882 atype.type = NT_invtype;
883 atype.width = -1;
884 atype.index = 0;
885
886 if (reg == NULL)
887 {
888 if (typeinfo)
889 *typeinfo = atype;
890 set_default_error ();
891 return PARSE_FAIL;
892 }
893
894 if (! aarch64_check_reg_type (reg, type))
895 {
896 DEBUG_TRACE ("reg type check failed");
897 set_default_error ();
898 return PARSE_FAIL;
899 }
900 type = reg->type;
901
902 if (type == REG_TYPE_VN
903 && parse_neon_operand_type (&parsetype, &str))
904 {
905 /* Register is of the form Vn.[bhsdq]. */
906 is_typed_vecreg = TRUE;
907
908 if (parsetype.width == 0)
909 /* Expect index. In the new scheme we cannot have
910 Vn.[bhsdq] represent a scalar. Therefore any
911 Vn.[bhsdq] should have an index following it.
912 Except in register lists, of course. */
913 atype.defined |= NTA_HASINDEX;
914 else
915 atype.defined |= NTA_HASTYPE;
916
917 atype.type = parsetype.type;
918 atype.width = parsetype.width;
919 }
920
921 if (skip_past_char (&str, '['))
922 {
923 expressionS exp;
924
925 /* Reject Sn[index] syntax. */
926 if (!is_typed_vecreg)
927 {
928 first_error (_("this type of register can't be indexed"));
929 return PARSE_FAIL;
930 }
931
932 if (in_reg_list == TRUE)
933 {
934 first_error (_("index not allowed inside register list"));
935 return PARSE_FAIL;
936 }
937
938 atype.defined |= NTA_HASINDEX;
939
940 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
941
942 if (exp.X_op != O_constant)
943 {
944 first_error (_("constant expression required"));
945 return PARSE_FAIL;
946 }
947
948 if (! skip_past_char (&str, ']'))
949 return PARSE_FAIL;
950
951 atype.index = exp.X_add_number;
952 }
953 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
954 {
955 /* Indexed vector register expected. */
956 first_error (_("indexed vector register expected"));
957 return PARSE_FAIL;
958 }
959
960 /* A vector reg Vn should be typed or indexed. */
961 if (type == REG_TYPE_VN && atype.defined == 0)
962 {
963 first_error (_("invalid use of vector register"));
964 }
965
966 if (typeinfo)
967 *typeinfo = atype;
968
969 if (rtype)
970 *rtype = type;
971
972 *ccp = str;
973
974 return reg->number;
975 }
976
977 /* Parse register.
978
979 Return the register number on success; return PARSE_FAIL otherwise.
980
981 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
982 the register (e.g. NEON double or quad reg when either has been requested).
983
984 If this is a NEON vector register with additional type information, fill
985 in the struct pointed to by VECTYPE (if non-NULL).
986
987 This parser does not handle register lists. */
988
989 static int
990 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
991 aarch64_reg_type *rtype, struct neon_type_el *vectype)
992 {
993 struct neon_type_el atype;
994 char *str = *ccp;
995 int reg = parse_typed_reg (&str, type, rtype, &atype,
996 /*in_reg_list= */ FALSE);
997
998 if (reg == PARSE_FAIL)
999 return PARSE_FAIL;
1000
1001 if (vectype)
1002 *vectype = atype;
1003
1004 *ccp = str;
1005
1006 return reg;
1007 }
1008
1009 static inline bfd_boolean
1010 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
1011 {
1012 return
1013 e1.type == e2.type
1014 && e1.defined == e2.defined
1015 && e1.width == e2.width && e1.index == e2.index;
1016 }
1017
1018 /* This function parses the NEON register list. On success, it returns
1019 the parsed register list information in the following encoded format:
1020
1021 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1022 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1023
1024 The information of the register shape and/or index is returned in
1025 *VECTYPE.
1026
1027 It returns PARSE_FAIL if the register list is invalid.
1028
1029 The list contains one to four registers.
1030 Each register can be one of:
1031 <Vt>.<T>[<index>]
1032 <Vt>.<T>
1033 All <T> should be identical.
1034 All <index> should be identical.
1035 There are restrictions on <Vt> numbers which are checked later
1036 (by reg_list_valid_p). */
1037
1038 static int
1039 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1040 {
1041 char *str = *ccp;
1042 int nb_regs;
1043 struct neon_type_el typeinfo, typeinfo_first;
1044 int val, val_range;
1045 int in_range;
1046 int ret_val;
1047 int i;
1048 bfd_boolean error = FALSE;
1049 bfd_boolean expect_index = FALSE;
1050
1051 if (*str != '{')
1052 {
1053 set_syntax_error (_("expecting {"));
1054 return PARSE_FAIL;
1055 }
1056 str++;
1057
1058 nb_regs = 0;
1059 typeinfo_first.defined = 0;
1060 typeinfo_first.type = NT_invtype;
1061 typeinfo_first.width = -1;
1062 typeinfo_first.index = 0;
1063 ret_val = 0;
1064 val = -1;
1065 val_range = -1;
1066 in_range = 0;
1067 do
1068 {
1069 if (in_range)
1070 {
1071 str++; /* skip over '-' */
1072 val_range = val;
1073 }
1074 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1075 /*in_reg_list= */ TRUE);
1076 if (val == PARSE_FAIL)
1077 {
1078 set_first_syntax_error (_("invalid vector register in list"));
1079 error = TRUE;
1080 continue;
1081 }
1082 /* reject [bhsd]n */
1083 if (typeinfo.defined == 0)
1084 {
1085 set_first_syntax_error (_("invalid scalar register in list"));
1086 error = TRUE;
1087 continue;
1088 }
1089
1090 if (typeinfo.defined & NTA_HASINDEX)
1091 expect_index = TRUE;
1092
1093 if (in_range)
1094 {
1095 if (val < val_range)
1096 {
1097 set_first_syntax_error
1098 (_("invalid range in vector register list"));
1099 error = TRUE;
1100 }
1101 val_range++;
1102 }
1103 else
1104 {
1105 val_range = val;
1106 if (nb_regs == 0)
1107 typeinfo_first = typeinfo;
1108 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1109 {
1110 set_first_syntax_error
1111 (_("type mismatch in vector register list"));
1112 error = TRUE;
1113 }
1114 }
1115 if (! error)
1116 for (i = val_range; i <= val; i++)
1117 {
1118 ret_val |= i << (5 * nb_regs);
1119 nb_regs++;
1120 }
1121 in_range = 0;
1122 }
1123 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1124
1125 skip_whitespace (str);
1126 if (*str != '}')
1127 {
1128 set_first_syntax_error (_("end of vector register list not found"));
1129 error = TRUE;
1130 }
1131 str++;
1132
1133 skip_whitespace (str);
1134
1135 if (expect_index)
1136 {
1137 if (skip_past_char (&str, '['))
1138 {
1139 expressionS exp;
1140
1141 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1142 if (exp.X_op != O_constant)
1143 {
1144 set_first_syntax_error (_("constant expression required."));
1145 error = TRUE;
1146 }
1147 if (! skip_past_char (&str, ']'))
1148 error = TRUE;
1149 else
1150 typeinfo_first.index = exp.X_add_number;
1151 }
1152 else
1153 {
1154 set_first_syntax_error (_("expected index"));
1155 error = TRUE;
1156 }
1157 }
1158
1159 if (nb_regs > 4)
1160 {
1161 set_first_syntax_error (_("too many registers in vector register list"));
1162 error = TRUE;
1163 }
1164 else if (nb_regs == 0)
1165 {
1166 set_first_syntax_error (_("empty vector register list"));
1167 error = TRUE;
1168 }
1169
1170 *ccp = str;
1171 if (! error)
1172 *vectype = typeinfo_first;
1173
1174 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1175 }
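/* Illustrative example, not part of the original source: for the list
   "{v4.4s - v6.4s}" the loop above collects registers 4, 5 and 6, so

     ret_val == 4 | (5 << 5) | (6 << 10)

   and the function returns (ret_val << 2) | 2, i.e. the register numbers
   in bits 2-6, 7-11 and 12-16 and the count minus one in bits 0-1.  */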
1176
1177 /* Directives: register aliases. */
1178
1179 static reg_entry *
1180 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1181 {
1182 reg_entry *new;
1183 const char *name;
1184
1185 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1186 {
1187 if (new->builtin)
1188 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1189 str);
1190
1191 /* Only warn about a redefinition if it's not defined as the
1192 same register. */
1193 else if (new->number != number || new->type != type)
1194 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1195
1196 return NULL;
1197 }
1198
1199 name = xstrdup (str);
1200 new = xmalloc (sizeof (reg_entry));
1201
1202 new->name = name;
1203 new->number = number;
1204 new->type = type;
1205 new->builtin = FALSE;
1206
1207 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1208 abort ();
1209
1210 return new;
1211 }
1212
1213 /* Look for the .req directive. This is of the form:
1214
1215 new_register_name .req existing_register_name
1216
1217 If we find one, or if it looks sufficiently like one that we want to
1218 handle any error here, return TRUE. Otherwise return FALSE. */
1219
1220 static bfd_boolean
1221 create_register_alias (char *newname, char *p)
1222 {
1223 const reg_entry *old;
1224 char *oldname, *nbuf;
1225 size_t nlen;
1226
1227 /* The input scrubber ensures that whitespace after the mnemonic is
1228 collapsed to single spaces. */
1229 oldname = p;
1230 if (strncmp (oldname, " .req ", 6) != 0)
1231 return FALSE;
1232
1233 oldname += 6;
1234 if (*oldname == '\0')
1235 return FALSE;
1236
1237 old = hash_find (aarch64_reg_hsh, oldname);
1238 if (!old)
1239 {
1240 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1241 return TRUE;
1242 }
1243
1244 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1245 the desired alias name, and p points to its end. If not, then
1246 the desired alias name is in the global original_case_string. */
1247 #ifdef TC_CASE_SENSITIVE
1248 nlen = p - newname;
1249 #else
1250 newname = original_case_string;
1251 nlen = strlen (newname);
1252 #endif
1253
1254 nbuf = alloca (nlen + 1);
1255 memcpy (nbuf, newname, nlen);
1256 nbuf[nlen] = '\0';
1257
1258 /* Create aliases under the new name as stated; an all-lowercase
1259 version of the new name; and an all-uppercase version of the new
1260 name. */
1261 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1262 {
1263 for (p = nbuf; *p; p++)
1264 *p = TOUPPER (*p);
1265
1266 if (strncmp (nbuf, newname, nlen))
1267 {
1268 /* If this attempt to create an additional alias fails, do not bother
1269 trying to create the all-lower case alias. We will fail and issue
1270 a second, duplicate error message. This situation arises when the
1271 programmer does something like:
1272 foo .req r0
1273 Foo .req r1
1274 The second .req creates the "Foo" alias but then fails to create
1275 the artificial FOO alias because it has already been created by the
1276 first .req. */
1277 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1278 return TRUE;
1279 }
1280
1281 for (p = nbuf; *p; p++)
1282 *p = TOLOWER (*p);
1283
1284 if (strncmp (nbuf, newname, nlen))
1285 insert_reg_alias (nbuf, old->number, old->type);
1286 }
1287
1288 return TRUE;
1289 }
1290
1291 /* Should never be called, as .req goes between the alias and the
1292 register name, not at the beginning of the line. */
1293 static void
1294 s_req (int a ATTRIBUTE_UNUSED)
1295 {
1296 as_bad (_("invalid syntax for .req directive"));
1297 }
1298
1299 /* The .unreq directive deletes an alias which was previously defined
1300 by .req. For example:
1301
1302 my_alias .req r11
1303 .unreq my_alias */
1304
1305 static void
1306 s_unreq (int a ATTRIBUTE_UNUSED)
1307 {
1308 char *name;
1309 char saved_char;
1310
1311 name = input_line_pointer;
1312
1313 while (*input_line_pointer != 0
1314 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1315 ++input_line_pointer;
1316
1317 saved_char = *input_line_pointer;
1318 *input_line_pointer = 0;
1319
1320 if (!*name)
1321 as_bad (_("invalid syntax for .unreq directive"));
1322 else
1323 {
1324 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1325
1326 if (!reg)
1327 as_bad (_("unknown register alias '%s'"), name);
1328 else if (reg->builtin)
1329 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1330 name);
1331 else
1332 {
1333 char *p;
1334 char *nbuf;
1335
1336 hash_delete (aarch64_reg_hsh, name, FALSE);
1337 free ((char *) reg->name);
1338 free (reg);
1339
1340 /* Also locate the all upper case and all lower case versions.
1341 Do not complain if we cannot find one or the other as it
1342 was probably deleted above. */
1343
1344 nbuf = strdup (name);
1345 for (p = nbuf; *p; p++)
1346 *p = TOUPPER (*p);
1347 reg = hash_find (aarch64_reg_hsh, nbuf);
1348 if (reg)
1349 {
1350 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1351 free ((char *) reg->name);
1352 free (reg);
1353 }
1354
1355 for (p = nbuf; *p; p++)
1356 *p = TOLOWER (*p);
1357 reg = hash_find (aarch64_reg_hsh, nbuf);
1358 if (reg)
1359 {
1360 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1361 free ((char *) reg->name);
1362 free (reg);
1363 }
1364
1365 free (nbuf);
1366 }
1367 }
1368
1369 *input_line_pointer = saved_char;
1370 demand_empty_rest_of_line ();
1371 }
1372
1373 /* Directives: Instruction set selection. */
1374
1375 #ifdef OBJ_ELF
1376 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1377 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1378 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1379 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1380
1381 /* Create a new mapping symbol for the transition to STATE. */
1382
1383 static void
1384 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1385 {
1386 symbolS *symbolP;
1387 const char *symname;
1388 int type;
1389
1390 switch (state)
1391 {
1392 case MAP_DATA:
1393 symname = "$d";
1394 type = BSF_NO_FLAGS;
1395 break;
1396 case MAP_INSN:
1397 symname = "$x";
1398 type = BSF_NO_FLAGS;
1399 break;
1400 default:
1401 abort ();
1402 }
1403
1404 symbolP = symbol_new (symname, now_seg, value, frag);
1405 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1406
1407 /* Save the mapping symbols for future reference. Also check that
1408 we do not place two mapping symbols at the same offset within a
1409 frag. We'll handle overlap between frags in
1410 check_mapping_symbols.
1411
1412 If .fill or other data filling directive generates zero sized data,
1413 the mapping symbol for the following code will have the same value
1414 as the one generated for the data filling directive. In this case,
1415 we replace the old symbol with the new one at the same address. */
1416 if (value == 0)
1417 {
1418 if (frag->tc_frag_data.first_map != NULL)
1419 {
1420 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1421 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1422 &symbol_lastP);
1423 }
1424 frag->tc_frag_data.first_map = symbolP;
1425 }
1426 if (frag->tc_frag_data.last_map != NULL)
1427 {
1428 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1429 S_GET_VALUE (symbolP));
1430 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1431 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1432 &symbol_lastP);
1433 }
1434 frag->tc_frag_data.last_map = symbolP;
1435 }
1436
1437 /* We must sometimes convert a region marked as code to data during
1438 code alignment, if an odd number of bytes have to be padded. The
1439 code mapping symbol is pushed to an aligned address. */
1440
1441 static void
1442 insert_data_mapping_symbol (enum mstate state,
1443 valueT value, fragS * frag, offsetT bytes)
1444 {
1445 /* If there was already a mapping symbol, remove it. */
1446 if (frag->tc_frag_data.last_map != NULL
1447 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1448 frag->fr_address + value)
1449 {
1450 symbolS *symp = frag->tc_frag_data.last_map;
1451
1452 if (value == 0)
1453 {
1454 know (frag->tc_frag_data.first_map == symp);
1455 frag->tc_frag_data.first_map = NULL;
1456 }
1457 frag->tc_frag_data.last_map = NULL;
1458 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1459 }
1460
1461 make_mapping_symbol (MAP_DATA, value, frag);
1462 make_mapping_symbol (state, value + bytes, frag);
1463 }
1464
1465 static void mapping_state_2 (enum mstate state, int max_chars);
1466
1467 /* Set the mapping state to STATE. Only call this when about to
1468 emit some STATE bytes to the file. */
1469
1470 void
1471 mapping_state (enum mstate state)
1472 {
1473 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1474
1475 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1476
1477 if (mapstate == state)
1478 /* The mapping symbol has already been emitted.
1479 There is nothing else to do. */
1480 return;
1481 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1482 /* This case will be evaluated later in the next else. */
1483 return;
1484 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1485 {
1486 /* Only add the symbol if the offset is > 0:
1487 if we're at the first frag, check it's size > 0;
1488 if we're not at the first frag, then for sure
1489 the offset is > 0. */
1490 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1491 const int add_symbol = (frag_now != frag_first)
1492 || (frag_now_fix () > 0);
1493
1494 if (add_symbol)
1495 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1496 }
1497
1498 mapping_state_2 (state, 0);
1499 #undef TRANSITION
1500 }
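/* Illustrative example, not part of the original source: for .text input
   such as

       add     x0, x1, x2
       .word   0x12345678
       add     x3, x4, x5

   the first instruction triggers a transition to MAP_INSN ("$x" at offset
   0), the .word directive a transition to MAP_DATA ("$d" at offset 4) and
   the second instruction a transition back to MAP_INSN ("$x" at offset 8);
   each transition emits one untyped local mapping symbol, as required by
   the AArch64 ELF specification cited above.  */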
1501
1502 /* Same as mapping_state, but MAX_CHARS bytes have already been
1503 allocated. Put the mapping symbol that far back. */
1504
1505 static void
1506 mapping_state_2 (enum mstate state, int max_chars)
1507 {
1508 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1509
1510 if (!SEG_NORMAL (now_seg))
1511 return;
1512
1513 if (mapstate == state)
1514 /* The mapping symbol has already been emitted.
1515 There is nothing else to do. */
1516 return;
1517
1518 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1519 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1520 }
1521 #else
1522 #define mapping_state(x) /* nothing */
1523 #define mapping_state_2(x, y) /* nothing */
1524 #endif
1525
1526 /* Directives: sectioning and alignment. */
1527
1528 static void
1529 s_bss (int ignore ATTRIBUTE_UNUSED)
1530 {
1531 /* We don't support putting frags in the BSS segment, we fake it by
1532 marking in_bss, then looking at s_skip for clues. */
1533 subseg_set (bss_section, 0);
1534 demand_empty_rest_of_line ();
1535 mapping_state (MAP_DATA);
1536 }
1537
1538 static void
1539 s_even (int ignore ATTRIBUTE_UNUSED)
1540 {
1541 /* Never make frag if expect extra pass. */
1542 if (!need_pass_2)
1543 frag_align (1, 0, 0);
1544
1545 record_alignment (now_seg, 1);
1546
1547 demand_empty_rest_of_line ();
1548 }
1549
1550 /* Directives: Literal pools. */
1551
1552 static literal_pool *
1553 find_literal_pool (int size)
1554 {
1555 literal_pool *pool;
1556
1557 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1558 {
1559 if (pool->section == now_seg
1560 && pool->sub_section == now_subseg && pool->size == size)
1561 break;
1562 }
1563
1564 return pool;
1565 }
1566
1567 static literal_pool *
1568 find_or_make_literal_pool (int size)
1569 {
1570 /* Next literal pool ID number. */
1571 static unsigned int latest_pool_num = 1;
1572 literal_pool *pool;
1573
1574 pool = find_literal_pool (size);
1575
1576 if (pool == NULL)
1577 {
1578 /* Create a new pool. */
1579 pool = xmalloc (sizeof (*pool));
1580 if (!pool)
1581 return NULL;
1582
1583 /* Currently we always put the literal pool in the current text
1584 section. If we were generating "small" model code where we
1585 knew that all code and initialised data was within 1MB then
1586 we could output literals to mergeable, read-only data
1587 sections. */
1588
1589 pool->next_free_entry = 0;
1590 pool->section = now_seg;
1591 pool->sub_section = now_subseg;
1592 pool->size = size;
1593 pool->next = list_of_pools;
1594 pool->symbol = NULL;
1595
1596 /* Add it to the list. */
1597 list_of_pools = pool;
1598 }
1599
1600 /* New pools, and emptied pools, will have a NULL symbol. */
1601 if (pool->symbol == NULL)
1602 {
1603 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1604 (valueT) 0, &zero_address_frag);
1605 pool->id = latest_pool_num++;
1606 }
1607
1608 /* Done. */
1609 return pool;
1610 }
1611
1612 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1613 Return TRUE on success, otherwise return FALSE. */
1614 static bfd_boolean
1615 add_to_lit_pool (expressionS *exp, int size)
1616 {
1617 literal_pool *pool;
1618 unsigned int entry;
1619
1620 pool = find_or_make_literal_pool (size);
1621
1622 /* Check if this literal value is already in the pool. */
1623 for (entry = 0; entry < pool->next_free_entry; entry++)
1624 {
1625 expressionS * litexp = & pool->literals[entry].exp;
1626
1627 if ((litexp->X_op == exp->X_op)
1628 && (exp->X_op == O_constant)
1629 && (litexp->X_add_number == exp->X_add_number)
1630 && (litexp->X_unsigned == exp->X_unsigned))
1631 break;
1632
1633 if ((litexp->X_op == exp->X_op)
1634 && (exp->X_op == O_symbol)
1635 && (litexp->X_add_number == exp->X_add_number)
1636 && (litexp->X_add_symbol == exp->X_add_symbol)
1637 && (litexp->X_op_symbol == exp->X_op_symbol))
1638 break;
1639 }
1640
1641 /* Do we need to create a new entry? */
1642 if (entry == pool->next_free_entry)
1643 {
1644 if (entry >= MAX_LITERAL_POOL_SIZE)
1645 {
1646 set_syntax_error (_("literal pool overflow"));
1647 return FALSE;
1648 }
1649
1650 pool->literals[entry].exp = *exp;
1651 pool->next_free_entry += 1;
1652 if (exp->X_op == O_big)
1653 {
1654 /* PR 16688: Bignums are held in a single global array. We must
1655 copy and preserve that value now, before it is overwritten. */
1656 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1657 memcpy (pool->literals[entry].bignum, generic_bignum,
1658 CHARS_PER_LITTLENUM * exp->X_add_number);
1659 }
1660 else
1661 pool->literals[entry].bignum = NULL;
1662 }
1663
1664 exp->X_op = O_symbol;
1665 exp->X_add_number = ((int) entry) * size;
1666 exp->X_add_symbol = pool->symbol;
1667
1668 return TRUE;
1669 }
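/* Illustrative sketch, not part of the original source: a caller that
   wants an 8-byte literal emitted by a later .ltorg/.pool directive could
   do roughly the following (the function name is hypothetical).  */

static bfd_boolean
example_defer_to_lit_pool (expressionS *exp)
{
  /* On success the expression is rewritten in place to refer to the pool
     symbol plus the byte offset of the (possibly pre-existing) entry, so
     the instruction can be fixed up against that symbol later on.  */
  return add_to_lit_pool (exp, 8);
}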
1670
1671 /* Can't use symbol_new here, so have to create a symbol and then at
1672 a later date assign it a value. That's what these functions do. */
1673
1674 static void
1675 symbol_locate (symbolS * symbolP,
1676 const char *name,/* It is copied, the caller can modify. */
1677 segT segment, /* Segment identifier (SEG_<something>). */
1678 valueT valu, /* Symbol value. */
1679 fragS * frag) /* Associated fragment. */
1680 {
1681 size_t name_length;
1682 char *preserved_copy_of_name;
1683
1684 name_length = strlen (name) + 1; /* +1 for \0. */
1685 obstack_grow (&notes, name, name_length);
1686 preserved_copy_of_name = obstack_finish (&notes);
1687
1688 #ifdef tc_canonicalize_symbol_name
1689 preserved_copy_of_name =
1690 tc_canonicalize_symbol_name (preserved_copy_of_name);
1691 #endif
1692
1693 S_SET_NAME (symbolP, preserved_copy_of_name);
1694
1695 S_SET_SEGMENT (symbolP, segment);
1696 S_SET_VALUE (symbolP, valu);
1697 symbol_clear_list_pointers (symbolP);
1698
1699 symbol_set_frag (symbolP, frag);
1700
1701 /* Link to end of symbol chain. */
1702 {
1703 extern int symbol_table_frozen;
1704
1705 if (symbol_table_frozen)
1706 abort ();
1707 }
1708
1709 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1710
1711 obj_symbol_new_hook (symbolP);
1712
1713 #ifdef tc_symbol_new_hook
1714 tc_symbol_new_hook (symbolP);
1715 #endif
1716
1717 #ifdef DEBUG_SYMS
1718 verify_symbol_chain (symbol_rootP, symbol_lastP);
1719 #endif /* DEBUG_SYMS */
1720 }
1721
1722
1723 static void
1724 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1725 {
1726 unsigned int entry;
1727 literal_pool *pool;
1728 char sym_name[20];
1729 int align;
1730
1731 for (align = 2; align <= 4; align++)
1732 {
1733 int size = 1 << align;
1734
1735 pool = find_literal_pool (size);
1736 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1737 continue;
1738
1739 mapping_state (MAP_DATA);
1740
1741 /* Align the pool to the size of its entries.
1742 Only make a frag if we have to. */
1743 if (!need_pass_2)
1744 frag_align (align, 0, 0);
1745
1746 record_alignment (now_seg, align);
1747
1748 sprintf (sym_name, "$$lit_\002%x", pool->id);
1749
1750 symbol_locate (pool->symbol, sym_name, now_seg,
1751 (valueT) frag_now_fix (), frag_now);
1752 symbol_table_insert (pool->symbol);
1753
1754 for (entry = 0; entry < pool->next_free_entry; entry++)
1755 {
1756 expressionS * exp = & pool->literals[entry].exp;
1757
1758 if (exp->X_op == O_big)
1759 {
1760 /* PR 16688: Restore the global bignum value. */
1761 gas_assert (pool->literals[entry].bignum != NULL);
1762 memcpy (generic_bignum, pool->literals[entry].bignum,
1763 CHARS_PER_LITTLENUM * exp->X_add_number);
1764 }
1765
1766 /* First output the expression in the instruction to the pool. */
1767 emit_expr (exp, size); /* .word|.xword */
1768
1769 if (exp->X_op == O_big)
1770 {
1771 free (pool->literals[entry].bignum);
1772 pool->literals[entry].bignum = NULL;
1773 }
1774 }
1775
1776 /* Mark the pool as empty. */
1777 pool->next_free_entry = 0;
1778 pool->symbol = NULL;
1779 }
1780 }
1781
1782 #ifdef OBJ_ELF
1783 /* Forward declarations for functions below, in the MD interface
1784 section. */
1785 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1786 static struct reloc_table_entry * find_reloc_table_entry (char **);
1787
1788 /* Directives: Data. */
1789 /* N.B. the support for relocation suffix in this directive needs to be
1790 implemented properly. */
1791
1792 static void
1793 s_aarch64_elf_cons (int nbytes)
1794 {
1795 expressionS exp;
1796
1797 #ifdef md_flush_pending_output
1798 md_flush_pending_output ();
1799 #endif
1800
1801 if (is_it_end_of_statement ())
1802 {
1803 demand_empty_rest_of_line ();
1804 return;
1805 }
1806
1807 #ifdef md_cons_align
1808 md_cons_align (nbytes);
1809 #endif
1810
1811 mapping_state (MAP_DATA);
1812 do
1813 {
1814 struct reloc_table_entry *reloc;
1815
1816 expression (&exp);
1817
1818 if (exp.X_op != O_symbol)
1819 emit_expr (&exp, (unsigned int) nbytes);
1820 else
1821 {
1822 skip_past_char (&input_line_pointer, '#');
1823 if (skip_past_char (&input_line_pointer, ':'))
1824 {
1825 reloc = find_reloc_table_entry (&input_line_pointer);
1826 if (reloc == NULL)
1827 as_bad (_("unrecognized relocation suffix"));
1828 else
1829 as_bad (_("unimplemented relocation suffix"));
1830 ignore_rest_of_line ();
1831 return;
1832 }
1833 else
1834 emit_expr (&exp, (unsigned int) nbytes);
1835 }
1836 }
1837 while (*input_line_pointer++ == ',');
1838
1839 /* Put terminator back into stream. */
1840 input_line_pointer--;
1841 demand_empty_rest_of_line ();
1842 }
1843
1844 #endif /* OBJ_ELF */
1845
1846 /* Output a 32-bit word, but mark as an instruction. */
1847
1848 static void
1849 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1850 {
1851 expressionS exp;
1852
1853 #ifdef md_flush_pending_output
1854 md_flush_pending_output ();
1855 #endif
1856
1857 if (is_it_end_of_statement ())
1858 {
1859 demand_empty_rest_of_line ();
1860 return;
1861 }
1862
1863 if (!need_pass_2)
1864 frag_align_code (2, 0);
1865 #ifdef OBJ_ELF
1866 mapping_state (MAP_INSN);
1867 #endif
1868
1869 do
1870 {
1871 expression (&exp);
1872 if (exp.X_op != O_constant)
1873 {
1874 as_bad (_("constant expression required"));
1875 ignore_rest_of_line ();
1876 return;
1877 }
1878
1879 if (target_big_endian)
1880 {
1881 unsigned int val = exp.X_add_number;
1882 exp.X_add_number = SWAP_32 (val);
1883 }
1884 emit_expr (&exp, 4);
1885 }
1886 while (*input_line_pointer++ == ',');
1887
1888 /* Put terminator back into stream. */
1889 input_line_pointer--;
1890 demand_empty_rest_of_line ();
1891 }
1892
1893 #ifdef OBJ_ELF
1894 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1895
1896 static void
1897 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1898 {
1899 expressionS exp;
1900
1901 /* Since we're just labelling the code, there's no need to define a
1902 mapping symbol. */
1903 expression (&exp);
1904 /* Make sure there is enough room in this frag for the following
1905 blr. This trick only works if the blr follows immediately after
1906 the .tlsdesc directive. */
1907 frag_grow (4);
1908 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1909 BFD_RELOC_AARCH64_TLSDESC_CALL);
1910
1911 demand_empty_rest_of_line ();
1912 }
1913 #endif /* OBJ_ELF */
1914
1915 static void s_aarch64_arch (int);
1916 static void s_aarch64_cpu (int);
1917 static void s_aarch64_arch_extension (int);
1918
1919 /* This table describes all the machine specific pseudo-ops the assembler
1920 has to support. The fields are:
1921 pseudo-op name without dot
1922 function to call to execute this pseudo-op
1923 Integer arg to pass to the function. */
1924
1925 const pseudo_typeS md_pseudo_table[] = {
1926 /* Never called because '.req' does not start a line. */
1927 {"req", s_req, 0},
1928 {"unreq", s_unreq, 0},
1929 {"bss", s_bss, 0},
1930 {"even", s_even, 0},
1931 {"ltorg", s_ltorg, 0},
1932 {"pool", s_ltorg, 0},
1933 {"cpu", s_aarch64_cpu, 0},
1934 {"arch", s_aarch64_arch, 0},
1935 {"arch_extension", s_aarch64_arch_extension, 0},
1936 {"inst", s_aarch64_inst, 0},
1937 #ifdef OBJ_ELF
1938 {"tlsdesccall", s_tlsdesccall, 0},
1939 {"word", s_aarch64_elf_cons, 4},
1940 {"long", s_aarch64_elf_cons, 4},
1941 {"xword", s_aarch64_elf_cons, 8},
1942 {"dword", s_aarch64_elf_cons, 8},
1943 #endif
1944 {0, 0, 0}
1945 };
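/* Illustrative example, not part of the original source: assembly input
   exercising some of the pseudo-ops above.  ".inst" emits the constant as
   an instruction word (0xd503201f is a NOP), ".xword" emits 8 bytes of
   data, and ".ltorg" dumps any pending literal pools at that point.

       .cpu    cortex-a57
       tmp     .req    x9
       .inst   0xd503201f
       .xword  0x1234
       .ltorg
       .unreq  tmp
*/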
1946 \f
1947
1948 /* Check whether STR points to a register name followed by a comma or the
1949 end of line; REG_TYPE indicates which register types are checked
1950 against. Return TRUE if STR is such a register name; otherwise return
1951 FALSE. The function does not intend to produce any diagnostics, but since
1952 the register parser aarch64_reg_parse, which is called by this function,
1953 does produce diagnostics, we call clear_error to clear any diagnostics
1954 that may be generated by aarch64_reg_parse.
1955 Also, the function returns FALSE directly if there is any user error
1956 present at the function entry. This prevents the existing diagnostics
1957 state from being spoiled.
1958 The function currently serves parse_constant_immediate and
1959 parse_big_immediate only. */
1960 static bfd_boolean
1961 reg_name_p (char *str, aarch64_reg_type reg_type)
1962 {
1963 int reg;
1964
1965 /* Prevent the diagnostics state from being spoiled. */
1966 if (error_p ())
1967 return FALSE;
1968
1969 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1970
1971 /* Clear the parsing error that may be set by the reg parser. */
1972 clear_error ();
1973
1974 if (reg == PARSE_FAIL)
1975 return FALSE;
1976
1977 skip_whitespace (str);
1978 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1979 return TRUE;
1980
1981 return FALSE;
1982 }
1983
1984 /* Parser functions used exclusively in instruction operands. */
1985
1986 /* Parse an immediate expression which may not be constant.
1987
1988 To prevent the expression parser from pushing a register name
1989 into the symbol table as an undefined symbol, firstly a check is
1990 done to find out whether STR is a valid register name followed
1991 by a comma or the end of line. Return FALSE if STR is such a
1992 string. */
1993
1994 static bfd_boolean
1995 parse_immediate_expression (char **str, expressionS *exp)
1996 {
1997 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1998 {
1999 set_recoverable_error (_("immediate operand required"));
2000 return FALSE;
2001 }
2002
2003 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2004
2005 if (exp->X_op == O_absent)
2006 {
2007 set_fatal_syntax_error (_("missing immediate expression"));
2008 return FALSE;
2009 }
2010
2011 return TRUE;
2012 }
2013
2014 /* Constant immediate-value read function for use in insn parsing.
2015 STR points to the beginning of the immediate (with the optional
2016 leading #); *VAL receives the value.
2017
2018 Return TRUE on success; otherwise return FALSE. */
2019
2020 static bfd_boolean
2021 parse_constant_immediate (char **str, int64_t * val)
2022 {
2023 expressionS exp;
2024
2025 if (! parse_immediate_expression (str, &exp))
2026 return FALSE;
2027
2028 if (exp.X_op != O_constant)
2029 {
2030 set_syntax_error (_("constant expression required"));
2031 return FALSE;
2032 }
2033
2034 *val = exp.X_add_number;
2035 return TRUE;
2036 }
2037
2038 static uint32_t
2039 encode_imm_float_bits (uint32_t imm)
2040 {
2041 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2042 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2043 }
2044
2045 /* Return TRUE if the single-precision floating-point value encoded in IMM
2046 can be expressed in the AArch64 8-bit signed floating-point format with
2047 3-bit exponent and normalized 4 bits of precision; in other words, the
2048 floating-point value must be expressible as
2049 (+/-) n / 16 * power (2, r)
2050 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
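
/* Illustrative values, worked out here rather than taken from the original
   sources: 2.0f (0x40000000) is 16/16 * 2^1 and 1.5f (0x3fc00000) is
   24/16 * 2^0, so both satisfy the constraint above; a constant such as
   0.1f has no exact representation of this form and is rejected.  */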
2051
2052 static bfd_boolean
2053 aarch64_imm_float_p (uint32_t imm)
2054 {
2055 /* If a single-precision floating-point value has the following bit
2056 pattern, it can be expressed in the AArch64 8-bit floating-point
2057 format:
2058
2059 3 32222222 2221111111111
2060 1 09876543 21098765432109876543210
2061 n Eeeeeexx xxxx0000000000000000000
2062
2063 where n, e and each x are either 0 or 1 independently, with
2064 E == ~ e. */
2065
2066 uint32_t pattern;
2067
2068 /* Prepare the pattern for 'Eeeeee'. */
2069 if (((imm >> 30) & 0x1) == 0)
2070 pattern = 0x3e000000;
2071 else
2072 pattern = 0x40000000;
2073
2074 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2075 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2076 }
2077
2078 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2079
2080 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2081 8-bit signed floating-point format with 3-bit exponent and normalized 4
2082 bits of precision (i.e. can be used in an FMOV instruction); return the
2083 equivalent single-precision encoding in *FPWORD.
2084
2085 Otherwise return FALSE. */
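
/* A worked example (computed here, not part of the original comment): the
   double-precision constant 2.0 is 0x4000000000000000; its low 32 bits are
   zero and its high word 0x40000000 matches the pattern described below, so
   the function returns TRUE and sets *FPWORD to 0x40000000, the
   single-precision encoding of 2.0.  */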
2086
2087 static bfd_boolean
2088 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2089 {
2090 /* If a double-precision floating-point value has the following bit
2091 pattern, it can be expressed in the AArch64 8-bit floating-point
2092 format:
2093
2094 6 66655555555 554444444...21111111111
2095 3 21098765432 109876543...098765432109876543210
2096 n Eeeeeeeeexx xxxx00000...000000000000000000000
2097
2098 where n, e and each x are either 0 or 1 independently, with
2099 E == ~ e. */
2100
2101 uint32_t pattern;
2102 uint32_t high32 = imm >> 32;
2103
2104 /* Lower 32 bits need to be 0s. */
2105 if ((imm & 0xffffffff) != 0)
2106 return FALSE;
2107
2108 /* Prepare the pattern for 'Eeeeeeeee'. */
2109 if (((high32 >> 30) & 0x1) == 0)
2110 pattern = 0x3fc00000;
2111 else
2112 pattern = 0x40000000;
2113
2114 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2115 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2116 {
2117 /* Convert to the single-precision encoding.
2118 i.e. convert
2119 n Eeeeeeeeexx xxxx00000...000000000000000000000
2120 to
2121 n Eeeeeexx xxxx0000000000000000000. */
2122 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2123 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2124 return TRUE;
2125 }
2126 else
2127 return FALSE;
2128 }
2129
2130 /* Parse a floating-point immediate. Return TRUE on success and return the
2131 value in *IMMED in the format of IEEE754 single-precision encoding.
2132 *CCP points to the start of the string; DP_P is TRUE when the immediate
2133 is expected to be in double-precision (N.B. this only matters when
2134 hexadecimal representation is involved).
2135
2136 N.B. 0.0 is accepted by this function. */
2137
2138 static bfd_boolean
2139 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2140 {
2141 char *str = *ccp;
2142 char *fpnum;
2143 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2144 int found_fpchar = 0;
2145 int64_t val = 0;
2146 unsigned fpword = 0;
2147 bfd_boolean hex_p = FALSE;
2148
2149 skip_past_char (&str, '#');
2150
2151 fpnum = str;
2152 skip_whitespace (fpnum);
2153
2154 if (strncmp (fpnum, "0x", 2) == 0)
2155 {
2156 /* Support the hexadecimal representation of the IEEE754 encoding.
2157 Double-precision is expected when DP_P is TRUE, otherwise the
2158 representation should be in single-precision. */
2159 if (! parse_constant_immediate (&str, &val))
2160 goto invalid_fp;
2161
2162 if (dp_p)
2163 {
2164 if (! aarch64_double_precision_fmovable (val, &fpword))
2165 goto invalid_fp;
2166 }
2167 else if ((uint64_t) val > 0xffffffff)
2168 goto invalid_fp;
2169 else
2170 fpword = val;
2171
2172 hex_p = TRUE;
2173 }
2174 else
2175 {
2176 /* We must not accidentally parse an integer as a floating-point number.
2177 Make sure that the value we parse is not an integer by checking for
2178 special characters '.', 'e' or 'E'. */
2179 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2180 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2181 {
2182 found_fpchar = 1;
2183 break;
2184 }
2185
2186 if (!found_fpchar)
2187 return FALSE;
2188 }
2189
2190 if (! hex_p)
2191 {
2192 int i;
2193
2194 if ((str = atof_ieee (str, 's', words)) == NULL)
2195 goto invalid_fp;
2196
2197 /* Our FP word must be 32 bits (single-precision FP). */
2198 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2199 {
2200 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2201 fpword |= words[i];
2202 }
2203 }
2204
2205 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2206 {
2207 *immed = fpword;
2208 *ccp = str;
2209 return TRUE;
2210 }
2211
2212 invalid_fp:
2213 set_fatal_syntax_error (_("invalid floating-point constant"));
2214 return FALSE;
2215 }
2216
2217 /* Less-generic immediate-value read function with the possibility of loading
2218 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2219 instructions.
2220
2221 To prevent the expression parser from pushing a register name into the
2222 symbol table as an undefined symbol, a check is first made to find
2223 out whether STR is a valid register name followed by a comma or the end
2224 of line. Return FALSE if STR is such a register. */
2225
2226 static bfd_boolean
2227 parse_big_immediate (char **str, int64_t *imm)
2228 {
2229 char *ptr = *str;
2230
2231 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2232 {
2233 set_syntax_error (_("immediate operand required"));
2234 return FALSE;
2235 }
2236
2237 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2238
2239 if (inst.reloc.exp.X_op == O_constant)
2240 *imm = inst.reloc.exp.X_add_number;
2241
2242 *str = ptr;
2243
2244 return TRUE;
2245 }
2246
2247 /* Record in *RELOC that the operand described by *OPERAND needs a GAS
2248 internal fixup. If NEED_LIBOPCODES_P is non-zero, the fixup will need
2249 assistance from libopcodes. */
2250
2251 static inline void
2252 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2253 const aarch64_opnd_info *operand,
2254 int need_libopcodes_p)
2255 {
2256 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2257 reloc->opnd = operand->type;
2258 if (need_libopcodes_p)
2259 reloc->need_libopcodes_p = 1;
2260 }
2261
2262 /* Return TRUE if the instruction needs to be fixed up later internally by
2263 GAS; otherwise return FALSE. */
2264
2265 static inline bfd_boolean
2266 aarch64_gas_internal_fixup_p (void)
2267 {
2268 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2269 }
2270
2271 /* Assign the immediate value to the relevant field in *OPERAND if
2272 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2273 needs an internal fixup in a later stage.
2274 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2275 IMM.VALUE that may get assigned with the constant. */
2276 static inline void
2277 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2278 aarch64_opnd_info *operand,
2279 int addr_off_p,
2280 int need_libopcodes_p,
2281 int skip_p)
2282 {
2283 if (reloc->exp.X_op == O_constant)
2284 {
2285 if (addr_off_p)
2286 operand->addr.offset.imm = reloc->exp.X_add_number;
2287 else
2288 operand->imm.value = reloc->exp.X_add_number;
2289 reloc->type = BFD_RELOC_UNUSED;
2290 }
2291 else
2292 {
2293 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2294 /* Tell libopcodes to ignore this operand or not. This is helpful
2295 when one of the operands needs to be fixed up later but we need
2296 libopcodes to check the other operands. */
2297 operand->skip = skip_p;
2298 }
2299 }
2300
2301 /* Relocation modifiers. Each entry in the table contains the textual
2302 name for the relocation which may be placed before a symbol used as
2303 a load/store offset, or add immediate. It must be surrounded by a
2304 leading and trailing colon, for example:
2305
2306 ldr x0, [x1, #:rello:varsym]
2307 add x0, x1, #:rello:varsym */
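
/* As a further illustration (hypothetical code, not part of the original
   comment), a typical use of these modifiers is the ADRP/ADD pair that
   materialises the address of a symbol:

   adrp x0, :pg_hi21:varsym   // page address of varsym
   add  x0, x0, :lo12:varsym  // plus the low 12 bits of varsym  */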
2308
2309 struct reloc_table_entry
2310 {
2311 const char *name;
2312 int pc_rel;
2313 bfd_reloc_code_real_type adr_type;
2314 bfd_reloc_code_real_type adrp_type;
2315 bfd_reloc_code_real_type movw_type;
2316 bfd_reloc_code_real_type add_type;
2317 bfd_reloc_code_real_type ldst_type;
2318 };
2319
2320 static struct reloc_table_entry reloc_table[] = {
2321 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2322 {"lo12", 0,
2323 0, /* adr_type */
2324 0,
2325 0,
2326 BFD_RELOC_AARCH64_ADD_LO12,
2327 BFD_RELOC_AARCH64_LDST_LO12},
2328
2329 /* Higher 21 bits of pc-relative page offset: ADRP */
2330 {"pg_hi21", 1,
2331 0, /* adr_type */
2332 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2333 0,
2334 0,
2335 0},
2336
2337 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2338 {"pg_hi21_nc", 1,
2339 0, /* adr_type */
2340 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2341 0,
2342 0,
2343 0},
2344
2345 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2346 {"abs_g0", 0,
2347 0, /* adr_type */
2348 0,
2349 BFD_RELOC_AARCH64_MOVW_G0,
2350 0,
2351 0},
2352
2353 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2354 {"abs_g0_s", 0,
2355 0, /* adr_type */
2356 0,
2357 BFD_RELOC_AARCH64_MOVW_G0_S,
2358 0,
2359 0},
2360
2361 /* Less significant bits 0-15 of address/value: MOVK, no check */
2362 {"abs_g0_nc", 0,
2363 0, /* adr_type */
2364 0,
2365 BFD_RELOC_AARCH64_MOVW_G0_NC,
2366 0,
2367 0},
2368
2369 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2370 {"abs_g1", 0,
2371 0, /* adr_type */
2372 0,
2373 BFD_RELOC_AARCH64_MOVW_G1,
2374 0,
2375 0},
2376
2377 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2378 {"abs_g1_s", 0,
2379 0, /* adr_type */
2380 0,
2381 BFD_RELOC_AARCH64_MOVW_G1_S,
2382 0,
2383 0},
2384
2385 /* Less significant bits 16-31 of address/value: MOVK, no check */
2386 {"abs_g1_nc", 0,
2387 0, /* adr_type */
2388 0,
2389 BFD_RELOC_AARCH64_MOVW_G1_NC,
2390 0,
2391 0},
2392
2393 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2394 {"abs_g2", 0,
2395 0, /* adr_type */
2396 0,
2397 BFD_RELOC_AARCH64_MOVW_G2,
2398 0,
2399 0},
2400
2401 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2402 {"abs_g2_s", 0,
2403 0, /* adr_type */
2404 0,
2405 BFD_RELOC_AARCH64_MOVW_G2_S,
2406 0,
2407 0},
2408
2409 /* Less significant bits 32-47 of address/value: MOVK, no check */
2410 {"abs_g2_nc", 0,
2411 0, /* adr_type */
2412 0,
2413 BFD_RELOC_AARCH64_MOVW_G2_NC,
2414 0,
2415 0},
2416
2417 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2418 {"abs_g3", 0,
2419 0, /* adr_type */
2420 0,
2421 BFD_RELOC_AARCH64_MOVW_G3,
2422 0,
2423 0},
2424
2425 /* Get to the page containing GOT entry for a symbol. */
2426 {"got", 1,
2427 0, /* adr_type */
2428 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2429 0,
2430 0,
2431 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2432
2433 /* 12 bit offset into the page containing GOT entry for that symbol. */
2434 {"got_lo12", 0,
2435 0, /* adr_type */
2436 0,
2437 0,
2438 0,
2439 BFD_RELOC_AARCH64_LD_GOT_LO12_NC},
2440
2441 /* Get to the page containing GOT TLS entry for a symbol */
2442 {"tlsgd", 0,
2443 0, /* adr_type */
2444 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2445 0,
2446 0,
2447 0},
2448
2449 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2450 {"tlsgd_lo12", 0,
2451 0, /* adr_type */
2452 0,
2453 0,
2454 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2455 0},
2456
2457 /* Get to the page containing GOT TLS entry for a symbol */
2458 {"tlsdesc", 0,
2459 0, /* adr_type */
2460 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2461 0,
2462 0,
2463 0},
2464
2465 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2466 {"tlsdesc_lo12", 0,
2467 0, /* adr_type */
2468 0,
2469 0,
2470 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2471 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC},
2472
2473 /* Get to the page containing GOT TLS entry for a symbol */
2474 {"gottprel", 0,
2475 0, /* adr_type */
2476 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2477 0,
2478 0,
2479 0},
2480
2481 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2482 {"gottprel_lo12", 0,
2483 0, /* adr_type */
2484 0,
2485 0,
2486 0,
2487 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC},
2488
2489 /* Get tp offset for a symbol. */
2490 {"tprel", 0,
2491 0, /* adr_type */
2492 0,
2493 0,
2494 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2495 0},
2496
2497 /* Get tp offset for a symbol. */
2498 {"tprel_lo12", 0,
2499 0, /* adr_type */
2500 0,
2501 0,
2502 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2503 0},
2504
2505 /* Get tp offset for a symbol. */
2506 {"tprel_hi12", 0,
2507 0, /* adr_type */
2508 0,
2509 0,
2510 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2511 0},
2512
2513 /* Get tp offset for a symbol. */
2514 {"tprel_lo12_nc", 0,
2515 0, /* adr_type */
2516 0,
2517 0,
2518 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2519 0},
2520
2521 /* Most significant bits 32-47 of address/value: MOVZ. */
2522 {"tprel_g2", 0,
2523 0, /* adr_type */
2524 0,
2525 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2526 0,
2527 0},
2528
2529 /* Most significant bits 16-31 of address/value: MOVZ. */
2530 {"tprel_g1", 0,
2531 0, /* adr_type */
2532 0,
2533 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2534 0,
2535 0},
2536
2537 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2538 {"tprel_g1_nc", 0,
2539 0, /* adr_type */
2540 0,
2541 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2542 0,
2543 0},
2544
2545 /* Most significant bits 0-15 of address/value: MOVZ. */
2546 {"tprel_g0", 0,
2547 0, /* adr_type */
2548 0,
2549 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2550 0,
2551 0},
2552
2553 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2554 {"tprel_g0_nc", 0,
2555 0, /* adr_type */
2556 0,
2557 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2558 0,
2559 0},
2560 };
2561
2562 /* Given the address of a pointer pointing to the textual name of a
2563 relocation as may appear in assembler source, attempt to find its
2564 details in reloc_table. The pointer will be updated to the character
2565 after the trailing colon. On failure, NULL will be returned;
2566 otherwise return the reloc_table_entry. */
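
/* A usage sketch (hypothetical input, not from the original sources): if P
   points at the text "lo12:sym", then

   entry = find_reloc_table_entry (&p);

   returns the "lo12" entry (entry->add_type == BFD_RELOC_AARCH64_ADD_LO12)
   and leaves P pointing at "sym".  */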
2567
2568 static struct reloc_table_entry *
2569 find_reloc_table_entry (char **str)
2570 {
2571 unsigned int i;
2572 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2573 {
2574 int length = strlen (reloc_table[i].name);
2575
2576 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2577 && (*str)[length] == ':')
2578 {
2579 *str += (length + 1);
2580 return &reloc_table[i];
2581 }
2582 }
2583
2584 return NULL;
2585 }
2586
2587 /* Mode argument to parse_shift and parse_shifter_operand. */
2588 enum parse_shift_mode
2589 {
2590 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2591 "#imm{,lsl #n}" */
2592 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2593 "#imm" */
2594 SHIFTED_LSL, /* bare "lsl #n" */
2595 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2596 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2597 };
2598
2599 /* Parse a <shift> operator on an AArch64 data processing instruction.
2600 Return TRUE on success; otherwise return FALSE. */
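
/* Some illustrative <shift> operands this parser handles (hypothetical
   instructions, not from the original source):

   add  x0, x1, x2, lsl #3      // shift on a register operand
   ldr  x0, [x1, w2, sxtw #3]   // extend in a register-offset address
   movi v0.4s, #255, msl #8     // MSL, accepted only in SHIFTED_LSL_MSL mode  */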
2601 static bfd_boolean
2602 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2603 {
2604 const struct aarch64_name_value_pair *shift_op;
2605 enum aarch64_modifier_kind kind;
2606 expressionS exp;
2607 int exp_has_prefix;
2608 char *s = *str;
2609 char *p = s;
2610
2611 for (p = *str; ISALPHA (*p); p++)
2612 ;
2613
2614 if (p == *str)
2615 {
2616 set_syntax_error (_("shift expression expected"));
2617 return FALSE;
2618 }
2619
2620 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2621
2622 if (shift_op == NULL)
2623 {
2624 set_syntax_error (_("shift operator expected"));
2625 return FALSE;
2626 }
2627
2628 kind = aarch64_get_operand_modifier (shift_op);
2629
2630 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2631 {
2632 set_syntax_error (_("invalid use of 'MSL'"));
2633 return FALSE;
2634 }
2635
2636 switch (mode)
2637 {
2638 case SHIFTED_LOGIC_IMM:
2639 if (aarch64_extend_operator_p (kind) == TRUE)
2640 {
2641 set_syntax_error (_("extending shift is not permitted"));
2642 return FALSE;
2643 }
2644 break;
2645
2646 case SHIFTED_ARITH_IMM:
2647 if (kind == AARCH64_MOD_ROR)
2648 {
2649 set_syntax_error (_("'ROR' shift is not permitted"));
2650 return FALSE;
2651 }
2652 break;
2653
2654 case SHIFTED_LSL:
2655 if (kind != AARCH64_MOD_LSL)
2656 {
2657 set_syntax_error (_("only 'LSL' shift is permitted"));
2658 return FALSE;
2659 }
2660 break;
2661
2662 case SHIFTED_REG_OFFSET:
2663 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2664 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2665 {
2666 set_fatal_syntax_error
2667 (_("invalid shift for the register offset addressing mode"));
2668 return FALSE;
2669 }
2670 break;
2671
2672 case SHIFTED_LSL_MSL:
2673 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2674 {
2675 set_syntax_error (_("invalid shift operator"));
2676 return FALSE;
2677 }
2678 break;
2679
2680 default:
2681 abort ();
2682 }
2683
2684 /* Whitespace can appear here if the next thing is a bare digit. */
2685 skip_whitespace (p);
2686
2687 /* Parse shift amount. */
2688 exp_has_prefix = 0;
2689 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2690 exp.X_op = O_absent;
2691 else
2692 {
2693 if (is_immediate_prefix (*p))
2694 {
2695 p++;
2696 exp_has_prefix = 1;
2697 }
2698 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2699 }
2700 if (exp.X_op == O_absent)
2701 {
2702 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2703 {
2704 set_syntax_error (_("missing shift amount"));
2705 return FALSE;
2706 }
2707 operand->shifter.amount = 0;
2708 }
2709 else if (exp.X_op != O_constant)
2710 {
2711 set_syntax_error (_("constant shift amount required"));
2712 return FALSE;
2713 }
2714 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2715 {
2716 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2717 return FALSE;
2718 }
2719 else
2720 {
2721 operand->shifter.amount = exp.X_add_number;
2722 operand->shifter.amount_present = 1;
2723 }
2724
2725 operand->shifter.operator_present = 1;
2726 operand->shifter.kind = kind;
2727
2728 *str = p;
2729 return TRUE;
2730 }
2731
2732 /* Parse a <shifter_operand> for a data processing instruction:
2733
2734 #<immediate>
2735 #<immediate>, LSL #imm
2736
2737 Validation of immediate operands is deferred to md_apply_fix.
2738
2739 Return TRUE on success; otherwise return FALSE. */
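
/* Illustrative immediate forms (hypothetical instructions, not from the
   original source):

   add x0, x1, #255            // plain arithmetic immediate
   add x0, x1, #255, lsl #12   // arithmetic immediate with optional LSL
   and x0, x1, #0xff           // logical immediate; no shifter allowed  */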
2740
2741 static bfd_boolean
2742 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2743 enum parse_shift_mode mode)
2744 {
2745 char *p;
2746
2747 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2748 return FALSE;
2749
2750 p = *str;
2751
2752 /* Accept an immediate expression. */
2753 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2754 return FALSE;
2755
2756 /* Accept optional LSL for arithmetic immediate values. */
2757 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2758 if (! parse_shift (&p, operand, SHIFTED_LSL))
2759 return FALSE;
2760
2761 /* Do not accept any shifter for logical immediate values. */
2762 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2763 && parse_shift (&p, operand, mode))
2764 {
2765 set_syntax_error (_("unexpected shift operator"));
2766 return FALSE;
2767 }
2768
2769 *str = p;
2770 return TRUE;
2771 }
2772
2773 /* Parse a <shifter_operand> for a data processing instruction:
2774
2775 <Rm>
2776 <Rm>, <shift>
2777 #<immediate>
2778 #<immediate>, LSL #imm
2779
2780 where <shift> is handled by parse_shift above, and the last two
2781 cases are handled by the function above.
2782
2783 Validation of immediate operands is deferred to md_apply_fix.
2784
2785 Return TRUE on success; otherwise return FALSE. */
2786
2787 static bfd_boolean
2788 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2789 enum parse_shift_mode mode)
2790 {
2791 int reg;
2792 int isreg32, isregzero;
2793 enum aarch64_operand_class opd_class
2794 = aarch64_get_operand_class (operand->type);
2795
2796 if ((reg =
2797 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2798 {
2799 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2800 {
2801 set_syntax_error (_("unexpected register in the immediate operand"));
2802 return FALSE;
2803 }
2804
2805 if (!isregzero && reg == REG_SP)
2806 {
2807 set_syntax_error (BAD_SP);
2808 return FALSE;
2809 }
2810
2811 operand->reg.regno = reg;
2812 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2813
2814 /* Accept optional shift operation on register. */
2815 if (! skip_past_comma (str))
2816 return TRUE;
2817
2818 if (! parse_shift (str, operand, mode))
2819 return FALSE;
2820
2821 return TRUE;
2822 }
2823 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2824 {
2825 set_syntax_error
2826 (_("integer register expected in the extended/shifted operand "
2827 "register"));
2828 return FALSE;
2829 }
2830
2831 /* We have a shifted immediate variable. */
2832 return parse_shifter_operand_imm (str, operand, mode);
2833 }
2834
2835 /* Return TRUE on success; return FALSE otherwise. */
2836
2837 static bfd_boolean
2838 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2839 enum parse_shift_mode mode)
2840 {
2841 char *p = *str;
2842
2843 /* Determine if we have the sequence of characters #: or just :
2844 coming next. If we do, then we check for a :rello: relocation
2845 modifier. If we don't, punt the whole lot to
2846 parse_shifter_operand. */
2847
2848 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2849 {
2850 struct reloc_table_entry *entry;
2851
2852 if (p[0] == '#')
2853 p += 2;
2854 else
2855 p++;
2856 *str = p;
2857
2858 /* Try to parse a relocation. Anything else is an error. */
2859 if (!(entry = find_reloc_table_entry (str)))
2860 {
2861 set_syntax_error (_("unknown relocation modifier"));
2862 return FALSE;
2863 }
2864
2865 if (entry->add_type == 0)
2866 {
2867 set_syntax_error
2868 (_("this relocation modifier is not allowed on this instruction"));
2869 return FALSE;
2870 }
2871
2872 /* Save str before we decompose it. */
2873 p = *str;
2874
2875 /* Next, we parse the expression. */
2876 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2877 return FALSE;
2878
2879 /* Record the relocation type (use the ADD variant here). */
2880 inst.reloc.type = entry->add_type;
2881 inst.reloc.pc_rel = entry->pc_rel;
2882
2883 /* If str is empty, we've reached the end, stop here. */
2884 if (**str == '\0')
2885 return TRUE;
2886
2887 /* Otherwise, we have a shifted reloc modifier, so rewind to
2888 recover the variable name and continue parsing for the shifter. */
2889 *str = p;
2890 return parse_shifter_operand_imm (str, operand, mode);
2891 }
2892
2893 return parse_shifter_operand (str, operand, mode);
2894 }
2895
2896 /* Parse all forms of an address expression. Information is written
2897 to *OPERAND and/or inst.reloc.
2898
2899 The A64 instruction set has the following addressing modes:
2900
2901 Offset
2902 [base] // in SIMD ld/st structure
2903 [base{,#0}] // in ld/st exclusive
2904 [base{,#imm}]
2905 [base,Xm{,LSL #imm}]
2906 [base,Xm,SXTX {#imm}]
2907 [base,Wm,(S|U)XTW {#imm}]
2908 Pre-indexed
2909 [base,#imm]!
2910 Post-indexed
2911 [base],#imm
2912 [base],Xm // in SIMD ld/st structure
2913 PC-relative (literal)
2914 label
2915 =immediate
2916
2917 (As a convenience, the notation "=immediate" is permitted in conjunction
2918 with the pc-relative literal load instructions to automatically place an
2919 immediate value or symbolic address in a nearby literal pool and generate
2920 a hidden label which references it.)
2921
2922 Upon successful parsing, the address structure in *OPERAND will be
2923 filled in the following way:
2924
2925 .base_regno = <base>
2926 .offset.is_reg // 1 if the offset is a register
2927 .offset.imm = <imm>
2928 .offset.regno = <Rm>
2929
2930 For different addressing modes defined in the A64 ISA:
2931
2932 Offset
2933 .pcrel=0; .preind=1; .postind=0; .writeback=0
2934 Pre-indexed
2935 .pcrel=0; .preind=1; .postind=0; .writeback=1
2936 Post-indexed
2937 .pcrel=0; .preind=0; .postind=1; .writeback=1
2938 PC-relative (literal)
2939 .pcrel=1; .preind=1; .postind=0; .writeback=0
2940
2941 The shift/extension information, if any, will be stored in .shifter.
2942
2943 It is the caller's responsibility to check for addressing modes not
2944 supported by the instruction, and to set inst.reloc.type. */
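
/* A few worked examples (hypothetical instructions) of how the fields above
   are filled in:

   ldr x0, [x1, #8]          // base_regno=1, offset.imm=8,
                             // preind=1, postind=0, writeback=0
   ldr x0, [x1, #8]!         // as above, but writeback=1
   ldr x0, [x1], #8          // preind=0, postind=1, writeback=1
   ldr x0, [x1, x2, lsl #3]  // offset.is_reg=1, offset.regno=2,
                             // shifter.kind=LSL, shifter.amount=3  */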
2945
2946 static bfd_boolean
2947 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2948 int accept_reg_post_index)
2949 {
2950 char *p = *str;
2951 int reg;
2952 int isreg32, isregzero;
2953 expressionS *exp = &inst.reloc.exp;
2954
2955 if (! skip_past_char (&p, '['))
2956 {
2957 /* =immediate or label. */
2958 operand->addr.pcrel = 1;
2959 operand->addr.preind = 1;
2960
2961 /* #:<reloc_op>:<symbol> */
2962 skip_past_char (&p, '#');
2963 if (reloc && skip_past_char (&p, ':'))
2964 {
2965 bfd_reloc_code_real_type ty;
2966 struct reloc_table_entry *entry;
2967
2968 /* Try to parse a relocation modifier. Anything else is
2969 an error. */
2970 entry = find_reloc_table_entry (&p);
2971 if (! entry)
2972 {
2973 set_syntax_error (_("unknown relocation modifier"));
2974 return FALSE;
2975 }
2976
2977 switch (operand->type)
2978 {
2979 case AARCH64_OPND_ADDR_PCREL21:
2980 /* adr */
2981 ty = entry->adr_type;
2982 break;
2983
2984 default:
2985 ty = entry->ldst_type;
2986 break;
2987 }
2988
2989 if (ty == 0)
2990 {
2991 set_syntax_error
2992 (_("this relocation modifier is not allowed on this "
2993 "instruction"));
2994 return FALSE;
2995 }
2996
2997 /* #:<reloc_op>: */
2998 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2999 {
3000 set_syntax_error (_("invalid relocation expression"));
3001 return FALSE;
3002 }
3003
3004 /* #:<reloc_op>:<expr> */
3005 /* Record the relocation type. */
3006 inst.reloc.type = ty;
3007 inst.reloc.pc_rel = entry->pc_rel;
3008 }
3009 else
3010 {
3011
3012 if (skip_past_char (&p, '='))
3013 /* =immediate; need to generate the literal in the literal pool. */
3014 inst.gen_lit_pool = 1;
3015
3016 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3017 {
3018 set_syntax_error (_("invalid address"));
3019 return FALSE;
3020 }
3021 }
3022
3023 *str = p;
3024 return TRUE;
3025 }
3026
3027 /* [ */
3028
3029 /* Accept SP and reject ZR */
3030 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
3031 if (reg == PARSE_FAIL || isreg32)
3032 {
3033 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3034 return FALSE;
3035 }
3036 operand->addr.base_regno = reg;
3037
3038 /* [Xn */
3039 if (skip_past_comma (&p))
3040 {
3041 /* [Xn, */
3042 operand->addr.preind = 1;
3043
3044 /* Reject SP and accept ZR */
3045 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3046 if (reg != PARSE_FAIL)
3047 {
3048 /* [Xn,Rm */
3049 operand->addr.offset.regno = reg;
3050 operand->addr.offset.is_reg = 1;
3051 /* Shifted index. */
3052 if (skip_past_comma (&p))
3053 {
3054 /* [Xn,Rm, */
3055 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3056 /* Use the diagnostics set in parse_shift, so do not set a new
3057 error message here. */
3058 return FALSE;
3059 }
3060 /* We only accept:
3061 [base,Xm{,LSL #imm}]
3062 [base,Xm,SXTX {#imm}]
3063 [base,Wm,(S|U)XTW {#imm}] */
3064 if (operand->shifter.kind == AARCH64_MOD_NONE
3065 || operand->shifter.kind == AARCH64_MOD_LSL
3066 || operand->shifter.kind == AARCH64_MOD_SXTX)
3067 {
3068 if (isreg32)
3069 {
3070 set_syntax_error (_("invalid use of 32-bit register offset"));
3071 return FALSE;
3072 }
3073 }
3074 else if (!isreg32)
3075 {
3076 set_syntax_error (_("invalid use of 64-bit register offset"));
3077 return FALSE;
3078 }
3079 }
3080 else
3081 {
3082 /* [Xn,#:<reloc_op>:<symbol> */
3083 skip_past_char (&p, '#');
3084 if (reloc && skip_past_char (&p, ':'))
3085 {
3086 struct reloc_table_entry *entry;
3087
3088 /* Try to parse a relocation modifier. Anything else is
3089 an error. */
3090 if (!(entry = find_reloc_table_entry (&p)))
3091 {
3092 set_syntax_error (_("unknown relocation modifier"));
3093 return FALSE;
3094 }
3095
3096 if (entry->ldst_type == 0)
3097 {
3098 set_syntax_error
3099 (_("this relocation modifier is not allowed on this "
3100 "instruction"));
3101 return FALSE;
3102 }
3103
3104 /* [Xn,#:<reloc_op>: */
3105 /* We now have the group relocation table entry corresponding to
3106 the name in the assembler source. Next, we parse the
3107 expression. */
3108 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3109 {
3110 set_syntax_error (_("invalid relocation expression"));
3111 return FALSE;
3112 }
3113
3114 /* [Xn,#:<reloc_op>:<expr> */
3115 /* Record the load/store relocation type. */
3116 inst.reloc.type = entry->ldst_type;
3117 inst.reloc.pc_rel = entry->pc_rel;
3118 }
3119 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3120 {
3121 set_syntax_error (_("invalid expression in the address"));
3122 return FALSE;
3123 }
3124 /* [Xn,<expr> */
3125 }
3126 }
3127
3128 if (! skip_past_char (&p, ']'))
3129 {
3130 set_syntax_error (_("']' expected"));
3131 return FALSE;
3132 }
3133
3134 if (skip_past_char (&p, '!'))
3135 {
3136 if (operand->addr.preind && operand->addr.offset.is_reg)
3137 {
3138 set_syntax_error (_("register offset not allowed in pre-indexed "
3139 "addressing mode"));
3140 return FALSE;
3141 }
3142 /* [Xn]! */
3143 operand->addr.writeback = 1;
3144 }
3145 else if (skip_past_comma (&p))
3146 {
3147 /* [Xn], */
3148 operand->addr.postind = 1;
3149 operand->addr.writeback = 1;
3150
3151 if (operand->addr.preind)
3152 {
3153 set_syntax_error (_("cannot combine pre- and post-indexing"));
3154 return FALSE;
3155 }
3156
3157 if (accept_reg_post_index
3158 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3159 &isregzero)) != PARSE_FAIL)
3160 {
3161 /* [Xn],Xm */
3162 if (isreg32)
3163 {
3164 set_syntax_error (_("invalid 32-bit register offset"));
3165 return FALSE;
3166 }
3167 operand->addr.offset.regno = reg;
3168 operand->addr.offset.is_reg = 1;
3169 }
3170 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3171 {
3172 /* [Xn],#expr */
3173 set_syntax_error (_("invalid expression in the address"));
3174 return FALSE;
3175 }
3176 }
3177
3178 /* If at this point neither .preind nor .postind is set, we have a
3179 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3180 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3181 {
3182 if (operand->addr.writeback)
3183 {
3184 /* Reject [Rn]! */
3185 set_syntax_error (_("missing offset in the pre-indexed address"));
3186 return FALSE;
3187 }
3188 operand->addr.preind = 1;
3189 inst.reloc.exp.X_op = O_constant;
3190 inst.reloc.exp.X_add_number = 0;
3191 }
3192
3193 *str = p;
3194 return TRUE;
3195 }
3196
3197 /* Return TRUE on success; otherwise return FALSE. */
3198 static bfd_boolean
3199 parse_address (char **str, aarch64_opnd_info *operand,
3200 int accept_reg_post_index)
3201 {
3202 return parse_address_main (str, operand, 0, accept_reg_post_index);
3203 }
3204
3205 /* Return TRUE on success; otherwise return FALSE. */
3206 static bfd_boolean
3207 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3208 {
3209 return parse_address_main (str, operand, 1, 0);
3210 }
3211
3212 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3213 Return TRUE on success; otherwise return FALSE. */
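
/* Illustrative operands (hypothetical, not from the original source):

   movz x0, #:abs_g1:sym   // ":abs_g1:" selects entry->movw_type
   movz x0, #1234          // plain value; *INTERNAL_FIXUP_P is set to 1  */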
3214 static bfd_boolean
3215 parse_half (char **str, int *internal_fixup_p)
3216 {
3217 char *p, *saved;
3218 int dummy;
3219
3220 p = *str;
3221 skip_past_char (&p, '#');
3222
3223 gas_assert (internal_fixup_p);
3224 *internal_fixup_p = 0;
3225
3226 if (*p == ':')
3227 {
3228 struct reloc_table_entry *entry;
3229
3230 /* Try to parse a relocation. Anything else is an error. */
3231 ++p;
3232 if (!(entry = find_reloc_table_entry (&p)))
3233 {
3234 set_syntax_error (_("unknown relocation modifier"));
3235 return FALSE;
3236 }
3237
3238 if (entry->movw_type == 0)
3239 {
3240 set_syntax_error
3241 (_("this relocation modifier is not allowed on this instruction"));
3242 return FALSE;
3243 }
3244
3245 inst.reloc.type = entry->movw_type;
3246 }
3247 else
3248 *internal_fixup_p = 1;
3249
3250 /* Avoid parsing a register as a general symbol. */
3251 saved = p;
3252 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3253 return FALSE;
3254 p = saved;
3255
3256 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3257 return FALSE;
3258
3259 *str = p;
3260 return TRUE;
3261 }
3262
3263 /* Parse an operand for an ADRP instruction:
3264 ADRP <Xd>, <label>
3265 Return TRUE on success; otherwise return FALSE. */
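
/* Illustrative operands (hypothetical, not from the original source):

   adrp x0, sym        // defaults to BFD_RELOC_AARCH64_ADR_HI21_PCREL
   adrp x0, :got:sym   // ":got:" selects BFD_RELOC_AARCH64_ADR_GOT_PAGE  */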
3266
3267 static bfd_boolean
3268 parse_adrp (char **str)
3269 {
3270 char *p;
3271
3272 p = *str;
3273 if (*p == ':')
3274 {
3275 struct reloc_table_entry *entry;
3276
3277 /* Try to parse a relocation. Anything else is an error. */
3278 ++p;
3279 if (!(entry = find_reloc_table_entry (&p)))
3280 {
3281 set_syntax_error (_("unknown relocation modifier"));
3282 return FALSE;
3283 }
3284
3285 if (entry->adrp_type == 0)
3286 {
3287 set_syntax_error
3288 (_("this relocation modifier is not allowed on this instruction"));
3289 return FALSE;
3290 }
3291
3292 inst.reloc.type = entry->adrp_type;
3293 }
3294 else
3295 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3296
3297 inst.reloc.pc_rel = 1;
3298
3299 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3300 return FALSE;
3301
3302 *str = p;
3303 return TRUE;
3304 }
3305
3306 /* Miscellaneous. */
3307
3308 /* Parse an option for a preload instruction. Returns the encoding for the
3309 option, or PARSE_FAIL. */
3310
3311 static int
3312 parse_pldop (char **str)
3313 {
3314 char *p, *q;
3315 const struct aarch64_name_value_pair *o;
3316
3317 p = q = *str;
3318 while (ISALNUM (*q))
3319 q++;
3320
3321 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3322 if (!o)
3323 return PARSE_FAIL;
3324
3325 *str = q;
3326 return o->value;
3327 }
3328
3329 /* Parse an option for a barrier instruction. Returns the encoding for the
3330 option, or PARSE_FAIL. */
3331
3332 static int
3333 parse_barrier (char **str)
3334 {
3335 char *p, *q;
3336 const asm_barrier_opt *o;
3337
3338 p = q = *str;
3339 while (ISALPHA (*q))
3340 q++;
3341
3342 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3343 if (!o)
3344 return PARSE_FAIL;
3345
3346 *str = q;
3347 return o->value;
3348 }
3349
3350 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3351 Returns the encoding for the option, or PARSE_FAIL.
3352
3353 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3354 implementation-defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
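
/* A worked example (computed here, not taken from the original sources): for
   the implementation-defined name "s3_0_c15_c2_0" the sscanf below yields
   op0=3, op1=0, cn=15, cm=2, op2=0, so the returned value is
   (3 << 14) | (0 << 11) | (15 << 7) | (2 << 3) | 0 == 0xc790.  */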
3355
3356 static int
3357 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3358 {
3359 char *p, *q;
3360 char buf[32];
3361 const aarch64_sys_reg *o;
3362 int value;
3363
3364 p = buf;
3365 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3366 if (p < buf + 31)
3367 *p++ = TOLOWER (*q);
3368 *p = '\0';
3369 /* Assert that BUF is large enough. */
3370 gas_assert (p - buf == q - *str);
3371
3372 o = hash_find (sys_regs, buf);
3373 if (!o)
3374 {
3375 if (!imple_defined_p)
3376 return PARSE_FAIL;
3377 else
3378 {
3379 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3380 unsigned int op0, op1, cn, cm, op2;
3381
3382 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3383 != 5)
3384 return PARSE_FAIL;
3385 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3386 return PARSE_FAIL;
3387 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3388 }
3389 }
3390 else
3391 {
3392 if (aarch64_sys_reg_deprecated_p (o))
3393 as_warn (_("system register name '%s' is deprecated and may be "
3394 "removed in a future release"), buf);
3395 value = o->value;
3396 }
3397
3398 *str = q;
3399 return value;
3400 }
3401
3402 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3403 for the option, or NULL. */
3404
3405 static const aarch64_sys_ins_reg *
3406 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3407 {
3408 char *p, *q;
3409 char buf[32];
3410 const aarch64_sys_ins_reg *o;
3411
3412 p = buf;
3413 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3414 if (p < buf + 31)
3415 *p++ = TOLOWER (*q);
3416 *p = '\0';
3417
3418 o = hash_find (sys_ins_regs, buf);
3419 if (!o)
3420 return NULL;
3421
3422 *str = q;
3423 return o;
3424 }
3425 \f
3426 #define po_char_or_fail(chr) do { \
3427 if (! skip_past_char (&str, chr)) \
3428 goto failure; \
3429 } while (0)
3430
3431 #define po_reg_or_fail(regtype) do { \
3432 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3433 if (val == PARSE_FAIL) \
3434 { \
3435 set_default_error (); \
3436 goto failure; \
3437 } \
3438 } while (0)
3439
3440 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3441 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3442 &isreg32, &isregzero); \
3443 if (val == PARSE_FAIL) \
3444 { \
3445 set_default_error (); \
3446 goto failure; \
3447 } \
3448 info->reg.regno = val; \
3449 if (isreg32) \
3450 info->qualifier = AARCH64_OPND_QLF_W; \
3451 else \
3452 info->qualifier = AARCH64_OPND_QLF_X; \
3453 } while (0)
3454
3455 #define po_imm_nc_or_fail() do { \
3456 if (! parse_constant_immediate (&str, &val)) \
3457 goto failure; \
3458 } while (0)
3459
3460 #define po_imm_or_fail(min, max) do { \
3461 if (! parse_constant_immediate (&str, &val)) \
3462 goto failure; \
3463 if (val < min || val > max) \
3464 { \
3465 set_fatal_syntax_error (_("immediate value out of range "\
3466 #min " to "#max)); \
3467 goto failure; \
3468 } \
3469 } while (0)
3470
3471 #define po_misc_or_fail(expr) do { \
3472 if (!expr) \
3473 goto failure; \
3474 } while (0)
3475 \f
3476 /* Encode the 12-bit imm field of Add/sub immediate. */
3477 static inline uint32_t
3478 encode_addsub_imm (uint32_t imm)
3479 {
3480 return imm << 10;
3481 }
3482
3483 /* Encode the shift amount field of Add/sub immediate. */
3484 static inline uint32_t
3485 encode_addsub_imm_shift_amount (uint32_t cnt)
3486 {
3487 return cnt << 22;
3488 }
3489
3490
3491 /* Encode the imm field of Adr instruction. */
3492 static inline uint32_t
3493 encode_adr_imm (uint32_t imm)
3494 {
3495 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3496 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3497 }
3498
3499 /* Encode the immediate field of Move wide immediate. */
3500 static inline uint32_t
3501 encode_movw_imm (uint32_t imm)
3502 {
3503 return imm << 5;
3504 }
3505
3506 /* Encode the 26-bit offset of unconditional branch. */
3507 static inline uint32_t
3508 encode_branch_ofs_26 (uint32_t ofs)
3509 {
3510 return ofs & ((1 << 26) - 1);
3511 }
3512
3513 /* Encode the 19-bit offset of conditional branch and compare & branch. */
3514 static inline uint32_t
3515 encode_cond_branch_ofs_19 (uint32_t ofs)
3516 {
3517 return (ofs & ((1 << 19) - 1)) << 5;
3518 }
3519
3520 /* Encode the 19-bit offset of ld literal. */
3521 static inline uint32_t
3522 encode_ld_lit_ofs_19 (uint32_t ofs)
3523 {
3524 return (ofs & ((1 << 19) - 1)) << 5;
3525 }
3526
3527 /* Encode the 14-bit offset of test & branch. */
3528 static inline uint32_t
3529 encode_tst_branch_ofs_14 (uint32_t ofs)
3530 {
3531 return (ofs & ((1 << 14) - 1)) << 5;
3532 }
3533
3534 /* Encode the 16-bit imm field of svc/hvc/smc. */
3535 static inline uint32_t
3536 encode_svc_imm (uint32_t imm)
3537 {
3538 return imm << 5;
3539 }
3540
3541 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3542 static inline uint32_t
3543 reencode_addsub_switch_add_sub (uint32_t opcode)
3544 {
3545 return opcode ^ (1 << 30);
3546 }
3547
3548 static inline uint32_t
3549 reencode_movzn_to_movz (uint32_t opcode)
3550 {
3551 return opcode | (1 << 30);
3552 }
3553
3554 static inline uint32_t
3555 reencode_movzn_to_movn (uint32_t opcode)
3556 {
3557 return opcode & ~(1 << 30);
3558 }
3559
3560 /* Overall per-instruction processing. */
3561
3562 /* We need to be able to fix up arbitrary expressions in some statements.
3563 This is so that we can handle symbols that are an arbitrary distance from
3564 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3565 which returns part of an address in a form which will be valid for
3566 a data instruction. We do this by pushing the expression into a symbol
3567 in the expr_section, and creating a fix for that. */
3568
3569 static fixS *
3570 fix_new_aarch64 (fragS * frag,
3571 int where,
3572 short int size, expressionS * exp, int pc_rel, int reloc)
3573 {
3574 fixS *new_fix;
3575
3576 switch (exp->X_op)
3577 {
3578 case O_constant:
3579 case O_symbol:
3580 case O_add:
3581 case O_subtract:
3582 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3583 break;
3584
3585 default:
3586 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3587 pc_rel, reloc);
3588 break;
3589 }
3590 return new_fix;
3591 }
3592 \f
3593 /* Diagnostics on operand errors. */
3594
3595 /* By default, output a verbose error message.
3596 Verbose error messages can be disabled with -mno-verbose-error. */
3597 static int verbose_error_p = 1;
3598
3599 #ifdef DEBUG_AARCH64
3600 /* N.B. this is only for the purpose of debugging. */
3601 const char* operand_mismatch_kind_names[] =
3602 {
3603 "AARCH64_OPDE_NIL",
3604 "AARCH64_OPDE_RECOVERABLE",
3605 "AARCH64_OPDE_SYNTAX_ERROR",
3606 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3607 "AARCH64_OPDE_INVALID_VARIANT",
3608 "AARCH64_OPDE_OUT_OF_RANGE",
3609 "AARCH64_OPDE_UNALIGNED",
3610 "AARCH64_OPDE_REG_LIST",
3611 "AARCH64_OPDE_OTHER_ERROR",
3612 };
3613 #endif /* DEBUG_AARCH64 */
3614
3615 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3616
3617 When multiple errors of different kinds are found in the same assembly
3618 line, only the error of the highest severity will be picked up for
3619 issuing the diagnostics. */
3620
3621 static inline bfd_boolean
3622 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3623 enum aarch64_operand_error_kind rhs)
3624 {
3625 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3626 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3627 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3628 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3629 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3630 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3631 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3632 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3633 return lhs > rhs;
3634 }
3635
3636 /* Helper routine to get the mnemonic name from the assembly instruction
3637 line; it should only be called for diagnostic purposes, as a string
3638 copy operation is involved, which may affect runtime
3639 performance if used elsewhere. */
3640
3641 static const char*
3642 get_mnemonic_name (const char *str)
3643 {
3644 static char mnemonic[32];
3645 char *ptr;
3646
3647 /* Get the first 31 bytes and assume that the full name is included. */
3648 strncpy (mnemonic, str, 31);
3649 mnemonic[31] = '\0';
3650
3651 /* Scan up to the end of the mnemonic, which must end in white space,
3652 '.', or end of string. */
3653 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3654 ;
3655
3656 *ptr = '\0';
3657
3658 /* Append '...' to the truncated long name. */
3659 if (ptr - mnemonic == 31)
3660 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3661
3662 return mnemonic;
3663 }
3664
3665 static void
3666 reset_aarch64_instruction (aarch64_instruction *instruction)
3667 {
3668 memset (instruction, '\0', sizeof (aarch64_instruction));
3669 instruction->reloc.type = BFD_RELOC_UNUSED;
3670 }
3671
3672 /* Data structures storing one user error in the assembly code related to
3673 operands. */
3674
3675 struct operand_error_record
3676 {
3677 const aarch64_opcode *opcode;
3678 aarch64_operand_error detail;
3679 struct operand_error_record *next;
3680 };
3681
3682 typedef struct operand_error_record operand_error_record;
3683
3684 struct operand_errors
3685 {
3686 operand_error_record *head;
3687 operand_error_record *tail;
3688 };
3689
3690 typedef struct operand_errors operand_errors;
3691
3692 /* Top-level data structure reporting user errors for the current line of
3693 the assembly code.
3694 The way md_assemble works is that all opcodes sharing the same mnemonic
3695 name are iterated to find a match to the assembly line. In this data
3696 structure, each such opcode will have one operand_error_record
3697 allocated and inserted. In other words, excessive errors related to
3698 a single opcode are disregarded. */
3699 operand_errors operand_error_report;
3700
3701 /* Free record nodes. */
3702 static operand_error_record *free_opnd_error_record_nodes = NULL;
3703
3704 /* Initialize the data structure that stores the operand mismatch
3705 information on assembling one line of the assembly code. */
3706 static void
3707 init_operand_error_report (void)
3708 {
3709 if (operand_error_report.head != NULL)
3710 {
3711 gas_assert (operand_error_report.tail != NULL);
3712 operand_error_report.tail->next = free_opnd_error_record_nodes;
3713 free_opnd_error_record_nodes = operand_error_report.head;
3714 operand_error_report.head = NULL;
3715 operand_error_report.tail = NULL;
3716 return;
3717 }
3718 gas_assert (operand_error_report.tail == NULL);
3719 }
3720
3721 /* Return TRUE if some operand error has been recorded during the
3722 parsing of the current assembly line using the opcode *OPCODE;
3723 otherwise return FALSE. */
3724 static inline bfd_boolean
3725 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3726 {
3727 operand_error_record *record = operand_error_report.head;
3728 return record && record->opcode == opcode;
3729 }
3730
3731 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3732 OPCODE field is initialized with OPCODE.
3733 N.B. there is only one record for each opcode, i.e. at most one error is
3734 recorded for each instruction template. */
3735
3736 static void
3737 add_operand_error_record (const operand_error_record* new_record)
3738 {
3739 const aarch64_opcode *opcode = new_record->opcode;
3740 operand_error_record* record = operand_error_report.head;
3741
3742 /* A record may already have been created for this opcode. If not, we need
3743 to prepare one. */
3744 if (! opcode_has_operand_error_p (opcode))
3745 {
3746 /* Get one empty record. */
3747 if (free_opnd_error_record_nodes == NULL)
3748 {
3749 record = xmalloc (sizeof (operand_error_record));
3750 if (record == NULL)
3751 abort ();
3752 }
3753 else
3754 {
3755 record = free_opnd_error_record_nodes;
3756 free_opnd_error_record_nodes = record->next;
3757 }
3758 record->opcode = opcode;
3759 /* Insert at the head. */
3760 record->next = operand_error_report.head;
3761 operand_error_report.head = record;
3762 if (operand_error_report.tail == NULL)
3763 operand_error_report.tail = record;
3764 }
3765 else if (record->detail.kind != AARCH64_OPDE_NIL
3766 && record->detail.index <= new_record->detail.index
3767 && operand_error_higher_severity_p (record->detail.kind,
3768 new_record->detail.kind))
3769 {
3770 /* In the case of multiple errors found on operands related to a
3771 single opcode, only record the error of the leftmost operand and
3772 only if the error is of higher severity. */
3773 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3774 " the existing error %s on operand %d",
3775 operand_mismatch_kind_names[new_record->detail.kind],
3776 new_record->detail.index,
3777 operand_mismatch_kind_names[record->detail.kind],
3778 record->detail.index);
3779 return;
3780 }
3781
3782 record->detail = new_record->detail;
3783 }
3784
3785 static inline void
3786 record_operand_error_info (const aarch64_opcode *opcode,
3787 aarch64_operand_error *error_info)
3788 {
3789 operand_error_record record;
3790 record.opcode = opcode;
3791 record.detail = *error_info;
3792 add_operand_error_record (&record);
3793 }
3794
3795 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3796 error message *ERROR, for operand IDX (count from 0). */
3797
3798 static void
3799 record_operand_error (const aarch64_opcode *opcode, int idx,
3800 enum aarch64_operand_error_kind kind,
3801 const char* error)
3802 {
3803 aarch64_operand_error info;
3804 memset(&info, 0, sizeof (info));
3805 info.index = idx;
3806 info.kind = kind;
3807 info.error = error;
3808 record_operand_error_info (opcode, &info);
3809 }
3810
3811 static void
3812 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3813 enum aarch64_operand_error_kind kind,
3814 const char* error, const int *extra_data)
3815 {
3816 aarch64_operand_error info;
3817 info.index = idx;
3818 info.kind = kind;
3819 info.error = error;
3820 info.data[0] = extra_data[0];
3821 info.data[1] = extra_data[1];
3822 info.data[2] = extra_data[2];
3823 record_operand_error_info (opcode, &info);
3824 }
3825
3826 static void
3827 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3828 const char* error, int lower_bound,
3829 int upper_bound)
3830 {
3831 int data[3] = {lower_bound, upper_bound, 0};
3832 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3833 error, data);
3834 }
3835
3836 /* Remove the operand error record for *OPCODE. */
3837 static void ATTRIBUTE_UNUSED
3838 remove_operand_error_record (const aarch64_opcode *opcode)
3839 {
3840 if (opcode_has_operand_error_p (opcode))
3841 {
3842 operand_error_record* record = operand_error_report.head;
3843 gas_assert (record != NULL && operand_error_report.tail != NULL);
3844 operand_error_report.head = record->next;
3845 record->next = free_opnd_error_record_nodes;
3846 free_opnd_error_record_nodes = record;
3847 if (operand_error_report.head == NULL)
3848 {
3849 gas_assert (operand_error_report.tail == record);
3850 operand_error_report.tail = NULL;
3851 }
3852 }
3853 }
3854
3855 /* Given the instruction in *INSTR, return the index of the best matched
3856 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3857
3858 Return -1 if there is no qualifier sequence; return the first match
3859 if multiple matches are found. */
3860
3861 static int
3862 find_best_match (const aarch64_inst *instr,
3863 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3864 {
3865 int i, num_opnds, max_num_matched, idx;
3866
3867 num_opnds = aarch64_num_of_operands (instr->opcode);
3868 if (num_opnds == 0)
3869 {
3870 DEBUG_TRACE ("no operand");
3871 return -1;
3872 }
3873
3874 max_num_matched = 0;
3875 idx = -1;
3876
3877 /* For each pattern. */
3878 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3879 {
3880 int j, num_matched;
3881 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3882
3883 /* Most opcodes have far fewer patterns in the list. */
3884 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3885 {
3886 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3887 if (i != 0 && idx == -1)
3888 /* If nothing has been matched, return the 1st sequence. */
3889 idx = 0;
3890 break;
3891 }
3892
3893 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3894 if (*qualifiers == instr->operands[j].qualifier)
3895 ++num_matched;
3896
3897 if (num_matched > max_num_matched)
3898 {
3899 max_num_matched = num_matched;
3900 idx = i;
3901 }
3902 }
3903
3904 DEBUG_TRACE ("return with %d", idx);
3905 return idx;
3906 }
3907
3908 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3909 corresponding operands in *INSTR. */
3910
3911 static inline void
3912 assign_qualifier_sequence (aarch64_inst *instr,
3913 const aarch64_opnd_qualifier_t *qualifiers)
3914 {
3915 int i = 0;
3916 int num_opnds = aarch64_num_of_operands (instr->opcode);
3917 gas_assert (num_opnds);
3918 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3919 instr->operands[i].qualifier = *qualifiers;
3920 }
3921
3922 /* Print operands for diagnostic purposes. */
3923
3924 static void
3925 print_operands (char *buf, const aarch64_opcode *opcode,
3926 const aarch64_opnd_info *opnds)
3927 {
3928 int i;
3929
3930 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3931 {
3932 const size_t size = 128;
3933 char str[size];
3934
3935 /* We rely mainly on the opcode operand info; however, we also look into
3936 inst->operands to support the printing of an optional
3937 operand.
3938 The two operand codes should be the same in all cases, apart from
3939 when the operand can be optional. */
3940 if (opcode->operands[i] == AARCH64_OPND_NIL
3941 || opnds[i].type == AARCH64_OPND_NIL)
3942 break;
3943
3944 /* Generate the operand string in STR. */
3945 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3946
3947 /* Delimiter. */
3948 if (str[0] != '\0')
3949 strcat (buf, i == 0 ? " " : ",");
3950
3951 /* Append the operand string. */
3952 strcat (buf, str);
3953 }
3954 }
3955
3956 /* Send a string to stderr as information. */
3957
3958 static void
3959 output_info (const char *format, ...)
3960 {
3961 char *file;
3962 unsigned int line;
3963 va_list args;
3964
3965 as_where (&file, &line);
3966 if (file)
3967 {
3968 if (line != 0)
3969 fprintf (stderr, "%s:%u: ", file, line);
3970 else
3971 fprintf (stderr, "%s: ", file);
3972 }
3973 fprintf (stderr, _("Info: "));
3974 va_start (args, format);
3975 vfprintf (stderr, format, args);
3976 va_end (args);
3977 (void) putc ('\n', stderr);
3978 }
3979
3980 /* Output one operand error record. */
3981
3982 static void
3983 output_operand_error_record (const operand_error_record *record, char *str)
3984 {
3985 const aarch64_operand_error *detail = &record->detail;
3986 int idx = detail->index;
3987 const aarch64_opcode *opcode = record->opcode;
3988 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
3989 : AARCH64_OPND_NIL);
3990
3991 switch (detail->kind)
3992 {
3993 case AARCH64_OPDE_NIL:
3994 gas_assert (0);
3995 break;
3996
3997 case AARCH64_OPDE_SYNTAX_ERROR:
3998 case AARCH64_OPDE_RECOVERABLE:
3999 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4000 case AARCH64_OPDE_OTHER_ERROR:
4001 /* Use the prepared error message if there is one, otherwise use the
4002 operand description string to describe the error. */
4003 if (detail->error != NULL)
4004 {
4005 if (idx < 0)
4006 as_bad (_("%s -- `%s'"), detail->error, str);
4007 else
4008 as_bad (_("%s at operand %d -- `%s'"),
4009 detail->error, idx + 1, str);
4010 }
4011 else
4012 {
4013 gas_assert (idx >= 0);
4014 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4015 aarch64_get_operand_desc (opd_code), str);
4016 }
4017 break;
4018
4019 case AARCH64_OPDE_INVALID_VARIANT:
4020 as_bad (_("operand mismatch -- `%s'"), str);
4021 if (verbose_error_p)
4022 {
4023 /* We will try to correct the erroneous instruction and also provide
4024 more information e.g. all other valid variants.
4025
4026 The string representation of the corrected instruction and other
4027 valid variants are generated by
4028
4029 1) obtaining the intermediate representation of the erroneous
4030 instruction;
4031 2) manipulating the IR, e.g. replacing the operand qualifier;
4032 3) printing out the instruction by calling the printer functions
4033 shared with the disassembler.
4034
4035 The limitation of this method is that the exact input assembly
4036 line cannot be accurately reproduced in some cases, for example an
4037 optional operand present in the actual assembly line will be
4038 omitted in the output; likewise for the optional syntax rules,
4039 e.g. the # before the immediate. Another limitation is that the
4040 assembly symbols and relocation operations in the assembly line
4041 currently cannot be printed out in the error report. Last but not
4042 least, when other errors co-exist with this error, the
4043 'corrected' instruction may still be incorrect, e.g. given
4044 'ldnp h0,h1,[x0,#6]!'
4045 this diagnosis will provide the version:
4046 'ldnp s0,s1,[x0,#6]!'
4047 which is still not right. */
4048 size_t len = strlen (get_mnemonic_name (str));
4049 int i, qlf_idx;
4050 bfd_boolean result;
4051 const size_t size = 2048;
4052 char buf[size];
4053 aarch64_inst *inst_base = &inst.base;
4054 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4055
4056 /* Init inst. */
4057 reset_aarch64_instruction (&inst);
4058 inst_base->opcode = opcode;
4059
4060 /* Reset the error report so that there is no side effect on the
4061 following operand parsing. */
4062 init_operand_error_report ();
4063
4064 /* Fill inst. */
4065 result = parse_operands (str + len, opcode)
4066 && programmer_friendly_fixup (&inst);
4067 gas_assert (result);
4068 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4069 NULL, NULL);
4070 gas_assert (!result);
4071
4072 /* Find the most matched qualifier sequence. */
4073 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4074 gas_assert (qlf_idx > -1);
4075
4076 /* Assign the qualifiers. */
4077 assign_qualifier_sequence (inst_base,
4078 opcode->qualifiers_list[qlf_idx]);
4079
4080 /* Print the hint. */
4081 output_info (_(" did you mean this?"));
4082 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4083 print_operands (buf, opcode, inst_base->operands);
4084 output_info (_(" %s"), buf);
4085
4086 /* Print out other variant(s) if there is any. */
4087 if (qlf_idx != 0 ||
4088 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4089 output_info (_(" other valid variant(s):"));
4090
4091 /* For each pattern. */
4092 qualifiers_list = opcode->qualifiers_list;
4093 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4094 {
4095 	      /* Most opcodes have far fewer patterns in the list.
4096 		 The first NIL qualifier indicates the end of the list.  */
4097 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4098 break;
4099
4100 if (i != qlf_idx)
4101 {
4102 /* Mnemonics name. */
4103 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4104
4105 /* Assign the qualifiers. */
4106 assign_qualifier_sequence (inst_base, *qualifiers_list);
4107
4108 /* Print instruction. */
4109 print_operands (buf, opcode, inst_base->operands);
4110
4111 output_info (_(" %s"), buf);
4112 }
4113 }
4114 }
4115 break;
4116
4117 case AARCH64_OPDE_OUT_OF_RANGE:
4118 if (detail->data[0] != detail->data[1])
4119 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4120 detail->error ? detail->error : _("immediate value"),
4121 detail->data[0], detail->data[1], idx + 1, str);
4122 else
4123 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4124 detail->error ? detail->error : _("immediate value"),
4125 detail->data[0], idx + 1, str);
4126 break;
4127
4128 case AARCH64_OPDE_REG_LIST:
4129 if (detail->data[0] == 1)
4130 as_bad (_("invalid number of registers in the list; "
4131 "only 1 register is expected at operand %d -- `%s'"),
4132 idx + 1, str);
4133 else
4134 as_bad (_("invalid number of registers in the list; "
4135 "%d registers are expected at operand %d -- `%s'"),
4136 detail->data[0], idx + 1, str);
4137 break;
4138
4139 case AARCH64_OPDE_UNALIGNED:
4140 as_bad (_("immediate value should be a multiple of "
4141 "%d at operand %d -- `%s'"),
4142 detail->data[0], idx + 1, str);
4143 break;
4144
4145 default:
4146 gas_assert (0);
4147 break;
4148 }
4149 }
4150
4151 /* Process and output the error message about the operand mismatching.
4152
4153    When this function is called, the operand error information has
4154    been collected for an assembly line and there will be multiple
4155    errors in the case of multiple instruction templates; output the
4156 error message that most closely describes the problem. */
4157
4158 static void
4159 output_operand_error_report (char *str)
4160 {
4161 int largest_error_pos;
4162 const char *msg = NULL;
4163 enum aarch64_operand_error_kind kind;
4164 operand_error_record *curr;
4165 operand_error_record *head = operand_error_report.head;
4166 operand_error_record *record = NULL;
4167
4168 /* No error to report. */
4169 if (head == NULL)
4170 return;
4171
4172 gas_assert (head != NULL && operand_error_report.tail != NULL);
4173
4174 /* Only one error. */
4175 if (head == operand_error_report.tail)
4176 {
4177 DEBUG_TRACE ("single opcode entry with error kind: %s",
4178 operand_mismatch_kind_names[head->detail.kind]);
4179 output_operand_error_record (head, str);
4180 return;
4181 }
4182
4183 /* Find the error kind of the highest severity. */
4184 DEBUG_TRACE ("multiple opcode entres with error kind");
4185 kind = AARCH64_OPDE_NIL;
4186 for (curr = head; curr != NULL; curr = curr->next)
4187 {
4188 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4189 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4190 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4191 kind = curr->detail.kind;
4192 }
4193 gas_assert (kind != AARCH64_OPDE_NIL);
4194
4195   /* Pick one of the errors of KIND to report.  */
4196 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4197 for (curr = head; curr != NULL; curr = curr->next)
4198 {
4199 if (curr->detail.kind != kind)
4200 continue;
4201 /* If there are multiple errors, pick up the one with the highest
4202 mismatching operand index. In the case of multiple errors with
4203 the equally highest operand index, pick up the first one or the
4204 first one with non-NULL error message. */
4205 if (curr->detail.index > largest_error_pos
4206 || (curr->detail.index == largest_error_pos && msg == NULL
4207 && curr->detail.error != NULL))
4208 {
4209 largest_error_pos = curr->detail.index;
4210 record = curr;
4211 msg = record->detail.error;
4212 }
4213 }
4214
4215 gas_assert (largest_error_pos != -2 && record != NULL);
4216 DEBUG_TRACE ("Pick up error kind %s to report",
4217 operand_mismatch_kind_names[record->detail.kind]);
4218
4219 /* Output. */
4220 output_operand_error_record (record, str);
4221 }
4222 \f
4223 /* Write an AARCH64 instruction to buf - always little-endian. */
4224 static void
4225 put_aarch64_insn (char *buf, uint32_t insn)
4226 {
4227 unsigned char *where = (unsigned char *) buf;
4228 where[0] = insn;
4229 where[1] = insn >> 8;
4230 where[2] = insn >> 16;
4231 where[3] = insn >> 24;
4232 }
4233
4234 static uint32_t
4235 get_aarch64_insn (char *buf)
4236 {
4237 unsigned char *where = (unsigned char *) buf;
4238 uint32_t result;
4239 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4240 return result;
4241 }
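/* For illustration: instructions are always stored little-endian regardless
   of the target data endianness, so e.g. the NOP encoding 0xd503201f is
   written by put_aarch64_insn as the byte sequence 1f 20 03 d5 and is read
   back unchanged by get_aarch64_insn.  */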
4242
4243 static void
4244 output_inst (struct aarch64_inst *new_inst)
4245 {
4246 char *to = NULL;
4247
4248 to = frag_more (INSN_SIZE);
4249
4250 frag_now->tc_frag_data.recorded = 1;
4251
4252 put_aarch64_insn (to, inst.base.value);
4253
4254 if (inst.reloc.type != BFD_RELOC_UNUSED)
4255 {
4256 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4257 INSN_SIZE, &inst.reloc.exp,
4258 inst.reloc.pc_rel,
4259 inst.reloc.type);
4260 DEBUG_TRACE ("Prepared relocation fix up");
4261 /* Don't check the addend value against the instruction size,
4262 that's the job of our code in md_apply_fix(). */
4263 fixp->fx_no_overflow = 1;
4264 if (new_inst != NULL)
4265 fixp->tc_fix_data.inst = new_inst;
4266 if (aarch64_gas_internal_fixup_p ())
4267 {
4268 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4269 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4270 fixp->fx_addnumber = inst.reloc.flags;
4271 }
4272 }
4273
4274 dwarf2_emit_insn (INSN_SIZE);
4275 }
4276
4277 /* Link together opcodes of the same name. */
4278
4279 struct templates
4280 {
4281 aarch64_opcode *opcode;
4282 struct templates *next;
4283 };
4284
4285 typedef struct templates templates;
4286
4287 static templates *
4288 lookup_mnemonic (const char *start, int len)
4289 {
4290 templates *templ = NULL;
4291
4292 templ = hash_find_n (aarch64_ops_hsh, start, len);
4293 return templ;
4294 }
4295
4296 /* Subroutine of md_assemble, responsible for looking up the primary
4297 opcode from the mnemonic the user wrote. STR points to the
4298 beginning of the mnemonic. */
4299
4300 static templates *
4301 opcode_lookup (char **str)
4302 {
4303 char *end, *base;
4304 const aarch64_cond *cond;
4305 char condname[16];
4306 int len;
4307
4308 /* Scan up to the end of the mnemonic, which must end in white space,
4309 '.', or end of string. */
4310 for (base = end = *str; is_part_of_name(*end); end++)
4311 if (*end == '.')
4312 break;
4313
4314 if (end == base)
4315 return 0;
4316
4317 inst.cond = COND_ALWAYS;
4318
4319 /* Handle a possible condition. */
4320 if (end[0] == '.')
4321 {
4322 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4323 if (cond)
4324 {
4325 inst.cond = cond->value;
4326 *str = end + 3;
4327 }
4328 else
4329 {
4330 *str = end;
4331 return 0;
4332 }
4333 }
4334 else
4335 *str = end;
4336
4337 len = end - base;
4338
4339 if (inst.cond == COND_ALWAYS)
4340 {
4341 /* Look for unaffixed mnemonic. */
4342 return lookup_mnemonic (base, len);
4343 }
4344 else if (len <= 13)
4345 {
4346 /* append ".c" to mnemonic if conditional */
4347 memcpy (condname, base, len);
4348 memcpy (condname + len, ".c", 2);
4349 base = condname;
4350 len += 2;
4351 return lookup_mnemonic (base, len);
4352 }
4353
4354 return NULL;
4355 }
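/* For illustration: given the input "b.eq lab", opcode_lookup stops at the
   '.' after "b", finds the "eq" condition in aarch64_cond_hsh and then looks
   up the internal mnemonic "b.c" (the base name with a ".c" suffix), which
   is how the conditional templates are keyed in aarch64_ops_hsh.  */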
4356
4357 /* Internal helper routine converting a vector neon_type_el structure
4358 *VECTYPE to a corresponding operand qualifier. */
4359
4360 static inline aarch64_opnd_qualifier_t
4361 vectype_to_qualifier (const struct neon_type_el *vectype)
4362 {
4363 /* Element size in bytes indexed by neon_el_type. */
4364 const unsigned char ele_size[5]
4365 = {1, 2, 4, 8, 16};
4366
4367 if (!vectype->defined || vectype->type == NT_invtype)
4368 goto vectype_conversion_fail;
4369
4370 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4371
4372 if (vectype->defined & NTA_HASINDEX)
4373 /* Vector element register. */
4374 return AARCH64_OPND_QLF_S_B + vectype->type;
4375 else
4376 {
4377 /* Vector register. */
4378 int reg_size = ele_size[vectype->type] * vectype->width;
4379 unsigned offset;
4380 if (reg_size != 16 && reg_size != 8)
4381 goto vectype_conversion_fail;
4382 /* The conversion is calculated based on the relation of the order of
4383 qualifiers to the vector element size and vector register size. */
4384 offset = (vectype->type == NT_q)
4385 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4386 gas_assert (offset <= 8);
4387 return AARCH64_OPND_QLF_V_8B + offset;
4388 }
4389
4390 vectype_conversion_fail:
4391 first_error (_("bad vector arrangement type"));
4392 return AARCH64_OPND_QLF_NIL;
4393 }
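/* Worked example for the mapping above, assuming the usual qualifier order
   8B, 16B, 4H, 8H, 2S, 4S, 1D, 2D, 1Q that the offset computation relies on:
   for an operand written as "v0.4s", the type is NT_s (2), the width is 4 and
   the element size is 4 bytes, giving reg_size 16; the offset is therefore
   (2 << 1) + (16 >> 4) = 5, i.e. AARCH64_OPND_QLF_V_8B + 5 == QLF_V_4S.  */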
4394
4395 /* Process an optional operand that is found omitted from the assembly line.
4396 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4397 instruction's opcode entry while IDX is the index of this omitted operand.
4398 */
4399
4400 static void
4401 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4402 int idx, aarch64_opnd_info *operand)
4403 {
4404 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4405 gas_assert (optional_operand_p (opcode, idx));
4406 gas_assert (!operand->present);
4407
4408 switch (type)
4409 {
4410 case AARCH64_OPND_Rd:
4411 case AARCH64_OPND_Rn:
4412 case AARCH64_OPND_Rm:
4413 case AARCH64_OPND_Rt:
4414 case AARCH64_OPND_Rt2:
4415 case AARCH64_OPND_Rs:
4416 case AARCH64_OPND_Ra:
4417 case AARCH64_OPND_Rt_SYS:
4418 case AARCH64_OPND_Rd_SP:
4419 case AARCH64_OPND_Rn_SP:
4420 case AARCH64_OPND_Fd:
4421 case AARCH64_OPND_Fn:
4422 case AARCH64_OPND_Fm:
4423 case AARCH64_OPND_Fa:
4424 case AARCH64_OPND_Ft:
4425 case AARCH64_OPND_Ft2:
4426 case AARCH64_OPND_Sd:
4427 case AARCH64_OPND_Sn:
4428 case AARCH64_OPND_Sm:
4429 case AARCH64_OPND_Vd:
4430 case AARCH64_OPND_Vn:
4431 case AARCH64_OPND_Vm:
4432 case AARCH64_OPND_VdD1:
4433 case AARCH64_OPND_VnD1:
4434 operand->reg.regno = default_value;
4435 break;
4436
4437 case AARCH64_OPND_Ed:
4438 case AARCH64_OPND_En:
4439 case AARCH64_OPND_Em:
4440 operand->reglane.regno = default_value;
4441 break;
4442
4443 case AARCH64_OPND_IDX:
4444 case AARCH64_OPND_BIT_NUM:
4445 case AARCH64_OPND_IMMR:
4446 case AARCH64_OPND_IMMS:
4447 case AARCH64_OPND_SHLL_IMM:
4448 case AARCH64_OPND_IMM_VLSL:
4449 case AARCH64_OPND_IMM_VLSR:
4450 case AARCH64_OPND_CCMP_IMM:
4451 case AARCH64_OPND_FBITS:
4452 case AARCH64_OPND_UIMM4:
4453 case AARCH64_OPND_UIMM3_OP1:
4454 case AARCH64_OPND_UIMM3_OP2:
4455 case AARCH64_OPND_IMM:
4456 case AARCH64_OPND_WIDTH:
4457 case AARCH64_OPND_UIMM7:
4458 case AARCH64_OPND_NZCV:
4459 operand->imm.value = default_value;
4460 break;
4461
4462 case AARCH64_OPND_EXCEPTION:
4463 inst.reloc.type = BFD_RELOC_UNUSED;
4464 break;
4465
4466 case AARCH64_OPND_BARRIER_ISB:
4467       operand->barrier = aarch64_barrier_options + default_value;
4468       break;
4469 default:
4470 break;
4471 }
4472 }
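/* For illustration: "ret" may be written without its register operand, in
   which case the Rn operand is filled in here from the opcode's default
   value (30, i.e. x30), so plain "ret" assembles identically to "ret x30".  */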
4473
4474 /* Process the relocation type for move wide instructions.
4475 Return TRUE on success; otherwise return FALSE. */
4476
4477 static bfd_boolean
4478 process_movw_reloc_info (void)
4479 {
4480 int is32;
4481 unsigned shift;
4482
4483 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4484
4485 if (inst.base.opcode->op == OP_MOVK)
4486 switch (inst.reloc.type)
4487 {
4488 case BFD_RELOC_AARCH64_MOVW_G0_S:
4489 case BFD_RELOC_AARCH64_MOVW_G1_S:
4490 case BFD_RELOC_AARCH64_MOVW_G2_S:
4491 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4492 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4493 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4494 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4495 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4496 set_syntax_error
4497 (_("the specified relocation type is not allowed for MOVK"));
4498 return FALSE;
4499 default:
4500 break;
4501 }
4502
4503 switch (inst.reloc.type)
4504 {
4505 case BFD_RELOC_AARCH64_MOVW_G0:
4506 case BFD_RELOC_AARCH64_MOVW_G0_S:
4507 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4508 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4509 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4510 shift = 0;
4511 break;
4512 case BFD_RELOC_AARCH64_MOVW_G1:
4513 case BFD_RELOC_AARCH64_MOVW_G1_S:
4514 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4515 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4516 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4517 shift = 16;
4518 break;
4519 case BFD_RELOC_AARCH64_MOVW_G2:
4520 case BFD_RELOC_AARCH64_MOVW_G2_S:
4521 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4522 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4523 if (is32)
4524 {
4525 set_fatal_syntax_error
4526 (_("the specified relocation type is not allowed for 32-bit "
4527 "register"));
4528 return FALSE;
4529 }
4530 shift = 32;
4531 break;
4532 case BFD_RELOC_AARCH64_MOVW_G3:
4533 if (is32)
4534 {
4535 set_fatal_syntax_error
4536 (_("the specified relocation type is not allowed for 32-bit "
4537 "register"));
4538 return FALSE;
4539 }
4540 shift = 48;
4541 break;
4542 default:
4543 /* More cases should be added when more MOVW-related relocation types
4544 are supported in GAS. */
4545 gas_assert (aarch64_gas_internal_fixup_p ());
4546 /* The shift amount should have already been set by the parser. */
4547 return TRUE;
4548 }
4549 inst.base.operands[1].shifter.amount = shift;
4550 return TRUE;
4551 }
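/* For illustration (assuming the usual GAS relocation-modifier syntax):
   "movz x0, #:abs_g1:sym" parses to BFD_RELOC_AARCH64_MOVW_G1, so the code
   above sets the implicit shift amount to 16, while "movk w0, #:abs_g1_s:sym"
   is rejected because the signed G1 relocation is not allowed for MOVK.  */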
4552
4553 /* A primitive log calculator.  */
4554
4555 static inline unsigned int
4556 get_logsz (unsigned int size)
4557 {
4558 const unsigned char ls[16] =
4559 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4560 if (size > 16)
4561 {
4562 gas_assert (0);
4563 return -1;
4564 }
4565 gas_assert (ls[size - 1] != (unsigned char)-1);
4566 return ls[size - 1];
4567 }
4568
4569 /* Determine and return the real reloc type code for an instruction
4570 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4571
4572 static inline bfd_reloc_code_real_type
4573 ldst_lo12_determine_real_reloc_type (void)
4574 {
4575 int logsz;
4576 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4577 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4578
4579 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4580 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4581 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4582 BFD_RELOC_AARCH64_LDST128_LO12
4583 };
4584
4585 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4586 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4587
4588 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4589 opd1_qlf =
4590 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4591 1, opd0_qlf, 0);
4592 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4593
4594 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4595 gas_assert (logsz >= 0 && logsz <= 4);
4596
4597 return reloc_ldst_lo12[logsz];
4598 }
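/* For illustration: for "ldr x0, [x1, #:lo12:sym]" the transfer register
   qualifier is X, whose element size is 8 bytes, so logsz is 3 and the
   pseudo reloc BFD_RELOC_AARCH64_LDST_LO12 is narrowed to
   BFD_RELOC_AARCH64_LDST64_LO12.  */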
4599
4600 /* Check whether a register list REGINFO is valid. The registers must be
4601 numbered in increasing order (modulo 32), in increments of one or two.
4602
4603 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4604 increments of two.
4605
4606 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4607
4608 static bfd_boolean
4609 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4610 {
4611 uint32_t i, nb_regs, prev_regno, incr;
4612
4613 nb_regs = 1 + (reginfo & 0x3);
4614 reginfo >>= 2;
4615 prev_regno = reginfo & 0x1f;
4616 incr = accept_alternate ? 2 : 1;
4617
4618 for (i = 1; i < nb_regs; ++i)
4619 {
4620 uint32_t curr_regno;
4621 reginfo >>= 5;
4622 curr_regno = reginfo & 0x1f;
4623 if (curr_regno != ((prev_regno + incr) & 0x1f))
4624 return FALSE;
4625 prev_regno = curr_regno;
4626 }
4627
4628 return TRUE;
4629 }
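/* For illustration of the REGINFO packing checked above: the two low bits
   hold the register count minus one and each subsequent 5-bit field holds a
   register number, lowest first.  A list such as { v2.4s, v3.4s, v4.4s } is
   therefore valid (consecutive numbers, increment 1), whereas
   { v2.4s, v4.4s, v6.4s } is only accepted when ACCEPT_ALTERNATE is set.  */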
4630
4631 /* Generic instruction operand parser. This does no encoding and no
4632 semantic validation; it merely squirrels values away in the inst
4633 structure. Returns TRUE or FALSE depending on whether the
4634 specified grammar matched. */
4635
4636 static bfd_boolean
4637 parse_operands (char *str, const aarch64_opcode *opcode)
4638 {
4639 int i;
4640 char *backtrack_pos = 0;
4641 const enum aarch64_opnd *operands = opcode->operands;
4642
4643 clear_error ();
4644 skip_whitespace (str);
4645
4646 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4647 {
4648 int64_t val;
4649 int isreg32, isregzero;
4650 int comma_skipped_p = 0;
4651 aarch64_reg_type rtype;
4652 struct neon_type_el vectype;
4653 aarch64_opnd_info *info = &inst.base.operands[i];
4654
4655 DEBUG_TRACE ("parse operand %d", i);
4656
4657 /* Assign the operand code. */
4658 info->type = operands[i];
4659
4660 if (optional_operand_p (opcode, i))
4661 {
4662 /* Remember where we are in case we need to backtrack. */
4663 gas_assert (!backtrack_pos);
4664 backtrack_pos = str;
4665 }
4666
4667       /* Expect a comma between operands; the backtrack mechanism will take
4668 	 care of cases of an omitted optional operand.  */
4669 if (i > 0 && ! skip_past_char (&str, ','))
4670 {
4671 set_syntax_error (_("comma expected between operands"));
4672 goto failure;
4673 }
4674 else
4675 comma_skipped_p = 1;
4676
4677 switch (operands[i])
4678 {
4679 case AARCH64_OPND_Rd:
4680 case AARCH64_OPND_Rn:
4681 case AARCH64_OPND_Rm:
4682 case AARCH64_OPND_Rt:
4683 case AARCH64_OPND_Rt2:
4684 case AARCH64_OPND_Rs:
4685 case AARCH64_OPND_Ra:
4686 case AARCH64_OPND_Rt_SYS:
4687 case AARCH64_OPND_PAIRREG:
4688 po_int_reg_or_fail (1, 0);
4689 break;
4690
4691 case AARCH64_OPND_Rd_SP:
4692 case AARCH64_OPND_Rn_SP:
4693 po_int_reg_or_fail (0, 1);
4694 break;
4695
4696 case AARCH64_OPND_Rm_EXT:
4697 case AARCH64_OPND_Rm_SFT:
4698 po_misc_or_fail (parse_shifter_operand
4699 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4700 ? SHIFTED_ARITH_IMM
4701 : SHIFTED_LOGIC_IMM)));
4702 if (!info->shifter.operator_present)
4703 {
4704 /* Default to LSL if not present. Libopcodes prefers shifter
4705 kind to be explicit. */
4706 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4707 info->shifter.kind = AARCH64_MOD_LSL;
4708 /* For Rm_EXT, libopcodes will carry out further check on whether
4709 or not stack pointer is used in the instruction (Recall that
4710 "the extend operator is not optional unless at least one of
4711 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4712 }
4713 break;
4714
4715 case AARCH64_OPND_Fd:
4716 case AARCH64_OPND_Fn:
4717 case AARCH64_OPND_Fm:
4718 case AARCH64_OPND_Fa:
4719 case AARCH64_OPND_Ft:
4720 case AARCH64_OPND_Ft2:
4721 case AARCH64_OPND_Sd:
4722 case AARCH64_OPND_Sn:
4723 case AARCH64_OPND_Sm:
4724 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4725 if (val == PARSE_FAIL)
4726 {
4727 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4728 goto failure;
4729 }
4730 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4731
4732 info->reg.regno = val;
4733 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4734 break;
4735
4736 case AARCH64_OPND_Vd:
4737 case AARCH64_OPND_Vn:
4738 case AARCH64_OPND_Vm:
4739 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4740 if (val == PARSE_FAIL)
4741 {
4742 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4743 goto failure;
4744 }
4745 if (vectype.defined & NTA_HASINDEX)
4746 goto failure;
4747
4748 info->reg.regno = val;
4749 info->qualifier = vectype_to_qualifier (&vectype);
4750 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4751 goto failure;
4752 break;
4753
4754 case AARCH64_OPND_VdD1:
4755 case AARCH64_OPND_VnD1:
4756 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4757 if (val == PARSE_FAIL)
4758 {
4759 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4760 goto failure;
4761 }
4762 if (vectype.type != NT_d || vectype.index != 1)
4763 {
4764 set_fatal_syntax_error
4765 (_("the top half of a 128-bit FP/SIMD register is expected"));
4766 goto failure;
4767 }
4768 info->reg.regno = val;
4769 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
4770 here; it is correct for the purpose of encoding/decoding since
4771 only the register number is explicitly encoded in the related
4772 instructions, although this appears a bit hacky. */
4773 info->qualifier = AARCH64_OPND_QLF_S_D;
4774 break;
4775
4776 case AARCH64_OPND_Ed:
4777 case AARCH64_OPND_En:
4778 case AARCH64_OPND_Em:
4779 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4780 if (val == PARSE_FAIL)
4781 {
4782 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4783 goto failure;
4784 }
4785 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4786 goto failure;
4787
4788 info->reglane.regno = val;
4789 info->reglane.index = vectype.index;
4790 info->qualifier = vectype_to_qualifier (&vectype);
4791 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4792 goto failure;
4793 break;
4794
4795 case AARCH64_OPND_LVn:
4796 case AARCH64_OPND_LVt:
4797 case AARCH64_OPND_LVt_AL:
4798 case AARCH64_OPND_LEt:
4799 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4800 goto failure;
4801 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4802 {
4803 set_fatal_syntax_error (_("invalid register list"));
4804 goto failure;
4805 }
4806 info->reglist.first_regno = (val >> 2) & 0x1f;
4807 info->reglist.num_regs = (val & 0x3) + 1;
4808 if (operands[i] == AARCH64_OPND_LEt)
4809 {
4810 if (!(vectype.defined & NTA_HASINDEX))
4811 goto failure;
4812 info->reglist.has_index = 1;
4813 info->reglist.index = vectype.index;
4814 }
4815 else if (!(vectype.defined & NTA_HASTYPE))
4816 goto failure;
4817 info->qualifier = vectype_to_qualifier (&vectype);
4818 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4819 goto failure;
4820 break;
4821
4822 case AARCH64_OPND_Cn:
4823 case AARCH64_OPND_Cm:
4824 po_reg_or_fail (REG_TYPE_CN);
4825 if (val > 15)
4826 {
4827 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4828 goto failure;
4829 }
4830 inst.base.operands[i].reg.regno = val;
4831 break;
4832
4833 case AARCH64_OPND_SHLL_IMM:
4834 case AARCH64_OPND_IMM_VLSR:
4835 po_imm_or_fail (1, 64);
4836 info->imm.value = val;
4837 break;
4838
4839 case AARCH64_OPND_CCMP_IMM:
4840 case AARCH64_OPND_FBITS:
4841 case AARCH64_OPND_UIMM4:
4842 case AARCH64_OPND_UIMM3_OP1:
4843 case AARCH64_OPND_UIMM3_OP2:
4844 case AARCH64_OPND_IMM_VLSL:
4845 case AARCH64_OPND_IMM:
4846 case AARCH64_OPND_WIDTH:
4847 po_imm_nc_or_fail ();
4848 info->imm.value = val;
4849 break;
4850
4851 case AARCH64_OPND_UIMM7:
4852 po_imm_or_fail (0, 127);
4853 info->imm.value = val;
4854 break;
4855
4856 case AARCH64_OPND_IDX:
4857 case AARCH64_OPND_BIT_NUM:
4858 case AARCH64_OPND_IMMR:
4859 case AARCH64_OPND_IMMS:
4860 po_imm_or_fail (0, 63);
4861 info->imm.value = val;
4862 break;
4863
4864 case AARCH64_OPND_IMM0:
4865 po_imm_nc_or_fail ();
4866 if (val != 0)
4867 {
4868 set_fatal_syntax_error (_("immediate zero expected"));
4869 goto failure;
4870 }
4871 info->imm.value = 0;
4872 break;
4873
4874 case AARCH64_OPND_FPIMM0:
4875 {
4876 int qfloat;
4877 bfd_boolean res1 = FALSE, res2 = FALSE;
4878 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4879 it is probably not worth the effort to support it. */
4880 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4881 && !(res2 = parse_constant_immediate (&str, &val)))
4882 goto failure;
4883 if ((res1 && qfloat == 0) || (res2 && val == 0))
4884 {
4885 info->imm.value = 0;
4886 info->imm.is_fp = 1;
4887 break;
4888 }
4889 set_fatal_syntax_error (_("immediate zero expected"));
4890 goto failure;
4891 }
4892
4893 case AARCH64_OPND_IMM_MOV:
4894 {
4895 char *saved = str;
4896 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
4897 reg_name_p (str, REG_TYPE_VN))
4898 goto failure;
4899 str = saved;
4900 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4901 GE_OPT_PREFIX, 1));
4902 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4903 later. fix_mov_imm_insn will try to determine a machine
4904 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4905 message if the immediate cannot be moved by a single
4906 instruction. */
4907 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4908 inst.base.operands[i].skip = 1;
4909 }
4910 break;
4911
4912 case AARCH64_OPND_SIMD_IMM:
4913 case AARCH64_OPND_SIMD_IMM_SFT:
4914 if (! parse_big_immediate (&str, &val))
4915 goto failure;
4916 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4917 /* addr_off_p */ 0,
4918 /* need_libopcodes_p */ 1,
4919 /* skip_p */ 1);
4920 /* Parse shift.
4921 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4922 shift, we don't check it here; we leave the checking to
4923 the libopcodes (operand_general_constraint_met_p). By
4924 doing this, we achieve better diagnostics. */
4925 if (skip_past_comma (&str)
4926 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4927 goto failure;
4928 if (!info->shifter.operator_present
4929 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4930 {
4931 /* Default to LSL if not present. Libopcodes prefers shifter
4932 kind to be explicit. */
4933 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4934 info->shifter.kind = AARCH64_MOD_LSL;
4935 }
4936 break;
4937
4938 case AARCH64_OPND_FPIMM:
4939 case AARCH64_OPND_SIMD_FPIMM:
4940 {
4941 int qfloat;
4942 bfd_boolean dp_p
4943 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
4944 == 8);
4945 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
4946 goto failure;
4947 if (qfloat == 0)
4948 {
4949 set_fatal_syntax_error (_("invalid floating-point constant"));
4950 goto failure;
4951 }
4952 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4953 inst.base.operands[i].imm.is_fp = 1;
4954 }
4955 break;
4956
4957 case AARCH64_OPND_LIMM:
4958 po_misc_or_fail (parse_shifter_operand (&str, info,
4959 SHIFTED_LOGIC_IMM));
4960 if (info->shifter.operator_present)
4961 {
4962 set_fatal_syntax_error
4963 (_("shift not allowed for bitmask immediate"));
4964 goto failure;
4965 }
4966 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4967 /* addr_off_p */ 0,
4968 /* need_libopcodes_p */ 1,
4969 /* skip_p */ 1);
4970 break;
4971
4972 case AARCH64_OPND_AIMM:
4973 if (opcode->op == OP_ADD)
4974 /* ADD may have relocation types. */
4975 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
4976 SHIFTED_ARITH_IMM));
4977 else
4978 po_misc_or_fail (parse_shifter_operand (&str, info,
4979 SHIFTED_ARITH_IMM));
4980 switch (inst.reloc.type)
4981 {
4982 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4983 info->shifter.amount = 12;
4984 break;
4985 case BFD_RELOC_UNUSED:
4986 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4987 if (info->shifter.kind != AARCH64_MOD_NONE)
4988 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
4989 inst.reloc.pc_rel = 0;
4990 break;
4991 default:
4992 break;
4993 }
4994 info->imm.value = 0;
4995 if (!info->shifter.operator_present)
4996 {
4997 /* Default to LSL if not present. Libopcodes prefers shifter
4998 kind to be explicit. */
4999 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5000 info->shifter.kind = AARCH64_MOD_LSL;
5001 }
5002 break;
5003
5004 case AARCH64_OPND_HALF:
5005 {
5006 /* #<imm16> or relocation. */
5007 int internal_fixup_p;
5008 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5009 if (internal_fixup_p)
5010 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5011 skip_whitespace (str);
5012 if (skip_past_comma (&str))
5013 {
5014 /* {, LSL #<shift>} */
5015 if (! aarch64_gas_internal_fixup_p ())
5016 {
5017 set_fatal_syntax_error (_("can't mix relocation modifier "
5018 "with explicit shift"));
5019 goto failure;
5020 }
5021 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5022 }
5023 else
5024 inst.base.operands[i].shifter.amount = 0;
5025 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5026 inst.base.operands[i].imm.value = 0;
5027 if (! process_movw_reloc_info ())
5028 goto failure;
5029 }
5030 break;
5031
5032 case AARCH64_OPND_EXCEPTION:
5033 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
5034 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5035 /* addr_off_p */ 0,
5036 /* need_libopcodes_p */ 0,
5037 /* skip_p */ 1);
5038 break;
5039
5040 case AARCH64_OPND_NZCV:
5041 {
5042 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5043 if (nzcv != NULL)
5044 {
5045 str += 4;
5046 info->imm.value = nzcv->value;
5047 break;
5048 }
5049 po_imm_or_fail (0, 15);
5050 info->imm.value = val;
5051 }
5052 break;
5053
5054 case AARCH64_OPND_COND:
5055 case AARCH64_OPND_COND1:
5056 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5057 str += 2;
5058 if (info->cond == NULL)
5059 {
5060 set_syntax_error (_("invalid condition"));
5061 goto failure;
5062 }
5063 else if (operands[i] == AARCH64_OPND_COND1
5064 && (info->cond->value & 0xe) == 0xe)
5065 {
5066 	      /* Do not allow AL or NV.  */
5067 set_default_error ();
5068 goto failure;
5069 }
5070 break;
5071
5072 case AARCH64_OPND_ADDR_ADRP:
5073 po_misc_or_fail (parse_adrp (&str));
5074 /* Clear the value as operand needs to be relocated. */
5075 info->imm.value = 0;
5076 break;
5077
5078 case AARCH64_OPND_ADDR_PCREL14:
5079 case AARCH64_OPND_ADDR_PCREL19:
5080 case AARCH64_OPND_ADDR_PCREL21:
5081 case AARCH64_OPND_ADDR_PCREL26:
5082 po_misc_or_fail (parse_address_reloc (&str, info));
5083 if (!info->addr.pcrel)
5084 {
5085 set_syntax_error (_("invalid pc-relative address"));
5086 goto failure;
5087 }
5088 if (inst.gen_lit_pool
5089 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5090 {
5091 /* Only permit "=value" in the literal load instructions.
5092 The literal will be generated by programmer_friendly_fixup. */
5093 set_syntax_error (_("invalid use of \"=immediate\""));
5094 goto failure;
5095 }
5096 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5097 {
5098 set_syntax_error (_("unrecognized relocation suffix"));
5099 goto failure;
5100 }
5101 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5102 {
5103 info->imm.value = inst.reloc.exp.X_add_number;
5104 inst.reloc.type = BFD_RELOC_UNUSED;
5105 }
5106 else
5107 {
5108 info->imm.value = 0;
5109 if (inst.reloc.type == BFD_RELOC_UNUSED)
5110 switch (opcode->iclass)
5111 {
5112 case compbranch:
5113 case condbranch:
5114 /* e.g. CBZ or B.COND */
5115 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5116 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5117 break;
5118 case testbranch:
5119 /* e.g. TBZ */
5120 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5121 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5122 break;
5123 case branch_imm:
5124 /* e.g. B or BL */
5125 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5126 inst.reloc.type =
5127 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5128 : BFD_RELOC_AARCH64_JUMP26;
5129 break;
5130 case loadlit:
5131 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5132 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5133 break;
5134 case pcreladdr:
5135 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5136 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5137 break;
5138 default:
5139 gas_assert (0);
5140 abort ();
5141 }
5142 inst.reloc.pc_rel = 1;
5143 }
5144 break;
5145
5146 case AARCH64_OPND_ADDR_SIMPLE:
5147 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5148 /* [<Xn|SP>{, #<simm>}] */
5149 po_char_or_fail ('[');
5150 po_reg_or_fail (REG_TYPE_R64_SP);
5151 /* Accept optional ", #0". */
5152 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5153 && skip_past_char (&str, ','))
5154 {
5155 skip_past_char (&str, '#');
5156 if (! skip_past_char (&str, '0'))
5157 {
5158 set_fatal_syntax_error
5159 (_("the optional immediate offset can only be 0"));
5160 goto failure;
5161 }
5162 }
5163 po_char_or_fail (']');
5164 info->addr.base_regno = val;
5165 break;
5166
5167 case AARCH64_OPND_ADDR_REGOFF:
5168 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5169 po_misc_or_fail (parse_address (&str, info, 0));
5170 if (info->addr.pcrel || !info->addr.offset.is_reg
5171 || !info->addr.preind || info->addr.postind
5172 || info->addr.writeback)
5173 {
5174 set_syntax_error (_("invalid addressing mode"));
5175 goto failure;
5176 }
5177 if (!info->shifter.operator_present)
5178 {
5179 /* Default to LSL if not present. Libopcodes prefers shifter
5180 kind to be explicit. */
5181 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5182 info->shifter.kind = AARCH64_MOD_LSL;
5183 }
5184 /* Qualifier to be deduced by libopcodes. */
5185 break;
5186
5187 case AARCH64_OPND_ADDR_SIMM7:
5188 po_misc_or_fail (parse_address (&str, info, 0));
5189 if (info->addr.pcrel || info->addr.offset.is_reg
5190 || (!info->addr.preind && !info->addr.postind))
5191 {
5192 set_syntax_error (_("invalid addressing mode"));
5193 goto failure;
5194 }
5195 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5196 /* addr_off_p */ 1,
5197 /* need_libopcodes_p */ 1,
5198 /* skip_p */ 0);
5199 break;
5200
5201 case AARCH64_OPND_ADDR_SIMM9:
5202 case AARCH64_OPND_ADDR_SIMM9_2:
5203 po_misc_or_fail (parse_address_reloc (&str, info));
5204 if (info->addr.pcrel || info->addr.offset.is_reg
5205 || (!info->addr.preind && !info->addr.postind)
5206 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5207 && info->addr.writeback))
5208 {
5209 set_syntax_error (_("invalid addressing mode"));
5210 goto failure;
5211 }
5212 if (inst.reloc.type != BFD_RELOC_UNUSED)
5213 {
5214 set_syntax_error (_("relocation not allowed"));
5215 goto failure;
5216 }
5217 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5218 /* addr_off_p */ 1,
5219 /* need_libopcodes_p */ 1,
5220 /* skip_p */ 0);
5221 break;
5222
5223 case AARCH64_OPND_ADDR_UIMM12:
5224 po_misc_or_fail (parse_address_reloc (&str, info));
5225 if (info->addr.pcrel || info->addr.offset.is_reg
5226 || !info->addr.preind || info->addr.writeback)
5227 {
5228 set_syntax_error (_("invalid addressing mode"));
5229 goto failure;
5230 }
5231 if (inst.reloc.type == BFD_RELOC_UNUSED)
5232 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5233 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5234 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5235 /* Leave qualifier to be determined by libopcodes. */
5236 break;
5237
5238 case AARCH64_OPND_SIMD_ADDR_POST:
5239 /* [<Xn|SP>], <Xm|#<amount>> */
5240 po_misc_or_fail (parse_address (&str, info, 1));
5241 if (!info->addr.postind || !info->addr.writeback)
5242 {
5243 set_syntax_error (_("invalid addressing mode"));
5244 goto failure;
5245 }
5246 if (!info->addr.offset.is_reg)
5247 {
5248 if (inst.reloc.exp.X_op == O_constant)
5249 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5250 else
5251 {
5252 set_fatal_syntax_error
5253 (_("writeback value should be an immediate constant"));
5254 goto failure;
5255 }
5256 }
5257 /* No qualifier. */
5258 break;
5259
5260 case AARCH64_OPND_SYSREG:
5261 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5262 == PARSE_FAIL)
5263 {
5264 set_syntax_error (_("unknown or missing system register name"));
5265 goto failure;
5266 }
5267 inst.base.operands[i].sysreg = val;
5268 break;
5269
5270 case AARCH64_OPND_PSTATEFIELD:
5271 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5272 == PARSE_FAIL)
5273 {
5274 set_syntax_error (_("unknown or missing PSTATE field name"));
5275 goto failure;
5276 }
5277 inst.base.operands[i].pstatefield = val;
5278 break;
5279
5280 case AARCH64_OPND_SYSREG_IC:
5281 inst.base.operands[i].sysins_op =
5282 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5283 goto sys_reg_ins;
5284 case AARCH64_OPND_SYSREG_DC:
5285 inst.base.operands[i].sysins_op =
5286 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5287 goto sys_reg_ins;
5288 case AARCH64_OPND_SYSREG_AT:
5289 inst.base.operands[i].sysins_op =
5290 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5291 goto sys_reg_ins;
5292 case AARCH64_OPND_SYSREG_TLBI:
5293 inst.base.operands[i].sysins_op =
5294 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5295 sys_reg_ins:
5296 if (inst.base.operands[i].sysins_op == NULL)
5297 {
5298 set_fatal_syntax_error ( _("unknown or missing operation name"));
5299 goto failure;
5300 }
5301 break;
5302
5303 case AARCH64_OPND_BARRIER:
5304 case AARCH64_OPND_BARRIER_ISB:
5305 val = parse_barrier (&str);
5306 if (val != PARSE_FAIL
5307 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5308 {
5309 	      /* ISB only accepts the option name 'sy'.  */
5310 set_syntax_error
5311 (_("the specified option is not accepted in ISB"));
5312 /* Turn off backtrack as this optional operand is present. */
5313 backtrack_pos = 0;
5314 goto failure;
5315 }
5316 /* This is an extension to accept a 0..15 immediate. */
5317 if (val == PARSE_FAIL)
5318 po_imm_or_fail (0, 15);
5319 info->barrier = aarch64_barrier_options + val;
5320 break;
5321
5322 case AARCH64_OPND_PRFOP:
5323 val = parse_pldop (&str);
5324 /* This is an extension to accept a 0..31 immediate. */
5325 if (val == PARSE_FAIL)
5326 po_imm_or_fail (0, 31);
5327 inst.base.operands[i].prfop = aarch64_prfops + val;
5328 break;
5329
5330 default:
5331 as_fatal (_("unhandled operand code %d"), operands[i]);
5332 }
5333
5334 /* If we get here, this operand was successfully parsed. */
5335 inst.base.operands[i].present = 1;
5336 continue;
5337
5338 failure:
5339 /* The parse routine should already have set the error, but in case
5340 not, set a default one here. */
5341 if (! error_p ())
5342 set_default_error ();
5343
5344 if (! backtrack_pos)
5345 goto parse_operands_return;
5346
5347 {
5348 /* We reach here because this operand is marked as optional, and
5349 either no operand was supplied or the operand was supplied but it
5350 was syntactically incorrect. In the latter case we report an
5351 error. In the former case we perform a few more checks before
5352 dropping through to the code to insert the default operand. */
5353
5354 char *tmp = backtrack_pos;
5355 char endchar = END_OF_INSN;
5356
5357 if (i != (aarch64_num_of_operands (opcode) - 1))
5358 endchar = ',';
5359 skip_past_char (&tmp, ',');
5360
5361 if (*tmp != endchar)
5362 /* The user has supplied an operand in the wrong format. */
5363 goto parse_operands_return;
5364
5365 /* Make sure there is not a comma before the optional operand.
5366 For example the fifth operand of 'sys' is optional:
5367
5368 sys #0,c0,c0,#0, <--- wrong
5369 sys #0,c0,c0,#0 <--- correct. */
5370 if (comma_skipped_p && i && endchar == END_OF_INSN)
5371 {
5372 set_fatal_syntax_error
5373 (_("unexpected comma before the omitted optional operand"));
5374 goto parse_operands_return;
5375 }
5376 }
5377
5378 /* Reaching here means we are dealing with an optional operand that is
5379 omitted from the assembly line. */
5380 gas_assert (optional_operand_p (opcode, i));
5381 info->present = 0;
5382 process_omitted_operand (operands[i], opcode, i, info);
5383
5384 /* Try again, skipping the optional operand at backtrack_pos. */
5385 str = backtrack_pos;
5386 backtrack_pos = 0;
5387
5388 /* Clear any error record after the omitted optional operand has been
5389 successfully handled. */
5390 clear_error ();
5391 }
5392
5393 /* Check if we have parsed all the operands. */
5394 if (*str != '\0' && ! error_p ())
5395 {
5396 /* Set I to the index of the last present operand; this is
5397 for the purpose of diagnostics. */
5398 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5399 ;
5400 set_fatal_syntax_error
5401 (_("unexpected characters following instruction"));
5402 }
5403
5404 parse_operands_return:
5405
5406 if (error_p ())
5407 {
5408 DEBUG_TRACE ("parsing FAIL: %s - %s",
5409 operand_mismatch_kind_names[get_error_kind ()],
5410 get_error_message ());
5411 /* Record the operand error properly; this is useful when there
5412 are multiple instruction templates for a mnemonic name, so that
5413 later on, we can select the error that most closely describes
5414 the problem. */
5415 record_operand_error (opcode, i, get_error_kind (),
5416 get_error_message ());
5417 return FALSE;
5418 }
5419 else
5420 {
5421 DEBUG_TRACE ("parsing SUCCESS");
5422 return TRUE;
5423 }
5424 }
5425
5426 /* Perform some fix-ups to provide programmer-friendly features while
5427    keeping libopcodes happy, i.e. libopcodes only accepts
5428    the preferred architectural syntax.
5429 Return FALSE if there is any failure; otherwise return TRUE. */
5430
5431 static bfd_boolean
5432 programmer_friendly_fixup (aarch64_instruction *instr)
5433 {
5434 aarch64_inst *base = &instr->base;
5435 const aarch64_opcode *opcode = base->opcode;
5436 enum aarch64_op op = opcode->op;
5437 aarch64_opnd_info *operands = base->operands;
5438
5439 DEBUG_TRACE ("enter");
5440
5441 switch (opcode->iclass)
5442 {
5443 case testbranch:
5444 /* TBNZ Xn|Wn, #uimm6, label
5445 Test and Branch Not Zero: conditionally jumps to label if bit number
5446 uimm6 in register Xn is not zero. The bit number implies the width of
5447 the register, which may be written and should be disassembled as Wn if
5448 uimm is less than 32. */
5449 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5450 {
5451 if (operands[1].imm.value >= 32)
5452 {
5453 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5454 0, 31);
5455 return FALSE;
5456 }
5457 operands[0].qualifier = AARCH64_OPND_QLF_X;
5458 }
5459 break;
5460 case loadlit:
5461 /* LDR Wt, label | =value
5462 	 As a convenience, assemblers will typically permit the notation
5463 "=value" in conjunction with the pc-relative literal load instructions
5464 to automatically place an immediate value or symbolic address in a
5465 nearby literal pool and generate a hidden label which references it.
5466 ISREG has been set to 0 in the case of =value. */
5467 if (instr->gen_lit_pool
5468 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5469 {
5470 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5471 if (op == OP_LDRSW_LIT)
5472 size = 4;
5473 if (instr->reloc.exp.X_op != O_constant
5474 && instr->reloc.exp.X_op != O_big
5475 && instr->reloc.exp.X_op != O_symbol)
5476 {
5477 record_operand_error (opcode, 1,
5478 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5479 _("constant expression expected"));
5480 return FALSE;
5481 }
5482 if (! add_to_lit_pool (&instr->reloc.exp, size))
5483 {
5484 record_operand_error (opcode, 1,
5485 AARCH64_OPDE_OTHER_ERROR,
5486 _("literal pool insertion failed"));
5487 return FALSE;
5488 }
5489 }
5490 break;
5491 case log_shift:
5492 case bitfield:
5493 /* UXT[BHW] Wd, Wn
5494 	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5495 	 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5496 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5497 A programmer-friendly assembler should accept a destination Xd in
5498 place of Wd, however that is not the preferred form for disassembly.
5499 */
5500 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5501 && operands[1].qualifier == AARCH64_OPND_QLF_W
5502 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5503 operands[0].qualifier = AARCH64_OPND_QLF_W;
5504 break;
5505
5506 case addsub_ext:
5507 {
5508 /* In the 64-bit form, the final register operand is written as Wm
5509 for all but the (possibly omitted) UXTX/LSL and SXTX
5510 operators.
5511 As a programmer-friendly assembler, we accept e.g.
5512 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5513 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5514 int idx = aarch64_operand_index (opcode->operands,
5515 AARCH64_OPND_Rm_EXT);
5516 gas_assert (idx == 1 || idx == 2);
5517 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5518 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5519 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5520 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5521 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5522 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5523 }
5524 break;
5525
5526 default:
5527 break;
5528 }
5529
5530 DEBUG_TRACE ("exit with SUCCESS");
5531 return TRUE;
5532 }
5533
5534 /* Check for loads and stores that will cause unpredictable behavior. */
5535
5536 static void
5537 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5538 {
5539 aarch64_inst *base = &instr->base;
5540 const aarch64_opcode *opcode = base->opcode;
5541 const aarch64_opnd_info *opnds = base->operands;
5542 switch (opcode->iclass)
5543 {
5544 case ldst_pos:
5545 case ldst_imm9:
5546 case ldst_unscaled:
5547 case ldst_unpriv:
5548 /* Loading/storing the base register is unpredictable if writeback. */
5549 if ((aarch64_get_operand_class (opnds[0].type)
5550 == AARCH64_OPND_CLASS_INT_REG)
5551 && opnds[0].reg.regno == opnds[1].addr.base_regno
5552 && opnds[1].addr.writeback)
5553 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5554 break;
5555 case ldstpair_off:
5556 case ldstnapair_offs:
5557 case ldstpair_indexed:
5558 /* Loading/storing the base register is unpredictable if writeback. */
5559 if ((aarch64_get_operand_class (opnds[0].type)
5560 == AARCH64_OPND_CLASS_INT_REG)
5561 && (opnds[0].reg.regno == opnds[2].addr.base_regno
5562 || opnds[1].reg.regno == opnds[2].addr.base_regno)
5563 && opnds[2].addr.writeback)
5564 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5565 /* Load operations must load different registers. */
5566 if ((opcode->opcode & (1 << 22))
5567 && opnds[0].reg.regno == opnds[1].reg.regno)
5568 as_warn (_("unpredictable load of register pair -- `%s'"), str);
5569 break;
5570 default:
5571 break;
5572 }
5573 }
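/* For illustration: "ldr x0, [x0, #8]!" writes back into the transfer
   register and so triggers the writeback warning above, and "ldp x0, x0, [x1]"
   loads the same register twice and triggers the register-pair warning;
   plain "ldr x0, [x0, #8]" (no writeback) is not warned about.  */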
5574
5575 /* A wrapper function to interface with libopcodes on encoding and
5576 record the error message if there is any.
5577
5578 Return TRUE on success; otherwise return FALSE. */
5579
5580 static bfd_boolean
5581 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5582 aarch64_insn *code)
5583 {
5584 aarch64_operand_error error_info;
5585 error_info.kind = AARCH64_OPDE_NIL;
5586 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5587 return TRUE;
5588 else
5589 {
5590 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5591 record_operand_error_info (opcode, &error_info);
5592 return FALSE;
5593 }
5594 }
5595
5596 #ifdef DEBUG_AARCH64
5597 static inline void
5598 dump_opcode_operands (const aarch64_opcode *opcode)
5599 {
5600 int i = 0;
5601 while (opcode->operands[i] != AARCH64_OPND_NIL)
5602 {
5603 aarch64_verbose ("\t\t opnd%d: %s", i,
5604 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5605 ? aarch64_get_operand_name (opcode->operands[i])
5606 : aarch64_get_operand_desc (opcode->operands[i]));
5607 ++i;
5608 }
5609 }
5610 #endif /* DEBUG_AARCH64 */
5611
5612 /* This is the guts of the machine-dependent assembler. STR points to a
5613 machine dependent instruction. This function is supposed to emit
5614 the frags/bytes it assembles to. */
5615
5616 void
5617 md_assemble (char *str)
5618 {
5619 char *p = str;
5620 templates *template;
5621 aarch64_opcode *opcode;
5622 aarch64_inst *inst_base;
5623 unsigned saved_cond;
5624
5625 /* Align the previous label if needed. */
5626 if (last_label_seen != NULL)
5627 {
5628 symbol_set_frag (last_label_seen, frag_now);
5629 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5630 S_SET_SEGMENT (last_label_seen, now_seg);
5631 }
5632
5633 inst.reloc.type = BFD_RELOC_UNUSED;
5634
5635 DEBUG_TRACE ("\n\n");
5636 DEBUG_TRACE ("==============================");
5637 DEBUG_TRACE ("Enter md_assemble with %s", str);
5638
5639 template = opcode_lookup (&p);
5640 if (!template)
5641 {
5642 /* It wasn't an instruction, but it might be a register alias of
5643 	 the form 'alias .req reg'.  */
5644 if (!create_register_alias (str, p))
5645 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5646 str);
5647 return;
5648 }
5649
5650 skip_whitespace (p);
5651 if (*p == ',')
5652 {
5653 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5654 get_mnemonic_name (str), str);
5655 return;
5656 }
5657
5658 init_operand_error_report ();
5659
5660 saved_cond = inst.cond;
5661 reset_aarch64_instruction (&inst);
5662 inst.cond = saved_cond;
5663
5664 /* Iterate through all opcode entries with the same mnemonic name. */
5665 do
5666 {
5667 opcode = template->opcode;
5668
5669 DEBUG_TRACE ("opcode %s found", opcode->name);
5670 #ifdef DEBUG_AARCH64
5671 if (debug_dump)
5672 dump_opcode_operands (opcode);
5673 #endif /* DEBUG_AARCH64 */
5674
5675 mapping_state (MAP_INSN);
5676
5677 inst_base = &inst.base;
5678 inst_base->opcode = opcode;
5679
5680 /* Truly conditionally executed instructions, e.g. b.cond. */
5681 if (opcode->flags & F_COND)
5682 {
5683 gas_assert (inst.cond != COND_ALWAYS);
5684 inst_base->cond = get_cond_from_value (inst.cond);
5685 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5686 }
5687 else if (inst.cond != COND_ALWAYS)
5688 {
5689 /* It shouldn't arrive here, where the assembly looks like a
5690 conditional instruction but the found opcode is unconditional. */
5691 gas_assert (0);
5692 continue;
5693 }
5694
5695 if (parse_operands (p, opcode)
5696 && programmer_friendly_fixup (&inst)
5697 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5698 {
5699 /* Check that this instruction is supported for this CPU. */
5700 if (!opcode->avariant
5701 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5702 {
5703 as_bad (_("selected processor does not support `%s'"), str);
5704 return;
5705 }
5706
5707 warn_unpredictable_ldst (&inst, str);
5708
5709 if (inst.reloc.type == BFD_RELOC_UNUSED
5710 || !inst.reloc.need_libopcodes_p)
5711 output_inst (NULL);
5712 else
5713 {
5714 /* If there is relocation generated for the instruction,
5715 store the instruction information for the future fix-up. */
5716 struct aarch64_inst *copy;
5717 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5718 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5719 abort ();
5720 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5721 output_inst (copy);
5722 }
5723 return;
5724 }
5725
5726 template = template->next;
5727 if (template != NULL)
5728 {
5729 reset_aarch64_instruction (&inst);
5730 inst.cond = saved_cond;
5731 }
5732 }
5733 while (template != NULL);
5734
5735 /* Issue the error messages if any. */
5736 output_operand_error_report (str);
5737 }
5738
5739 /* Various frobbings of labels and their addresses. */
5740
5741 void
5742 aarch64_start_line_hook (void)
5743 {
5744 last_label_seen = NULL;
5745 }
5746
5747 void
5748 aarch64_frob_label (symbolS * sym)
5749 {
5750 last_label_seen = sym;
5751
5752 dwarf2_emit_label (sym);
5753 }
5754
5755 int
5756 aarch64_data_in_code (void)
5757 {
5758 if (!strncmp (input_line_pointer + 1, "data:", 5))
5759 {
5760 *input_line_pointer = '/';
5761 input_line_pointer += 5;
5762 *input_line_pointer = 0;
5763 return 1;
5764 }
5765
5766 return 0;
5767 }
5768
5769 char *
5770 aarch64_canonicalize_symbol_name (char *name)
5771 {
5772 int len;
5773
5774 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5775 *(name + len - 5) = 0;
5776
5777 return name;
5778 }
5779 \f
5780 /* Table of all register names defined by default. The user can
5781 define additional names with .req. Note that all register names
5782 should appear in both upper and lowercase variants. Some registers
5783 also have mixed-case names. */
5784
5785 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5786 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5787 #define REGSET31(p,t) \
5788 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5789 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5790 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5791 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5792 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5793 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5794 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5795 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5796 #define REGSET(p,t) \
5797 REGSET31(p,t), REGNUM(p,31,t)
5798
5799 /* These go into aarch64_reg_hsh hash-table. */
5800 static const reg_entry reg_names[] = {
5801 /* Integer registers. */
5802 REGSET31 (x, R_64), REGSET31 (X, R_64),
5803 REGSET31 (w, R_32), REGSET31 (W, R_32),
5804
5805 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5806 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5807
5808 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5809 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5810
5811 /* Coprocessor register numbers. */
5812 REGSET (c, CN), REGSET (C, CN),
5813
5814 /* Floating-point single precision registers. */
5815 REGSET (s, FP_S), REGSET (S, FP_S),
5816
5817 /* Floating-point double precision registers. */
5818 REGSET (d, FP_D), REGSET (D, FP_D),
5819
5820 /* Floating-point half precision registers. */
5821 REGSET (h, FP_H), REGSET (H, FP_H),
5822
5823 /* Floating-point byte precision registers. */
5824 REGSET (b, FP_B), REGSET (B, FP_B),
5825
5826 /* Floating-point quad precision registers. */
5827 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5828
5829 /* FP/SIMD registers. */
5830 REGSET (v, VN), REGSET (V, VN),
5831 };
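/* For illustration of the macros above: REGNUM (x, 5, R_64) expands to
   REGDEF (x5, 5, R_64), i.e. the entry { "x5", 5, REG_TYPE_R_64, TRUE },
   and REGSET31 simply instantiates REGNUM for the numbers 0..30.  */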
5832
5833 #undef REGDEF
5834 #undef REGNUM
5835 #undef REGSET
5836
5837 #define N 1
5838 #define n 0
5839 #define Z 1
5840 #define z 0
5841 #define C 1
5842 #define c 0
5843 #define V 1
5844 #define v 0
5845 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5846 static const asm_nzcv nzcv_names[] = {
5847 {"nzcv", B (n, z, c, v)},
5848 {"nzcV", B (n, z, c, V)},
5849 {"nzCv", B (n, z, C, v)},
5850 {"nzCV", B (n, z, C, V)},
5851 {"nZcv", B (n, Z, c, v)},
5852 {"nZcV", B (n, Z, c, V)},
5853 {"nZCv", B (n, Z, C, v)},
5854 {"nZCV", B (n, Z, C, V)},
5855 {"Nzcv", B (N, z, c, v)},
5856 {"NzcV", B (N, z, c, V)},
5857 {"NzCv", B (N, z, C, v)},
5858 {"NzCV", B (N, z, C, V)},
5859 {"NZcv", B (N, Z, c, v)},
5860 {"NZcV", B (N, Z, c, V)},
5861 {"NZCv", B (N, Z, C, v)},
5862 {"NZCV", B (N, Z, C, V)}
5863 };
5864
5865 #undef N
5866 #undef n
5867 #undef Z
5868 #undef z
5869 #undef C
5870 #undef c
5871 #undef V
5872 #undef v
5873 #undef B
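/* For illustration of the encoding above: each letter of an NZCV operand
   contributes one flag bit, uppercase meaning set, so "nZCv" maps to
   B (0, 1, 1, 0) == 0x6 and "NZCV" maps to B (1, 1, 1, 1) == 0xf.  */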
5874 \f
5875 /* MD interface: bits in the object file. */
5876
5877 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5878 for use in the a.out file, and stores them in the array pointed to by buf.
5879 This knows about the endian-ness of the target machine and does
5880 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
5881    2 (short) and 4 (long).  Floating numbers are put out as a series of
5882 LITTLENUMS (shorts, here at least). */
5883
5884 void
5885 md_number_to_chars (char *buf, valueT val, int n)
5886 {
5887 if (target_big_endian)
5888 number_to_chars_bigendian (buf, val, n);
5889 else
5890 number_to_chars_littleendian (buf, val, n);
5891 }
5892
5893 /* MD interface: Sections. */
5894
5895 /* Estimate the size of a frag before relaxing. Assume everything fits in
5896 4 bytes. */
5897
5898 int
5899 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5900 {
5901 fragp->fr_var = 4;
5902 return 4;
5903 }
5904
5905 /* Round up a section size to the appropriate boundary. */
5906
5907 valueT
5908 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5909 {
5910 return size;
5911 }
5912
5913 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5914 of an rs_align_code fragment.
5915
5916 Here we fill the frag with the appropriate info for padding the
5917 output stream. The resulting frag will consist of a fixed (fr_fix)
5918 and of a repeating (fr_var) part.
5919
5920 The fixed content is always emitted before the repeating content and
5921 these two parts are used as follows in constructing the output:
5922 - the fixed part will be used to align to a valid instruction word
5923 boundary, in case that we start at a misaligned address; as no
5924 executable instruction can live at the misaligned location, we
5925 simply fill with zeros;
5926 - the variable part will be used to cover the remaining padding and
5927 we fill using the AArch64 NOP instruction.
5928
5929 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
5930 enough storage space for up to 3 bytes of padding back to a valid
5931 instruction alignment and exactly 4 bytes to store the NOP pattern. */
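/* Illustrative example (not from the original sources): aligning code to an
   8-byte boundary from an address that is 2 bytes past a word boundary emits
   2 zero bytes as the fixed part and then repeats the 4-byte NOP pattern
   0xd503201f as the variable part.  */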
5932
5933 void
5934 aarch64_handle_align (fragS * fragP)
5935 {
5936 /* NOP = d503201f */
5937 /* AArch64 instructions are always little-endian. */
5938 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5939
5940 int bytes, fix, noop_size;
5941 char *p;
5942
5943 if (fragP->fr_type != rs_align_code)
5944 return;
5945
5946 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5947 p = fragP->fr_literal + fragP->fr_fix;
5948
5949 #ifdef OBJ_ELF
5950 gas_assert (fragP->tc_frag_data.recorded);
5951 #endif
5952
5953 noop_size = sizeof (aarch64_noop);
5954
5955 fix = bytes & (noop_size - 1);
5956 if (fix)
5957 {
5958 #ifdef OBJ_ELF
5959 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5960 #endif
5961 memset (p, 0, fix);
5962 p += fix;
5963 fragP->fr_fix += fix;
5964 }
5965
5966 if (noop_size)
5967 memcpy (p, aarch64_noop, noop_size);
5968 fragP->fr_var = noop_size;
5969 }
5970
5971 /* Perform target specific initialisation of a frag.
5972 Note - despite the name this initialisation is not done when the frag
5973 is created, but only when its type is assigned. A frag can be created
5974 and used a long time before its type is set, so beware of assuming that
5975 this initialisation is performed first. */
5976
5977 #ifndef OBJ_ELF
5978 void
5979 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
5980 int max_chars ATTRIBUTE_UNUSED)
5981 {
5982 }
5983
5984 #else /* OBJ_ELF is defined. */
5985 void
5986 aarch64_init_frag (fragS * fragP, int max_chars)
5987 {
5988 /* Record a mapping symbol for alignment frags. We will delete this
5989 later if the alignment ends up empty. */
5990 if (!fragP->tc_frag_data.recorded)
5991 {
5992 fragP->tc_frag_data.recorded = 1;
5993 switch (fragP->fr_type)
5994 {
5995 case rs_align:
5996 case rs_align_test:
5997 case rs_fill:
5998 mapping_state_2 (MAP_DATA, max_chars);
5999 break;
6000 case rs_align_code:
6001 mapping_state_2 (MAP_INSN, max_chars);
6002 break;
6003 default:
6004 break;
6005 }
6006 }
6007 }
6008 \f
6009 /* Initialize the DWARF-2 unwind information for this procedure. */
6010
6011 void
6012 tc_aarch64_frame_initial_instructions (void)
6013 {
6014 cfi_add_CFA_def_cfa (REG_SP, 0);
6015 }
6016 #endif /* OBJ_ELF */
6017
6018 /* Convert REGNAME to a DWARF-2 register number. */
6019
6020 int
6021 tc_aarch64_regname_to_dw2regnum (char *regname)
6022 {
6023 const reg_entry *reg = parse_reg (&regname);
6024 if (reg == NULL)
6025 return -1;
6026
6027 switch (reg->type)
6028 {
6029 case REG_TYPE_SP_32:
6030 case REG_TYPE_SP_64:
6031 case REG_TYPE_R_32:
6032 case REG_TYPE_R_64:
6033 return reg->number;
6034
6035 case REG_TYPE_FP_B:
6036 case REG_TYPE_FP_H:
6037 case REG_TYPE_FP_S:
6038 case REG_TYPE_FP_D:
6039 case REG_TYPE_FP_Q:
6040 return reg->number + 64;
6041
6042 default:
6043 break;
6044 }
6045 return -1;
6046 }
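/* The mapping above follows the AArch64 DWARF register numbering: the
   general registers X0-X30 and SP occupy numbers 0-31, while the FP/SIMD
   registers V0-V31 start at number 64.  */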
6047
6048 /* Implement DWARF2_ADDR_SIZE. */
6049
6050 int
6051 aarch64_dwarf2_addr_size (void)
6052 {
6053 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6054 if (ilp32_p)
6055 return 4;
6056 #endif
6057 return bfd_arch_bits_per_address (stdoutput) / 8;
6058 }
6059
6060 /* MD interface: Symbol and relocation handling. */
6061
6062 /* Return the address within the segment that a PC-relative fixup is
6063 relative to. For AArch64 PC-relative fixups applied to instructions
6064 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
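/* On AArch64, AARCH64_PCREL_OFFSET is expected to be 0, i.e. PC-relative
   values are measured from the address of the instruction itself rather
   than from a pipeline-adjusted PC.  */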
6065
6066 long
6067 md_pcrel_from_section (fixS * fixP, segT seg)
6068 {
6069 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6070
6071 /* If this is pc-relative and we are going to emit a relocation
6072 then we just want to put out any pipeline compensation that the linker
6073 will need. Otherwise we want to use the calculated base. */
6074 if (fixP->fx_pcrel
6075 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6076 || aarch64_force_relocation (fixP)))
6077 base = 0;
6078
6079 /* AArch64 should be consistent for all pc-relative relocations. */
6080 return base + AARCH64_PCREL_OFFSET;
6081 }
6082
6083 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
6084 Otherwise we have no need to default the values of symbols. */
6085
6086 symbolS *
6087 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6088 {
6089 #ifdef OBJ_ELF
6090 if (name[0] == '_' && name[1] == 'G'
6091 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6092 {
6093 if (!GOT_symbol)
6094 {
6095 if (symbol_find (name))
6096 as_bad (_("GOT already in the symbol table"));
6097
6098 GOT_symbol = symbol_new (name, undefined_section,
6099 (valueT) 0, &zero_address_frag);
6100 }
6101
6102 return GOT_symbol;
6103 }
6104 #endif
6105
6106 return 0;
6107 }
6108
6109 /* Return non-zero if the indicated VALUE has overflowed the maximum
6110 range expressible by an unsigned number with the indicated number of
6111 BITS. */
6112
6113 static bfd_boolean
6114 unsigned_overflow (valueT value, unsigned bits)
6115 {
6116 valueT lim;
6117 if (bits >= sizeof (valueT) * 8)
6118 return FALSE;
6119 lim = (valueT) 1 << bits;
6120 return (value >= lim);
6121 }
6122
6123
6124 /* Return non-zero if the indicated VALUE has overflowed the maximum
6125 range expressible by a signed number with the indicated number of
6126 BITS. */
6127
6128 static bfd_boolean
6129 signed_overflow (offsetT value, unsigned bits)
6130 {
6131 offsetT lim;
6132 if (bits >= sizeof (offsetT) * 8)
6133 return FALSE;
6134 lim = (offsetT) 1 << (bits - 1);
6135 return (value < -lim || value >= lim);
6136 }
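/* For example, signed_overflow (value, 21) is FALSE only when
   -2^20 <= value <= 2^20 - 1, which matches the range of the 21-bit signed
   immediates used by ADR and by the literal load and branch offsets.  */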
6137
6138 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6139 unsigned immediate offset load/store instruction, try to encode it as
6140 an unscaled, 9-bit, signed immediate offset load/store instruction.
6141 Return TRUE if it is successful; otherwise return FALSE.
6142
6143 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
6144 in response to the standard LDR/STR mnemonics when the immediate offset is
6145 unambiguous, i.e. when it is negative or unaligned. */
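/* For example (illustrative): "ldr x0, [x1, #-8]" and "ldr x0, [x1, #1]"
   cannot use the scaled, unsigned 12-bit offset form, so they are accepted
   and encoded as "ldur x0, [x1, #-8]" and "ldur x0, [x1, #1]".  */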
6146
6147 static bfd_boolean
6148 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6149 {
6150 int idx;
6151 enum aarch64_op new_op;
6152 const aarch64_opcode *new_opcode;
6153
6154 gas_assert (instr->opcode->iclass == ldst_pos);
6155
6156 switch (instr->opcode->op)
6157 {
6158 case OP_LDRB_POS: new_op = OP_LDURB; break;
6159 case OP_STRB_POS: new_op = OP_STURB; break;
6160 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6161 case OP_LDRH_POS: new_op = OP_LDURH; break;
6162 case OP_STRH_POS: new_op = OP_STURH; break;
6163 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6164 case OP_LDR_POS: new_op = OP_LDUR; break;
6165 case OP_STR_POS: new_op = OP_STUR; break;
6166 case OP_LDRF_POS: new_op = OP_LDURV; break;
6167 case OP_STRF_POS: new_op = OP_STURV; break;
6168 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6169 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6170 default: new_op = OP_NIL; break;
6171 }
6172
6173 if (new_op == OP_NIL)
6174 return FALSE;
6175
6176 new_opcode = aarch64_get_opcode (new_op);
6177 gas_assert (new_opcode != NULL);
6178
6179 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6180 instr->opcode->op, new_opcode->op);
6181
6182 aarch64_replace_opcode (instr, new_opcode);
6183
6184 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6185 qualifier matching may fail because the out-of-date qualifier will
6186 prevent the operand being updated with a new and correct qualifier. */
6187 idx = aarch64_operand_index (instr->opcode->operands,
6188 AARCH64_OPND_ADDR_SIMM9);
6189 gas_assert (idx == 1);
6190 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6191
6192 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6193
6194 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6195 return FALSE;
6196
6197 return TRUE;
6198 }
6199
6200 /* Called by fix_insn to fix a MOV immediate alias instruction.
6201
6202 Operand for a generic move immediate instruction, which is an alias
6203 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6204 a 32-bit/64-bit immediate value into a general register. An assembler error
6205 shall result if the immediate cannot be created by a single one of these
6206 instructions. If there is a choice, then to ensure reversibility an
6207 assembler must prefer MOVZ to MOVN, and MOVZ or MOVN to ORR. */
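/* For example (illustrative): "mov x0, #0x10000" becomes a MOVZ with a
   16-bit shift, "mov x0, #-1" becomes MOVN, and a repeating bit pattern such
   as "mov x0, #0xff00ff00ff00ff00" becomes an ORR with a logical immediate;
   a value expressible by none of the three is rejected.  */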
6208
6209 static void
6210 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6211 {
6212 const aarch64_opcode *opcode;
6213
6214 /* Need to check if the destination is SP/ZR. The check has to be done
6215 before any aarch64_replace_opcode. */
6216 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6217 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6218
6219 instr->operands[1].imm.value = value;
6220 instr->operands[1].skip = 0;
6221
6222 if (try_mov_wide_p)
6223 {
6224 /* Try the MOVZ alias. */
6225 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6226 aarch64_replace_opcode (instr, opcode);
6227 if (aarch64_opcode_encode (instr->opcode, instr,
6228 &instr->value, NULL, NULL))
6229 {
6230 put_aarch64_insn (buf, instr->value);
6231 return;
6232 }
6233 /* Try the MOVN alias. */
6234 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6235 aarch64_replace_opcode (instr, opcode);
6236 if (aarch64_opcode_encode (instr->opcode, instr,
6237 &instr->value, NULL, NULL))
6238 {
6239 put_aarch64_insn (buf, instr->value);
6240 return;
6241 }
6242 }
6243
6244 if (try_mov_bitmask_p)
6245 {
6246 /* Try the ORR alias. */
6247 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6248 aarch64_replace_opcode (instr, opcode);
6249 if (aarch64_opcode_encode (instr->opcode, instr,
6250 &instr->value, NULL, NULL))
6251 {
6252 put_aarch64_insn (buf, instr->value);
6253 return;
6254 }
6255 }
6256
6257 as_bad_where (fixP->fx_file, fixP->fx_line,
6258 _("immediate cannot be moved by a single instruction"));
6259 }
6260
6261 /* An immediate-related instruction operand may have a symbol used
6262 in the assembly, e.g.
6263
6264 mov w0, u32
6265 .set u32, 0x00ffff00
6266
6267 At the time the assembly instruction is parsed, a referenced symbol,
6268 like 'u32' in the above example, may not have been seen; a fixS is created
6269 in such a case and is handled here after symbols have been resolved.
6270 The instruction is fixed up with VALUE using the information in *FIXP plus
6271 extra information in FLAGS.
6272
6273 This function is called by md_apply_fix to fix up instructions that need
6274 a fix-up described above but does not involve any linker-time relocation. */
6275
6276 static void
6277 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6278 {
6279 int idx;
6280 uint32_t insn;
6281 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6282 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6283 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6284
6285 if (new_inst)
6286 {
6287 /* Now the instruction is about to be fixed-up, so the operand that
6288 was previously marked as 'ignored' needs to be unmarked in order
6289 to get the encoding done properly. */
6290 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6291 new_inst->operands[idx].skip = 0;
6292 }
6293
6294 gas_assert (opnd != AARCH64_OPND_NIL);
6295
6296 switch (opnd)
6297 {
6298 case AARCH64_OPND_EXCEPTION:
6299 if (unsigned_overflow (value, 16))
6300 as_bad_where (fixP->fx_file, fixP->fx_line,
6301 _("immediate out of range"));
6302 insn = get_aarch64_insn (buf);
6303 insn |= encode_svc_imm (value);
6304 put_aarch64_insn (buf, insn);
6305 break;
6306
6307 case AARCH64_OPND_AIMM:
6308 /* ADD or SUB with immediate.
6309 NOTE this assumes we come here with an add/sub shifted reg encoding
6310 3 322|2222|2 2 2 21111 111111
6311 1 098|7654|3 2 1 09876 543210 98765 43210
6312 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6313 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6314 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6315 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6316 ->
6317 3 322|2222|2 2 221111111111
6318 1 098|7654|3 2 109876543210 98765 43210
6319 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6320 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6321 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6322 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6323 Fields sf Rn Rd are already set. */
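/* For example (illustrative): a resolved value of -1 flips an ADD into a
   SUB with immediate 1, and a value of 0x3000 with no explicit shift is
   encoded as imm12 = 3 with the LSL #12 flag set.  */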
6324 insn = get_aarch64_insn (buf);
6325 if (value < 0)
6326 {
6327 /* Add <-> sub. */
6328 insn = reencode_addsub_switch_add_sub (insn);
6329 value = -value;
6330 }
6331
6332 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6333 && unsigned_overflow (value, 12))
6334 {
6335 /* Try to shift the value by 12 to make it fit. */
6336 if (((value >> 12) << 12) == value
6337 && ! unsigned_overflow (value, 12 + 12))
6338 {
6339 value >>= 12;
6340 insn |= encode_addsub_imm_shift_amount (1);
6341 }
6342 }
6343
6344 if (unsigned_overflow (value, 12))
6345 as_bad_where (fixP->fx_file, fixP->fx_line,
6346 _("immediate out of range"));
6347
6348 insn |= encode_addsub_imm (value);
6349
6350 put_aarch64_insn (buf, insn);
6351 break;
6352
6353 case AARCH64_OPND_SIMD_IMM:
6354 case AARCH64_OPND_SIMD_IMM_SFT:
6355 case AARCH64_OPND_LIMM:
6356 /* Bit mask immediate. */
6357 gas_assert (new_inst != NULL);
6358 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6359 new_inst->operands[idx].imm.value = value;
6360 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6361 &new_inst->value, NULL, NULL))
6362 put_aarch64_insn (buf, new_inst->value);
6363 else
6364 as_bad_where (fixP->fx_file, fixP->fx_line,
6365 _("invalid immediate"));
6366 break;
6367
6368 case AARCH64_OPND_HALF:
6369 /* 16-bit unsigned immediate. */
6370 if (unsigned_overflow (value, 16))
6371 as_bad_where (fixP->fx_file, fixP->fx_line,
6372 _("immediate out of range"));
6373 insn = get_aarch64_insn (buf);
6374 insn |= encode_movw_imm (value & 0xffff);
6375 put_aarch64_insn (buf, insn);
6376 break;
6377
6378 case AARCH64_OPND_IMM_MOV:
6379 /* Operand for a generic move immediate instruction, which is
6380 an alias instruction that generates a single MOVZ, MOVN or ORR
6381 instruction to load a 32-bit/64-bit immediate value into a general
6382 register. An assembler error shall result if the immediate cannot be
6383 created by a single one of these instructions. If there is a choice,
6384 then to ensure reversibility an assembler must prefer MOVZ to MOVN,
6385 and MOVZ or MOVN to ORR. */
6386 gas_assert (new_inst != NULL);
6387 fix_mov_imm_insn (fixP, buf, new_inst, value);
6388 break;
6389
6390 case AARCH64_OPND_ADDR_SIMM7:
6391 case AARCH64_OPND_ADDR_SIMM9:
6392 case AARCH64_OPND_ADDR_SIMM9_2:
6393 case AARCH64_OPND_ADDR_UIMM12:
6394 /* Immediate offset in an address. */
6395 insn = get_aarch64_insn (buf);
6396
6397 gas_assert (new_inst != NULL && new_inst->value == insn);
6398 gas_assert (new_inst->opcode->operands[1] == opnd
6399 || new_inst->opcode->operands[2] == opnd);
6400
6401 /* Get the index of the address operand. */
6402 if (new_inst->opcode->operands[1] == opnd)
6403 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6404 idx = 1;
6405 else
6406 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6407 idx = 2;
6408
6409 /* Update the resolved offset value. */
6410 new_inst->operands[idx].addr.offset.imm = value;
6411
6412 /* Encode/fix-up. */
6413 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6414 &new_inst->value, NULL, NULL))
6415 {
6416 put_aarch64_insn (buf, new_inst->value);
6417 break;
6418 }
6419 else if (new_inst->opcode->iclass == ldst_pos
6420 && try_to_encode_as_unscaled_ldst (new_inst))
6421 {
6422 put_aarch64_insn (buf, new_inst->value);
6423 break;
6424 }
6425
6426 as_bad_where (fixP->fx_file, fixP->fx_line,
6427 _("immediate offset out of range"));
6428 break;
6429
6430 default:
6431 gas_assert (0);
6432 as_fatal (_("unhandled operand code %d"), opnd);
6433 }
6434 }
6435
6436 /* Apply a fixup (fixP) to segment data, once it has been determined
6437 by our caller that we have all the info we need to fix it up.
6438
6439 Parameter valP is the pointer to the value of the bits. */
6440
6441 void
6442 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6443 {
6444 offsetT value = *valP;
6445 uint32_t insn;
6446 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6447 int scale;
6448 unsigned flags = fixP->fx_addnumber;
6449
6450 DEBUG_TRACE ("\n\n");
6451 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6452 DEBUG_TRACE ("Enter md_apply_fix");
6453
6454 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6455
6456 /* Note whether this will delete the relocation. */
6457
6458 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6459 fixP->fx_done = 1;
6460
6461 /* Process the relocations. */
6462 switch (fixP->fx_r_type)
6463 {
6464 case BFD_RELOC_NONE:
6465 /* This will need to go in the object file. */
6466 fixP->fx_done = 0;
6467 break;
6468
6469 case BFD_RELOC_8:
6470 case BFD_RELOC_8_PCREL:
6471 if (fixP->fx_done || !seg->use_rela_p)
6472 md_number_to_chars (buf, value, 1);
6473 break;
6474
6475 case BFD_RELOC_16:
6476 case BFD_RELOC_16_PCREL:
6477 if (fixP->fx_done || !seg->use_rela_p)
6478 md_number_to_chars (buf, value, 2);
6479 break;
6480
6481 case BFD_RELOC_32:
6482 case BFD_RELOC_32_PCREL:
6483 if (fixP->fx_done || !seg->use_rela_p)
6484 md_number_to_chars (buf, value, 4);
6485 break;
6486
6487 case BFD_RELOC_64:
6488 case BFD_RELOC_64_PCREL:
6489 if (fixP->fx_done || !seg->use_rela_p)
6490 md_number_to_chars (buf, value, 8);
6491 break;
6492
6493 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6494 /* We claim that these fixups have been processed here, even if
6495 in fact we generate an error because we do not have a reloc
6496 for them, so tc_gen_reloc() will reject them. */
6497 fixP->fx_done = 1;
6498 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6499 {
6500 as_bad_where (fixP->fx_file, fixP->fx_line,
6501 _("undefined symbol %s used as an immediate value"),
6502 S_GET_NAME (fixP->fx_addsy));
6503 goto apply_fix_return;
6504 }
6505 fix_insn (fixP, flags, value);
6506 break;
6507
6508 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6509 if (fixP->fx_done || !seg->use_rela_p)
6510 {
6511 if (value & 3)
6512 as_bad_where (fixP->fx_file, fixP->fx_line,
6513 _("pc-relative load offset not word aligned"));
6514 if (signed_overflow (value, 21))
6515 as_bad_where (fixP->fx_file, fixP->fx_line,
6516 _("pc-relative load offset out of range"));
6517 insn = get_aarch64_insn (buf);
6518 insn |= encode_ld_lit_ofs_19 (value >> 2);
6519 put_aarch64_insn (buf, insn);
6520 }
6521 break;
6522
6523 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6524 if (fixP->fx_done || !seg->use_rela_p)
6525 {
6526 if (signed_overflow (value, 21))
6527 as_bad_where (fixP->fx_file, fixP->fx_line,
6528 _("pc-relative address offset out of range"));
6529 insn = get_aarch64_insn (buf);
6530 insn |= encode_adr_imm (value);
6531 put_aarch64_insn (buf, insn);
6532 }
6533 break;
6534
6535 case BFD_RELOC_AARCH64_BRANCH19:
6536 if (fixP->fx_done || !seg->use_rela_p)
6537 {
6538 if (value & 3)
6539 as_bad_where (fixP->fx_file, fixP->fx_line,
6540 _("conditional branch target not word aligned"));
6541 if (signed_overflow (value, 21))
6542 as_bad_where (fixP->fx_file, fixP->fx_line,
6543 _("conditional branch out of range"));
6544 insn = get_aarch64_insn (buf);
6545 insn |= encode_cond_branch_ofs_19 (value >> 2);
6546 put_aarch64_insn (buf, insn);
6547 }
6548 break;
6549
6550 case BFD_RELOC_AARCH64_TSTBR14:
6551 if (fixP->fx_done || !seg->use_rela_p)
6552 {
6553 if (value & 3)
6554 as_bad_where (fixP->fx_file, fixP->fx_line,
6555 _("conditional branch target not word aligned"));
6556 if (signed_overflow (value, 16))
6557 as_bad_where (fixP->fx_file, fixP->fx_line,
6558 _("conditional branch out of range"));
6559 insn = get_aarch64_insn (buf);
6560 insn |= encode_tst_branch_ofs_14 (value >> 2);
6561 put_aarch64_insn (buf, insn);
6562 }
6563 break;
6564
6565 case BFD_RELOC_AARCH64_JUMP26:
6566 case BFD_RELOC_AARCH64_CALL26:
6567 if (fixP->fx_done || !seg->use_rela_p)
6568 {
6569 if (value & 3)
6570 as_bad_where (fixP->fx_file, fixP->fx_line,
6571 _("branch target not word aligned"));
6572 if (signed_overflow (value, 28))
6573 as_bad_where (fixP->fx_file, fixP->fx_line,
6574 _("branch out of range"));
6575 insn = get_aarch64_insn (buf);
6576 insn |= encode_branch_ofs_26 (value >> 2);
6577 put_aarch64_insn (buf, insn);
6578 }
6579 break;
6580
6581 case BFD_RELOC_AARCH64_MOVW_G0:
6582 case BFD_RELOC_AARCH64_MOVW_G0_S:
6583 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6584 scale = 0;
6585 goto movw_common;
6586 case BFD_RELOC_AARCH64_MOVW_G1:
6587 case BFD_RELOC_AARCH64_MOVW_G1_S:
6588 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6589 scale = 16;
6590 goto movw_common;
6591 case BFD_RELOC_AARCH64_MOVW_G2:
6592 case BFD_RELOC_AARCH64_MOVW_G2_S:
6593 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6594 scale = 32;
6595 goto movw_common;
6596 case BFD_RELOC_AARCH64_MOVW_G3:
6597 scale = 48;
6598 movw_common:
6599 if (fixP->fx_done || !seg->use_rela_p)
6600 {
6601 insn = get_aarch64_insn (buf);
6602
6603 if (!fixP->fx_done)
6604 {
6605 /* REL signed addend must fit in 16 bits */
6606 if (signed_overflow (value, 16))
6607 as_bad_where (fixP->fx_file, fixP->fx_line,
6608 _("offset out of range"));
6609 }
6610 else
6611 {
6612 /* Check for overflow and scale. */
6613 switch (fixP->fx_r_type)
6614 {
6615 case BFD_RELOC_AARCH64_MOVW_G0:
6616 case BFD_RELOC_AARCH64_MOVW_G1:
6617 case BFD_RELOC_AARCH64_MOVW_G2:
6618 case BFD_RELOC_AARCH64_MOVW_G3:
6619 if (unsigned_overflow (value, scale + 16))
6620 as_bad_where (fixP->fx_file, fixP->fx_line,
6621 _("unsigned value out of range"));
6622 break;
6623 case BFD_RELOC_AARCH64_MOVW_G0_S:
6624 case BFD_RELOC_AARCH64_MOVW_G1_S:
6625 case BFD_RELOC_AARCH64_MOVW_G2_S:
6626 /* NOTE: We can only come here with movz or movn. */
6627 if (signed_overflow (value, scale + 16))
6628 as_bad_where (fixP->fx_file, fixP->fx_line,
6629 _("signed value out of range"));
6630 if (value < 0)
6631 {
6632 /* Force use of MOVN. */
6633 value = ~value;
6634 insn = reencode_movzn_to_movn (insn);
6635 }
6636 else
6637 {
6638 /* Force use of MOVZ. */
6639 insn = reencode_movzn_to_movz (insn);
6640 }
6641 break;
6642 default:
6643 /* Unchecked relocations. */
6644 break;
6645 }
6646 value >>= scale;
6647 }
6648
6649 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6650 insn |= encode_movw_imm (value & 0xffff);
6651
6652 put_aarch64_insn (buf, insn);
6653 }
6654 break;
6655
6656 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6657 fixP->fx_r_type = (ilp32_p
6658 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6659 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6660 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6661 /* Should always be exported to object file, see
6662 aarch64_force_relocation(). */
6663 gas_assert (!fixP->fx_done);
6664 gas_assert (seg->use_rela_p);
6665 break;
6666
6667 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6668 fixP->fx_r_type = (ilp32_p
6669 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6670 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6671 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6672 /* Should always be exported to object file, see
6673 aarch64_force_relocation(). */
6674 gas_assert (!fixP->fx_done);
6675 gas_assert (seg->use_rela_p);
6676 break;
6677
6678 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6679 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6680 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6681 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6682 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6683 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6684 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6685 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6686 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6687 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6688 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6689 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6690 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6691 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6692 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6693 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6694 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6695 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6696 /* Should always be exported to object file, see
6697 aarch64_force_relocation(). */
6698 gas_assert (!fixP->fx_done);
6699 gas_assert (seg->use_rela_p);
6700 break;
6701
6702 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6703 /* Should always be exported to object file, see
6704 aarch64_force_relocation(). */
6705 fixP->fx_r_type = (ilp32_p
6706 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6707 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6708 gas_assert (!fixP->fx_done);
6709 gas_assert (seg->use_rela_p);
6710 break;
6711
6712 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6713 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6714 case BFD_RELOC_AARCH64_ADD_LO12:
6715 case BFD_RELOC_AARCH64_LDST8_LO12:
6716 case BFD_RELOC_AARCH64_LDST16_LO12:
6717 case BFD_RELOC_AARCH64_LDST32_LO12:
6718 case BFD_RELOC_AARCH64_LDST64_LO12:
6719 case BFD_RELOC_AARCH64_LDST128_LO12:
6720 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6721 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6722 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6723 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6724 /* Should always be exported to object file, see
6725 aarch64_force_relocation(). */
6726 gas_assert (!fixP->fx_done);
6727 gas_assert (seg->use_rela_p);
6728 break;
6729
6730 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6731 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6732 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6733 break;
6734
6735 case BFD_RELOC_UNUSED:
6736 /* An error will already have been reported. */
6737 break;
6738
6739 default:
6740 as_bad_where (fixP->fx_file, fixP->fx_line,
6741 _("unexpected %s fixup"),
6742 bfd_get_reloc_code_name (fixP->fx_r_type));
6743 break;
6744 }
6745
6746 apply_fix_return:
6747 /* Free the allocated struct aarch64_inst.
6748 N.B. currently only a very limited number of fix-up types actually use
6749 this field, so the impact on performance should be minimal. */
6750 if (fixP->tc_fix_data.inst != NULL)
6751 free (fixP->tc_fix_data.inst);
6752
6753 return;
6754 }
6755
6756 /* Translate internal representation of relocation info to BFD target
6757 format. */
6758
6759 arelent *
6760 tc_gen_reloc (asection * section, fixS * fixp)
6761 {
6762 arelent *reloc;
6763 bfd_reloc_code_real_type code;
6764
6765 reloc = xmalloc (sizeof (arelent));
6766
6767 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6768 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6769 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6770
6771 if (fixp->fx_pcrel)
6772 {
6773 if (section->use_rela_p)
6774 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6775 else
6776 fixp->fx_offset = reloc->address;
6777 }
6778 reloc->addend = fixp->fx_offset;
6779
6780 code = fixp->fx_r_type;
6781 switch (code)
6782 {
6783 case BFD_RELOC_16:
6784 if (fixp->fx_pcrel)
6785 code = BFD_RELOC_16_PCREL;
6786 break;
6787
6788 case BFD_RELOC_32:
6789 if (fixp->fx_pcrel)
6790 code = BFD_RELOC_32_PCREL;
6791 break;
6792
6793 case BFD_RELOC_64:
6794 if (fixp->fx_pcrel)
6795 code = BFD_RELOC_64_PCREL;
6796 break;
6797
6798 default:
6799 break;
6800 }
6801
6802 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6803 if (reloc->howto == NULL)
6804 {
6805 as_bad_where (fixp->fx_file, fixp->fx_line,
6806 _
6807 ("cannot represent %s relocation in this object file format"),
6808 bfd_get_reloc_code_name (code));
6809 return NULL;
6810 }
6811
6812 return reloc;
6813 }
6814
6815 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6816
6817 void
6818 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6819 {
6820 bfd_reloc_code_real_type type;
6821 int pcrel = 0;
6822
6823 /* Pick a reloc.
6824 FIXME: @@ Should look at CPU word size. */
6825 switch (size)
6826 {
6827 case 1:
6828 type = BFD_RELOC_8;
6829 break;
6830 case 2:
6831 type = BFD_RELOC_16;
6832 break;
6833 case 4:
6834 type = BFD_RELOC_32;
6835 break;
6836 case 8:
6837 type = BFD_RELOC_64;
6838 break;
6839 default:
6840 as_bad (_("cannot do %u-byte relocation"), size);
6841 type = BFD_RELOC_UNUSED;
6842 break;
6843 }
6844
6845 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6846 }
6847
6848 int
6849 aarch64_force_relocation (struct fix *fixp)
6850 {
6851 switch (fixp->fx_r_type)
6852 {
6853 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6854 /* Perform these "immediate" internal relocations
6855 even if the symbol is extern or weak. */
6856 return 0;
6857
6858 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6859 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6860 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6861 /* Pseudo relocs that need to be fixed up according to
6862 ilp32_p. */
6863 return 0;
6864
6865 case BFD_RELOC_AARCH64_ADD_LO12:
6866 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6867 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6868 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6869 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6870 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6871 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6872 case BFD_RELOC_AARCH64_LDST128_LO12:
6873 case BFD_RELOC_AARCH64_LDST16_LO12:
6874 case BFD_RELOC_AARCH64_LDST32_LO12:
6875 case BFD_RELOC_AARCH64_LDST64_LO12:
6876 case BFD_RELOC_AARCH64_LDST8_LO12:
6877 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6878 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6879 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6880 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6881 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6882 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6883 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6884 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6885 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6886 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6887 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6888 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6889 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6890 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6891 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6892 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6893 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6894 /* Always leave these relocations for the linker. */
6895 return 1;
6896
6897 default:
6898 break;
6899 }
6900
6901 return generic_force_reloc (fixp);
6902 }
6903
6904 #ifdef OBJ_ELF
6905
6906 const char *
6907 elf64_aarch64_target_format (void)
6908 {
6909 if (target_big_endian)
6910 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
6911 else
6912 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
6913 }
6914
6915 void
6916 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6917 {
6918 elf_frob_symbol (symp, puntp);
6919 }
6920 #endif
6921
6922 /* MD interface: Finalization. */
6923
6924 /* A good place to do this, although this was probably not intended
6925 for this kind of use. We need to dump the literal pool before
6926 references are made to a null symbol pointer. */
6927
6928 void
6929 aarch64_cleanup (void)
6930 {
6931 literal_pool *pool;
6932
6933 for (pool = list_of_pools; pool; pool = pool->next)
6934 {
6935 /* Put it at the end of the relevant section. */
6936 subseg_set (pool->section, pool->sub_section);
6937 s_ltorg (0);
6938 }
6939 }
6940
6941 #ifdef OBJ_ELF
6942 /* Remove any excess mapping symbols generated for alignment frags in
6943 SEC. We may have created a mapping symbol before a zero byte
6944 alignment; remove it if there's a mapping symbol after the
6945 alignment. */
6946 static void
6947 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6948 void *dummy ATTRIBUTE_UNUSED)
6949 {
6950 segment_info_type *seginfo = seg_info (sec);
6951 fragS *fragp;
6952
6953 if (seginfo == NULL || seginfo->frchainP == NULL)
6954 return;
6955
6956 for (fragp = seginfo->frchainP->frch_root;
6957 fragp != NULL; fragp = fragp->fr_next)
6958 {
6959 symbolS *sym = fragp->tc_frag_data.last_map;
6960 fragS *next = fragp->fr_next;
6961
6962 /* Variable-sized frags have been converted to fixed size by
6963 this point. But if this was variable-sized to start with,
6964 there will be a fixed-size frag after it. So don't handle
6965 next == NULL. */
6966 if (sym == NULL || next == NULL)
6967 continue;
6968
6969 if (S_GET_VALUE (sym) < next->fr_address)
6970 /* Not at the end of this frag. */
6971 continue;
6972 know (S_GET_VALUE (sym) == next->fr_address);
6973
6974 do
6975 {
6976 if (next->tc_frag_data.first_map != NULL)
6977 {
6978 /* Next frag starts with a mapping symbol. Discard this
6979 one. */
6980 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6981 break;
6982 }
6983
6984 if (next->fr_next == NULL)
6985 {
6986 /* This mapping symbol is at the end of the section. Discard
6987 it. */
6988 know (next->fr_fix == 0 && next->fr_var == 0);
6989 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6990 break;
6991 }
6992
6993 /* As long as we have empty frags without any mapping symbols,
6994 keep looking. */
6995 /* If the next frag is non-empty and does not start with a
6996 mapping symbol, then this mapping symbol is required. */
6997 if (next->fr_address != next->fr_next->fr_address)
6998 break;
6999
7000 next = next->fr_next;
7001 }
7002 while (next != NULL);
7003 }
7004 }
7005 #endif
7006
7007 /* Adjust the symbol table. */
7008
7009 void
7010 aarch64_adjust_symtab (void)
7011 {
7012 #ifdef OBJ_ELF
7013 /* Remove any overlapping mapping symbols generated by alignment frags. */
7014 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7015 /* Now do generic ELF adjustments. */
7016 elf_adjust_symtab ();
7017 #endif
7018 }
7019
7020 static void
7021 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7022 {
7023 const char *hash_err;
7024
7025 hash_err = hash_insert (table, key, value);
7026 if (hash_err)
7027 printf ("Internal Error: Can't hash %s\n", key);
7028 }
7029
7030 static void
7031 fill_instruction_hash_table (void)
7032 {
7033 aarch64_opcode *opcode = aarch64_opcode_table;
7034
7035 while (opcode->name != NULL)
7036 {
7037 templates *templ, *new_templ;
7038 templ = hash_find (aarch64_ops_hsh, opcode->name);
7039
7040 new_templ = (templates *) xmalloc (sizeof (templates));
7041 new_templ->opcode = opcode;
7042 new_templ->next = NULL;
7043
7044 if (!templ)
7045 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7046 else
7047 {
7048 new_templ->next = templ->next;
7049 templ->next = new_templ;
7050 }
7051 ++opcode;
7052 }
7053 }
7054
7055 static inline void
7056 convert_to_upper (char *dst, const char *src, size_t num)
7057 {
7058 unsigned int i;
7059 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7060 *dst = TOUPPER (*src);
7061 *dst = '\0';
7062 }
7063
7064 /* Assume STR points to a lower-case string; allocate, convert and return
7065 the corresponding upper-case string. */
7066 static inline const char*
7067 get_upper_str (const char *str)
7068 {
7069 char *ret;
7070 size_t len = strlen (str);
7071 if ((ret = xmalloc (len + 1)) == NULL)
7072 abort ();
7073 convert_to_upper (ret, str, len);
7074 return ret;
7075 }
7076
7077 /* MD interface: Initialization. */
7078
7079 void
7080 md_begin (void)
7081 {
7082 unsigned mach;
7083 unsigned int i;
7084
7085 if ((aarch64_ops_hsh = hash_new ()) == NULL
7086 || (aarch64_cond_hsh = hash_new ()) == NULL
7087 || (aarch64_shift_hsh = hash_new ()) == NULL
7088 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7089 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7090 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7091 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7092 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7093 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7094 || (aarch64_reg_hsh = hash_new ()) == NULL
7095 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7096 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7097 || (aarch64_pldop_hsh = hash_new ()) == NULL)
7098 as_fatal (_("virtual memory exhausted"));
7099
7100 fill_instruction_hash_table ();
7101
7102 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7103 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7104 (void *) (aarch64_sys_regs + i));
7105
7106 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7107 checked_hash_insert (aarch64_pstatefield_hsh,
7108 aarch64_pstatefields[i].name,
7109 (void *) (aarch64_pstatefields + i));
7110
7111 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
7112 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7113 aarch64_sys_regs_ic[i].template,
7114 (void *) (aarch64_sys_regs_ic + i));
7115
7116 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
7117 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7118 aarch64_sys_regs_dc[i].template,
7119 (void *) (aarch64_sys_regs_dc + i));
7120
7121 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
7122 checked_hash_insert (aarch64_sys_regs_at_hsh,
7123 aarch64_sys_regs_at[i].template,
7124 (void *) (aarch64_sys_regs_at + i));
7125
7126 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
7127 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7128 aarch64_sys_regs_tlbi[i].template,
7129 (void *) (aarch64_sys_regs_tlbi + i));
7130
7131 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7132 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7133 (void *) (reg_names + i));
7134
7135 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7136 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7137 (void *) (nzcv_names + i));
7138
7139 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7140 {
7141 const char *name = aarch64_operand_modifiers[i].name;
7142 checked_hash_insert (aarch64_shift_hsh, name,
7143 (void *) (aarch64_operand_modifiers + i));
7144 /* Also hash the name in the upper case. */
7145 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7146 (void *) (aarch64_operand_modifiers + i));
7147 }
7148
7149 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7150 {
7151 unsigned int j;
7152 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7153 the same condition code. */
7154 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7155 {
7156 const char *name = aarch64_conds[i].names[j];
7157 if (name == NULL)
7158 break;
7159 checked_hash_insert (aarch64_cond_hsh, name,
7160 (void *) (aarch64_conds + i));
7161 /* Also hash the name in the upper case. */
7162 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7163 (void *) (aarch64_conds + i));
7164 }
7165 }
7166
7167 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7168 {
7169 const char *name = aarch64_barrier_options[i].name;
7170 /* Skip xx00 - the unallocated values of option. */
7171 if ((i & 0x3) == 0)
7172 continue;
7173 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7174 (void *) (aarch64_barrier_options + i));
7175 /* Also hash the name in the upper case. */
7176 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7177 (void *) (aarch64_barrier_options + i));
7178 }
7179
7180 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7181 {
7182 const char* name = aarch64_prfops[i].name;
7183 /* Skip the unallocated hint encodings. */
7184 if (name == NULL)
7185 continue;
7186 checked_hash_insert (aarch64_pldop_hsh, name,
7187 (void *) (aarch64_prfops + i));
7188 /* Also hash the name in the upper case. */
7189 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7190 (void *) (aarch64_prfops + i));
7191 }
7192
7193 /* Set the cpu variant based on the command-line options. */
7194 if (!mcpu_cpu_opt)
7195 mcpu_cpu_opt = march_cpu_opt;
7196
7197 if (!mcpu_cpu_opt)
7198 mcpu_cpu_opt = &cpu_default;
7199
7200 cpu_variant = *mcpu_cpu_opt;
7201
7202 /* Record the CPU type. */
7203 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7204
7205 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7206 }
7207
7208 /* Command line processing. */
7209
7210 const char *md_shortopts = "m:";
7211
7212 #ifdef AARCH64_BI_ENDIAN
7213 #define OPTION_EB (OPTION_MD_BASE + 0)
7214 #define OPTION_EL (OPTION_MD_BASE + 1)
7215 #else
7216 #if TARGET_BYTES_BIG_ENDIAN
7217 #define OPTION_EB (OPTION_MD_BASE + 0)
7218 #else
7219 #define OPTION_EL (OPTION_MD_BASE + 1)
7220 #endif
7221 #endif
7222
7223 struct option md_longopts[] = {
7224 #ifdef OPTION_EB
7225 {"EB", no_argument, NULL, OPTION_EB},
7226 #endif
7227 #ifdef OPTION_EL
7228 {"EL", no_argument, NULL, OPTION_EL},
7229 #endif
7230 {NULL, no_argument, NULL, 0}
7231 };
7232
7233 size_t md_longopts_size = sizeof (md_longopts);
7234
7235 struct aarch64_option_table
7236 {
7237 char *option; /* Option name to match. */
7238 char *help; /* Help information. */
7239 int *var; /* Variable to change. */
7240 int value; /* What to change it to. */
7241 char *deprecated; /* If non-null, print this message. */
7242 };
7243
7244 static struct aarch64_option_table aarch64_opts[] = {
7245 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7246 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7247 NULL},
7248 #ifdef DEBUG_AARCH64
7249 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7250 #endif /* DEBUG_AARCH64 */
7251 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7252 NULL},
7253 {"mno-verbose-error", N_("do not output verbose error messages"),
7254 &verbose_error_p, 0, NULL},
7255 {NULL, NULL, NULL, 0, NULL}
7256 };
7257
7258 struct aarch64_cpu_option_table
7259 {
7260 char *name;
7261 const aarch64_feature_set value;
7262 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7263 case. */
7264 const char *canonical_name;
7265 };
7266
7267 /* This list should, at a minimum, contain all the cpu names
7268 recognized by GCC. */
7269 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7270 {"all", AARCH64_ANY, NULL},
7271 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7272 AARCH64_FEATURE_CRC), "Cortex-A53"},
7273 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7274 AARCH64_FEATURE_CRC), "Cortex-A57"},
7275 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7276 AARCH64_FEATURE_CRC), "Cortex-A72"},
7277 {"thunderx", AARCH64_ARCH_V8, "Cavium ThunderX"},
7278 /* The 'xgene-1' name is an older name for 'xgene1', which was used
7279 in earlier releases and is superseded by 'xgene1' in all
7280 tools. */
7281 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7282 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7283 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7284 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7285 {"generic", AARCH64_ARCH_V8, NULL},
7286
7287 {NULL, AARCH64_ARCH_NONE, NULL}
7288 };
7289
7290 struct aarch64_arch_option_table
7291 {
7292 char *name;
7293 const aarch64_feature_set value;
7294 };
7295
7296 /* This list should, at a minimum, contain all the architecture names
7297 recognized by GCC. */
7298 static const struct aarch64_arch_option_table aarch64_archs[] = {
7299 {"all", AARCH64_ANY},
7300 {"armv8-a", AARCH64_ARCH_V8},
7301 {NULL, AARCH64_ARCH_NONE}
7302 };
7303
7304 /* ISA extensions. */
7305 struct aarch64_option_cpu_value_table
7306 {
7307 char *name;
7308 const aarch64_feature_set value;
7309 };
7310
7311 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7312 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7313 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7314 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7315 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
7316 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7317 {NULL, AARCH64_ARCH_NONE}
7318 };
7319
7320 struct aarch64_long_option_table
7321 {
7322 char *option; /* Substring to match. */
7323 char *help; /* Help information. */
7324 int (*func) (char *subopt); /* Function to decode sub-option. */
7325 char *deprecated; /* If non-null, print this message. */
7326 };
7327
7328 static int
7329 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p,
7330 bfd_boolean ext_only)
7331 {
7332 /* We insist on extensions being added before being removed. We achieve
7333 this by using the ADDING_VALUE variable to indicate whether we are
7334 adding an extension (1) or removing it (0) and only allowing it to
7335 change in the order -1 -> 1 -> 0. */
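/* For example, "+crc+crypto" adds both extensions, "+simd+nofp" adds SIMD
   and then removes FP, while "+nofp+simd" is rejected because an addition
   follows a removal.  */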
7336 int adding_value = -1;
7337 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7338
7339 /* Copy the feature set, so that we can modify it. */
7340 *ext_set = **opt_p;
7341 *opt_p = ext_set;
7342
7343 while (str != NULL && *str != 0)
7344 {
7345 const struct aarch64_option_cpu_value_table *opt;
7346 char *ext = NULL;
7347 int optlen;
7348
7349 if (!ext_only)
7350 {
7351 if (*str != '+')
7352 {
7353 as_bad (_("invalid architectural extension"));
7354 return 0;
7355 }
7356
7357 ext = strchr (++str, '+');
7358 }
7359
7360 if (ext != NULL)
7361 optlen = ext - str;
7362 else
7363 optlen = strlen (str);
7364
7365 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7366 {
7367 if (adding_value != 0)
7368 adding_value = 0;
7369 optlen -= 2;
7370 str += 2;
7371 }
7372 else if (optlen > 0)
7373 {
7374 if (adding_value == -1)
7375 adding_value = 1;
7376 else if (adding_value != 1)
7377 {
7378 as_bad (_("must specify extensions to add before specifying "
7379 "those to remove"));
7380 return FALSE;
7381 }
7382 }
7383
7384 if (optlen == 0)
7385 {
7386 as_bad (_("missing architectural extension"));
7387 return 0;
7388 }
7389
7390 gas_assert (adding_value != -1);
7391
7392 for (opt = aarch64_features; opt->name != NULL; opt++)
7393 if (strncmp (opt->name, str, optlen) == 0)
7394 {
7395 /* Add or remove the extension. */
7396 if (adding_value)
7397 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7398 else
7399 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7400 break;
7401 }
7402
7403 if (opt->name == NULL)
7404 {
7405 as_bad (_("unknown architectural extension `%s'"), str);
7406 return 0;
7407 }
7408
7409 str = ext;
7410 }
7411
7412 return 1;
7413 }
7414
7415 static int
7416 aarch64_parse_cpu (char *str)
7417 {
7418 const struct aarch64_cpu_option_table *opt;
7419 char *ext = strchr (str, '+');
7420 size_t optlen;
7421
7422 if (ext != NULL)
7423 optlen = ext - str;
7424 else
7425 optlen = strlen (str);
7426
7427 if (optlen == 0)
7428 {
7429 as_bad (_("missing cpu name `%s'"), str);
7430 return 0;
7431 }
7432
7433 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7434 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7435 {
7436 mcpu_cpu_opt = &opt->value;
7437 if (ext != NULL)
7438 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7439
7440 return 1;
7441 }
7442
7443 as_bad (_("unknown cpu `%s'"), str);
7444 return 0;
7445 }
7446
7447 static int
7448 aarch64_parse_arch (char *str)
7449 {
7450 const struct aarch64_arch_option_table *opt;
7451 char *ext = strchr (str, '+');
7452 size_t optlen;
7453
7454 if (ext != NULL)
7455 optlen = ext - str;
7456 else
7457 optlen = strlen (str);
7458
7459 if (optlen == 0)
7460 {
7461 as_bad (_("missing architecture name `%s'"), str);
7462 return 0;
7463 }
7464
7465 for (opt = aarch64_archs; opt->name != NULL; opt++)
7466 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7467 {
7468 march_cpu_opt = &opt->value;
7469 if (ext != NULL)
7470 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
7471
7472 return 1;
7473 }
7474
7475 as_bad (_("unknown architecture `%s'\n"), str);
7476 return 0;
7477 }
7478
7479 /* ABIs. */
7480 struct aarch64_option_abi_value_table
7481 {
7482 char *name;
7483 enum aarch64_abi_type value;
7484 };
7485
7486 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7487 {"ilp32", AARCH64_ABI_ILP32},
7488 {"lp64", AARCH64_ABI_LP64},
7489 {NULL, 0}
7490 };
7491
7492 static int
7493 aarch64_parse_abi (char *str)
7494 {
7495 const struct aarch64_option_abi_value_table *opt;
7496 size_t optlen = strlen (str);
7497
7498 if (optlen == 0)
7499 {
7500 as_bad (_("missing abi name `%s'"), str);
7501 return 0;
7502 }
7503
7504 for (opt = aarch64_abis; opt->name != NULL; opt++)
7505 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7506 {
7507 aarch64_abi = opt->value;
7508 return 1;
7509 }
7510
7511 as_bad (_("unknown abi `%s'\n"), str);
7512 return 0;
7513 }
7514
7515 static struct aarch64_long_option_table aarch64_long_opts[] = {
7516 #ifdef OBJ_ELF
7517 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
7518 aarch64_parse_abi, NULL},
7519 #endif /* OBJ_ELF */
7520 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7521 aarch64_parse_cpu, NULL},
7522 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7523 aarch64_parse_arch, NULL},
7524 {NULL, NULL, 0, NULL}
7525 };
7526
7527 int
7528 md_parse_option (int c, char *arg)
7529 {
7530 struct aarch64_option_table *opt;
7531 struct aarch64_long_option_table *lopt;
7532
7533 switch (c)
7534 {
7535 #ifdef OPTION_EB
7536 case OPTION_EB:
7537 target_big_endian = 1;
7538 break;
7539 #endif
7540
7541 #ifdef OPTION_EL
7542 case OPTION_EL:
7543 target_big_endian = 0;
7544 break;
7545 #endif
7546
7547 case 'a':
7548 /* Listing option. Just ignore these, we don't support additional
7549 ones. */
7550 return 0;
7551
7552 default:
7553 for (opt = aarch64_opts; opt->option != NULL; opt++)
7554 {
7555 if (c == opt->option[0]
7556 && ((arg == NULL && opt->option[1] == 0)
7557 || streq (arg, opt->option + 1)))
7558 {
7559 /* If the option is deprecated, tell the user. */
7560 if (opt->deprecated != NULL)
7561 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7562 arg ? arg : "", _(opt->deprecated));
7563
7564 if (opt->var != NULL)
7565 *opt->var = opt->value;
7566
7567 return 1;
7568 }
7569 }
7570
7571 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7572 {
7573 /* These options are expected to have an argument. */
7574 if (c == lopt->option[0]
7575 && arg != NULL
7576 && strncmp (arg, lopt->option + 1,
7577 strlen (lopt->option + 1)) == 0)
7578 {
7579 /* If the option is deprecated, tell the user. */
7580 if (lopt->deprecated != NULL)
7581 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7582 _(lopt->deprecated));
7583
7584 /* Call the sub-option parser. */
7585 return lopt->func (arg + strlen (lopt->option) - 1);
7586 }
7587 }
7588
7589 return 0;
7590 }
7591
7592 return 1;
7593 }
7594
7595 void
7596 md_show_usage (FILE * fp)
7597 {
7598 struct aarch64_option_table *opt;
7599 struct aarch64_long_option_table *lopt;
7600
7601 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7602
7603 for (opt = aarch64_opts; opt->option != NULL; opt++)
7604 if (opt->help != NULL)
7605 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7606
7607 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7608 if (lopt->help != NULL)
7609 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7610
7611 #ifdef OPTION_EB
7612 fprintf (fp, _("\
7613 -EB assemble code for a big-endian cpu\n"));
7614 #endif
7615
7616 #ifdef OPTION_EL
7617 fprintf (fp, _("\
7618 -EL assemble code for a little-endian cpu\n"));
7619 #endif
7620 }
7621
7622 /* Parse a .cpu directive. */
7623
7624 static void
7625 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7626 {
7627 const struct aarch64_cpu_option_table *opt;
7628 char saved_char;
7629 char *name;
7630 char *ext;
7631 size_t optlen;
7632
7633 name = input_line_pointer;
7634 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7635 input_line_pointer++;
7636 saved_char = *input_line_pointer;
7637 *input_line_pointer = 0;
7638
7639 ext = strchr (name, '+');
7640
7641 if (ext != NULL)
7642 optlen = ext - name;
7643 else
7644 optlen = strlen (name);
7645
7646 /* Skip the first "all" entry. */
7647 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7648 if (strlen (opt->name) == optlen
7649 && strncmp (name, opt->name, optlen) == 0)
7650 {
7651 mcpu_cpu_opt = &opt->value;
7652 if (ext != NULL)
7653 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7654 return;
7655
7656 cpu_variant = *mcpu_cpu_opt;
7657
7658 *input_line_pointer = saved_char;
7659 demand_empty_rest_of_line ();
7660 return;
7661 }
7662 as_bad (_("unknown cpu `%s'"), name);
7663 *input_line_pointer = saved_char;
7664 ignore_rest_of_line ();
7665 }
7666
7667
7668 /* Parse a .arch directive. */
7669
7670 static void
7671 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7672 {
7673 const struct aarch64_arch_option_table *opt;
7674 char saved_char;
7675 char *name;
7676 char *ext;
7677 size_t optlen;
7678
7679 name = input_line_pointer;
7680 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7681 input_line_pointer++;
7682 saved_char = *input_line_pointer;
7683 *input_line_pointer = 0;
7684
7685 ext = strchr (name, '+');
7686
7687 if (ext != NULL)
7688 optlen = ext - name;
7689 else
7690 optlen = strlen (name);
7691
7692 /* Skip the first "all" entry. */
7693 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7694 if (strlen (opt->name) == optlen
7695 && strncmp (name, opt->name, optlen) == 0)
7696 {
7697 mcpu_cpu_opt = &opt->value;
7698 if (ext != NULL)
7699 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7700 return;
7701
7702 cpu_variant = *mcpu_cpu_opt;
7703
7704 *input_line_pointer = saved_char;
7705 demand_empty_rest_of_line ();
7706 return;
7707 }
7708
7709 as_bad (_("unknown architecture `%s'\n"), name);
7710 *input_line_pointer = saved_char;
7711 ignore_rest_of_line ();
7712 }
7713
7714 /* Parse a .arch_extension directive. */
7715
7716 static void
7717 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
7718 {
7719 char saved_char;
7720 char *ext = input_line_pointer;
7721
7722 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7723 input_line_pointer++;
7724 saved_char = *input_line_pointer;
7725 *input_line_pointer = 0;
7726
7727 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
7728 return;
7729
7730 cpu_variant = *mcpu_cpu_opt;
7731
7732 *input_line_pointer = saved_char;
7733 demand_empty_rest_of_line ();
7734 }
7735
7736 /* Copy symbol information. */
7737
7738 void
7739 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7740 {
7741 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7742 }