Add ADR :tlsgd: directive and TLSGD_ADR_PREL21 support.
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum neon_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q
87 };
88
89 /* Bits for DEFINED field in neon_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92
93 struct neon_type_el
94 {
95 enum neon_el_type type;
96 unsigned char defined;
97 unsigned width;
98 int64_t index;
99 };
100
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
102
103 struct reloc
104 {
105 bfd_reloc_code_real_type type;
106 expressionS exp;
107 int pc_rel;
108 enum aarch64_opnd opnd;
109 uint32_t flags;
110 unsigned need_libopcodes_p : 1;
111 };
112
113 struct aarch64_instruction
114 {
115 /* libopcodes structure for instruction intermediate representation. */
116 aarch64_inst base;
117 /* Record assembly errors found during the parsing. */
118 struct
119 {
120 enum aarch64_operand_error_kind kind;
121 const char *error;
122 } parsing_error;
123 /* The condition that appears in the assembly line. */
124 int cond;
125 /* Relocation information (including the GAS internal fixup). */
126 struct reloc reloc;
127 /* Need to generate an immediate in the literal pool. */
128 unsigned gen_lit_pool : 1;
129 };
130
131 typedef struct aarch64_instruction aarch64_instruction;
132
133 static aarch64_instruction inst;
134
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
138 /* Diagnostics inline function utilities.
139 
140 These are lightweight utilities which should only be called by parse_operands
141 and other parsers. GAS processes each assembly line by parsing it against
142 instruction template(s); in the case of multiple templates (for the same
143 mnemonic name), those templates are tried one by one until one succeeds or
144 all fail. An assembly line may fail a few templates before being
145 successfully parsed; an error saved here in most cases is not a user error
146 but an error indicating the current template is not the right template.
147 Therefore it is very important that errors can be saved at a low cost during
148 the parsing; we don't want to slow down the whole parsing by recording
149 non-user errors in detail.
150
151 Remember that the objective is to help GAS pick up the most appropriate
152 error message in the case of multiple templates, e.g. FMOV which has 8
153 templates. */
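/* A rough sketch of how these helpers are meant to be used, based on the
   description above (illustrative only; the real template loop lives in
   md_assemble later in this file):

     for each candidate template of the mnemonic:
       clear_error ();
       if parse_operands fails, it records a cheap diagnostic through
       set_default_error / set_syntax_error / first_error, keeping only
       the first error set for the attempt;
     if no template matched, the most appropriate recorded message is
     finally reported through as_bad.  */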
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline void
175 set_error_message (const char *error)
176 {
177 inst.parsing_error.error = error;
178 }
179
180 static inline enum aarch64_operand_error_kind
181 get_error_kind (void)
182 {
183 return inst.parsing_error.kind;
184 }
185
186 static inline void
187 set_error_kind (enum aarch64_operand_error_kind kind)
188 {
189 inst.parsing_error.kind = kind;
190 }
191
192 static inline void
193 set_error (enum aarch64_operand_error_kind kind, const char *error)
194 {
195 inst.parsing_error.kind = kind;
196 inst.parsing_error.error = error;
197 }
198
199 static inline void
200 set_recoverable_error (const char *error)
201 {
202 set_error (AARCH64_OPDE_RECOVERABLE, error);
203 }
204
205 /* Use the DESC field of the corresponding aarch64_operand entry to compose
206 the error message. */
207 static inline void
208 set_default_error (void)
209 {
210 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
211 }
212
213 static inline void
214 set_syntax_error (const char *error)
215 {
216 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
217 }
218
219 static inline void
220 set_first_syntax_error (const char *error)
221 {
222 if (! error_p ())
223 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
224 }
225
226 static inline void
227 set_fatal_syntax_error (const char *error)
228 {
229 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
230 }
231 \f
232 /* Number of littlenums required to hold an extended precision number. */
233 #define MAX_LITTLENUMS 6
234
235 /* Return value for certain parsers when the parsing fails; those parsers
236 return the information of the parsed result, e.g. register number, on
237 success. */
238 #define PARSE_FAIL -1
239
240 /* This is an invalid condition code that means no conditional field is
241 present. */
242 #define COND_ALWAYS 0x10
243
244 typedef struct
245 {
246 const char *template;
247 unsigned long value;
248 } asm_barrier_opt;
249
250 typedef struct
251 {
252 const char *template;
253 uint32_t value;
254 } asm_nzcv;
255
256 struct reloc_entry
257 {
258 char *name;
259 bfd_reloc_code_real_type reloc;
260 };
261
262 /* Structure for a hash table entry for a register. */
263 typedef struct
264 {
265 const char *name;
266 unsigned char number;
267 unsigned char type;
268 unsigned char builtin;
269 } reg_entry;
270
271 /* Macros to define the register types and masks for the purpose
272 of parsing. */
273
274 #undef AARCH64_REG_TYPES
275 #define AARCH64_REG_TYPES \
276 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
277 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
278 BASIC_REG_TYPE(SP_32) /* wsp */ \
279 BASIC_REG_TYPE(SP_64) /* sp */ \
280 BASIC_REG_TYPE(Z_32) /* wzr */ \
281 BASIC_REG_TYPE(Z_64) /* xzr */ \
282 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
283 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
284 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
285 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
286 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
287 BASIC_REG_TYPE(CN) /* c[0-7] */ \
288 BASIC_REG_TYPE(VN) /* v[0-31] */ \
289 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
290 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
291 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
292 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
293 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Typecheck: any [BHSDQ]P FP. */ \
296 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
297 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
298 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
299 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
300 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
301 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
302 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
303 /* Any integer register; used for error messages only. */ \
304 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
306 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
307 /* Pseudo type to mark the end of the enumerator sequence. */ \
308 BASIC_REG_TYPE(MAX)
309
310 #undef BASIC_REG_TYPE
311 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
312 #undef MULTI_REG_TYPE
313 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
314
315 /* Register type enumerators. */
316 typedef enum
317 {
318 /* A list of REG_TYPE_*. */
319 AARCH64_REG_TYPES
320 } aarch64_reg_type;
321
322 #undef BASIC_REG_TYPE
323 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
324 #undef REG_TYPE
325 #define REG_TYPE(T) (1 << REG_TYPE_##T)
326 #undef MULTI_REG_TYPE
327 #define MULTI_REG_TYPE(T,V) V,
328
329 /* Values indexed by aarch64_reg_type to assist the type checking. */
330 static const unsigned reg_type_masks[] =
331 {
332 AARCH64_REG_TYPES
333 };
334
335 #undef BASIC_REG_TYPE
336 #undef REG_TYPE
337 #undef MULTI_REG_TYPE
338 #undef AARCH64_REG_TYPES
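/* For example, with the expansions above a basic entry owns just its own
   bit, while a multi-type entry such as R64_SP collects the bits of its
   members:

     reg_type_masks[REG_TYPE_R_64]   == (1 << REG_TYPE_R_64)
     reg_type_masks[REG_TYPE_R64_SP] == (1 << REG_TYPE_R_64)
                                        | (1 << REG_TYPE_SP_64)

   For the multi-type cases, aarch64_check_reg_type below relies on this:
   a parsed register of type T is acceptable for an expected type E when
   (reg_type_masks[T] & reg_type_masks[E]) == reg_type_masks[T].  */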
339
340 /* Diagnostics used when we don't get a register of the expected type.
341 Note: this has to be synchronized with aarch64_reg_type definitions
342 above. */
343 static const char *
344 get_reg_expected_msg (aarch64_reg_type reg_type)
345 {
346 const char *msg;
347
348 switch (reg_type)
349 {
350 case REG_TYPE_R_32:
351 msg = N_("integer 32-bit register expected");
352 break;
353 case REG_TYPE_R_64:
354 msg = N_("integer 64-bit register expected");
355 break;
356 case REG_TYPE_R_N:
357 msg = N_("integer register expected");
358 break;
359 case REG_TYPE_R_Z_SP:
360 msg = N_("integer, zero or SP register expected");
361 break;
362 case REG_TYPE_FP_B:
363 msg = N_("8-bit SIMD scalar register expected");
364 break;
365 case REG_TYPE_FP_H:
366 msg = N_("16-bit SIMD scalar or floating-point half precision "
367 "register expected");
368 break;
369 case REG_TYPE_FP_S:
370 msg = N_("32-bit SIMD scalar or floating-point single precision "
371 "register expected");
372 break;
373 case REG_TYPE_FP_D:
374 msg = N_("64-bit SIMD scalar or floating-point double precision "
375 "register expected");
376 break;
377 case REG_TYPE_FP_Q:
378 msg = N_("128-bit SIMD scalar or floating-point quad precision "
379 "register expected");
380 break;
381 case REG_TYPE_CN:
382 msg = N_("C0 - C15 expected");
383 break;
384 case REG_TYPE_R_Z_BHSDQ_V:
385 msg = N_("register expected");
386 break;
387 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
388 msg = N_("SIMD scalar or floating-point register expected");
389 break;
390 case REG_TYPE_VN: /* any V reg */
391 msg = N_("vector register expected");
392 break;
393 default:
394 as_fatal (_("invalid register type %d"), reg_type);
395 }
396 return msg;
397 }
398
399 /* Some well known registers that we refer to directly elsewhere. */
400 #define REG_SP 31
401
402 /* Instructions take 4 bytes in the object file. */
403 #define INSN_SIZE 4
404
405 /* Define some common error messages. */
406 #define BAD_SP _("SP not allowed here")
407
408 static struct hash_control *aarch64_ops_hsh;
409 static struct hash_control *aarch64_cond_hsh;
410 static struct hash_control *aarch64_shift_hsh;
411 static struct hash_control *aarch64_sys_regs_hsh;
412 static struct hash_control *aarch64_pstatefield_hsh;
413 static struct hash_control *aarch64_sys_regs_ic_hsh;
414 static struct hash_control *aarch64_sys_regs_dc_hsh;
415 static struct hash_control *aarch64_sys_regs_at_hsh;
416 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
417 static struct hash_control *aarch64_reg_hsh;
418 static struct hash_control *aarch64_barrier_opt_hsh;
419 static struct hash_control *aarch64_nzcv_hsh;
420 static struct hash_control *aarch64_pldop_hsh;
421
422 /* Stuff needed to resolve the label ambiguity
423 As:
424 ...
425 label: <insn>
426 may differ from:
427 ...
428 label:
429 <insn> */
430
431 static symbolS *last_label_seen;
432
433 /* Literal pool structure. Held on a per-section
434 and per-sub-section basis. */
435
436 #define MAX_LITERAL_POOL_SIZE 1024
437 typedef struct literal_expression
438 {
439 expressionS exp;
440 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
441 LITTLENUM_TYPE * bignum;
442 } literal_expression;
443
444 typedef struct literal_pool
445 {
446 literal_expression literals[MAX_LITERAL_POOL_SIZE];
447 unsigned int next_free_entry;
448 unsigned int id;
449 symbolS *symbol;
450 segT section;
451 subsegT sub_section;
452 int size;
453 struct literal_pool *next;
454 } literal_pool;
455
456 /* Pointer to a linked list of literal pools. */
457 static literal_pool *list_of_pools = NULL;
458 \f
459 /* Pure syntax. */
460
461 /* This array holds the chars that always start a comment. If the
462 pre-processor is disabled, these aren't very useful. */
463 const char comment_chars[] = "";
464
465 /* This array holds the chars that only start a comment at the beginning of
466 a line. If the line seems to have the form '# 123 filename'
467 .line and .file directives will appear in the pre-processed output. */
468 /* Note that input_file.c hand checks for '#' at the beginning of the
469 first line of the input file. This is because the compiler outputs
470 #NO_APP at the beginning of its output. */
471 /* Also note that comments like this one will always work. */
472 const char line_comment_chars[] = "#";
473
474 const char line_separator_chars[] = ";";
475
476 /* Chars that can be used to separate mant
477 from exp in floating point numbers. */
478 const char EXP_CHARS[] = "eE";
479
480 /* Chars that mean this number is a floating point constant. */
481 /* As in 0f12.456 */
482 /* or 0d1.2345e12 */
483
484 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
485
486 /* Prefix character that indicates the start of an immediate value. */
487 #define is_immediate_prefix(C) ((C) == '#')
488
489 /* Separator character handling. */
490
491 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
492
493 static inline bfd_boolean
494 skip_past_char (char **str, char c)
495 {
496 if (**str == c)
497 {
498 (*str)++;
499 return TRUE;
500 }
501 else
502 return FALSE;
503 }
504
505 #define skip_past_comma(str) skip_past_char (str, ',')
506
507 /* Arithmetic expressions (possibly involving symbols). */
508
509 static bfd_boolean in_my_get_expression_p = FALSE;
510
511 /* Third argument to my_get_expression. */
512 #define GE_NO_PREFIX 0
513 #define GE_OPT_PREFIX 1
514
515 /* Return TRUE if the string pointed to by *STR is successfully parsed
516 as a valid expression; *EP will be filled with the information of
517 such an expression. Otherwise return FALSE. */
518
519 static bfd_boolean
520 my_get_expression (expressionS * ep, char **str, int prefix_mode,
521 int reject_absent)
522 {
523 char *save_in;
524 segT seg;
525 int prefix_present_p = 0;
526
527 switch (prefix_mode)
528 {
529 case GE_NO_PREFIX:
530 break;
531 case GE_OPT_PREFIX:
532 if (is_immediate_prefix (**str))
533 {
534 (*str)++;
535 prefix_present_p = 1;
536 }
537 break;
538 default:
539 abort ();
540 }
541
542 memset (ep, 0, sizeof (expressionS));
543
544 save_in = input_line_pointer;
545 input_line_pointer = *str;
546 in_my_get_expression_p = TRUE;
547 seg = expression (ep);
548 in_my_get_expression_p = FALSE;
549
550 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
551 {
552 /* We found a bad expression in md_operand(). */
553 *str = input_line_pointer;
554 input_line_pointer = save_in;
555 if (prefix_present_p && ! error_p ())
556 set_fatal_syntax_error (_("bad expression"));
557 else
558 set_first_syntax_error (_("bad expression"));
559 return FALSE;
560 }
561
562 #ifdef OBJ_AOUT
563 if (seg != absolute_section
564 && seg != text_section
565 && seg != data_section
566 && seg != bss_section && seg != undefined_section)
567 {
568 set_syntax_error (_("bad segment"));
569 *str = input_line_pointer;
570 input_line_pointer = save_in;
571 return FALSE;
572 }
573 #else
574 (void) seg;
575 #endif
576
577 *str = input_line_pointer;
578 input_line_pointer = save_in;
579 return TRUE;
580 }
581
582 /* Turn a string in input_line_pointer into a floating point constant
583 of type TYPE, and store the appropriate bytes in *LITP. The number
584 of LITTLENUMS emitted is stored in *SIZEP. An error message is
585 returned, or NULL on OK. */
586
587 char *
588 md_atof (int type, char *litP, int *sizeP)
589 {
590 return ieee_md_atof (type, litP, sizeP, target_big_endian);
591 }
592
593 /* We handle all bad expressions here, so that we can report the faulty
594 instruction in the error message. */
595 void
596 md_operand (expressionS * exp)
597 {
598 if (in_my_get_expression_p)
599 exp->X_op = O_illegal;
600 }
601
602 /* Immediate values. */
603
604 /* Errors may be set multiple times during parsing or bit encoding
605 (particularly in the Neon bits), but usually the earliest error which is set
606 will be the most meaningful. Avoid overwriting it with later (cascading)
607 errors by calling this function. */
608
609 static void
610 first_error (const char *error)
611 {
612 if (! error_p ())
613 set_syntax_error (error);
614 }
615
616 /* Similar to first_error, but this function accepts a formatted error
617 message. */
618 static void
619 first_error_fmt (const char *format, ...)
620 {
621 va_list args;
622 enum
623 { size = 100 };
624 /* N.B. this single buffer will not cause error messages for different
625 instructions to pollute each other; this is because at the end of
626 processing of each assembly line, the error message, if any, will be
627 collected by as_bad. */
628 static char buffer[size];
629
630 if (! error_p ())
631 {
632 int ret ATTRIBUTE_UNUSED;
633 va_start (args, format);
634 ret = vsnprintf (buffer, size, format, args);
635 know (ret <= size - 1 && ret >= 0);
636 va_end (args);
637 set_syntax_error (buffer);
638 }
639 }
640
641 /* Register parsing. */
642
643 /* Generic register parser which is called by other specialized
644 register parsers.
645 CCP points to what should be the beginning of a register name.
646 If it is indeed a valid register name, advance CCP over it and
647 return the reg_entry structure; otherwise return NULL.
648 It does not issue diagnostics. */
649
650 static reg_entry *
651 parse_reg (char **ccp)
652 {
653 char *start = *ccp;
654 char *p;
655 reg_entry *reg;
656
657 #ifdef REGISTER_PREFIX
658 if (*start != REGISTER_PREFIX)
659 return NULL;
660 start++;
661 #endif
662
663 p = start;
664 if (!ISALPHA (*p) || !is_name_beginner (*p))
665 return NULL;
666
667 do
668 p++;
669 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
670
671 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
672
673 if (!reg)
674 return NULL;
675
676 *ccp = p;
677 return reg;
678 }
679
680 /* Return TRUE if REG->TYPE is compatible with the required register type
681 TYPE; otherwise return FALSE. */
682 static bfd_boolean
683 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
684 {
685 if (reg->type == type)
686 return TRUE;
687
688 switch (type)
689 {
690 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
691 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
692 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
693 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
694 case REG_TYPE_VN: /* Vector register. */
695 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
696 return ((reg_type_masks[reg->type] & reg_type_masks[type])
697 == reg_type_masks[reg->type]);
698 default:
699 as_fatal ("unhandled type %d", type);
700 abort ();
701 }
702 }
703
704 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
705 Return the register number otherwise. *ISREG32 is set to one if the
706 register is 32-bit wide; *ISREGZERO is set to one if the register is
707 of type Z_32 or Z_64.
708 Note that this function does not issue any diagnostics. */
709
710 static int
711 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
712 int *isreg32, int *isregzero)
713 {
714 char *str = *ccp;
715 const reg_entry *reg = parse_reg (&str);
716
717 if (reg == NULL)
718 return PARSE_FAIL;
719
720 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
721 return PARSE_FAIL;
722
723 switch (reg->type)
724 {
725 case REG_TYPE_SP_32:
726 case REG_TYPE_SP_64:
727 if (reject_sp)
728 return PARSE_FAIL;
729 *isreg32 = reg->type == REG_TYPE_SP_32;
730 *isregzero = 0;
731 break;
732 case REG_TYPE_R_32:
733 case REG_TYPE_R_64:
734 *isreg32 = reg->type == REG_TYPE_R_32;
735 *isregzero = 0;
736 break;
737 case REG_TYPE_Z_32:
738 case REG_TYPE_Z_64:
739 if (reject_rz)
740 return PARSE_FAIL;
741 *isreg32 = reg->type == REG_TYPE_Z_32;
742 *isregzero = 1;
743 break;
744 default:
745 return PARSE_FAIL;
746 }
747
748 *ccp = str;
749
750 return reg->number;
751 }
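/* For example, assuming the usual built-in register table:
     "w3"  -> returns 3,  *ISREG32 = 1, *ISREGZERO = 0
     "xzr" -> returns 31, *ISREG32 = 0, *ISREGZERO = 1
              (PARSE_FAIL instead when REJECT_RZ is non-zero)
     "sp"  -> returns 31, *ISREG32 = 0, *ISREGZERO = 0
              (PARSE_FAIL instead when REJECT_SP is non-zero)
   Anything outside the W/X/WSP/SP/WZR/XZR family yields PARSE_FAIL.  */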
752
753 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
754 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
755 otherwise return FALSE.
756
757 Accept only one occurrence of:
758 8b 16b 4h 8h 2s 4s 1d 2d
759 b h s d q */
760 static bfd_boolean
761 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
762 {
763 char *ptr = *str;
764 unsigned width;
765 unsigned element_size;
766 enum neon_el_type type;
767
768 /* skip '.' */
769 ptr++;
770
771 if (!ISDIGIT (*ptr))
772 {
773 width = 0;
774 goto elt_size;
775 }
776 width = strtoul (ptr, &ptr, 10);
777 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
778 {
779 first_error_fmt (_("bad size %d in vector width specifier"), width);
780 return FALSE;
781 }
782
783 elt_size:
784 switch (TOLOWER (*ptr))
785 {
786 case 'b':
787 type = NT_b;
788 element_size = 8;
789 break;
790 case 'h':
791 type = NT_h;
792 element_size = 16;
793 break;
794 case 's':
795 type = NT_s;
796 element_size = 32;
797 break;
798 case 'd':
799 type = NT_d;
800 element_size = 64;
801 break;
802 case 'q':
803 if (width == 1)
804 {
805 type = NT_q;
806 element_size = 128;
807 break;
808 }
809 /* fall through. */
810 default:
811 if (*ptr != '\0')
812 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
813 else
814 first_error (_("missing element size"));
815 return FALSE;
816 }
817 if (width != 0 && width * element_size != 64 && width * element_size != 128)
818 {
819 first_error_fmt (_
820 ("invalid element size %d and vector size combination %c"),
821 width, *ptr);
822 return FALSE;
823 }
824 ptr++;
825
826 parsed_type->type = type;
827 parsed_type->width = width;
828
829 *str = ptr;
830
831 return TRUE;
832 }
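/* A few examples for the routine above:
     ".8b"  -> type NT_b, width 8   (8 x 8  = 64-bit vector)
     ".4s"  -> type NT_s, width 4   (4 x 32 = 128-bit vector)
     ".s"   -> type NT_s, width 0   (element size only, e.g. before [index])
     ".2h"  -> rejected: 2 x 16 = 32 bits is neither 64 nor 128
     ".3s"  -> rejected: 3 is not a valid vector width specifier  */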
833
834 /* Parse a single type, e.g. ".8b", leading period included.
835 Only applicable to Vn registers.
836
837 Return TRUE on success; otherwise return FALSE. */
838 static bfd_boolean
839 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
840 {
841 char *str = *ccp;
842
843 if (*str == '.')
844 {
845 if (! parse_neon_type_for_operand (vectype, &str))
846 {
847 first_error (_("vector type expected"));
848 return FALSE;
849 }
850 }
851 else
852 return FALSE;
853
854 *ccp = str;
855
856 return TRUE;
857 }
858
859 /* Parse a register of the type TYPE.
860
861 Return PARSE_FAIL if the string pointed to by *CCP is not a valid register
862 name or the parsed register is not of TYPE.
863
864 Otherwise return the register number, and optionally fill in the actual
865 type of the register in *RTYPE when multiple alternatives were given, and
866 return the register shape and element index information in *TYPEINFO.
867
868 IN_REG_LIST should be set to TRUE if the caller is parsing a register
869 list. */
870
871 static int
872 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
873 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
874 {
875 char *str = *ccp;
876 const reg_entry *reg = parse_reg (&str);
877 struct neon_type_el atype;
878 struct neon_type_el parsetype;
879 bfd_boolean is_typed_vecreg = FALSE;
880
881 atype.defined = 0;
882 atype.type = NT_invtype;
883 atype.width = -1;
884 atype.index = 0;
885
886 if (reg == NULL)
887 {
888 if (typeinfo)
889 *typeinfo = atype;
890 set_default_error ();
891 return PARSE_FAIL;
892 }
893
894 if (! aarch64_check_reg_type (reg, type))
895 {
896 DEBUG_TRACE ("reg type check failed");
897 set_default_error ();
898 return PARSE_FAIL;
899 }
900 type = reg->type;
901
902 if (type == REG_TYPE_VN
903 && parse_neon_operand_type (&parsetype, &str))
904 {
905 /* Register is of the form Vn.[bhsdq]. */
906 is_typed_vecreg = TRUE;
907
908 if (parsetype.width == 0)
909 /* Expect index. In the new scheme we cannot have
910 Vn.[bhsdq] represent a scalar. Therefore any
911 Vn.[bhsdq] should have an index following it.
912 Except in reglists, of course. */
913 atype.defined |= NTA_HASINDEX;
914 else
915 atype.defined |= NTA_HASTYPE;
916
917 atype.type = parsetype.type;
918 atype.width = parsetype.width;
919 }
920
921 if (skip_past_char (&str, '['))
922 {
923 expressionS exp;
924
925 /* Reject Sn[index] syntax. */
926 if (!is_typed_vecreg)
927 {
928 first_error (_("this type of register can't be indexed"));
929 return PARSE_FAIL;
930 }
931
932 if (in_reg_list == TRUE)
933 {
934 first_error (_("index not allowed inside register list"));
935 return PARSE_FAIL;
936 }
937
938 atype.defined |= NTA_HASINDEX;
939
940 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
941
942 if (exp.X_op != O_constant)
943 {
944 first_error (_("constant expression required"));
945 return PARSE_FAIL;
946 }
947
948 if (! skip_past_char (&str, ']'))
949 return PARSE_FAIL;
950
951 atype.index = exp.X_add_number;
952 }
953 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
954 {
955 /* Indexed vector register expected. */
956 first_error (_("indexed vector register expected"));
957 return PARSE_FAIL;
958 }
959
960 /* A vector reg Vn should be typed or indexed. */
961 if (type == REG_TYPE_VN && atype.defined == 0)
962 {
963 first_error (_("invalid use of vector register"));
964 }
965
966 if (typeinfo)
967 *typeinfo = atype;
968
969 if (rtype)
970 *rtype = type;
971
972 *ccp = str;
973
974 return reg->number;
975 }
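/* Examples (with TYPE == REG_TYPE_VN and IN_REG_LIST == FALSE):
     "v2.4s"   -> returns 2, *TYPEINFO has NTA_HASTYPE, NT_s, width 4
     "v7.s[1]" -> returns 7, *TYPEINFO has NTA_HASINDEX, NT_s, index 1
     "v5"      -> flagged: a plain Vn must be typed or indexed
   Inside a register list an index on the individual register is rejected
   and an element size without an index is allowed, so that forms like
   {v4.s-v7.s}[2] can be handled by the list parser below.  */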
976
977 /* Parse register.
978
979 Return the register number on success; return PARSE_FAIL otherwise.
980
981 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
982 the register (e.g. NEON double or quad reg when either has been requested).
983
984 If this is a NEON vector register with additional type information, fill
985 in the struct pointed to by VECTYPE (if non-NULL).
986
987 This parser does not handle register lists. */
988
989 static int
990 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
991 aarch64_reg_type *rtype, struct neon_type_el *vectype)
992 {
993 struct neon_type_el atype;
994 char *str = *ccp;
995 int reg = parse_typed_reg (&str, type, rtype, &atype,
996 /*in_reg_list= */ FALSE);
997
998 if (reg == PARSE_FAIL)
999 return PARSE_FAIL;
1000
1001 if (vectype)
1002 *vectype = atype;
1003
1004 *ccp = str;
1005
1006 return reg;
1007 }
1008
1009 static inline bfd_boolean
1010 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
1011 {
1012 return
1013 e1.type == e2.type
1014 && e1.defined == e2.defined
1015 && e1.width == e2.width && e1.index == e2.index;
1016 }
1017
1018 /* This function parses the NEON register list. On success, it returns
1019 the parsed register list information in the following encoded format:
1020
1021 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1022 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_regs - 1
1023
1024 The information of the register shape and/or index is returned in
1025 *VECTYPE.
1026
1027 It returns PARSE_FAIL if the register list is invalid.
1028
1029 The list contains one to four registers.
1030 Each register can be one of:
1031 <Vt>.<T>[<index>]
1032 <Vt>.<T>
1033 All <T> should be identical.
1034 All <index> should be identical.
1035 There are restrictions on <Vt> numbers which are checked later
1036 (by reg_list_valid_p). */
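/* A worked example of the encoding described above: for "{v1.8b, v2.8b}"
   the register numbers 1 and 2 are packed as
     ret_val = (1 << 0) | (2 << 5) = 0x41
   and the function returns
     (0x41 << 2) | (2 - 1) = 0x105
   while *VECTYPE records the 8B shape shared by the list.  */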
1037
1038 static int
1039 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1040 {
1041 char *str = *ccp;
1042 int nb_regs;
1043 struct neon_type_el typeinfo, typeinfo_first;
1044 int val, val_range;
1045 int in_range;
1046 int ret_val;
1047 int i;
1048 bfd_boolean error = FALSE;
1049 bfd_boolean expect_index = FALSE;
1050
1051 if (*str != '{')
1052 {
1053 set_syntax_error (_("expecting {"));
1054 return PARSE_FAIL;
1055 }
1056 str++;
1057
1058 nb_regs = 0;
1059 typeinfo_first.defined = 0;
1060 typeinfo_first.type = NT_invtype;
1061 typeinfo_first.width = -1;
1062 typeinfo_first.index = 0;
1063 ret_val = 0;
1064 val = -1;
1065 val_range = -1;
1066 in_range = 0;
1067 do
1068 {
1069 if (in_range)
1070 {
1071 str++; /* skip over '-' */
1072 val_range = val;
1073 }
1074 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1075 /*in_reg_list= */ TRUE);
1076 if (val == PARSE_FAIL)
1077 {
1078 set_first_syntax_error (_("invalid vector register in list"));
1079 error = TRUE;
1080 continue;
1081 }
1082 /* reject [bhsd]n */
1083 if (typeinfo.defined == 0)
1084 {
1085 set_first_syntax_error (_("invalid scalar register in list"));
1086 error = TRUE;
1087 continue;
1088 }
1089
1090 if (typeinfo.defined & NTA_HASINDEX)
1091 expect_index = TRUE;
1092
1093 if (in_range)
1094 {
1095 if (val < val_range)
1096 {
1097 set_first_syntax_error
1098 (_("invalid range in vector register list"));
1099 error = TRUE;
1100 }
1101 val_range++;
1102 }
1103 else
1104 {
1105 val_range = val;
1106 if (nb_regs == 0)
1107 typeinfo_first = typeinfo;
1108 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1109 {
1110 set_first_syntax_error
1111 (_("type mismatch in vector register list"));
1112 error = TRUE;
1113 }
1114 }
1115 if (! error)
1116 for (i = val_range; i <= val; i++)
1117 {
1118 ret_val |= i << (5 * nb_regs);
1119 nb_regs++;
1120 }
1121 in_range = 0;
1122 }
1123 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1124
1125 skip_whitespace (str);
1126 if (*str != '}')
1127 {
1128 set_first_syntax_error (_("end of vector register list not found"));
1129 error = TRUE;
1130 }
1131 str++;
1132
1133 skip_whitespace (str);
1134
1135 if (expect_index)
1136 {
1137 if (skip_past_char (&str, '['))
1138 {
1139 expressionS exp;
1140
1141 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1142 if (exp.X_op != O_constant)
1143 {
1144 set_first_syntax_error (_("constant expression required."));
1145 error = TRUE;
1146 }
1147 if (! skip_past_char (&str, ']'))
1148 error = TRUE;
1149 else
1150 typeinfo_first.index = exp.X_add_number;
1151 }
1152 else
1153 {
1154 set_first_syntax_error (_("expected index"));
1155 error = TRUE;
1156 }
1157 }
1158
1159 if (nb_regs > 4)
1160 {
1161 set_first_syntax_error (_("too many registers in vector register list"));
1162 error = TRUE;
1163 }
1164 else if (nb_regs == 0)
1165 {
1166 set_first_syntax_error (_("empty vector register list"));
1167 error = TRUE;
1168 }
1169
1170 *ccp = str;
1171 if (! error)
1172 *vectype = typeinfo_first;
1173
1174 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1175 }
1176
1177 /* Directives: register aliases. */
1178
1179 static reg_entry *
1180 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1181 {
1182 reg_entry *new;
1183 const char *name;
1184
1185 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1186 {
1187 if (new->builtin)
1188 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1189 str);
1190
1191 /* Only warn about a redefinition if it's not defined as the
1192 same register. */
1193 else if (new->number != number || new->type != type)
1194 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1195
1196 return NULL;
1197 }
1198
1199 name = xstrdup (str);
1200 new = xmalloc (sizeof (reg_entry));
1201
1202 new->name = name;
1203 new->number = number;
1204 new->type = type;
1205 new->builtin = FALSE;
1206
1207 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1208 abort ();
1209
1210 return new;
1211 }
1212
1213 /* Look for the .req directive. This is of the form:
1214
1215 new_register_name .req existing_register_name
1216
1217 If we find one, or if it looks sufficiently like one that we want to
1218 handle any error here, return TRUE. Otherwise return FALSE. */
1219
1220 static bfd_boolean
1221 create_register_alias (char *newname, char *p)
1222 {
1223 const reg_entry *old;
1224 char *oldname, *nbuf;
1225 size_t nlen;
1226
1227 /* The input scrubber ensures that whitespace after the mnemonic is
1228 collapsed to single spaces. */
1229 oldname = p;
1230 if (strncmp (oldname, " .req ", 6) != 0)
1231 return FALSE;
1232
1233 oldname += 6;
1234 if (*oldname == '\0')
1235 return FALSE;
1236
1237 old = hash_find (aarch64_reg_hsh, oldname);
1238 if (!old)
1239 {
1240 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1241 return TRUE;
1242 }
1243
1244 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1245 the desired alias name, and p points to its end. If not, then
1246 the desired alias name is in the global original_case_string. */
1247 #ifdef TC_CASE_SENSITIVE
1248 nlen = p - newname;
1249 #else
1250 newname = original_case_string;
1251 nlen = strlen (newname);
1252 #endif
1253
1254 nbuf = alloca (nlen + 1);
1255 memcpy (nbuf, newname, nlen);
1256 nbuf[nlen] = '\0';
1257
1258 /* Create aliases under the new name as stated; an all-lowercase
1259 version of the new name; and an all-uppercase version of the new
1260 name. */
1261 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1262 {
1263 for (p = nbuf; *p; p++)
1264 *p = TOUPPER (*p);
1265
1266 if (strncmp (nbuf, newname, nlen))
1267 {
1268 /* If this attempt to create an additional alias fails, do not bother
1269 trying to create the all-lower case alias. We will fail and issue
1270 a second, duplicate error message. This situation arises when the
1271 programmer does something like:
1272 foo .req r0
1273 Foo .req r1
1274 The second .req creates the "Foo" alias but then fails to create
1275 the artificial FOO alias because it has already been created by the
1276 first .req. */
1277 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1278 return TRUE;
1279 }
1280
1281 for (p = nbuf; *p; p++)
1282 *p = TOLOWER (*p);
1283
1284 if (strncmp (nbuf, newname, nlen))
1285 insert_reg_alias (nbuf, old->number, old->type);
1286 }
1287
1288 return TRUE;
1289 }
1290
1291 /* Should never be called, as .req goes between the alias and the
1292 register name, not at the beginning of the line. */
1293 static void
1294 s_req (int a ATTRIBUTE_UNUSED)
1295 {
1296 as_bad (_("invalid syntax for .req directive"));
1297 }
1298
1299 /* The .unreq directive deletes an alias which was previously defined
1300 by .req. For example:
1301
1302 my_alias .req r11
1303 .unreq my_alias */
1304
1305 static void
1306 s_unreq (int a ATTRIBUTE_UNUSED)
1307 {
1308 char *name;
1309 char saved_char;
1310
1311 name = input_line_pointer;
1312
1313 while (*input_line_pointer != 0
1314 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1315 ++input_line_pointer;
1316
1317 saved_char = *input_line_pointer;
1318 *input_line_pointer = 0;
1319
1320 if (!*name)
1321 as_bad (_("invalid syntax for .unreq directive"));
1322 else
1323 {
1324 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1325
1326 if (!reg)
1327 as_bad (_("unknown register alias '%s'"), name);
1328 else if (reg->builtin)
1329 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1330 name);
1331 else
1332 {
1333 char *p;
1334 char *nbuf;
1335
1336 hash_delete (aarch64_reg_hsh, name, FALSE);
1337 free ((char *) reg->name);
1338 free (reg);
1339
1340 /* Also locate the all upper case and all lower case versions.
1341 Do not complain if we cannot find one or the other as it
1342 was probably deleted above. */
1343
1344 nbuf = strdup (name);
1345 for (p = nbuf; *p; p++)
1346 *p = TOUPPER (*p);
1347 reg = hash_find (aarch64_reg_hsh, nbuf);
1348 if (reg)
1349 {
1350 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1351 free ((char *) reg->name);
1352 free (reg);
1353 }
1354
1355 for (p = nbuf; *p; p++)
1356 *p = TOLOWER (*p);
1357 reg = hash_find (aarch64_reg_hsh, nbuf);
1358 if (reg)
1359 {
1360 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1361 free ((char *) reg->name);
1362 free (reg);
1363 }
1364
1365 free (nbuf);
1366 }
1367 }
1368
1369 *input_line_pointer = saved_char;
1370 demand_empty_rest_of_line ();
1371 }
1372
1373 /* Directives: Instruction set selection. */
1374
1375 #ifdef OBJ_ELF
1376 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1377 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1378 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1379 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
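/* For example, assembling
     .text
     add x0, x1, x2
     .word 0x11223344
   roughly produces a "$x" mapping symbol at offset 0 (start of the A64
   instructions) and a "$d" mapping symbol at offset 4 (start of the
   inline data), so that disassemblers and linkers can tell code and
   data apart within the section.  */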
1380
1381 /* Create a new mapping symbol for the transition to STATE. */
1382
1383 static void
1384 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1385 {
1386 symbolS *symbolP;
1387 const char *symname;
1388 int type;
1389
1390 switch (state)
1391 {
1392 case MAP_DATA:
1393 symname = "$d";
1394 type = BSF_NO_FLAGS;
1395 break;
1396 case MAP_INSN:
1397 symname = "$x";
1398 type = BSF_NO_FLAGS;
1399 break;
1400 default:
1401 abort ();
1402 }
1403
1404 symbolP = symbol_new (symname, now_seg, value, frag);
1405 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1406
1407 /* Save the mapping symbols for future reference. Also check that
1408 we do not place two mapping symbols at the same offset within a
1409 frag. We'll handle overlap between frags in
1410 check_mapping_symbols.
1411
1412 If .fill or other data filling directive generates zero sized data,
1413 the mapping symbol for the following code will have the same value
1414 as the one generated for the data filling directive. In this case,
1415 we replace the old symbol with the new one at the same address. */
1416 if (value == 0)
1417 {
1418 if (frag->tc_frag_data.first_map != NULL)
1419 {
1420 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1421 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1422 &symbol_lastP);
1423 }
1424 frag->tc_frag_data.first_map = symbolP;
1425 }
1426 if (frag->tc_frag_data.last_map != NULL)
1427 {
1428 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1429 S_GET_VALUE (symbolP));
1430 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1431 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1432 &symbol_lastP);
1433 }
1434 frag->tc_frag_data.last_map = symbolP;
1435 }
1436
1437 /* We must sometimes convert a region marked as code to data during
1438 code alignment, if an odd number of bytes have to be padded. The
1439 code mapping symbol is pushed to an aligned address. */
1440
1441 static void
1442 insert_data_mapping_symbol (enum mstate state,
1443 valueT value, fragS * frag, offsetT bytes)
1444 {
1445 /* If there was already a mapping symbol, remove it. */
1446 if (frag->tc_frag_data.last_map != NULL
1447 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1448 frag->fr_address + value)
1449 {
1450 symbolS *symp = frag->tc_frag_data.last_map;
1451
1452 if (value == 0)
1453 {
1454 know (frag->tc_frag_data.first_map == symp);
1455 frag->tc_frag_data.first_map = NULL;
1456 }
1457 frag->tc_frag_data.last_map = NULL;
1458 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1459 }
1460
1461 make_mapping_symbol (MAP_DATA, value, frag);
1462 make_mapping_symbol (state, value + bytes, frag);
1463 }
1464
1465 static void mapping_state_2 (enum mstate state, int max_chars);
1466
1467 /* Set the mapping state to STATE. Only call this when about to
1468 emit some STATE bytes to the file. */
1469
1470 void
1471 mapping_state (enum mstate state)
1472 {
1473 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1474
1475 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1476
1477 if (mapstate == state)
1478 /* The mapping symbol has already been emitted.
1479 There is nothing else to do. */
1480 return;
1481 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1482 /* This case will be evaluated later in the next else. */
1483 return;
1484 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1485 {
1486 /* Only add the symbol if the offset is > 0:
1487 if we're at the first frag, check its size > 0;
1488 if we're not at the first frag, then for sure
1489 the offset is > 0. */
1490 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1491 const int add_symbol = (frag_now != frag_first)
1492 || (frag_now_fix () > 0);
1493
1494 if (add_symbol)
1495 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1496 }
1497
1498 mapping_state_2 (state, 0);
1499 #undef TRANSITION
1500 }
1501
1502 /* Same as mapping_state, but MAX_CHARS bytes have already been
1503 allocated. Put the mapping symbol that far back. */
1504
1505 static void
1506 mapping_state_2 (enum mstate state, int max_chars)
1507 {
1508 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1509
1510 if (!SEG_NORMAL (now_seg))
1511 return;
1512
1513 if (mapstate == state)
1514 /* The mapping symbol has already been emitted.
1515 There is nothing else to do. */
1516 return;
1517
1518 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1519 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1520 }
1521 #else
1522 #define mapping_state(x) /* nothing */
1523 #define mapping_state_2(x, y) /* nothing */
1524 #endif
1525
1526 /* Directives: sectioning and alignment. */
1527
1528 static void
1529 s_bss (int ignore ATTRIBUTE_UNUSED)
1530 {
1531 /* We don't support putting frags in the BSS segment, we fake it by
1532 marking in_bss, then looking at s_skip for clues. */
1533 subseg_set (bss_section, 0);
1534 demand_empty_rest_of_line ();
1535 mapping_state (MAP_DATA);
1536 }
1537
1538 static void
1539 s_even (int ignore ATTRIBUTE_UNUSED)
1540 {
1541 /* Never make frag if expect extra pass. */
1542 if (!need_pass_2)
1543 frag_align (1, 0, 0);
1544
1545 record_alignment (now_seg, 1);
1546
1547 demand_empty_rest_of_line ();
1548 }
1549
1550 /* Directives: Literal pools. */
1551
1552 static literal_pool *
1553 find_literal_pool (int size)
1554 {
1555 literal_pool *pool;
1556
1557 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1558 {
1559 if (pool->section == now_seg
1560 && pool->sub_section == now_subseg && pool->size == size)
1561 break;
1562 }
1563
1564 return pool;
1565 }
1566
1567 static literal_pool *
1568 find_or_make_literal_pool (int size)
1569 {
1570 /* Next literal pool ID number. */
1571 static unsigned int latest_pool_num = 1;
1572 literal_pool *pool;
1573
1574 pool = find_literal_pool (size);
1575
1576 if (pool == NULL)
1577 {
1578 /* Create a new pool. */
1579 pool = xmalloc (sizeof (*pool));
1580 if (!pool)
1581 return NULL;
1582
1583 /* Currently we always put the literal pool in the current text
1584 section. If we were generating "small" model code where we
1585 knew that all code and initialised data was within 1MB then
1586 we could output literals to mergeable, read-only data
1587 sections. */
1588
1589 pool->next_free_entry = 0;
1590 pool->section = now_seg;
1591 pool->sub_section = now_subseg;
1592 pool->size = size;
1593 pool->next = list_of_pools;
1594 pool->symbol = NULL;
1595
1596 /* Add it to the list. */
1597 list_of_pools = pool;
1598 }
1599
1600 /* New pools, and emptied pools, will have a NULL symbol. */
1601 if (pool->symbol == NULL)
1602 {
1603 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1604 (valueT) 0, &zero_address_frag);
1605 pool->id = latest_pool_num++;
1606 }
1607
1608 /* Done. */
1609 return pool;
1610 }
1611
1612 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1613 Return TRUE on success, otherwise return FALSE. */
1614 static bfd_boolean
1615 add_to_lit_pool (expressionS *exp, int size)
1616 {
1617 literal_pool *pool;
1618 unsigned int entry;
1619
1620 pool = find_or_make_literal_pool (size);
1621
1622 /* Check if this literal value is already in the pool. */
1623 for (entry = 0; entry < pool->next_free_entry; entry++)
1624 {
1625 expressionS * litexp = & pool->literals[entry].exp;
1626
1627 if ((litexp->X_op == exp->X_op)
1628 && (exp->X_op == O_constant)
1629 && (litexp->X_add_number == exp->X_add_number)
1630 && (litexp->X_unsigned == exp->X_unsigned))
1631 break;
1632
1633 if ((litexp->X_op == exp->X_op)
1634 && (exp->X_op == O_symbol)
1635 && (litexp->X_add_number == exp->X_add_number)
1636 && (litexp->X_add_symbol == exp->X_add_symbol)
1637 && (litexp->X_op_symbol == exp->X_op_symbol))
1638 break;
1639 }
1640
1641 /* Do we need to create a new entry? */
1642 if (entry == pool->next_free_entry)
1643 {
1644 if (entry >= MAX_LITERAL_POOL_SIZE)
1645 {
1646 set_syntax_error (_("literal pool overflow"));
1647 return FALSE;
1648 }
1649
1650 pool->literals[entry].exp = *exp;
1651 pool->next_free_entry += 1;
1652 if (exp->X_op == O_big)
1653 {
1654 /* PR 16688: Bignums are held in a single global array. We must
1655 copy and preserve that value now, before it is overwritten. */
1656 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1657 memcpy (pool->literals[entry].bignum, generic_bignum,
1658 CHARS_PER_LITTLENUM * exp->X_add_number);
1659 }
1660 else
1661 pool->literals[entry].bignum = NULL;
1662 }
1663
1664 exp->X_op = O_symbol;
1665 exp->X_add_number = ((int) entry) * size;
1666 exp->X_add_symbol = pool->symbol;
1667
1668 return TRUE;
1669 }
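/* For example, two loads of the same 64-bit constant, e.g.
     ldr x0, =0xcafef00dcafef00d
     ldr x1, =0xcafef00dcafef00d
     .ltorg
   end up sharing a single 8-byte entry in the size-8 pool: both
   expressions are rewritten to refer to pool->symbol plus the same
   entry offset, and s_ltorg below emits the pooled value and places
   the symbol.  */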
1670
1671 /* Can't use symbol_new here, so have to create a symbol and then at
1672 a later date assign it a value. That's what these functions do. */
1673
1674 static void
1675 symbol_locate (symbolS * symbolP,
1676 const char *name,/* It is copied, the caller can modify. */
1677 segT segment, /* Segment identifier (SEG_<something>). */
1678 valueT valu, /* Symbol value. */
1679 fragS * frag) /* Associated fragment. */
1680 {
1681 size_t name_length;
1682 char *preserved_copy_of_name;
1683
1684 name_length = strlen (name) + 1; /* +1 for \0. */
1685 obstack_grow (&notes, name, name_length);
1686 preserved_copy_of_name = obstack_finish (&notes);
1687
1688 #ifdef tc_canonicalize_symbol_name
1689 preserved_copy_of_name =
1690 tc_canonicalize_symbol_name (preserved_copy_of_name);
1691 #endif
1692
1693 S_SET_NAME (symbolP, preserved_copy_of_name);
1694
1695 S_SET_SEGMENT (symbolP, segment);
1696 S_SET_VALUE (symbolP, valu);
1697 symbol_clear_list_pointers (symbolP);
1698
1699 symbol_set_frag (symbolP, frag);
1700
1701 /* Link to end of symbol chain. */
1702 {
1703 extern int symbol_table_frozen;
1704
1705 if (symbol_table_frozen)
1706 abort ();
1707 }
1708
1709 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1710
1711 obj_symbol_new_hook (symbolP);
1712
1713 #ifdef tc_symbol_new_hook
1714 tc_symbol_new_hook (symbolP);
1715 #endif
1716
1717 #ifdef DEBUG_SYMS
1718 verify_symbol_chain (symbol_rootP, symbol_lastP);
1719 #endif /* DEBUG_SYMS */
1720 }
1721
1722
1723 static void
1724 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1725 {
1726 unsigned int entry;
1727 literal_pool *pool;
1728 char sym_name[20];
1729 int align;
1730
1731 for (align = 2; align <= 4; align++)
1732 {
1733 int size = 1 << align;
1734
1735 pool = find_literal_pool (size);
1736 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1737 continue;
1738
1739 mapping_state (MAP_DATA);
1740
1741 /* Align the pool, as we have word accesses.
1742 Only make a frag if we have to. */
1743 if (!need_pass_2)
1744 frag_align (align, 0, 0);
1745
1746 record_alignment (now_seg, align);
1747
1748 sprintf (sym_name, "$$lit_\002%x", pool->id);
1749
1750 symbol_locate (pool->symbol, sym_name, now_seg,
1751 (valueT) frag_now_fix (), frag_now);
1752 symbol_table_insert (pool->symbol);
1753
1754 for (entry = 0; entry < pool->next_free_entry; entry++)
1755 {
1756 expressionS * exp = & pool->literals[entry].exp;
1757
1758 if (exp->X_op == O_big)
1759 {
1760 /* PR 16688: Restore the global bignum value. */
1761 gas_assert (pool->literals[entry].bignum != NULL);
1762 memcpy (generic_bignum, pool->literals[entry].bignum,
1763 CHARS_PER_LITTLENUM * exp->X_add_number);
1764 }
1765
1766 /* First output the expression in the instruction to the pool. */
1767 emit_expr (exp, size); /* .word|.xword */
1768
1769 if (exp->X_op == O_big)
1770 {
1771 free (pool->literals[entry].bignum);
1772 pool->literals[entry].bignum = NULL;
1773 }
1774 }
1775
1776 /* Mark the pool as empty. */
1777 pool->next_free_entry = 0;
1778 pool->symbol = NULL;
1779 }
1780 }
1781
1782 #ifdef OBJ_ELF
1783 /* Forward declarations for functions below, in the MD interface
1784 section. */
1785 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1786 static struct reloc_table_entry * find_reloc_table_entry (char **);
1787
1788 /* Directives: Data. */
1789 /* N.B. the support for relocation suffix in this directive needs to be
1790 implemented properly. */
1791
1792 static void
1793 s_aarch64_elf_cons (int nbytes)
1794 {
1795 expressionS exp;
1796
1797 #ifdef md_flush_pending_output
1798 md_flush_pending_output ();
1799 #endif
1800
1801 if (is_it_end_of_statement ())
1802 {
1803 demand_empty_rest_of_line ();
1804 return;
1805 }
1806
1807 #ifdef md_cons_align
1808 md_cons_align (nbytes);
1809 #endif
1810
1811 mapping_state (MAP_DATA);
1812 do
1813 {
1814 struct reloc_table_entry *reloc;
1815
1816 expression (&exp);
1817
1818 if (exp.X_op != O_symbol)
1819 emit_expr (&exp, (unsigned int) nbytes);
1820 else
1821 {
1822 skip_past_char (&input_line_pointer, '#');
1823 if (skip_past_char (&input_line_pointer, ':'))
1824 {
1825 reloc = find_reloc_table_entry (&input_line_pointer);
1826 if (reloc == NULL)
1827 as_bad (_("unrecognized relocation suffix"));
1828 else
1829 as_bad (_("unimplemented relocation suffix"));
1830 ignore_rest_of_line ();
1831 return;
1832 }
1833 else
1834 emit_expr (&exp, (unsigned int) nbytes);
1835 }
1836 }
1837 while (*input_line_pointer++ == ',');
1838
1839 /* Put terminator back into stream. */
1840 input_line_pointer--;
1841 demand_empty_rest_of_line ();
1842 }
1843
1844 #endif /* OBJ_ELF */
1845
1846 /* Output a 32-bit word, but mark as an instruction. */
1847
1848 static void
1849 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1850 {
1851 expressionS exp;
1852
1853 #ifdef md_flush_pending_output
1854 md_flush_pending_output ();
1855 #endif
1856
1857 if (is_it_end_of_statement ())
1858 {
1859 demand_empty_rest_of_line ();
1860 return;
1861 }
1862
1863 if (!need_pass_2)
1864 frag_align_code (2, 0);
1865 #ifdef OBJ_ELF
1866 mapping_state (MAP_INSN);
1867 #endif
1868
1869 do
1870 {
1871 expression (&exp);
1872 if (exp.X_op != O_constant)
1873 {
1874 as_bad (_("constant expression required"));
1875 ignore_rest_of_line ();
1876 return;
1877 }
1878
1879 if (target_big_endian)
1880 {
1881 unsigned int val = exp.X_add_number;
1882 exp.X_add_number = SWAP_32 (val);
1883 }
1884 emit_expr (&exp, 4);
1885 }
1886 while (*input_line_pointer++ == ',');
1887
1888 /* Put terminator back into stream. */
1889 input_line_pointer--;
1890 demand_empty_rest_of_line ();
1891 }
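/* For example:
     .inst 0xd503201f
   emits the 4-byte word 0xd503201f (the NOP encoding) into the current
   section, marked as code via the MAP_INSN mapping state on ELF targets
   and byte-swapped on big-endian ones.  */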
1892
1893 #ifdef OBJ_ELF
1894 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1895
1896 static void
1897 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1898 {
1899 expressionS exp;
1900
1901 /* Since we're just labelling the code, there's no need to define a
1902 mapping symbol. */
1903 expression (&exp);
1904 /* Make sure there is enough room in this frag for the following
1905 blr. This trick only works if the blr follows immediately after
1906 the .tlsdesc directive. */
1907 frag_grow (4);
1908 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1909 BFD_RELOC_AARCH64_TLSDESC_CALL);
1910
1911 demand_empty_rest_of_line ();
1912 }
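/* A typical TLS descriptor sequence using this directive looks roughly
   like:

     adrp x0, :tlsdesc:var
     ldr  x1, [x0, #:tlsdesc_lo12:var]
     add  x0, x0, :tlsdesc_lo12:var
     .tlsdesccall var
     blr  x1

   The .tlsdesccall line attaches BFD_RELOC_AARCH64_TLSDESC_CALL to the
   immediately following blr, which is why frag_grow (4) above reserves
   room for it in the same frag.  */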
1913 #endif /* OBJ_ELF */
1914
1915 static void s_aarch64_arch (int);
1916 static void s_aarch64_cpu (int);
1917 static void s_aarch64_arch_extension (int);
1918
1919 /* This table describes all the machine specific pseudo-ops the assembler
1920 has to support. The fields are:
1921 pseudo-op name without dot
1922 function to call to execute this pseudo-op
1923 Integer arg to pass to the function. */
1924
1925 const pseudo_typeS md_pseudo_table[] = {
1926 /* Never called because '.req' does not start a line. */
1927 {"req", s_req, 0},
1928 {"unreq", s_unreq, 0},
1929 {"bss", s_bss, 0},
1930 {"even", s_even, 0},
1931 {"ltorg", s_ltorg, 0},
1932 {"pool", s_ltorg, 0},
1933 {"cpu", s_aarch64_cpu, 0},
1934 {"arch", s_aarch64_arch, 0},
1935 {"arch_extension", s_aarch64_arch_extension, 0},
1936 {"inst", s_aarch64_inst, 0},
1937 #ifdef OBJ_ELF
1938 {"tlsdesccall", s_tlsdesccall, 0},
1939 {"word", s_aarch64_elf_cons, 4},
1940 {"long", s_aarch64_elf_cons, 4},
1941 {"xword", s_aarch64_elf_cons, 8},
1942 {"dword", s_aarch64_elf_cons, 8},
1943 #endif
1944 {0, 0, 0}
1945 };
1946 \f
1947
1948 /* Check whether STR points to a register name followed by a comma or the
1949 end of line; REG_TYPE indicates which register types are checked
1950 against. Return TRUE if STR is such a register name; otherwise return
1951 FALSE. The function does not intend to produce any diagnostics, but since
1952 the register parser aarch64_reg_parse, which is called by this function,
1953 does produce diagnostics, we call clear_error to clear any diagnostics
1954 that may be generated by aarch64_reg_parse.
1955 Also, the function returns FALSE directly if there is any user error
1956 present at the function entry. This prevents the existing diagnostics
1957 state from being spoiled.
1958 The function currently serves parse_constant_immediate and
1959 parse_big_immediate only. */
1960 static bfd_boolean
1961 reg_name_p (char *str, aarch64_reg_type reg_type)
1962 {
1963 int reg;
1964
1965 /* Prevent the diagnostics state from being spoiled. */
1966 if (error_p ())
1967 return FALSE;
1968
1969 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1970
1971 /* Clear the parsing error that may be set by the reg parser. */
1972 clear_error ();
1973
1974 if (reg == PARSE_FAIL)
1975 return FALSE;
1976
1977 skip_whitespace (str);
1978 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1979 return TRUE;
1980
1981 return FALSE;
1982 }
1983
1984 /* Parser functions used exclusively in instruction operands. */
1985
1986 /* Parse an immediate expression which may not be constant.
1987
1988 To prevent the expression parser from pushing a register name
1989 into the symbol table as an undefined symbol, firstly a check is
1990 done to find out whether STR is a valid register name followed
1991 by a comma or the end of line. Return FALSE if STR is such a
1992 string. */
1993
1994 static bfd_boolean
1995 parse_immediate_expression (char **str, expressionS *exp)
1996 {
1997 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1998 {
1999 set_recoverable_error (_("immediate operand required"));
2000 return FALSE;
2001 }
2002
2003 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2004
2005 if (exp->X_op == O_absent)
2006 {
2007 set_fatal_syntax_error (_("missing immediate expression"));
2008 return FALSE;
2009 }
2010
2011 return TRUE;
2012 }
2013
2014 /* Constant immediate-value read function for use in insn parsing.
2015 STR points to the beginning of the immediate (with the optional
2016 leading #); *VAL receives the value.
2017
2018 Return TRUE on success; otherwise return FALSE. */
2019
2020 static bfd_boolean
2021 parse_constant_immediate (char **str, int64_t * val)
2022 {
2023 expressionS exp;
2024
2025 if (! parse_immediate_expression (str, &exp))
2026 return FALSE;
2027
2028 if (exp.X_op != O_constant)
2029 {
2030 set_syntax_error (_("constant expression required"));
2031 return FALSE;
2032 }
2033
2034 *val = exp.X_add_number;
2035 return TRUE;
2036 }
2037
2038 static uint32_t
2039 encode_imm_float_bits (uint32_t imm)
2040 {
2041 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2042 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2043 }
2044
2045 /* Return TRUE if the single-precision floating-point value encoded in IMM
2046 can be expressed in the AArch64 8-bit signed floating-point format with
2047 3-bit exponent and normalized 4 bits of precision; in other words, the
2048 floating-point value must be expressible as
2049 (+/-) n / 16 * power (2, r)
2050 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2051
2052 static bfd_boolean
2053 aarch64_imm_float_p (uint32_t imm)
2054 {
2055 /* If a single-precision floating-point value has the following bit
2056 pattern, it can be expressed in the AArch64 8-bit floating-point
2057 format:
2058
2059 3 32222222 2221111111111
2060 1 09876543 21098765432109876543210
2061 n Eeeeeexx xxxx0000000000000000000
2062
2063 where n, e and each x are either 0 or 1 independently, with
2064 E == ~ e. */
2065
2066 uint32_t pattern;
2067
2068 /* Prepare the pattern for 'Eeeeee'. */
2069 if (((imm >> 30) & 0x1) == 0)
2070 pattern = 0x3e000000;
2071 else
2072 pattern = 0x40000000;
2073
2074 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2075 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2076 }
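
/* Illustrative worked example, assuming the standard IEEE754 single-precision
   layout: 1.0f is encoded as 0x3f800000, whose lower 19 bits are zero and
   whose bits 25-29 equal ~bit 30, so aarch64_imm_float_p returns TRUE;
   encode_imm_float_bits (0x3f800000) then yields 0x70, the usual 8-bit
   FMOV immediate for 1.0.  */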
2077
2078 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2079
2080 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2081 8-bit signed floating-point format with 3-bit exponent and normalized 4
2082 bits of precision (i.e. can be used in an FMOV instruction); return the
2083 equivalent single-precision encoding in *FPWORD.
2084
2085 Otherwise return FALSE. */
2086
2087 static bfd_boolean
2088 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2089 {
2090 /* If a double-precision floating-point value has the following bit
2091 pattern, it can be expressed in the AArch64 8-bit floating-point
2092 format:
2093
2094 6 66655555555 554444444...21111111111
2095 3 21098765432 109876543...098765432109876543210
2096 n Eeeeeeeeexx xxxx00000...000000000000000000000
2097
2098 where n, e and each x are either 0 or 1 independently, with
2099 E == ~ e. */
2100
2101 uint32_t pattern;
2102 uint32_t high32 = imm >> 32;
2103
2104 /* Lower 32 bits need to be 0s. */
2105 if ((imm & 0xffffffff) != 0)
2106 return FALSE;
2107
2108 /* Prepare the pattern for 'Eeeeeeeee'. */
2109 if (((high32 >> 30) & 0x1) == 0)
2110 pattern = 0x3fc00000;
2111 else
2112 pattern = 0x40000000;
2113
2114 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2115 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2116 {
2117 /* Convert to the single-precision encoding.
2118 i.e. convert
2119 n Eeeeeeeeexx xxxx00000...000000000000000000000
2120 to
2121 n Eeeeeexx xxxx0000000000000000000. */
2122 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2123 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2124 return TRUE;
2125 }
2126 else
2127 return FALSE;
2128 }
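
/* Illustrative worked example, assuming the standard IEEE754 layouts: the
   double 1.0 is encoded as 0x3ff0000000000000; its low 32 bits are zero and
   the high word 0x3ff00000 matches the pattern above, so the function
   returns TRUE and stores the single-precision encoding 0x3f800000 in
   *FPWORD.  */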
2129
2130 /* Parse a floating-point immediate. Return TRUE on success and return the
2131 value in *IMMED in the format of IEEE754 single-precision encoding.
2132 *CCP points to the start of the string; DP_P is TRUE when the immediate
2133 is expected to be in double-precision (N.B. this only matters when
2134 hexadecimal representation is involved).
2135
2136 N.B. 0.0 is accepted by this function. */
2137
2138 static bfd_boolean
2139 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2140 {
2141 char *str = *ccp;
2142 char *fpnum;
2143 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2144 int found_fpchar = 0;
2145 int64_t val = 0;
2146 unsigned fpword = 0;
2147 bfd_boolean hex_p = FALSE;
2148
2149 skip_past_char (&str, '#');
2150
2151 fpnum = str;
2152 skip_whitespace (fpnum);
2153
2154 if (strncmp (fpnum, "0x", 2) == 0)
2155 {
2156 /* Support the hexadecimal representation of the IEEE754 encoding.
2157 Double-precision is expected when DP_P is TRUE, otherwise the
2158 representation should be in single-precision. */
2159 if (! parse_constant_immediate (&str, &val))
2160 goto invalid_fp;
2161
2162 if (dp_p)
2163 {
2164 if (! aarch64_double_precision_fmovable (val, &fpword))
2165 goto invalid_fp;
2166 }
2167 else if ((uint64_t) val > 0xffffffff)
2168 goto invalid_fp;
2169 else
2170 fpword = val;
2171
2172 hex_p = TRUE;
2173 }
2174 else
2175 {
2176 /* We must not accidentally parse an integer as a floating-point number.
2177 Make sure that the value we parse is not an integer by checking for
2178 special characters '.', 'e' or 'E'. */
2179 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2180 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2181 {
2182 found_fpchar = 1;
2183 break;
2184 }
2185
2186 if (!found_fpchar)
2187 return FALSE;
2188 }
2189
2190 if (! hex_p)
2191 {
2192 int i;
2193
2194 if ((str = atof_ieee (str, 's', words)) == NULL)
2195 goto invalid_fp;
2196
2197 /* Our FP word must be 32 bits (single-precision FP). */
2198 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2199 {
2200 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2201 fpword |= words[i];
2202 }
2203 }
2204
2205 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2206 {
2207 *immed = fpword;
2208 *ccp = str;
2209 return TRUE;
2210 }
2211
2212 invalid_fp:
2213 set_fatal_syntax_error (_("invalid floating-point constant"));
2214 return FALSE;
2215 }
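
/* A sketch of the accepted forms, based on the code above: "#1.0" goes
   through the atof_ieee path, while "#0x3f800000" (single-precision) or
   "#0x3ff0000000000000" (double-precision, when DP_P is TRUE) supply the
   IEEE754 encoding directly via the hexadecimal path.  */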
2216
2217 /* Less-generic immediate-value read function with the possibility of loading
2218 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2219 instructions.
2220
2221 To prevent the expression parser from pushing a register name into the
2222 symbol table as an undefined symbol, a check is first done to find
2223 out whether STR is a valid register name followed by a comma or the end
2224 of line. Return FALSE if STR is such a register. */
2225
2226 static bfd_boolean
2227 parse_big_immediate (char **str, int64_t *imm)
2228 {
2229 char *ptr = *str;
2230
2231 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2232 {
2233 set_syntax_error (_("immediate operand required"));
2234 return FALSE;
2235 }
2236
2237 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2238
2239 if (inst.reloc.exp.X_op == O_constant)
2240 *imm = inst.reloc.exp.X_add_number;
2241
2242 *str = ptr;
2243
2244 return TRUE;
2245 }
2246
2247 /* Record in *RELOC that operand OPERAND needs a GAS internal fixup.
2248 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2249 assistance from libopcodes. */
2250
2251 static inline void
2252 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2253 const aarch64_opnd_info *operand,
2254 int need_libopcodes_p)
2255 {
2256 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2257 reloc->opnd = operand->type;
2258 if (need_libopcodes_p)
2259 reloc->need_libopcodes_p = 1;
2260 }
2261
2262 /* Return TRUE if the instruction needs to be fixed up later internally by
2263 the GAS; otherwise return FALSE. */
2264
2265 static inline bfd_boolean
2266 aarch64_gas_internal_fixup_p (void)
2267 {
2268 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2269 }
2270
2271 /* Assign the immediate value to the relevant field in *OPERAND if
2272 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2273 needs an internal fixup at a later stage.
2274 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2275 IMM.VALUE that may get assigned with the constant. */
2276 static inline void
2277 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2278 aarch64_opnd_info *operand,
2279 int addr_off_p,
2280 int need_libopcodes_p,
2281 int skip_p)
2282 {
2283 if (reloc->exp.X_op == O_constant)
2284 {
2285 if (addr_off_p)
2286 operand->addr.offset.imm = reloc->exp.X_add_number;
2287 else
2288 operand->imm.value = reloc->exp.X_add_number;
2289 reloc->type = BFD_RELOC_UNUSED;
2290 }
2291 else
2292 {
2293 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2294 /* Tell libopcodes to ignore this operand or not. This is helpful
2295 when one of the operands needs to be fixed up later but we need
2296 libopcodes to check the other operands. */
2297 operand->skip = skip_p;
2298 }
2299 }
2300
2301 /* Relocation modifiers. Each entry in the table contains the textual
2302 name for the relocation which may be placed before a symbol used as
2303 a load/store offset, or add immediate. It must be surrounded by a
2304 leading and trailing colon, for example:
2305
2306 ldr x0, [x1, #:rello:varsym]
2307 add x0, x1, #:rello:varsym */
2308
2309 struct reloc_table_entry
2310 {
2311 const char *name;
2312 int pc_rel;
2313 bfd_reloc_code_real_type adr_type;
2314 bfd_reloc_code_real_type adrp_type;
2315 bfd_reloc_code_real_type movw_type;
2316 bfd_reloc_code_real_type add_type;
2317 bfd_reloc_code_real_type ldst_type;
2318 bfd_reloc_code_real_type ld_literal_type;
2319 };
2320
2321 static struct reloc_table_entry reloc_table[] = {
2322 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2323 {"lo12", 0,
2324 0, /* adr_type */
2325 0,
2326 0,
2327 BFD_RELOC_AARCH64_ADD_LO12,
2328 BFD_RELOC_AARCH64_LDST_LO12,
2329 0},
2330
2331 /* Higher 21 bits of pc-relative page offset: ADRP */
2332 {"pg_hi21", 1,
2333 0, /* adr_type */
2334 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2335 0,
2336 0,
2337 0,
2338 0},
2339
2340 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2341 {"pg_hi21_nc", 1,
2342 0, /* adr_type */
2343 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2344 0,
2345 0,
2346 0,
2347 0},
2348
2349 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2350 {"abs_g0", 0,
2351 0, /* adr_type */
2352 0,
2353 BFD_RELOC_AARCH64_MOVW_G0,
2354 0,
2355 0,
2356 0},
2357
2358 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2359 {"abs_g0_s", 0,
2360 0, /* adr_type */
2361 0,
2362 BFD_RELOC_AARCH64_MOVW_G0_S,
2363 0,
2364 0,
2365 0},
2366
2367 /* Less significant bits 0-15 of address/value: MOVK, no check */
2368 {"abs_g0_nc", 0,
2369 0, /* adr_type */
2370 0,
2371 BFD_RELOC_AARCH64_MOVW_G0_NC,
2372 0,
2373 0,
2374 0},
2375
2376 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2377 {"abs_g1", 0,
2378 0, /* adr_type */
2379 0,
2380 BFD_RELOC_AARCH64_MOVW_G1,
2381 0,
2382 0,
2383 0},
2384
2385 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2386 {"abs_g1_s", 0,
2387 0, /* adr_type */
2388 0,
2389 BFD_RELOC_AARCH64_MOVW_G1_S,
2390 0,
2391 0,
2392 0},
2393
2394 /* Less significant bits 16-31 of address/value: MOVK, no check */
2395 {"abs_g1_nc", 0,
2396 0, /* adr_type */
2397 0,
2398 BFD_RELOC_AARCH64_MOVW_G1_NC,
2399 0,
2400 0,
2401 0},
2402
2403 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2404 {"abs_g2", 0,
2405 0, /* adr_type */
2406 0,
2407 BFD_RELOC_AARCH64_MOVW_G2,
2408 0,
2409 0,
2410 0},
2411
2412 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2413 {"abs_g2_s", 0,
2414 0, /* adr_type */
2415 0,
2416 BFD_RELOC_AARCH64_MOVW_G2_S,
2417 0,
2418 0,
2419 0},
2420
2421 /* Less significant bits 32-47 of address/value: MOVK, no check */
2422 {"abs_g2_nc", 0,
2423 0, /* adr_type */
2424 0,
2425 BFD_RELOC_AARCH64_MOVW_G2_NC,
2426 0,
2427 0,
2428 0},
2429
2430 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2431 {"abs_g3", 0,
2432 0, /* adr_type */
2433 0,
2434 BFD_RELOC_AARCH64_MOVW_G3,
2435 0,
2436 0,
2437 0},
2438
2439 /* Get to the page containing GOT entry for a symbol. */
2440 {"got", 1,
2441 0, /* adr_type */
2442 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2443 0,
2444 0,
2445 0,
2446 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2447
2448 /* 12 bit offset into the page containing GOT entry for that symbol. */
2449 {"got_lo12", 0,
2450 0, /* adr_type */
2451 0,
2452 0,
2453 0,
2454 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2455 0},
2456
2457 /* Get to the GOT TLS entry for a symbol: ADR (direct) or ADRP (page). */
2458 {"tlsgd", 0,
2459 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2460 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2461 0,
2462 0,
2463 0,
2464 0},
2465
2466 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2467 {"tlsgd_lo12", 0,
2468 0, /* adr_type */
2469 0,
2470 0,
2471 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2472 0,
2473 0},
2474
2475 /* Get to the page containing GOT TLS entry for a symbol */
2476 {"tlsdesc", 0,
2477 0, /* adr_type */
2478 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2479 0,
2480 0,
2481 0,
2482 0},
2483
2484 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2485 {"tlsdesc_lo12", 0,
2486 0, /* adr_type */
2487 0,
2488 0,
2489 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2490 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2491 0},
2492
2493 /* Get to the page containing GOT TLS entry for a symbol */
2494 {"gottprel", 0,
2495 0, /* adr_type */
2496 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2497 0,
2498 0,
2499 0,
2500 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2501
2502 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2503 {"gottprel_lo12", 0,
2504 0, /* adr_type */
2505 0,
2506 0,
2507 0,
2508 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2509 0},
2510
2511 /* Get tp offset for a symbol. */
2512 {"tprel", 0,
2513 0, /* adr_type */
2514 0,
2515 0,
2516 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2517 0,
2518 0},
2519
2520 /* Get tp offset for a symbol. */
2521 {"tprel_lo12", 0,
2522 0, /* adr_type */
2523 0,
2524 0,
2525 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2526 0,
2527 0},
2528
2529 /* Get tp offset for a symbol. */
2530 {"tprel_hi12", 0,
2531 0, /* adr_type */
2532 0,
2533 0,
2534 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2535 0,
2536 0},
2537
2538 /* Get tp offset for a symbol. */
2539 {"tprel_lo12_nc", 0,
2540 0, /* adr_type */
2541 0,
2542 0,
2543 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2544 0,
2545 0},
2546
2547 /* Most significant bits 32-47 of address/value: MOVZ. */
2548 {"tprel_g2", 0,
2549 0, /* adr_type */
2550 0,
2551 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2552 0,
2553 0,
2554 0},
2555
2556 /* Most significant bits 16-31 of address/value: MOVZ. */
2557 {"tprel_g1", 0,
2558 0, /* adr_type */
2559 0,
2560 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2561 0,
2562 0,
2563 0},
2564
2565 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2566 {"tprel_g1_nc", 0,
2567 0, /* adr_type */
2568 0,
2569 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2570 0,
2571 0,
2572 0},
2573
2574 /* Most significant bits 0-15 of address/value: MOVZ. */
2575 {"tprel_g0", 0,
2576 0, /* adr_type */
2577 0,
2578 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2579 0,
2580 0,
2581 0},
2582
2583 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2584 {"tprel_g0_nc", 0,
2585 0, /* adr_type */
2586 0,
2587 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2588 0,
2589 0,
2590 0},
2591 };
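
/* A usage sketch of the TLS global-dynamic modifiers above (illustrative
   code sequence; the assembler only records the relocations):

     adrp x0, :tlsgd:var            // BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21
     add  x0, x0, #:tlsgd_lo12:var  // BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC
     bl   __tls_get_addr

   and, via the adr_type entry above, a tiny-code-model form:

     adr  x0, :tlsgd:var            // BFD_RELOC_AARCH64_TLSGD_ADR_PREL21
     bl   __tls_get_addr  */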
2592
2593 /* Given the address of a pointer pointing to the textual name of a
2594 relocation as may appear in assembler source, attempt to find its
2595 details in reloc_table. The pointer will be updated to the character
2596 after the trailing colon. On failure, NULL will be returned;
2597 otherwise return the reloc_table_entry. */
2598
2599 static struct reloc_table_entry *
2600 find_reloc_table_entry (char **str)
2601 {
2602 unsigned int i;
2603 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2604 {
2605 int length = strlen (reloc_table[i].name);
2606
2607 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2608 && (*str)[length] == ':')
2609 {
2610 *str += (length + 1);
2611 return &reloc_table[i];
2612 }
2613 }
2614
2615 return NULL;
2616 }
2617
2618 /* Mode argument to parse_shift and parse_shifter_operand. */
2619 enum parse_shift_mode
2620 {
2621 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2622 "#imm{,lsl #n}" */
2623 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2624 "#imm" */
2625 SHIFTED_LSL, /* bare "lsl #n" */
2626 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2627 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2628 };
2629
2630 /* Parse a <shift> operator on an AArch64 data processing instruction.
2631 Return TRUE on success; otherwise return FALSE. */
2632 static bfd_boolean
2633 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2634 {
2635 const struct aarch64_name_value_pair *shift_op;
2636 enum aarch64_modifier_kind kind;
2637 expressionS exp;
2638 int exp_has_prefix;
2639 char *s = *str;
2640 char *p = s;
2641
2642 for (p = *str; ISALPHA (*p); p++)
2643 ;
2644
2645 if (p == *str)
2646 {
2647 set_syntax_error (_("shift expression expected"));
2648 return FALSE;
2649 }
2650
2651 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2652
2653 if (shift_op == NULL)
2654 {
2655 set_syntax_error (_("shift operator expected"));
2656 return FALSE;
2657 }
2658
2659 kind = aarch64_get_operand_modifier (shift_op);
2660
2661 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2662 {
2663 set_syntax_error (_("invalid use of 'MSL'"));
2664 return FALSE;
2665 }
2666
2667 switch (mode)
2668 {
2669 case SHIFTED_LOGIC_IMM:
2670 if (aarch64_extend_operator_p (kind) == TRUE)
2671 {
2672 set_syntax_error (_("extending shift is not permitted"));
2673 return FALSE;
2674 }
2675 break;
2676
2677 case SHIFTED_ARITH_IMM:
2678 if (kind == AARCH64_MOD_ROR)
2679 {
2680 set_syntax_error (_("'ROR' shift is not permitted"));
2681 return FALSE;
2682 }
2683 break;
2684
2685 case SHIFTED_LSL:
2686 if (kind != AARCH64_MOD_LSL)
2687 {
2688 set_syntax_error (_("only 'LSL' shift is permitted"));
2689 return FALSE;
2690 }
2691 break;
2692
2693 case SHIFTED_REG_OFFSET:
2694 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2695 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2696 {
2697 set_fatal_syntax_error
2698 (_("invalid shift for the register offset addressing mode"));
2699 return FALSE;
2700 }
2701 break;
2702
2703 case SHIFTED_LSL_MSL:
2704 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2705 {
2706 set_syntax_error (_("invalid shift operator"));
2707 return FALSE;
2708 }
2709 break;
2710
2711 default:
2712 abort ();
2713 }
2714
2715 /* Whitespace can appear here if the next thing is a bare digit. */
2716 skip_whitespace (p);
2717
2718 /* Parse shift amount. */
2719 exp_has_prefix = 0;
2720 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2721 exp.X_op = O_absent;
2722 else
2723 {
2724 if (is_immediate_prefix (*p))
2725 {
2726 p++;
2727 exp_has_prefix = 1;
2728 }
2729 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2730 }
2731 if (exp.X_op == O_absent)
2732 {
2733 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2734 {
2735 set_syntax_error (_("missing shift amount"));
2736 return FALSE;
2737 }
2738 operand->shifter.amount = 0;
2739 }
2740 else if (exp.X_op != O_constant)
2741 {
2742 set_syntax_error (_("constant shift amount required"));
2743 return FALSE;
2744 }
2745 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2746 {
2747 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2748 return FALSE;
2749 }
2750 else
2751 {
2752 operand->shifter.amount = exp.X_add_number;
2753 operand->shifter.amount_present = 1;
2754 }
2755
2756 operand->shifter.operator_present = 1;
2757 operand->shifter.kind = kind;
2758
2759 *str = p;
2760 return TRUE;
2761 }
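
/* Illustrative examples of operands that reach this parser (assumed
   typical uses; see the mode descriptions above):
     add  x0, x1, x2, lsl #3      // arithmetic, shifted register
     and  x0, x1, x2, ror #8      // logical, ROR permitted
     ldr  x0, [x1, w2, sxtw #3]   // register offset extend
     movi v0.4s, #0xab, msl #8    // LSL/MSL shift  */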
2762
2763 /* Parse a <shifter_operand> for a data processing instruction:
2764
2765 #<immediate>
2766 #<immediate>, LSL #imm
2767
2768 Validation of immediate operands is deferred to md_apply_fix.
2769
2770 Return TRUE on success; otherwise return FALSE. */
2771
2772 static bfd_boolean
2773 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2774 enum parse_shift_mode mode)
2775 {
2776 char *p;
2777
2778 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2779 return FALSE;
2780
2781 p = *str;
2782
2783 /* Accept an immediate expression. */
2784 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2785 return FALSE;
2786
2787 /* Accept optional LSL for arithmetic immediate values. */
2788 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2789 if (! parse_shift (&p, operand, SHIFTED_LSL))
2790 return FALSE;
2791
2792 /* Do not accept any shifter for logical immediate values. */
2793 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2794 && parse_shift (&p, operand, mode))
2795 {
2796 set_syntax_error (_("unexpected shift operator"));
2797 return FALSE;
2798 }
2799
2800 *str = p;
2801 return TRUE;
2802 }
2803
2804 /* Parse a <shifter_operand> for a data processing instruction:
2805
2806 <Rm>
2807 <Rm>, <shift>
2808 #<immediate>
2809 #<immediate>, LSL #imm
2810
2811 where <shift> is handled by parse_shift above, and the last two
2812 cases are handled by the function above.
2813
2814 Validation of immediate operands is deferred to md_apply_fix.
2815
2816 Return TRUE on success; otherwise return FALSE. */
2817
2818 static bfd_boolean
2819 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2820 enum parse_shift_mode mode)
2821 {
2822 int reg;
2823 int isreg32, isregzero;
2824 enum aarch64_operand_class opd_class
2825 = aarch64_get_operand_class (operand->type);
2826
2827 if ((reg =
2828 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2829 {
2830 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2831 {
2832 set_syntax_error (_("unexpected register in the immediate operand"));
2833 return FALSE;
2834 }
2835
2836 if (!isregzero && reg == REG_SP)
2837 {
2838 set_syntax_error (BAD_SP);
2839 return FALSE;
2840 }
2841
2842 operand->reg.regno = reg;
2843 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2844
2845 /* Accept optional shift operation on register. */
2846 if (! skip_past_comma (str))
2847 return TRUE;
2848
2849 if (! parse_shift (str, operand, mode))
2850 return FALSE;
2851
2852 return TRUE;
2853 }
2854 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2855 {
2856 set_syntax_error
2857 (_("integer register expected in the extended/shifted operand "
2858 "register"));
2859 return FALSE;
2860 }
2861
2862 /* We have a shifted immediate variable. */
2863 return parse_shifter_operand_imm (str, operand, mode);
2864 }
2865
2866 /* Return TRUE on success; return FALSE otherwise. */
2867
2868 static bfd_boolean
2869 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2870 enum parse_shift_mode mode)
2871 {
2872 char *p = *str;
2873
2874 /* Determine if we have the sequence of characters #: or just :
2875 coming next. If we do, then we check for a :rello: relocation
2876 modifier. If we don't, punt the whole lot to
2877 parse_shifter_operand. */
2878
2879 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2880 {
2881 struct reloc_table_entry *entry;
2882
2883 if (p[0] == '#')
2884 p += 2;
2885 else
2886 p++;
2887 *str = p;
2888
2889 /* Try to parse a relocation. Anything else is an error. */
2890 if (!(entry = find_reloc_table_entry (str)))
2891 {
2892 set_syntax_error (_("unknown relocation modifier"));
2893 return FALSE;
2894 }
2895
2896 if (entry->add_type == 0)
2897 {
2898 set_syntax_error
2899 (_("this relocation modifier is not allowed on this instruction"));
2900 return FALSE;
2901 }
2902
2903 /* Save str before we decompose it. */
2904 p = *str;
2905
2906 /* Next, we parse the expression. */
2907 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2908 return FALSE;
2909
2910 /* Record the relocation type (use the ADD variant here). */
2911 inst.reloc.type = entry->add_type;
2912 inst.reloc.pc_rel = entry->pc_rel;
2913
2914 /* If str is empty, we've reached the end, stop here. */
2915 if (**str == '\0')
2916 return TRUE;
2917
2918 /* Otherwise, we have a shifted reloc modifier, so rewind to
2919 recover the variable name and continue parsing for the shifter. */
2920 *str = p;
2921 return parse_shifter_operand_imm (str, operand, mode);
2922 }
2923
2924 return parse_shifter_operand (str, operand, mode);
2925 }
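
/* A sketch of the "shifted reloc modifier" path above, using the local-exec
   TLS sequence as an assumed example:

     add x0, x0, #:tprel_hi12:var, lsl #12
     add x0, x0, #:tprel_lo12_nc:var

   For the first ADD, *str is rewound to just after the modifier so that
   the ", lsl #12" part is picked up by parse_shifter_operand_imm.  */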
2926
2927 /* Parse all forms of an address expression. Information is written
2928 to *OPERAND and/or inst.reloc.
2929
2930 The A64 instruction set has the following addressing modes:
2931
2932 Offset
2933 [base] // in SIMD ld/st structure
2934 [base{,#0}] // in ld/st exclusive
2935 [base{,#imm}]
2936 [base,Xm{,LSL #imm}]
2937 [base,Xm,SXTX {#imm}]
2938 [base,Wm,(S|U)XTW {#imm}]
2939 Pre-indexed
2940 [base,#imm]!
2941 Post-indexed
2942 [base],#imm
2943 [base],Xm // in SIMD ld/st structure
2944 PC-relative (literal)
2945 label
2946 =immediate
2947
2948 (As a convenience, the notation "=immediate" is permitted in conjunction
2949 with the pc-relative literal load instructions to automatically place an
2950 immediate value or symbolic address in a nearby literal pool and generate
2951 a hidden label which references it.)
2952
2953 Upon a successful parsing, the address structure in *OPERAND will be
2954 filled in the following way:
2955
2956 .base_regno = <base>
2957 .offset.is_reg // 1 if the offset is a register
2958 .offset.imm = <imm>
2959 .offset.regno = <Rm>
2960
2961 For different addressing modes defined in the A64 ISA:
2962
2963 Offset
2964 .pcrel=0; .preind=1; .postind=0; .writeback=0
2965 Pre-indexed
2966 .pcrel=0; .preind=1; .postind=0; .writeback=1
2967 Post-indexed
2968 .pcrel=0; .preind=0; .postind=1; .writeback=1
2969 PC-relative (literal)
2970 .pcrel=1; .preind=1; .postind=0; .writeback=0
2971
2972 The shift/extension information, if any, will be stored in .shifter.
2973
2974 It is the caller's responsibility to check for addressing modes not
2975 supported by the instruction, and to set inst.reloc.type. */
2976
2977 static bfd_boolean
2978 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2979 int accept_reg_post_index)
2980 {
2981 char *p = *str;
2982 int reg;
2983 int isreg32, isregzero;
2984 expressionS *exp = &inst.reloc.exp;
2985
2986 if (! skip_past_char (&p, '['))
2987 {
2988 /* =immediate or label. */
2989 operand->addr.pcrel = 1;
2990 operand->addr.preind = 1;
2991
2992 /* #:<reloc_op>:<symbol> */
2993 skip_past_char (&p, '#');
2994 if (reloc && skip_past_char (&p, ':'))
2995 {
2996 bfd_reloc_code_real_type ty;
2997 struct reloc_table_entry *entry;
2998
2999 /* Try to parse a relocation modifier. Anything else is
3000 an error. */
3001 entry = find_reloc_table_entry (&p);
3002 if (! entry)
3003 {
3004 set_syntax_error (_("unknown relocation modifier"));
3005 return FALSE;
3006 }
3007
3008 switch (operand->type)
3009 {
3010 case AARCH64_OPND_ADDR_PCREL21:
3011 /* adr */
3012 ty = entry->adr_type;
3013 break;
3014
3015 default:
3016 ty = entry->ld_literal_type;
3017 break;
3018 }
3019
3020 if (ty == 0)
3021 {
3022 set_syntax_error
3023 (_("this relocation modifier is not allowed on this "
3024 "instruction"));
3025 return FALSE;
3026 }
3027
3028 /* #:<reloc_op>: */
3029 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3030 {
3031 set_syntax_error (_("invalid relocation expression"));
3032 return FALSE;
3033 }
3034
3035 /* #:<reloc_op>:<expr> */
3036 /* Record the relocation type. */
3037 inst.reloc.type = ty;
3038 inst.reloc.pc_rel = entry->pc_rel;
3039 }
3040 else
3041 {
3042
3043 if (skip_past_char (&p, '='))
3044 /* =immediate; need to generate the literal in the literal pool. */
3045 inst.gen_lit_pool = 1;
3046
3047 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3048 {
3049 set_syntax_error (_("invalid address"));
3050 return FALSE;
3051 }
3052 }
3053
3054 *str = p;
3055 return TRUE;
3056 }
3057
3058 /* [ */
3059
3060 /* Accept SP and reject ZR */
3061 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
3062 if (reg == PARSE_FAIL || isreg32)
3063 {
3064 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3065 return FALSE;
3066 }
3067 operand->addr.base_regno = reg;
3068
3069 /* [Xn */
3070 if (skip_past_comma (&p))
3071 {
3072 /* [Xn, */
3073 operand->addr.preind = 1;
3074
3075 /* Reject SP and accept ZR */
3076 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3077 if (reg != PARSE_FAIL)
3078 {
3079 /* [Xn,Rm */
3080 operand->addr.offset.regno = reg;
3081 operand->addr.offset.is_reg = 1;
3082 /* Shifted index. */
3083 if (skip_past_comma (&p))
3084 {
3085 /* [Xn,Rm, */
3086 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3087 /* Use the diagnostics set in parse_shift, so do not set a new
3088 error message here. */
3089 return FALSE;
3090 }
3091 /* We only accept:
3092 [base,Xm{,LSL #imm}]
3093 [base,Xm,SXTX {#imm}]
3094 [base,Wm,(S|U)XTW {#imm}] */
3095 if (operand->shifter.kind == AARCH64_MOD_NONE
3096 || operand->shifter.kind == AARCH64_MOD_LSL
3097 || operand->shifter.kind == AARCH64_MOD_SXTX)
3098 {
3099 if (isreg32)
3100 {
3101 set_syntax_error (_("invalid use of 32-bit register offset"));
3102 return FALSE;
3103 }
3104 }
3105 else if (!isreg32)
3106 {
3107 set_syntax_error (_("invalid use of 64-bit register offset"));
3108 return FALSE;
3109 }
3110 }
3111 else
3112 {
3113 /* [Xn,#:<reloc_op>:<symbol> */
3114 skip_past_char (&p, '#');
3115 if (reloc && skip_past_char (&p, ':'))
3116 {
3117 struct reloc_table_entry *entry;
3118
3119 /* Try to parse a relocation modifier. Anything else is
3120 an error. */
3121 if (!(entry = find_reloc_table_entry (&p)))
3122 {
3123 set_syntax_error (_("unknown relocation modifier"));
3124 return FALSE;
3125 }
3126
3127 if (entry->ldst_type == 0)
3128 {
3129 set_syntax_error
3130 (_("this relocation modifier is not allowed on this "
3131 "instruction"));
3132 return FALSE;
3133 }
3134
3135 /* [Xn,#:<reloc_op>: */
3136 /* We now have the group relocation table entry corresponding to
3137 the name in the assembler source. Next, we parse the
3138 expression. */
3139 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3140 {
3141 set_syntax_error (_("invalid relocation expression"));
3142 return FALSE;
3143 }
3144
3145 /* [Xn,#:<reloc_op>:<expr> */
3146 /* Record the load/store relocation type. */
3147 inst.reloc.type = entry->ldst_type;
3148 inst.reloc.pc_rel = entry->pc_rel;
3149 }
3150 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3151 {
3152 set_syntax_error (_("invalid expression in the address"));
3153 return FALSE;
3154 }
3155 /* [Xn,<expr> */
3156 }
3157 }
3158
3159 if (! skip_past_char (&p, ']'))
3160 {
3161 set_syntax_error (_("']' expected"));
3162 return FALSE;
3163 }
3164
3165 if (skip_past_char (&p, '!'))
3166 {
3167 if (operand->addr.preind && operand->addr.offset.is_reg)
3168 {
3169 set_syntax_error (_("register offset not allowed in pre-indexed "
3170 "addressing mode"));
3171 return FALSE;
3172 }
3173 /* [Xn]! */
3174 operand->addr.writeback = 1;
3175 }
3176 else if (skip_past_comma (&p))
3177 {
3178 /* [Xn], */
3179 operand->addr.postind = 1;
3180 operand->addr.writeback = 1;
3181
3182 if (operand->addr.preind)
3183 {
3184 set_syntax_error (_("cannot combine pre- and post-indexing"));
3185 return FALSE;
3186 }
3187
3188 if (accept_reg_post_index
3189 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3190 &isregzero)) != PARSE_FAIL)
3191 {
3192 /* [Xn],Xm */
3193 if (isreg32)
3194 {
3195 set_syntax_error (_("invalid 32-bit register offset"));
3196 return FALSE;
3197 }
3198 operand->addr.offset.regno = reg;
3199 operand->addr.offset.is_reg = 1;
3200 }
3201 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3202 {
3203 /* [Xn],#expr */
3204 set_syntax_error (_("invalid expression in the address"));
3205 return FALSE;
3206 }
3207 }
3208
3209 /* If at this point neither .preind nor .postind is set, we have a
3210 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3211 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3212 {
3213 if (operand->addr.writeback)
3214 {
3215 /* Reject [Rn]! */
3216 set_syntax_error (_("missing offset in the pre-indexed address"));
3217 return FALSE;
3218 }
3219 operand->addr.preind = 1;
3220 inst.reloc.exp.X_op = O_constant;
3221 inst.reloc.exp.X_add_number = 0;
3222 }
3223
3224 *str = p;
3225 return TRUE;
3226 }
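
/* Examples of the forms accepted above (illustrative only):
     ldr x0, [x1]              // offset, shorthand for [x1, #0]
     ldr x0, [x1, #16]         // offset, immediate
     ldr x0, [x1, x2, lsl #3]  // offset, shifted register
     ldr x0, [x1, #16]!        // pre-indexed
     ldr x0, [x1], #16         // post-indexed
     ldr x0, =0x123456789abc   // literal pool (gen_lit_pool)
     ldr x0, label             // pc-relative literal  */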
3227
3228 /* Return TRUE on success; otherwise return FALSE. */
3229 static bfd_boolean
3230 parse_address (char **str, aarch64_opnd_info *operand,
3231 int accept_reg_post_index)
3232 {
3233 return parse_address_main (str, operand, 0, accept_reg_post_index);
3234 }
3235
3236 /* Return TRUE on success; otherwise return FALSE. */
3237 static bfd_boolean
3238 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3239 {
3240 return parse_address_main (str, operand, 1, 0);
3241 }
3242
3243 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3244 Return TRUE on success; otherwise return FALSE. */
3245 static bfd_boolean
3246 parse_half (char **str, int *internal_fixup_p)
3247 {
3248 char *p, *saved;
3249 int dummy;
3250
3251 p = *str;
3252 skip_past_char (&p, '#');
3253
3254 gas_assert (internal_fixup_p);
3255 *internal_fixup_p = 0;
3256
3257 if (*p == ':')
3258 {
3259 struct reloc_table_entry *entry;
3260
3261 /* Try to parse a relocation. Anything else is an error. */
3262 ++p;
3263 if (!(entry = find_reloc_table_entry (&p)))
3264 {
3265 set_syntax_error (_("unknown relocation modifier"));
3266 return FALSE;
3267 }
3268
3269 if (entry->movw_type == 0)
3270 {
3271 set_syntax_error
3272 (_("this relocation modifier is not allowed on this instruction"));
3273 return FALSE;
3274 }
3275
3276 inst.reloc.type = entry->movw_type;
3277 }
3278 else
3279 *internal_fixup_p = 1;
3280
3281 /* Avoid parsing a register as a general symbol. */
3282 saved = p;
3283 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3284 return FALSE;
3285 p = saved;
3286
3287 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3288 return FALSE;
3289
3290 *str = p;
3291 return TRUE;
3292 }
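
/* For example (a sketch; the modifiers come from reloc_table above):
     movz x0, #:abs_g1:sym     // bits 16-31, overflow checked
     movk x0, #:abs_g0_nc:sym  // bits 0-15, no check
   An operand without a modifier, e.g. "movz x0, #1234", takes the
   internal fixup path (*INTERNAL_FIXUP_P is set to 1).  */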
3293
3294 /* Parse an operand for an ADRP instruction:
3295 ADRP <Xd>, <label>
3296 Return TRUE on success; otherwise return FALSE. */
3297
3298 static bfd_boolean
3299 parse_adrp (char **str)
3300 {
3301 char *p;
3302
3303 p = *str;
3304 if (*p == ':')
3305 {
3306 struct reloc_table_entry *entry;
3307
3308 /* Try to parse a relocation. Anything else is an error. */
3309 ++p;
3310 if (!(entry = find_reloc_table_entry (&p)))
3311 {
3312 set_syntax_error (_("unknown relocation modifier"));
3313 return FALSE;
3314 }
3315
3316 if (entry->adrp_type == 0)
3317 {
3318 set_syntax_error
3319 (_("this relocation modifier is not allowed on this instruction"));
3320 return FALSE;
3321 }
3322
3323 inst.reloc.type = entry->adrp_type;
3324 }
3325 else
3326 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3327
3328 inst.reloc.pc_rel = 1;
3329
3330 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3331 return FALSE;
3332
3333 *str = p;
3334 return TRUE;
3335 }
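
/* A typical pairing (illustrative) is the GOT-indirect load:
     adrp x0, :got:sym
     ldr  x0, [x0, #:got_lo12:sym]
   A bare "adrp x0, sym" defaults to BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   as set above.  */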
3336
3337 /* Miscellaneous. */
3338
3339 /* Parse an option for a preload instruction. Returns the encoding for the
3340 option, or PARSE_FAIL. */
3341
3342 static int
3343 parse_pldop (char **str)
3344 {
3345 char *p, *q;
3346 const struct aarch64_name_value_pair *o;
3347
3348 p = q = *str;
3349 while (ISALNUM (*q))
3350 q++;
3351
3352 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3353 if (!o)
3354 return PARSE_FAIL;
3355
3356 *str = q;
3357 return o->value;
3358 }
3359
3360 /* Parse an option for a barrier instruction. Returns the encoding for the
3361 option, or PARSE_FAIL. */
3362
3363 static int
3364 parse_barrier (char **str)
3365 {
3366 char *p, *q;
3367 const asm_barrier_opt *o;
3368
3369 p = q = *str;
3370 while (ISALPHA (*q))
3371 q++;
3372
3373 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3374 if (!o)
3375 return PARSE_FAIL;
3376
3377 *str = q;
3378 return o->value;
3379 }
3380
3381 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3382 Returns the encoding for the option, or PARSE_FAIL.
3383
3384 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3385 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3386
3387 static int
3388 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3389 {
3390 char *p, *q;
3391 char buf[32];
3392 const aarch64_sys_reg *o;
3393 int value;
3394
3395 p = buf;
3396 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3397 if (p < buf + 31)
3398 *p++ = TOLOWER (*q);
3399 *p = '\0';
3400 /* Assert that BUF was large enough. */
3401 gas_assert (p - buf == q - *str);
3402
3403 o = hash_find (sys_regs, buf);
3404 if (!o)
3405 {
3406 if (!imple_defined_p)
3407 return PARSE_FAIL;
3408 else
3409 {
3410 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3411 unsigned int op0, op1, cn, cm, op2;
3412
3413 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3414 != 5)
3415 return PARSE_FAIL;
3416 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3417 return PARSE_FAIL;
3418 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3419 }
3420 }
3421 else
3422 {
3423 if (aarch64_sys_reg_deprecated_p (o))
3424 as_warn (_("system register name '%s' is deprecated and may be "
3425 "removed in a future release"), buf);
3426 value = o->value;
3427 }
3428
3429 *str = q;
3430 return value;
3431 }
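
/* Worked example for the implementation-defined form (register name
   assumed): "s3_0_c15_c2_0" gives op0=3, op1=0, Cn=15, Cm=2, op2=0, i.e.
   (3 << 14) | (0 << 11) | (15 << 7) | (2 << 3) | 0 = 0xc790.  */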
3432
3433 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3434 for the option, or NULL. */
3435
3436 static const aarch64_sys_ins_reg *
3437 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3438 {
3439 char *p, *q;
3440 char buf[32];
3441 const aarch64_sys_ins_reg *o;
3442
3443 p = buf;
3444 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3445 if (p < buf + 31)
3446 *p++ = TOLOWER (*q);
3447 *p = '\0';
3448
3449 o = hash_find (sys_ins_regs, buf);
3450 if (!o)
3451 return NULL;
3452
3453 *str = q;
3454 return o;
3455 }
3456 \f
3457 #define po_char_or_fail(chr) do { \
3458 if (! skip_past_char (&str, chr)) \
3459 goto failure; \
3460 } while (0)
3461
3462 #define po_reg_or_fail(regtype) do { \
3463 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3464 if (val == PARSE_FAIL) \
3465 { \
3466 set_default_error (); \
3467 goto failure; \
3468 } \
3469 } while (0)
3470
3471 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3472 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3473 &isreg32, &isregzero); \
3474 if (val == PARSE_FAIL) \
3475 { \
3476 set_default_error (); \
3477 goto failure; \
3478 } \
3479 info->reg.regno = val; \
3480 if (isreg32) \
3481 info->qualifier = AARCH64_OPND_QLF_W; \
3482 else \
3483 info->qualifier = AARCH64_OPND_QLF_X; \
3484 } while (0)
3485
3486 #define po_imm_nc_or_fail() do { \
3487 if (! parse_constant_immediate (&str, &val)) \
3488 goto failure; \
3489 } while (0)
3490
3491 #define po_imm_or_fail(min, max) do { \
3492 if (! parse_constant_immediate (&str, &val)) \
3493 goto failure; \
3494 if (val < min || val > max) \
3495 { \
3496 set_fatal_syntax_error (_("immediate value out of range "\
3497 #min " to "#max)); \
3498 goto failure; \
3499 } \
3500 } while (0)
3501
3502 #define po_misc_or_fail(expr) do { \
3503 if (!expr) \
3504 goto failure; \
3505 } while (0)
3506 \f
3507 /* encode the 12-bit imm field of Add/sub immediate */
3508 static inline uint32_t
3509 encode_addsub_imm (uint32_t imm)
3510 {
3511 return imm << 10;
3512 }
3513
3514 /* encode the shift amount field of Add/sub immediate */
3515 static inline uint32_t
3516 encode_addsub_imm_shift_amount (uint32_t cnt)
3517 {
3518 return cnt << 22;
3519 }
3520
3521
3522 /* encode the imm field of Adr instruction */
3523 static inline uint32_t
3524 encode_adr_imm (uint32_t imm)
3525 {
3526 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3527 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3528 }
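
/* Worked example: encode_adr_imm (5) places immlo = 0b01 in bits [30:29]
   and immhi = 0b1 in bit 5, giving 0x20000020.  */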
3529
3530 /* encode the immediate field of Move wide immediate */
3531 static inline uint32_t
3532 encode_movw_imm (uint32_t imm)
3533 {
3534 return imm << 5;
3535 }
3536
3537 /* encode the 26-bit offset of unconditional branch */
3538 static inline uint32_t
3539 encode_branch_ofs_26 (uint32_t ofs)
3540 {
3541 return ofs & ((1 << 26) - 1);
3542 }
3543
3544 /* encode the 19-bit offset of conditional branch and compare & branch */
3545 static inline uint32_t
3546 encode_cond_branch_ofs_19 (uint32_t ofs)
3547 {
3548 return (ofs & ((1 << 19) - 1)) << 5;
3549 }
3550
3551 /* encode the 19-bit offset of ld literal */
3552 static inline uint32_t
3553 encode_ld_lit_ofs_19 (uint32_t ofs)
3554 {
3555 return (ofs & ((1 << 19) - 1)) << 5;
3556 }
3557
3558 /* Encode the 14-bit offset of test & branch. */
3559 static inline uint32_t
3560 encode_tst_branch_ofs_14 (uint32_t ofs)
3561 {
3562 return (ofs & ((1 << 14) - 1)) << 5;
3563 }
3564
3565 /* Encode the 16-bit imm field of svc/hvc/smc. */
3566 static inline uint32_t
3567 encode_svc_imm (uint32_t imm)
3568 {
3569 return imm << 5;
3570 }
3571
3572 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3573 static inline uint32_t
3574 reencode_addsub_switch_add_sub (uint32_t opcode)
3575 {
3576 return opcode ^ (1 << 30);
3577 }
3578
3579 static inline uint32_t
3580 reencode_movzn_to_movz (uint32_t opcode)
3581 {
3582 return opcode | (1 << 30);
3583 }
3584
3585 static inline uint32_t
3586 reencode_movzn_to_movn (uint32_t opcode)
3587 {
3588 return opcode & ~(1 << 30);
3589 }
3590
3591 /* Overall per-instruction processing. */
3592
3593 /* We need to be able to fix up arbitrary expressions in some statements.
3594 This is so that we can handle symbols that are an arbitrary distance from
3595 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3596 which returns part of an address in a form which will be valid for
3597 a data instruction. We do this by pushing the expression into a symbol
3598 in the expr_section, and creating a fix for that. */
3599
3600 static fixS *
3601 fix_new_aarch64 (fragS * frag,
3602 int where,
3603 short int size, expressionS * exp, int pc_rel, int reloc)
3604 {
3605 fixS *new_fix;
3606
3607 switch (exp->X_op)
3608 {
3609 case O_constant:
3610 case O_symbol:
3611 case O_add:
3612 case O_subtract:
3613 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3614 break;
3615
3616 default:
3617 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3618 pc_rel, reloc);
3619 break;
3620 }
3621 return new_fix;
3622 }
3623 \f
3624 /* Diagnostics on operand errors. */
3625
3626 /* By default, output a verbose error message.
3627 Verbose error messages can be disabled with -mno-verbose-error. */
3628 static int verbose_error_p = 1;
3629
3630 #ifdef DEBUG_AARCH64
3631 /* N.B. this is only for the purpose of debugging. */
3632 const char* operand_mismatch_kind_names[] =
3633 {
3634 "AARCH64_OPDE_NIL",
3635 "AARCH64_OPDE_RECOVERABLE",
3636 "AARCH64_OPDE_SYNTAX_ERROR",
3637 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3638 "AARCH64_OPDE_INVALID_VARIANT",
3639 "AARCH64_OPDE_OUT_OF_RANGE",
3640 "AARCH64_OPDE_UNALIGNED",
3641 "AARCH64_OPDE_REG_LIST",
3642 "AARCH64_OPDE_OTHER_ERROR",
3643 };
3644 #endif /* DEBUG_AARCH64 */
3645
3646 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3647
3648 When multiple errors of different kinds are found in the same assembly
3649 line, only the error of the highest severity will be picked up for
3650 issuing the diagnostics. */
3651
3652 static inline bfd_boolean
3653 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3654 enum aarch64_operand_error_kind rhs)
3655 {
3656 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3657 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3658 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3659 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3660 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3661 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3662 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3663 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3664 return lhs > rhs;
3665 }
3666
3667 /* Helper routine to get the mnemonic name from the assembly instruction
3668 line; it should only be called for diagnostic purposes, as a string
3669 copy operation is involved, which may affect runtime performance
3670 if used elsewhere. */
3671
3672 static const char*
3673 get_mnemonic_name (const char *str)
3674 {
3675 static char mnemonic[32];
3676 char *ptr;
3677
3678 /* Get the first 31 bytes and assume that the full name is included. */
3679 strncpy (mnemonic, str, 31);
3680 mnemonic[31] = '\0';
3681
3682 /* Scan up to the end of the mnemonic, which must end in white space,
3683 '.', or end of string. */
3684 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3685 ;
3686
3687 *ptr = '\0';
3688
3689 /* Append '...' to the truncated long name. */
3690 if (ptr - mnemonic == 31)
3691 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3692
3693 return mnemonic;
3694 }
3695
3696 static void
3697 reset_aarch64_instruction (aarch64_instruction *instruction)
3698 {
3699 memset (instruction, '\0', sizeof (aarch64_instruction));
3700 instruction->reloc.type = BFD_RELOC_UNUSED;
3701 }
3702
3703 /* Data structures storing one user error in the assembly code related to
3704 operands. */
3705
3706 struct operand_error_record
3707 {
3708 const aarch64_opcode *opcode;
3709 aarch64_operand_error detail;
3710 struct operand_error_record *next;
3711 };
3712
3713 typedef struct operand_error_record operand_error_record;
3714
3715 struct operand_errors
3716 {
3717 operand_error_record *head;
3718 operand_error_record *tail;
3719 };
3720
3721 typedef struct operand_errors operand_errors;
3722
3723 /* Top-level data structure reporting user errors for the current line of
3724 the assembly code.
3725 The way md_assemble works is that all opcodes sharing the same mnemonic
3726 name are iterated to find a match to the assembly line. In this data
3727 structure, each such opcode will have one operand_error_record
3728 allocated and inserted. In other words, excessive errors related to
3729 a single opcode are disregarded. */
3730 operand_errors operand_error_report;
3731
3732 /* Free record nodes. */
3733 static operand_error_record *free_opnd_error_record_nodes = NULL;
3734
3735 /* Initialize the data structure that stores the operand mismatch
3736 information on assembling one line of the assembly code. */
3737 static void
3738 init_operand_error_report (void)
3739 {
3740 if (operand_error_report.head != NULL)
3741 {
3742 gas_assert (operand_error_report.tail != NULL);
3743 operand_error_report.tail->next = free_opnd_error_record_nodes;
3744 free_opnd_error_record_nodes = operand_error_report.head;
3745 operand_error_report.head = NULL;
3746 operand_error_report.tail = NULL;
3747 return;
3748 }
3749 gas_assert (operand_error_report.tail == NULL);
3750 }
3751
3752 /* Return TRUE if some operand error has been recorded during the
3753 parsing of the current assembly line using the opcode *OPCODE;
3754 otherwise return FALSE. */
3755 static inline bfd_boolean
3756 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3757 {
3758 operand_error_record *record = operand_error_report.head;
3759 return record && record->opcode == opcode;
3760 }
3761
3762 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3763 OPCODE field is initialized with OPCODE.
3764 N.B. there is only one record for each opcode, i.e. at most one error
3765 is recorded for each instruction template. */
3766
3767 static void
3768 add_operand_error_record (const operand_error_record* new_record)
3769 {
3770 const aarch64_opcode *opcode = new_record->opcode;
3771 operand_error_record* record = operand_error_report.head;
3772
3773 /* The record may have been created for this opcode. If not, we need
3774 to prepare one. */
3775 if (! opcode_has_operand_error_p (opcode))
3776 {
3777 /* Get one empty record. */
3778 if (free_opnd_error_record_nodes == NULL)
3779 {
3780 record = xmalloc (sizeof (operand_error_record));
3781 if (record == NULL)
3782 abort ();
3783 }
3784 else
3785 {
3786 record = free_opnd_error_record_nodes;
3787 free_opnd_error_record_nodes = record->next;
3788 }
3789 record->opcode = opcode;
3790 /* Insert at the head. */
3791 record->next = operand_error_report.head;
3792 operand_error_report.head = record;
3793 if (operand_error_report.tail == NULL)
3794 operand_error_report.tail = record;
3795 }
3796 else if (record->detail.kind != AARCH64_OPDE_NIL
3797 && record->detail.index <= new_record->detail.index
3798 && operand_error_higher_severity_p (record->detail.kind,
3799 new_record->detail.kind))
3800 {
3801 /* In the case of multiple errors found on operands related with a
3802 single opcode, only record the error of the leftmost operand and
3803 only if the error is of higher severity. */
3804 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3805 " the existing error %s on operand %d",
3806 operand_mismatch_kind_names[new_record->detail.kind],
3807 new_record->detail.index,
3808 operand_mismatch_kind_names[record->detail.kind],
3809 record->detail.index);
3810 return;
3811 }
3812
3813 record->detail = new_record->detail;
3814 }
3815
3816 static inline void
3817 record_operand_error_info (const aarch64_opcode *opcode,
3818 aarch64_operand_error *error_info)
3819 {
3820 operand_error_record record;
3821 record.opcode = opcode;
3822 record.detail = *error_info;
3823 add_operand_error_record (&record);
3824 }
3825
3826 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3827 error message *ERROR, for operand IDX (count from 0). */
3828
3829 static void
3830 record_operand_error (const aarch64_opcode *opcode, int idx,
3831 enum aarch64_operand_error_kind kind,
3832 const char* error)
3833 {
3834 aarch64_operand_error info;
3835 memset(&info, 0, sizeof (info));
3836 info.index = idx;
3837 info.kind = kind;
3838 info.error = error;
3839 record_operand_error_info (opcode, &info);
3840 }
3841
3842 static void
3843 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3844 enum aarch64_operand_error_kind kind,
3845 const char* error, const int *extra_data)
3846 {
3847 aarch64_operand_error info;
3848 info.index = idx;
3849 info.kind = kind;
3850 info.error = error;
3851 info.data[0] = extra_data[0];
3852 info.data[1] = extra_data[1];
3853 info.data[2] = extra_data[2];
3854 record_operand_error_info (opcode, &info);
3855 }
3856
3857 static void
3858 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3859 const char* error, int lower_bound,
3860 int upper_bound)
3861 {
3862 int data[3] = {lower_bound, upper_bound, 0};
3863 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3864 error, data);
3865 }
3866
3867 /* Remove the operand error record for *OPCODE. */
3868 static void ATTRIBUTE_UNUSED
3869 remove_operand_error_record (const aarch64_opcode *opcode)
3870 {
3871 if (opcode_has_operand_error_p (opcode))
3872 {
3873 operand_error_record* record = operand_error_report.head;
3874 gas_assert (record != NULL && operand_error_report.tail != NULL);
3875 operand_error_report.head = record->next;
3876 record->next = free_opnd_error_record_nodes;
3877 free_opnd_error_record_nodes = record;
3878 if (operand_error_report.head == NULL)
3879 {
3880 gas_assert (operand_error_report.tail == record);
3881 operand_error_report.tail = NULL;
3882 }
3883 }
3884 }
3885
3886 /* Given the instruction in *INSTR, return the index of the best matched
3887 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3888
3889 Return -1 if there is no qualifier sequence; return the first match
3890 if multiple matches are found. */
3891
3892 static int
3893 find_best_match (const aarch64_inst *instr,
3894 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3895 {
3896 int i, num_opnds, max_num_matched, idx;
3897
3898 num_opnds = aarch64_num_of_operands (instr->opcode);
3899 if (num_opnds == 0)
3900 {
3901 DEBUG_TRACE ("no operand");
3902 return -1;
3903 }
3904
3905 max_num_matched = 0;
3906 idx = -1;
3907
3908 /* For each pattern. */
3909 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3910 {
3911 int j, num_matched;
3912 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3913
3914 /* Most opcodes have far fewer patterns in the list. */
3915 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3916 {
3917 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3918 if (i != 0 && idx == -1)
3919 /* If nothing has been matched, return the 1st sequence. */
3920 idx = 0;
3921 break;
3922 }
3923
3924 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3925 if (*qualifiers == instr->operands[j].qualifier)
3926 ++num_matched;
3927
3928 if (num_matched > max_num_matched)
3929 {
3930 max_num_matched = num_matched;
3931 idx = i;
3932 }
3933 }
3934
3935 DEBUG_TRACE ("return with %d", idx);
3936 return idx;
3937 }
3938
3939 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3940 corresponding operands in *INSTR. */
3941
3942 static inline void
3943 assign_qualifier_sequence (aarch64_inst *instr,
3944 const aarch64_opnd_qualifier_t *qualifiers)
3945 {
3946 int i = 0;
3947 int num_opnds = aarch64_num_of_operands (instr->opcode);
3948 gas_assert (num_opnds);
3949 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3950 instr->operands[i].qualifier = *qualifiers;
3951 }
3952
3953 /* Print operands for diagnostic purposes. */
3954
3955 static void
3956 print_operands (char *buf, const aarch64_opcode *opcode,
3957 const aarch64_opnd_info *opnds)
3958 {
3959 int i;
3960
3961 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3962 {
3963 const size_t size = 128;
3964 char str[size];
3965
3966 /* We rely mainly on the opcode's operand info; however, we also look
3967 into inst->operands to support the printing of the optional
3968 operand.
3969 The two operand codes should be the same in all cases, except
3970 when the operand is optional. */
3971 if (opcode->operands[i] == AARCH64_OPND_NIL
3972 || opnds[i].type == AARCH64_OPND_NIL)
3973 break;
3974
3975 /* Generate the operand string in STR. */
3976 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3977
3978 /* Delimiter. */
3979 if (str[0] != '\0')
3980 strcat (buf, i == 0 ? " " : ",");
3981
3982 /* Append the operand string. */
3983 strcat (buf, str);
3984 }
3985 }
3986
3987 /* Send an informational string to stderr. */
3988
3989 static void
3990 output_info (const char *format, ...)
3991 {
3992 char *file;
3993 unsigned int line;
3994 va_list args;
3995
3996 as_where (&file, &line);
3997 if (file)
3998 {
3999 if (line != 0)
4000 fprintf (stderr, "%s:%u: ", file, line);
4001 else
4002 fprintf (stderr, "%s: ", file);
4003 }
4004 fprintf (stderr, _("Info: "));
4005 va_start (args, format);
4006 vfprintf (stderr, format, args);
4007 va_end (args);
4008 (void) putc ('\n', stderr);
4009 }
4010
4011 /* Output one operand error record. */
4012
4013 static void
4014 output_operand_error_record (const operand_error_record *record, char *str)
4015 {
4016 const aarch64_operand_error *detail = &record->detail;
4017 int idx = detail->index;
4018 const aarch64_opcode *opcode = record->opcode;
4019 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4020 : AARCH64_OPND_NIL);
4021
4022 switch (detail->kind)
4023 {
4024 case AARCH64_OPDE_NIL:
4025 gas_assert (0);
4026 break;
4027
4028 case AARCH64_OPDE_SYNTAX_ERROR:
4029 case AARCH64_OPDE_RECOVERABLE:
4030 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4031 case AARCH64_OPDE_OTHER_ERROR:
4032 /* Use the prepared error message if there is one, otherwise use the
4033 operand description string to describe the error. */
4034 if (detail->error != NULL)
4035 {
4036 if (idx < 0)
4037 as_bad (_("%s -- `%s'"), detail->error, str);
4038 else
4039 as_bad (_("%s at operand %d -- `%s'"),
4040 detail->error, idx + 1, str);
4041 }
4042 else
4043 {
4044 gas_assert (idx >= 0);
4045 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4046 aarch64_get_operand_desc (opd_code), str);
4047 }
4048 break;
4049
4050 case AARCH64_OPDE_INVALID_VARIANT:
4051 as_bad (_("operand mismatch -- `%s'"), str);
4052 if (verbose_error_p)
4053 {
4054 /* We will try to correct the erroneous instruction and also provide
4055 more information e.g. all other valid variants.
4056
4057 The string representation of the corrected instruction and other
4058 valid variants are generated by
4059
4060 1) obtaining the intermediate representation of the erroneous
4061 instruction;
4062 2) manipulating the IR, e.g. replacing the operand qualifier;
4063 3) printing out the instruction by calling the printer functions
4064 shared with the disassembler.
4065
4066 The limitation of this method is that the exact input assembly
4067 line cannot be accurately reproduced in some cases, for example an
4068 optional operand present in the actual assembly line will be
4069 omitted in the output; likewise for the optional syntax rules,
4070 e.g. the # before the immediate. Another limitation is that the
4071 assembly symbols and relocation operations in the assembly line
4072 currently cannot be printed out in the error report. Last but not
4073 least, when other errors co-exist with this error, the
4074 'corrected' instruction may still be incorrect, e.g. given
4075 'ldnp h0,h1,[x0,#6]!'
4076 this diagnosis will provide the version:
4077 'ldnp s0,s1,[x0,#6]!'
4078 which is still not right. */
4079 size_t len = strlen (get_mnemonic_name (str));
4080 int i, qlf_idx;
4081 bfd_boolean result;
4082 const size_t size = 2048;
4083 char buf[size];
4084 aarch64_inst *inst_base = &inst.base;
4085 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4086
4087 /* Init inst. */
4088 reset_aarch64_instruction (&inst);
4089 inst_base->opcode = opcode;
4090
4091 /* Reset the error report so that there is no side effect on the
4092 following operand parsing. */
4093 init_operand_error_report ();
4094
4095 /* Fill inst. */
4096 result = parse_operands (str + len, opcode)
4097 && programmer_friendly_fixup (&inst);
4098 gas_assert (result);
4099 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4100 NULL, NULL);
4101 gas_assert (!result);
4102
4103 /* Find the most matched qualifier sequence. */
4104 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4105 gas_assert (qlf_idx > -1);
4106
4107 /* Assign the qualifiers. */
4108 assign_qualifier_sequence (inst_base,
4109 opcode->qualifiers_list[qlf_idx]);
4110
4111 /* Print the hint. */
4112 output_info (_(" did you mean this?"));
4113 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4114 print_operands (buf, opcode, inst_base->operands);
4115 output_info (_(" %s"), buf);
4116
4117 /* Print out the other valid variant(s), if any. */
4118 if (qlf_idx != 0 ||
4119 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4120 output_info (_(" other valid variant(s):"));
4121
4122 /* For each pattern. */
4123 qualifiers_list = opcode->qualifiers_list;
4124 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4125 {
4126 /* Most opcodes have far fewer patterns in the list.
4127 The first NIL qualifier indicates the end of the list. */
4128 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4129 break;
4130
4131 if (i != qlf_idx)
4132 {
4133 /* Mnemonic name. */
4134 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4135
4136 /* Assign the qualifiers. */
4137 assign_qualifier_sequence (inst_base, *qualifiers_list);
4138
4139 /* Print instruction. */
4140 print_operands (buf, opcode, inst_base->operands);
4141
4142 output_info (_(" %s"), buf);
4143 }
4144 }
4145 }
4146 break;
4147
4148 case AARCH64_OPDE_OUT_OF_RANGE:
4149 if (detail->data[0] != detail->data[1])
4150 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4151 detail->error ? detail->error : _("immediate value"),
4152 detail->data[0], detail->data[1], idx + 1, str);
4153 else
4154 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4155 detail->error ? detail->error : _("immediate value"),
4156 detail->data[0], idx + 1, str);
4157 break;
4158
4159 case AARCH64_OPDE_REG_LIST:
4160 if (detail->data[0] == 1)
4161 as_bad (_("invalid number of registers in the list; "
4162 "only 1 register is expected at operand %d -- `%s'"),
4163 idx + 1, str);
4164 else
4165 as_bad (_("invalid number of registers in the list; "
4166 "%d registers are expected at operand %d -- `%s'"),
4167 detail->data[0], idx + 1, str);
4168 break;
4169
4170 case AARCH64_OPDE_UNALIGNED:
4171 as_bad (_("immediate value should be a multiple of "
4172 "%d at operand %d -- `%s'"),
4173 detail->data[0], idx + 1, str);
4174 break;
4175
4176 default:
4177 gas_assert (0);
4178 break;
4179 }
4180 }
4181
4182 /* Process and output the error message about the operand mismatching.
4183
4184 When this function is called, the operand error information has
4185 been collected for an assembly line and there will be multiple
4186 errors in the case of multiple instruction templates; output the
4187 error message that most closely describes the problem. */
4188
4189 static void
4190 output_operand_error_report (char *str)
4191 {
4192 int largest_error_pos;
4193 const char *msg = NULL;
4194 enum aarch64_operand_error_kind kind;
4195 operand_error_record *curr;
4196 operand_error_record *head = operand_error_report.head;
4197 operand_error_record *record = NULL;
4198
4199 /* No error to report. */
4200 if (head == NULL)
4201 return;
4202
4203 gas_assert (head != NULL && operand_error_report.tail != NULL);
4204
4205 /* Only one error. */
4206 if (head == operand_error_report.tail)
4207 {
4208 DEBUG_TRACE ("single opcode entry with error kind: %s",
4209 operand_mismatch_kind_names[head->detail.kind]);
4210 output_operand_error_record (head, str);
4211 return;
4212 }
4213
4214 /* Find the error kind of the highest severity. */
4215 DEBUG_TRACE ("multiple opcode entries with error kind");
4216 kind = AARCH64_OPDE_NIL;
4217 for (curr = head; curr != NULL; curr = curr->next)
4218 {
4219 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4220 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4221 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4222 kind = curr->detail.kind;
4223 }
4224 gas_assert (kind != AARCH64_OPDE_NIL);
4225
4226 /* Pick one of the errors of KIND to report. */
4227 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4228 for (curr = head; curr != NULL; curr = curr->next)
4229 {
4230 if (curr->detail.kind != kind)
4231 continue;
4232 /* If there are multiple errors, pick up the one with the highest
4233 mismatching operand index. In the case of multiple errors with
4234 the equally highest operand index, pick up the first one or the
4235 first one with non-NULL error message. */
4236 if (curr->detail.index > largest_error_pos
4237 || (curr->detail.index == largest_error_pos && msg == NULL
4238 && curr->detail.error != NULL))
4239 {
4240 largest_error_pos = curr->detail.index;
4241 record = curr;
4242 msg = record->detail.error;
4243 }
4244 }
4245
4246 gas_assert (largest_error_pos != -2 && record != NULL);
4247 DEBUG_TRACE ("Pick up error kind %s to report",
4248 operand_mismatch_kind_names[record->detail.kind]);
4249
4250 /* Output. */
4251 output_operand_error_record (record, str);
4252 }
4253 \f
4254 /* Write an AARCH64 instruction to buf - always little-endian. */
4255 static void
4256 put_aarch64_insn (char *buf, uint32_t insn)
4257 {
4258 unsigned char *where = (unsigned char *) buf;
4259 where[0] = insn;
4260 where[1] = insn >> 8;
4261 where[2] = insn >> 16;
4262 where[3] = insn >> 24;
4263 }
4264
4265 static uint32_t
4266 get_aarch64_insn (char *buf)
4267 {
4268 unsigned char *where = (unsigned char *) buf;
4269 uint32_t result;
4270 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4271 return result;
4272 }
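
/* Example (illustrative only): put_aarch64_insn stores the NOP encoding
   0xd503201f as the byte sequence 1f 20 03 d5, and get_aarch64_insn on
   the same buffer yields 0xd503201f again, independently of the host or
   target byte order configured elsewhere. */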
4273
4274 static void
4275 output_inst (struct aarch64_inst *new_inst)
4276 {
4277 char *to = NULL;
4278
4279 to = frag_more (INSN_SIZE);
4280
4281 frag_now->tc_frag_data.recorded = 1;
4282
4283 put_aarch64_insn (to, inst.base.value);
4284
4285 if (inst.reloc.type != BFD_RELOC_UNUSED)
4286 {
4287 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4288 INSN_SIZE, &inst.reloc.exp,
4289 inst.reloc.pc_rel,
4290 inst.reloc.type);
4291 DEBUG_TRACE ("Prepared relocation fix up");
4292 /* Don't check the addend value against the instruction size,
4293 that's the job of our code in md_apply_fix(). */
4294 fixp->fx_no_overflow = 1;
4295 if (new_inst != NULL)
4296 fixp->tc_fix_data.inst = new_inst;
4297 if (aarch64_gas_internal_fixup_p ())
4298 {
4299 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4300 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4301 fixp->fx_addnumber = inst.reloc.flags;
4302 }
4303 }
4304
4305 dwarf2_emit_insn (INSN_SIZE);
4306 }
4307
4308 /* Link together opcodes of the same name. */
4309
4310 struct templates
4311 {
4312 aarch64_opcode *opcode;
4313 struct templates *next;
4314 };
4315
4316 typedef struct templates templates;
4317
4318 static templates *
4319 lookup_mnemonic (const char *start, int len)
4320 {
4321 templates *templ = NULL;
4322
4323 templ = hash_find_n (aarch64_ops_hsh, start, len);
4324 return templ;
4325 }
4326
4327 /* Subroutine of md_assemble, responsible for looking up the primary
4328 opcode from the mnemonic the user wrote. STR points to the
4329 beginning of the mnemonic. */
4330
4331 static templates *
4332 opcode_lookup (char **str)
4333 {
4334 char *end, *base;
4335 const aarch64_cond *cond;
4336 char condname[16];
4337 int len;
4338
4339 /* Scan up to the end of the mnemonic, which must end in white space,
4340 '.', or end of string. */
4341 for (base = end = *str; is_part_of_name(*end); end++)
4342 if (*end == '.')
4343 break;
4344
4345 if (end == base)
4346 return 0;
4347
4348 inst.cond = COND_ALWAYS;
4349
4350 /* Handle a possible condition. */
4351 if (end[0] == '.')
4352 {
4353 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4354 if (cond)
4355 {
4356 inst.cond = cond->value;
4357 *str = end + 3;
4358 }
4359 else
4360 {
4361 *str = end;
4362 return 0;
4363 }
4364 }
4365 else
4366 *str = end;
4367
4368 len = end - base;
4369
4370 if (inst.cond == COND_ALWAYS)
4371 {
4372 /* Look for unaffixed mnemonic. */
4373 return lookup_mnemonic (base, len);
4374 }
4375 else if (len <= 13)
4376 {
4377 /* Append ".c" to the mnemonic if conditional. */
4378 memcpy (condname, base, len);
4379 memcpy (condname + len, ".c", 2);
4380 base = condname;
4381 len += 2;
4382 return lookup_mnemonic (base, len);
4383 }
4384
4385 return NULL;
4386 }
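
/* For example (illustrative only): given "b.eq lbl", the code above
   records the EQ condition and looks the mnemonic up under the name
   "b.c", which is how the conditional-branch entry is expected to be
   keyed in the opcode hash; for "add x0, x1, #1" no '.' suffix is
   found and the plain mnemonic "add" is looked up instead. */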
4387
4388 /* Internal helper routine converting a vector neon_type_el structure
4389 *VECTYPE to a corresponding operand qualifier. */
4390
4391 static inline aarch64_opnd_qualifier_t
4392 vectype_to_qualifier (const struct neon_type_el *vectype)
4393 {
4394 /* Element size in bytes indexed by neon_el_type. */
4395 const unsigned char ele_size[5]
4396 = {1, 2, 4, 8, 16};
4397
4398 if (!vectype->defined || vectype->type == NT_invtype)
4399 goto vectype_conversion_fail;
4400
4401 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4402
4403 if (vectype->defined & NTA_HASINDEX)
4404 /* Vector element register. */
4405 return AARCH64_OPND_QLF_S_B + vectype->type;
4406 else
4407 {
4408 /* Vector register. */
4409 int reg_size = ele_size[vectype->type] * vectype->width;
4410 unsigned offset;
4411 if (reg_size != 16 && reg_size != 8)
4412 goto vectype_conversion_fail;
4413 /* The conversion is calculated based on the relation of the order of
4414 qualifiers to the vector element size and vector register size. */
4415 offset = (vectype->type == NT_q)
4416 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4417 gas_assert (offset <= 8);
4418 return AARCH64_OPND_QLF_V_8B + offset;
4419 }
4420
4421 vectype_conversion_fail:
4422 first_error (_("bad vector arrangement type"));
4423 return AARCH64_OPND_QLF_NIL;
4424 }
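
/* Worked example (illustrative, assuming the qualifier enum runs
   V_8B, V_16B, V_4H, V_8H, V_2S, V_4S, V_1D, V_2D, V_1Q in that
   order): the arrangement ".4s" parses to type NT_s and width 4,
   giving reg_size = 4 * 4 = 16 and offset = (2 << 1) + (16 >> 4) = 5,
   i.e. AARCH64_OPND_QLF_V_4S; ".8b" gives reg_size 8 and offset 0,
   i.e. AARCH64_OPND_QLF_V_8B. */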
4425
4426 /* Process an optional operand that has been omitted from the assembly line.
4427 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4428 instruction's opcode entry while IDX is the index of this omitted operand.
4429 */
4430
4431 static void
4432 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4433 int idx, aarch64_opnd_info *operand)
4434 {
4435 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4436 gas_assert (optional_operand_p (opcode, idx));
4437 gas_assert (!operand->present);
4438
4439 switch (type)
4440 {
4441 case AARCH64_OPND_Rd:
4442 case AARCH64_OPND_Rn:
4443 case AARCH64_OPND_Rm:
4444 case AARCH64_OPND_Rt:
4445 case AARCH64_OPND_Rt2:
4446 case AARCH64_OPND_Rs:
4447 case AARCH64_OPND_Ra:
4448 case AARCH64_OPND_Rt_SYS:
4449 case AARCH64_OPND_Rd_SP:
4450 case AARCH64_OPND_Rn_SP:
4451 case AARCH64_OPND_Fd:
4452 case AARCH64_OPND_Fn:
4453 case AARCH64_OPND_Fm:
4454 case AARCH64_OPND_Fa:
4455 case AARCH64_OPND_Ft:
4456 case AARCH64_OPND_Ft2:
4457 case AARCH64_OPND_Sd:
4458 case AARCH64_OPND_Sn:
4459 case AARCH64_OPND_Sm:
4460 case AARCH64_OPND_Vd:
4461 case AARCH64_OPND_Vn:
4462 case AARCH64_OPND_Vm:
4463 case AARCH64_OPND_VdD1:
4464 case AARCH64_OPND_VnD1:
4465 operand->reg.regno = default_value;
4466 break;
4467
4468 case AARCH64_OPND_Ed:
4469 case AARCH64_OPND_En:
4470 case AARCH64_OPND_Em:
4471 operand->reglane.regno = default_value;
4472 break;
4473
4474 case AARCH64_OPND_IDX:
4475 case AARCH64_OPND_BIT_NUM:
4476 case AARCH64_OPND_IMMR:
4477 case AARCH64_OPND_IMMS:
4478 case AARCH64_OPND_SHLL_IMM:
4479 case AARCH64_OPND_IMM_VLSL:
4480 case AARCH64_OPND_IMM_VLSR:
4481 case AARCH64_OPND_CCMP_IMM:
4482 case AARCH64_OPND_FBITS:
4483 case AARCH64_OPND_UIMM4:
4484 case AARCH64_OPND_UIMM3_OP1:
4485 case AARCH64_OPND_UIMM3_OP2:
4486 case AARCH64_OPND_IMM:
4487 case AARCH64_OPND_WIDTH:
4488 case AARCH64_OPND_UIMM7:
4489 case AARCH64_OPND_NZCV:
4490 operand->imm.value = default_value;
4491 break;
4492
4493 case AARCH64_OPND_EXCEPTION:
4494 inst.reloc.type = BFD_RELOC_UNUSED;
4495 break;
4496
4497 case AARCH64_OPND_BARRIER_ISB:
4498 operand->barrier = aarch64_barrier_options + default_value;
4499 break;
4500 default:
4501 break;
4502 }
4503 }
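
/* For instance (illustrative only): "ret" omits its optional Rn
   operand, so the default value recorded in the opcode table (30,
   i.e. X30) is filled in here and the instruction assembles exactly
   as "ret x30" would. */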
4504
4505 /* Process the relocation type for move wide instructions.
4506 Return TRUE on success; otherwise return FALSE. */
4507
4508 static bfd_boolean
4509 process_movw_reloc_info (void)
4510 {
4511 int is32;
4512 unsigned shift;
4513
4514 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4515
4516 if (inst.base.opcode->op == OP_MOVK)
4517 switch (inst.reloc.type)
4518 {
4519 case BFD_RELOC_AARCH64_MOVW_G0_S:
4520 case BFD_RELOC_AARCH64_MOVW_G1_S:
4521 case BFD_RELOC_AARCH64_MOVW_G2_S:
4522 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4523 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4524 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4525 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4526 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4527 set_syntax_error
4528 (_("the specified relocation type is not allowed for MOVK"));
4529 return FALSE;
4530 default:
4531 break;
4532 }
4533
4534 switch (inst.reloc.type)
4535 {
4536 case BFD_RELOC_AARCH64_MOVW_G0:
4537 case BFD_RELOC_AARCH64_MOVW_G0_S:
4538 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4539 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4540 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4541 shift = 0;
4542 break;
4543 case BFD_RELOC_AARCH64_MOVW_G1:
4544 case BFD_RELOC_AARCH64_MOVW_G1_S:
4545 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4546 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4547 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4548 shift = 16;
4549 break;
4550 case BFD_RELOC_AARCH64_MOVW_G2:
4551 case BFD_RELOC_AARCH64_MOVW_G2_S:
4552 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4553 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4554 if (is32)
4555 {
4556 set_fatal_syntax_error
4557 (_("the specified relocation type is not allowed for 32-bit "
4558 "register"));
4559 return FALSE;
4560 }
4561 shift = 32;
4562 break;
4563 case BFD_RELOC_AARCH64_MOVW_G3:
4564 if (is32)
4565 {
4566 set_fatal_syntax_error
4567 (_("the specified relocation type is not allowed for 32-bit "
4568 "register"));
4569 return FALSE;
4570 }
4571 shift = 48;
4572 break;
4573 default:
4574 /* More cases should be added when more MOVW-related relocation types
4575 are supported in GAS. */
4576 gas_assert (aarch64_gas_internal_fixup_p ());
4577 /* The shift amount should have already been set by the parser. */
4578 return TRUE;
4579 }
4580 inst.base.operands[1].shifter.amount = shift;
4581 return TRUE;
4582 }
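
/* Example (illustrative only): "movz x0, #:abs_g1:sym" carries
   BFD_RELOC_AARCH64_MOVW_G1 and therefore receives an implicit
   LSL #16 here, whereas "movk w0, #:tprel_g1:var" is rejected above
   because that relocation type is in the list disallowed for MOVK. */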
4583
4584 /* A primitive log calculator. */
4585
4586 static inline unsigned int
4587 get_logsz (unsigned int size)
4588 {
4589 const unsigned char ls[16] =
4590 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4591 if (size > 16)
4592 {
4593 gas_assert (0);
4594 return -1;
4595 }
4596 gas_assert (ls[size - 1] != (unsigned char)-1);
4597 return ls[size - 1];
4598 }
4599
4600 /* Determine and return the real reloc type code for an instruction
4601 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4602
4603 static inline bfd_reloc_code_real_type
4604 ldst_lo12_determine_real_reloc_type (void)
4605 {
4606 int logsz;
4607 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4608 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4609
4610 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4611 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4612 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4613 BFD_RELOC_AARCH64_LDST128_LO12
4614 };
4615
4616 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4617 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4618
4619 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4620 opd1_qlf =
4621 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4622 1, opd0_qlf, 0);
4623 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4624
4625 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4626 gas_assert (logsz >= 0 && logsz <= 4);
4627
4628 return reloc_ldst_lo12[logsz];
4629 }
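
/* Example (illustrative only): for "ldr x0, [x1, #:lo12:sym]" the
   expected qualifier of operand 1 is an 8-byte access, so logsz is 3
   and BFD_RELOC_AARCH64_LDST64_LO12 is selected; a corresponding
   "ldrb w0, [x1, #:lo12:sym]" would resolve to
   BFD_RELOC_AARCH64_LDST8_LO12 instead. */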
4630
4631 /* Check whether a register list REGINFO is valid. The registers must be
4632 numbered in increasing order (modulo 32), in increments of one or two.
4633
4634 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4635 increments of two.
4636
4637 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4638
4639 static bfd_boolean
4640 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4641 {
4642 uint32_t i, nb_regs, prev_regno, incr;
4643
4644 nb_regs = 1 + (reginfo & 0x3);
4645 reginfo >>= 2;
4646 prev_regno = reginfo & 0x1f;
4647 incr = accept_alternate ? 2 : 1;
4648
4649 for (i = 1; i < nb_regs; ++i)
4650 {
4651 uint32_t curr_regno;
4652 reginfo >>= 5;
4653 curr_regno = reginfo & 0x1f;
4654 if (curr_regno != ((prev_regno + incr) & 0x1f))
4655 return FALSE;
4656 prev_regno = curr_regno;
4657 }
4658
4659 return TRUE;
4660 }
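
/* Example of the REGINFO layout decoded above (illustrative only):
   bits [1:0] hold the register count minus one and each following
   5-bit field holds a register number, lowest field first.  The list
   {v30.4s, v31.4s, v0.4s} is thus (0 << 12) | (31 << 7) | (30 << 2) | 2
   and is accepted, since the numbers increase by one modulo 32; a
   list such as {v0.8h, v2.8h} is only accepted when ACCEPT_ALTERNATE
   is non-zero. */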
4661
4662 /* Generic instruction operand parser. This does no encoding and no
4663 semantic validation; it merely squirrels values away in the inst
4664 structure. Returns TRUE or FALSE depending on whether the
4665 specified grammar matched. */
4666
4667 static bfd_boolean
4668 parse_operands (char *str, const aarch64_opcode *opcode)
4669 {
4670 int i;
4671 char *backtrack_pos = 0;
4672 const enum aarch64_opnd *operands = opcode->operands;
4673
4674 clear_error ();
4675 skip_whitespace (str);
4676
4677 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4678 {
4679 int64_t val;
4680 int isreg32, isregzero;
4681 int comma_skipped_p = 0;
4682 aarch64_reg_type rtype;
4683 struct neon_type_el vectype;
4684 aarch64_opnd_info *info = &inst.base.operands[i];
4685
4686 DEBUG_TRACE ("parse operand %d", i);
4687
4688 /* Assign the operand code. */
4689 info->type = operands[i];
4690
4691 if (optional_operand_p (opcode, i))
4692 {
4693 /* Remember where we are in case we need to backtrack. */
4694 gas_assert (!backtrack_pos);
4695 backtrack_pos = str;
4696 }
4697
4698 /* Expect a comma between operands; the backtrack mechanism will take
4699 care of cases of an omitted optional operand. */
4700 if (i > 0 && ! skip_past_char (&str, ','))
4701 {
4702 set_syntax_error (_("comma expected between operands"));
4703 goto failure;
4704 }
4705 else
4706 comma_skipped_p = 1;
4707
4708 switch (operands[i])
4709 {
4710 case AARCH64_OPND_Rd:
4711 case AARCH64_OPND_Rn:
4712 case AARCH64_OPND_Rm:
4713 case AARCH64_OPND_Rt:
4714 case AARCH64_OPND_Rt2:
4715 case AARCH64_OPND_Rs:
4716 case AARCH64_OPND_Ra:
4717 case AARCH64_OPND_Rt_SYS:
4718 case AARCH64_OPND_PAIRREG:
4719 po_int_reg_or_fail (1, 0);
4720 break;
4721
4722 case AARCH64_OPND_Rd_SP:
4723 case AARCH64_OPND_Rn_SP:
4724 po_int_reg_or_fail (0, 1);
4725 break;
4726
4727 case AARCH64_OPND_Rm_EXT:
4728 case AARCH64_OPND_Rm_SFT:
4729 po_misc_or_fail (parse_shifter_operand
4730 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4731 ? SHIFTED_ARITH_IMM
4732 : SHIFTED_LOGIC_IMM)));
4733 if (!info->shifter.operator_present)
4734 {
4735 /* Default to LSL if not present. Libopcodes prefers shifter
4736 kind to be explicit. */
4737 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4738 info->shifter.kind = AARCH64_MOD_LSL;
4739 /* For Rm_EXT, libopcodes will carry out further check on whether
4740 or not stack pointer is used in the instruction (Recall that
4741 "the extend operator is not optional unless at least one of
4742 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4743 }
4744 break;
4745
4746 case AARCH64_OPND_Fd:
4747 case AARCH64_OPND_Fn:
4748 case AARCH64_OPND_Fm:
4749 case AARCH64_OPND_Fa:
4750 case AARCH64_OPND_Ft:
4751 case AARCH64_OPND_Ft2:
4752 case AARCH64_OPND_Sd:
4753 case AARCH64_OPND_Sn:
4754 case AARCH64_OPND_Sm:
4755 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4756 if (val == PARSE_FAIL)
4757 {
4758 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4759 goto failure;
4760 }
4761 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4762
4763 info->reg.regno = val;
4764 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4765 break;
4766
4767 case AARCH64_OPND_Vd:
4768 case AARCH64_OPND_Vn:
4769 case AARCH64_OPND_Vm:
4770 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4771 if (val == PARSE_FAIL)
4772 {
4773 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4774 goto failure;
4775 }
4776 if (vectype.defined & NTA_HASINDEX)
4777 goto failure;
4778
4779 info->reg.regno = val;
4780 info->qualifier = vectype_to_qualifier (&vectype);
4781 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4782 goto failure;
4783 break;
4784
4785 case AARCH64_OPND_VdD1:
4786 case AARCH64_OPND_VnD1:
4787 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4788 if (val == PARSE_FAIL)
4789 {
4790 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4791 goto failure;
4792 }
4793 if (vectype.type != NT_d || vectype.index != 1)
4794 {
4795 set_fatal_syntax_error
4796 (_("the top half of a 128-bit FP/SIMD register is expected"));
4797 goto failure;
4798 }
4799 info->reg.regno = val;
4800 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
4801 here; this is correct for the purpose of encoding/decoding since
4802 only the register number is explicitly encoded in the related
4803 instructions, although this appears a bit hacky. */
4804 info->qualifier = AARCH64_OPND_QLF_S_D;
4805 break;
4806
4807 case AARCH64_OPND_Ed:
4808 case AARCH64_OPND_En:
4809 case AARCH64_OPND_Em:
4810 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4811 if (val == PARSE_FAIL)
4812 {
4813 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4814 goto failure;
4815 }
4816 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4817 goto failure;
4818
4819 info->reglane.regno = val;
4820 info->reglane.index = vectype.index;
4821 info->qualifier = vectype_to_qualifier (&vectype);
4822 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4823 goto failure;
4824 break;
4825
4826 case AARCH64_OPND_LVn:
4827 case AARCH64_OPND_LVt:
4828 case AARCH64_OPND_LVt_AL:
4829 case AARCH64_OPND_LEt:
4830 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4831 goto failure;
4832 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4833 {
4834 set_fatal_syntax_error (_("invalid register list"));
4835 goto failure;
4836 }
4837 info->reglist.first_regno = (val >> 2) & 0x1f;
4838 info->reglist.num_regs = (val & 0x3) + 1;
4839 if (operands[i] == AARCH64_OPND_LEt)
4840 {
4841 if (!(vectype.defined & NTA_HASINDEX))
4842 goto failure;
4843 info->reglist.has_index = 1;
4844 info->reglist.index = vectype.index;
4845 }
4846 else if (!(vectype.defined & NTA_HASTYPE))
4847 goto failure;
4848 info->qualifier = vectype_to_qualifier (&vectype);
4849 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4850 goto failure;
4851 break;
4852
4853 case AARCH64_OPND_Cn:
4854 case AARCH64_OPND_Cm:
4855 po_reg_or_fail (REG_TYPE_CN);
4856 if (val > 15)
4857 {
4858 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4859 goto failure;
4860 }
4861 inst.base.operands[i].reg.regno = val;
4862 break;
4863
4864 case AARCH64_OPND_SHLL_IMM:
4865 case AARCH64_OPND_IMM_VLSR:
4866 po_imm_or_fail (1, 64);
4867 info->imm.value = val;
4868 break;
4869
4870 case AARCH64_OPND_CCMP_IMM:
4871 case AARCH64_OPND_FBITS:
4872 case AARCH64_OPND_UIMM4:
4873 case AARCH64_OPND_UIMM3_OP1:
4874 case AARCH64_OPND_UIMM3_OP2:
4875 case AARCH64_OPND_IMM_VLSL:
4876 case AARCH64_OPND_IMM:
4877 case AARCH64_OPND_WIDTH:
4878 po_imm_nc_or_fail ();
4879 info->imm.value = val;
4880 break;
4881
4882 case AARCH64_OPND_UIMM7:
4883 po_imm_or_fail (0, 127);
4884 info->imm.value = val;
4885 break;
4886
4887 case AARCH64_OPND_IDX:
4888 case AARCH64_OPND_BIT_NUM:
4889 case AARCH64_OPND_IMMR:
4890 case AARCH64_OPND_IMMS:
4891 po_imm_or_fail (0, 63);
4892 info->imm.value = val;
4893 break;
4894
4895 case AARCH64_OPND_IMM0:
4896 po_imm_nc_or_fail ();
4897 if (val != 0)
4898 {
4899 set_fatal_syntax_error (_("immediate zero expected"));
4900 goto failure;
4901 }
4902 info->imm.value = 0;
4903 break;
4904
4905 case AARCH64_OPND_FPIMM0:
4906 {
4907 int qfloat;
4908 bfd_boolean res1 = FALSE, res2 = FALSE;
4909 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4910 it is probably not worth the effort to support it. */
4911 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4912 && !(res2 = parse_constant_immediate (&str, &val)))
4913 goto failure;
4914 if ((res1 && qfloat == 0) || (res2 && val == 0))
4915 {
4916 info->imm.value = 0;
4917 info->imm.is_fp = 1;
4918 break;
4919 }
4920 set_fatal_syntax_error (_("immediate zero expected"));
4921 goto failure;
4922 }
4923
4924 case AARCH64_OPND_IMM_MOV:
4925 {
4926 char *saved = str;
4927 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
4928 reg_name_p (str, REG_TYPE_VN))
4929 goto failure;
4930 str = saved;
4931 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4932 GE_OPT_PREFIX, 1));
4933 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4934 later. fix_mov_imm_insn will try to determine a machine
4935 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4936 message if the immediate cannot be moved by a single
4937 instruction. */
4938 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4939 inst.base.operands[i].skip = 1;
4940 }
4941 break;
4942
4943 case AARCH64_OPND_SIMD_IMM:
4944 case AARCH64_OPND_SIMD_IMM_SFT:
4945 if (! parse_big_immediate (&str, &val))
4946 goto failure;
4947 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4948 /* addr_off_p */ 0,
4949 /* need_libopcodes_p */ 1,
4950 /* skip_p */ 1);
4951 /* Parse shift.
4952 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4953 shift, we don't check it here; we leave the checking to
4954 the libopcodes (operand_general_constraint_met_p). By
4955 doing this, we achieve better diagnostics. */
4956 if (skip_past_comma (&str)
4957 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4958 goto failure;
4959 if (!info->shifter.operator_present
4960 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4961 {
4962 /* Default to LSL if not present. Libopcodes prefers shifter
4963 kind to be explicit. */
4964 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4965 info->shifter.kind = AARCH64_MOD_LSL;
4966 }
4967 break;
4968
4969 case AARCH64_OPND_FPIMM:
4970 case AARCH64_OPND_SIMD_FPIMM:
4971 {
4972 int qfloat;
4973 bfd_boolean dp_p
4974 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
4975 == 8);
4976 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
4977 goto failure;
4978 if (qfloat == 0)
4979 {
4980 set_fatal_syntax_error (_("invalid floating-point constant"));
4981 goto failure;
4982 }
4983 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4984 inst.base.operands[i].imm.is_fp = 1;
4985 }
4986 break;
4987
4988 case AARCH64_OPND_LIMM:
4989 po_misc_or_fail (parse_shifter_operand (&str, info,
4990 SHIFTED_LOGIC_IMM));
4991 if (info->shifter.operator_present)
4992 {
4993 set_fatal_syntax_error
4994 (_("shift not allowed for bitmask immediate"));
4995 goto failure;
4996 }
4997 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4998 /* addr_off_p */ 0,
4999 /* need_libopcodes_p */ 1,
5000 /* skip_p */ 1);
5001 break;
5002
5003 case AARCH64_OPND_AIMM:
5004 if (opcode->op == OP_ADD)
5005 /* ADD may have relocation types. */
5006 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5007 SHIFTED_ARITH_IMM));
5008 else
5009 po_misc_or_fail (parse_shifter_operand (&str, info,
5010 SHIFTED_ARITH_IMM));
5011 switch (inst.reloc.type)
5012 {
5013 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5014 info->shifter.amount = 12;
5015 break;
5016 case BFD_RELOC_UNUSED:
5017 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5018 if (info->shifter.kind != AARCH64_MOD_NONE)
5019 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5020 inst.reloc.pc_rel = 0;
5021 break;
5022 default:
5023 break;
5024 }
5025 info->imm.value = 0;
5026 if (!info->shifter.operator_present)
5027 {
5028 /* Default to LSL if not present. Libopcodes prefers shifter
5029 kind to be explicit. */
5030 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5031 info->shifter.kind = AARCH64_MOD_LSL;
5032 }
5033 break;
5034
5035 case AARCH64_OPND_HALF:
5036 {
5037 /* #<imm16> or relocation. */
5038 int internal_fixup_p;
5039 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5040 if (internal_fixup_p)
5041 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5042 skip_whitespace (str);
5043 if (skip_past_comma (&str))
5044 {
5045 /* {, LSL #<shift>} */
5046 if (! aarch64_gas_internal_fixup_p ())
5047 {
5048 set_fatal_syntax_error (_("can't mix relocation modifier "
5049 "with explicit shift"));
5050 goto failure;
5051 }
5052 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5053 }
5054 else
5055 inst.base.operands[i].shifter.amount = 0;
5056 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5057 inst.base.operands[i].imm.value = 0;
5058 if (! process_movw_reloc_info ())
5059 goto failure;
5060 }
5061 break;
5062
5063 case AARCH64_OPND_EXCEPTION:
5064 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
5065 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5066 /* addr_off_p */ 0,
5067 /* need_libopcodes_p */ 0,
5068 /* skip_p */ 1);
5069 break;
5070
5071 case AARCH64_OPND_NZCV:
5072 {
5073 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5074 if (nzcv != NULL)
5075 {
5076 str += 4;
5077 info->imm.value = nzcv->value;
5078 break;
5079 }
5080 po_imm_or_fail (0, 15);
5081 info->imm.value = val;
5082 }
5083 break;
5084
5085 case AARCH64_OPND_COND:
5086 case AARCH64_OPND_COND1:
5087 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5088 str += 2;
5089 if (info->cond == NULL)
5090 {
5091 set_syntax_error (_("invalid condition"));
5092 goto failure;
5093 }
5094 else if (operands[i] == AARCH64_OPND_COND1
5095 && (info->cond->value & 0xe) == 0xe)
5096 {
5097 /* Do not allow AL or NV. */
5098 set_default_error ();
5099 goto failure;
5100 }
5101 break;
5102
5103 case AARCH64_OPND_ADDR_ADRP:
5104 po_misc_or_fail (parse_adrp (&str));
5105 /* Clear the value, as the operand needs to be relocated. */
5106 info->imm.value = 0;
5107 break;
5108
5109 case AARCH64_OPND_ADDR_PCREL14:
5110 case AARCH64_OPND_ADDR_PCREL19:
5111 case AARCH64_OPND_ADDR_PCREL21:
5112 case AARCH64_OPND_ADDR_PCREL26:
5113 po_misc_or_fail (parse_address_reloc (&str, info));
5114 if (!info->addr.pcrel)
5115 {
5116 set_syntax_error (_("invalid pc-relative address"));
5117 goto failure;
5118 }
5119 if (inst.gen_lit_pool
5120 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5121 {
5122 /* Only permit "=value" in the literal load instructions.
5123 The literal will be generated by programmer_friendly_fixup. */
5124 set_syntax_error (_("invalid use of \"=immediate\""));
5125 goto failure;
5126 }
5127 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5128 {
5129 set_syntax_error (_("unrecognized relocation suffix"));
5130 goto failure;
5131 }
5132 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5133 {
5134 info->imm.value = inst.reloc.exp.X_add_number;
5135 inst.reloc.type = BFD_RELOC_UNUSED;
5136 }
5137 else
5138 {
5139 info->imm.value = 0;
5140 if (inst.reloc.type == BFD_RELOC_UNUSED)
5141 switch (opcode->iclass)
5142 {
5143 case compbranch:
5144 case condbranch:
5145 /* e.g. CBZ or B.COND */
5146 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5147 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5148 break;
5149 case testbranch:
5150 /* e.g. TBZ */
5151 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5152 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5153 break;
5154 case branch_imm:
5155 /* e.g. B or BL */
5156 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5157 inst.reloc.type =
5158 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5159 : BFD_RELOC_AARCH64_JUMP26;
5160 break;
5161 case loadlit:
5162 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5163 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5164 break;
5165 case pcreladdr:
5166 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5167 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5168 break;
5169 default:
5170 gas_assert (0);
5171 abort ();
5172 }
5173 inst.reloc.pc_rel = 1;
5174 }
5175 break;
5176
5177 case AARCH64_OPND_ADDR_SIMPLE:
5178 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5179 /* [<Xn|SP>{, #<simm>}] */
5180 po_char_or_fail ('[');
5181 po_reg_or_fail (REG_TYPE_R64_SP);
5182 /* Accept optional ", #0". */
5183 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5184 && skip_past_char (&str, ','))
5185 {
5186 skip_past_char (&str, '#');
5187 if (! skip_past_char (&str, '0'))
5188 {
5189 set_fatal_syntax_error
5190 (_("the optional immediate offset can only be 0"));
5191 goto failure;
5192 }
5193 }
5194 po_char_or_fail (']');
5195 info->addr.base_regno = val;
5196 break;
5197
5198 case AARCH64_OPND_ADDR_REGOFF:
5199 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5200 po_misc_or_fail (parse_address (&str, info, 0));
5201 if (info->addr.pcrel || !info->addr.offset.is_reg
5202 || !info->addr.preind || info->addr.postind
5203 || info->addr.writeback)
5204 {
5205 set_syntax_error (_("invalid addressing mode"));
5206 goto failure;
5207 }
5208 if (!info->shifter.operator_present)
5209 {
5210 /* Default to LSL if not present. Libopcodes prefers shifter
5211 kind to be explicit. */
5212 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5213 info->shifter.kind = AARCH64_MOD_LSL;
5214 }
5215 /* Qualifier to be deduced by libopcodes. */
5216 break;
5217
5218 case AARCH64_OPND_ADDR_SIMM7:
5219 po_misc_or_fail (parse_address (&str, info, 0));
5220 if (info->addr.pcrel || info->addr.offset.is_reg
5221 || (!info->addr.preind && !info->addr.postind))
5222 {
5223 set_syntax_error (_("invalid addressing mode"));
5224 goto failure;
5225 }
5226 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5227 /* addr_off_p */ 1,
5228 /* need_libopcodes_p */ 1,
5229 /* skip_p */ 0);
5230 break;
5231
5232 case AARCH64_OPND_ADDR_SIMM9:
5233 case AARCH64_OPND_ADDR_SIMM9_2:
5234 po_misc_or_fail (parse_address_reloc (&str, info));
5235 if (info->addr.pcrel || info->addr.offset.is_reg
5236 || (!info->addr.preind && !info->addr.postind)
5237 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5238 && info->addr.writeback))
5239 {
5240 set_syntax_error (_("invalid addressing mode"));
5241 goto failure;
5242 }
5243 if (inst.reloc.type != BFD_RELOC_UNUSED)
5244 {
5245 set_syntax_error (_("relocation not allowed"));
5246 goto failure;
5247 }
5248 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5249 /* addr_off_p */ 1,
5250 /* need_libopcodes_p */ 1,
5251 /* skip_p */ 0);
5252 break;
5253
5254 case AARCH64_OPND_ADDR_UIMM12:
5255 po_misc_or_fail (parse_address_reloc (&str, info));
5256 if (info->addr.pcrel || info->addr.offset.is_reg
5257 || !info->addr.preind || info->addr.writeback)
5258 {
5259 set_syntax_error (_("invalid addressing mode"));
5260 goto failure;
5261 }
5262 if (inst.reloc.type == BFD_RELOC_UNUSED)
5263 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5264 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5265 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5266 /* Leave qualifier to be determined by libopcodes. */
5267 break;
5268
5269 case AARCH64_OPND_SIMD_ADDR_POST:
5270 /* [<Xn|SP>], <Xm|#<amount>> */
5271 po_misc_or_fail (parse_address (&str, info, 1));
5272 if (!info->addr.postind || !info->addr.writeback)
5273 {
5274 set_syntax_error (_("invalid addressing mode"));
5275 goto failure;
5276 }
5277 if (!info->addr.offset.is_reg)
5278 {
5279 if (inst.reloc.exp.X_op == O_constant)
5280 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5281 else
5282 {
5283 set_fatal_syntax_error
5284 (_("writeback value should be an immediate constant"));
5285 goto failure;
5286 }
5287 }
5288 /* No qualifier. */
5289 break;
5290
5291 case AARCH64_OPND_SYSREG:
5292 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5293 == PARSE_FAIL)
5294 {
5295 set_syntax_error (_("unknown or missing system register name"));
5296 goto failure;
5297 }
5298 inst.base.operands[i].sysreg = val;
5299 break;
5300
5301 case AARCH64_OPND_PSTATEFIELD:
5302 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5303 == PARSE_FAIL)
5304 {
5305 set_syntax_error (_("unknown or missing PSTATE field name"));
5306 goto failure;
5307 }
5308 inst.base.operands[i].pstatefield = val;
5309 break;
5310
5311 case AARCH64_OPND_SYSREG_IC:
5312 inst.base.operands[i].sysins_op =
5313 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5314 goto sys_reg_ins;
5315 case AARCH64_OPND_SYSREG_DC:
5316 inst.base.operands[i].sysins_op =
5317 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5318 goto sys_reg_ins;
5319 case AARCH64_OPND_SYSREG_AT:
5320 inst.base.operands[i].sysins_op =
5321 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5322 goto sys_reg_ins;
5323 case AARCH64_OPND_SYSREG_TLBI:
5324 inst.base.operands[i].sysins_op =
5325 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5326 sys_reg_ins:
5327 if (inst.base.operands[i].sysins_op == NULL)
5328 {
5329 set_fatal_syntax_error ( _("unknown or missing operation name"));
5330 goto failure;
5331 }
5332 break;
5333
5334 case AARCH64_OPND_BARRIER:
5335 case AARCH64_OPND_BARRIER_ISB:
5336 val = parse_barrier (&str);
5337 if (val != PARSE_FAIL
5338 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5339 {
5340 /* ISB only accepts the option name 'sy'. */
5341 set_syntax_error
5342 (_("the specified option is not accepted in ISB"));
5343 /* Turn off backtrack as this optional operand is present. */
5344 backtrack_pos = 0;
5345 goto failure;
5346 }
5347 /* This is an extension to accept a 0..15 immediate. */
5348 if (val == PARSE_FAIL)
5349 po_imm_or_fail (0, 15);
5350 info->barrier = aarch64_barrier_options + val;
5351 break;
5352
5353 case AARCH64_OPND_PRFOP:
5354 val = parse_pldop (&str);
5355 /* This is an extension to accept a 0..31 immediate. */
5356 if (val == PARSE_FAIL)
5357 po_imm_or_fail (0, 31);
5358 inst.base.operands[i].prfop = aarch64_prfops + val;
5359 break;
5360
5361 default:
5362 as_fatal (_("unhandled operand code %d"), operands[i]);
5363 }
5364
5365 /* If we get here, this operand was successfully parsed. */
5366 inst.base.operands[i].present = 1;
5367 continue;
5368
5369 failure:
5370 /* The parse routine should already have set the error, but in case
5371 not, set a default one here. */
5372 if (! error_p ())
5373 set_default_error ();
5374
5375 if (! backtrack_pos)
5376 goto parse_operands_return;
5377
5378 {
5379 /* We reach here because this operand is marked as optional, and
5380 either no operand was supplied or the operand was supplied but it
5381 was syntactically incorrect. In the latter case we report an
5382 error. In the former case we perform a few more checks before
5383 dropping through to the code to insert the default operand. */
5384
5385 char *tmp = backtrack_pos;
5386 char endchar = END_OF_INSN;
5387
5388 if (i != (aarch64_num_of_operands (opcode) - 1))
5389 endchar = ',';
5390 skip_past_char (&tmp, ',');
5391
5392 if (*tmp != endchar)
5393 /* The user has supplied an operand in the wrong format. */
5394 goto parse_operands_return;
5395
5396 /* Make sure there is not a comma before the optional operand.
5397 For example the fifth operand of 'sys' is optional:
5398
5399 sys #0,c0,c0,#0, <--- wrong
5400 sys #0,c0,c0,#0 <--- correct. */
5401 if (comma_skipped_p && i && endchar == END_OF_INSN)
5402 {
5403 set_fatal_syntax_error
5404 (_("unexpected comma before the omitted optional operand"));
5405 goto parse_operands_return;
5406 }
5407 }
5408
5409 /* Reaching here means we are dealing with an optional operand that is
5410 omitted from the assembly line. */
5411 gas_assert (optional_operand_p (opcode, i));
5412 info->present = 0;
5413 process_omitted_operand (operands[i], opcode, i, info);
5414
5415 /* Try again, skipping the optional operand at backtrack_pos. */
5416 str = backtrack_pos;
5417 backtrack_pos = 0;
5418
5419 /* Clear any error record after the omitted optional operand has been
5420 successfully handled. */
5421 clear_error ();
5422 }
5423
5424 /* Check if we have parsed all the operands. */
5425 if (*str != '\0' && ! error_p ())
5426 {
5427 /* Set I to the index of the last present operand; this is
5428 for the purpose of diagnostics. */
5429 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5430 ;
5431 set_fatal_syntax_error
5432 (_("unexpected characters following instruction"));
5433 }
5434
5435 parse_operands_return:
5436
5437 if (error_p ())
5438 {
5439 DEBUG_TRACE ("parsing FAIL: %s - %s",
5440 operand_mismatch_kind_names[get_error_kind ()],
5441 get_error_message ());
5442 /* Record the operand error properly; this is useful when there
5443 are multiple instruction templates for a mnemonic name, so that
5444 later on, we can select the error that most closely describes
5445 the problem. */
5446 record_operand_error (opcode, i, get_error_kind (),
5447 get_error_message ());
5448 return FALSE;
5449 }
5450 else
5451 {
5452 DEBUG_TRACE ("parsing SUCCESS");
5453 return TRUE;
5454 }
5455 }
5456
5457 /* Do some fixing-up to provide programmer-friendly features while still
5458 keeping libopcodes happy, i.e. libopcodes only accepts
5459 the preferred architectural syntax.
5460 Return FALSE if there is any failure; otherwise return TRUE. */
5461
5462 static bfd_boolean
5463 programmer_friendly_fixup (aarch64_instruction *instr)
5464 {
5465 aarch64_inst *base = &instr->base;
5466 const aarch64_opcode *opcode = base->opcode;
5467 enum aarch64_op op = opcode->op;
5468 aarch64_opnd_info *operands = base->operands;
5469
5470 DEBUG_TRACE ("enter");
5471
5472 switch (opcode->iclass)
5473 {
5474 case testbranch:
5475 /* TBNZ Xn|Wn, #uimm6, label
5476 Test and Branch Not Zero: conditionally jumps to label if bit number
5477 uimm6 in register Xn is not zero. The bit number implies the width of
5478 the register, which may be written and should be disassembled as Wn if
5479 uimm is less than 32. */
5480 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5481 {
5482 if (operands[1].imm.value >= 32)
5483 {
5484 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5485 0, 31);
5486 return FALSE;
5487 }
5488 operands[0].qualifier = AARCH64_OPND_QLF_X;
5489 }
5490 break;
5491 case loadlit:
5492 /* LDR Wt, label | =value
5493 As a convenience assemblers will typically permit the notation
5494 "=value" in conjunction with the pc-relative literal load instructions
5495 to automatically place an immediate value or symbolic address in a
5496 nearby literal pool and generate a hidden label which references it.
5497 ISREG has been set to 0 in the case of =value. */
5498 if (instr->gen_lit_pool
5499 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5500 {
5501 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5502 if (op == OP_LDRSW_LIT)
5503 size = 4;
5504 if (instr->reloc.exp.X_op != O_constant
5505 && instr->reloc.exp.X_op != O_big
5506 && instr->reloc.exp.X_op != O_symbol)
5507 {
5508 record_operand_error (opcode, 1,
5509 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5510 _("constant expression expected"));
5511 return FALSE;
5512 }
5513 if (! add_to_lit_pool (&instr->reloc.exp, size))
5514 {
5515 record_operand_error (opcode, 1,
5516 AARCH64_OPDE_OTHER_ERROR,
5517 _("literal pool insertion failed"));
5518 return FALSE;
5519 }
5520 }
5521 break;
5522 case log_shift:
5523 case bitfield:
5524 /* UXT[BHW] Wd, Wn
5525 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
5526 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5527 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5528 A programmer-friendly assembler should accept a destination Xd in
5529 place of Wd, however that is not the preferred form for disassembly.
5530 */
5531 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5532 && operands[1].qualifier == AARCH64_OPND_QLF_W
5533 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5534 operands[0].qualifier = AARCH64_OPND_QLF_W;
5535 break;
5536
5537 case addsub_ext:
5538 {
5539 /* In the 64-bit form, the final register operand is written as Wm
5540 for all but the (possibly omitted) UXTX/LSL and SXTX
5541 operators.
5542 As a programmer-friendly assembler, we accept e.g.
5543 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5544 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5545 int idx = aarch64_operand_index (opcode->operands,
5546 AARCH64_OPND_Rm_EXT);
5547 gas_assert (idx == 1 || idx == 2);
5548 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5549 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5550 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5551 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5552 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5553 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5554 }
5555 break;
5556
5557 default:
5558 break;
5559 }
5560
5561 DEBUG_TRACE ("exit with SUCCESS");
5562 return TRUE;
5563 }
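
/* A few inputs that the fixups above accept (illustrative, not
   exhaustive):
     tbz  w3, #5, lab    the W qualifier is widened to X for the
                         benefit of libopcodes, while "tbz w3, #33, lab"
                         is rejected as out of range for a W register;
     ldr  x0, =0x1234    the constant is placed in a literal pool and
                         loaded pc-relatively;
     uxtb x1, w2         the destination is narrowed to w1, the
                         preferred architectural form. */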
5564
5565 /* Check for loads and stores that will cause unpredictable behavior. */
5566
5567 static void
5568 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5569 {
5570 aarch64_inst *base = &instr->base;
5571 const aarch64_opcode *opcode = base->opcode;
5572 const aarch64_opnd_info *opnds = base->operands;
5573 switch (opcode->iclass)
5574 {
5575 case ldst_pos:
5576 case ldst_imm9:
5577 case ldst_unscaled:
5578 case ldst_unpriv:
5579 /* Loading/storing the base register is unpredictable if writeback. */
5580 if ((aarch64_get_operand_class (opnds[0].type)
5581 == AARCH64_OPND_CLASS_INT_REG)
5582 && opnds[0].reg.regno == opnds[1].addr.base_regno
5583 && opnds[1].addr.writeback)
5584 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5585 break;
5586 case ldstpair_off:
5587 case ldstnapair_offs:
5588 case ldstpair_indexed:
5589 /* Loading/storing the base register is unpredictable if writeback. */
5590 if ((aarch64_get_operand_class (opnds[0].type)
5591 == AARCH64_OPND_CLASS_INT_REG)
5592 && (opnds[0].reg.regno == opnds[2].addr.base_regno
5593 || opnds[1].reg.regno == opnds[2].addr.base_regno)
5594 && opnds[2].addr.writeback)
5595 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5596 /* Load operations must load different registers. */
5597 if ((opcode->opcode & (1 << 22))
5598 && opnds[0].reg.regno == opnds[1].reg.regno)
5599 as_warn (_("unpredictable load of register pair -- `%s'"), str);
5600 break;
5601 default:
5602 break;
5603 }
5604 }
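
/* For example (illustrative only): "ldr x0, [x0], #8" and
   "stp x1, x2, [x1, #16]!" both warn here because the base register
   is also transferred while writeback is in effect, and
   "ldp x0, x0, [sp]" warns because a load pair should load two
   distinct registers. */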
5605
5606 /* A wrapper function to interface with libopcodes for encoding and to
5607 record the error message, if there is any.
5608
5609 Return TRUE on success; otherwise return FALSE. */
5610
5611 static bfd_boolean
5612 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5613 aarch64_insn *code)
5614 {
5615 aarch64_operand_error error_info;
5616 error_info.kind = AARCH64_OPDE_NIL;
5617 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5618 return TRUE;
5619 else
5620 {
5621 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5622 record_operand_error_info (opcode, &error_info);
5623 return FALSE;
5624 }
5625 }
5626
5627 #ifdef DEBUG_AARCH64
5628 static inline void
5629 dump_opcode_operands (const aarch64_opcode *opcode)
5630 {
5631 int i = 0;
5632 while (opcode->operands[i] != AARCH64_OPND_NIL)
5633 {
5634 aarch64_verbose ("\t\t opnd%d: %s", i,
5635 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5636 ? aarch64_get_operand_name (opcode->operands[i])
5637 : aarch64_get_operand_desc (opcode->operands[i]));
5638 ++i;
5639 }
5640 }
5641 #endif /* DEBUG_AARCH64 */
5642
5643 /* This is the guts of the machine-dependent assembler. STR points to a
5644 machine dependent instruction. This function is supposed to emit
5645 the frags/bytes it assembles to. */
5646
5647 void
5648 md_assemble (char *str)
5649 {
5650 char *p = str;
5651 templates *template;
5652 aarch64_opcode *opcode;
5653 aarch64_inst *inst_base;
5654 unsigned saved_cond;
5655
5656 /* Align the previous label if needed. */
5657 if (last_label_seen != NULL)
5658 {
5659 symbol_set_frag (last_label_seen, frag_now);
5660 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5661 S_SET_SEGMENT (last_label_seen, now_seg);
5662 }
5663
5664 inst.reloc.type = BFD_RELOC_UNUSED;
5665
5666 DEBUG_TRACE ("\n\n");
5667 DEBUG_TRACE ("==============================");
5668 DEBUG_TRACE ("Enter md_assemble with %s", str);
5669
5670 template = opcode_lookup (&p);
5671 if (!template)
5672 {
5673 /* It wasn't an instruction, but it might be a register alias of
5674 the form of an "alias .req reg" directive. */
5675 if (!create_register_alias (str, p))
5676 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5677 str);
5678 return;
5679 }
5680
5681 skip_whitespace (p);
5682 if (*p == ',')
5683 {
5684 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5685 get_mnemonic_name (str), str);
5686 return;
5687 }
5688
5689 init_operand_error_report ();
5690
5691 saved_cond = inst.cond;
5692 reset_aarch64_instruction (&inst);
5693 inst.cond = saved_cond;
5694
5695 /* Iterate through all opcode entries with the same mnemonic name. */
5696 do
5697 {
5698 opcode = template->opcode;
5699
5700 DEBUG_TRACE ("opcode %s found", opcode->name);
5701 #ifdef DEBUG_AARCH64
5702 if (debug_dump)
5703 dump_opcode_operands (opcode);
5704 #endif /* DEBUG_AARCH64 */
5705
5706 mapping_state (MAP_INSN);
5707
5708 inst_base = &inst.base;
5709 inst_base->opcode = opcode;
5710
5711 /* Truly conditionally executed instructions, e.g. b.cond. */
5712 if (opcode->flags & F_COND)
5713 {
5714 gas_assert (inst.cond != COND_ALWAYS);
5715 inst_base->cond = get_cond_from_value (inst.cond);
5716 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5717 }
5718 else if (inst.cond != COND_ALWAYS)
5719 {
5720 /* We shouldn't get here: the assembly looks like a
5721 conditional instruction but the opcode found is unconditional. */
5722 gas_assert (0);
5723 continue;
5724 }
5725
5726 if (parse_operands (p, opcode)
5727 && programmer_friendly_fixup (&inst)
5728 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5729 {
5730 /* Check that this instruction is supported for this CPU. */
5731 if (!opcode->avariant
5732 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5733 {
5734 as_bad (_("selected processor does not support `%s'"), str);
5735 return;
5736 }
5737
5738 warn_unpredictable_ldst (&inst, str);
5739
5740 if (inst.reloc.type == BFD_RELOC_UNUSED
5741 || !inst.reloc.need_libopcodes_p)
5742 output_inst (NULL);
5743 else
5744 {
5745 /* If there is relocation generated for the instruction,
5746 store the instruction information for the future fix-up. */
5747 struct aarch64_inst *copy;
5748 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5749 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5750 abort ();
5751 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5752 output_inst (copy);
5753 }
5754 return;
5755 }
5756
5757 template = template->next;
5758 if (template != NULL)
5759 {
5760 reset_aarch64_instruction (&inst);
5761 inst.cond = saved_cond;
5762 }
5763 }
5764 while (template != NULL);
5765
5766 /* Issue the error messages if any. */
5767 output_operand_error_report (str);
5768 }
5769
5770 /* Various frobbings of labels and their addresses. */
5771
5772 void
5773 aarch64_start_line_hook (void)
5774 {
5775 last_label_seen = NULL;
5776 }
5777
5778 void
5779 aarch64_frob_label (symbolS * sym)
5780 {
5781 last_label_seen = sym;
5782
5783 dwarf2_emit_label (sym);
5784 }
5785
5786 int
5787 aarch64_data_in_code (void)
5788 {
5789 if (!strncmp (input_line_pointer + 1, "data:", 5))
5790 {
5791 *input_line_pointer = '/';
5792 input_line_pointer += 5;
5793 *input_line_pointer = 0;
5794 return 1;
5795 }
5796
5797 return 0;
5798 }
5799
5800 char *
5801 aarch64_canonicalize_symbol_name (char *name)
5802 {
5803 int len;
5804
5805 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5806 *(name + len - 5) = 0;
5807
5808 return name;
5809 }
5810 \f
5811 /* Table of all register names defined by default. The user can
5812 define additional names with .req. Note that all register names
5813 should appear in both upper and lowercase variants. Some registers
5814 also have mixed-case names. */
5815
5816 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5817 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5818 #define REGSET31(p,t) \
5819 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5820 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5821 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5822 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5823 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5824 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5825 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5826 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5827 #define REGSET(p,t) \
5828 REGSET31(p,t), REGNUM(p,31,t)
5829
5830 /* These go into aarch64_reg_hsh hash-table. */
5831 static const reg_entry reg_names[] = {
5832 /* Integer registers. */
5833 REGSET31 (x, R_64), REGSET31 (X, R_64),
5834 REGSET31 (w, R_32), REGSET31 (W, R_32),
5835
5836 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5837 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5838
5839 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5840 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5841
5842 /* Coprocessor register numbers. */
5843 REGSET (c, CN), REGSET (C, CN),
5844
5845 /* Floating-point single precision registers. */
5846 REGSET (s, FP_S), REGSET (S, FP_S),
5847
5848 /* Floating-point double precision registers. */
5849 REGSET (d, FP_D), REGSET (D, FP_D),
5850
5851 /* Floating-point half precision registers. */
5852 REGSET (h, FP_H), REGSET (H, FP_H),
5853
5854 /* Floating-point byte precision registers. */
5855 REGSET (b, FP_B), REGSET (B, FP_B),
5856
5857 /* Floating-point quad precision registers. */
5858 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5859
5860 /* FP/SIMD registers. */
5861 REGSET (v, VN), REGSET (V, VN),
5862 };
5863
5864 #undef REGDEF
5865 #undef REGNUM
5866 #undef REGSET
5867
5868 #define N 1
5869 #define n 0
5870 #define Z 1
5871 #define z 0
5872 #define C 1
5873 #define c 0
5874 #define V 1
5875 #define v 0
5876 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5877 static const asm_nzcv nzcv_names[] = {
5878 {"nzcv", B (n, z, c, v)},
5879 {"nzcV", B (n, z, c, V)},
5880 {"nzCv", B (n, z, C, v)},
5881 {"nzCV", B (n, z, C, V)},
5882 {"nZcv", B (n, Z, c, v)},
5883 {"nZcV", B (n, Z, c, V)},
5884 {"nZCv", B (n, Z, C, v)},
5885 {"nZCV", B (n, Z, C, V)},
5886 {"Nzcv", B (N, z, c, v)},
5887 {"NzcV", B (N, z, c, V)},
5888 {"NzCv", B (N, z, C, v)},
5889 {"NzCV", B (N, z, C, V)},
5890 {"NZcv", B (N, Z, c, v)},
5891 {"NZcV", B (N, Z, c, V)},
5892 {"NZCv", B (N, Z, C, v)},
5893 {"NZCV", B (N, Z, C, V)}
5894 };
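
/* For instance (illustrative only): the flag-set name "NzCv" above
   encodes as B (1, 0, 1, 0), i.e. the immediate value 10, so
   "ccmp x0, x1, NzCv, ne" assembles with an NZCV field of 0b1010. */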
5895
5896 #undef N
5897 #undef n
5898 #undef Z
5899 #undef z
5900 #undef C
5901 #undef c
5902 #undef V
5903 #undef v
5904 #undef B
5905 \f
5906 /* MD interface: bits in the object file. */
5907
5908 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5909 for use in the a.out file, and store them in the array pointed to by buf.
5910 This knows about the endian-ness of the target machine and does
5911 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
5912 2 (short) and 4 (long). Floating-point numbers are put out as a series of
5913 LITTLENUMS (shorts, here at least). */
5914
5915 void
5916 md_number_to_chars (char *buf, valueT val, int n)
5917 {
5918 if (target_big_endian)
5919 number_to_chars_bigendian (buf, val, n);
5920 else
5921 number_to_chars_littleendian (buf, val, n);
5922 }
5923
5924 /* MD interface: Sections. */
5925
5926 /* Estimate the size of a frag before relaxing. Assume everything fits in
5927 4 bytes. */
5928
5929 int
5930 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5931 {
5932 fragp->fr_var = 4;
5933 return 4;
5934 }
5935
5936 /* Round up a section size to the appropriate boundary. */
5937
5938 valueT
5939 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5940 {
5941 return size;
5942 }
5943
5944 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5945 of an rs_align_code fragment.
5946
5947 Here we fill the frag with the appropriate info for padding the
5948 output stream. The resulting frag will consist of a fixed (fr_fix)
5949 and of a repeating (fr_var) part.
5950
5951 The fixed content is always emitted before the repeating content and
5952 these two parts are used as follows in constructing the output:
5953 - the fixed part will be used to align to a valid instruction word
5954 boundary, in case we start at a misaligned address; as no
5955 executable instruction can live at the misaligned location, we
5956 simply fill with zeros;
5957 - the variable part will be used to cover the remaining padding and
5958 we fill using the AArch64 NOP instruction.
5959
5960 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
5961 enough storage space for up to 3 bytes of padding back to a valid
5962 instruction alignment and exactly 4 bytes to store the NOP pattern. */
5963
5964 void
5965 aarch64_handle_align (fragS * fragP)
5966 {
5967 /* NOP = d503201f */
5968 /* AArch64 instructions are always little-endian. */
5969 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5970
5971 int bytes, fix, noop_size;
5972 char *p;
5973
5974 if (fragP->fr_type != rs_align_code)
5975 return;
5976
5977 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5978 p = fragP->fr_literal + fragP->fr_fix;
5979
5980 #ifdef OBJ_ELF
5981 gas_assert (fragP->tc_frag_data.recorded);
5982 #endif
5983
5984 noop_size = sizeof (aarch64_noop);
5985
5986 fix = bytes & (noop_size - 1);
5987 if (fix)
5988 {
5989 #ifdef OBJ_ELF
5990 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5991 #endif
5992 memset (p, 0, fix);
5993 p += fix;
5994 fragP->fr_fix += fix;
5995 }
5996
5997 if (noop_size)
5998 memcpy (p, aarch64_noop, noop_size);
5999 fragP->fr_var = noop_size;
6000 }
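/* Worked example (added for illustration, not in the original source):
   assume code stops at address 0x1002 and a ".balign 8" follows, leaving
   a 6-byte gap. The frag filled above then looks like

       0x1002: 00 00         <- fr_fix part: 2 zero bytes up to the next
                                4-byte instruction boundary
       0x1004: 1f 20 03 d5   <- fr_var part: the NOP pattern, repeated by
                                write.c until the gap is covered

   so the fixed part never holds more than 3 zero bytes and the variable
   part is always the 4-byte NOP. */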
6001
6002 /* Perform target specific initialisation of a frag.
6003 Note - despite the name this initialisation is not done when the frag
6004 is created, but only when its type is assigned. A frag can be created
6005 and used a long time before its type is set, so beware of assuming that
6006 this initialisation is performed first. */
6007
6008 #ifndef OBJ_ELF
6009 void
6010 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6011 int max_chars ATTRIBUTE_UNUSED)
6012 {
6013 }
6014
6015 #else /* OBJ_ELF is defined. */
6016 void
6017 aarch64_init_frag (fragS * fragP, int max_chars)
6018 {
6019 /* Record a mapping symbol for alignment frags. We will delete this
6020 later if the alignment ends up empty. */
6021 if (!fragP->tc_frag_data.recorded)
6022 {
6023 fragP->tc_frag_data.recorded = 1;
6024 switch (fragP->fr_type)
6025 {
6026 case rs_align:
6027 case rs_align_test:
6028 case rs_fill:
6029 mapping_state_2 (MAP_DATA, max_chars);
6030 break;
6031 case rs_align_code:
6032 mapping_state_2 (MAP_INSN, max_chars);
6033 break;
6034 default:
6035 break;
6036 }
6037 }
6038 }
6039 \f
6040 /* Initialize the DWARF-2 unwind information for this procedure. */
6041
6042 void
6043 tc_aarch64_frame_initial_instructions (void)
6044 {
6045 cfi_add_CFA_def_cfa (REG_SP, 0);
6046 }
6047 #endif /* OBJ_ELF */
6048
6049 /* Convert REGNAME to a DWARF-2 register number. */
6050
6051 int
6052 tc_aarch64_regname_to_dw2regnum (char *regname)
6053 {
6054 const reg_entry *reg = parse_reg (&regname);
6055 if (reg == NULL)
6056 return -1;
6057
6058 switch (reg->type)
6059 {
6060 case REG_TYPE_SP_32:
6061 case REG_TYPE_SP_64:
6062 case REG_TYPE_R_32:
6063 case REG_TYPE_R_64:
6064 return reg->number;
6065
6066 case REG_TYPE_FP_B:
6067 case REG_TYPE_FP_H:
6068 case REG_TYPE_FP_S:
6069 case REG_TYPE_FP_D:
6070 case REG_TYPE_FP_Q:
6071 return reg->number + 64;
6072
6073 default:
6074 break;
6075 }
6076 return -1;
6077 }
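/* Sketch of the mapping implemented above (illustrative only):

       x0 .. x30          ->  DWARF regs  0 .. 30
       sp, wsp            ->  DWARF reg  31
       b/h/s/d/q 0 .. 31  ->  DWARF regs 64 .. 95

   Register names of any other type, e.g. the Vn SIMD forms, fall through
   to the default case and make this function return -1. */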
6078
6079 /* Implement DWARF2_ADDR_SIZE. */
6080
6081 int
6082 aarch64_dwarf2_addr_size (void)
6083 {
6084 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6085 if (ilp32_p)
6086 return 4;
6087 #endif
6088 return bfd_arch_bits_per_address (stdoutput) / 8;
6089 }
6090
6091 /* MD interface: Symbol and relocation handling. */
6092
6093 /* Return the address within the segment that a PC-relative fixup is
6094 relative to. For AArch64, PC-relative fixups applied to instructions
6095 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6096
6097 long
6098 md_pcrel_from_section (fixS * fixP, segT seg)
6099 {
6100 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6101
6102 /* If this is pc-relative and we are going to emit a relocation
6103 then we just want to put out any pipeline compensation that the linker
6104 will need. Otherwise we want to use the calculated base. */
6105 if (fixP->fx_pcrel
6106 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6107 || aarch64_force_relocation (fixP)))
6108 base = 0;
6109
6110 /* AArch64 should be consistent for all pc-relative relocations. */
6111 return base + AARCH64_PCREL_OFFSET;
6112 }
6113
6114 /* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
6115 Otherwise we have no need to provide default values for symbols. */
6116
6117 symbolS *
6118 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6119 {
6120 #ifdef OBJ_ELF
6121 if (name[0] == '_' && name[1] == 'G'
6122 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6123 {
6124 if (!GOT_symbol)
6125 {
6126 if (symbol_find (name))
6127 as_bad (_("GOT already in the symbol table"));
6128
6129 GOT_symbol = symbol_new (name, undefined_section,
6130 (valueT) 0, &zero_address_frag);
6131 }
6132
6133 return GOT_symbol;
6134 }
6135 #endif
6136
6137 return 0;
6138 }
6139
6140 /* Return non-zero if the indicated VALUE has overflowed the maximum
6141 range expressible by an unsigned number with the indicated number of
6142 BITS. */
6143
6144 static bfd_boolean
6145 unsigned_overflow (valueT value, unsigned bits)
6146 {
6147 valueT lim;
6148 if (bits >= sizeof (valueT) * 8)
6149 return FALSE;
6150 lim = (valueT) 1 << bits;
6151 return (value >= lim);
6152 }
6153
6154
6155 /* Return non-zero if the indicated VALUE has overflowed the maximum
6156 range expressible by a signed number with the indicated number of
6157 BITS. */
6158
6159 static bfd_boolean
6160 signed_overflow (offsetT value, unsigned bits)
6161 {
6162 offsetT lim;
6163 if (bits >= sizeof (offsetT) * 8)
6164 return FALSE;
6165 lim = (offsetT) 1 << (bits - 1);
6166 return (value < -lim || value >= lim);
6167 }
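/* For instance (illustrative): with BITS == 12 the unsigned check accepts
   0 .. 4095 and the signed check accepts -2048 .. 2047, so
   unsigned_overflow (4096, 12) and signed_overflow (2048, 12) are TRUE
   while signed_overflow (-2048, 12) is FALSE. */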
6168
6169 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6170 unsigned immediate offset load/store instruction, try to encode it as
6171 an unscaled, 9-bit, signed immediate offset load/store instruction.
6172 Return TRUE if it is successful; otherwise return FALSE.
6173
6174 As a programmer-friendly assembler, GAS generates LDUR/STUR instructions
6175 in response to the standard LDR/STR mnemonics when the immediate offset is
6176 unambiguous, i.e. when it is negative or unaligned. */
6177
6178 static bfd_boolean
6179 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6180 {
6181 int idx;
6182 enum aarch64_op new_op;
6183 const aarch64_opcode *new_opcode;
6184
6185 gas_assert (instr->opcode->iclass == ldst_pos);
6186
6187 switch (instr->opcode->op)
6188 {
6189 case OP_LDRB_POS: new_op = OP_LDURB; break;
6190 case OP_STRB_POS: new_op = OP_STURB; break;
6191 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6192 case OP_LDRH_POS: new_op = OP_LDURH; break;
6193 case OP_STRH_POS: new_op = OP_STURH; break;
6194 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6195 case OP_LDR_POS: new_op = OP_LDUR; break;
6196 case OP_STR_POS: new_op = OP_STUR; break;
6197 case OP_LDRF_POS: new_op = OP_LDURV; break;
6198 case OP_STRF_POS: new_op = OP_STURV; break;
6199 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6200 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6201 default: new_op = OP_NIL; break;
6202 }
6203
6204 if (new_op == OP_NIL)
6205 return FALSE;
6206
6207 new_opcode = aarch64_get_opcode (new_op);
6208 gas_assert (new_opcode != NULL);
6209
6210 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6211 instr->opcode->op, new_opcode->op);
6212
6213 aarch64_replace_opcode (instr, new_opcode);
6214
6215 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6216 qualifier matching may fail because the out-of-date qualifier will
6217 prevent the operand being updated with a new and correct qualifier. */
6218 idx = aarch64_operand_index (instr->opcode->operands,
6219 AARCH64_OPND_ADDR_SIMM9);
6220 gas_assert (idx == 1);
6221 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6222
6223 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6224
6225 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6226 return FALSE;
6227
6228 return TRUE;
6229 }
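/* Two inputs that take this fallback path (example only; the register
   names are arbitrary):

       ldr  x0, [x1, #-8]   ->  re-encoded as  ldur  x0, [x1, #-8]
       str  w2, [x3, #3]    ->  re-encoded as  stur  w2, [x3, #3]

   whereas  ldr x0, [x1, #8]  fits the scaled unsigned 12-bit form and is
   encoded directly, without reaching this function. */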
6230
6231 /* Called by fix_insn to fix a MOV immediate alias instruction.
6232
6233 Operand for a generic move immediate instruction, which is an alias
6234 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6235 a 32-bit/64-bit immediate value into a general register. An assembler error
6236 shall result if the immediate cannot be created by a single one of these
6237 instructions. If there is a choice, then to ensure reversibility an
6238 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
6239
6240 static void
6241 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6242 {
6243 const aarch64_opcode *opcode;
6244
6245 /* Need to check if the destination is SP/ZR. The check has to be done
6246 before any aarch64_replace_opcode. */
6247 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6248 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6249
6250 instr->operands[1].imm.value = value;
6251 instr->operands[1].skip = 0;
6252
6253 if (try_mov_wide_p)
6254 {
6255 /* Try the MOVZ alias. */
6256 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6257 aarch64_replace_opcode (instr, opcode);
6258 if (aarch64_opcode_encode (instr->opcode, instr,
6259 &instr->value, NULL, NULL))
6260 {
6261 put_aarch64_insn (buf, instr->value);
6262 return;
6263 }
6264 /* Try the MOVN alias. */
6265 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6266 aarch64_replace_opcode (instr, opcode);
6267 if (aarch64_opcode_encode (instr->opcode, instr,
6268 &instr->value, NULL, NULL))
6269 {
6270 put_aarch64_insn (buf, instr->value);
6271 return;
6272 }
6273 }
6274
6275 if (try_mov_bitmask_p)
6276 {
6277 /* Try the ORR alias. */
6278 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6279 aarch64_replace_opcode (instr, opcode);
6280 if (aarch64_opcode_encode (instr->opcode, instr,
6281 &instr->value, NULL, NULL))
6282 {
6283 put_aarch64_insn (buf, instr->value);
6284 return;
6285 }
6286 }
6287
6288 as_bad_where (fixP->fx_file, fixP->fx_line,
6289 _("immediate cannot be moved by a single instruction"));
6290 }
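/* Illustrative examples of the preference order implemented above, i.e.
   MOVZ first, then MOVN, then the ORR bitmask form (register names are
   arbitrary):

       mov  x0, #0x12340000          ->  movz x0, #0x1234, lsl #16
       mov  x0, #0xffffffffffff1234  ->  movn x0, #0xedcb
       mov  x0, #0x00ff00ff00ff00ff  ->  orr  x0, xzr, #0xff00ff00ff00ff

   An immediate that none of the three aliases can produce is rejected
   with the error at the end of the function. */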
6291
6292 /* An immediate-related instruction operand may have a symbol used
6293 in the assembly, e.g.
6294
6295 mov w0, u32
6296 .set u32, 0x00ffff00
6297
6298 At the time when the assembly instruction is parsed, a referenced symbol,
6299 like 'u32' in the above example, may not have been seen; a fixS is created
6300 in such a case and is handled here after symbols have been resolved.
6301 The instruction is fixed up with VALUE using the information in *FIXP plus
6302 extra information in FLAGS.
6303
6304 This function is called by md_apply_fix to fix up instructions that need
6305 a fix-up as described above but do not involve any linker-time relocation. */
6306
6307 static void
6308 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6309 {
6310 int idx;
6311 uint32_t insn;
6312 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6313 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6314 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6315
6316 if (new_inst)
6317 {
6318 /* Now the instruction is about to be fixed-up, so the operand that
6319 was previously marked as 'ignored' needs to be unmarked in order
6320 to get the encoding done properly. */
6321 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6322 new_inst->operands[idx].skip = 0;
6323 }
6324
6325 gas_assert (opnd != AARCH64_OPND_NIL);
6326
6327 switch (opnd)
6328 {
6329 case AARCH64_OPND_EXCEPTION:
6330 if (unsigned_overflow (value, 16))
6331 as_bad_where (fixP->fx_file, fixP->fx_line,
6332 _("immediate out of range"));
6333 insn = get_aarch64_insn (buf);
6334 insn |= encode_svc_imm (value);
6335 put_aarch64_insn (buf, insn);
6336 break;
6337
6338 case AARCH64_OPND_AIMM:
6339 /* ADD or SUB with immediate.
6340 NOTE this assumes we come here with an add/sub shifted reg encoding
6341 3 322|2222|2 2 2 21111 111111
6342 1 098|7654|3 2 1 09876 543210 98765 43210
6343 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6344 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6345 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6346 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6347 ->
6348 3 322|2222|2 2 221111111111
6349 1 098|7654|3 2 109876543210 98765 43210
6350 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6351 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6352 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6353 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6354 Fields sf Rn Rd are already set. */
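/* Worked example (illustrative): for "add x0, x1, #sym" where sym
resolves to 0x5000, the value does not fit in 12 bits, but
(0x5000 >> 12) << 12 == 0x5000, so the code below encodes imm12 = 5
with the LSL #12 shift bit set. A resolved value of -16 instead flips
ADD to SUB and encodes imm12 = 16. */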
6355 insn = get_aarch64_insn (buf);
6356 if (value < 0)
6357 {
6358 /* Add <-> sub. */
6359 insn = reencode_addsub_switch_add_sub (insn);
6360 value = -value;
6361 }
6362
6363 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6364 && unsigned_overflow (value, 12))
6365 {
6366 /* Try to shift the value by 12 to make it fit. */
6367 if (((value >> 12) << 12) == value
6368 && ! unsigned_overflow (value, 12 + 12))
6369 {
6370 value >>= 12;
6371 insn |= encode_addsub_imm_shift_amount (1);
6372 }
6373 }
6374
6375 if (unsigned_overflow (value, 12))
6376 as_bad_where (fixP->fx_file, fixP->fx_line,
6377 _("immediate out of range"));
6378
6379 insn |= encode_addsub_imm (value);
6380
6381 put_aarch64_insn (buf, insn);
6382 break;
6383
6384 case AARCH64_OPND_SIMD_IMM:
6385 case AARCH64_OPND_SIMD_IMM_SFT:
6386 case AARCH64_OPND_LIMM:
6387 /* Bit mask immediate. */
6388 gas_assert (new_inst != NULL);
6389 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6390 new_inst->operands[idx].imm.value = value;
6391 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6392 &new_inst->value, NULL, NULL))
6393 put_aarch64_insn (buf, new_inst->value);
6394 else
6395 as_bad_where (fixP->fx_file, fixP->fx_line,
6396 _("invalid immediate"));
6397 break;
6398
6399 case AARCH64_OPND_HALF:
6400 /* 16-bit unsigned immediate. */
6401 if (unsigned_overflow (value, 16))
6402 as_bad_where (fixP->fx_file, fixP->fx_line,
6403 _("immediate out of range"));
6404 insn = get_aarch64_insn (buf);
6405 insn |= encode_movw_imm (value & 0xffff);
6406 put_aarch64_insn (buf, insn);
6407 break;
6408
6409 case AARCH64_OPND_IMM_MOV:
6410 /* Operand for a generic move immediate instruction, which is
6411 an alias instruction that generates a single MOVZ, MOVN or ORR
6412 instruction to load a 32-bit/64-bit immediate value into a general
6413 register. An assembler error shall result if the immediate cannot be
6414 created by a single one of these instructions. If there is a choice,
6415 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6416 and MOVZ or MOVN to ORR. */
6417 gas_assert (new_inst != NULL);
6418 fix_mov_imm_insn (fixP, buf, new_inst, value);
6419 break;
6420
6421 case AARCH64_OPND_ADDR_SIMM7:
6422 case AARCH64_OPND_ADDR_SIMM9:
6423 case AARCH64_OPND_ADDR_SIMM9_2:
6424 case AARCH64_OPND_ADDR_UIMM12:
6425 /* Immediate offset in an address. */
6426 insn = get_aarch64_insn (buf);
6427
6428 gas_assert (new_inst != NULL && new_inst->value == insn);
6429 gas_assert (new_inst->opcode->operands[1] == opnd
6430 || new_inst->opcode->operands[2] == opnd);
6431
6432 /* Get the index of the address operand. */
6433 if (new_inst->opcode->operands[1] == opnd)
6434 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6435 idx = 1;
6436 else
6437 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6438 idx = 2;
6439
6440 /* Update the resolved offset value. */
6441 new_inst->operands[idx].addr.offset.imm = value;
6442
6443 /* Encode/fix-up. */
6444 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6445 &new_inst->value, NULL, NULL))
6446 {
6447 put_aarch64_insn (buf, new_inst->value);
6448 break;
6449 }
6450 else if (new_inst->opcode->iclass == ldst_pos
6451 && try_to_encode_as_unscaled_ldst (new_inst))
6452 {
6453 put_aarch64_insn (buf, new_inst->value);
6454 break;
6455 }
6456
6457 as_bad_where (fixP->fx_file, fixP->fx_line,
6458 _("immediate offset out of range"));
6459 break;
6460
6461 default:
6462 gas_assert (0);
6463 as_fatal (_("unhandled operand code %d"), opnd);
6464 }
6465 }
6466
6467 /* Apply a fixup (fixP) to segment data, once it has been determined
6468 by our caller that we have all the info we need to fix it up.
6469
6470 Parameter valP is the pointer to the value of the bits. */
6471
6472 void
6473 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6474 {
6475 offsetT value = *valP;
6476 uint32_t insn;
6477 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6478 int scale;
6479 unsigned flags = fixP->fx_addnumber;
6480
6481 DEBUG_TRACE ("\n\n");
6482 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6483 DEBUG_TRACE ("Enter md_apply_fix");
6484
6485 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6486
6487 /* Note whether this will delete the relocation. */
6488
6489 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6490 fixP->fx_done = 1;
6491
6492 /* Process the relocations. */
6493 switch (fixP->fx_r_type)
6494 {
6495 case BFD_RELOC_NONE:
6496 /* This will need to go in the object file. */
6497 fixP->fx_done = 0;
6498 break;
6499
6500 case BFD_RELOC_8:
6501 case BFD_RELOC_8_PCREL:
6502 if (fixP->fx_done || !seg->use_rela_p)
6503 md_number_to_chars (buf, value, 1);
6504 break;
6505
6506 case BFD_RELOC_16:
6507 case BFD_RELOC_16_PCREL:
6508 if (fixP->fx_done || !seg->use_rela_p)
6509 md_number_to_chars (buf, value, 2);
6510 break;
6511
6512 case BFD_RELOC_32:
6513 case BFD_RELOC_32_PCREL:
6514 if (fixP->fx_done || !seg->use_rela_p)
6515 md_number_to_chars (buf, value, 4);
6516 break;
6517
6518 case BFD_RELOC_64:
6519 case BFD_RELOC_64_PCREL:
6520 if (fixP->fx_done || !seg->use_rela_p)
6521 md_number_to_chars (buf, value, 8);
6522 break;
6523
6524 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6525 /* We claim that these fixups have been processed here, even if
6526 in fact we generate an error because we do not have a reloc
6527 for them, so tc_gen_reloc() will reject them. */
6528 fixP->fx_done = 1;
6529 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6530 {
6531 as_bad_where (fixP->fx_file, fixP->fx_line,
6532 _("undefined symbol %s used as an immediate value"),
6533 S_GET_NAME (fixP->fx_addsy));
6534 goto apply_fix_return;
6535 }
6536 fix_insn (fixP, flags, value);
6537 break;
6538
6539 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6540 if (fixP->fx_done || !seg->use_rela_p)
6541 {
6542 if (value & 3)
6543 as_bad_where (fixP->fx_file, fixP->fx_line,
6544 _("pc-relative load offset not word aligned"));
6545 if (signed_overflow (value, 21))
6546 as_bad_where (fixP->fx_file, fixP->fx_line,
6547 _("pc-relative load offset out of range"));
6548 insn = get_aarch64_insn (buf);
6549 insn |= encode_ld_lit_ofs_19 (value >> 2);
6550 put_aarch64_insn (buf, insn);
6551 }
6552 break;
6553
6554 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6555 if (fixP->fx_done || !seg->use_rela_p)
6556 {
6557 if (signed_overflow (value, 21))
6558 as_bad_where (fixP->fx_file, fixP->fx_line,
6559 _("pc-relative address offset out of range"));
6560 insn = get_aarch64_insn (buf);
6561 insn |= encode_adr_imm (value);
6562 put_aarch64_insn (buf, insn);
6563 }
6564 break;
6565
6566 case BFD_RELOC_AARCH64_BRANCH19:
6567 if (fixP->fx_done || !seg->use_rela_p)
6568 {
6569 if (value & 3)
6570 as_bad_where (fixP->fx_file, fixP->fx_line,
6571 _("conditional branch target not word aligned"));
6572 if (signed_overflow (value, 21))
6573 as_bad_where (fixP->fx_file, fixP->fx_line,
6574 _("conditional branch out of range"));
6575 insn = get_aarch64_insn (buf);
6576 insn |= encode_cond_branch_ofs_19 (value >> 2);
6577 put_aarch64_insn (buf, insn);
6578 }
6579 break;
6580
6581 case BFD_RELOC_AARCH64_TSTBR14:
6582 if (fixP->fx_done || !seg->use_rela_p)
6583 {
6584 if (value & 3)
6585 as_bad_where (fixP->fx_file, fixP->fx_line,
6586 _("conditional branch target not word aligned"));
6587 if (signed_overflow (value, 16))
6588 as_bad_where (fixP->fx_file, fixP->fx_line,
6589 _("conditional branch out of range"));
6590 insn = get_aarch64_insn (buf);
6591 insn |= encode_tst_branch_ofs_14 (value >> 2);
6592 put_aarch64_insn (buf, insn);
6593 }
6594 break;
6595
6596 case BFD_RELOC_AARCH64_JUMP26:
6597 case BFD_RELOC_AARCH64_CALL26:
6598 if (fixP->fx_done || !seg->use_rela_p)
6599 {
6600 if (value & 3)
6601 as_bad_where (fixP->fx_file, fixP->fx_line,
6602 _("branch target not word aligned"));
6603 if (signed_overflow (value, 28))
6604 as_bad_where (fixP->fx_file, fixP->fx_line,
6605 _("branch out of range"));
6606 insn = get_aarch64_insn (buf);
6607 insn |= encode_branch_ofs_26 (value >> 2);
6608 put_aarch64_insn (buf, insn);
6609 }
6610 break;
6611
6612 case BFD_RELOC_AARCH64_MOVW_G0:
6613 case BFD_RELOC_AARCH64_MOVW_G0_S:
6614 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6615 scale = 0;
6616 goto movw_common;
6617 case BFD_RELOC_AARCH64_MOVW_G1:
6618 case BFD_RELOC_AARCH64_MOVW_G1_S:
6619 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6620 scale = 16;
6621 goto movw_common;
6622 case BFD_RELOC_AARCH64_MOVW_G2:
6623 case BFD_RELOC_AARCH64_MOVW_G2_S:
6624 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6625 scale = 32;
6626 goto movw_common;
6627 case BFD_RELOC_AARCH64_MOVW_G3:
6628 scale = 48;
6629 movw_common:
6630 if (fixP->fx_done || !seg->use_rela_p)
6631 {
6632 insn = get_aarch64_insn (buf);
6633
6634 if (!fixP->fx_done)
6635 {
6636 /* REL signed addend must fit in 16 bits */
6637 if (signed_overflow (value, 16))
6638 as_bad_where (fixP->fx_file, fixP->fx_line,
6639 _("offset out of range"));
6640 }
6641 else
6642 {
6643 /* Check for overflow and scale. */
6644 switch (fixP->fx_r_type)
6645 {
6646 case BFD_RELOC_AARCH64_MOVW_G0:
6647 case BFD_RELOC_AARCH64_MOVW_G1:
6648 case BFD_RELOC_AARCH64_MOVW_G2:
6649 case BFD_RELOC_AARCH64_MOVW_G3:
6650 if (unsigned_overflow (value, scale + 16))
6651 as_bad_where (fixP->fx_file, fixP->fx_line,
6652 _("unsigned value out of range"));
6653 break;
6654 case BFD_RELOC_AARCH64_MOVW_G0_S:
6655 case BFD_RELOC_AARCH64_MOVW_G1_S:
6656 case BFD_RELOC_AARCH64_MOVW_G2_S:
6657 /* NOTE: We can only come here with movz or movn. */
6658 if (signed_overflow (value, scale + 16))
6659 as_bad_where (fixP->fx_file, fixP->fx_line,
6660 _("signed value out of range"));
6661 if (value < 0)
6662 {
6663 /* Force use of MOVN. */
6664 value = ~value;
6665 insn = reencode_movzn_to_movn (insn);
6666 }
6667 else
6668 {
6669 /* Force use of MOVZ. */
6670 insn = reencode_movzn_to_movz (insn);
6671 }
6672 break;
6673 default:
6674 /* Unchecked relocations. */
6675 break;
6676 }
6677 value >>= scale;
6678 }
6679
6680 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6681 insn |= encode_movw_imm (value & 0xffff);
6682
6683 put_aarch64_insn (buf, insn);
6684 }
6685 break;
6686
6687 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6688 fixP->fx_r_type = (ilp32_p
6689 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6690 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6691 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6692 /* Should always be exported to object file, see
6693 aarch64_force_relocation(). */
6694 gas_assert (!fixP->fx_done);
6695 gas_assert (seg->use_rela_p);
6696 break;
6697
6698 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6699 fixP->fx_r_type = (ilp32_p
6700 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6701 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6702 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6703 /* Should always be exported to object file, see
6704 aarch64_force_relocation(). */
6705 gas_assert (!fixP->fx_done);
6706 gas_assert (seg->use_rela_p);
6707 break;
6708
6709 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6710 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6711 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6712 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6713 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6714 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6715 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6716 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6717 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6718 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6719 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6720 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6721 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6722 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6723 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6724 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6725 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6726 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6727 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6728 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6729 /* Should always be exported to object file, see
6730 aarch64_force_relocation(). */
6731 gas_assert (!fixP->fx_done);
6732 gas_assert (seg->use_rela_p);
6733 break;
6734
6735 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6736 /* Should always be exported to object file, see
6737 aarch64_force_relocation(). */
6738 fixP->fx_r_type = (ilp32_p
6739 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6740 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6741 gas_assert (!fixP->fx_done);
6742 gas_assert (seg->use_rela_p);
6743 break;
6744
6745 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6746 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6747 case BFD_RELOC_AARCH64_ADD_LO12:
6748 case BFD_RELOC_AARCH64_LDST8_LO12:
6749 case BFD_RELOC_AARCH64_LDST16_LO12:
6750 case BFD_RELOC_AARCH64_LDST32_LO12:
6751 case BFD_RELOC_AARCH64_LDST64_LO12:
6752 case BFD_RELOC_AARCH64_LDST128_LO12:
6753 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6754 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6755 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6756 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6757 /* Should always be exported to object file, see
6758 aarch64_force_relocation(). */
6759 gas_assert (!fixP->fx_done);
6760 gas_assert (seg->use_rela_p);
6761 break;
6762
6763 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6764 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6765 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6766 break;
6767
6768 case BFD_RELOC_UNUSED:
6769 /* An error will already have been reported. */
6770 break;
6771
6772 default:
6773 as_bad_where (fixP->fx_file, fixP->fx_line,
6774 _("unexpected %s fixup"),
6775 bfd_get_reloc_code_name (fixP->fx_r_type));
6776 break;
6777 }
6778
6779 apply_fix_return:
6780 /* Free the allocated struct aarch64_inst.
6781 N.B. currently only a very limited number of fix-up types actually use
6782 this field, so the impact on performance should be minimal. */
6783 if (fixP->tc_fix_data.inst != NULL)
6784 free (fixP->tc_fix_data.inst);
6785
6786 return;
6787 }
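/* Two typical outcomes of the switch above, shown for illustration (the
   symbol names are invented):

       b   .Llocal    ; target defined in the same section: the fixup is
                      ; fully resolved and the JUMP26 case patches the
                      ; branch offset directly into the instruction word
       bl  ext_func   ; undefined/external target: the word is left alone
                      ; and an R_AARCH64_CALL26 relocation is emitted for
                      ; the linker via tc_gen_reloc

   This matches the general rule that only fixups the assembler cannot
   resolve locally survive into the object file. */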
6788
6789 /* Translate internal representation of relocation info to BFD target
6790 format. */
6791
6792 arelent *
6793 tc_gen_reloc (asection * section, fixS * fixp)
6794 {
6795 arelent *reloc;
6796 bfd_reloc_code_real_type code;
6797
6798 reloc = xmalloc (sizeof (arelent));
6799
6800 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6801 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6802 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6803
6804 if (fixp->fx_pcrel)
6805 {
6806 if (section->use_rela_p)
6807 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6808 else
6809 fixp->fx_offset = reloc->address;
6810 }
6811 reloc->addend = fixp->fx_offset;
6812
6813 code = fixp->fx_r_type;
6814 switch (code)
6815 {
6816 case BFD_RELOC_16:
6817 if (fixp->fx_pcrel)
6818 code = BFD_RELOC_16_PCREL;
6819 break;
6820
6821 case BFD_RELOC_32:
6822 if (fixp->fx_pcrel)
6823 code = BFD_RELOC_32_PCREL;
6824 break;
6825
6826 case BFD_RELOC_64:
6827 if (fixp->fx_pcrel)
6828 code = BFD_RELOC_64_PCREL;
6829 break;
6830
6831 default:
6832 break;
6833 }
6834
6835 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6836 if (reloc->howto == NULL)
6837 {
6838 as_bad_where (fixp->fx_file, fixp->fx_line,
6839 _
6840 ("cannot represent %s relocation in this object file format"),
6841 bfd_get_reloc_code_name (code));
6842 return NULL;
6843 }
6844
6845 return reloc;
6846 }
6847
6848 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6849
6850 void
6851 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6852 {
6853 bfd_reloc_code_real_type type;
6854 int pcrel = 0;
6855
6856 /* Pick a reloc.
6857 FIXME: @@ Should look at CPU word size. */
6858 switch (size)
6859 {
6860 case 1:
6861 type = BFD_RELOC_8;
6862 break;
6863 case 2:
6864 type = BFD_RELOC_16;
6865 break;
6866 case 4:
6867 type = BFD_RELOC_32;
6868 break;
6869 case 8:
6870 type = BFD_RELOC_64;
6871 break;
6872 default:
6873 as_bad (_("cannot do %u-byte relocation"), size);
6874 type = BFD_RELOC_UNUSED;
6875 break;
6876 }
6877
6878 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6879 }
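/* For example (illustrative), data directives map onto these relocs:

       .byte  sym   ->  BFD_RELOC_8
       .hword sym   ->  BFD_RELOC_16
       .word  sym   ->  BFD_RELOC_32
       .xword sym   ->  BFD_RELOC_64

   Any other size is rejected with the "cannot do N-byte relocation"
   error above. */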
6880
6881 int
6882 aarch64_force_relocation (struct fix *fixp)
6883 {
6884 switch (fixp->fx_r_type)
6885 {
6886 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6887 /* Perform these "immediate" internal relocations
6888 even if the symbol is extern or weak. */
6889 return 0;
6890
6891 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6892 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6893 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6894 /* Pseudo relocs that need to be fixed up according to
6895 ilp32_p. */
6896 return 0;
6897
6898 case BFD_RELOC_AARCH64_ADD_LO12:
6899 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6900 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6901 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6902 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6903 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6904 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6905 case BFD_RELOC_AARCH64_LDST128_LO12:
6906 case BFD_RELOC_AARCH64_LDST16_LO12:
6907 case BFD_RELOC_AARCH64_LDST32_LO12:
6908 case BFD_RELOC_AARCH64_LDST64_LO12:
6909 case BFD_RELOC_AARCH64_LDST8_LO12:
6910 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6911 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6912 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6913 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6914 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6915 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6916 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6917 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6918 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6919 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6920 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6921 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6922 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6923 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6924 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6925 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6926 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6927 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6928 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6929 /* Always leave these relocations for the linker. */
6930 return 1;
6931
6932 default:
6933 break;
6934 }
6935
6936 return generic_force_reloc (fixp);
6937 }
6938
6939 #ifdef OBJ_ELF
6940
6941 const char *
6942 elf64_aarch64_target_format (void)
6943 {
6944 if (target_big_endian)
6945 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
6946 else
6947 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
6948 }
6949
6950 void
6951 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6952 {
6953 elf_frob_symbol (symp, puntp);
6954 }
6955 #endif
6956
6957 /* MD interface: Finalization. */
6958
6959 /* A good place to do this, although this was probably not intended
6960 for this kind of use. We need to dump the literal pool before
6961 references are made to a null symbol pointer. */
6962
6963 void
6964 aarch64_cleanup (void)
6965 {
6966 literal_pool *pool;
6967
6968 for (pool = list_of_pools; pool; pool = pool->next)
6969 {
6970 /* Put it at the end of the relevant section. */
6971 subseg_set (pool->section, pool->sub_section);
6972 s_ltorg (0);
6973 }
6974 }
6975
6976 #ifdef OBJ_ELF
6977 /* Remove any excess mapping symbols generated for alignment frags in
6978 SEC. We may have created a mapping symbol before a zero byte
6979 alignment; remove it if there's a mapping symbol after the
6980 alignment. */
6981 static void
6982 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6983 void *dummy ATTRIBUTE_UNUSED)
6984 {
6985 segment_info_type *seginfo = seg_info (sec);
6986 fragS *fragp;
6987
6988 if (seginfo == NULL || seginfo->frchainP == NULL)
6989 return;
6990
6991 for (fragp = seginfo->frchainP->frch_root;
6992 fragp != NULL; fragp = fragp->fr_next)
6993 {
6994 symbolS *sym = fragp->tc_frag_data.last_map;
6995 fragS *next = fragp->fr_next;
6996
6997 /* Variable-sized frags have been converted to fixed size by
6998 this point. But if this was variable-sized to start with,
6999 there will be a fixed-size frag after it. So don't handle
7000 next == NULL. */
7001 if (sym == NULL || next == NULL)
7002 continue;
7003
7004 if (S_GET_VALUE (sym) < next->fr_address)
7005 /* Not at the end of this frag. */
7006 continue;
7007 know (S_GET_VALUE (sym) == next->fr_address);
7008
7009 do
7010 {
7011 if (next->tc_frag_data.first_map != NULL)
7012 {
7013 /* Next frag starts with a mapping symbol. Discard this
7014 one. */
7015 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7016 break;
7017 }
7018
7019 if (next->fr_next == NULL)
7020 {
7021 /* This mapping symbol is at the end of the section. Discard
7022 it. */
7023 know (next->fr_fix == 0 && next->fr_var == 0);
7024 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7025 break;
7026 }
7027
7028 /* As long as we have empty frags without any mapping symbols,
7029 keep looking. */
7030 /* If the next frag is non-empty and does not start with a
7031 mapping symbol, then this mapping symbol is required. */
7032 if (next->fr_address != next->fr_next->fr_address)
7033 break;
7034
7035 next = next->fr_next;
7036 }
7037 while (next != NULL);
7038 }
7039 }
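/* Rough sketch of the clean-up performed above (example only):

       .text
       add  w0, w0, #1     ; "$x" mapping symbol at the start
       .balign 4           ; already aligned: the padding is empty, yet a
                           ; mapping symbol may have been recorded for it
       sub  w0, w0, #1     ; the next frag begins with its own "$x"

   The redundant symbol created for the empty alignment is the one removed
   by the loop above. */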
7040 #endif
7041
7042 /* Adjust the symbol table. */
7043
7044 void
7045 aarch64_adjust_symtab (void)
7046 {
7047 #ifdef OBJ_ELF
7048 /* Remove any overlapping mapping symbols generated by alignment frags. */
7049 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7050 /* Now do generic ELF adjustments. */
7051 elf_adjust_symtab ();
7052 #endif
7053 }
7054
7055 static void
7056 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7057 {
7058 const char *hash_err;
7059
7060 hash_err = hash_insert (table, key, value);
7061 if (hash_err)
7062 printf ("Internal Error: Can't hash %s\n", key);
7063 }
7064
7065 static void
7066 fill_instruction_hash_table (void)
7067 {
7068 aarch64_opcode *opcode = aarch64_opcode_table;
7069
7070 while (opcode->name != NULL)
7071 {
7072 templates *templ, *new_templ;
7073 templ = hash_find (aarch64_ops_hsh, opcode->name);
7074
7075 new_templ = (templates *) xmalloc (sizeof (templates));
7076 new_templ->opcode = opcode;
7077 new_templ->next = NULL;
7078
7079 if (!templ)
7080 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7081 else
7082 {
7083 new_templ->next = templ->next;
7084 templ->next = new_templ;
7085 }
7086 ++opcode;
7087 }
7088 }
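/* Illustrative note: a mnemonic such as "ldr" has several entries in
   aarch64_opcode_table (literal, immediate-offset, register-offset and
   SIMD&FP forms), so the hash bucket built above becomes a chain, roughly

       aarch64_ops_hsh["ldr"] -> templates for the various "ldr" forms

   which the operand parser walks later to find the matching template. */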
7089
7090 static inline void
7091 convert_to_upper (char *dst, const char *src, size_t num)
7092 {
7093 unsigned int i;
7094 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7095 *dst = TOUPPER (*src);
7096 *dst = '\0';
7097 }
7098
7099 /* Assume STR points to a lower-case string; allocate, convert and return
7100 the corresponding upper-case string. */
7101 static inline const char*
7102 get_upper_str (const char *str)
7103 {
7104 char *ret;
7105 size_t len = strlen (str);
7106 if ((ret = xmalloc (len + 1)) == NULL)
7107 abort ();
7108 convert_to_upper (ret, str, len);
7109 return ret;
7110 }
7111
7112 /* MD interface: Initialization. */
7113
7114 void
7115 md_begin (void)
7116 {
7117 unsigned mach;
7118 unsigned int i;
7119
7120 if ((aarch64_ops_hsh = hash_new ()) == NULL
7121 || (aarch64_cond_hsh = hash_new ()) == NULL
7122 || (aarch64_shift_hsh = hash_new ()) == NULL
7123 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7124 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7125 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7126 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7127 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7128 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7129 || (aarch64_reg_hsh = hash_new ()) == NULL
7130 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7131 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7132 || (aarch64_pldop_hsh = hash_new ()) == NULL)
7133 as_fatal (_("virtual memory exhausted"));
7134
7135 fill_instruction_hash_table ();
7136
7137 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7138 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7139 (void *) (aarch64_sys_regs + i));
7140
7141 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7142 checked_hash_insert (aarch64_pstatefield_hsh,
7143 aarch64_pstatefields[i].name,
7144 (void *) (aarch64_pstatefields + i));
7145
7146 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
7147 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7148 aarch64_sys_regs_ic[i].template,
7149 (void *) (aarch64_sys_regs_ic + i));
7150
7151 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
7152 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7153 aarch64_sys_regs_dc[i].template,
7154 (void *) (aarch64_sys_regs_dc + i));
7155
7156 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
7157 checked_hash_insert (aarch64_sys_regs_at_hsh,
7158 aarch64_sys_regs_at[i].template,
7159 (void *) (aarch64_sys_regs_at + i));
7160
7161 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
7162 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7163 aarch64_sys_regs_tlbi[i].template,
7164 (void *) (aarch64_sys_regs_tlbi + i));
7165
7166 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7167 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7168 (void *) (reg_names + i));
7169
7170 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7171 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7172 (void *) (nzcv_names + i));
7173
7174 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7175 {
7176 const char *name = aarch64_operand_modifiers[i].name;
7177 checked_hash_insert (aarch64_shift_hsh, name,
7178 (void *) (aarch64_operand_modifiers + i));
7179 /* Also hash the name in the upper case. */
7180 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7181 (void *) (aarch64_operand_modifiers + i));
7182 }
7183
7184 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7185 {
7186 unsigned int j;
7187 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7188 the same condition code. */
7189 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7190 {
7191 const char *name = aarch64_conds[i].names[j];
7192 if (name == NULL)
7193 break;
7194 checked_hash_insert (aarch64_cond_hsh, name,
7195 (void *) (aarch64_conds + i));
7196 /* Also hash the name in the upper case. */
7197 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7198 (void *) (aarch64_conds + i));
7199 }
7200 }
7201
7202 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7203 {
7204 const char *name = aarch64_barrier_options[i].name;
7205 /* Skip xx00 - the unallocated values of the option field. */
7206 if ((i & 0x3) == 0)
7207 continue;
7208 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7209 (void *) (aarch64_barrier_options + i));
7210 /* Also hash the name in the upper case. */
7211 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7212 (void *) (aarch64_barrier_options + i));
7213 }
7214
7215 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7216 {
7217 const char* name = aarch64_prfops[i].name;
7218 /* Skip the unallocated hint encodings. */
7219 if (name == NULL)
7220 continue;
7221 checked_hash_insert (aarch64_pldop_hsh, name,
7222 (void *) (aarch64_prfops + i));
7223 /* Also hash the name in the upper case. */
7224 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7225 (void *) (aarch64_prfops + i));
7226 }
7227
7228 /* Set the cpu variant based on the command-line options. */
7229 if (!mcpu_cpu_opt)
7230 mcpu_cpu_opt = march_cpu_opt;
7231
7232 if (!mcpu_cpu_opt)
7233 mcpu_cpu_opt = &cpu_default;
7234
7235 cpu_variant = *mcpu_cpu_opt;
7236
7237 /* Record the CPU type. */
7238 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7239
7240 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7241 }
7242
7243 /* Command line processing. */
7244
7245 const char *md_shortopts = "m:";
7246
7247 #ifdef AARCH64_BI_ENDIAN
7248 #define OPTION_EB (OPTION_MD_BASE + 0)
7249 #define OPTION_EL (OPTION_MD_BASE + 1)
7250 #else
7251 #if TARGET_BYTES_BIG_ENDIAN
7252 #define OPTION_EB (OPTION_MD_BASE + 0)
7253 #else
7254 #define OPTION_EL (OPTION_MD_BASE + 1)
7255 #endif
7256 #endif
7257
7258 struct option md_longopts[] = {
7259 #ifdef OPTION_EB
7260 {"EB", no_argument, NULL, OPTION_EB},
7261 #endif
7262 #ifdef OPTION_EL
7263 {"EL", no_argument, NULL, OPTION_EL},
7264 #endif
7265 {NULL, no_argument, NULL, 0}
7266 };
7267
7268 size_t md_longopts_size = sizeof (md_longopts);
7269
7270 struct aarch64_option_table
7271 {
7272 char *option; /* Option name to match. */
7273 char *help; /* Help information. */
7274 int *var; /* Variable to change. */
7275 int value; /* What to change it to. */
7276 char *deprecated; /* If non-null, print this message. */
7277 };
7278
7279 static struct aarch64_option_table aarch64_opts[] = {
7280 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7281 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7282 NULL},
7283 #ifdef DEBUG_AARCH64
7284 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7285 #endif /* DEBUG_AARCH64 */
7286 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7287 NULL},
7288 {"mno-verbose-error", N_("do not output verbose error messages"),
7289 &verbose_error_p, 0, NULL},
7290 {NULL, NULL, NULL, 0, NULL}
7291 };
7292
7293 struct aarch64_cpu_option_table
7294 {
7295 char *name;
7296 const aarch64_feature_set value;
7297 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7298 case. */
7299 const char *canonical_name;
7300 };
7301
7302 /* This list should, at a minimum, contain all the cpu names
7303 recognized by GCC. */
7304 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7305 {"all", AARCH64_ANY, NULL},
7306 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7307 AARCH64_FEATURE_CRC), "Cortex-A53"},
7308 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7309 AARCH64_FEATURE_CRC), "Cortex-A57"},
7310 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7311 AARCH64_FEATURE_CRC), "Cortex-A72"},
7312 {"thunderx", AARCH64_ARCH_V8, "Cavium ThunderX"},
7313 /* The 'xgene-1' name is an older spelling of 'xgene1'; it was used
7314 in earlier releases and is superseded by 'xgene1' in all
7315 tools. */
7316 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7317 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7318 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7319 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7320 {"generic", AARCH64_ARCH_V8, NULL},
7321
7322 {NULL, AARCH64_ARCH_NONE, NULL}
7323 };
7324
7325 struct aarch64_arch_option_table
7326 {
7327 char *name;
7328 const aarch64_feature_set value;
7329 };
7330
7331 /* This list should, at a minimum, contain all the architecture names
7332 recognized by GCC. */
7333 static const struct aarch64_arch_option_table aarch64_archs[] = {
7334 {"all", AARCH64_ANY},
7335 {"armv8-a", AARCH64_ARCH_V8},
7336 {NULL, AARCH64_ARCH_NONE}
7337 };
7338
7339 /* ISA extensions. */
7340 struct aarch64_option_cpu_value_table
7341 {
7342 char *name;
7343 const aarch64_feature_set value;
7344 };
7345
7346 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7347 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7348 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7349 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7350 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
7351 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7352 {NULL, AARCH64_ARCH_NONE}
7353 };
7354
7355 struct aarch64_long_option_table
7356 {
7357 char *option; /* Substring to match. */
7358 char *help; /* Help information. */
7359 int (*func) (char *subopt); /* Function to decode sub-option. */
7360 char *deprecated; /* If non-null, print this message. */
7361 };
7362
7363 static int
7364 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p,
7365 bfd_boolean ext_only)
7366 {
7367 /* We insist on extensions being added before being removed. We achieve
7368 this by using the ADDING_VALUE variable to indicate whether we are
7369 adding an extension (1) or removing it (0) and only allowing it to
7370 change in the order -1 -> 1 -> 0. */
7371 int adding_value = -1;
7372 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7373
7374 /* Copy the feature set, so that we can modify it. */
7375 *ext_set = **opt_p;
7376 *opt_p = ext_set;
7377
7378 while (str != NULL && *str != 0)
7379 {
7380 const struct aarch64_option_cpu_value_table *opt;
7381 char *ext = NULL;
7382 int optlen;
7383
7384 if (!ext_only)
7385 {
7386 if (*str != '+')
7387 {
7388 as_bad (_("invalid architectural extension"));
7389 return 0;
7390 }
7391
7392 ext = strchr (++str, '+');
7393 }
7394
7395 if (ext != NULL)
7396 optlen = ext - str;
7397 else
7398 optlen = strlen (str);
7399
7400 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7401 {
7402 if (adding_value != 0)
7403 adding_value = 0;
7404 optlen -= 2;
7405 str += 2;
7406 }
7407 else if (optlen > 0)
7408 {
7409 if (adding_value == -1)
7410 adding_value = 1;
7411 else if (adding_value != 1)
7412 {
7413 as_bad (_("must specify extensions to add before specifying "
7414 "those to remove"));
7415 return 0;
7416 }
7417 }
7418
7419 if (optlen == 0)
7420 {
7421 as_bad (_("missing architectural extension"));
7422 return 0;
7423 }
7424
7425 gas_assert (adding_value != -1);
7426
7427 for (opt = aarch64_features; opt->name != NULL; opt++)
7428 if (strncmp (opt->name, str, optlen) == 0)
7429 {
7430 /* Add or remove the extension. */
7431 if (adding_value)
7432 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7433 else
7434 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7435 break;
7436 }
7437
7438 if (opt->name == NULL)
7439 {
7440 as_bad (_("unknown architectural extension `%s'"), str);
7441 return 0;
7442 }
7443
7444 str = ext;
7445 }
7446
7447 return 1;
7448 }
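/* Example command lines accepted by this parser (illustrative only):

       -march=armv8-a+crc+crypto   enable the CRC and crypto extensions
       -mcpu=cortex-a57+nocrc      start from Cortex-A57, then remove CRC
       -march=armv8-a+crc+nofp     fine: additions precede removals
       -march=armv8-a+nofp+crc     rejected by the ordering check above

   Valid extension names are those in aarch64_features[], currently
   "crc", "crypto", "fp", "lse" and "simd". */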
7449
7450 static int
7451 aarch64_parse_cpu (char *str)
7452 {
7453 const struct aarch64_cpu_option_table *opt;
7454 char *ext = strchr (str, '+');
7455 size_t optlen;
7456
7457 if (ext != NULL)
7458 optlen = ext - str;
7459 else
7460 optlen = strlen (str);
7461
7462 if (optlen == 0)
7463 {
7464 as_bad (_("missing cpu name `%s'"), str);
7465 return 0;
7466 }
7467
7468 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7469 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7470 {
7471 mcpu_cpu_opt = &opt->value;
7472 if (ext != NULL)
7473 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7474
7475 return 1;
7476 }
7477
7478 as_bad (_("unknown cpu `%s'"), str);
7479 return 0;
7480 }
7481
7482 static int
7483 aarch64_parse_arch (char *str)
7484 {
7485 const struct aarch64_arch_option_table *opt;
7486 char *ext = strchr (str, '+');
7487 size_t optlen;
7488
7489 if (ext != NULL)
7490 optlen = ext - str;
7491 else
7492 optlen = strlen (str);
7493
7494 if (optlen == 0)
7495 {
7496 as_bad (_("missing architecture name `%s'"), str);
7497 return 0;
7498 }
7499
7500 for (opt = aarch64_archs; opt->name != NULL; opt++)
7501 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7502 {
7503 march_cpu_opt = &opt->value;
7504 if (ext != NULL)
7505 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
7506
7507 return 1;
7508 }
7509
7510 as_bad (_("unknown architecture `%s'\n"), str);
7511 return 0;
7512 }
7513
7514 /* ABIs. */
7515 struct aarch64_option_abi_value_table
7516 {
7517 char *name;
7518 enum aarch64_abi_type value;
7519 };
7520
7521 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7522 {"ilp32", AARCH64_ABI_ILP32},
7523 {"lp64", AARCH64_ABI_LP64},
7524 {NULL, 0}
7525 };
7526
7527 static int
7528 aarch64_parse_abi (char *str)
7529 {
7530 const struct aarch64_option_abi_value_table *opt;
7531 size_t optlen = strlen (str);
7532
7533 if (optlen == 0)
7534 {
7535 as_bad (_("missing abi name `%s'"), str);
7536 return 0;
7537 }
7538
7539 for (opt = aarch64_abis; opt->name != NULL; opt++)
7540 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7541 {
7542 aarch64_abi = opt->value;
7543 return 1;
7544 }
7545
7546 as_bad (_("unknown abi `%s'\n"), str);
7547 return 0;
7548 }
7549
7550 static struct aarch64_long_option_table aarch64_long_opts[] = {
7551 #ifdef OBJ_ELF
7552 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
7553 aarch64_parse_abi, NULL},
7554 #endif /* OBJ_ELF */
7555 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7556 aarch64_parse_cpu, NULL},
7557 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7558 aarch64_parse_arch, NULL},
7559 {NULL, NULL, 0, NULL}
7560 };
7561
7562 int
7563 md_parse_option (int c, char *arg)
7564 {
7565 struct aarch64_option_table *opt;
7566 struct aarch64_long_option_table *lopt;
7567
7568 switch (c)
7569 {
7570 #ifdef OPTION_EB
7571 case OPTION_EB:
7572 target_big_endian = 1;
7573 break;
7574 #endif
7575
7576 #ifdef OPTION_EL
7577 case OPTION_EL:
7578 target_big_endian = 0;
7579 break;
7580 #endif
7581
7582 case 'a':
7583 /* Listing option. Just ignore these; we don't support additional
7584 ones. */
7585 return 0;
7586
7587 default:
7588 for (opt = aarch64_opts; opt->option != NULL; opt++)
7589 {
7590 if (c == opt->option[0]
7591 && ((arg == NULL && opt->option[1] == 0)
7592 || streq (arg, opt->option + 1)))
7593 {
7594 /* If the option is deprecated, tell the user. */
7595 if (opt->deprecated != NULL)
7596 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7597 arg ? arg : "", _(opt->deprecated));
7598
7599 if (opt->var != NULL)
7600 *opt->var = opt->value;
7601
7602 return 1;
7603 }
7604 }
7605
7606 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7607 {
7608 /* These options are expected to have an argument. */
7609 if (c == lopt->option[0]
7610 && arg != NULL
7611 && strncmp (arg, lopt->option + 1,
7612 strlen (lopt->option + 1)) == 0)
7613 {
7614 /* If the option is deprecated, tell the user. */
7615 if (lopt->deprecated != NULL)
7616 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7617 _(lopt->deprecated));
7618
7619 /* Call the sub-option parser. */
7620 return lopt->func (arg + strlen (lopt->option) - 1);
7621 }
7622 }
7623
7624 return 0;
7625 }
7626
7627 return 1;
7628 }
7629
7630 void
7631 md_show_usage (FILE * fp)
7632 {
7633 struct aarch64_option_table *opt;
7634 struct aarch64_long_option_table *lopt;
7635
7636 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7637
7638 for (opt = aarch64_opts; opt->option != NULL; opt++)
7639 if (opt->help != NULL)
7640 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7641
7642 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7643 if (lopt->help != NULL)
7644 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7645
7646 #ifdef OPTION_EB
7647 fprintf (fp, _("\
7648 -EB assemble code for a big-endian cpu\n"));
7649 #endif
7650
7651 #ifdef OPTION_EL
7652 fprintf (fp, _("\
7653 -EL assemble code for a little-endian cpu\n"));
7654 #endif
7655 }
7656
7657 /* Parse a .cpu directive. */
7658
7659 static void
7660 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7661 {
7662 const struct aarch64_cpu_option_table *opt;
7663 char saved_char;
7664 char *name;
7665 char *ext;
7666 size_t optlen;
7667
7668 name = input_line_pointer;
7669 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7670 input_line_pointer++;
7671 saved_char = *input_line_pointer;
7672 *input_line_pointer = 0;
7673
7674 ext = strchr (name, '+');
7675
7676 if (ext != NULL)
7677 optlen = ext - name;
7678 else
7679 optlen = strlen (name);
7680
7681 /* Skip the first "all" entry. */
7682 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7683 if (strlen (opt->name) == optlen
7684 && strncmp (name, opt->name, optlen) == 0)
7685 {
7686 mcpu_cpu_opt = &opt->value;
7687 if (ext != NULL)
7688 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7689 return;
7690
7691 cpu_variant = *mcpu_cpu_opt;
7692
7693 *input_line_pointer = saved_char;
7694 demand_empty_rest_of_line ();
7695 return;
7696 }
7697 as_bad (_("unknown cpu `%s'"), name);
7698 *input_line_pointer = saved_char;
7699 ignore_rest_of_line ();
7700 }
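/* Usage sketch for this directive (example only):

       .cpu generic+crc    ; base ARMv8-A plus the CRC extension
       .cpu cortex-a53     ; any named entry from aarch64_cpus[] except "all"

   The same "+extension" suffix syntax is shared with .arch below, while
   .arch_extension takes a single extension name without the '+'. */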
7701
7702
7703 /* Parse a .arch directive. */
7704
7705 static void
7706 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7707 {
7708 const struct aarch64_arch_option_table *opt;
7709 char saved_char;
7710 char *name;
7711 char *ext;
7712 size_t optlen;
7713
7714 name = input_line_pointer;
7715 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7716 input_line_pointer++;
7717 saved_char = *input_line_pointer;
7718 *input_line_pointer = 0;
7719
7720 ext = strchr (name, '+');
7721
7722 if (ext != NULL)
7723 optlen = ext - name;
7724 else
7725 optlen = strlen (name);
7726
7727 /* Skip the first "all" entry. */
7728 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7729 if (strlen (opt->name) == optlen
7730 && strncmp (name, opt->name, optlen) == 0)
7731 {
7732 mcpu_cpu_opt = &opt->value;
7733 if (ext != NULL)
7734 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7735 return;
7736
7737 cpu_variant = *mcpu_cpu_opt;
7738
7739 *input_line_pointer = saved_char;
7740 demand_empty_rest_of_line ();
7741 return;
7742 }
7743
7744 as_bad (_("unknown architecture `%s'\n"), name);
7745 *input_line_pointer = saved_char;
7746 ignore_rest_of_line ();
7747 }
7748
7749 /* Parse a .arch_extension directive. */
7750
7751 static void
7752 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
7753 {
7754 char saved_char;
7755 char *ext = input_line_pointer;
7756
7757 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7758 input_line_pointer++;
7759 saved_char = *input_line_pointer;
7760 *input_line_pointer = 0;
7761
7762 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
7763 return;
7764
7765 cpu_variant = *mcpu_cpu_opt;
7766
7767 *input_line_pointer = saved_char;
7768 demand_empty_rest_of_line ();
7769 }
7770
7771 /* Copy symbol information. */
7772
7773 void
7774 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7775 {
7776 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7777 }