[AArch64, ILP32] Retire -milp32 and -mlp64; use -mabi=ilp32 and -mabi=lp64.
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright 2009, 2010, 2011, 2012, 2013
4 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GAS.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the license, or
12 (at your option) any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING3. If not,
21 see <http://www.gnu.org/licenses/>. */
22
23 #include "as.h"
24 #include <limits.h>
25 #include <stdarg.h>
26 #include "bfd_stdint.h"
27 #define NO_RELOC 0
28 #include "safe-ctype.h"
29 #include "subsegs.h"
30 #include "obstack.h"
31
32 #ifdef OBJ_ELF
33 #include "elf/aarch64.h"
34 #include "dw2gencfi.h"
35 #endif
36
37 #include "dwarf2dbg.h"
38
39 /* Types of processor to assemble for. */
40 #ifndef CPU_DEFAULT
41 #define CPU_DEFAULT AARCH64_ARCH_V8
42 #endif
43
44 #define streq(a, b) (strcmp (a, b) == 0)
45
46 static aarch64_feature_set cpu_variant;
47
48 /* Variables that we set while parsing command-line options. Once all
49 options have been read we re-process these values to set the real
50 assembly flags. */
51 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
52 static const aarch64_feature_set *march_cpu_opt = NULL;
53
54 /* Constants for known architecture features. */
55 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
56
57 static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY;
58 static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE;
59
60 #ifdef OBJ_ELF
61 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
62 static symbolS *GOT_symbol;
63
64 /* Which ABI to use. */
65 enum aarch64_abi_type
66 {
67 AARCH64_ABI_LP64 = 0,
68 AARCH64_ABI_ILP32 = 1
69 };
70
71 /* AArch64 ABI for the output file. */
72 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
73
74 /* When non-zero, program to a 32-bit model, in which the C data types
75 int, long and all pointer types are 32-bit objects (ILP32); or to a
76 64-bit model, in which the C int type is 32-bits but the C long type
77 and all pointer types are 64-bit objects (LP64). */
78 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
79 #endif
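/* Illustrative usage (sketch, following the commit summary above, which
   retires -milp32/-mlp64 in favour of the -mabi= spelling):

       as -mabi=lp64  file.s    # LP64 (the default): 64-bit long and pointers
       as -mabi=ilp32 file.s    # ILP32: 32-bit int, long and pointer types

   The selected ABI is recorded in aarch64_abi and tested with the ilp32_p
   macro above.  */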
80
81 enum neon_el_type
82 {
83 NT_invtype = -1,
84 NT_b,
85 NT_h,
86 NT_s,
87 NT_d,
88 NT_q
89 };
90
91 /* Bits for DEFINED field in neon_type_el. */
92 #define NTA_HASTYPE 1
93 #define NTA_HASINDEX 2
94
95 struct neon_type_el
96 {
97 enum neon_el_type type;
98 unsigned char defined;
99 unsigned width;
100 int64_t index;
101 };
102
103 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
104
105 struct reloc
106 {
107 bfd_reloc_code_real_type type;
108 expressionS exp;
109 int pc_rel;
110 enum aarch64_opnd opnd;
111 uint32_t flags;
112 unsigned need_libopcodes_p : 1;
113 };
114
115 struct aarch64_instruction
116 {
117 /* libopcodes structure for instruction intermediate representation. */
118 aarch64_inst base;
119 /* Record assembly errors found during the parsing. */
120 struct
121 {
122 enum aarch64_operand_error_kind kind;
123 const char *error;
124 } parsing_error;
125 /* The condition that appears in the assembly line. */
126 int cond;
127 /* Relocation information (including the GAS internal fixup). */
128 struct reloc reloc;
129 /* Need to generate an immediate in the literal pool. */
130 unsigned gen_lit_pool : 1;
131 };
132
133 typedef struct aarch64_instruction aarch64_instruction;
134
135 static aarch64_instruction inst;
136
137 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
138 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
139
140 /* Diagnostics inline function utilities.
141
142 These are lightweight utilities which should only be called by parse_operands
143 and other parsers. GAS processes each assembly line by parsing it against
144 instruction template(s); in the case of multiple templates (for the same
145 mnemonic name), those templates are tried one by one until one succeeds or
146 all fail. An assembly line may fail a few templates before being
147 successfully parsed; an error saved here in most cases is not a user error
148 but an error indicating the current template is not the right template.
149 Therefore it is very important that errors can be saved at a low cost during
150 the parsing; we don't want to slow down the whole parsing by recording
151 non-user errors in detail.
152
153 Remember that the objective is to help GAS pick up the most appropriate
154 error message in the case of multiple templates, e.g. FMOV which has 8
155 templates. */
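/* For illustration (a sketch of how the helpers below are used): a parser
   typically records a diagnostic and bails out in one step, e.g.

       if (reg == PARSE_FAIL)
         {
           set_default_error ();
           return PARSE_FAIL;
         }

   set_first_syntax_error and first_error additionally refuse to overwrite a
   diagnostic that has already been recorded, so the earliest message saved
   for an assembly line is the one that survives to be reported.  */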
156
157 static inline void
158 clear_error (void)
159 {
160 inst.parsing_error.kind = AARCH64_OPDE_NIL;
161 inst.parsing_error.error = NULL;
162 }
163
164 static inline bfd_boolean
165 error_p (void)
166 {
167 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
168 }
169
170 static inline const char *
171 get_error_message (void)
172 {
173 return inst.parsing_error.error;
174 }
175
176 static inline void
177 set_error_message (const char *error)
178 {
179 inst.parsing_error.error = error;
180 }
181
182 static inline enum aarch64_operand_error_kind
183 get_error_kind (void)
184 {
185 return inst.parsing_error.kind;
186 }
187
188 static inline void
189 set_error_kind (enum aarch64_operand_error_kind kind)
190 {
191 inst.parsing_error.kind = kind;
192 }
193
194 static inline void
195 set_error (enum aarch64_operand_error_kind kind, const char *error)
196 {
197 inst.parsing_error.kind = kind;
198 inst.parsing_error.error = error;
199 }
200
201 static inline void
202 set_recoverable_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_RECOVERABLE, error);
205 }
206
207 /* Use the DESC field of the corresponding aarch64_operand entry to compose
208 the error message. */
209 static inline void
210 set_default_error (void)
211 {
212 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
213 }
214
215 static inline void
216 set_syntax_error (const char *error)
217 {
218 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
219 }
220
221 static inline void
222 set_first_syntax_error (const char *error)
223 {
224 if (! error_p ())
225 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
226 }
227
228 static inline void
229 set_fatal_syntax_error (const char *error)
230 {
231 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
232 }
233 \f
234 /* Number of littlenums required to hold an extended precision number. */
235 #define MAX_LITTLENUMS 6
236
237 /* Return value for certain parsers when the parsing fails; those parsers
238 return the information of the parsed result, e.g. register number, on
239 success. */
240 #define PARSE_FAIL -1
241
242 /* This is an invalid condition code that means no conditional field is
243 present. */
244 #define COND_ALWAYS 0x10
245
246 typedef struct
247 {
248 const char *template;
249 unsigned long value;
250 } asm_barrier_opt;
251
252 typedef struct
253 {
254 const char *template;
255 uint32_t value;
256 } asm_nzcv;
257
258 struct reloc_entry
259 {
260 char *name;
261 bfd_reloc_code_real_type reloc;
262 };
263
264 /* Structure for a hash table entry for a register. */
265 typedef struct
266 {
267 const char *name;
268 unsigned char number;
269 unsigned char type;
270 unsigned char builtin;
271 } reg_entry;
272
273 /* Macros to define the register types and masks for the purpose
274 of parsing. */
275
276 #undef AARCH64_REG_TYPES
277 #define AARCH64_REG_TYPES \
278 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
279 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
280 BASIC_REG_TYPE(SP_32) /* wsp */ \
281 BASIC_REG_TYPE(SP_64) /* sp */ \
282 BASIC_REG_TYPE(Z_32) /* wzr */ \
283 BASIC_REG_TYPE(Z_64) /* xzr */ \
284 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
285 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
286 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
287 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
288 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
289 BASIC_REG_TYPE(CN) /* c[0-7] */ \
290 BASIC_REG_TYPE(VN) /* v[0-31] */ \
291 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
292 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
293 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
294 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
296 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
297 /* Typecheck: any [BHSDQ]P FP. */ \
298 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
299 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
300 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
301 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
303 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
304 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
305 /* Any integer register; used for error messages only. */ \
306 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
307 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
309 /* Pseudo type to mark the end of the enumerator sequence. */ \
310 BASIC_REG_TYPE(MAX)
311
312 #undef BASIC_REG_TYPE
313 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
314 #undef MULTI_REG_TYPE
315 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
316
317 /* Register type enumerators. */
318 typedef enum
319 {
320 /* A list of REG_TYPE_*. */
321 AARCH64_REG_TYPES
322 } aarch64_reg_type;
323
324 #undef BASIC_REG_TYPE
325 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
326 #undef REG_TYPE
327 #define REG_TYPE(T) (1 << REG_TYPE_##T)
328 #undef MULTI_REG_TYPE
329 #define MULTI_REG_TYPE(T,V) V,
330
331 /* Values indexed by aarch64_reg_type to assist the type checking. */
332 static const unsigned reg_type_masks[] =
333 {
334 AARCH64_REG_TYPES
335 };
336
337 #undef BASIC_REG_TYPE
338 #undef REG_TYPE
339 #undef MULTI_REG_TYPE
340 #undef AARCH64_REG_TYPES
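/* For illustration, the X-macro above expands in two passes: first into the
   enumerators of aarch64_reg_type,

       REG_TYPE_R_32, REG_TYPE_R_64, ..., REG_TYPE_VN, REG_TYPE_R64_SP, ...

   and then into reg_type_masks[], where each BASIC_REG_TYPE contributes its
   own bit and each MULTI_REG_TYPE is the OR of the bits it names, e.g.

       reg_type_masks[REG_TYPE_R64_SP]
         == (1 << REG_TYPE_R_64) | (1 << REG_TYPE_SP_64)

   which is what the type checking below relies on.  */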
341
342 /* Diagnostics used when we don't get a register of the expected type.
343 Note: this has to be kept synchronized with the aarch64_reg_type definitions
344 above. */
345 static const char *
346 get_reg_expected_msg (aarch64_reg_type reg_type)
347 {
348 const char *msg;
349
350 switch (reg_type)
351 {
352 case REG_TYPE_R_32:
353 msg = N_("integer 32-bit register expected");
354 break;
355 case REG_TYPE_R_64:
356 msg = N_("integer 64-bit register expected");
357 break;
358 case REG_TYPE_R_N:
359 msg = N_("integer register expected");
360 break;
361 case REG_TYPE_R_Z_SP:
362 msg = N_("integer, zero or SP register expected");
363 break;
364 case REG_TYPE_FP_B:
365 msg = N_("8-bit SIMD scalar register expected");
366 break;
367 case REG_TYPE_FP_H:
368 msg = N_("16-bit SIMD scalar or floating-point half precision "
369 "register expected");
370 break;
371 case REG_TYPE_FP_S:
372 msg = N_("32-bit SIMD scalar or floating-point single precision "
373 "register expected");
374 break;
375 case REG_TYPE_FP_D:
376 msg = N_("64-bit SIMD scalar or floating-point double precision "
377 "register expected");
378 break;
379 case REG_TYPE_FP_Q:
380 msg = N_("128-bit SIMD scalar or floating-point quad precision "
381 "register expected");
382 break;
383 case REG_TYPE_CN:
384 msg = N_("C0 - C15 expected");
385 break;
386 case REG_TYPE_R_Z_BHSDQ_V:
387 msg = N_("register expected");
388 break;
389 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
390 msg = N_("SIMD scalar or floating-point register expected");
391 break;
392 case REG_TYPE_VN: /* any V reg */
393 msg = N_("vector register expected");
394 break;
395 default:
396 as_fatal (_("invalid register type %d"), reg_type);
397 }
398 return msg;
399 }
400
401 /* Some well known registers that we refer to directly elsewhere. */
402 #define REG_SP 31
403
404 /* Instructions take 4 bytes in the object file. */
405 #define INSN_SIZE 4
406
407 /* Define some common error messages. */
408 #define BAD_SP _("SP not allowed here")
409
410 static struct hash_control *aarch64_ops_hsh;
411 static struct hash_control *aarch64_cond_hsh;
412 static struct hash_control *aarch64_shift_hsh;
413 static struct hash_control *aarch64_sys_regs_hsh;
414 static struct hash_control *aarch64_pstatefield_hsh;
415 static struct hash_control *aarch64_sys_regs_ic_hsh;
416 static struct hash_control *aarch64_sys_regs_dc_hsh;
417 static struct hash_control *aarch64_sys_regs_at_hsh;
418 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
419 static struct hash_control *aarch64_reg_hsh;
420 static struct hash_control *aarch64_barrier_opt_hsh;
421 static struct hash_control *aarch64_nzcv_hsh;
422 static struct hash_control *aarch64_pldop_hsh;
423
424 /* Stuff needed to resolve the label ambiguity
425 As:
426 ...
427 label: <insn>
428 may differ from:
429 ...
430 label:
431 <insn> */
432
433 static symbolS *last_label_seen;
434
435 /* Literal pool structure. Held on a per-section
436 and per-sub-section basis. */
437
438 #define MAX_LITERAL_POOL_SIZE 1024
439 typedef struct literal_pool
440 {
441 expressionS literals[MAX_LITERAL_POOL_SIZE];
442 unsigned int next_free_entry;
443 unsigned int id;
444 symbolS *symbol;
445 segT section;
446 subsegT sub_section;
447 int size;
448 struct literal_pool *next;
449 } literal_pool;
450
451 /* Pointer to a linked list of literal pools. */
452 static literal_pool *list_of_pools = NULL;
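/* Sketch of how the pools are typically fed and drained from assembly
   source (the standard GAS "ldr =" pseudo-instruction syntax is assumed
   here):

       ldr   x0, =0x1122334455667788   // constant deferred to a literal pool
       ...
       .ltorg                          // pending pool entries are dumped here

   A separate pool is kept for each (section, subsection, entry size)
   combination in this list.  */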
453 \f
454 /* Pure syntax. */
455
456 /* This array holds the chars that always start a comment. If the
457 pre-processor is disabled, these aren't very useful. */
458 const char comment_chars[] = "";
459
460 /* This array holds the chars that only start a comment at the beginning of
461 a line. If the line seems to have the form '# 123 filename'
462 .line and .file directives will appear in the pre-processed output. */
463 /* Note that input_file.c hand checks for '#' at the beginning of the
464 first line of the input file. This is because the compiler outputs
465 #NO_APP at the beginning of its output. */
466 /* Also note that comments like this one will always work. */
467 const char line_comment_chars[] = "#";
468
469 const char line_separator_chars[] = ";";
470
471 /* Chars that can be used to separate the mantissa
472 from the exponent in floating point numbers. */
473 const char EXP_CHARS[] = "eE";
474
475 /* Chars that mean this number is a floating point constant. */
476 /* As in 0f12.456 */
477 /* or 0d1.2345e12 */
478
479 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
480
481 /* Prefix character that indicates the start of an immediate value. */
482 #define is_immediate_prefix(C) ((C) == '#')
483
484 /* Separator character handling. */
485
486 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
487
488 static inline bfd_boolean
489 skip_past_char (char **str, char c)
490 {
491 if (**str == c)
492 {
493 (*str)++;
494 return TRUE;
495 }
496 else
497 return FALSE;
498 }
499
500 #define skip_past_comma(str) skip_past_char (str, ',')
501
502 /* Arithmetic expressions (possibly involving symbols). */
503
504 static bfd_boolean in_my_get_expression_p = FALSE;
505
506 /* Third argument to my_get_expression. */
507 #define GE_NO_PREFIX 0
508 #define GE_OPT_PREFIX 1
509
510 /* Return TRUE if the string pointed to by *STR is successfully parsed
511 as a valid expression; *EP will be filled with the information of
512 such an expression. Otherwise return FALSE. */
513
514 static bfd_boolean
515 my_get_expression (expressionS * ep, char **str, int prefix_mode,
516 int reject_absent)
517 {
518 char *save_in;
519 segT seg;
520 int prefix_present_p = 0;
521
522 switch (prefix_mode)
523 {
524 case GE_NO_PREFIX:
525 break;
526 case GE_OPT_PREFIX:
527 if (is_immediate_prefix (**str))
528 {
529 (*str)++;
530 prefix_present_p = 1;
531 }
532 break;
533 default:
534 abort ();
535 }
536
537 memset (ep, 0, sizeof (expressionS));
538
539 save_in = input_line_pointer;
540 input_line_pointer = *str;
541 in_my_get_expression_p = TRUE;
542 seg = expression (ep);
543 in_my_get_expression_p = FALSE;
544
545 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
546 {
547 /* We found a bad expression in md_operand(). */
548 *str = input_line_pointer;
549 input_line_pointer = save_in;
550 if (prefix_present_p && ! error_p ())
551 set_fatal_syntax_error (_("bad expression"));
552 else
553 set_first_syntax_error (_("bad expression"));
554 return FALSE;
555 }
556
557 #ifdef OBJ_AOUT
558 if (seg != absolute_section
559 && seg != text_section
560 && seg != data_section
561 && seg != bss_section && seg != undefined_section)
562 {
563 set_syntax_error (_("bad segment"));
564 *str = input_line_pointer;
565 input_line_pointer = save_in;
566 return FALSE;
567 }
568 #else
569 (void) seg;
570 #endif
571
572 *str = input_line_pointer;
573 input_line_pointer = save_in;
574 return TRUE;
575 }
576
577 /* Turn a string in input_line_pointer into a floating point constant
578 of type TYPE, and store the appropriate bytes in *LITP. The number
579 of LITTLENUMS emitted is stored in *SIZEP. An error message is
580 returned, or NULL on OK. */
581
582 char *
583 md_atof (int type, char *litP, int *sizeP)
584 {
585 return ieee_md_atof (type, litP, sizeP, target_big_endian);
586 }
587
588 /* We handle all bad expressions here, so that we can report the faulty
589 instruction in the error message. */
590 void
591 md_operand (expressionS * exp)
592 {
593 if (in_my_get_expression_p)
594 exp->X_op = O_illegal;
595 }
596
597 /* Immediate values. */
598
599 /* Errors may be set multiple times during parsing or bit encoding
600 (particularly in the Neon bits), but usually the earliest error which is set
601 will be the most meaningful. Avoid overwriting it with later (cascading)
602 errors by calling this function. */
603
604 static void
605 first_error (const char *error)
606 {
607 if (! error_p ())
608 set_syntax_error (error);
609 }
610
611 /* Similar to first_error, but this function accepts a formatted error
612 message. */
613 static void
614 first_error_fmt (const char *format, ...)
615 {
616 va_list args;
617 enum
618 { size = 100 };
619 /* N.B. this single buffer will not cause error messages for different
620 instructions to pollute each other; this is because at the end of
621 processing of each assembly line, the error message, if any, will be
622 collected by as_bad. */
623 static char buffer[size];
624
625 if (! error_p ())
626 {
627 int ret ATTRIBUTE_UNUSED;
628 va_start (args, format);
629 ret = vsnprintf (buffer, size, format, args);
630 know (ret <= size - 1 && ret >= 0);
631 va_end (args);
632 set_syntax_error (buffer);
633 }
634 }
635
636 /* Register parsing. */
637
638 /* Generic register parser which is called by other specialized
639 register parsers.
640 CCP points to what should be the beginning of a register name.
641 If it is indeed a valid register name, advance CCP over it and
642 return the reg_entry structure; otherwise return NULL.
643 It does not issue diagnostics. */
644
645 static reg_entry *
646 parse_reg (char **ccp)
647 {
648 char *start = *ccp;
649 char *p;
650 reg_entry *reg;
651
652 #ifdef REGISTER_PREFIX
653 if (*start != REGISTER_PREFIX)
654 return NULL;
655 start++;
656 #endif
657
658 p = start;
659 if (!ISALPHA (*p) || !is_name_beginner (*p))
660 return NULL;
661
662 do
663 p++;
664 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
665
666 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
667
668 if (!reg)
669 return NULL;
670
671 *ccp = p;
672 return reg;
673 }
674
675 /* Return TRUE if REG->TYPE is compatible with the required type TYPE; otherwise
676 return FALSE. */
677 static bfd_boolean
678 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
679 {
680 if (reg->type == type)
681 return TRUE;
682
683 switch (type)
684 {
685 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
686 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
687 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
688 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
689 case REG_TYPE_VN: /* Vector register. */
690 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
691 return ((reg_type_masks[reg->type] & reg_type_masks[type])
692 == reg_type_masks[reg->type]);
693 default:
694 as_fatal ("unhandled type %d", type);
695 abort ();
696 }
697 }
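/* For illustration: looking up "x3" yields an entry of type REG_TYPE_R_64.
   Checking it against the multi-type REG_TYPE_R_Z_SP succeeds, because the
   R_64 bit is part of that type's mask; checking it against REG_TYPE_VN
   fails, because the two masks do not overlap.  */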
698
699 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
700 Return the register number otherwise. *ISREG32 is set to one if the
701 register is 32-bit wide; *ISREGZERO is set to one if the register is
702 of type Z_32 or Z_64.
703 Note that this function does not issue any diagnostics. */
704
705 static int
706 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
707 int *isreg32, int *isregzero)
708 {
709 char *str = *ccp;
710 const reg_entry *reg = parse_reg (&str);
711
712 if (reg == NULL)
713 return PARSE_FAIL;
714
715 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
716 return PARSE_FAIL;
717
718 switch (reg->type)
719 {
720 case REG_TYPE_SP_32:
721 case REG_TYPE_SP_64:
722 if (reject_sp)
723 return PARSE_FAIL;
724 *isreg32 = reg->type == REG_TYPE_SP_32;
725 *isregzero = 0;
726 break;
727 case REG_TYPE_R_32:
728 case REG_TYPE_R_64:
729 *isreg32 = reg->type == REG_TYPE_R_32;
730 *isregzero = 0;
731 break;
732 case REG_TYPE_Z_32:
733 case REG_TYPE_Z_64:
734 if (reject_rz)
735 return PARSE_FAIL;
736 *isreg32 = reg->type == REG_TYPE_Z_32;
737 *isregzero = 1;
738 break;
739 default:
740 return PARSE_FAIL;
741 }
742
743 *ccp = str;
744
745 return reg->number;
746 }
747
748 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
749 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
750 otherwise return FALSE.
751
752 Accept only one occurrence of:
753 8b 16b 4h 8h 2s 4s 1d 2d
754 b h s d q */
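/* For illustration: ".4s" parses to width 4 and type NT_s, i.e. four 32-bit
   elements making a 128-bit vector, whereas a bare ".s" parses to width 0
   and is expected to be followed by an element index, as in "v1.s[2]".  */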
755 static bfd_boolean
756 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
757 {
758 char *ptr = *str;
759 unsigned width;
760 unsigned element_size;
761 enum neon_el_type type;
762
763 /* skip '.' */
764 ptr++;
765
766 if (!ISDIGIT (*ptr))
767 {
768 width = 0;
769 goto elt_size;
770 }
771 width = strtoul (ptr, &ptr, 10);
772 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
773 {
774 first_error_fmt (_("bad size %d in vector width specifier"), width);
775 return FALSE;
776 }
777
778 elt_size:
779 switch (TOLOWER (*ptr))
780 {
781 case 'b':
782 type = NT_b;
783 element_size = 8;
784 break;
785 case 'h':
786 type = NT_h;
787 element_size = 16;
788 break;
789 case 's':
790 type = NT_s;
791 element_size = 32;
792 break;
793 case 'd':
794 type = NT_d;
795 element_size = 64;
796 break;
797 case 'q':
798 if (width == 1)
799 {
800 type = NT_q;
801 element_size = 128;
802 break;
803 }
804 /* fall through. */
805 default:
806 if (*ptr != '\0')
807 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
808 else
809 first_error (_("missing element size"));
810 return FALSE;
811 }
812 if (width != 0 && width * element_size != 64 && width * element_size != 128)
813 {
814 first_error_fmt (_
815 ("invalid element size %d and vector size combination %c"),
816 width, *ptr);
817 return FALSE;
818 }
819 ptr++;
820
821 parsed_type->type = type;
822 parsed_type->width = width;
823
824 *str = ptr;
825
826 return TRUE;
827 }
828
829 /* Parse a single type, e.g. ".8b", leading period included.
830 Only applicable to Vn registers.
831
832 Return TRUE on success; otherwise return FALSE. */
833 static bfd_boolean
834 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
835 {
836 char *str = *ccp;
837
838 if (*str == '.')
839 {
840 if (! parse_neon_type_for_operand (vectype, &str))
841 {
842 first_error (_("vector type expected"));
843 return FALSE;
844 }
845 }
846 else
847 return FALSE;
848
849 *ccp = str;
850
851 return TRUE;
852 }
853
854 /* Parse a register of the type TYPE.
855
856 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
857 name or the parsed register is not of TYPE.
858
859 Otherwise return the register number, and optionally fill in the actual
860 type of the register in *RTYPE when multiple alternatives were given, and
861 return the register shape and element index information in *TYPEINFO.
862
863 IN_REG_LIST should be set with TRUE if the caller is parsing a register
864 list. */
865
866 static int
867 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
868 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
869 {
870 char *str = *ccp;
871 const reg_entry *reg = parse_reg (&str);
872 struct neon_type_el atype;
873 struct neon_type_el parsetype;
874 bfd_boolean is_typed_vecreg = FALSE;
875
876 atype.defined = 0;
877 atype.type = NT_invtype;
878 atype.width = -1;
879 atype.index = 0;
880
881 if (reg == NULL)
882 {
883 if (typeinfo)
884 *typeinfo = atype;
885 set_default_error ();
886 return PARSE_FAIL;
887 }
888
889 if (! aarch64_check_reg_type (reg, type))
890 {
891 DEBUG_TRACE ("reg type check failed");
892 set_default_error ();
893 return PARSE_FAIL;
894 }
895 type = reg->type;
896
897 if (type == REG_TYPE_VN
898 && parse_neon_operand_type (&parsetype, &str))
899 {
900 /* Register is of the form Vn.[bhsdq]. */
901 is_typed_vecreg = TRUE;
902
903 if (parsetype.width == 0)
904 /* Expect index. In the new scheme we cannot have
905 Vn.[bhsdq] represent a scalar. Therefore any
906 Vn.[bhsdq] should have an index following it.
907 Except in register lists, of course. */
908 atype.defined |= NTA_HASINDEX;
909 else
910 atype.defined |= NTA_HASTYPE;
911
912 atype.type = parsetype.type;
913 atype.width = parsetype.width;
914 }
915
916 if (skip_past_char (&str, '['))
917 {
918 expressionS exp;
919
920 /* Reject Sn[index] syntax. */
921 if (!is_typed_vecreg)
922 {
923 first_error (_("this type of register can't be indexed"));
924 return PARSE_FAIL;
925 }
926
927 if (in_reg_list == TRUE)
928 {
929 first_error (_("index not allowed inside register list"));
930 return PARSE_FAIL;
931 }
932
933 atype.defined |= NTA_HASINDEX;
934
935 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
936
937 if (exp.X_op != O_constant)
938 {
939 first_error (_("constant expression required"));
940 return PARSE_FAIL;
941 }
942
943 if (! skip_past_char (&str, ']'))
944 return PARSE_FAIL;
945
946 atype.index = exp.X_add_number;
947 }
948 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
949 {
950 /* Indexed vector register expected. */
951 first_error (_("indexed vector register expected"));
952 return PARSE_FAIL;
953 }
954
955 /* A vector reg Vn should be typed or indexed. */
956 if (type == REG_TYPE_VN && atype.defined == 0)
957 {
958 first_error (_("invalid use of vector register"));
959 }
960
961 if (typeinfo)
962 *typeinfo = atype;
963
964 if (rtype)
965 *rtype = type;
966
967 *ccp = str;
968
969 return reg->number;
970 }
971
972 /* Parse register.
973
974 Return the register number on success; return PARSE_FAIL otherwise.
975
976 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
977 the register (e.g. NEON double or quad reg when either has been requested).
978
979 If this is a NEON vector register with additional type information, fill
980 in the struct pointed to by VECTYPE (if non-NULL).
981
982 This parser does not handle register lists. */
983
984 static int
985 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
986 aarch64_reg_type *rtype, struct neon_type_el *vectype)
987 {
988 struct neon_type_el atype;
989 char *str = *ccp;
990 int reg = parse_typed_reg (&str, type, rtype, &atype,
991 /*in_reg_list= */ FALSE);
992
993 if (reg == PARSE_FAIL)
994 return PARSE_FAIL;
995
996 if (vectype)
997 *vectype = atype;
998
999 *ccp = str;
1000
1001 return reg;
1002 }
1003
1004 static inline bfd_boolean
1005 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
1006 {
1007 return
1008 e1.type == e2.type
1009 && e1.defined == e2.defined
1010 && e1.width == e2.width && e1.index == e2.index;
1011 }
1012
1013 /* This function parses the NEON register list. On success, it returns
1014 the parsed register list information in the following encoded format:
1015
1016 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1017 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1018
1019 The information of the register shape and/or index is returned in
1020 *VECTYPE.
1021
1022 It returns PARSE_FAIL if the register list is invalid.
1023
1024 The list contains one to four registers.
1025 Each register can be one of:
1026 <Vt>.<T>[<index>]
1027 <Vt>.<T>
1028 All <T> should be identical.
1029 All <index> should be identical.
1030 There are restrictions on <Vt> numbers which are checked later
1031 (by reg_list_valid_p). */
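/* Worked example (illustrative): for the list "{v2.4s, v3.4s}" the register
   numbers are 2 and 3, so ret_val becomes 2 | (3 << 5) = 0x62 and the
   function returns (0x62 << 2) | (2 - 1) = 0x189: bits 0-1 hold the number
   of registers minus one, bits 2-6 the first regno and bits 7-11 the
   second.  */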
1032
1033 static int
1034 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1035 {
1036 char *str = *ccp;
1037 int nb_regs;
1038 struct neon_type_el typeinfo, typeinfo_first;
1039 int val, val_range;
1040 int in_range;
1041 int ret_val;
1042 int i;
1043 bfd_boolean error = FALSE;
1044 bfd_boolean expect_index = FALSE;
1045
1046 if (*str != '{')
1047 {
1048 set_syntax_error (_("expecting {"));
1049 return PARSE_FAIL;
1050 }
1051 str++;
1052
1053 nb_regs = 0;
1054 typeinfo_first.defined = 0;
1055 typeinfo_first.type = NT_invtype;
1056 typeinfo_first.width = -1;
1057 typeinfo_first.index = 0;
1058 ret_val = 0;
1059 val = -1;
1060 val_range = -1;
1061 in_range = 0;
1062 do
1063 {
1064 if (in_range)
1065 {
1066 str++; /* skip over '-' */
1067 val_range = val;
1068 }
1069 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1070 /*in_reg_list= */ TRUE);
1071 if (val == PARSE_FAIL)
1072 {
1073 set_first_syntax_error (_("invalid vector register in list"));
1074 error = TRUE;
1075 continue;
1076 }
1077 /* reject [bhsd]n */
1078 if (typeinfo.defined == 0)
1079 {
1080 set_first_syntax_error (_("invalid scalar register in list"));
1081 error = TRUE;
1082 continue;
1083 }
1084
1085 if (typeinfo.defined & NTA_HASINDEX)
1086 expect_index = TRUE;
1087
1088 if (in_range)
1089 {
1090 if (val < val_range)
1091 {
1092 set_first_syntax_error
1093 (_("invalid range in vector register list"));
1094 error = TRUE;
1095 }
1096 val_range++;
1097 }
1098 else
1099 {
1100 val_range = val;
1101 if (nb_regs == 0)
1102 typeinfo_first = typeinfo;
1103 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1104 {
1105 set_first_syntax_error
1106 (_("type mismatch in vector register list"));
1107 error = TRUE;
1108 }
1109 }
1110 if (! error)
1111 for (i = val_range; i <= val; i++)
1112 {
1113 ret_val |= i << (5 * nb_regs);
1114 nb_regs++;
1115 }
1116 in_range = 0;
1117 }
1118 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1119
1120 skip_whitespace (str);
1121 if (*str != '}')
1122 {
1123 set_first_syntax_error (_("end of vector register list not found"));
1124 error = TRUE;
1125 }
1126 str++;
1127
1128 skip_whitespace (str);
1129
1130 if (expect_index)
1131 {
1132 if (skip_past_char (&str, '['))
1133 {
1134 expressionS exp;
1135
1136 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1137 if (exp.X_op != O_constant)
1138 {
1139 set_first_syntax_error (_("constant expression required."));
1140 error = TRUE;
1141 }
1142 if (! skip_past_char (&str, ']'))
1143 error = TRUE;
1144 else
1145 typeinfo_first.index = exp.X_add_number;
1146 }
1147 else
1148 {
1149 set_first_syntax_error (_("expected index"));
1150 error = TRUE;
1151 }
1152 }
1153
1154 if (nb_regs > 4)
1155 {
1156 set_first_syntax_error (_("too many registers in vector register list"));
1157 error = TRUE;
1158 }
1159 else if (nb_regs == 0)
1160 {
1161 set_first_syntax_error (_("empty vector register list"));
1162 error = TRUE;
1163 }
1164
1165 *ccp = str;
1166 if (! error)
1167 *vectype = typeinfo_first;
1168
1169 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1170 }
1171
1172 /* Directives: register aliases. */
1173
1174 static reg_entry *
1175 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1176 {
1177 reg_entry *new;
1178 const char *name;
1179
1180 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1181 {
1182 if (new->builtin)
1183 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1184 str);
1185
1186 /* Only warn about a redefinition if it's not defined as the
1187 same register. */
1188 else if (new->number != number || new->type != type)
1189 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1190
1191 return NULL;
1192 }
1193
1194 name = xstrdup (str);
1195 new = xmalloc (sizeof (reg_entry));
1196
1197 new->name = name;
1198 new->number = number;
1199 new->type = type;
1200 new->builtin = FALSE;
1201
1202 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1203 abort ();
1204
1205 return new;
1206 }
1207
1208 /* Look for the .req directive. This is of the form:
1209
1210 new_register_name .req existing_register_name
1211
1212 If we find one, or if it looks sufficiently like one that we want to
1213 handle any error here, return TRUE. Otherwise return FALSE. */
1214
1215 static bfd_boolean
1216 create_register_alias (char *newname, char *p)
1217 {
1218 const reg_entry *old;
1219 char *oldname, *nbuf;
1220 size_t nlen;
1221
1222 /* The input scrubber ensures that whitespace after the mnemonic is
1223 collapsed to single spaces. */
1224 oldname = p;
1225 if (strncmp (oldname, " .req ", 6) != 0)
1226 return FALSE;
1227
1228 oldname += 6;
1229 if (*oldname == '\0')
1230 return FALSE;
1231
1232 old = hash_find (aarch64_reg_hsh, oldname);
1233 if (!old)
1234 {
1235 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1236 return TRUE;
1237 }
1238
1239 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1240 the desired alias name, and p points to its end. If not, then
1241 the desired alias name is in the global original_case_string. */
1242 #ifdef TC_CASE_SENSITIVE
1243 nlen = p - newname;
1244 #else
1245 newname = original_case_string;
1246 nlen = strlen (newname);
1247 #endif
1248
1249 nbuf = alloca (nlen + 1);
1250 memcpy (nbuf, newname, nlen);
1251 nbuf[nlen] = '\0';
1252
1253 /* Create aliases under the new name as stated; an all-lowercase
1254 version of the new name; and an all-uppercase version of the new
1255 name. */
1256 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1257 {
1258 for (p = nbuf; *p; p++)
1259 *p = TOUPPER (*p);
1260
1261 if (strncmp (nbuf, newname, nlen))
1262 {
1263 /* If this attempt to create an additional alias fails, do not bother
1264 trying to create the all-lower case alias. We will fail and issue
1265 a second, duplicate error message. This situation arises when the
1266 programmer does something like:
1267 foo .req r0
1268 Foo .req r1
1269 The second .req creates the "Foo" alias but then fails to create
1270 the artificial FOO alias because it has already been created by the
1271 first .req. */
1272 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1273 return TRUE;
1274 }
1275
1276 for (p = nbuf; *p; p++)
1277 *p = TOLOWER (*p);
1278
1279 if (strncmp (nbuf, newname, nlen))
1280 insert_reg_alias (nbuf, old->number, old->type);
1281 }
1282
1283 return TRUE;
1284 }
1285
1286 /* Should never be called, as .req goes between the alias and the
1287 register name, not at the beginning of the line. */
1288 static void
1289 s_req (int a ATTRIBUTE_UNUSED)
1290 {
1291 as_bad (_("invalid syntax for .req directive"));
1292 }
1293
1294 /* The .unreq directive deletes an alias which was previously defined
1295 by .req. For example:
1296
1297 my_alias .req r11
1298 .unreq my_alias */
1299
1300 static void
1301 s_unreq (int a ATTRIBUTE_UNUSED)
1302 {
1303 char *name;
1304 char saved_char;
1305
1306 name = input_line_pointer;
1307
1308 while (*input_line_pointer != 0
1309 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1310 ++input_line_pointer;
1311
1312 saved_char = *input_line_pointer;
1313 *input_line_pointer = 0;
1314
1315 if (!*name)
1316 as_bad (_("invalid syntax for .unreq directive"));
1317 else
1318 {
1319 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1320
1321 if (!reg)
1322 as_bad (_("unknown register alias '%s'"), name);
1323 else if (reg->builtin)
1324 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1325 name);
1326 else
1327 {
1328 char *p;
1329 char *nbuf;
1330
1331 hash_delete (aarch64_reg_hsh, name, FALSE);
1332 free ((char *) reg->name);
1333 free (reg);
1334
1335 /* Also locate the all upper case and all lower case versions.
1336 Do not complain if we cannot find one or the other as it
1337 was probably deleted above. */
1338
1339 nbuf = strdup (name);
1340 for (p = nbuf; *p; p++)
1341 *p = TOUPPER (*p);
1342 reg = hash_find (aarch64_reg_hsh, nbuf);
1343 if (reg)
1344 {
1345 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1346 free ((char *) reg->name);
1347 free (reg);
1348 }
1349
1350 for (p = nbuf; *p; p++)
1351 *p = TOLOWER (*p);
1352 reg = hash_find (aarch64_reg_hsh, nbuf);
1353 if (reg)
1354 {
1355 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1356 free ((char *) reg->name);
1357 free (reg);
1358 }
1359
1360 free (nbuf);
1361 }
1362 }
1363
1364 *input_line_pointer = saved_char;
1365 demand_empty_rest_of_line ();
1366 }
1367
1368 /* Directives: Instruction set selection. */
1369
1370 #ifdef OBJ_ELF
1371 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1372 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1373 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1374 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
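/* For illustration (sketch): in a section that mixes code and data, e.g.

       add   x0, x0, #1        // "$x" marks the start of A64 code
       .word 0x12345678        // "$d" marks the start of literal data

   a "$x" mapping symbol is expected at the first instruction and a "$d"
   one at the first data byte, so that disassemblers and other consumers
   can tell code from data.  */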
1375
1376 /* Create a new mapping symbol for the transition to STATE. */
1377
1378 static void
1379 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1380 {
1381 symbolS *symbolP;
1382 const char *symname;
1383 int type;
1384
1385 switch (state)
1386 {
1387 case MAP_DATA:
1388 symname = "$d";
1389 type = BSF_NO_FLAGS;
1390 break;
1391 case MAP_INSN:
1392 symname = "$x";
1393 type = BSF_NO_FLAGS;
1394 break;
1395 default:
1396 abort ();
1397 }
1398
1399 symbolP = symbol_new (symname, now_seg, value, frag);
1400 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1401
1402 /* Save the mapping symbols for future reference. Also check that
1403 we do not place two mapping symbols at the same offset within a
1404 frag. We'll handle overlap between frags in
1405 check_mapping_symbols.
1406
1407 If .fill or other data filling directive generates zero sized data,
1408 the mapping symbol for the following code will have the same value
1409 as the one generated for the data filling directive. In this case,
1410 we replace the old symbol with the new one at the same address. */
1411 if (value == 0)
1412 {
1413 if (frag->tc_frag_data.first_map != NULL)
1414 {
1415 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1416 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1417 &symbol_lastP);
1418 }
1419 frag->tc_frag_data.first_map = symbolP;
1420 }
1421 if (frag->tc_frag_data.last_map != NULL)
1422 {
1423 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1424 S_GET_VALUE (symbolP));
1425 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1426 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1427 &symbol_lastP);
1428 }
1429 frag->tc_frag_data.last_map = symbolP;
1430 }
1431
1432 /* We must sometimes convert a region marked as code to data during
1433 code alignment, if an odd number of bytes have to be padded. The
1434 code mapping symbol is pushed to an aligned address. */
1435
1436 static void
1437 insert_data_mapping_symbol (enum mstate state,
1438 valueT value, fragS * frag, offsetT bytes)
1439 {
1440 /* If there was already a mapping symbol, remove it. */
1441 if (frag->tc_frag_data.last_map != NULL
1442 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1443 frag->fr_address + value)
1444 {
1445 symbolS *symp = frag->tc_frag_data.last_map;
1446
1447 if (value == 0)
1448 {
1449 know (frag->tc_frag_data.first_map == symp);
1450 frag->tc_frag_data.first_map = NULL;
1451 }
1452 frag->tc_frag_data.last_map = NULL;
1453 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1454 }
1455
1456 make_mapping_symbol (MAP_DATA, value, frag);
1457 make_mapping_symbol (state, value + bytes, frag);
1458 }
1459
1460 static void mapping_state_2 (enum mstate state, int max_chars);
1461
1462 /* Set the mapping state to STATE. Only call this when about to
1463 emit some STATE bytes to the file. */
1464
1465 void
1466 mapping_state (enum mstate state)
1467 {
1468 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1469
1470 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1471
1472 if (mapstate == state)
1473 /* The mapping symbol has already been emitted.
1474 There is nothing else to do. */
1475 return;
1476 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1477 /* This case will be evaluated later in the next else. */
1478 return;
1479 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1480 {
1481 /* Only add the symbol if the offset is > 0:
1482 if we're at the first frag, check its size > 0;
1483 if we're not at the first frag, then for sure
1484 the offset is > 0. */
1485 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1486 const int add_symbol = (frag_now != frag_first)
1487 || (frag_now_fix () > 0);
1488
1489 if (add_symbol)
1490 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1491 }
1492
1493 mapping_state_2 (state, 0);
1494 #undef TRANSITION
1495 }
1496
1497 /* Same as mapping_state, but MAX_CHARS bytes have already been
1498 allocated. Put the mapping symbol that far back. */
1499
1500 static void
1501 mapping_state_2 (enum mstate state, int max_chars)
1502 {
1503 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1504
1505 if (!SEG_NORMAL (now_seg))
1506 return;
1507
1508 if (mapstate == state)
1509 /* The mapping symbol has already been emitted.
1510 There is nothing else to do. */
1511 return;
1512
1513 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1514 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1515 }
1516 #else
1517 #define mapping_state(x) /* nothing */
1518 #define mapping_state_2(x, y) /* nothing */
1519 #endif
1520
1521 /* Directives: sectioning and alignment. */
1522
1523 static void
1524 s_bss (int ignore ATTRIBUTE_UNUSED)
1525 {
1526 /* We don't support putting frags in the BSS segment; we fake it by
1527 marking in_bss, then looking at s_skip for clues. */
1528 subseg_set (bss_section, 0);
1529 demand_empty_rest_of_line ();
1530 mapping_state (MAP_DATA);
1531 }
1532
1533 static void
1534 s_even (int ignore ATTRIBUTE_UNUSED)
1535 {
1536 /* Never make a frag if we expect an extra pass. */
1537 if (!need_pass_2)
1538 frag_align (1, 0, 0);
1539
1540 record_alignment (now_seg, 1);
1541
1542 demand_empty_rest_of_line ();
1543 }
1544
1545 /* Directives: Literal pools. */
1546
1547 static literal_pool *
1548 find_literal_pool (int size)
1549 {
1550 literal_pool *pool;
1551
1552 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1553 {
1554 if (pool->section == now_seg
1555 && pool->sub_section == now_subseg && pool->size == size)
1556 break;
1557 }
1558
1559 return pool;
1560 }
1561
1562 static literal_pool *
1563 find_or_make_literal_pool (int size)
1564 {
1565 /* Next literal pool ID number. */
1566 static unsigned int latest_pool_num = 1;
1567 literal_pool *pool;
1568
1569 pool = find_literal_pool (size);
1570
1571 if (pool == NULL)
1572 {
1573 /* Create a new pool. */
1574 pool = xmalloc (sizeof (*pool));
1575 if (!pool)
1576 return NULL;
1577
1578 /* Currently we always put the literal pool in the current text
1579 section. If we were generating "small" model code where we
1580 knew that all code and initialised data was within 1MB then
1581 we could output literals to mergeable, read-only data
1582 sections. */
1583
1584 pool->next_free_entry = 0;
1585 pool->section = now_seg;
1586 pool->sub_section = now_subseg;
1587 pool->size = size;
1588 pool->next = list_of_pools;
1589 pool->symbol = NULL;
1590
1591 /* Add it to the list. */
1592 list_of_pools = pool;
1593 }
1594
1595 /* New pools, and emptied pools, will have a NULL symbol. */
1596 if (pool->symbol == NULL)
1597 {
1598 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1599 (valueT) 0, &zero_address_frag);
1600 pool->id = latest_pool_num++;
1601 }
1602
1603 /* Done. */
1604 return pool;
1605 }
1606
1607 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1608 Return TRUE on success, otherwise return FALSE. */
1609 static bfd_boolean
1610 add_to_lit_pool (expressionS *exp, int size)
1611 {
1612 literal_pool *pool;
1613 unsigned int entry;
1614
1615 pool = find_or_make_literal_pool (size);
1616
1617 /* Check if this literal value is already in the pool. */
1618 for (entry = 0; entry < pool->next_free_entry; entry++)
1619 {
1620 if ((pool->literals[entry].X_op == exp->X_op)
1621 && (exp->X_op == O_constant)
1622 && (pool->literals[entry].X_add_number == exp->X_add_number)
1623 && (pool->literals[entry].X_unsigned == exp->X_unsigned))
1624 break;
1625
1626 if ((pool->literals[entry].X_op == exp->X_op)
1627 && (exp->X_op == O_symbol)
1628 && (pool->literals[entry].X_add_number == exp->X_add_number)
1629 && (pool->literals[entry].X_add_symbol == exp->X_add_symbol)
1630 && (pool->literals[entry].X_op_symbol == exp->X_op_symbol))
1631 break;
1632 }
1633
1634 /* Do we need to create a new entry? */
1635 if (entry == pool->next_free_entry)
1636 {
1637 if (entry >= MAX_LITERAL_POOL_SIZE)
1638 {
1639 set_syntax_error (_("literal pool overflow"));
1640 return FALSE;
1641 }
1642
1643 pool->literals[entry] = *exp;
1644 pool->next_free_entry += 1;
1645 }
1646
1647 exp->X_op = O_symbol;
1648 exp->X_add_number = ((int) entry) * size;
1649 exp->X_add_symbol = pool->symbol;
1650
1651 return TRUE;
1652 }
1653
1654 /* Can't use symbol_new here, so have to create a symbol and then at
1655 a later date assign it a value. That's what these functions do. */
1656
1657 static void
1658 symbol_locate (symbolS * symbolP,
1659 const char *name,/* It is copied, the caller can modify. */
1660 segT segment, /* Segment identifier (SEG_<something>). */
1661 valueT valu, /* Symbol value. */
1662 fragS * frag) /* Associated fragment. */
1663 {
1664 unsigned int name_length;
1665 char *preserved_copy_of_name;
1666
1667 name_length = strlen (name) + 1; /* +1 for \0. */
1668 obstack_grow (&notes, name, name_length);
1669 preserved_copy_of_name = obstack_finish (&notes);
1670
1671 #ifdef tc_canonicalize_symbol_name
1672 preserved_copy_of_name =
1673 tc_canonicalize_symbol_name (preserved_copy_of_name);
1674 #endif
1675
1676 S_SET_NAME (symbolP, preserved_copy_of_name);
1677
1678 S_SET_SEGMENT (symbolP, segment);
1679 S_SET_VALUE (symbolP, valu);
1680 symbol_clear_list_pointers (symbolP);
1681
1682 symbol_set_frag (symbolP, frag);
1683
1684 /* Link to end of symbol chain. */
1685 {
1686 extern int symbol_table_frozen;
1687
1688 if (symbol_table_frozen)
1689 abort ();
1690 }
1691
1692 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1693
1694 obj_symbol_new_hook (symbolP);
1695
1696 #ifdef tc_symbol_new_hook
1697 tc_symbol_new_hook (symbolP);
1698 #endif
1699
1700 #ifdef DEBUG_SYMS
1701 verify_symbol_chain (symbol_rootP, symbol_lastP);
1702 #endif /* DEBUG_SYMS */
1703 }
1704
1705
1706 static void
1707 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1708 {
1709 unsigned int entry;
1710 literal_pool *pool;
1711 char sym_name[20];
1712 int align;
1713
1714 for (align = 2; align <= 4; align++)
1715 {
1716 int size = 1 << align;
1717
1718 pool = find_literal_pool (size);
1719 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1720 continue;
1721
1722 mapping_state (MAP_DATA);
1723
1724 /* Align the pool, as it will hold word-sized (or larger) entries.
1725 Only make a frag if we have to. */
1726 if (!need_pass_2)
1727 frag_align (align, 0, 0);
1728
1729 record_alignment (now_seg, align);
1730
1731 sprintf (sym_name, "$$lit_\002%x", pool->id);
1732
1733 symbol_locate (pool->symbol, sym_name, now_seg,
1734 (valueT) frag_now_fix (), frag_now);
1735 symbol_table_insert (pool->symbol);
1736
1737 for (entry = 0; entry < pool->next_free_entry; entry++)
1738 /* First output the expression in the instruction to the pool. */
1739 emit_expr (&(pool->literals[entry]), size); /* .word|.xword */
1740
1741 /* Mark the pool as empty. */
1742 pool->next_free_entry = 0;
1743 pool->symbol = NULL;
1744 }
1745 }
1746
1747 #ifdef OBJ_ELF
1748 /* Forward declarations for functions below, in the MD interface
1749 section. */
1750 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1751 static struct reloc_table_entry * find_reloc_table_entry (char **);
1752
1753 /* Directives: Data. */
1754 /* N.B. the support for relocation suffix in this directive needs to be
1755 implemented properly. */
1756
1757 static void
1758 s_aarch64_elf_cons (int nbytes)
1759 {
1760 expressionS exp;
1761
1762 #ifdef md_flush_pending_output
1763 md_flush_pending_output ();
1764 #endif
1765
1766 if (is_it_end_of_statement ())
1767 {
1768 demand_empty_rest_of_line ();
1769 return;
1770 }
1771
1772 #ifdef md_cons_align
1773 md_cons_align (nbytes);
1774 #endif
1775
1776 mapping_state (MAP_DATA);
1777 do
1778 {
1779 struct reloc_table_entry *reloc;
1780
1781 expression (&exp);
1782
1783 if (exp.X_op != O_symbol)
1784 emit_expr (&exp, (unsigned int) nbytes);
1785 else
1786 {
1787 skip_past_char (&input_line_pointer, '#');
1788 if (skip_past_char (&input_line_pointer, ':'))
1789 {
1790 reloc = find_reloc_table_entry (&input_line_pointer);
1791 if (reloc == NULL)
1792 as_bad (_("unrecognized relocation suffix"));
1793 else
1794 as_bad (_("unimplemented relocation suffix"));
1795 ignore_rest_of_line ();
1796 return;
1797 }
1798 else
1799 emit_expr (&exp, (unsigned int) nbytes);
1800 }
1801 }
1802 while (*input_line_pointer++ == ',');
1803
1804 /* Put terminator back into stream. */
1805 input_line_pointer--;
1806 demand_empty_rest_of_line ();
1807 }
1808
1809 #endif /* OBJ_ELF */
1810
1811 /* Output a 32-bit word, but mark as an instruction. */
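/* For example (illustrative): ".inst 0xd503201f" emits the four bytes of a
   NOP encoding but keeps the "$x" (code) mapping state, whereas ".word"
   would switch the mapping state to "$d" (data).  */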
1812
1813 static void
1814 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1815 {
1816 expressionS exp;
1817
1818 #ifdef md_flush_pending_output
1819 md_flush_pending_output ();
1820 #endif
1821
1822 if (is_it_end_of_statement ())
1823 {
1824 demand_empty_rest_of_line ();
1825 return;
1826 }
1827
1828 if (!need_pass_2)
1829 frag_align_code (2, 0);
1830 #ifdef OBJ_ELF
1831 mapping_state (MAP_INSN);
1832 #endif
1833
1834 do
1835 {
1836 expression (&exp);
1837 if (exp.X_op != O_constant)
1838 {
1839 as_bad (_("constant expression required"));
1840 ignore_rest_of_line ();
1841 return;
1842 }
1843
1844 if (target_big_endian)
1845 {
1846 unsigned int val = exp.X_add_number;
1847 exp.X_add_number = SWAP_32 (val);
1848 }
1849 emit_expr (&exp, 4);
1850 }
1851 while (*input_line_pointer++ == ',');
1852
1853 /* Put terminator back into stream. */
1854 input_line_pointer--;
1855 demand_empty_rest_of_line ();
1856 }
1857
1858 #ifdef OBJ_ELF
1859 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1860
1861 static void
1862 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1863 {
1864 expressionS exp;
1865
1866 /* Since we're just labelling the code, there's no need to define a
1867 mapping symbol. */
1868 expression (&exp);
1869 /* Make sure there is enough room in this frag for the following
1870 blr. This trick only works if the blr follows immediately after
1871 the .tlsdesccall directive. */
1872 frag_grow (4);
1873 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1874 BFD_RELOC_AARCH64_TLSDESC_CALL);
1875
1876 demand_empty_rest_of_line ();
1877 }
1878 #endif /* OBJ_ELF */
1879
1880 static void s_aarch64_arch (int);
1881 static void s_aarch64_cpu (int);
1882
1883 /* This table describes all the machine specific pseudo-ops the assembler
1884 has to support. The fields are:
1885 pseudo-op name without dot
1886 function to call to execute this pseudo-op
1887 Integer arg to pass to the function. */
1888
1889 const pseudo_typeS md_pseudo_table[] = {
1890 /* Never called because '.req' does not start a line. */
1891 {"req", s_req, 0},
1892 {"unreq", s_unreq, 0},
1893 {"bss", s_bss, 0},
1894 {"even", s_even, 0},
1895 {"ltorg", s_ltorg, 0},
1896 {"pool", s_ltorg, 0},
1897 {"cpu", s_aarch64_cpu, 0},
1898 {"arch", s_aarch64_arch, 0},
1899 {"inst", s_aarch64_inst, 0},
1900 #ifdef OBJ_ELF
1901 {"tlsdesccall", s_tlsdesccall, 0},
1902 {"word", s_aarch64_elf_cons, 4},
1903 {"long", s_aarch64_elf_cons, 4},
1904 {"xword", s_aarch64_elf_cons, 8},
1905 {"dword", s_aarch64_elf_cons, 8},
1906 #endif
1907 {0, 0, 0}
1908 };
1909 \f
1910
1911 /* Check whether STR points to a register name followed by a comma or the
1912 end of line; REG_TYPE indicates which register types are checked
1913 against. Return TRUE if STR is such a register name; otherwise return
1914 FALSE. The function does not intend to produce any diagnostics, but since
1915 the register parser aarch64_reg_parse, which is called by this function,
1916 does produce diagnostics, we call clear_error to clear any diagnostics
1917 that may be generated by aarch64_reg_parse.
1918 Also, the function returns FALSE directly if there is any user error
1919 present at the function entry. This prevents the existing diagnostics
1920 state from being spoiled.
1921 The function currently serves parse_constant_immediate and
1922 parse_big_immediate only. */
1923 static bfd_boolean
1924 reg_name_p (char *str, aarch64_reg_type reg_type)
1925 {
1926 int reg;
1927
1928 /* Prevent the diagnostics state from being spoiled. */
1929 if (error_p ())
1930 return FALSE;
1931
1932 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1933
1934 /* Clear the parsing error that may be set by the reg parser. */
1935 clear_error ();
1936
1937 if (reg == PARSE_FAIL)
1938 return FALSE;
1939
1940 skip_whitespace (str);
1941 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1942 return TRUE;
1943
1944 return FALSE;
1945 }
1946
1947 /* Parser functions used exclusively in instruction operands. */
1948
1949 /* Parse an immediate expression which may not be constant.
1950
1951 To prevent the expression parser from pushing a register name
1952 into the symbol table as an undefined symbol, a check is first
1953 done to find out whether STR is a valid register name followed
1954 by a comma or the end of line. Return FALSE if STR is such a
1955 string. */
1956
1957 static bfd_boolean
1958 parse_immediate_expression (char **str, expressionS *exp)
1959 {
1960 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1961 {
1962 set_recoverable_error (_("immediate operand required"));
1963 return FALSE;
1964 }
1965
1966 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
1967
1968 if (exp->X_op == O_absent)
1969 {
1970 set_fatal_syntax_error (_("missing immediate expression"));
1971 return FALSE;
1972 }
1973
1974 return TRUE;
1975 }
1976
1977 /* Constant immediate-value read function for use in insn parsing.
1978 STR points to the beginning of the immediate (with the optional
1979 leading #); *VAL receives the value.
1980
1981 Return TRUE on success; otherwise return FALSE. */
1982
1983 static bfd_boolean
1984 parse_constant_immediate (char **str, int64_t * val)
1985 {
1986 expressionS exp;
1987
1988 if (! parse_immediate_expression (str, &exp))
1989 return FALSE;
1990
1991 if (exp.X_op != O_constant)
1992 {
1993 set_syntax_error (_("constant expression required"));
1994 return FALSE;
1995 }
1996
1997 *val = exp.X_add_number;
1998 return TRUE;
1999 }
2000
2001 static uint32_t
2002 encode_imm_float_bits (uint32_t imm)
2003 {
2004 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2005 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2006 }
2007
2008 /* Return TRUE if the single-precision floating-point value encoded in IMM
2009 can be expressed in the AArch64 8-bit signed floating-point format with
2010 3-bit exponent and normalized 4 bits of precision; in other words, the
2011 floating-point value must be expressible as
2012 (+/-) n / 16 * power (2, r)
2013 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2014
2015 static bfd_boolean
2016 aarch64_imm_float_p (uint32_t imm)
2017 {
2018 /* If a single-precision floating-point value has the following bit
2019 pattern, it can be expressed in the AArch64 8-bit floating-point
2020 format:
2021
2022 3 32222222 2221111111111
2023 1 09876543 21098765432109876543210
2024 n Eeeeeexx xxxx0000000000000000000
2025
2026 where n, e and each x are either 0 or 1 independently, with
2027 E == ~ e. */
2028
2029 uint32_t pattern;
2030
2031 /* Prepare the pattern for 'Eeeeee'. */
2032 if (((imm >> 30) & 0x1) == 0)
2033 pattern = 0x3e000000;
2034 else
2035 pattern = 0x40000000;
2036
2037 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2038 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2039 }
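
/* Editorial note: the sketch below is not part of the original source; it is
   a minimal illustration, under the definitions above, of the round trip for
   the value 1.0f.  Its IEEE754 single-precision encoding is 0x3f800000
   (i.e. 16/16 * power (2, 0)), which aarch64_imm_float_p accepts and which
   encode_imm_float_bits reduces to the 8-bit FMOV immediate 0x70.  The
   helper name aarch64_imm_float_example is hypothetical.  */

static void ATTRIBUTE_UNUSED
aarch64_imm_float_example (void)
{
  uint32_t fpword = 0x3f800000;	/* IEEE754 single-precision 1.0f.  */
  gas_assert (aarch64_imm_float_p (fpword));
  gas_assert (encode_imm_float_bits (fpword) == 0x70);
}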
2040
2041 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2042
2043 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2044 8-bit signed floating-point format with 3-bit exponent and normalized 4
2045 bits of precision (i.e. can be used in an FMOV instruction); return the
2046 equivalent single-precision encoding in *FPWORD.
2047
2048 Otherwise return FALSE. */
2049
2050 static bfd_boolean
2051 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2052 {
2053 /* If a double-precision floating-point value has the following bit
2054 pattern, it can be expressed in the AArch64 8-bit floating-point
2055 format:
2056
2057 6 66655555555 554444444...21111111111
2058 3 21098765432 109876543...098765432109876543210
2059 n Eeeeeeeeexx xxxx00000...000000000000000000000
2060
2061 where n, e and each x are either 0 or 1 independently, with
2062 E == ~ e. */
2063
2064 uint32_t pattern;
2065 uint32_t high32 = imm >> 32;
2066
2067 /* Lower 32 bits need to be 0s. */
2068 if ((imm & 0xffffffff) != 0)
2069 return FALSE;
2070
2071 /* Prepare the pattern for 'Eeeeeeeee'. */
2072 if (((high32 >> 30) & 0x1) == 0)
2073 pattern = 0x3fc00000;
2074 else
2075 pattern = 0x40000000;
2076
2077 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2078 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2079 {
2080 /* Convert to the single-precision encoding.
2081 i.e. convert
2082 n Eeeeeeeeexx xxxx00000...000000000000000000000
2083 to
2084 n Eeeeeexx xxxx0000000000000000000. */
2085 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2086 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2087 return TRUE;
2088 }
2089 else
2090 return FALSE;
2091 }
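
/* Editorial note: a worked example (not in the original source) for the
   double-precision path above: 1.0 is encoded as 0x3ff0000000000000, whose
   low 32 bits are zero and whose high word 0x3ff00000 matches the pattern,
   so the function returns TRUE and sets *FPWORD to 0x3f800000, the
   single-precision encoding of the same value.  */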
2092
2093 /* Parse a floating-point immediate. Return TRUE on success and return the
2094 value in *IMMED in the format of IEEE754 single-precision encoding.
2095 *CCP points to the start of the string; DP_P is TRUE when the immediate
2096 is expected to be in double-precision (N.B. this only matters when
2097 hexadecimal representation is involved).
2098
2099 N.B. 0.0 is accepted by this function. */
2100
2101 static bfd_boolean
2102 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2103 {
2104 char *str = *ccp;
2105 char *fpnum;
2106 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2107 int found_fpchar = 0;
2108 int64_t val = 0;
2109 unsigned fpword = 0;
2110 bfd_boolean hex_p = FALSE;
2111
2112 skip_past_char (&str, '#');
2113
2114 fpnum = str;
2115 skip_whitespace (fpnum);
2116
2117 if (strncmp (fpnum, "0x", 2) == 0)
2118 {
2119 /* Support the hexadecimal representation of the IEEE754 encoding.
2120 Double-precision is expected when DP_P is TRUE, otherwise the
2121 representation should be in single-precision. */
2122 if (! parse_constant_immediate (&str, &val))
2123 goto invalid_fp;
2124
2125 if (dp_p)
2126 {
2127 if (! aarch64_double_precision_fmovable (val, &fpword))
2128 goto invalid_fp;
2129 }
2130 else if ((uint64_t) val > 0xffffffff)
2131 goto invalid_fp;
2132 else
2133 fpword = val;
2134
2135 hex_p = TRUE;
2136 }
2137 else
2138 {
2139 /* We must not accidentally parse an integer as a floating-point number.
2140 Make sure that the value we parse is not an integer by checking for
2141 special characters '.', 'e' or 'E'. */
2142 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2143 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2144 {
2145 found_fpchar = 1;
2146 break;
2147 }
2148
2149 if (!found_fpchar)
2150 return FALSE;
2151 }
2152
2153 if (! hex_p)
2154 {
2155 int i;
2156
2157 if ((str = atof_ieee (str, 's', words)) == NULL)
2158 goto invalid_fp;
2159
2160 /* Our FP word must be 32 bits (single-precision FP). */
2161 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2162 {
2163 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2164 fpword |= words[i];
2165 }
2166 }
2167
2168 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2169 {
2170 *immed = fpword;
2171 *ccp = str;
2172 return TRUE;
2173 }
2174
2175 invalid_fp:
2176 set_fatal_syntax_error (_("invalid floating-point constant"));
2177 return FALSE;
2178 }
2179
2180 /* Less-generic immediate-value read function with the possibility of loading
2181 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2182 instructions.
2183
2184 To prevent the expression parser from pushing a register name into the
2185 symbol table as an undefined symbol, a check is first done to find
2186 out whether STR is a valid register name followed by a comma or the end
2187 of line. Return FALSE if STR is such a register. */
2188
2189 static bfd_boolean
2190 parse_big_immediate (char **str, int64_t *imm)
2191 {
2192 char *ptr = *str;
2193
2194 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2195 {
2196 set_syntax_error (_("immediate operand required"));
2197 return FALSE;
2198 }
2199
2200 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2201
2202 if (inst.reloc.exp.X_op == O_constant)
2203 *imm = inst.reloc.exp.X_add_number;
2204
2205 *str = ptr;
2206
2207 return TRUE;
2208 }
2209
2210 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2211 If NEED_LIBOPCODES is non-zero, the fixup will need
2212 assistance from libopcodes. */
2213
2214 static inline void
2215 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2216 const aarch64_opnd_info *operand,
2217 int need_libopcodes_p)
2218 {
2219 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2220 reloc->opnd = operand->type;
2221 if (need_libopcodes_p)
2222 reloc->need_libopcodes_p = 1;
2223 }
2224
2225 /* Return TRUE if the instruction needs to be fixed up later internally by
2226 GAS; otherwise return FALSE. */
2227
2228 static inline bfd_boolean
2229 aarch64_gas_internal_fixup_p (void)
2230 {
2231 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2232 }
2233
2234 /* Assign the immediate value to the relevant field in *OPERAND if
2235 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2236 needs an internal fixup in a later stage.
2237 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2238 IMM.VALUE that may get assigned with the constant. */
2239 static inline void
2240 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2241 aarch64_opnd_info *operand,
2242 int addr_off_p,
2243 int need_libopcodes_p,
2244 int skip_p)
2245 {
2246 if (reloc->exp.X_op == O_constant)
2247 {
2248 if (addr_off_p)
2249 operand->addr.offset.imm = reloc->exp.X_add_number;
2250 else
2251 operand->imm.value = reloc->exp.X_add_number;
2252 reloc->type = BFD_RELOC_UNUSED;
2253 }
2254 else
2255 {
2256 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2257 /* Tell libopcodes to ignore this operand or not. This is helpful
2258 when one of the operands needs to be fixed up later but we need
2259 libopcodes to check the other operands. */
2260 operand->skip = skip_p;
2261 }
2262 }
2263
2264 /* Relocation modifiers. Each entry in the table contains the textual
2265 name for the relocation which may be placed before a symbol used as
2266 a load/store offset, or add immediate. It must be surrounded by a
2267 leading and trailing colon, for example:
2268
2269 ldr x0, [x1, #:rello:varsym]
2270 add x0, x1, #:rello:varsym */
2271
2272 struct reloc_table_entry
2273 {
2274 const char *name;
2275 int pc_rel;
2276 bfd_reloc_code_real_type adrp_type;
2277 bfd_reloc_code_real_type movw_type;
2278 bfd_reloc_code_real_type add_type;
2279 bfd_reloc_code_real_type ldst_type;
2280 };
2281
2282 static struct reloc_table_entry reloc_table[] = {
2283 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2284 {"lo12", 0,
2285 0,
2286 0,
2287 BFD_RELOC_AARCH64_ADD_LO12,
2288 BFD_RELOC_AARCH64_LDST_LO12},
2289
2290 /* Higher 21 bits of pc-relative page offset: ADRP */
2291 {"pg_hi21", 1,
2292 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2293 0,
2294 0,
2295 0},
2296
2297 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2298 {"pg_hi21_nc", 1,
2299 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2300 0,
2301 0,
2302 0},
2303
2304 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2305 {"abs_g0", 0,
2306 0,
2307 BFD_RELOC_AARCH64_MOVW_G0,
2308 0,
2309 0},
2310
2311 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2312 {"abs_g0_s", 0,
2313 0,
2314 BFD_RELOC_AARCH64_MOVW_G0_S,
2315 0,
2316 0},
2317
2318 /* Less significant bits 0-15 of address/value: MOVK, no check */
2319 {"abs_g0_nc", 0,
2320 0,
2321 BFD_RELOC_AARCH64_MOVW_G0_NC,
2322 0,
2323 0},
2324
2325 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2326 {"abs_g1", 0,
2327 0,
2328 BFD_RELOC_AARCH64_MOVW_G1,
2329 0,
2330 0},
2331
2332 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2333 {"abs_g1_s", 0,
2334 0,
2335 BFD_RELOC_AARCH64_MOVW_G1_S,
2336 0,
2337 0},
2338
2339 /* Less significant bits 16-31 of address/value: MOVK, no check */
2340 {"abs_g1_nc", 0,
2341 0,
2342 BFD_RELOC_AARCH64_MOVW_G1_NC,
2343 0,
2344 0},
2345
2346 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2347 {"abs_g2", 0,
2348 0,
2349 BFD_RELOC_AARCH64_MOVW_G2,
2350 0,
2351 0},
2352
2353 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2354 {"abs_g2_s", 0,
2355 0,
2356 BFD_RELOC_AARCH64_MOVW_G2_S,
2357 0,
2358 0},
2359
2360 /* Less significant bits 32-47 of address/value: MOVK, no check */
2361 {"abs_g2_nc", 0,
2362 0,
2363 BFD_RELOC_AARCH64_MOVW_G2_NC,
2364 0,
2365 0},
2366
2367 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2368 {"abs_g3", 0,
2369 0,
2370 BFD_RELOC_AARCH64_MOVW_G3,
2371 0,
2372 0},
2373
2374 /* Get to the page containing GOT entry for a symbol. */
2375 {"got", 1,
2376 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2377 0,
2378 0,
2379 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2380
2381 /* 12 bit offset into the page containing GOT entry for that symbol. */
2382 {"got_lo12", 0,
2383 0,
2384 0,
2385 0,
2386 BFD_RELOC_AARCH64_LD_GOT_LO12_NC},
2387
2388 /* Get to the page containing GOT TLS entry for a symbol */
2389 {"tlsgd", 0,
2390 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2391 0,
2392 0,
2393 0},
2394
2395 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2396 {"tlsgd_lo12", 0,
2397 0,
2398 0,
2399 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2400 0},
2401
2402 /* Get to the page containing GOT TLS entry for a symbol */
2403 {"tlsdesc", 0,
2404 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2405 0,
2406 0,
2407 0},
2408
2409 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2410 {"tlsdesc_lo12", 0,
2411 0,
2412 0,
2413 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2414 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC},
2415
2416 /* Get to the page containing GOT TLS entry for a symbol */
2417 {"gottprel", 0,
2418 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2419 0,
2420 0,
2421 0},
2422
2423 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2424 {"gottprel_lo12", 0,
2425 0,
2426 0,
2427 0,
2428 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC},
2429
2430 /* Get tp offset for a symbol. */
2431 {"tprel", 0,
2432 0,
2433 0,
2434 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2435 0},
2436
2437 /* Get tp offset for a symbol. */
2438 {"tprel_lo12", 0,
2439 0,
2440 0,
2441 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2442 0},
2443
2444 /* Get tp offset for a symbol. */
2445 {"tprel_hi12", 0,
2446 0,
2447 0,
2448 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2449 0},
2450
2451 /* Get tp offset for a symbol. */
2452 {"tprel_lo12_nc", 0,
2453 0,
2454 0,
2455 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2456 0},
2457
2458 /* Most significant bits 32-47 of address/value: MOVZ. */
2459 {"tprel_g2", 0,
2460 0,
2461 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2462 0,
2463 0},
2464
2465 /* Most significant bits 16-31 of address/value: MOVZ. */
2466 {"tprel_g1", 0,
2467 0,
2468 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2469 0,
2470 0},
2471
2472 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2473 {"tprel_g1_nc", 0,
2474 0,
2475 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2476 0,
2477 0},
2478
2479 /* Most significant bits 0-15 of address/value: MOVZ. */
2480 {"tprel_g0", 0,
2481 0,
2482 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2483 0,
2484 0},
2485
2486 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2487 {"tprel_g0_nc", 0,
2488 0,
2489 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2490 0,
2491 0},
2492 };
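
/* Editorial note: a hedged illustration, not in the original source, of how
   a few of the modifiers above are written in assembly input; the symbol
   name "var" is hypothetical:

     adrp	x0, :pg_hi21:var	  // BFD_RELOC_AARCH64_ADR_HI21_PCREL
     add	x0, x0, #:lo12:var	  // BFD_RELOC_AARCH64_ADD_LO12
     ldr	x1, [x0, #:lo12:var]	  // BFD_RELOC_AARCH64_LDST_LO12  */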
2493
2494 /* Given the address of a pointer pointing to the textual name of a
2495 relocation as may appear in assembler source, attempt to find its
2496 details in reloc_table. The pointer will be updated to the character
2497 after the trailing colon. On failure, NULL will be returned;
2498 otherwise return the reloc_table_entry. */
2499
2500 static struct reloc_table_entry *
2501 find_reloc_table_entry (char **str)
2502 {
2503 unsigned int i;
2504 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2505 {
2506 int length = strlen (reloc_table[i].name);
2507
2508 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2509 && (*str)[length] == ':')
2510 {
2511 *str += (length + 1);
2512 return &reloc_table[i];
2513 }
2514 }
2515
2516 return NULL;
2517 }
2518
2519 /* Mode argument to parse_shift and parse_shifter_operand. */
2520 enum parse_shift_mode
2521 {
2522 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2523 "#imm{,lsl #n}" */
2524 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2525 "#imm" */
2526 SHIFTED_LSL, /* bare "lsl #n" */
2527 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2528 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2529 };
2530
2531 /* Parse a <shift> operator on an AArch64 data processing instruction.
2532 Return TRUE on success; otherwise return FALSE. */
2533 static bfd_boolean
2534 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2535 {
2536 const struct aarch64_name_value_pair *shift_op;
2537 enum aarch64_modifier_kind kind;
2538 expressionS exp;
2539 int exp_has_prefix;
2540 char *s = *str;
2541 char *p = s;
2542
2543 for (p = *str; ISALPHA (*p); p++)
2544 ;
2545
2546 if (p == *str)
2547 {
2548 set_syntax_error (_("shift expression expected"));
2549 return FALSE;
2550 }
2551
2552 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2553
2554 if (shift_op == NULL)
2555 {
2556 set_syntax_error (_("shift operator expected"));
2557 return FALSE;
2558 }
2559
2560 kind = aarch64_get_operand_modifier (shift_op);
2561
2562 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2563 {
2564 set_syntax_error (_("invalid use of 'MSL'"));
2565 return FALSE;
2566 }
2567
2568 switch (mode)
2569 {
2570 case SHIFTED_LOGIC_IMM:
2571 if (aarch64_extend_operator_p (kind) == TRUE)
2572 {
2573 set_syntax_error (_("extending shift is not permitted"));
2574 return FALSE;
2575 }
2576 break;
2577
2578 case SHIFTED_ARITH_IMM:
2579 if (kind == AARCH64_MOD_ROR)
2580 {
2581 set_syntax_error (_("'ROR' shift is not permitted"));
2582 return FALSE;
2583 }
2584 break;
2585
2586 case SHIFTED_LSL:
2587 if (kind != AARCH64_MOD_LSL)
2588 {
2589 set_syntax_error (_("only 'LSL' shift is permitted"));
2590 return FALSE;
2591 }
2592 break;
2593
2594 case SHIFTED_REG_OFFSET:
2595 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2596 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2597 {
2598 set_fatal_syntax_error
2599 (_("invalid shift for the register offset addressing mode"));
2600 return FALSE;
2601 }
2602 break;
2603
2604 case SHIFTED_LSL_MSL:
2605 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2606 {
2607 set_syntax_error (_("invalid shift operator"));
2608 return FALSE;
2609 }
2610 break;
2611
2612 default:
2613 abort ();
2614 }
2615
2616 /* Whitespace can appear here if the next thing is a bare digit. */
2617 skip_whitespace (p);
2618
2619 /* Parse shift amount. */
2620 exp_has_prefix = 0;
2621 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2622 exp.X_op = O_absent;
2623 else
2624 {
2625 if (is_immediate_prefix (*p))
2626 {
2627 p++;
2628 exp_has_prefix = 1;
2629 }
2630 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2631 }
2632 if (exp.X_op == O_absent)
2633 {
2634 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2635 {
2636 set_syntax_error (_("missing shift amount"));
2637 return FALSE;
2638 }
2639 operand->shifter.amount = 0;
2640 }
2641 else if (exp.X_op != O_constant)
2642 {
2643 set_syntax_error (_("constant shift amount required"));
2644 return FALSE;
2645 }
2646 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2647 {
2648 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2649 return FALSE;
2650 }
2651 else
2652 {
2653 operand->shifter.amount = exp.X_add_number;
2654 operand->shifter.amount_present = 1;
2655 }
2656
2657 operand->shifter.operator_present = 1;
2658 operand->shifter.kind = kind;
2659
2660 *str = p;
2661 return TRUE;
2662 }
2663
2664 /* Parse a <shifter_operand> for a data processing instruction:
2665
2666 #<immediate>
2667 #<immediate>, LSL #imm
2668
2669 Validation of immediate operands is deferred to md_apply_fix.
2670
2671 Return TRUE on success; otherwise return FALSE. */
2672
2673 static bfd_boolean
2674 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2675 enum parse_shift_mode mode)
2676 {
2677 char *p;
2678
2679 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2680 return FALSE;
2681
2682 p = *str;
2683
2684 /* Accept an immediate expression. */
2685 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2686 return FALSE;
2687
2688 /* Accept optional LSL for arithmetic immediate values. */
2689 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2690 if (! parse_shift (&p, operand, SHIFTED_LSL))
2691 return FALSE;
2692
2693 /* Do not accept any shifter for logical immediate values. */
2694 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2695 && parse_shift (&p, operand, mode))
2696 {
2697 set_syntax_error (_("unexpected shift operator"));
2698 return FALSE;
2699 }
2700
2701 *str = p;
2702 return TRUE;
2703 }
2704
2705 /* Parse a <shifter_operand> for a data processing instruction:
2706
2707 <Rm>
2708 <Rm>, <shift>
2709 #<immediate>
2710 #<immediate>, LSL #imm
2711
2712 where <shift> is handled by parse_shift above, and the last two
2713 cases are handled by the function above.
2714
2715 Validation of immediate operands is deferred to md_apply_fix.
2716
2717 Return TRUE on success; otherwise return FALSE. */
2718
2719 static bfd_boolean
2720 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2721 enum parse_shift_mode mode)
2722 {
2723 int reg;
2724 int isreg32, isregzero;
2725 enum aarch64_operand_class opd_class
2726 = aarch64_get_operand_class (operand->type);
2727
2728 if ((reg =
2729 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2730 {
2731 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2732 {
2733 set_syntax_error (_("unexpected register in the immediate operand"));
2734 return FALSE;
2735 }
2736
2737 if (!isregzero && reg == REG_SP)
2738 {
2739 set_syntax_error (BAD_SP);
2740 return FALSE;
2741 }
2742
2743 operand->reg.regno = reg;
2744 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2745
2746 /* Accept optional shift operation on register. */
2747 if (! skip_past_comma (str))
2748 return TRUE;
2749
2750 if (! parse_shift (str, operand, mode))
2751 return FALSE;
2752
2753 return TRUE;
2754 }
2755 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2756 {
2757 set_syntax_error
2758 (_("integer register expected in the extended/shifted operand "
2759 "register"));
2760 return FALSE;
2761 }
2762
2763 /* We have a shifted immediate variable. */
2764 return parse_shifter_operand_imm (str, operand, mode);
2765 }
2766
2767 /* Return TRUE on success; return FALSE otherwise. */
2768
2769 static bfd_boolean
2770 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2771 enum parse_shift_mode mode)
2772 {
2773 char *p = *str;
2774
2775 /* Determine if we have the sequence of characters #: or just :
2776 coming next. If we do, then we check for a :rello: relocation
2777 modifier. If we don't, punt the whole lot to
2778 parse_shifter_operand. */
2779
2780 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2781 {
2782 struct reloc_table_entry *entry;
2783
2784 if (p[0] == '#')
2785 p += 2;
2786 else
2787 p++;
2788 *str = p;
2789
2790 /* Try to parse a relocation. Anything else is an error. */
2791 if (!(entry = find_reloc_table_entry (str)))
2792 {
2793 set_syntax_error (_("unknown relocation modifier"));
2794 return FALSE;
2795 }
2796
2797 if (entry->add_type == 0)
2798 {
2799 set_syntax_error
2800 (_("this relocation modifier is not allowed on this instruction"));
2801 return FALSE;
2802 }
2803
2804 /* Save str before we decompose it. */
2805 p = *str;
2806
2807 /* Next, we parse the expression. */
2808 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2809 return FALSE;
2810
2811 /* Record the relocation type (use the ADD variant here). */
2812 inst.reloc.type = entry->add_type;
2813 inst.reloc.pc_rel = entry->pc_rel;
2814
2815 /* If str is empty, we've reached the end, stop here. */
2816 if (**str == '\0')
2817 return TRUE;
2818
2819 /* Otherwise, we have a shifted reloc modifier, so rewind to
2820 recover the variable name and continue parsing for the shifter. */
2821 *str = p;
2822 return parse_shifter_operand_imm (str, operand, mode);
2823 }
2824
2825 return parse_shifter_operand (str, operand, mode);
2826 }
2827
2828 /* Parse all forms of an address expression. Information is written
2829 to *OPERAND and/or inst.reloc.
2830
2831 The A64 instruction set has the following addressing modes:
2832
2833 Offset
2834 [base] // in SIMD ld/st structure
2835 [base{,#0}] // in ld/st exclusive
2836 [base{,#imm}]
2837 [base,Xm{,LSL #imm}]
2838 [base,Xm,SXTX {#imm}]
2839 [base,Wm,(S|U)XTW {#imm}]
2840 Pre-indexed
2841 [base,#imm]!
2842 Post-indexed
2843 [base],#imm
2844 [base],Xm // in SIMD ld/st structure
2845 PC-relative (literal)
2846 label
2847 =immediate
2848
2849 (As a convenience, the notation "=immediate" is permitted in conjunction
2850 with the pc-relative literal load instructions to automatically place an
2851 immediate value or symbolic address in a nearby literal pool and generate
2852 a hidden label which references it.)
2853
2854 Upon a successful parsing, the address structure in *OPERAND will be
2855 filled in the following way:
2856
2857 .base_regno = <base>
2858 .offset.is_reg // 1 if the offset is a register
2859 .offset.imm = <imm>
2860 .offset.regno = <Rm>
2861
2862 For different addressing modes defined in the A64 ISA:
2863
2864 Offset
2865 .pcrel=0; .preind=1; .postind=0; .writeback=0
2866 Pre-indexed
2867 .pcrel=0; .preind=1; .postind=0; .writeback=1
2868 Post-indexed
2869 .pcrel=0; .preind=0; .postind=1; .writeback=1
2870 PC-relative (literal)
2871 .pcrel=1; .preind=1; .postind=0; .writeback=0
2872
2873 The shift/extension information, if any, will be stored in .shifter.
2874
2875 It is the caller's responsibility to check for addressing modes not
2876 supported by the instruction, and to set inst.reloc.type. */
2877
2878 static bfd_boolean
2879 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2880 int accept_reg_post_index)
2881 {
2882 char *p = *str;
2883 int reg;
2884 int isreg32, isregzero;
2885 expressionS *exp = &inst.reloc.exp;
2886
2887 if (! skip_past_char (&p, '['))
2888 {
2889 /* =immediate or label. */
2890 operand->addr.pcrel = 1;
2891 operand->addr.preind = 1;
2892
2893 /* #:<reloc_op>:<symbol> */
2894 skip_past_char (&p, '#');
2895 if (reloc && skip_past_char (&p, ':'))
2896 {
2897 struct reloc_table_entry *entry;
2898
2899 /* Try to parse a relocation modifier. Anything else is
2900 an error. */
2901 entry = find_reloc_table_entry (&p);
2902 if (! entry)
2903 {
2904 set_syntax_error (_("unknown relocation modifier"));
2905 return FALSE;
2906 }
2907
2908 if (entry->ldst_type == 0)
2909 {
2910 set_syntax_error
2911 (_("this relocation modifier is not allowed on this "
2912 "instruction"));
2913 return FALSE;
2914 }
2915
2916 /* #:<reloc_op>: */
2917 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2918 {
2919 set_syntax_error (_("invalid relocation expression"));
2920 return FALSE;
2921 }
2922
2923 /* #:<reloc_op>:<expr> */
2924 /* Record the load/store relocation type. */
2925 inst.reloc.type = entry->ldst_type;
2926 inst.reloc.pc_rel = entry->pc_rel;
2927 }
2928 else
2929 {
2930
2931 if (skip_past_char (&p, '='))
2932 /* =immediate; need to generate the literal in the literal pool. */
2933 inst.gen_lit_pool = 1;
2934
2935 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2936 {
2937 set_syntax_error (_("invalid address"));
2938 return FALSE;
2939 }
2940 }
2941
2942 *str = p;
2943 return TRUE;
2944 }
2945
2946 /* [ */
2947
2948 /* Accept SP and reject ZR */
2949 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
2950 if (reg == PARSE_FAIL || isreg32)
2951 {
2952 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
2953 return FALSE;
2954 }
2955 operand->addr.base_regno = reg;
2956
2957 /* [Xn */
2958 if (skip_past_comma (&p))
2959 {
2960 /* [Xn, */
2961 operand->addr.preind = 1;
2962
2963 /* Reject SP and accept ZR */
2964 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
2965 if (reg != PARSE_FAIL)
2966 {
2967 /* [Xn,Rm */
2968 operand->addr.offset.regno = reg;
2969 operand->addr.offset.is_reg = 1;
2970 /* Shifted index. */
2971 if (skip_past_comma (&p))
2972 {
2973 /* [Xn,Rm, */
2974 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
2975 /* Use the diagnostics set in parse_shift, so do not set a new
2976 error message here. */
2977 return FALSE;
2978 }
2979 /* We only accept:
2980 [base,Xm{,LSL #imm}]
2981 [base,Xm,SXTX {#imm}]
2982 [base,Wm,(S|U)XTW {#imm}] */
2983 if (operand->shifter.kind == AARCH64_MOD_NONE
2984 || operand->shifter.kind == AARCH64_MOD_LSL
2985 || operand->shifter.kind == AARCH64_MOD_SXTX)
2986 {
2987 if (isreg32)
2988 {
2989 set_syntax_error (_("invalid use of 32-bit register offset"));
2990 return FALSE;
2991 }
2992 }
2993 else if (!isreg32)
2994 {
2995 set_syntax_error (_("invalid use of 64-bit register offset"));
2996 return FALSE;
2997 }
2998 }
2999 else
3000 {
3001 /* [Xn,#:<reloc_op>:<symbol> */
3002 skip_past_char (&p, '#');
3003 if (reloc && skip_past_char (&p, ':'))
3004 {
3005 struct reloc_table_entry *entry;
3006
3007 /* Try to parse a relocation modifier. Anything else is
3008 an error. */
3009 if (!(entry = find_reloc_table_entry (&p)))
3010 {
3011 set_syntax_error (_("unknown relocation modifier"));
3012 return FALSE;
3013 }
3014
3015 if (entry->ldst_type == 0)
3016 {
3017 set_syntax_error
3018 (_("this relocation modifier is not allowed on this "
3019 "instruction"));
3020 return FALSE;
3021 }
3022
3023 /* [Xn,#:<reloc_op>: */
3024 /* We now have the group relocation table entry corresponding to
3025 the name in the assembler source. Next, we parse the
3026 expression. */
3027 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3028 {
3029 set_syntax_error (_("invalid relocation expression"));
3030 return FALSE;
3031 }
3032
3033 /* [Xn,#:<reloc_op>:<expr> */
3034 /* Record the load/store relocation type. */
3035 inst.reloc.type = entry->ldst_type;
3036 inst.reloc.pc_rel = entry->pc_rel;
3037 }
3038 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3039 {
3040 set_syntax_error (_("invalid expression in the address"));
3041 return FALSE;
3042 }
3043 /* [Xn,<expr> */
3044 }
3045 }
3046
3047 if (! skip_past_char (&p, ']'))
3048 {
3049 set_syntax_error (_("']' expected"));
3050 return FALSE;
3051 }
3052
3053 if (skip_past_char (&p, '!'))
3054 {
3055 if (operand->addr.preind && operand->addr.offset.is_reg)
3056 {
3057 set_syntax_error (_("register offset not allowed in pre-indexed "
3058 "addressing mode"));
3059 return FALSE;
3060 }
3061 /* [Xn]! */
3062 operand->addr.writeback = 1;
3063 }
3064 else if (skip_past_comma (&p))
3065 {
3066 /* [Xn], */
3067 operand->addr.postind = 1;
3068 operand->addr.writeback = 1;
3069
3070 if (operand->addr.preind)
3071 {
3072 set_syntax_error (_("cannot combine pre- and post-indexing"));
3073 return FALSE;
3074 }
3075
3076 if (accept_reg_post_index
3077 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3078 &isregzero)) != PARSE_FAIL)
3079 {
3080 /* [Xn],Xm */
3081 if (isreg32)
3082 {
3083 set_syntax_error (_("invalid 32-bit register offset"));
3084 return FALSE;
3085 }
3086 operand->addr.offset.regno = reg;
3087 operand->addr.offset.is_reg = 1;
3088 }
3089 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3090 {
3091 /* [Xn],#expr */
3092 set_syntax_error (_("invalid expression in the address"));
3093 return FALSE;
3094 }
3095 }
3096
3097 /* If at this point neither .preind nor .postind is set, we have a
3098 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3099 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3100 {
3101 if (operand->addr.writeback)
3102 {
3103 /* Reject [Rn]! */
3104 set_syntax_error (_("missing offset in the pre-indexed address"));
3105 return FALSE;
3106 }
3107 operand->addr.preind = 1;
3108 inst.reloc.exp.X_op = O_constant;
3109 inst.reloc.exp.X_add_number = 0;
3110 }
3111
3112 *str = p;
3113 return TRUE;
3114 }
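
/* Editorial note: a minimal, hypothetical sketch (not part of the original
   source) of the fields parse_address_main fills in for the pre-indexed
   form "[x1, #8]!", following the structure description in the comment
   above the function; the helper name is invented for illustration only.  */

static void ATTRIBUTE_UNUSED
parse_address_example (void)
{
  char buf[] = "[x1, #8]!";
  char *p = buf;
  aarch64_opnd_info operand;

  memset (&operand, 0, sizeof (operand));
  if (parse_address_main (&p, &operand, 0, 0))
    {
      gas_assert (operand.addr.base_regno == 1);
      gas_assert (operand.addr.preind && operand.addr.writeback);
      /* The constant offset (8) is left in inst.reloc.exp until
	 assign_imm_if_const_or_fixup_later transfers it.  */
    }
}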
3115
3116 /* Return TRUE on success; otherwise return FALSE. */
3117 static bfd_boolean
3118 parse_address (char **str, aarch64_opnd_info *operand,
3119 int accept_reg_post_index)
3120 {
3121 return parse_address_main (str, operand, 0, accept_reg_post_index);
3122 }
3123
3124 /* Return TRUE on success; otherwise return FALSE. */
3125 static bfd_boolean
3126 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3127 {
3128 return parse_address_main (str, operand, 1, 0);
3129 }
3130
3131 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3132 Return TRUE on success; otherwise return FALSE. */
3133 static bfd_boolean
3134 parse_half (char **str, int *internal_fixup_p)
3135 {
3136 char *p, *saved;
3137 int dummy;
3138
3139 p = *str;
3140 skip_past_char (&p, '#');
3141
3142 gas_assert (internal_fixup_p);
3143 *internal_fixup_p = 0;
3144
3145 if (*p == ':')
3146 {
3147 struct reloc_table_entry *entry;
3148
3149 /* Try to parse a relocation. Anything else is an error. */
3150 ++p;
3151 if (!(entry = find_reloc_table_entry (&p)))
3152 {
3153 set_syntax_error (_("unknown relocation modifier"));
3154 return FALSE;
3155 }
3156
3157 if (entry->movw_type == 0)
3158 {
3159 set_syntax_error
3160 (_("this relocation modifier is not allowed on this instruction"));
3161 return FALSE;
3162 }
3163
3164 inst.reloc.type = entry->movw_type;
3165 }
3166 else
3167 *internal_fixup_p = 1;
3168
3169 /* Avoid parsing a register as a general symbol. */
3170 saved = p;
3171 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3172 return FALSE;
3173 p = saved;
3174
3175 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3176 return FALSE;
3177
3178 *str = p;
3179 return TRUE;
3180 }
3181
3182 /* Parse an operand for an ADRP instruction:
3183 ADRP <Xd>, <label>
3184 Return TRUE on success; otherwise return FALSE. */
3185
3186 static bfd_boolean
3187 parse_adrp (char **str)
3188 {
3189 char *p;
3190
3191 p = *str;
3192 if (*p == ':')
3193 {
3194 struct reloc_table_entry *entry;
3195
3196 /* Try to parse a relocation. Anything else is an error. */
3197 ++p;
3198 if (!(entry = find_reloc_table_entry (&p)))
3199 {
3200 set_syntax_error (_("unknown relocation modifier"));
3201 return FALSE;
3202 }
3203
3204 if (entry->adrp_type == 0)
3205 {
3206 set_syntax_error
3207 (_("this relocation modifier is not allowed on this instruction"));
3208 return FALSE;
3209 }
3210
3211 inst.reloc.type = entry->adrp_type;
3212 }
3213 else
3214 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3215
3216 inst.reloc.pc_rel = 1;
3217
3218 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3219 return FALSE;
3220
3221 *str = p;
3222 return TRUE;
3223 }
3224
3225 /* Miscellaneous. */
3226
3227 /* Parse an option for a preload instruction. Returns the encoding for the
3228 option, or PARSE_FAIL. */
3229
3230 static int
3231 parse_pldop (char **str)
3232 {
3233 char *p, *q;
3234 const struct aarch64_name_value_pair *o;
3235
3236 p = q = *str;
3237 while (ISALNUM (*q))
3238 q++;
3239
3240 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3241 if (!o)
3242 return PARSE_FAIL;
3243
3244 *str = q;
3245 return o->value;
3246 }
3247
3248 /* Parse an option for a barrier instruction. Returns the encoding for the
3249 option, or PARSE_FAIL. */
3250
3251 static int
3252 parse_barrier (char **str)
3253 {
3254 char *p, *q;
3255 const asm_barrier_opt *o;
3256
3257 p = q = *str;
3258 while (ISALPHA (*q))
3259 q++;
3260
3261 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3262 if (!o)
3263 return PARSE_FAIL;
3264
3265 *str = q;
3266 return o->value;
3267 }
3268
3269 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3270 Returns the encoding for the option, or PARSE_FAIL.
3271
3272 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3273 implementation defined system register name S3_<op1>_<Cn>_<Cm>_<op2>. */
3274
3275 static int
3276 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3277 {
3278 char *p, *q;
3279 char buf[32];
3280 const struct aarch64_name_value_pair *o;
3281 int value;
3282
3283 p = buf;
3284 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3285 if (p < buf + 31)
3286 *p++ = TOLOWER (*q);
3287 *p = '\0';
3288 /* Assert that BUF is large enough. */
3289 gas_assert (p - buf == q - *str);
3290
3291 o = hash_find (sys_regs, buf);
3292 if (!o)
3293 {
3294 if (!imple_defined_p)
3295 return PARSE_FAIL;
3296 else
3297 {
3298 /* Parse S3_<op1>_<Cn>_<Cm>_<op2>, the implementation defined
3299 registers. */
3300 unsigned int op0, op1, cn, cm, op2;
3301 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2) != 5)
3302 return PARSE_FAIL;
3303 /* The architecture specifies the encoding space for implementation
3304 defined registers as:
3305 op0 op1 CRn CRm op2
3306 11 xxx 1x11 xxxx xxx
3307 For convenience GAS accepts a wider encoding space, as follows:
3308 op0 op1 CRn CRm op2
3309 11 xxx xxxx xxxx xxx */
3310 if (op0 != 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3311 return PARSE_FAIL;
3312 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3313 }
3314 }
3315 else
3316 value = o->value;
3317
3318 *str = q;
3319 return value;
3320 }
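
/* Editorial note: a hypothetical worked example for the implementation
   defined encoding above: "s3_0_c15_c2_0" gives op0=3, op1=0, CRn=15,
   CRm=2, op2=0, hence (3 << 14) | (0 << 11) | (15 << 7) | (2 << 3) | 0,
   i.e. 0xc790.  */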
3321
3322 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3323 for the option, or NULL. */
3324
3325 static const aarch64_sys_ins_reg *
3326 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3327 {
3328 char *p, *q;
3329 char buf[32];
3330 const aarch64_sys_ins_reg *o;
3331
3332 p = buf;
3333 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3334 if (p < buf + 31)
3335 *p++ = TOLOWER (*q);
3336 *p = '\0';
3337
3338 o = hash_find (sys_ins_regs, buf);
3339 if (!o)
3340 return NULL;
3341
3342 *str = q;
3343 return o;
3344 }
3345 \f
3346 #define po_char_or_fail(chr) do { \
3347 if (! skip_past_char (&str, chr)) \
3348 goto failure; \
3349 } while (0)
3350
3351 #define po_reg_or_fail(regtype) do { \
3352 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3353 if (val == PARSE_FAIL) \
3354 { \
3355 set_default_error (); \
3356 goto failure; \
3357 } \
3358 } while (0)
3359
3360 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3361 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3362 &isreg32, &isregzero); \
3363 if (val == PARSE_FAIL) \
3364 { \
3365 set_default_error (); \
3366 goto failure; \
3367 } \
3368 info->reg.regno = val; \
3369 if (isreg32) \
3370 info->qualifier = AARCH64_OPND_QLF_W; \
3371 else \
3372 info->qualifier = AARCH64_OPND_QLF_X; \
3373 } while (0)
3374
3375 #define po_imm_nc_or_fail() do { \
3376 if (! parse_constant_immediate (&str, &val)) \
3377 goto failure; \
3378 } while (0)
3379
3380 #define po_imm_or_fail(min, max) do { \
3381 if (! parse_constant_immediate (&str, &val)) \
3382 goto failure; \
3383 if (val < min || val > max) \
3384 { \
3385 set_fatal_syntax_error (_("immediate value out of range "\
3386 #min " to "#max)); \
3387 goto failure; \
3388 } \
3389 } while (0)
3390
3391 #define po_misc_or_fail(expr) do { \
3392 if (!expr) \
3393 goto failure; \
3394 } while (0)
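
/* Editorial note: an illustrative, hypothetical fragment (not part of the
   original source) showing the shape in which the po_* macros above are
   meant to be used inside an operand-parsing routine: each macro jumps to a
   local "failure" label when parsing fails, leaving any diagnostic already
   recorded by the parser it wraps.

     {
       char *str = ...;
       int64_t val;

       po_char_or_fail ('[');
       po_imm_or_fail (0, 63);
       po_char_or_fail (']');
       return TRUE;

     failure:
       return FALSE;
     }
*/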
3395 \f
3396 /* encode the 12-bit imm field of Add/sub immediate */
3397 static inline uint32_t
3398 encode_addsub_imm (uint32_t imm)
3399 {
3400 return imm << 10;
3401 }
3402
3403 /* encode the shift amount field of Add/sub immediate */
3404 static inline uint32_t
3405 encode_addsub_imm_shift_amount (uint32_t cnt)
3406 {
3407 return cnt << 22;
3408 }
3409
3410
3411 /* encode the imm field of Adr instruction */
3412 static inline uint32_t
3413 encode_adr_imm (uint32_t imm)
3414 {
3415 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3416 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3417 }
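
/* Editorial note: a hypothetical worked example for the helper above:
   encode_adr_imm (0x5) == 0x20000020, since imm[1:0] == 0b01 is placed at
   bits [30:29] and imm[2] is placed at bit 5.  */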
3418
3419 /* encode the immediate field of Move wide immediate */
3420 static inline uint32_t
3421 encode_movw_imm (uint32_t imm)
3422 {
3423 return imm << 5;
3424 }
3425
3426 /* encode the 26-bit offset of unconditional branch */
3427 static inline uint32_t
3428 encode_branch_ofs_26 (uint32_t ofs)
3429 {
3430 return ofs & ((1 << 26) - 1);
3431 }
3432
3433 /* encode the 19-bit offset of conditional branch and compare & branch */
3434 static inline uint32_t
3435 encode_cond_branch_ofs_19 (uint32_t ofs)
3436 {
3437 return (ofs & ((1 << 19) - 1)) << 5;
3438 }
3439
3440 /* encode the 19-bit offset of ld literal */
3441 static inline uint32_t
3442 encode_ld_lit_ofs_19 (uint32_t ofs)
3443 {
3444 return (ofs & ((1 << 19) - 1)) << 5;
3445 }
3446
3447 /* Encode the 14-bit offset of test & branch. */
3448 static inline uint32_t
3449 encode_tst_branch_ofs_14 (uint32_t ofs)
3450 {
3451 return (ofs & ((1 << 14) - 1)) << 5;
3452 }
3453
3454 /* Encode the 16-bit imm field of svc/hvc/smc. */
3455 static inline uint32_t
3456 encode_svc_imm (uint32_t imm)
3457 {
3458 return imm << 5;
3459 }
3460
3461 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3462 static inline uint32_t
3463 reencode_addsub_switch_add_sub (uint32_t opcode)
3464 {
3465 return opcode ^ (1 << 30);
3466 }
3467
3468 static inline uint32_t
3469 reencode_movzn_to_movz (uint32_t opcode)
3470 {
3471 return opcode | (1 << 30);
3472 }
3473
3474 static inline uint32_t
3475 reencode_movzn_to_movn (uint32_t opcode)
3476 {
3477 return opcode & ~(1 << 30);
3478 }
3479
3480 /* Overall per-instruction processing. */
3481
3482 /* We need to be able to fix up arbitrary expressions in some statements.
3483 This is so that we can handle symbols that are an arbitrary distance from
3484 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3485 which returns part of an address in a form which will be valid for
3486 a data instruction. We do this by pushing the expression into a symbol
3487 in the expr_section, and creating a fix for that. */
3488
3489 static fixS *
3490 fix_new_aarch64 (fragS * frag,
3491 int where,
3492 short int size, expressionS * exp, int pc_rel, int reloc)
3493 {
3494 fixS *new_fix;
3495
3496 switch (exp->X_op)
3497 {
3498 case O_constant:
3499 case O_symbol:
3500 case O_add:
3501 case O_subtract:
3502 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3503 break;
3504
3505 default:
3506 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3507 pc_rel, reloc);
3508 break;
3509 }
3510 return new_fix;
3511 }
3512 \f
3513 /* Diagnostics on operands errors. */
3514
3515 /* By default, output a one-line error message only.
3516 Enable the verbose error message with -merror-verbose. */
3517 static int verbose_error_p = 0;
3518
3519 #ifdef DEBUG_AARCH64
3520 /* N.B. this is only for the purpose of debugging. */
3521 const char* operand_mismatch_kind_names[] =
3522 {
3523 "AARCH64_OPDE_NIL",
3524 "AARCH64_OPDE_RECOVERABLE",
3525 "AARCH64_OPDE_SYNTAX_ERROR",
3526 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3527 "AARCH64_OPDE_INVALID_VARIANT",
3528 "AARCH64_OPDE_OUT_OF_RANGE",
3529 "AARCH64_OPDE_UNALIGNED",
3530 "AARCH64_OPDE_REG_LIST",
3531 "AARCH64_OPDE_OTHER_ERROR",
3532 };
3533 #endif /* DEBUG_AARCH64 */
3534
3535 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3536
3537 When multiple errors of different kinds are found in the same assembly
3538 line, only the error of the highest severity will be picked up for
3539 issuing the diagnostics. */
3540
3541 static inline bfd_boolean
3542 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3543 enum aarch64_operand_error_kind rhs)
3544 {
3545 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3546 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3547 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3548 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3549 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3550 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3551 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3552 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3553 return lhs > rhs;
3554 }
3555
3556 /* Helper routine to get the mnemonic name from the assembly instruction
3557 line; should only be called for diagnostic purposes, as there is a
3558 string copy operation involved, which may affect the runtime
3559 performance if used elsewhere. */
3560
3561 static const char*
3562 get_mnemonic_name (const char *str)
3563 {
3564 static char mnemonic[32];
3565 char *ptr;
3566
3567 /* Get the first 31 bytes and assume that the full name is included. */
3568 strncpy (mnemonic, str, 31);
3569 mnemonic[31] = '\0';
3570
3571 /* Scan up to the end of the mnemonic, which must end in white space,
3572 '.', or end of string. */
3573 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3574 ;
3575
3576 *ptr = '\0';
3577
3578 /* Append '...' to the truncated long name. */
3579 if (ptr - mnemonic == 31)
3580 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3581
3582 return mnemonic;
3583 }
3584
3585 static void
3586 reset_aarch64_instruction (aarch64_instruction *instruction)
3587 {
3588 memset (instruction, '\0', sizeof (aarch64_instruction));
3589 instruction->reloc.type = BFD_RELOC_UNUSED;
3590 }
3591
3592 /* Data structure storing one user error in the assembly code related to
3593 operands. */
3594
3595 struct operand_error_record
3596 {
3597 const aarch64_opcode *opcode;
3598 aarch64_operand_error detail;
3599 struct operand_error_record *next;
3600 };
3601
3602 typedef struct operand_error_record operand_error_record;
3603
3604 struct operand_errors
3605 {
3606 operand_error_record *head;
3607 operand_error_record *tail;
3608 };
3609
3610 typedef struct operand_errors operand_errors;
3611
3612 /* Top-level data structure reporting user errors for the current line of
3613 the assembly code.
3614 The way md_assemble works is that all opcodes sharing the same mnemonic
3615 name are iterated to find a match to the assembly line. In this data
3616 structure, each such opcode will have one operand_error_record
3617 allocated and inserted. In other words, excessive errors related to
3618 a single opcode are disregarded. */
3619 operand_errors operand_error_report;
3620
3621 /* Free record nodes. */
3622 static operand_error_record *free_opnd_error_record_nodes = NULL;
3623
3624 /* Initialize the data structure that stores the operand mismatch
3625 information on assembling one line of the assembly code. */
3626 static void
3627 init_operand_error_report (void)
3628 {
3629 if (operand_error_report.head != NULL)
3630 {
3631 gas_assert (operand_error_report.tail != NULL);
3632 operand_error_report.tail->next = free_opnd_error_record_nodes;
3633 free_opnd_error_record_nodes = operand_error_report.head;
3634 operand_error_report.head = NULL;
3635 operand_error_report.tail = NULL;
3636 return;
3637 }
3638 gas_assert (operand_error_report.tail == NULL);
3639 }
3640
3641 /* Return TRUE if some operand error has been recorded during the
3642 parsing of the current assembly line using the opcode *OPCODE;
3643 otherwise return FALSE. */
3644 static inline bfd_boolean
3645 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3646 {
3647 operand_error_record *record = operand_error_report.head;
3648 return record && record->opcode == opcode;
3649 }
3650
3651 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3652 OPCODE field is initialized with OPCODE.
3653 N.B. only one record for each opcode, i.e. at most one error is
3654 recorded for each instruction template. */
3655
3656 static void
3657 add_operand_error_record (const operand_error_record* new_record)
3658 {
3659 const aarch64_opcode *opcode = new_record->opcode;
3660 operand_error_record* record = operand_error_report.head;
3661
3662 /* The record may have been created for this opcode. If not, we need
3663 to prepare one. */
3664 if (! opcode_has_operand_error_p (opcode))
3665 {
3666 /* Get one empty record. */
3667 if (free_opnd_error_record_nodes == NULL)
3668 {
3669 record = xmalloc (sizeof (operand_error_record));
3670 if (record == NULL)
3671 abort ();
3672 }
3673 else
3674 {
3675 record = free_opnd_error_record_nodes;
3676 free_opnd_error_record_nodes = record->next;
3677 }
3678 record->opcode = opcode;
3679 /* Insert at the head. */
3680 record->next = operand_error_report.head;
3681 operand_error_report.head = record;
3682 if (operand_error_report.tail == NULL)
3683 operand_error_report.tail = record;
3684 }
3685 else if (record->detail.kind != AARCH64_OPDE_NIL
3686 && record->detail.index <= new_record->detail.index
3687 && operand_error_higher_severity_p (record->detail.kind,
3688 new_record->detail.kind))
3689 {
3690 /* In the case of multiple errors found on operands related to a
3691 single opcode, only record the error of the leftmost operand and
3692 only if the error is of higher severity. */
3693 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3694 " the existing error %s on operand %d",
3695 operand_mismatch_kind_names[new_record->detail.kind],
3696 new_record->detail.index,
3697 operand_mismatch_kind_names[record->detail.kind],
3698 record->detail.index);
3699 return;
3700 }
3701
3702 record->detail = new_record->detail;
3703 }
3704
3705 static inline void
3706 record_operand_error_info (const aarch64_opcode *opcode,
3707 aarch64_operand_error *error_info)
3708 {
3709 operand_error_record record;
3710 record.opcode = opcode;
3711 record.detail = *error_info;
3712 add_operand_error_record (&record);
3713 }
3714
3715 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3716 error message *ERROR, for operand IDX (count from 0). */
3717
3718 static void
3719 record_operand_error (const aarch64_opcode *opcode, int idx,
3720 enum aarch64_operand_error_kind kind,
3721 const char* error)
3722 {
3723 aarch64_operand_error info;
3724 memset(&info, 0, sizeof (info));
3725 info.index = idx;
3726 info.kind = kind;
3727 info.error = error;
3728 record_operand_error_info (opcode, &info);
3729 }
3730
3731 static void
3732 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3733 enum aarch64_operand_error_kind kind,
3734 const char* error, const int *extra_data)
3735 {
3736 aarch64_operand_error info;
3737 info.index = idx;
3738 info.kind = kind;
3739 info.error = error;
3740 info.data[0] = extra_data[0];
3741 info.data[1] = extra_data[1];
3742 info.data[2] = extra_data[2];
3743 record_operand_error_info (opcode, &info);
3744 }
3745
3746 static void
3747 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3748 const char* error, int lower_bound,
3749 int upper_bound)
3750 {
3751 int data[3] = {lower_bound, upper_bound, 0};
3752 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3753 error, data);
3754 }
3755
3756 /* Remove the operand error record for *OPCODE. */
3757 static void ATTRIBUTE_UNUSED
3758 remove_operand_error_record (const aarch64_opcode *opcode)
3759 {
3760 if (opcode_has_operand_error_p (opcode))
3761 {
3762 operand_error_record* record = operand_error_report.head;
3763 gas_assert (record != NULL && operand_error_report.tail != NULL);
3764 operand_error_report.head = record->next;
3765 record->next = free_opnd_error_record_nodes;
3766 free_opnd_error_record_nodes = record;
3767 if (operand_error_report.head == NULL)
3768 {
3769 gas_assert (operand_error_report.tail == record);
3770 operand_error_report.tail = NULL;
3771 }
3772 }
3773 }
3774
3775 /* Given the instruction in *INSTR, return the index of the best matched
3776 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3777
3778 Return -1 if there is no qualifier sequence; return the first match
3779 if multiple matches are found. */
3780
3781 static int
3782 find_best_match (const aarch64_inst *instr,
3783 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3784 {
3785 int i, num_opnds, max_num_matched, idx;
3786
3787 num_opnds = aarch64_num_of_operands (instr->opcode);
3788 if (num_opnds == 0)
3789 {
3790 DEBUG_TRACE ("no operand");
3791 return -1;
3792 }
3793
3794 max_num_matched = 0;
3795 idx = -1;
3796
3797 /* For each pattern. */
3798 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3799 {
3800 int j, num_matched;
3801 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3802
3803 /* Most opcodes have far fewer patterns in the list. */
3804 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3805 {
3806 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3807 if (i != 0 && idx == -1)
3808 /* If nothing has been matched, return the 1st sequence. */
3809 idx = 0;
3810 break;
3811 }
3812
3813 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3814 if (*qualifiers == instr->operands[j].qualifier)
3815 ++num_matched;
3816
3817 if (num_matched > max_num_matched)
3818 {
3819 max_num_matched = num_matched;
3820 idx = i;
3821 }
3822 }
3823
3824 DEBUG_TRACE ("return with %d", idx);
3825 return idx;
3826 }
3827
3828 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3829 corresponding operands in *INSTR. */
3830
3831 static inline void
3832 assign_qualifier_sequence (aarch64_inst *instr,
3833 const aarch64_opnd_qualifier_t *qualifiers)
3834 {
3835 int i = 0;
3836 int num_opnds = aarch64_num_of_operands (instr->opcode);
3837 gas_assert (num_opnds);
3838 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3839 instr->operands[i].qualifier = *qualifiers;
3840 }
3841
3842 /* Print operands for diagnostic purposes. */
3843
3844 static void
3845 print_operands (char *buf, const aarch64_opcode *opcode,
3846 const aarch64_opnd_info *opnds)
3847 {
3848 int i;
3849
3850 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3851 {
3852 const size_t size = 128;
3853 char str[size];
3854
3855 /* We regard the opcode operand info as primary, but we also look into
3856 inst->operands to support the disassembling of the optional
3857 operand.
3858 The two operand codes should be the same in all cases, apart from
3859 when the operand can be optional. */
3860 if (opcode->operands[i] == AARCH64_OPND_NIL
3861 || opnds[i].type == AARCH64_OPND_NIL)
3862 break;
3863
3864 /* Generate the operand string in STR. */
3865 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3866
3867 /* Delimiter. */
3868 if (str[0] != '\0')
3869 strcat (buf, i == 0 ? " " : ",");
3870
3871 /* Append the operand string. */
3872 strcat (buf, str);
3873 }
3874 }
3875
3876 /* Send to stderr a string as information. */
3877
3878 static void
3879 output_info (const char *format, ...)
3880 {
3881 char *file;
3882 unsigned int line;
3883 va_list args;
3884
3885 as_where (&file, &line);
3886 if (file)
3887 {
3888 if (line != 0)
3889 fprintf (stderr, "%s:%u: ", file, line);
3890 else
3891 fprintf (stderr, "%s: ", file);
3892 }
3893 fprintf (stderr, _("Info: "));
3894 va_start (args, format);
3895 vfprintf (stderr, format, args);
3896 va_end (args);
3897 (void) putc ('\n', stderr);
3898 }
3899
3900 /* Output one operand error record. */
3901
3902 static void
3903 output_operand_error_record (const operand_error_record *record, char *str)
3904 {
3905 int idx = record->detail.index;
3906 const aarch64_opcode *opcode = record->opcode;
3907 enum aarch64_opnd opd_code = (idx != -1 ? opcode->operands[idx]
3908 : AARCH64_OPND_NIL);
3909 const aarch64_operand_error *detail = &record->detail;
3910
3911 switch (detail->kind)
3912 {
3913 case AARCH64_OPDE_NIL:
3914 gas_assert (0);
3915 break;
3916
3917 case AARCH64_OPDE_SYNTAX_ERROR:
3918 case AARCH64_OPDE_RECOVERABLE:
3919 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
3920 case AARCH64_OPDE_OTHER_ERROR:
3921 gas_assert (idx >= 0);
3922 /* Use the prepared error message if there is one, otherwise use the
3923 operand description string to describe the error. */
3924 if (detail->error != NULL)
3925 {
3926 if (detail->index == -1)
3927 as_bad (_("%s -- `%s'"), detail->error, str);
3928 else
3929 as_bad (_("%s at operand %d -- `%s'"),
3930 detail->error, detail->index + 1, str);
3931 }
3932 else
3933 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
3934 aarch64_get_operand_desc (opd_code), str);
3935 break;
3936
3937 case AARCH64_OPDE_INVALID_VARIANT:
3938 as_bad (_("operand mismatch -- `%s'"), str);
3939 if (verbose_error_p)
3940 {
3941 /* We will try to correct the erroneous instruction and also provide
3942 more information e.g. all other valid variants.
3943
3944 The string representation of the corrected instruction and other
3945 valid variants are generated by
3946
3947 1) obtaining the intermediate representation of the erroneous
3948 instruction;
3949 2) manipulating the IR, e.g. replacing the operand qualifier;
3950 3) printing out the instruction by calling the printer functions
3951 shared with the disassembler.
3952
3953 The limitation of this method is that the exact input assembly
3954 line cannot be accurately reproduced in some cases, for example an
3955 optional operand present in the actual assembly line will be
3956 omitted in the output; likewise for the optional syntax rules,
3957 e.g. the # before the immediate. Another limitation is that the
3958 assembly symbols and relocation operations in the assembly line
3959 currently cannot be printed out in the error report. Last but not
3960 least, when other error(s) co-exist with this error, the
3961 'corrected' instruction may still be incorrect, e.g. given
3962 'ldnp h0,h1,[x0,#6]!'
3963 this diagnosis will provide the version:
3964 'ldnp s0,s1,[x0,#6]!'
3965 which is still not right. */
3966 size_t len = strlen (get_mnemonic_name (str));
3967 int i, qlf_idx;
3968 bfd_boolean result;
3969 const size_t size = 2048;
3970 char buf[size];
3971 aarch64_inst *inst_base = &inst.base;
3972 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
3973
3974 /* Init inst. */
3975 reset_aarch64_instruction (&inst);
3976 inst_base->opcode = opcode;
3977
3978 /* Reset the error report so that there is no side effect on the
3979 following operand parsing. */
3980 init_operand_error_report ();
3981
3982 /* Fill inst. */
3983 result = parse_operands (str + len, opcode)
3984 && programmer_friendly_fixup (&inst);
3985 gas_assert (result);
3986 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
3987 NULL, NULL);
3988 gas_assert (!result);
3989
3990 /* Find the most matched qualifier sequence. */
3991 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
3992 gas_assert (qlf_idx > -1);
3993
3994 /* Assign the qualifiers. */
3995 assign_qualifier_sequence (inst_base,
3996 opcode->qualifiers_list[qlf_idx]);
3997
3998 /* Print the hint. */
3999 output_info (_(" did you mean this?"));
4000 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4001 print_operands (buf, opcode, inst_base->operands);
4002 output_info (_(" %s"), buf);
4003
4004 /* Print out other variant(s) if there is any. */
4005 if (qlf_idx != 0
4006 || !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4007 output_info (_(" other valid variant(s):"));
4008
4009 /* For each pattern. */
4010 qualifiers_list = opcode->qualifiers_list;
4011 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4012 {
4013 /* Most opcodes have far fewer patterns in the list.
4014 The first NIL qualifier indicates the end of the list. */
4015 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4016 break;
4017
4018 if (i != qlf_idx)
4019 {
4020 /* Mnemonics name. */
4021 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4022
4023 /* Assign the qualifiers. */
4024 assign_qualifier_sequence (inst_base, *qualifiers_list);
4025
4026 /* Print instruction. */
4027 print_operands (buf, opcode, inst_base->operands);
4028
4029 output_info (_(" %s"), buf);
4030 }
4031 }
4032 }
4033 break;
4034
4035 case AARCH64_OPDE_OUT_OF_RANGE:
4036 if (detail->data[0] != detail->data[1])
4037 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4038 detail->error ? detail->error : _("immediate value"),
4039 detail->data[0], detail->data[1], detail->index + 1, str);
4040 else
4041 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4042 detail->error ? detail->error : _("immediate value"),
4043 detail->data[0], detail->index + 1, str);
4044 break;
4045
4046 case AARCH64_OPDE_REG_LIST:
4047 if (detail->data[0] == 1)
4048 as_bad (_("invalid number of registers in the list; "
4049 "only 1 register is expected at operand %d -- `%s'"),
4050 detail->index + 1, str);
4051 else
4052 as_bad (_("invalid number of registers in the list; "
4053 "%d registers are expected at operand %d -- `%s'"),
4054 detail->data[0], detail->index + 1, str);
4055 break;
4056
4057 case AARCH64_OPDE_UNALIGNED:
4058 as_bad (_("immediate value should be a multiple of "
4059 "%d at operand %d -- `%s'"),
4060 detail->data[0], detail->index + 1, str);
4061 break;
4062
4063 default:
4064 gas_assert (0);
4065 break;
4066 }
4067 }
4068
4069 /* Process and output the error message about the operand mismatching.
4070
4071 When this function is called, the operand error information has
4072 been collected for an assembly line; there may be multiple errors
4073 when there are multiple instruction templates. Output the
4074 error message that most closely describes the problem. */
4075
4076 static void
4077 output_operand_error_report (char *str)
4078 {
4079 int largest_error_pos;
4080 const char *msg = NULL;
4081 enum aarch64_operand_error_kind kind;
4082 operand_error_record *curr;
4083 operand_error_record *head = operand_error_report.head;
4084 operand_error_record *record = NULL;
4085
4086 /* No error to report. */
4087 if (head == NULL)
4088 return;
4089
4090 gas_assert (head != NULL && operand_error_report.tail != NULL);
4091
4092 /* Only one error. */
4093 if (head == operand_error_report.tail)
4094 {
4095 DEBUG_TRACE ("single opcode entry with error kind: %s",
4096 operand_mismatch_kind_names[head->detail.kind]);
4097 output_operand_error_record (head, str);
4098 return;
4099 }
4100
4101 /* Find the error kind of the highest severity. */
4102 DEBUG_TRACE ("multiple opcode entries with error kind");
4103 kind = AARCH64_OPDE_NIL;
4104 for (curr = head; curr != NULL; curr = curr->next)
4105 {
4106 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4107 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4108 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4109 kind = curr->detail.kind;
4110 }
4111 gas_assert (kind != AARCH64_OPDE_NIL);
4112
4113 /* Pick one of the errors of KIND to report. */
4114 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4115 for (curr = head; curr != NULL; curr = curr->next)
4116 {
4117 if (curr->detail.kind != kind)
4118 continue;
4119 /* If there are multiple errors, pick the one with the highest
4120 mismatching operand index. Among errors with the same highest
4121 operand index, keep the first one, unless it has no error message,
4122 in which case prefer the first one that does have a message. */
4123 if (curr->detail.index > largest_error_pos
4124 || (curr->detail.index == largest_error_pos && msg == NULL
4125 && curr->detail.error != NULL))
4126 {
4127 largest_error_pos = curr->detail.index;
4128 record = curr;
4129 msg = record->detail.error;
4130 }
4131 }
4132
4133 gas_assert (largest_error_pos != -2 && record != NULL);
4134 DEBUG_TRACE ("Pick up error kind %s to report",
4135 operand_mismatch_kind_names[record->detail.kind]);
4136
4137 /* Output. */
4138 output_operand_error_record (record, str);
4139 }
4140 \f
4141 /* Write an AArch64 instruction to buf - always little-endian. */
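/* For example, the NOP encoding 0xd503201f is written out as the byte
   sequence 1f 20 03 d5, regardless of the target's data endianness.  */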
4142 static void
4143 put_aarch64_insn (char *buf, uint32_t insn)
4144 {
4145 unsigned char *where = (unsigned char *) buf;
4146 where[0] = insn;
4147 where[1] = insn >> 8;
4148 where[2] = insn >> 16;
4149 where[3] = insn >> 24;
4150 }
4151
4152 static uint32_t
4153 get_aarch64_insn (char *buf)
4154 {
4155 unsigned char *where = (unsigned char *) buf;
4156 uint32_t result;
4157 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4158 return result;
4159 }
4160
4161 static void
4162 output_inst (struct aarch64_inst *new_inst)
4163 {
4164 char *to = NULL;
4165
4166 to = frag_more (INSN_SIZE);
4167
4168 frag_now->tc_frag_data.recorded = 1;
4169
4170 put_aarch64_insn (to, inst.base.value);
4171
4172 if (inst.reloc.type != BFD_RELOC_UNUSED)
4173 {
4174 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4175 INSN_SIZE, &inst.reloc.exp,
4176 inst.reloc.pc_rel,
4177 inst.reloc.type);
4178 DEBUG_TRACE ("Prepared relocation fix up");
4179 /* Don't check the addend value against the instruction size,
4180 that's the job of our code in md_apply_fix(). */
4181 fixp->fx_no_overflow = 1;
4182 if (new_inst != NULL)
4183 fixp->tc_fix_data.inst = new_inst;
4184 if (aarch64_gas_internal_fixup_p ())
4185 {
4186 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4187 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4188 fixp->fx_addnumber = inst.reloc.flags;
4189 }
4190 }
4191
4192 dwarf2_emit_insn (INSN_SIZE);
4193 }
4194
4195 /* Link together opcodes of the same name. */
4196
4197 struct templates
4198 {
4199 aarch64_opcode *opcode;
4200 struct templates *next;
4201 };
4202
4203 typedef struct templates templates;
4204
4205 static templates *
4206 lookup_mnemonic (const char *start, int len)
4207 {
4208 templates *templ = NULL;
4209
4210 templ = hash_find_n (aarch64_ops_hsh, start, len);
4211 return templ;
4212 }
4213
4214 /* Subroutine of md_assemble, responsible for looking up the primary
4215 opcode from the mnemonic the user wrote. STR points to the
4216 beginning of the mnemonic. */
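/* An illustrative sketch of the lookup (not an exhaustive description):
   given "b.eq", BASE/LEN cover the bare mnemonic "b", the "eq" suffix is
   recorded in inst.cond, and the hash lookup is then performed on the
   internal name "b.c", which is how conditional templates are keyed in
   aarch64_ops_hsh.  */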
4217
4218 static templates *
4219 opcode_lookup (char **str)
4220 {
4221 char *end, *base;
4222 const aarch64_cond *cond;
4223 char condname[16];
4224 int len;
4225
4226 /* Scan up to the end of the mnemonic, which must end in white space,
4227 '.', or end of string. */
4228 for (base = end = *str; is_part_of_name (*end); end++)
4229 if (*end == '.')
4230 break;
4231
4232 if (end == base)
4233 return 0;
4234
4235 inst.cond = COND_ALWAYS;
4236
4237 /* Handle a possible condition. */
4238 if (end[0] == '.')
4239 {
4240 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4241 if (cond)
4242 {
4243 inst.cond = cond->value;
4244 *str = end + 3;
4245 }
4246 else
4247 {
4248 *str = end;
4249 return 0;
4250 }
4251 }
4252 else
4253 *str = end;
4254
4255 len = end - base;
4256
4257 if (inst.cond == COND_ALWAYS)
4258 {
4259 /* Look for unaffixed mnemonic. */
4260 return lookup_mnemonic (base, len);
4261 }
4262 else if (len <= 13)
4263 {
4264 /* Append ".c" to the mnemonic if conditional. */
4265 memcpy (condname, base, len);
4266 memcpy (condname + len, ".c", 2);
4267 base = condname;
4268 len += 2;
4269 return lookup_mnemonic (base, len);
4270 }
4271
4272 return NULL;
4273 }
4274
4275 /* Internal helper routine converting a vector neon_type_el structure
4276 *VECTYPE to a corresponding operand qualifier. */
4277
4278 static inline aarch64_opnd_qualifier_t
4279 vectype_to_qualifier (const struct neon_type_el *vectype)
4280 {
4281 /* Element size in bytes indexed by neon_el_type. */
4282 const unsigned char ele_size[5]
4283 = {1, 2, 4, 8, 16};
4284
4285 if (!vectype->defined || vectype->type == NT_invtype)
4286 goto vectype_conversion_fail;
4287
4288 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4289
4290 if (vectype->defined & NTA_HASINDEX)
4291 /* Vector element register. */
4292 return AARCH64_OPND_QLF_S_B + vectype->type;
4293 else
4294 {
4295 /* Vector register. */
4296 int reg_size = ele_size[vectype->type] * vectype->width;
4297 unsigned offset;
4298 if (reg_size != 16 && reg_size != 8)
4299 goto vectype_conversion_fail;
4300 /* The conversion relies on the fact that the qualifiers are ordered
4301 by vector element size and then by vector register size. */
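/* An illustrative expansion of the formula below (derived from the code,
   assuming the usual qualifier order AARCH64_OPND_QLF_V_8B .. _V_1Q):
     .8b -> offset 0    .16b -> offset 1
     .4h -> offset 2    .8h  -> offset 3
     .2s -> offset 4    .4s  -> offset 5
     .1d -> offset 6    .2d  -> offset 7
     .1q -> offset 8  */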
4302 offset = (vectype->type == NT_q)
4303 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4304 gas_assert (offset <= 8);
4305 return AARCH64_OPND_QLF_V_8B + offset;
4306 }
4307
4308 vectype_conversion_fail:
4309 first_error (_("bad vector arrangement type"));
4310 return AARCH64_OPND_QLF_NIL;
4311 }
4312
4313 /* Process an optional operand that has been omitted from the assembly line.
4314 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4315 instruction's opcode entry while IDX is the index of this omitted operand.
4316 */
4317
4318 static void
4319 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4320 int idx, aarch64_opnd_info *operand)
4321 {
4322 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4323 gas_assert (optional_operand_p (opcode, idx));
4324 gas_assert (!operand->present);
4325
4326 switch (type)
4327 {
4328 case AARCH64_OPND_Rd:
4329 case AARCH64_OPND_Rn:
4330 case AARCH64_OPND_Rm:
4331 case AARCH64_OPND_Rt:
4332 case AARCH64_OPND_Rt2:
4333 case AARCH64_OPND_Rs:
4334 case AARCH64_OPND_Ra:
4335 case AARCH64_OPND_Rt_SYS:
4336 case AARCH64_OPND_Rd_SP:
4337 case AARCH64_OPND_Rn_SP:
4338 case AARCH64_OPND_Fd:
4339 case AARCH64_OPND_Fn:
4340 case AARCH64_OPND_Fm:
4341 case AARCH64_OPND_Fa:
4342 case AARCH64_OPND_Ft:
4343 case AARCH64_OPND_Ft2:
4344 case AARCH64_OPND_Sd:
4345 case AARCH64_OPND_Sn:
4346 case AARCH64_OPND_Sm:
4347 case AARCH64_OPND_Vd:
4348 case AARCH64_OPND_Vn:
4349 case AARCH64_OPND_Vm:
4350 case AARCH64_OPND_VdD1:
4351 case AARCH64_OPND_VnD1:
4352 operand->reg.regno = default_value;
4353 break;
4354
4355 case AARCH64_OPND_Ed:
4356 case AARCH64_OPND_En:
4357 case AARCH64_OPND_Em:
4358 operand->reglane.regno = default_value;
4359 break;
4360
4361 case AARCH64_OPND_IDX:
4362 case AARCH64_OPND_BIT_NUM:
4363 case AARCH64_OPND_IMMR:
4364 case AARCH64_OPND_IMMS:
4365 case AARCH64_OPND_SHLL_IMM:
4366 case AARCH64_OPND_IMM_VLSL:
4367 case AARCH64_OPND_IMM_VLSR:
4368 case AARCH64_OPND_CCMP_IMM:
4369 case AARCH64_OPND_FBITS:
4370 case AARCH64_OPND_UIMM4:
4371 case AARCH64_OPND_UIMM3_OP1:
4372 case AARCH64_OPND_UIMM3_OP2:
4373 case AARCH64_OPND_IMM:
4374 case AARCH64_OPND_WIDTH:
4375 case AARCH64_OPND_UIMM7:
4376 case AARCH64_OPND_NZCV:
4377 operand->imm.value = default_value;
4378 break;
4379
4380 case AARCH64_OPND_EXCEPTION:
4381 inst.reloc.type = BFD_RELOC_UNUSED;
4382 break;
4383
4384 case AARCH64_OPND_BARRIER_ISB:
4385 operand->barrier = aarch64_barrier_options + default_value;
      break;
4386
4387 default:
4388 break;
4389 }
4390 }
4391
4392 /* Process the relocation type for move wide instructions.
4393 Return TRUE on success; otherwise return FALSE. */
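/* An illustrative sketch: a source line such as
     movz x0, #:abs_g1:foo
   reaches this point with inst.reloc.type == BFD_RELOC_AARCH64_MOVW_G1,
   so the implicit shift amount of operand 1 is set to 16 below; the
   G0, G2 and G3 groups select shifts of 0, 32 and 48 respectively.  */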
4394
4395 static bfd_boolean
4396 process_movw_reloc_info (void)
4397 {
4398 int is32;
4399 unsigned shift;
4400
4401 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4402
4403 if (inst.base.opcode->op == OP_MOVK)
4404 switch (inst.reloc.type)
4405 {
4406 case BFD_RELOC_AARCH64_MOVW_G0_S:
4407 case BFD_RELOC_AARCH64_MOVW_G1_S:
4408 case BFD_RELOC_AARCH64_MOVW_G2_S:
4409 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4410 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4411 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4412 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4413 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4414 set_syntax_error
4415 (_("the specified relocation type is not allowed for MOVK"));
4416 return FALSE;
4417 default:
4418 break;
4419 }
4420
4421 switch (inst.reloc.type)
4422 {
4423 case BFD_RELOC_AARCH64_MOVW_G0:
4424 case BFD_RELOC_AARCH64_MOVW_G0_S:
4425 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4426 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4427 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4428 shift = 0;
4429 break;
4430 case BFD_RELOC_AARCH64_MOVW_G1:
4431 case BFD_RELOC_AARCH64_MOVW_G1_S:
4432 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4433 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4434 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4435 shift = 16;
4436 break;
4437 case BFD_RELOC_AARCH64_MOVW_G2:
4438 case BFD_RELOC_AARCH64_MOVW_G2_S:
4439 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4440 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4441 if (is32)
4442 {
4443 set_fatal_syntax_error
4444 (_("the specified relocation type is not allowed for 32-bit "
4445 "register"));
4446 return FALSE;
4447 }
4448 shift = 32;
4449 break;
4450 case BFD_RELOC_AARCH64_MOVW_G3:
4451 if (is32)
4452 {
4453 set_fatal_syntax_error
4454 (_("the specified relocation type is not allowed for 32-bit "
4455 "register"));
4456 return FALSE;
4457 }
4458 shift = 48;
4459 break;
4460 default:
4461 /* More cases should be added when more MOVW-related relocation types
4462 are supported in GAS. */
4463 gas_assert (aarch64_gas_internal_fixup_p ());
4464 /* The shift amount should have already been set by the parser. */
4465 return TRUE;
4466 }
4467 inst.base.operands[1].shifter.amount = shift;
4468 return TRUE;
4469 }
4470
4471 /* A primitive base-2 log calculator. */
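/* For example, get_logsz (1) == 0, get_logsz (4) == 2 and
   get_logsz (16) == 4; any size that is not a power of two in the
   range 1..16 trips the assertions below.  */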
4472
4473 static inline unsigned int
4474 get_logsz (unsigned int size)
4475 {
4476 const unsigned char ls[16] =
4477 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4478 if (size > 16)
4479 {
4480 gas_assert (0);
4481 return -1;
4482 }
4483 gas_assert (ls[size - 1] != (unsigned char)-1);
4484 return ls[size - 1];
4485 }
4486
4487 /* Determine and return the real reloc type code for an instruction
4488 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
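/* A rough illustration (the exact reloc follows from the transfer size
   of operand 1, as computed below):
     ldrb w0, [x1, #:lo12:sym]   ->  BFD_RELOC_AARCH64_LDST8_LO12
     ldr  w0, [x1, #:lo12:sym]   ->  BFD_RELOC_AARCH64_LDST32_LO12
     ldr  x0, [x1, #:lo12:sym]   ->  BFD_RELOC_AARCH64_LDST64_LO12
     ldr  q0, [x1, #:lo12:sym]   ->  BFD_RELOC_AARCH64_LDST128_LO12  */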
4489
4490 static inline bfd_reloc_code_real_type
4491 ldst_lo12_determine_real_reloc_type (void)
4492 {
4493 int logsz;
4494 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4495 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4496
4497 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4498 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4499 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4500 BFD_RELOC_AARCH64_LDST128_LO12
4501 };
4502
4503 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4504 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4505
4506 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4507 opd1_qlf =
4508 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4509 1, opd0_qlf, 0);
4510 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4511
4512 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4513 gas_assert (logsz >= 0 && logsz <= 4);
4514
4515 return reloc_ldst_lo12[logsz];
4516 }
4517
4518 /* Check whether a register list REGINFO is valid. The registers must be
4519 numbered in increasing order (modulo 32).
4520
4521 The increment between successive register numbers is normally one;
4522 if ACCEPT_ALTERNATE is non-zero, it must be two.
4523
4524 Return FALSE if such a register list is invalid, otherwise return TRUE. */
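/* An illustrative sketch of the packed REGINFO layout assumed here (it is
   produced by parse_neon_reg_list): bits [1:0] hold the number of registers
   minus one, and each subsequent 5-bit field, starting at bit 2, holds a
   register number.  For example { v30.8b, v31.8b, v0.8b } is accepted with
   ACCEPT_ALTERNATE == 0, because the numbers increase by one modulo 32.  */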
4525
4526 static bfd_boolean
4527 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4528 {
4529 uint32_t i, nb_regs, prev_regno, incr;
4530
4531 nb_regs = 1 + (reginfo & 0x3);
4532 reginfo >>= 2;
4533 prev_regno = reginfo & 0x1f;
4534 incr = accept_alternate ? 2 : 1;
4535
4536 for (i = 1; i < nb_regs; ++i)
4537 {
4538 uint32_t curr_regno;
4539 reginfo >>= 5;
4540 curr_regno = reginfo & 0x1f;
4541 if (curr_regno != ((prev_regno + incr) & 0x1f))
4542 return FALSE;
4543 prev_regno = curr_regno;
4544 }
4545
4546 return TRUE;
4547 }
4548
4549 /* Generic instruction operand parser. This does no encoding and no
4550 semantic validation; it merely squirrels values away in the inst
4551 structure. Returns TRUE or FALSE depending on whether the
4552 specified grammar matched. */
4553
4554 static bfd_boolean
4555 parse_operands (char *str, const aarch64_opcode *opcode)
4556 {
4557 int i;
4558 char *backtrack_pos = 0;
4559 const enum aarch64_opnd *operands = opcode->operands;
4560
4561 clear_error ();
4562 skip_whitespace (str);
4563
4564 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4565 {
4566 int64_t val;
4567 int isreg32, isregzero;
4568 int comma_skipped_p = 0;
4569 aarch64_reg_type rtype;
4570 struct neon_type_el vectype;
4571 aarch64_opnd_info *info = &inst.base.operands[i];
4572
4573 DEBUG_TRACE ("parse operand %d", i);
4574
4575 /* Assign the operand code. */
4576 info->type = operands[i];
4577
4578 if (optional_operand_p (opcode, i))
4579 {
4580 /* Remember where we are in case we need to backtrack. */
4581 gas_assert (!backtrack_pos);
4582 backtrack_pos = str;
4583 }
4584
4585 /* Expect a comma between operands; the backtrack mechanism will take
4586 care of the case of an omitted optional operand. */
4587 if (i > 0 && ! skip_past_char (&str, ','))
4588 {
4589 set_syntax_error (_("comma expected between operands"));
4590 goto failure;
4591 }
4592 else
4593 comma_skipped_p = 1;
4594
4595 switch (operands[i])
4596 {
4597 case AARCH64_OPND_Rd:
4598 case AARCH64_OPND_Rn:
4599 case AARCH64_OPND_Rm:
4600 case AARCH64_OPND_Rt:
4601 case AARCH64_OPND_Rt2:
4602 case AARCH64_OPND_Rs:
4603 case AARCH64_OPND_Ra:
4604 case AARCH64_OPND_Rt_SYS:
4605 po_int_reg_or_fail (1, 0);
4606 break;
4607
4608 case AARCH64_OPND_Rd_SP:
4609 case AARCH64_OPND_Rn_SP:
4610 po_int_reg_or_fail (0, 1);
4611 break;
4612
4613 case AARCH64_OPND_Rm_EXT:
4614 case AARCH64_OPND_Rm_SFT:
4615 po_misc_or_fail (parse_shifter_operand
4616 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4617 ? SHIFTED_ARITH_IMM
4618 : SHIFTED_LOGIC_IMM)));
4619 if (!info->shifter.operator_present)
4620 {
4621 /* Default to LSL if not present. Libopcodes prefers shifter
4622 kind to be explicit. */
4623 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4624 info->shifter.kind = AARCH64_MOD_LSL;
4625 /* For Rm_EXT, libopcodes will carry out further check on whether
4626 or not stack pointer is used in the instruction (Recall that
4627 "the extend operator is not optional unless at least one of
4628 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4629 }
4630 break;
4631
4632 case AARCH64_OPND_Fd:
4633 case AARCH64_OPND_Fn:
4634 case AARCH64_OPND_Fm:
4635 case AARCH64_OPND_Fa:
4636 case AARCH64_OPND_Ft:
4637 case AARCH64_OPND_Ft2:
4638 case AARCH64_OPND_Sd:
4639 case AARCH64_OPND_Sn:
4640 case AARCH64_OPND_Sm:
4641 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4642 if (val == PARSE_FAIL)
4643 {
4644 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4645 goto failure;
4646 }
4647 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4648
4649 info->reg.regno = val;
4650 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4651 break;
4652
4653 case AARCH64_OPND_Vd:
4654 case AARCH64_OPND_Vn:
4655 case AARCH64_OPND_Vm:
4656 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4657 if (val == PARSE_FAIL)
4658 {
4659 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4660 goto failure;
4661 }
4662 if (vectype.defined & NTA_HASINDEX)
4663 goto failure;
4664
4665 info->reg.regno = val;
4666 info->qualifier = vectype_to_qualifier (&vectype);
4667 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4668 goto failure;
4669 break;
4670
4671 case AARCH64_OPND_VdD1:
4672 case AARCH64_OPND_VnD1:
4673 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4674 if (val == PARSE_FAIL)
4675 {
4676 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4677 goto failure;
4678 }
4679 if (vectype.type != NT_d || vectype.index != 1)
4680 {
4681 set_fatal_syntax_error
4682 (_("the top half of a 128-bit FP/SIMD register is expected"));
4683 goto failure;
4684 }
4685 info->reg.regno = val;
4686 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
4687 here; it is correct for the purpose of encoding/decoding since
4688 only the register number is explicitly encoded in the related
4689 instructions, although this appears a bit hacky. */
4690 info->qualifier = AARCH64_OPND_QLF_S_D;
4691 break;
4692
4693 case AARCH64_OPND_Ed:
4694 case AARCH64_OPND_En:
4695 case AARCH64_OPND_Em:
4696 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4697 if (val == PARSE_FAIL)
4698 {
4699 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4700 goto failure;
4701 }
4702 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4703 goto failure;
4704
4705 info->reglane.regno = val;
4706 info->reglane.index = vectype.index;
4707 info->qualifier = vectype_to_qualifier (&vectype);
4708 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4709 goto failure;
4710 break;
4711
4712 case AARCH64_OPND_LVn:
4713 case AARCH64_OPND_LVt:
4714 case AARCH64_OPND_LVt_AL:
4715 case AARCH64_OPND_LEt:
4716 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4717 goto failure;
4718 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4719 {
4720 set_fatal_syntax_error (_("invalid register list"));
4721 goto failure;
4722 }
4723 info->reglist.first_regno = (val >> 2) & 0x1f;
4724 info->reglist.num_regs = (val & 0x3) + 1;
4725 if (operands[i] == AARCH64_OPND_LEt)
4726 {
4727 if (!(vectype.defined & NTA_HASINDEX))
4728 goto failure;
4729 info->reglist.has_index = 1;
4730 info->reglist.index = vectype.index;
4731 }
4732 else if (!(vectype.defined & NTA_HASTYPE))
4733 goto failure;
4734 info->qualifier = vectype_to_qualifier (&vectype);
4735 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4736 goto failure;
4737 break;
4738
4739 case AARCH64_OPND_Cn:
4740 case AARCH64_OPND_Cm:
4741 po_reg_or_fail (REG_TYPE_CN);
4742 if (val > 15)
4743 {
4744 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4745 goto failure;
4746 }
4747 inst.base.operands[i].reg.regno = val;
4748 break;
4749
4750 case AARCH64_OPND_SHLL_IMM:
4751 case AARCH64_OPND_IMM_VLSR:
4752 po_imm_or_fail (1, 64);
4753 info->imm.value = val;
4754 break;
4755
4756 case AARCH64_OPND_CCMP_IMM:
4757 case AARCH64_OPND_FBITS:
4758 case AARCH64_OPND_UIMM4:
4759 case AARCH64_OPND_UIMM3_OP1:
4760 case AARCH64_OPND_UIMM3_OP2:
4761 case AARCH64_OPND_IMM_VLSL:
4762 case AARCH64_OPND_IMM:
4763 case AARCH64_OPND_WIDTH:
4764 po_imm_nc_or_fail ();
4765 info->imm.value = val;
4766 break;
4767
4768 case AARCH64_OPND_UIMM7:
4769 po_imm_or_fail (0, 127);
4770 info->imm.value = val;
4771 break;
4772
4773 case AARCH64_OPND_IDX:
4774 case AARCH64_OPND_BIT_NUM:
4775 case AARCH64_OPND_IMMR:
4776 case AARCH64_OPND_IMMS:
4777 po_imm_or_fail (0, 63);
4778 info->imm.value = val;
4779 break;
4780
4781 case AARCH64_OPND_IMM0:
4782 po_imm_nc_or_fail ();
4783 if (val != 0)
4784 {
4785 set_fatal_syntax_error (_("immediate zero expected"));
4786 goto failure;
4787 }
4788 info->imm.value = 0;
4789 break;
4790
4791 case AARCH64_OPND_FPIMM0:
4792 {
4793 int qfloat;
4794 bfd_boolean res1 = FALSE, res2 = FALSE;
4795 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4796 it is probably not worth the effort to support it. */
4797 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4798 && !(res2 = parse_constant_immediate (&str, &val)))
4799 goto failure;
4800 if ((res1 && qfloat == 0) || (res2 && val == 0))
4801 {
4802 info->imm.value = 0;
4803 info->imm.is_fp = 1;
4804 break;
4805 }
4806 set_fatal_syntax_error (_("immediate zero expected"));
4807 goto failure;
4808 }
4809
4810 case AARCH64_OPND_IMM_MOV:
4811 {
4812 char *saved = str;
4813 if (reg_name_p (str, REG_TYPE_R_Z_SP))
4814 goto failure;
4815 str = saved;
4816 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4817 GE_OPT_PREFIX, 1));
4818 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4819 later. fix_mov_imm_insn will try to determine a machine
4820 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4821 message if the immediate cannot be moved by a single
4822 instruction. */
4823 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4824 inst.base.operands[i].skip = 1;
4825 }
4826 break;
4827
4828 case AARCH64_OPND_SIMD_IMM:
4829 case AARCH64_OPND_SIMD_IMM_SFT:
4830 if (! parse_big_immediate (&str, &val))
4831 goto failure;
4832 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4833 /* addr_off_p */ 0,
4834 /* need_libopcodes_p */ 1,
4835 /* skip_p */ 1);
4836 /* Parse shift.
4837 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4838 shift, we don't check it here; we leave the checking to
4839 the libopcodes (operand_general_constraint_met_p). By
4840 doing this, we achieve better diagnostics. */
4841 if (skip_past_comma (&str)
4842 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4843 goto failure;
4844 if (!info->shifter.operator_present
4845 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4846 {
4847 /* Default to LSL if not present. Libopcodes prefers shifter
4848 kind to be explicit. */
4849 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4850 info->shifter.kind = AARCH64_MOD_LSL;
4851 }
4852 break;
4853
4854 case AARCH64_OPND_FPIMM:
4855 case AARCH64_OPND_SIMD_FPIMM:
4856 {
4857 int qfloat;
4858 bfd_boolean dp_p
4859 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
4860 == 8);
4861 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
4862 goto failure;
4863 if (qfloat == 0)
4864 {
4865 set_fatal_syntax_error (_("invalid floating-point constant"));
4866 goto failure;
4867 }
4868 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4869 inst.base.operands[i].imm.is_fp = 1;
4870 }
4871 break;
4872
4873 case AARCH64_OPND_LIMM:
4874 po_misc_or_fail (parse_shifter_operand (&str, info,
4875 SHIFTED_LOGIC_IMM));
4876 if (info->shifter.operator_present)
4877 {
4878 set_fatal_syntax_error
4879 (_("shift not allowed for bitmask immediate"));
4880 goto failure;
4881 }
4882 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4883 /* addr_off_p */ 0,
4884 /* need_libopcodes_p */ 1,
4885 /* skip_p */ 1);
4886 break;
4887
4888 case AARCH64_OPND_AIMM:
4889 if (opcode->op == OP_ADD)
4890 /* ADD may have relocation types. */
4891 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
4892 SHIFTED_ARITH_IMM));
4893 else
4894 po_misc_or_fail (parse_shifter_operand (&str, info,
4895 SHIFTED_ARITH_IMM));
4896 switch (inst.reloc.type)
4897 {
4898 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4899 info->shifter.amount = 12;
4900 break;
4901 case BFD_RELOC_UNUSED:
4902 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4903 if (info->shifter.kind != AARCH64_MOD_NONE)
4904 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
4905 inst.reloc.pc_rel = 0;
4906 break;
4907 default:
4908 break;
4909 }
4910 info->imm.value = 0;
4911 if (!info->shifter.operator_present)
4912 {
4913 /* Default to LSL if not present. Libopcodes prefers shifter
4914 kind to be explicit. */
4915 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4916 info->shifter.kind = AARCH64_MOD_LSL;
4917 }
4918 break;
4919
4920 case AARCH64_OPND_HALF:
4921 {
4922 /* #<imm16> or relocation. */
4923 int internal_fixup_p;
4924 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
4925 if (internal_fixup_p)
4926 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4927 skip_whitespace (str);
4928 if (skip_past_comma (&str))
4929 {
4930 /* {, LSL #<shift>} */
4931 if (! aarch64_gas_internal_fixup_p ())
4932 {
4933 set_fatal_syntax_error (_("can't mix relocation modifier "
4934 "with explicit shift"));
4935 goto failure;
4936 }
4937 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
4938 }
4939 else
4940 inst.base.operands[i].shifter.amount = 0;
4941 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
4942 inst.base.operands[i].imm.value = 0;
4943 if (! process_movw_reloc_info ())
4944 goto failure;
4945 }
4946 break;
4947
4948 case AARCH64_OPND_EXCEPTION:
4949 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
4950 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4951 /* addr_off_p */ 0,
4952 /* need_libopcodes_p */ 0,
4953 /* skip_p */ 1);
4954 break;
4955
4956 case AARCH64_OPND_NZCV:
4957 {
4958 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
4959 if (nzcv != NULL)
4960 {
4961 str += 4;
4962 info->imm.value = nzcv->value;
4963 break;
4964 }
4965 po_imm_or_fail (0, 15);
4966 info->imm.value = val;
4967 }
4968 break;
4969
4970 case AARCH64_OPND_COND:
4971 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
4972 str += 2;
4973 if (info->cond == NULL)
4974 {
4975 set_syntax_error (_("invalid condition"));
4976 goto failure;
4977 }
4978 break;
4979
4980 case AARCH64_OPND_ADDR_ADRP:
4981 po_misc_or_fail (parse_adrp (&str));
4982 /* Clear the value as operand needs to be relocated. */
4983 info->imm.value = 0;
4984 break;
4985
4986 case AARCH64_OPND_ADDR_PCREL14:
4987 case AARCH64_OPND_ADDR_PCREL19:
4988 case AARCH64_OPND_ADDR_PCREL21:
4989 case AARCH64_OPND_ADDR_PCREL26:
4990 po_misc_or_fail (parse_address_reloc (&str, info));
4991 if (!info->addr.pcrel)
4992 {
4993 set_syntax_error (_("invalid pc-relative address"));
4994 goto failure;
4995 }
4996 if (inst.gen_lit_pool
4997 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
4998 {
4999 /* Only permit "=value" in the literal load instructions.
5000 The literal will be generated by programmer_friendly_fixup. */
5001 set_syntax_error (_("invalid use of \"=immediate\""));
5002 goto failure;
5003 }
5004 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5005 {
5006 set_syntax_error (_("unrecognized relocation suffix"));
5007 goto failure;
5008 }
5009 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5010 {
5011 info->imm.value = inst.reloc.exp.X_add_number;
5012 inst.reloc.type = BFD_RELOC_UNUSED;
5013 }
5014 else
5015 {
5016 info->imm.value = 0;
5017 if (inst.reloc.type == BFD_RELOC_UNUSED)
5018 switch (opcode->iclass)
5019 {
5020 case compbranch:
5021 case condbranch:
5022 /* e.g. CBZ or B.COND */
5023 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5024 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5025 break;
5026 case testbranch:
5027 /* e.g. TBZ */
5028 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5029 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5030 break;
5031 case branch_imm:
5032 /* e.g. B or BL */
5033 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5034 inst.reloc.type =
5035 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5036 : BFD_RELOC_AARCH64_JUMP26;
5037 break;
5038 case loadlit:
5039 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5040 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5041 break;
5042 case pcreladdr:
5043 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5044 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5045 break;
5046 default:
5047 gas_assert (0);
5048 abort ();
5049 }
5050 inst.reloc.pc_rel = 1;
5051 }
5052 break;
5053
5054 case AARCH64_OPND_ADDR_SIMPLE:
5055 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5056 /* [<Xn|SP>{, #<simm>}] */
5057 po_char_or_fail ('[');
5058 po_reg_or_fail (REG_TYPE_R64_SP);
5059 /* Accept optional ", #0". */
5060 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5061 && skip_past_char (&str, ','))
5062 {
5063 skip_past_char (&str, '#');
5064 if (! skip_past_char (&str, '0'))
5065 {
5066 set_fatal_syntax_error
5067 (_("the optional immediate offset can only be 0"));
5068 goto failure;
5069 }
5070 }
5071 po_char_or_fail (']');
5072 info->addr.base_regno = val;
5073 break;
5074
5075 case AARCH64_OPND_ADDR_REGOFF:
5076 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5077 po_misc_or_fail (parse_address (&str, info, 0));
5078 if (info->addr.pcrel || !info->addr.offset.is_reg
5079 || !info->addr.preind || info->addr.postind
5080 || info->addr.writeback)
5081 {
5082 set_syntax_error (_("invalid addressing mode"));
5083 goto failure;
5084 }
5085 if (!info->shifter.operator_present)
5086 {
5087 /* Default to LSL if not present. Libopcodes prefers shifter
5088 kind to be explicit. */
5089 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5090 info->shifter.kind = AARCH64_MOD_LSL;
5091 }
5092 /* Qualifier to be deduced by libopcodes. */
5093 break;
5094
5095 case AARCH64_OPND_ADDR_SIMM7:
5096 po_misc_or_fail (parse_address (&str, info, 0));
5097 if (info->addr.pcrel || info->addr.offset.is_reg
5098 || (!info->addr.preind && !info->addr.postind))
5099 {
5100 set_syntax_error (_("invalid addressing mode"));
5101 goto failure;
5102 }
5103 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5104 /* addr_off_p */ 1,
5105 /* need_libopcodes_p */ 1,
5106 /* skip_p */ 0);
5107 break;
5108
5109 case AARCH64_OPND_ADDR_SIMM9:
5110 case AARCH64_OPND_ADDR_SIMM9_2:
5111 po_misc_or_fail (parse_address_reloc (&str, info));
5112 if (info->addr.pcrel || info->addr.offset.is_reg
5113 || (!info->addr.preind && !info->addr.postind)
5114 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5115 && info->addr.writeback))
5116 {
5117 set_syntax_error (_("invalid addressing mode"));
5118 goto failure;
5119 }
5120 if (inst.reloc.type != BFD_RELOC_UNUSED)
5121 {
5122 set_syntax_error (_("relocation not allowed"));
5123 goto failure;
5124 }
5125 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5126 /* addr_off_p */ 1,
5127 /* need_libopcodes_p */ 1,
5128 /* skip_p */ 0);
5129 break;
5130
5131 case AARCH64_OPND_ADDR_UIMM12:
5132 po_misc_or_fail (parse_address_reloc (&str, info));
5133 if (info->addr.pcrel || info->addr.offset.is_reg
5134 || !info->addr.preind || info->addr.writeback)
5135 {
5136 set_syntax_error (_("invalid addressing mode"));
5137 goto failure;
5138 }
5139 if (inst.reloc.type == BFD_RELOC_UNUSED)
5140 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5141 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5142 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5143 /* Leave qualifier to be determined by libopcodes. */
5144 break;
5145
5146 case AARCH64_OPND_SIMD_ADDR_POST:
5147 /* [<Xn|SP>], <Xm|#<amount>> */
5148 po_misc_or_fail (parse_address (&str, info, 1));
5149 if (!info->addr.postind || !info->addr.writeback)
5150 {
5151 set_syntax_error (_("invalid addressing mode"));
5152 goto failure;
5153 }
5154 if (!info->addr.offset.is_reg)
5155 {
5156 if (inst.reloc.exp.X_op == O_constant)
5157 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5158 else
5159 {
5160 set_fatal_syntax_error
5161 (_("writeback value should be an immediate constant"));
5162 goto failure;
5163 }
5164 }
5165 /* No qualifier. */
5166 break;
5167
5168 case AARCH64_OPND_SYSREG:
5169 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5170 == PARSE_FAIL)
5171 {
5172 set_syntax_error (_("unknown or missing system register name"));
5173 goto failure;
5174 }
5175 inst.base.operands[i].sysreg = val;
5176 break;
5177
5178 case AARCH64_OPND_PSTATEFIELD:
5179 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5180 == PARSE_FAIL)
5181 {
5182 set_syntax_error (_("unknown or missing PSTATE field name"));
5183 goto failure;
5184 }
5185 inst.base.operands[i].pstatefield = val;
5186 break;
5187
5188 case AARCH64_OPND_SYSREG_IC:
5189 inst.base.operands[i].sysins_op =
5190 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5191 goto sys_reg_ins;
5192 case AARCH64_OPND_SYSREG_DC:
5193 inst.base.operands[i].sysins_op =
5194 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5195 goto sys_reg_ins;
5196 case AARCH64_OPND_SYSREG_AT:
5197 inst.base.operands[i].sysins_op =
5198 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5199 goto sys_reg_ins;
5200 case AARCH64_OPND_SYSREG_TLBI:
5201 inst.base.operands[i].sysins_op =
5202 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5203 sys_reg_ins:
5204 if (inst.base.operands[i].sysins_op == NULL)
5205 {
5206 set_fatal_syntax_error ( _("unknown or missing operation name"));
5207 goto failure;
5208 }
5209 break;
5210
5211 case AARCH64_OPND_BARRIER:
5212 case AARCH64_OPND_BARRIER_ISB:
5213 val = parse_barrier (&str);
5214 if (val != PARSE_FAIL
5215 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5216 {
5217 /* ISB only accepts the option name 'sy'. */
5218 set_syntax_error
5219 (_("the specified option is not accepted in ISB"));
5220 /* Turn off backtrack as this optional operand is present. */
5221 backtrack_pos = 0;
5222 goto failure;
5223 }
5224 /* This is an extension to accept a 0..15 immediate. */
5225 if (val == PARSE_FAIL)
5226 po_imm_or_fail (0, 15);
5227 info->barrier = aarch64_barrier_options + val;
5228 break;
5229
5230 case AARCH64_OPND_PRFOP:
5231 val = parse_pldop (&str);
5232 /* This is an extension to accept a 0..31 immediate. */
5233 if (val == PARSE_FAIL)
5234 po_imm_or_fail (0, 31);
5235 inst.base.operands[i].prfop = aarch64_prfops + val;
5236 break;
5237
5238 default:
5239 as_fatal (_("unhandled operand code %d"), operands[i]);
5240 }
5241
5242 /* If we get here, this operand was successfully parsed. */
5243 inst.base.operands[i].present = 1;
5244 continue;
5245
5246 failure:
5247 /* The parse routine should already have set the error, but in case
5248 not, set a default one here. */
5249 if (! error_p ())
5250 set_default_error ();
5251
5252 if (! backtrack_pos)
5253 goto parse_operands_return;
5254
5255 /* Reaching here means we are dealing with an optional operand that is
5256 omitted from the assembly line. */
5257 gas_assert (optional_operand_p (opcode, i));
5258 info->present = 0;
5259 process_omitted_operand (operands[i], opcode, i, info);
5260
5261 /* Try again, skipping the optional operand at backtrack_pos. */
5262 str = backtrack_pos;
5263 backtrack_pos = 0;
5264
5265 /* If the omitted optional operand is the last operand, any comma
5266 that preceded it was unexpected; report it. */
5267 if (i && comma_skipped_p && i == aarch64_num_of_operands (opcode) - 1)
5268 {
5269 set_fatal_syntax_error
5270 (_("unexpected comma before the omitted optional operand"));
5271 goto parse_operands_return;
5272 }
5273
5274 /* Clear any error record after the omitted optional operand has been
5275 successfully handled. */
5276 clear_error ();
5277 }
5278
5279 /* Check if we have parsed all the operands. */
5280 if (*str != '\0' && ! error_p ())
5281 {
5282 /* Set I to the index of the last present operand; this is
5283 for the purpose of diagnostics. */
5284 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5285 ;
5286 set_fatal_syntax_error
5287 (_("unexpected characters following instruction"));
5288 }
5289
5290 parse_operands_return:
5291
5292 if (error_p ())
5293 {
5294 DEBUG_TRACE ("parsing FAIL: %s - %s",
5295 operand_mismatch_kind_names[get_error_kind ()],
5296 get_error_message ());
5297 /* Record the operand error properly; this is useful when there
5298 are multiple instruction templates for a mnemonic name, so that
5299 later on, we can select the error that most closely describes
5300 the problem. */
5301 record_operand_error (opcode, i, get_error_kind (),
5302 get_error_message ());
5303 return FALSE;
5304 }
5305 else
5306 {
5307 DEBUG_TRACE ("parsing SUCCESS");
5308 return TRUE;
5309 }
5310 }
5311
5312 /* Apply some fix-ups to provide programmer-friendly features while
5313 keeping libopcodes happy, i.e. libopcodes only accepts
5314 the preferred architectural syntax.
5315 Return FALSE if there is any failure; otherwise return TRUE. */
5316
5317 static bfd_boolean
5318 programmer_friendly_fixup (aarch64_instruction *instr)
5319 {
5320 aarch64_inst *base = &instr->base;
5321 const aarch64_opcode *opcode = base->opcode;
5322 enum aarch64_op op = opcode->op;
5323 aarch64_opnd_info *operands = base->operands;
5324
5325 DEBUG_TRACE ("enter");
5326
5327 switch (opcode->iclass)
5328 {
5329 case testbranch:
5330 /* TBNZ Xn|Wn, #uimm6, label
5331 Test and Branch Not Zero: conditionally jumps to label if bit number
5332 uimm6 in register Xn is not zero. The bit number implies the width of
5333 the register, which may be written and should be disassembled as Wn if
5334 uimm is less than 32. */
5335 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5336 {
5337 if (operands[1].imm.value >= 32)
5338 {
5339 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5340 0, 31);
5341 return FALSE;
5342 }
5343 operands[0].qualifier = AARCH64_OPND_QLF_X;
5344 }
5345 break;
5346 case loadlit:
5347 /* LDR Wt, label | =value
5348 As a convenience assemblers will typically permit the notation
5349 "=value" in conjunction with the pc-relative literal load instructions
5350 to automatically place an immediate value or symbolic address in a
5351 nearby literal pool and generate a hidden label which references it.
5352 ISREG has been set to 0 in the case of =value. */
5353 if (instr->gen_lit_pool
5354 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5355 {
5356 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5357 if (op == OP_LDRSW_LIT)
5358 size = 4;
5359 if (instr->reloc.exp.X_op != O_constant
5360 && instr->reloc.exp.X_op != O_big
5361 && instr->reloc.exp.X_op != O_symbol)
5362 {
5363 record_operand_error (opcode, 1,
5364 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5365 _("constant expression expected"));
5366 return FALSE;
5367 }
5368 if (! add_to_lit_pool (&instr->reloc.exp, size))
5369 {
5370 record_operand_error (opcode, 1,
5371 AARCH64_OPDE_OTHER_ERROR,
5372 _("literal pool insertion failed"));
5373 return FALSE;
5374 }
5375 }
5376 break;
5377 case log_shift:
5378 case bitfield:
5379 /* UXT[BHW] Wd, Wn
5380 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
5381 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
5382 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5383 A programmer-friendly assembler should accept a destination Xd in
5384 place of Wd, however that is not the preferred form for disassembly.
5385 */
5386 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5387 && operands[1].qualifier == AARCH64_OPND_QLF_W
5388 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5389 operands[0].qualifier = AARCH64_OPND_QLF_W;
5390 break;
5391
5392 case addsub_ext:
5393 {
5394 /* In the 64-bit form, the final register operand is written as Wm
5395 for all but the (possibly omitted) UXTX/LSL and SXTX
5396 operators.
5397 As a programmer-friendly assembler, we accept e.g.
5398 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5399 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5400 int idx = aarch64_operand_index (opcode->operands,
5401 AARCH64_OPND_Rm_EXT);
5402 gas_assert (idx == 1 || idx == 2);
5403 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5404 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5405 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5406 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5407 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5408 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5409 }
5410 break;
5411
5412 default:
5413 break;
5414 }
5415
5416 DEBUG_TRACE ("exit with SUCCESS");
5417 return TRUE;
5418 }
5419
5420 /* A wrapper function to interface with libopcodes for encoding,
5421 recording the error message if there is any.
5422
5423 Return TRUE on success; otherwise return FALSE. */
5424
5425 static bfd_boolean
5426 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5427 aarch64_insn *code)
5428 {
5429 aarch64_operand_error error_info;
5430 error_info.kind = AARCH64_OPDE_NIL;
5431 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5432 return TRUE;
5433 else
5434 {
5435 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5436 record_operand_error_info (opcode, &error_info);
5437 return FALSE;
5438 }
5439 }
5440
5441 #ifdef DEBUG_AARCH64
5442 static inline void
5443 dump_opcode_operands (const aarch64_opcode *opcode)
5444 {
5445 int i = 0;
5446 while (opcode->operands[i] != AARCH64_OPND_NIL)
5447 {
5448 aarch64_verbose ("\t\t opnd%d: %s", i,
5449 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5450 ? aarch64_get_operand_name (opcode->operands[i])
5451 : aarch64_get_operand_desc (opcode->operands[i]));
5452 ++i;
5453 }
5454 }
5455 #endif /* DEBUG_AARCH64 */
5456
5457 /* This is the guts of the machine-dependent assembler. STR points to a
5458 machine-dependent instruction. This function is supposed to emit
5459 the frags/bytes it assembles to. */
5460
5461 void
5462 md_assemble (char *str)
5463 {
5464 char *p = str;
5465 templates *template;
5466 aarch64_opcode *opcode;
5467 aarch64_inst *inst_base;
5468 unsigned saved_cond;
5469
5470 /* Align the previous label if needed. */
5471 if (last_label_seen != NULL)
5472 {
5473 symbol_set_frag (last_label_seen, frag_now);
5474 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5475 S_SET_SEGMENT (last_label_seen, now_seg);
5476 }
5477
5478 inst.reloc.type = BFD_RELOC_UNUSED;
5479
5480 DEBUG_TRACE ("\n\n");
5481 DEBUG_TRACE ("==============================");
5482 DEBUG_TRACE ("Enter md_assemble with %s", str);
5483
5484 template = opcode_lookup (&p);
5485 if (!template)
5486 {
5487 /* It wasn't an instruction, but it might be a register alias
5488 created by an "alias .req reg" directive. */
5489 if (!create_register_alias (str, p))
5490 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5491 str);
5492 return;
5493 }
5494
5495 skip_whitespace (p);
5496 if (*p == ',')
5497 {
5498 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5499 get_mnemonic_name (str), str);
5500 return;
5501 }
5502
5503 init_operand_error_report ();
5504
5505 saved_cond = inst.cond;
5506 reset_aarch64_instruction (&inst);
5507 inst.cond = saved_cond;
5508
5509 /* Iterate through all opcode entries with the same mnemonic name. */
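/* For example (illustrative only), the mnemonic "add" maps to several
   templates - immediate, shifted-register and extended-register forms -
   linked through the 'templates' list; each one is tried in turn below
   until parsing, fixing-up and encoding all succeed.  */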
5510 do
5511 {
5512 opcode = template->opcode;
5513
5514 DEBUG_TRACE ("opcode %s found", opcode->name);
5515 #ifdef DEBUG_AARCH64
5516 if (debug_dump)
5517 dump_opcode_operands (opcode);
5518 #endif /* DEBUG_AARCH64 */
5519
5520 /* Check that this instruction is supported for this CPU. */
5521 if (!opcode->avariant
5522 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5523 {
5524 as_bad (_("selected processor does not support `%s'"), str);
5525 return;
5526 }
5527
5528 mapping_state (MAP_INSN);
5529
5530 inst_base = &inst.base;
5531 inst_base->opcode = opcode;
5532
5533 /* Truly conditionally executed instructions, e.g. b.cond. */
5534 if (opcode->flags & F_COND)
5535 {
5536 gas_assert (inst.cond != COND_ALWAYS);
5537 inst_base->cond = get_cond_from_value (inst.cond);
5538 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5539 }
5540 else if (inst.cond != COND_ALWAYS)
5541 {
5542 /* We shouldn't get here: the assembly looks like a conditional
5543 instruction but the opcode found is unconditional. */
5544 gas_assert (0);
5545 continue;
5546 }
5547
5548 if (parse_operands (p, opcode)
5549 && programmer_friendly_fixup (&inst)
5550 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5551 {
5552 if (inst.reloc.type == BFD_RELOC_UNUSED
5553 || !inst.reloc.need_libopcodes_p)
5554 output_inst (NULL);
5555 else
5556 {
5557 /* If there is relocation generated for the instruction,
5558 store the instruction information for the future fix-up. */
5559 struct aarch64_inst *copy;
5560 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5561 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5562 abort ();
5563 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5564 output_inst (copy);
5565 }
5566 return;
5567 }
5568
5569 template = template->next;
5570 if (template != NULL)
5571 {
5572 reset_aarch64_instruction (&inst);
5573 inst.cond = saved_cond;
5574 }
5575 }
5576 while (template != NULL);
5577
5578 /* Issue the error messages if any. */
5579 output_operand_error_report (str);
5580 }
5581
5582 /* Various frobbings of labels and their addresses. */
5583
5584 void
5585 aarch64_start_line_hook (void)
5586 {
5587 last_label_seen = NULL;
5588 }
5589
5590 void
5591 aarch64_frob_label (symbolS * sym)
5592 {
5593 last_label_seen = sym;
5594
5595 dwarf2_emit_label (sym);
5596 }
5597
5598 int
5599 aarch64_data_in_code (void)
5600 {
5601 if (!strncmp (input_line_pointer + 1, "data:", 5))
5602 {
5603 *input_line_pointer = '/';
5604 input_line_pointer += 5;
5605 *input_line_pointer = 0;
5606 return 1;
5607 }
5608
5609 return 0;
5610 }
5611
5612 char *
5613 aarch64_canonicalize_symbol_name (char *name)
5614 {
5615 int len;
5616
5617 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5618 *(name + len - 5) = 0;
5619
5620 return name;
5621 }
5622 \f
5623 /* Table of all register names defined by default. The user can
5624 define additional names with .req. Note that all register names
5625 should appear in both upper and lowercase variants. Some registers
5626 also have mixed-case names. */
5627
5628 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5629 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5630 #define REGSET31(p,t) \
5631 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5632 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5633 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5634 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5635 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5636 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5637 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5638 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5639 #define REGSET(p,t) \
5640 REGSET31(p,t), REGNUM(p,31,t)
5641
5642 /* These go into aarch64_reg_hsh hash-table. */
5643 static const reg_entry reg_names[] = {
5644 /* Integer registers. */
5645 REGSET31 (x, R_64), REGSET31 (X, R_64),
5646 REGSET31 (w, R_32), REGSET31 (W, R_32),
5647
5648 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5649 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5650
5651 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5652 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5653
5654 /* Coprocessor register numbers. */
5655 REGSET (c, CN), REGSET (C, CN),
5656
5657 /* Floating-point single precision registers. */
5658 REGSET (s, FP_S), REGSET (S, FP_S),
5659
5660 /* Floating-point double precision registers. */
5661 REGSET (d, FP_D), REGSET (D, FP_D),
5662
5663 /* Floating-point half precision registers. */
5664 REGSET (h, FP_H), REGSET (H, FP_H),
5665
5666 /* Floating-point byte precision registers. */
5667 REGSET (b, FP_B), REGSET (B, FP_B),
5668
5669 /* Floating-point quad precision registers. */
5670 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5671
5672 /* FP/SIMD registers. */
5673 REGSET (v, VN), REGSET (V, VN),
5674 };
5675
5676 #undef REGDEF
5677 #undef REGNUM
5678 #undef REGSET
5679
5680 #define N 1
5681 #define n 0
5682 #define Z 1
5683 #define z 0
5684 #define C 1
5685 #define c 0
5686 #define V 1
5687 #define v 0
5688 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5689 static const asm_nzcv nzcv_names[] = {
5690 {"nzcv", B (n, z, c, v)},
5691 {"nzcV", B (n, z, c, V)},
5692 {"nzCv", B (n, z, C, v)},
5693 {"nzCV", B (n, z, C, V)},
5694 {"nZcv", B (n, Z, c, v)},
5695 {"nZcV", B (n, Z, c, V)},
5696 {"nZCv", B (n, Z, C, v)},
5697 {"nZCV", B (n, Z, C, V)},
5698 {"Nzcv", B (N, z, c, v)},
5699 {"NzcV", B (N, z, c, V)},
5700 {"NzCv", B (N, z, C, v)},
5701 {"NzCV", B (N, z, C, V)},
5702 {"NZcv", B (N, Z, c, v)},
5703 {"NZcV", B (N, Z, c, V)},
5704 {"NZCv", B (N, Z, C, v)},
5705 {"NZCV", B (N, Z, C, V)}
5706 };
5707
5708 #undef N
5709 #undef n
5710 #undef Z
5711 #undef z
5712 #undef C
5713 #undef c
5714 #undef V
5715 #undef v
5716 #undef B
5717 \f
5718 /* MD interface: bits in the object file. */
5719
5720 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5721 for use in the a.out file, and store them in the array pointed to by buf.
5722 This knows about the endianness of the target machine and does
5723 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
5724 2 (short) and 4 (long). Floating-point numbers are put out as a series
5725 of LITTLENUMS (shorts, here at least). */
5726
5727 void
5728 md_number_to_chars (char *buf, valueT val, int n)
5729 {
5730 if (target_big_endian)
5731 number_to_chars_bigendian (buf, val, n);
5732 else
5733 number_to_chars_littleendian (buf, val, n);
5734 }
5735
5736 /* MD interface: Sections. */
5737
5738 /* Estimate the size of a frag before relaxing. Assume everything fits in
5739 4 bytes. */
5740
5741 int
5742 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5743 {
5744 fragp->fr_var = 4;
5745 return 4;
5746 }
5747
5748 /* Round up a section size to the appropriate boundary. */
5749
5750 valueT
5751 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5752 {
5753 return size;
5754 }
5755
5756 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5757 of an rs_align_code fragment. */
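/* For example, if 7 bytes of padding are required, the first 3 bytes are
   zero-filled (and, under ELF, a mapping symbol is inserted for them) and
   the remaining 4 bytes become a single NOP.  */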
5758
5759 void
5760 aarch64_handle_align (fragS * fragP)
5761 {
5762 /* NOP = d503201f */
5763 /* AArch64 instructions are always little-endian. */
5764 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5765
5766 int bytes, fix, noop_size;
5767 char *p;
5768 const char *noop;
5769
5770 if (fragP->fr_type != rs_align_code)
5771 return;
5772
5773 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5774 p = fragP->fr_literal + fragP->fr_fix;
5775 fix = 0;
5776
5777 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
5778 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
5779
5780 #ifdef OBJ_ELF
5781 gas_assert (fragP->tc_frag_data.recorded);
5782 #endif
5783
5784 noop = aarch64_noop;
5785 noop_size = sizeof (aarch64_noop);
5786 fragP->fr_var = noop_size;
5787
5788 if (bytes & (noop_size - 1))
5789 {
5790 fix = bytes & (noop_size - 1);
5791 #ifdef OBJ_ELF
5792 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5793 #endif
5794 memset (p, 0, fix);
5795 p += fix;
5796 bytes -= fix;
5797 }
5798
5799 while (bytes >= noop_size)
5800 {
5801 memcpy (p, noop, noop_size);
5802 p += noop_size;
5803 bytes -= noop_size;
5804 fix += noop_size;
5805 }
5806
5807 fragP->fr_fix += fix;
5808 }
5809
5810 /* Called from md_do_align. Used to create an alignment
5811 frag in a code section. */
5812
5813 void
5814 aarch64_frag_align_code (int n, int max)
5815 {
5816 char *p;
5817
5818 /* We assume that there will never be a requirement to support
5819 alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
5820 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
5821 as_fatal (_
5822 ("alignments greater than %d bytes not supported in .text sections"),
5823 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
5824
5825 p = frag_var (rs_align_code,
5826 MAX_MEM_FOR_RS_ALIGN_CODE,
5827 1,
5828 (relax_substateT) max,
5829 (symbolS *) NULL, (offsetT) n, (char *) NULL);
5830 *p = 0;
5831 }
5832
5833 /* Perform target-specific initialisation of a frag.
5834 Note - despite the name this initialisation is not done when the frag
5835 is created, but only when its type is assigned. A frag can be created
5836 and used a long time before its type is set, so beware of assuming that
5837 this initialisation is performed first. */
5838
5839 #ifndef OBJ_ELF
5840 void
5841 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
5842 int max_chars ATTRIBUTE_UNUSED)
5843 {
5844 }
5845
5846 #else /* OBJ_ELF is defined. */
5847 void
5848 aarch64_init_frag (fragS * fragP, int max_chars)
5849 {
5850 /* Record a mapping symbol for alignment frags. We will delete this
5851 later if the alignment ends up empty. */
5852 if (!fragP->tc_frag_data.recorded)
5853 {
5854 fragP->tc_frag_data.recorded = 1;
5855 switch (fragP->fr_type)
5856 {
5857 case rs_align:
5858 case rs_align_test:
5859 case rs_fill:
5860 mapping_state_2 (MAP_DATA, max_chars);
5861 break;
5862 case rs_align_code:
5863 mapping_state_2 (MAP_INSN, max_chars);
5864 break;
5865 default:
5866 break;
5867 }
5868 }
5869 }
5870 \f
5871 /* Initialize the DWARF-2 unwind information for this procedure. */
5872
5873 void
5874 tc_aarch64_frame_initial_instructions (void)
5875 {
5876 cfi_add_CFA_def_cfa (REG_SP, 0);
5877 }
5878 #endif /* OBJ_ELF */
5879
5880 /* Convert REGNAME to a DWARF-2 register number. */
5881
5882 int
5883 tc_aarch64_regname_to_dw2regnum (char *regname)
5884 {
5885 const reg_entry *reg = parse_reg (&regname);
5886 if (reg == NULL)
5887 return -1;
5888
5889 switch (reg->type)
5890 {
5891 case REG_TYPE_SP_32:
5892 case REG_TYPE_SP_64:
5893 case REG_TYPE_R_32:
5894 case REG_TYPE_R_64:
5895 case REG_TYPE_FP_B:
5896 case REG_TYPE_FP_H:
5897 case REG_TYPE_FP_S:
5898 case REG_TYPE_FP_D:
5899 case REG_TYPE_FP_Q:
5900 return reg->number;
5901 default:
5902 break;
5903 }
5904 return -1;
5905 }
5906
5907 /* Implement DWARF2_ADDR_SIZE. */
5908
5909 int
5910 aarch64_dwarf2_addr_size (void)
5911 {
5912 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
5913 if (ilp32_p)
5914 return 4;
5915 #endif
5916 return bfd_arch_bits_per_address (stdoutput) / 8;
5917 }
5918
5919 /* MD interface: Symbol and relocation handling. */
5920
5921 /* Return the address within the segment that a PC-relative fixup is
5922 relative to. For AArch64, PC-relative fixups applied to instructions
5923 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
5924
5925 long
5926 md_pcrel_from_section (fixS * fixP, segT seg)
5927 {
5928 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
5929
5930 /* If this is pc-relative and we are going to emit a relocation
5931 then we just want to put out any pipeline compensation that the linker
5932 will need. Otherwise we want to use the calculated base. */
5933 if (fixP->fx_pcrel
5934 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
5935 || aarch64_force_relocation (fixP)))
5936 base = 0;
5937
5938 /* AArch64 should be consistent for all pc-relative relocations. */
5939 return base + AARCH64_PCREL_OFFSET;
5940 }
5941
5942 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
5943 Otherwise we have no need to default values of symbols. */
5944
5945 symbolS *
5946 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
5947 {
5948 #ifdef OBJ_ELF
5949 if (name[0] == '_' && name[1] == 'G'
5950 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
5951 {
5952 if (!GOT_symbol)
5953 {
5954 if (symbol_find (name))
5955 as_bad (_("GOT already in the symbol table"));
5956
5957 GOT_symbol = symbol_new (name, undefined_section,
5958 (valueT) 0, &zero_address_frag);
5959 }
5960
5961 return GOT_symbol;
5962 }
5963 #endif
5964
5965 return 0;
5966 }
5967
5968 /* Return non-zero if the indicated VALUE has overflowed the maximum
5969 range expressible by an unsigned number with the indicated number of
5970 BITS. */
5971
5972 static bfd_boolean
5973 unsigned_overflow (valueT value, unsigned bits)
5974 {
5975 valueT lim;
5976 if (bits >= sizeof (valueT) * 8)
5977 return FALSE;
5978 lim = (valueT) 1 << bits;
5979 return (value >= lim);
5980 }
5981
5982
5983 /* Return non-zero if the indicated VALUE has overflowed the maximum
5984 range expressible by a signed number with the indicated number of
5985 BITS. */
5986
5987 static bfd_boolean
5988 signed_overflow (offsetT value, unsigned bits)
5989 {
5990 offsetT lim;
5991 if (bits >= sizeof (offsetT) * 8)
5992 return FALSE;
5993 lim = (offsetT) 1 << (bits - 1);
5994 return (value < -lim || value >= lim);
5995 }
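/* Illustrative behaviour of the two helpers above:
   unsigned_overflow (0x10000, 16) is TRUE, unsigned_overflow (0xffff, 16) is
   FALSE; signed_overflow (0x8000, 16) is TRUE, signed_overflow (-0x8000, 16)
   is FALSE.  */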
5996
5997 /* Given an instruction in *INSTR, which is expected to be a scaled, 12-bit,
5998 unsigned immediate offset load/store instruction, try to encode it as
5999 an unscaled, 9-bit, signed immediate offset load/store instruction.
6000 Return TRUE if it is successful; otherwise return FALSE.
6001
6002 As a programmer-friendly assembler, GAS generates LDUR/STUR instructions
6003 in response to the standard LDR/STR mnemonics when the immediate offset is
6004 unambiguous, i.e. when it is negative or unaligned. */
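/* Illustrative example: "ldr x0, [x1, #-8]" cannot use the scaled, unsigned
   12-bit offset form because the offset is negative, so it is re-encoded
   below as "ldur x0, [x1, #-8]", which takes a signed 9-bit offset.  */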
6005
6006 static bfd_boolean
6007 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6008 {
6009 int idx;
6010 enum aarch64_op new_op;
6011 const aarch64_opcode *new_opcode;
6012
6013 gas_assert (instr->opcode->iclass == ldst_pos);
6014
6015 switch (instr->opcode->op)
6016 {
6017 case OP_LDRB_POS:new_op = OP_LDURB; break;
6018 case OP_STRB_POS: new_op = OP_STURB; break;
6019 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6020 case OP_LDRH_POS: new_op = OP_LDURH; break;
6021 case OP_STRH_POS: new_op = OP_STURH; break;
6022 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6023 case OP_LDR_POS: new_op = OP_LDUR; break;
6024 case OP_STR_POS: new_op = OP_STUR; break;
6025 case OP_LDRF_POS: new_op = OP_LDURV; break;
6026 case OP_STRF_POS: new_op = OP_STURV; break;
6027 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6028 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6029 default: new_op = OP_NIL; break;
6030 }
6031
6032 if (new_op == OP_NIL)
6033 return FALSE;
6034
6035 new_opcode = aarch64_get_opcode (new_op);
6036 gas_assert (new_opcode != NULL);
6037
6038 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6039 instr->opcode->op, new_opcode->op);
6040
6041 aarch64_replace_opcode (instr, new_opcode);
6042
6043 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6044 qualifier matching may fail because the out-of-date qualifier will
6045 prevent the operand from being updated with a new and correct qualifier. */
6046 idx = aarch64_operand_index (instr->opcode->operands,
6047 AARCH64_OPND_ADDR_SIMM9);
6048 gas_assert (idx == 1);
6049 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6050
6051 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6052
6053 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6054 return FALSE;
6055
6056 return TRUE;
6057 }
6058
6059 /* Called by fix_insn to fix a MOV immediate alias instruction.
6060
6061 Operand for a generic move immediate instruction, which is an alias
6062 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6063 a 32-bit/64-bit immediate value into a general register. An assembler error
6064 shall result if the immediate cannot be created by a single one of these
6065 instructions. If there is a choice, then to ensure reversibility an
6066 assembler must prefer MOVZ to MOVN, and MOVZ or MOVN to ORR. */
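/* A few illustrative cases: "mov x0, #0x12340000" can be encoded as MOVZ
   (0x1234, LSL #16); "mov x0, #-1" as MOVN x0, #0; and
   "mov x0, #0x5555555555555555" is not reachable by a single MOVZ/MOVN but
   is a valid bitmask immediate, so the ORR alias is used.  */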
6067
6068 static void
6069 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6070 {
6071 const aarch64_opcode *opcode;
6072
6073 /* Need to check if the destination is SP/ZR. The check has to be done
6074 before any aarch64_replace_opcode. */
6075 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6076 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6077
6078 instr->operands[1].imm.value = value;
6079 instr->operands[1].skip = 0;
6080
6081 if (try_mov_wide_p)
6082 {
6083 /* Try the MOVZ alias. */
6084 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6085 aarch64_replace_opcode (instr, opcode);
6086 if (aarch64_opcode_encode (instr->opcode, instr,
6087 &instr->value, NULL, NULL))
6088 {
6089 put_aarch64_insn (buf, instr->value);
6090 return;
6091 }
6092 /* Try the MOVK alias. */
6093 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6094 aarch64_replace_opcode (instr, opcode);
6095 if (aarch64_opcode_encode (instr->opcode, instr,
6096 &instr->value, NULL, NULL))
6097 {
6098 put_aarch64_insn (buf, instr->value);
6099 return;
6100 }
6101 }
6102
6103 if (try_mov_bitmask_p)
6104 {
6105 /* Try the ORR alias. */
6106 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6107 aarch64_replace_opcode (instr, opcode);
6108 if (aarch64_opcode_encode (instr->opcode, instr,
6109 &instr->value, NULL, NULL))
6110 {
6111 put_aarch64_insn (buf, instr->value);
6112 return;
6113 }
6114 }
6115
6116 as_bad_where (fixP->fx_file, fixP->fx_line,
6117 _("immediate cannot be moved by a single instruction"));
6118 }
6119
6120 /* An immediate-related instruction operand may use a symbol in the
6121 assembly, e.g.
6122
6123 mov w0, u32
6124 .set u32, 0x00ffff00
6125
6126 At the time when the assembly instruction is parsed, a referenced symbol,
6127 like 'u32' in the above example, may not have been seen yet; a fixS is
6128 created in such a case and is handled here after symbols have been resolved.
6129 The instruction is fixed up with VALUE using the information in *FIXP plus
6130 extra information in FLAGS.
6131
6132 This function is called by md_apply_fix to fix up instructions that need
6133 a fix-up as described above and do not involve any linker-time relocation. */
6134
6135 static void
6136 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6137 {
6138 int idx;
6139 uint32_t insn;
6140 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6141 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6142 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6143
6144 if (new_inst)
6145 {
6146 /* Now the instruction is about to be fixed-up, so the operand that
6147 was previously marked as 'ignored' needs to be unmarked in order
6148 to get the encoding done properly. */
6149 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6150 new_inst->operands[idx].skip = 0;
6151 }
6152
6153 gas_assert (opnd != AARCH64_OPND_NIL);
6154
6155 switch (opnd)
6156 {
6157 case AARCH64_OPND_EXCEPTION:
6158 if (unsigned_overflow (value, 16))
6159 as_bad_where (fixP->fx_file, fixP->fx_line,
6160 _("immediate out of range"));
6161 insn = get_aarch64_insn (buf);
6162 insn |= encode_svc_imm (value);
6163 put_aarch64_insn (buf, insn);
6164 break;
6165
6166 case AARCH64_OPND_AIMM:
6167 /* ADD or SUB with immediate.
6168 NOTE this assumes we come here with an add/sub shifted reg encoding
6169 3 322|2222|2 2 2 21111 111111
6170 1 098|7654|3 2 1 09876 543210 98765 43210
6171 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6172 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6173 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6174 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6175 ->
6176 3 322|2222|2 2 221111111111
6177 1 098|7654|3 2 109876543210 98765 43210
6178 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6179 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6180 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6181 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6182 Fields sf Rn Rd are already set. */
6183 insn = get_aarch64_insn (buf);
6184 if (value < 0)
6185 {
6186 /* Add <-> sub. */
6187 insn = reencode_addsub_switch_add_sub (insn);
6188 value = -value;
6189 }
6190
6191 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6192 && unsigned_overflow (value, 12))
6193 {
6194 /* Try to shift the value by 12 to make it fit. */
6195 if (((value >> 12) << 12) == value
6196 && ! unsigned_overflow (value, 12 + 12))
6197 {
6198 value >>= 12;
6199 insn |= encode_addsub_imm_shift_amount (1);
6200 }
6201 }
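/* Illustrative example: a fixup value of 0x123000 does not fit in 12 bits,
   but its low 12 bits are zero, so the code above encodes imm12 = 0x123 and
   sets the LSL #12 shift bit.  */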
6202
6203 if (unsigned_overflow (value, 12))
6204 as_bad_where (fixP->fx_file, fixP->fx_line,
6205 _("immediate out of range"));
6206
6207 insn |= encode_addsub_imm (value);
6208
6209 put_aarch64_insn (buf, insn);
6210 break;
6211
6212 case AARCH64_OPND_SIMD_IMM:
6213 case AARCH64_OPND_SIMD_IMM_SFT:
6214 case AARCH64_OPND_LIMM:
6215 /* Bit mask immediate. */
6216 gas_assert (new_inst != NULL);
6217 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6218 new_inst->operands[idx].imm.value = value;
6219 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6220 &new_inst->value, NULL, NULL))
6221 put_aarch64_insn (buf, new_inst->value);
6222 else
6223 as_bad_where (fixP->fx_file, fixP->fx_line,
6224 _("invalid immediate"));
6225 break;
6226
6227 case AARCH64_OPND_HALF:
6228 /* 16-bit unsigned immediate. */
6229 if (unsigned_overflow (value, 16))
6230 as_bad_where (fixP->fx_file, fixP->fx_line,
6231 _("immediate out of range"));
6232 insn = get_aarch64_insn (buf);
6233 insn |= encode_movw_imm (value & 0xffff);
6234 put_aarch64_insn (buf, insn);
6235 break;
6236
6237 case AARCH64_OPND_IMM_MOV:
6238 /* Operand for a generic move immediate instruction, which is
6239 an alias instruction that generates a single MOVZ, MOVN or ORR
6240 instruction to load a 32-bit/64-bit immediate value into a general
6241 register. An assembler error shall result if the immediate cannot be
6242 created by a single one of these instructions. If there is a choice,
6243 then to ensure reversibility an assembler must prefer MOVZ to MOVN,
6244 and MOVZ or MOVN to ORR. */
6245 gas_assert (new_inst != NULL);
6246 fix_mov_imm_insn (fixP, buf, new_inst, value);
6247 break;
6248
6249 case AARCH64_OPND_ADDR_SIMM7:
6250 case AARCH64_OPND_ADDR_SIMM9:
6251 case AARCH64_OPND_ADDR_SIMM9_2:
6252 case AARCH64_OPND_ADDR_UIMM12:
6253 /* Immediate offset in an address. */
6254 insn = get_aarch64_insn (buf);
6255
6256 gas_assert (new_inst != NULL && new_inst->value == insn);
6257 gas_assert (new_inst->opcode->operands[1] == opnd
6258 || new_inst->opcode->operands[2] == opnd);
6259
6260 /* Get the index of the address operand. */
6261 if (new_inst->opcode->operands[1] == opnd)
6262 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6263 idx = 1;
6264 else
6265 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6266 idx = 2;
6267
6268 /* Update the resolved offset value. */
6269 new_inst->operands[idx].addr.offset.imm = value;
6270
6271 /* Encode/fix-up. */
6272 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6273 &new_inst->value, NULL, NULL))
6274 {
6275 put_aarch64_insn (buf, new_inst->value);
6276 break;
6277 }
6278 else if (new_inst->opcode->iclass == ldst_pos
6279 && try_to_encode_as_unscaled_ldst (new_inst))
6280 {
6281 put_aarch64_insn (buf, new_inst->value);
6282 break;
6283 }
6284
6285 as_bad_where (fixP->fx_file, fixP->fx_line,
6286 _("immediate offset out of range"));
6287 break;
6288
6289 default:
6290 gas_assert (0);
6291 as_fatal (_("unhandled operand code %d"), opnd);
6292 }
6293 }
6294
6295 /* Apply a fixup (fixP) to segment data, once it has been determined
6296 by our caller that we have all the info we need to fix it up.
6297
6298 Parameter valP is the pointer to the value of the bits. */
6299
6300 void
6301 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6302 {
6303 offsetT value = *valP;
6304 uint32_t insn;
6305 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6306 int scale;
6307 unsigned flags = fixP->fx_addnumber;
6308
6309 DEBUG_TRACE ("\n\n");
6310 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6311 DEBUG_TRACE ("Enter md_apply_fix");
6312
6313 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6314
6315 /* Note whether this will delete the relocation. */
6316
6317 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6318 fixP->fx_done = 1;
6319
6320 /* Process the relocations. */
6321 switch (fixP->fx_r_type)
6322 {
6323 case BFD_RELOC_NONE:
6324 /* This will need to go in the object file. */
6325 fixP->fx_done = 0;
6326 break;
6327
6328 case BFD_RELOC_8:
6329 case BFD_RELOC_8_PCREL:
6330 if (fixP->fx_done || !seg->use_rela_p)
6331 md_number_to_chars (buf, value, 1);
6332 break;
6333
6334 case BFD_RELOC_16:
6335 case BFD_RELOC_16_PCREL:
6336 if (fixP->fx_done || !seg->use_rela_p)
6337 md_number_to_chars (buf, value, 2);
6338 break;
6339
6340 case BFD_RELOC_32:
6341 case BFD_RELOC_32_PCREL:
6342 if (fixP->fx_done || !seg->use_rela_p)
6343 md_number_to_chars (buf, value, 4);
6344 break;
6345
6346 case BFD_RELOC_64:
6347 case BFD_RELOC_64_PCREL:
6348 if (fixP->fx_done || !seg->use_rela_p)
6349 md_number_to_chars (buf, value, 8);
6350 break;
6351
6352 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6353 /* We claim that these fixups have been processed here, even if
6354 in fact we generate an error because we do not have a reloc
6355 for them, so tc_gen_reloc() will reject them. */
6356 fixP->fx_done = 1;
6357 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6358 {
6359 as_bad_where (fixP->fx_file, fixP->fx_line,
6360 _("undefined symbol %s used as an immediate value"),
6361 S_GET_NAME (fixP->fx_addsy));
6362 goto apply_fix_return;
6363 }
6364 fix_insn (fixP, flags, value);
6365 break;
6366
6367 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6368 if (fixP->fx_done || !seg->use_rela_p)
6369 {
6370 if (value & 3)
6371 as_bad_where (fixP->fx_file, fixP->fx_line,
6372 _("pc-relative load offset not word aligned"));
6373 if (signed_overflow (value, 21))
6374 as_bad_where (fixP->fx_file, fixP->fx_line,
6375 _("pc-relative load offset out of range"));
6376 insn = get_aarch64_insn (buf);
6377 insn |= encode_ld_lit_ofs_19 (value >> 2);
6378 put_aarch64_insn (buf, insn);
6379 }
6380 break;
6381
6382 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6383 if (fixP->fx_done || !seg->use_rela_p)
6384 {
6385 if (signed_overflow (value, 21))
6386 as_bad_where (fixP->fx_file, fixP->fx_line,
6387 _("pc-relative address offset out of range"));
6388 insn = get_aarch64_insn (buf);
6389 insn |= encode_adr_imm (value);
6390 put_aarch64_insn (buf, insn);
6391 }
6392 break;
6393
6394 case BFD_RELOC_AARCH64_BRANCH19:
6395 if (fixP->fx_done || !seg->use_rela_p)
6396 {
6397 if (value & 3)
6398 as_bad_where (fixP->fx_file, fixP->fx_line,
6399 _("conditional branch target not word aligned"));
6400 if (signed_overflow (value, 21))
6401 as_bad_where (fixP->fx_file, fixP->fx_line,
6402 _("conditional branch out of range"));
6403 insn = get_aarch64_insn (buf);
6404 insn |= encode_cond_branch_ofs_19 (value >> 2);
6405 put_aarch64_insn (buf, insn);
6406 }
6407 break;
6408
6409 case BFD_RELOC_AARCH64_TSTBR14:
6410 if (fixP->fx_done || !seg->use_rela_p)
6411 {
6412 if (value & 3)
6413 as_bad_where (fixP->fx_file, fixP->fx_line,
6414 _("conditional branch target not word aligned"));
6415 if (signed_overflow (value, 16))
6416 as_bad_where (fixP->fx_file, fixP->fx_line,
6417 _("conditional branch out of range"));
6418 insn = get_aarch64_insn (buf);
6419 insn |= encode_tst_branch_ofs_14 (value >> 2);
6420 put_aarch64_insn (buf, insn);
6421 }
6422 break;
6423
6424 case BFD_RELOC_AARCH64_JUMP26:
6425 case BFD_RELOC_AARCH64_CALL26:
6426 if (fixP->fx_done || !seg->use_rela_p)
6427 {
6428 if (value & 3)
6429 as_bad_where (fixP->fx_file, fixP->fx_line,
6430 _("branch target not word aligned"));
6431 if (signed_overflow (value, 28))
6432 as_bad_where (fixP->fx_file, fixP->fx_line,
6433 _("branch out of range"));
6434 insn = get_aarch64_insn (buf);
6435 insn |= encode_branch_ofs_26 (value >> 2);
6436 put_aarch64_insn (buf, insn);
6437 }
6438 break;
6439
6440 case BFD_RELOC_AARCH64_MOVW_G0:
6441 case BFD_RELOC_AARCH64_MOVW_G0_S:
6442 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6443 scale = 0;
6444 goto movw_common;
6445 case BFD_RELOC_AARCH64_MOVW_G1:
6446 case BFD_RELOC_AARCH64_MOVW_G1_S:
6447 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6448 scale = 16;
6449 goto movw_common;
6450 case BFD_RELOC_AARCH64_MOVW_G2:
6451 case BFD_RELOC_AARCH64_MOVW_G2_S:
6452 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6453 scale = 32;
6454 goto movw_common;
6455 case BFD_RELOC_AARCH64_MOVW_G3:
6456 scale = 48;
6457 movw_common:
6458 if (fixP->fx_done || !seg->use_rela_p)
6459 {
6460 insn = get_aarch64_insn (buf);
6461
6462 if (!fixP->fx_done)
6463 {
6464 /* REL signed addend must fit in 16 bits */
6465 if (signed_overflow (value, 16))
6466 as_bad_where (fixP->fx_file, fixP->fx_line,
6467 _("offset out of range"));
6468 }
6469 else
6470 {
6471 /* Check for overflow and scale. */
6472 switch (fixP->fx_r_type)
6473 {
6474 case BFD_RELOC_AARCH64_MOVW_G0:
6475 case BFD_RELOC_AARCH64_MOVW_G1:
6476 case BFD_RELOC_AARCH64_MOVW_G2:
6477 case BFD_RELOC_AARCH64_MOVW_G3:
6478 if (unsigned_overflow (value, scale + 16))
6479 as_bad_where (fixP->fx_file, fixP->fx_line,
6480 _("unsigned value out of range"));
6481 break;
6482 case BFD_RELOC_AARCH64_MOVW_G0_S:
6483 case BFD_RELOC_AARCH64_MOVW_G1_S:
6484 case BFD_RELOC_AARCH64_MOVW_G2_S:
6485 /* NOTE: We can only come here with movz or movn. */
6486 if (signed_overflow (value, scale + 16))
6487 as_bad_where (fixP->fx_file, fixP->fx_line,
6488 _("signed value out of range"));
6489 if (value < 0)
6490 {
6491 /* Force use of MOVN. */
6492 value = ~value;
6493 insn = reencode_movzn_to_movn (insn);
6494 }
6495 else
6496 {
6497 /* Force use of MOVZ. */
6498 insn = reencode_movzn_to_movz (insn);
6499 }
6500 break;
6501 default:
6502 /* Unchecked relocations. */
6503 break;
6504 }
6505 value >>= scale;
6506 }
6507
6508 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6509 insn |= encode_movw_imm (value & 0xffff);
6510
6511 put_aarch64_insn (buf, insn);
6512 }
6513 break;
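/* Illustrative example: for "movz x0, #:abs_g1:sym" (sym being a placeholder
   symbol that resolves locally to 0x12340000), SCALE is 16, the value is
   range-checked against 32 bits and then shifted right by 16, so 0x1234 is
   inserted as the MOVW immediate.  */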
6514
6515 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6516 fixP->fx_r_type = (ilp32_p
6517 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6518 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6519 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6520 /* Should always be exported to object file, see
6521 aarch64_force_relocation(). */
6522 gas_assert (!fixP->fx_done);
6523 gas_assert (seg->use_rela_p);
6524 break;
6525
6526 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6527 fixP->fx_r_type = (ilp32_p
6528 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6529 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6530 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6531 /* Should always be exported to object file, see
6532 aarch64_force_relocation(). */
6533 gas_assert (!fixP->fx_done);
6534 gas_assert (seg->use_rela_p);
6535 break;
6536
6537 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6538 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6539 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6540 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6541 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6542 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6543 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6544 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6545 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6546 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6547 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6548 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6549 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6550 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6551 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6552 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6553 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6554 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6555 /* Should always be exported to object file, see
6556 aarch64_force_relocation(). */
6557 gas_assert (!fixP->fx_done);
6558 gas_assert (seg->use_rela_p);
6559 break;
6560
6561 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6562 /* Should always be exported to object file, see
6563 aarch64_force_relocation(). */
6564 fixP->fx_r_type = (ilp32_p
6565 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6566 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6567 gas_assert (!fixP->fx_done);
6568 gas_assert (seg->use_rela_p);
6569 break;
6570
6571 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6572 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6573 case BFD_RELOC_AARCH64_ADD_LO12:
6574 case BFD_RELOC_AARCH64_LDST8_LO12:
6575 case BFD_RELOC_AARCH64_LDST16_LO12:
6576 case BFD_RELOC_AARCH64_LDST32_LO12:
6577 case BFD_RELOC_AARCH64_LDST64_LO12:
6578 case BFD_RELOC_AARCH64_LDST128_LO12:
6579 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6580 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6581 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6582 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6583 /* Should always be exported to object file, see
6584 aarch64_force_relocation(). */
6585 gas_assert (!fixP->fx_done);
6586 gas_assert (seg->use_rela_p);
6587 break;
6588
6589 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6590 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6591 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6592 break;
6593
6594 default:
6595 as_bad_where (fixP->fx_file, fixP->fx_line,
6596 _("unexpected %s fixup"),
6597 bfd_get_reloc_code_name (fixP->fx_r_type));
6598 break;
6599 }
6600
6601 apply_fix_return:
6602 /* Free the allocated struct aarch64_inst.
6603 N.B. currently only a very limited number of fix-up types actually use
6604 this field, so the impact on performance should be minimal. */
6605 if (fixP->tc_fix_data.inst != NULL)
6606 free (fixP->tc_fix_data.inst);
6607
6608 return;
6609 }
6610
6611 /* Translate internal representation of relocation info to BFD target
6612 format. */
6613
6614 arelent *
6615 tc_gen_reloc (asection * section, fixS * fixp)
6616 {
6617 arelent *reloc;
6618 bfd_reloc_code_real_type code;
6619
6620 reloc = xmalloc (sizeof (arelent));
6621
6622 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6623 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6624 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6625
6626 if (fixp->fx_pcrel)
6627 {
6628 if (section->use_rela_p)
6629 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6630 else
6631 fixp->fx_offset = reloc->address;
6632 }
6633 reloc->addend = fixp->fx_offset;
6634
6635 code = fixp->fx_r_type;
6636 switch (code)
6637 {
6638 case BFD_RELOC_16:
6639 if (fixp->fx_pcrel)
6640 code = BFD_RELOC_16_PCREL;
6641 break;
6642
6643 case BFD_RELOC_32:
6644 if (fixp->fx_pcrel)
6645 code = BFD_RELOC_32_PCREL;
6646 break;
6647
6648 case BFD_RELOC_64:
6649 if (fixp->fx_pcrel)
6650 code = BFD_RELOC_64_PCREL;
6651 break;
6652
6653 default:
6654 break;
6655 }
6656
6657 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6658 if (reloc->howto == NULL)
6659 {
6660 as_bad_where (fixp->fx_file, fixp->fx_line,
6661 _
6662 ("cannot represent %s relocation in this object file format"),
6663 bfd_get_reloc_code_name (code));
6664 return NULL;
6665 }
6666
6667 return reloc;
6668 }
6669
6670 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6671
6672 void
6673 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6674 {
6675 bfd_reloc_code_real_type type;
6676 int pcrel = 0;
6677
6678 /* Pick a reloc.
6679 FIXME: @@ Should look at CPU word size. */
6680 switch (size)
6681 {
6682 case 1:
6683 type = BFD_RELOC_8;
6684 break;
6685 case 2:
6686 type = BFD_RELOC_16;
6687 break;
6688 case 4:
6689 type = BFD_RELOC_32;
6690 break;
6691 case 8:
6692 type = BFD_RELOC_64;
6693 break;
6694 default:
6695 as_bad (_("cannot do %u-byte relocation"), size);
6696 type = BFD_RELOC_UNUSED;
6697 break;
6698 }
6699
6700 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6701 }
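/* Illustrative example: ".word sym" reaches the function above with
   SIZE == 4 and produces a BFD_RELOC_32 fixup, while ".xword sym"
   (SIZE == 8) produces BFD_RELOC_64.  */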
6702
6703 int
6704 aarch64_force_relocation (struct fix *fixp)
6705 {
6706 switch (fixp->fx_r_type)
6707 {
6708 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6709 /* Perform these "immediate" internal relocations
6710 even if the symbol is extern or weak. */
6711 return 0;
6712
6713 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6714 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6715 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6716 /* Pseudo relocs that need to be fixed up according to
6717 ilp32_p. */
6718 return 0;
6719
6720 case BFD_RELOC_AARCH64_ADD_LO12:
6721 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6722 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6723 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6724 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6725 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6726 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6727 case BFD_RELOC_AARCH64_LDST128_LO12:
6728 case BFD_RELOC_AARCH64_LDST16_LO12:
6729 case BFD_RELOC_AARCH64_LDST32_LO12:
6730 case BFD_RELOC_AARCH64_LDST64_LO12:
6731 case BFD_RELOC_AARCH64_LDST8_LO12:
6732 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6733 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6734 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6735 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6736 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6737 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6738 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6739 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6740 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6741 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6742 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6743 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6744 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6745 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6746 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6747 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6748 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6749 /* Always leave these relocations for the linker. */
6750 return 1;
6751
6752 default:
6753 break;
6754 }
6755
6756 return generic_force_reloc (fixp);
6757 }
6758
6759 #ifdef OBJ_ELF
6760
6761 const char *
6762 elf64_aarch64_target_format (void)
6763 {
6764 if (target_big_endian)
6765 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
6766 else
6767 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
6768 }
6769
6770 void
6771 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6772 {
6773 elf_frob_symbol (symp, puntp);
6774 }
6775 #endif
6776
6777 /* MD interface: Finalization. */
6778
6779 /* A good place to do this, although this was probably not intended
6780 for this kind of use. We need to dump the literal pool before
6781 references are made to a null symbol pointer. */
6782
6783 void
6784 aarch64_cleanup (void)
6785 {
6786 literal_pool *pool;
6787
6788 for (pool = list_of_pools; pool; pool = pool->next)
6789 {
6790 /* Put it at the end of the relevant section. */
6791 subseg_set (pool->section, pool->sub_section);
6792 s_ltorg (0);
6793 }
6794 }
6795
6796 #ifdef OBJ_ELF
6797 /* Remove any excess mapping symbols generated for alignment frags in
6798 SEC. We may have created a mapping symbol before a zero byte
6799 alignment; remove it if there's a mapping symbol after the
6800 alignment. */
6801 static void
6802 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6803 void *dummy ATTRIBUTE_UNUSED)
6804 {
6805 segment_info_type *seginfo = seg_info (sec);
6806 fragS *fragp;
6807
6808 if (seginfo == NULL || seginfo->frchainP == NULL)
6809 return;
6810
6811 for (fragp = seginfo->frchainP->frch_root;
6812 fragp != NULL; fragp = fragp->fr_next)
6813 {
6814 symbolS *sym = fragp->tc_frag_data.last_map;
6815 fragS *next = fragp->fr_next;
6816
6817 /* Variable-sized frags have been converted to fixed size by
6818 this point. But if this was variable-sized to start with,
6819 there will be a fixed-size frag after it. So don't handle
6820 next == NULL. */
6821 if (sym == NULL || next == NULL)
6822 continue;
6823
6824 if (S_GET_VALUE (sym) < next->fr_address)
6825 /* Not at the end of this frag. */
6826 continue;
6827 know (S_GET_VALUE (sym) == next->fr_address);
6828
6829 do
6830 {
6831 if (next->tc_frag_data.first_map != NULL)
6832 {
6833 /* Next frag starts with a mapping symbol. Discard this
6834 one. */
6835 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6836 break;
6837 }
6838
6839 if (next->fr_next == NULL)
6840 {
6841 /* This mapping symbol is at the end of the section. Discard
6842 it. */
6843 know (next->fr_fix == 0 && next->fr_var == 0);
6844 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6845 break;
6846 }
6847
6848 /* As long as we have empty frags without any mapping symbols,
6849 keep looking. */
6850 /* If the next frag is non-empty and does not start with a
6851 mapping symbol, then this mapping symbol is required. */
6852 if (next->fr_address != next->fr_next->fr_address)
6853 break;
6854
6855 next = next->fr_next;
6856 }
6857 while (next != NULL);
6858 }
6859 }
6860 #endif
6861
6862 /* Adjust the symbol table. */
6863
6864 void
6865 aarch64_adjust_symtab (void)
6866 {
6867 #ifdef OBJ_ELF
6868 /* Remove any overlapping mapping symbols generated by alignment frags. */
6869 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
6870 /* Now do generic ELF adjustments. */
6871 elf_adjust_symtab ();
6872 #endif
6873 }
6874
6875 static void
6876 checked_hash_insert (struct hash_control *table, const char *key, void *value)
6877 {
6878 const char *hash_err;
6879
6880 hash_err = hash_insert (table, key, value);
6881 if (hash_err)
6882 printf ("Internal Error: Can't hash %s\n", key);
6883 }
6884
6885 static void
6886 fill_instruction_hash_table (void)
6887 {
6888 aarch64_opcode *opcode = aarch64_opcode_table;
6889
6890 while (opcode->name != NULL)
6891 {
6892 templates *templ, *new_templ;
6893 templ = hash_find (aarch64_ops_hsh, opcode->name);
6894
6895 new_templ = (templates *) xmalloc (sizeof (templates));
6896 new_templ->opcode = opcode;
6897 new_templ->next = NULL;
6898
6899 if (!templ)
6900 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
6901 else
6902 {
6903 new_templ->next = templ->next;
6904 templ->next = new_templ;
6905 }
6906 ++opcode;
6907 }
6908 }
6909
6910 static inline void
6911 convert_to_upper (char *dst, const char *src, size_t num)
6912 {
6913 unsigned int i;
6914 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
6915 *dst = TOUPPER (*src);
6916 *dst = '\0';
6917 }
6918
6919 /* Assume STR points to a lower-case string; allocate, convert and return
6920 the corresponding upper-case string. */
6921 static inline const char*
6922 get_upper_str (const char *str)
6923 {
6924 char *ret;
6925 size_t len = strlen (str);
6926 if ((ret = xmalloc (len + 1)) == NULL)
6927 abort ();
6928 convert_to_upper (ret, str, len);
6929 return ret;
6930 }
6931
6932 /* MD interface: Initialization. */
6933
6934 void
6935 md_begin (void)
6936 {
6937 unsigned mach;
6938 unsigned int i;
6939
6940 if ((aarch64_ops_hsh = hash_new ()) == NULL
6941 || (aarch64_cond_hsh = hash_new ()) == NULL
6942 || (aarch64_shift_hsh = hash_new ()) == NULL
6943 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
6944 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
6945 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
6946 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
6947 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
6948 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
6949 || (aarch64_reg_hsh = hash_new ()) == NULL
6950 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
6951 || (aarch64_nzcv_hsh = hash_new ()) == NULL
6952 || (aarch64_pldop_hsh = hash_new ()) == NULL)
6953 as_fatal (_("virtual memory exhausted"));
6954
6955 fill_instruction_hash_table ();
6956
6957 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
6958 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
6959 (void *) (aarch64_sys_regs + i));
6960
6961 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
6962 checked_hash_insert (aarch64_pstatefield_hsh,
6963 aarch64_pstatefields[i].name,
6964 (void *) (aarch64_pstatefields + i));
6965
6966 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
6967 checked_hash_insert (aarch64_sys_regs_ic_hsh,
6968 aarch64_sys_regs_ic[i].template,
6969 (void *) (aarch64_sys_regs_ic + i));
6970
6971 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
6972 checked_hash_insert (aarch64_sys_regs_dc_hsh,
6973 aarch64_sys_regs_dc[i].template,
6974 (void *) (aarch64_sys_regs_dc + i));
6975
6976 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
6977 checked_hash_insert (aarch64_sys_regs_at_hsh,
6978 aarch64_sys_regs_at[i].template,
6979 (void *) (aarch64_sys_regs_at + i));
6980
6981 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
6982 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
6983 aarch64_sys_regs_tlbi[i].template,
6984 (void *) (aarch64_sys_regs_tlbi + i));
6985
6986 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
6987 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
6988 (void *) (reg_names + i));
6989
6990 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
6991 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
6992 (void *) (nzcv_names + i));
6993
6994 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
6995 {
6996 const char *name = aarch64_operand_modifiers[i].name;
6997 checked_hash_insert (aarch64_shift_hsh, name,
6998 (void *) (aarch64_operand_modifiers + i));
6999 /* Also hash the name in the upper case. */
7000 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7001 (void *) (aarch64_operand_modifiers + i));
7002 }
7003
7004 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7005 {
7006 unsigned int j;
7007 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7008 the same condition code. */
7009 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7010 {
7011 const char *name = aarch64_conds[i].names[j];
7012 if (name == NULL)
7013 break;
7014 checked_hash_insert (aarch64_cond_hsh, name,
7015 (void *) (aarch64_conds + i));
7016 /* Also hash the name in the upper case. */
7017 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7018 (void *) (aarch64_conds + i));
7019 }
7020 }
7021
7022 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7023 {
7024 const char *name = aarch64_barrier_options[i].name;
7025 /* Skip xx00 - the unallocated values of option. */
7026 if ((i & 0x3) == 0)
7027 continue;
7028 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7029 (void *) (aarch64_barrier_options + i));
7030 /* Also hash the name in the upper case. */
7031 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7032 (void *) (aarch64_barrier_options + i));
7033 }
7034
7035 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7036 {
7037 const char* name = aarch64_prfops[i].name;
7038 /* Skip the unallocated hint encodings. */
7039 if (name == NULL)
7040 continue;
7041 checked_hash_insert (aarch64_pldop_hsh, name,
7042 (void *) (aarch64_prfops + i));
7043 /* Also hash the name in the upper case. */
7044 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7045 (void *) (aarch64_prfops + i));
7046 }
7047
7048 /* Set the cpu variant based on the command-line options. */
7049 if (!mcpu_cpu_opt)
7050 mcpu_cpu_opt = march_cpu_opt;
7051
7052 if (!mcpu_cpu_opt)
7053 mcpu_cpu_opt = &cpu_default;
7054
7055 cpu_variant = *mcpu_cpu_opt;
7056
7057 /* Record the CPU type. */
7058 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7059
7060 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7061 }
7062
7063 /* Command line processing. */
7064
7065 const char *md_shortopts = "m:";
7066
7067 #ifdef AARCH64_BI_ENDIAN
7068 #define OPTION_EB (OPTION_MD_BASE + 0)
7069 #define OPTION_EL (OPTION_MD_BASE + 1)
7070 #else
7071 #if TARGET_BYTES_BIG_ENDIAN
7072 #define OPTION_EB (OPTION_MD_BASE + 0)
7073 #else
7074 #define OPTION_EL (OPTION_MD_BASE + 1)
7075 #endif
7076 #endif
7077
7078 struct option md_longopts[] = {
7079 #ifdef OPTION_EB
7080 {"EB", no_argument, NULL, OPTION_EB},
7081 #endif
7082 #ifdef OPTION_EL
7083 {"EL", no_argument, NULL, OPTION_EL},
7084 #endif
7085 {NULL, no_argument, NULL, 0}
7086 };
7087
7088 size_t md_longopts_size = sizeof (md_longopts);
7089
7090 struct aarch64_option_table
7091 {
7092 char *option; /* Option name to match. */
7093 char *help; /* Help information. */
7094 int *var; /* Variable to change. */
7095 int value; /* What to change it to. */
7096 char *deprecated; /* If non-null, print this message. */
7097 };
7098
7099 static struct aarch64_option_table aarch64_opts[] = {
7100 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7101 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7102 NULL},
7103 #ifdef DEBUG_AARCH64
7104 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7105 #endif /* DEBUG_AARCH64 */
7106 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7107 NULL},
7108 {NULL, NULL, NULL, 0, NULL}
7109 };
7110
7111 struct aarch64_cpu_option_table
7112 {
7113 char *name;
7114 const aarch64_feature_set value;
7115 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7116 case. */
7117 const char *canonical_name;
7118 };
7119
7120 /* This list should, at a minimum, contain all the cpu names
7121 recognized by GCC. */
7122 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7123 {"all", AARCH64_ANY, NULL},
7124 {"cortex-a53", AARCH64_ARCH_V8, "Cortex-A53"},
7125 {"cortex-a57", AARCH64_ARCH_V8, "Cortex-A57"},
7126 {"generic", AARCH64_ARCH_V8, NULL},
7127
7128 /* These two are example CPUs supported in GCC, once we have real
7129 CPUs they will be removed. */
7130 {"example-1", AARCH64_ARCH_V8, NULL},
7131 {"example-2", AARCH64_ARCH_V8, NULL},
7132
7133 {NULL, AARCH64_ARCH_NONE, NULL}
7134 };
7135
7136 struct aarch64_arch_option_table
7137 {
7138 char *name;
7139 const aarch64_feature_set value;
7140 };
7141
7142 /* This list should, at a minimum, contain all the architecture names
7143 recognized by GCC. */
7144 static const struct aarch64_arch_option_table aarch64_archs[] = {
7145 {"all", AARCH64_ANY},
7146 {"armv8-a", AARCH64_ARCH_V8},
7147 {NULL, AARCH64_ARCH_NONE}
7148 };
7149
7150 /* ISA extensions. */
7151 struct aarch64_option_cpu_value_table
7152 {
7153 char *name;
7154 const aarch64_feature_set value;
7155 };
7156
7157 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7158 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7159 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7160 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7161 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7162 {NULL, AARCH64_ARCH_NONE}
7163 };
7164
7165 struct aarch64_long_option_table
7166 {
7167 char *option; /* Substring to match. */
7168 char *help; /* Help information. */
7169 int (*func) (char *subopt); /* Function to decode sub-option. */
7170 char *deprecated; /* If non-null, print this message. */
7171 };
7172
7173 static int
7174 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p)
7175 {
7176 /* We insist on extensions being added before being removed. We achieve
7177 this by using the ADDING_VALUE variable to indicate whether we are
7178 adding an extension (1) or removing it (0) and only allowing it to
7179 change in the order -1 -> 1 -> 0. */
7180 int adding_value = -1;
7181 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7182
7183 /* Copy the feature set, so that we can modify it. */
7184 *ext_set = **opt_p;
7185 *opt_p = ext_set;
7186
7187 while (str != NULL && *str != 0)
7188 {
7189 const struct aarch64_option_cpu_value_table *opt;
7190 char *ext;
7191 int optlen;
7192
7193 if (*str != '+')
7194 {
7195 as_bad (_("invalid architectural extension"));
7196 return 0;
7197 }
7198
7199 str++;
7200 ext = strchr (str, '+');
7201
7202 if (ext != NULL)
7203 optlen = ext - str;
7204 else
7205 optlen = strlen (str);
7206
7207 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7208 {
7209 if (adding_value != 0)
7210 adding_value = 0;
7211 optlen -= 2;
7212 str += 2;
7213 }
7214 else if (optlen > 0)
7215 {
7216 if (adding_value == -1)
7217 adding_value = 1;
7218 else if (adding_value != 1)
7219 {
7220 as_bad (_("must specify extensions to add before specifying "
7221 "those to remove"));
7222 return 0;
7223 }
7224 }
7225
7226 if (optlen == 0)
7227 {
7228 as_bad (_("missing architectural extension"));
7229 return 0;
7230 }
7231
7232 gas_assert (adding_value != -1);
7233
7234 for (opt = aarch64_features; opt->name != NULL; opt++)
7235 if (strncmp (opt->name, str, optlen) == 0)
7236 {
7237 /* Add or remove the extension. */
7238 if (adding_value)
7239 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7240 else
7241 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7242 break;
7243 }
7244
7245 if (opt->name == NULL)
7246 {
7247 as_bad (_("unknown architectural extension `%s'"), str);
7248 return 0;
7249 }
7250
7251 str = ext;
7252 }
7253
7254 return 1;
7255 }
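/* Illustrative behaviour of the parser above: "+crc+nofp" first merges the
   CRC feature bits and then clears the FP bits, whereas "+nofp+crc" is
   rejected because extensions to add must be specified before those to
   remove.  */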
7256
7257 static int
7258 aarch64_parse_cpu (char *str)
7259 {
7260 const struct aarch64_cpu_option_table *opt;
7261 char *ext = strchr (str, '+');
7262 size_t optlen;
7263
7264 if (ext != NULL)
7265 optlen = ext - str;
7266 else
7267 optlen = strlen (str);
7268
7269 if (optlen == 0)
7270 {
7271 as_bad (_("missing cpu name `%s'"), str);
7272 return 0;
7273 }
7274
7275 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7276 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7277 {
7278 mcpu_cpu_opt = &opt->value;
7279 if (ext != NULL)
7280 return aarch64_parse_features (ext, &mcpu_cpu_opt);
7281
7282 return 1;
7283 }
7284
7285 as_bad (_("unknown cpu `%s'"), str);
7286 return 0;
7287 }
7288
7289 static int
7290 aarch64_parse_arch (char *str)
7291 {
7292 const struct aarch64_arch_option_table *opt;
7293 char *ext = strchr (str, '+');
7294 size_t optlen;
7295
7296 if (ext != NULL)
7297 optlen = ext - str;
7298 else
7299 optlen = strlen (str);
7300
7301 if (optlen == 0)
7302 {
7303 as_bad (_("missing architecture name `%s'"), str);
7304 return 0;
7305 }
7306
7307 for (opt = aarch64_archs; opt->name != NULL; opt++)
7308 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7309 {
7310 march_cpu_opt = &opt->value;
7311 if (ext != NULL)
7312 return aarch64_parse_features (ext, &march_cpu_opt);
7313
7314 return 1;
7315 }
7316
7317 as_bad (_("unknown architecture `%s'\n"), str);
7318 return 0;
7319 }
7320
7321 /* ABIs. */
7322 struct aarch64_option_abi_value_table
7323 {
7324 char *name;
7325 enum aarch64_abi_type value;
7326 };
7327
7328 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7329 {"ilp32", AARCH64_ABI_ILP32},
7330 {"lp64", AARCH64_ABI_LP64},
7331 {NULL, 0}
7332 };
7333
7334 static int
7335 aarch64_parse_abi (char *str)
7336 {
7337 const struct aarch64_option_abi_value_table *opt;
7338 size_t optlen = strlen (str);
7339
7340 if (optlen == 0)
7341 {
7342 as_bad (_("missing abi name `%s'"), str);
7343 return 0;
7344 }
7345
7346 for (opt = aarch64_abis; opt->name != NULL; opt++)
7347 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7348 {
7349 aarch64_abi = opt->value;
7350 return 1;
7351 }
7352
7353 as_bad (_("unknown abi `%s'\n"), str);
7354 return 0;
7355 }
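/* For example, "-mabi=ilp32" selects AARCH64_ABI_ILP32 and "-mabi=lp64"
   selects AARCH64_ABI_LP64; any other ABI name is rejected by the function
   above.  */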
7356
7357 static struct aarch64_long_option_table aarch64_long_opts[] = {
7358 #ifdef OBJ_ELF
7359 {"mabi=", N_("<abi name>\t specify ABI <abi name>"),
7360 aarch64_parse_abi, NULL},
7361 #endif /* OBJ_ELF */
7362 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7363 aarch64_parse_cpu, NULL},
7364 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7365 aarch64_parse_arch, NULL},
7366 {NULL, NULL, 0, NULL}
7367 };
7368
7369 int
7370 md_parse_option (int c, char *arg)
7371 {
7372 struct aarch64_option_table *opt;
7373 struct aarch64_long_option_table *lopt;
7374
7375 switch (c)
7376 {
7377 #ifdef OPTION_EB
7378 case OPTION_EB:
7379 target_big_endian = 1;
7380 break;
7381 #endif
7382
7383 #ifdef OPTION_EL
7384 case OPTION_EL:
7385 target_big_endian = 0;
7386 break;
7387 #endif
7388
7389 case 'a':
7390 /* Listing option. Just ignore these, we don't support additional
7391 ones. */
7392 return 0;
7393
7394 default:
7395 for (opt = aarch64_opts; opt->option != NULL; opt++)
7396 {
7397 if (c == opt->option[0]
7398 && ((arg == NULL && opt->option[1] == 0)
7399 || streq (arg, opt->option + 1)))
7400 {
7401 /* If the option is deprecated, tell the user. */
7402 if (opt->deprecated != NULL)
7403 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7404 arg ? arg : "", _(opt->deprecated));
7405
7406 if (opt->var != NULL)
7407 *opt->var = opt->value;
7408
7409 return 1;
7410 }
7411 }
7412
7413 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7414 {
7415 /* These options are expected to have an argument. */
7416 if (c == lopt->option[0]
7417 && arg != NULL
7418 && strncmp (arg, lopt->option + 1,
7419 strlen (lopt->option + 1)) == 0)
7420 {
7421 /* If the option is deprecated, tell the user. */
7422 if (lopt->deprecated != NULL)
7423 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7424 _(lopt->deprecated));
7425
7426 /* Call the sub-option parser. */
7427 return lopt->func (arg + strlen (lopt->option) - 1);
7428 }
7429 }
7430
7431 return 0;
7432 }
7433
7434 return 1;
7435 }
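/* Worked example (illustrative): for "-mabi=ilp32", getopt hands
   md_parse_option c == 'm' and arg == "abi=ilp32".  The long-option loop
   matches "mabi=", and arg + strlen ("mabi=") - 1 points at "ilp32", which
   is then passed to aarch64_parse_abi.  */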
7436
7437 void
7438 md_show_usage (FILE * fp)
7439 {
7440 struct aarch64_option_table *opt;
7441 struct aarch64_long_option_table *lopt;
7442
7443 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7444
7445 for (opt = aarch64_opts; opt->option != NULL; opt++)
7446 if (opt->help != NULL)
7447 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7448
7449 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7450 if (lopt->help != NULL)
7451 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7452
7453 #ifdef OPTION_EB
7454 fprintf (fp, _("\
7455 -EB assemble code for a big-endian cpu\n"));
7456 #endif
7457
7458 #ifdef OPTION_EL
7459 fprintf (fp, _("\
7460 -EL assemble code for a little-endian cpu\n"));
7461 #endif
7462 }
7463
7464 /* Parse a .cpu directive. */
7465
7466 static void
7467 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7468 {
7469 const struct aarch64_cpu_option_table *opt;
7470 char saved_char;
7471 char *name;
7472 char *ext;
7473 size_t optlen;
7474
7475 name = input_line_pointer;
7476 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7477 input_line_pointer++;
7478 saved_char = *input_line_pointer;
7479 *input_line_pointer = 0;
7480
7481 ext = strchr (name, '+');
7482
7483 if (ext != NULL)
7484 optlen = ext - name;
7485 else
7486 optlen = strlen (name);
7487
7488 /* Skip the first "all" entry. */
7489 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7490 if (strlen (opt->name) == optlen
7491 && strncmp (name, opt->name, optlen) == 0)
7492 {
7493 mcpu_cpu_opt = &opt->value;
7494 if (ext != NULL)
7495 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7496 return;
7497
7498 cpu_variant = *mcpu_cpu_opt;
7499
7500 *input_line_pointer = saved_char;
7501 demand_empty_rest_of_line ();
7502 return;
7503 }
7504 as_bad (_("unknown cpu `%s'"), name);
7505 *input_line_pointer = saved_char;
7506 ignore_rest_of_line ();
7507 }
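/* Illustrative example: a ".cpu cortex-a57+crypto" directive selects the
   Cortex-A57 entry from aarch64_cpus above and then merges in the crypto
   feature bits via aarch64_parse_features.  */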
7508
7509
7510 /* Parse a .arch directive. */
7511
7512 static void
7513 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7514 {
7515 const struct aarch64_arch_option_table *opt;
7516 char saved_char;
7517 char *name;
7518 char *ext;
7519 size_t optlen;
7520
7521 name = input_line_pointer;
7522 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7523 input_line_pointer++;
7524 saved_char = *input_line_pointer;
7525 *input_line_pointer = 0;
7526
7527 ext = strchr (name, '+');
7528
7529 if (ext != NULL)
7530 optlen = ext - name;
7531 else
7532 optlen = strlen (name);
7533
7534 /* Skip the first "all" entry. */
7535 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7536 if (strlen (opt->name) == optlen
7537 && strncmp (name, opt->name, optlen) == 0)
7538 {
7539 mcpu_cpu_opt = &opt->value;
7540 if (ext != NULL)
7541 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7542 return;
7543
7544 cpu_variant = *mcpu_cpu_opt;
7545
7546 *input_line_pointer = saved_char;
7547 demand_empty_rest_of_line ();
7548 return;
7549 }
7550
7551 as_bad (_("unknown architecture `%s'\n"), name);
7552 *input_line_pointer = saved_char;
7553 ignore_rest_of_line ();
7554 }
7555
7556 /* Copy symbol information. */
7557
7558 void
7559 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7560 {
7561 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7562 }