1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2019 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
39 #ifdef HAVE_SYS_PARAM_H
40 #include <sys/param.h>
43 #define INT_MAX (int) (((unsigned) (-1)) >> 1)
47 #ifndef REGISTER_WARNINGS
48 #define REGISTER_WARNINGS 1
51 #ifndef INFER_ADDR_PREFIX
52 #define INFER_ADDR_PREFIX 1
56 #define DEFAULT_ARCH "i386"
61 #define INLINE __inline__
67 /* Prefixes will be emitted in the order defined below.
68 WAIT_PREFIX must be the first prefix since FWAIT really is an
69 instruction, and so must come before any prefixes.
70 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
71 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
77 #define HLE_PREFIX REP_PREFIX
78 #define BND_PREFIX REP_PREFIX
80 #define REX_PREFIX 6 /* must come last. */
81 #define MAX_PREFIXES 7 /* max prefixes per opcode */
83 /* we define the syntax here (modulo base,index,scale syntax) */
84 #define REGISTER_PREFIX '%'
85 #define IMMEDIATE_PREFIX '$'
86 #define ABSOLUTE_PREFIX '*'
88 /* these are the instruction mnemonic suffixes in AT&T syntax or
89 memory operand size in Intel syntax. */
90 #define WORD_MNEM_SUFFIX 'w'
91 #define BYTE_MNEM_SUFFIX 'b'
92 #define SHORT_MNEM_SUFFIX 's'
93 #define LONG_MNEM_SUFFIX 'l'
94 #define QWORD_MNEM_SUFFIX 'q'
95 /* Intel Syntax. Use a non-ascii letter since it never appears
97 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
99 #define END_OF_INSN '\0'
101 /* This matches the C -> StaticRounding alias in the opcode table. */
102 #define commutative staticrounding
105 'templates' is for grouping together 'template' structures for opcodes
106 of the same name. This is only used for storing the insns in the grand
107 ole hash table of insns.
108 The templates themselves start at START and range up to (but not including)
113 const insn_template
*start
;
114 const insn_template
*end
;
118 /* 386 operand encoding bytes: see 386 book for details of this. */
121 unsigned int regmem
; /* codes register or memory operand */
122 unsigned int reg
; /* codes register operand (or extended opcode) */
123 unsigned int mode
; /* how to interpret regmem & reg */
127 /* x86-64 extension prefix. */
128 typedef int rex_byte
;
130 /* 386 opcode byte to code indirect addressing. */
139 /* x86 arch names, types and features */
142 const char *name
; /* arch name */
143 unsigned int len
; /* arch string length */
144 enum processor_type type
; /* arch type */
145 i386_cpu_flags flags
; /* cpu feature flags */
146 unsigned int skip
; /* show_arch should skip this. */
150 /* Used to turn off indicated flags. */
153 const char *name
; /* arch name */
154 unsigned int len
; /* arch string length */
155 i386_cpu_flags flags
; /* cpu feature flags */
159 static void update_code_flag (int, int);
160 static void set_code_flag (int);
161 static void set_16bit_gcc_code_flag (int);
162 static void set_intel_syntax (int);
163 static void set_intel_mnemonic (int);
164 static void set_allow_index_reg (int);
165 static void set_check (int);
166 static void set_cpu_arch (int);
168 static void pe_directive_secrel (int);
170 static void signed_cons (int);
171 static char *output_invalid (int c
);
172 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
174 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
176 static int i386_att_operand (char *);
177 static int i386_intel_operand (char *, int);
178 static int i386_intel_simplify (expressionS
*);
179 static int i386_intel_parse_name (const char *, expressionS
*);
180 static const reg_entry
*parse_register (char *, char **);
181 static char *parse_insn (char *, char *);
182 static char *parse_operands (char *, const char *);
183 static void swap_operands (void);
184 static void swap_2_operands (int, int);
185 static void optimize_imm (void);
186 static void optimize_disp (void);
187 static const insn_template
*match_template (char);
188 static int check_string (void);
189 static int process_suffix (void);
190 static int check_byte_reg (void);
191 static int check_long_reg (void);
192 static int check_qword_reg (void);
193 static int check_word_reg (void);
194 static int finalize_imm (void);
195 static int process_operands (void);
196 static const seg_entry
*build_modrm_byte (void);
197 static void output_insn (void);
198 static void output_imm (fragS
*, offsetT
);
199 static void output_disp (fragS
*, offsetT
);
201 static void s_bss (int);
203 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
204 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
206 /* GNU_PROPERTY_X86_ISA_1_USED. */
207 static unsigned int x86_isa_1_used
;
208 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
209 static unsigned int x86_feature_2_used
;
210 /* Generate x86 used ISA and feature properties. */
211 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
214 static const char *default_arch
= DEFAULT_ARCH
;
216 /* This struct describes rounding control and SAE in the instruction. */
230 static struct RC_Operation rc_op
;
232 /* The struct describes masking, applied to OPERAND in the instruction.
233 MASK is a pointer to the corresponding mask register. ZEROING tells
234 whether merging or zeroing mask is used. */
235 struct Mask_Operation
237 const reg_entry
*mask
;
238 unsigned int zeroing
;
239 /* The operand where this operation is associated. */
243 static struct Mask_Operation mask_op
;
245 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
247 struct Broadcast_Operation
249 /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}. */
252 /* Index of broadcasted operand. */
255 /* Number of bytes to broadcast. */
259 static struct Broadcast_Operation broadcast_op
;
264 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
265 unsigned char bytes
[4];
267 /* Destination or source register specifier. */
268 const reg_entry
*register_specifier
;
271 /* 'md_assemble ()' gathers together information and puts it into a
278 const reg_entry
*regs
;
283 operand_size_mismatch
,
284 operand_type_mismatch
,
285 register_type_mismatch
,
286 number_of_operands_mismatch
,
287 invalid_instruction_suffix
,
289 unsupported_with_intel_mnemonic
,
292 invalid_vsib_address
,
293 invalid_vector_register_set
,
294 unsupported_vector_index_register
,
295 unsupported_broadcast
,
298 mask_not_on_destination
,
301 rc_sae_operand_not_last_imm
,
302 invalid_register_operand
,
307 /* TM holds the template for the insn were currently assembling. */
310 /* SUFFIX holds the instruction size suffix for byte, word, dword
311 or qword, if given. */
314 /* OPERANDS gives the number of given operands. */
315 unsigned int operands
;
317 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
318 of given register, displacement, memory operands and immediate
320 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
322 /* TYPES [i] is the type (see above #defines) which tells us how to
323 use OP[i] for the corresponding operand. */
324 i386_operand_type types
[MAX_OPERANDS
];
326 /* Displacement expression, immediate expression, or register for each
328 union i386_op op
[MAX_OPERANDS
];
330 /* Flags for operands. */
331 unsigned int flags
[MAX_OPERANDS
];
332 #define Operand_PCrel 1
333 #define Operand_Mem 2
335 /* Relocation type for operand */
336 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
338 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
339 the base index byte below. */
340 const reg_entry
*base_reg
;
341 const reg_entry
*index_reg
;
342 unsigned int log2_scale_factor
;
344 /* SEG gives the seg_entries of this insn. They are zero unless
345 explicit segment overrides are given. */
346 const seg_entry
*seg
[2];
348 /* Copied first memory operand string, for re-checking. */
351 /* PREFIX holds all the given prefix opcodes (usually null).
352 PREFIXES is the number of prefix opcodes. */
353 unsigned int prefixes
;
354 unsigned char prefix
[MAX_PREFIXES
];
356 /* Has MMX register operands. */
357 bfd_boolean has_regmmx
;
359 /* Has XMM register operands. */
360 bfd_boolean has_regxmm
;
362 /* Has YMM register operands. */
363 bfd_boolean has_regymm
;
365 /* Has ZMM register operands. */
366 bfd_boolean has_regzmm
;
368 /* RM and SIB are the modrm byte and the sib byte where the
369 addressing modes of this insn are encoded. */
376 /* Masking attributes. */
377 struct Mask_Operation
*mask
;
379 /* Rounding control and SAE attributes. */
380 struct RC_Operation
*rounding
;
382 /* Broadcasting attributes. */
383 struct Broadcast_Operation
*broadcast
;
385 /* Compressed disp8*N attribute. */
386 unsigned int memshift
;
388 /* Prefer load or store in encoding. */
391 dir_encoding_default
= 0,
397 /* Prefer 8bit or 32bit displacement in encoding. */
400 disp_encoding_default
= 0,
405 /* Prefer the REX byte in encoding. */
406 bfd_boolean rex_encoding
;
408 /* Disable instruction size optimization. */
409 bfd_boolean no_optimize
;
411 /* How to encode vector instructions. */
414 vex_encoding_default
= 0,
421 const char *rep_prefix
;
424 const char *hle_prefix
;
426 /* Have BND prefix. */
427 const char *bnd_prefix
;
429 /* Have NOTRACK prefix. */
430 const char *notrack_prefix
;
433 enum i386_error error
;
436 typedef struct _i386_insn i386_insn
;
438 /* Link RC type with corresponding string, that'll be looked for in
447 static const struct RC_name RC_NamesTable
[] =
449 { rne
, STRING_COMMA_LEN ("rn-sae") },
450 { rd
, STRING_COMMA_LEN ("rd-sae") },
451 { ru
, STRING_COMMA_LEN ("ru-sae") },
452 { rz
, STRING_COMMA_LEN ("rz-sae") },
453 { saeonly
, STRING_COMMA_LEN ("sae") },
456 /* List of chars besides those in app.c:symbol_chars that can start an
457 operand. Used to prevent the scrubber eating vital white-space. */
458 const char extra_symbol_chars
[] = "*%-([{}"
467 #if (defined (TE_I386AIX) \
468 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
469 && !defined (TE_GNU) \
470 && !defined (TE_LINUX) \
471 && !defined (TE_NACL) \
472 && !defined (TE_FreeBSD) \
473 && !defined (TE_DragonFly) \
474 && !defined (TE_NetBSD)))
475 /* This array holds the chars that always start a comment. If the
476 pre-processor is disabled, these aren't very useful. The option
477 --divide will remove '/' from this list. */
478 const char *i386_comment_chars
= "#/";
479 #define SVR4_COMMENT_CHARS 1
480 #define PREFIX_SEPARATOR '\\'
483 const char *i386_comment_chars
= "#";
484 #define PREFIX_SEPARATOR '/'
487 /* This array holds the chars that only start a comment at the beginning of
488 a line. If the line seems to have the form '# 123 filename'
489 .line and .file directives will appear in the pre-processed output.
490 Note that input_file.c hand checks for '#' at the beginning of the
491 first line of the input file. This is because the compiler outputs
492 #NO_APP at the beginning of its output.
493 Also note that comments started like this one will always work if
494 '/' isn't otherwise defined. */
495 const char line_comment_chars
[] = "#/";
497 const char line_separator_chars
[] = ";";
499 /* Chars that can be used to separate mant from exp in floating point
501 const char EXP_CHARS
[] = "eE";
503 /* Chars that mean this number is a floating point constant
506 const char FLT_CHARS
[] = "fFdDxX";
508 /* Tables for lexical analysis. */
509 static char mnemonic_chars
[256];
510 static char register_chars
[256];
511 static char operand_chars
[256];
512 static char identifier_chars
[256];
513 static char digit_chars
[256];
515 /* Lexical macros. */
516 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
517 #define is_operand_char(x) (operand_chars[(unsigned char) x])
518 #define is_register_char(x) (register_chars[(unsigned char) x])
519 #define is_space_char(x) ((x) == ' ')
520 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
521 #define is_digit_char(x) (digit_chars[(unsigned char) x])
523 /* All non-digit non-letter characters that may occur in an operand. */
524 static char operand_special_chars
[] = "%$-+(,)*._~/<>|&^!:[@]";
526 /* md_assemble() always leaves the strings it's passed unaltered. To
527 effect this we maintain a stack of saved characters that we've smashed
528 with '\0's (indicating end of strings for various sub-fields of the
529 assembler instruction). */
530 static char save_stack
[32];
531 static char *save_stack_p
;
532 #define END_STRING_AND_SAVE(s) \
533 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
534 #define RESTORE_END_STRING(s) \
535 do { *(s) = *--save_stack_p; } while (0)
537 /* The instruction we're assembling. */
540 /* Possible templates for current insn. */
541 static const templates
*current_templates
;
543 /* Per instruction expressionS buffers: max displacements & immediates. */
544 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
545 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
547 /* Current operand we are working on. */
548 static int this_operand
= -1;
550 /* We support four different modes. FLAG_CODE variable is used to distinguish
558 static enum flag_code flag_code
;
559 static unsigned int object_64bit
;
560 static unsigned int disallow_64bit_reloc
;
561 static int use_rela_relocations
= 0;
563 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
564 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
565 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
567 /* The ELF ABI to use. */
575 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
578 #if defined (TE_PE) || defined (TE_PEP)
579 /* Use big object file format. */
580 static int use_big_obj
= 0;
583 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
584 /* 1 if generating code for a shared library. */
585 static int shared
= 0;
588 /* 1 for intel syntax,
590 static int intel_syntax
= 0;
592 /* 1 for Intel64 ISA,
596 /* 1 for intel mnemonic,
597 0 if att mnemonic. */
598 static int intel_mnemonic
= !SYSV386_COMPAT
;
600 /* 1 if pseudo registers are permitted. */
601 static int allow_pseudo_reg
= 0;
603 /* 1 if register prefix % not required. */
604 static int allow_naked_reg
= 0;
606 /* 1 if the assembler should add BND prefix for all control-transferring
607 instructions supporting it, even if this prefix wasn't specified
609 static int add_bnd_prefix
= 0;
611 /* 1 if pseudo index register, eiz/riz, is allowed . */
612 static int allow_index_reg
= 0;
614 /* 1 if the assembler should ignore LOCK prefix, even if it was
615 specified explicitly. */
616 static int omit_lock_prefix
= 0;
618 /* 1 if the assembler should encode lfence, mfence, and sfence as
619 "lock addl $0, (%{re}sp)". */
620 static int avoid_fence
= 0;
622 /* 1 if the assembler should generate relax relocations. */
624 static int generate_relax_relocations
625 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
627 static enum check_kind
633 sse_check
, operand_check
= check_warning
;
636 1. Clear the REX_W bit with register operand if possible.
637 2. Above plus use 128bit vector instruction to clear the full vector
640 static int optimize
= 0;
643 1. Clear the REX_W bit with register operand if possible.
644 2. Above plus use 128bit vector instruction to clear the full vector
646 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
649 static int optimize_for_space
= 0;
651 /* Register prefix used for error message. */
652 static const char *register_prefix
= "%";
654 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
655 leave, push, and pop instructions so that gcc has the same stack
656 frame as in 32 bit mode. */
657 static char stackop_size
= '\0';
659 /* Non-zero to optimize code alignment. */
660 int optimize_align_code
= 1;
662 /* Non-zero to quieten some warnings. */
663 static int quiet_warnings
= 0;
666 static const char *cpu_arch_name
= NULL
;
667 static char *cpu_sub_arch_name
= NULL
;
669 /* CPU feature flags. */
670 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
672 /* If we have selected a cpu we are generating instructions for. */
673 static int cpu_arch_tune_set
= 0;
675 /* Cpu we are generating instructions for. */
676 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
678 /* CPU feature flags of cpu we are generating instructions for. */
679 static i386_cpu_flags cpu_arch_tune_flags
;
681 /* CPU instruction set architecture used. */
682 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
684 /* CPU feature flags of instruction set architecture used. */
685 i386_cpu_flags cpu_arch_isa_flags
;
687 /* If set, conditional jumps are not automatically promoted to handle
688 larger than a byte offset. */
689 static unsigned int no_cond_jump_promotion
= 0;
691 /* Encode SSE instructions with VEX prefix. */
692 static unsigned int sse2avx
;
694 /* Encode scalar AVX instructions with specific vector length. */
701 /* Encode VEX WIG instructions with specific vex.w. */
708 /* Encode scalar EVEX LIG instructions with specific vector length. */
716 /* Encode EVEX WIG instructions with specific evex.w. */
723 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
724 static enum rc_type evexrcig
= rne
;
726 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
727 static symbolS
*GOT_symbol
;
729 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
730 unsigned int x86_dwarf2_return_column
;
732 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
733 int x86_cie_data_alignment
;
735 /* Interface to relax_segment.
736 There are 3 major relax states for 386 jump insns because the
737 different types of jumps add different sizes to frags when we're
738 figuring out what sort of jump to choose to reach a given label. */
741 #define UNCOND_JUMP 0
743 #define COND_JUMP86 2
748 #define SMALL16 (SMALL | CODE16)
750 #define BIG16 (BIG | CODE16)
754 #define INLINE __inline__
760 #define ENCODE_RELAX_STATE(type, size) \
761 ((relax_substateT) (((type) << 2) | (size)))
762 #define TYPE_FROM_RELAX_STATE(s) \
764 #define DISP_SIZE_FROM_RELAX_STATE(s) \
765 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
767 /* This table is used by relax_frag to promote short jumps to long
768 ones where necessary. SMALL (short) jumps may be promoted to BIG
769 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
770 don't allow a short jump in a 32 bit code segment to be promoted to
771 a 16 bit offset jump because it's slower (requires data size
772 prefix), and doesn't work, unless the destination is in the bottom
773 64k of the code segment (The top 16 bits of eip are zeroed). */
775 const relax_typeS md_relax_table
[] =
778 1) most positive reach of this state,
779 2) most negative reach of this state,
780 3) how many bytes this mode will have in the variable part of the frag
781 4) which index into the table to try if we can't fit into this one. */
783 /* UNCOND_JUMP states. */
784 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
785 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
786 /* dword jmp adds 4 bytes to frag:
787 0 extra opcode bytes, 4 displacement bytes. */
789 /* word jmp adds 2 bytes to frag:
790 0 extra opcode bytes, 2 displacement bytes. */
793 /* COND_JUMP states. */
794 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
795 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
796 /* dword conditionals adds 5 bytes to frag:
797 1 extra opcode byte, 4 displacement bytes. */
799 /* word conditionals add 3 bytes to frag:
800 1 extra opcode byte, 2 displacement bytes. */
803 /* COND_JUMP86 states. */
804 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
805 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
806 /* dword conditionals adds 5 bytes to frag:
807 1 extra opcode byte, 4 displacement bytes. */
809 /* word conditionals add 4 bytes to frag:
810 1 displacement byte and a 3 byte long branch insn. */
814 static const arch_entry cpu_arch
[] =
816 /* Do not replace the first two entries - i386_target_format()
817 relies on them being there in this order. */
818 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
819 CPU_GENERIC32_FLAGS
, 0 },
820 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
821 CPU_GENERIC64_FLAGS
, 0 },
822 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
824 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
826 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
828 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
830 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
832 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
834 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
836 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
838 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
839 CPU_PENTIUMPRO_FLAGS
, 0 },
840 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
842 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
844 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
846 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
848 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
849 CPU_NOCONA_FLAGS
, 0 },
850 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
852 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
854 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
855 CPU_CORE2_FLAGS
, 1 },
856 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
857 CPU_CORE2_FLAGS
, 0 },
858 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
859 CPU_COREI7_FLAGS
, 0 },
860 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
862 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
864 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
865 CPU_IAMCU_FLAGS
, 0 },
866 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
868 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
870 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
871 CPU_ATHLON_FLAGS
, 0 },
872 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
874 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
876 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
878 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
879 CPU_AMDFAM10_FLAGS
, 0 },
880 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
881 CPU_BDVER1_FLAGS
, 0 },
882 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
883 CPU_BDVER2_FLAGS
, 0 },
884 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
885 CPU_BDVER3_FLAGS
, 0 },
886 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
887 CPU_BDVER4_FLAGS
, 0 },
888 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
889 CPU_ZNVER1_FLAGS
, 0 },
890 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER
,
891 CPU_ZNVER2_FLAGS
, 0 },
892 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
893 CPU_BTVER1_FLAGS
, 0 },
894 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
895 CPU_BTVER2_FLAGS
, 0 },
896 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
898 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
900 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
902 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
904 { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN
,
906 { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN
,
908 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
910 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
912 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
914 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
916 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
917 CPU_SSSE3_FLAGS
, 0 },
918 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
919 CPU_SSE4_1_FLAGS
, 0 },
920 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
921 CPU_SSE4_2_FLAGS
, 0 },
922 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
923 CPU_SSE4_2_FLAGS
, 0 },
924 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
926 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
928 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
929 CPU_AVX512F_FLAGS
, 0 },
930 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
931 CPU_AVX512CD_FLAGS
, 0 },
932 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
933 CPU_AVX512ER_FLAGS
, 0 },
934 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
935 CPU_AVX512PF_FLAGS
, 0 },
936 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
937 CPU_AVX512DQ_FLAGS
, 0 },
938 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
939 CPU_AVX512BW_FLAGS
, 0 },
940 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
941 CPU_AVX512VL_FLAGS
, 0 },
942 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
944 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
945 CPU_VMFUNC_FLAGS
, 0 },
946 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
948 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
949 CPU_XSAVE_FLAGS
, 0 },
950 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
951 CPU_XSAVEOPT_FLAGS
, 0 },
952 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
953 CPU_XSAVEC_FLAGS
, 0 },
954 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
955 CPU_XSAVES_FLAGS
, 0 },
956 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
958 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
959 CPU_PCLMUL_FLAGS
, 0 },
960 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
961 CPU_PCLMUL_FLAGS
, 1 },
962 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
963 CPU_FSGSBASE_FLAGS
, 0 },
964 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
965 CPU_RDRND_FLAGS
, 0 },
966 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
968 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
970 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
972 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
974 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
976 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
978 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
979 CPU_MOVBE_FLAGS
, 0 },
980 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
982 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
984 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
985 CPU_LZCNT_FLAGS
, 0 },
986 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
988 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
990 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
991 CPU_INVPCID_FLAGS
, 0 },
992 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
993 CPU_CLFLUSH_FLAGS
, 0 },
994 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
996 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
997 CPU_SYSCALL_FLAGS
, 0 },
998 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
999 CPU_RDTSCP_FLAGS
, 0 },
1000 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
1001 CPU_3DNOW_FLAGS
, 0 },
1002 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
1003 CPU_3DNOWA_FLAGS
, 0 },
1004 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
1005 CPU_PADLOCK_FLAGS
, 0 },
1006 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
1007 CPU_SVME_FLAGS
, 1 },
1008 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
1009 CPU_SVME_FLAGS
, 0 },
1010 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1011 CPU_SSE4A_FLAGS
, 0 },
1012 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
1014 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
1016 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
1018 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
1020 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
1021 CPU_RDSEED_FLAGS
, 0 },
1022 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
1023 CPU_PRFCHW_FLAGS
, 0 },
1024 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
1025 CPU_SMAP_FLAGS
, 0 },
1026 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
1028 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
1030 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
1031 CPU_CLFLUSHOPT_FLAGS
, 0 },
1032 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
1033 CPU_PREFETCHWT1_FLAGS
, 0 },
1034 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
1036 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
1037 CPU_CLWB_FLAGS
, 0 },
1038 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
1039 CPU_AVX512IFMA_FLAGS
, 0 },
1040 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
1041 CPU_AVX512VBMI_FLAGS
, 0 },
1042 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
1043 CPU_AVX512_4FMAPS_FLAGS
, 0 },
1044 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
1045 CPU_AVX512_4VNNIW_FLAGS
, 0 },
1046 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
1047 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
1048 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
1049 CPU_AVX512_VBMI2_FLAGS
, 0 },
1050 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN
,
1051 CPU_AVX512_VNNI_FLAGS
, 0 },
1052 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN
,
1053 CPU_AVX512_BITALG_FLAGS
, 0 },
1054 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
1055 CPU_CLZERO_FLAGS
, 0 },
1056 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
1057 CPU_MWAITX_FLAGS
, 0 },
1058 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
1059 CPU_OSPKE_FLAGS
, 0 },
1060 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
1061 CPU_RDPID_FLAGS
, 0 },
1062 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
1063 CPU_PTWRITE_FLAGS
, 0 },
1064 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN
,
1066 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN
,
1067 CPU_SHSTK_FLAGS
, 0 },
1068 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1069 CPU_GFNI_FLAGS
, 0 },
1070 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN
,
1071 CPU_VAES_FLAGS
, 0 },
1072 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN
,
1073 CPU_VPCLMULQDQ_FLAGS
, 0 },
1074 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN
,
1075 CPU_WBNOINVD_FLAGS
, 0 },
1076 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN
,
1077 CPU_PCONFIG_FLAGS
, 0 },
1078 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN
,
1079 CPU_WAITPKG_FLAGS
, 0 },
1080 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN
,
1081 CPU_CLDEMOTE_FLAGS
, 0 },
1082 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN
,
1083 CPU_MOVDIRI_FLAGS
, 0 },
1084 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN
,
1085 CPU_MOVDIR64B_FLAGS
, 0 },
1086 { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN
,
1087 CPU_AVX512_BF16_FLAGS
, 0 },
1088 { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN
,
1089 CPU_AVX512_VP2INTERSECT_FLAGS
, 0 },
1090 { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN
,
1091 CPU_ENQCMD_FLAGS
, 0 },
1092 { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN
,
1093 CPU_RDPRU_FLAGS
, 0 },
1094 { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN
,
1095 CPU_MCOMMIT_FLAGS
, 0 },
1098 static const noarch_entry cpu_noarch
[] =
1100 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1101 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1102 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1103 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1104 { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS
},
1105 { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS
},
1106 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1107 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1108 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1109 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1110 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1111 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1112 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1113 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1114 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1115 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1116 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1117 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1118 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1119 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1120 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1121 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1122 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1123 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1124 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1125 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1126 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1127 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1128 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1129 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS
},
1130 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS
},
1131 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS
},
1132 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS
},
1133 { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS
},
1134 { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS
},
1135 { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS
},
1136 { STRING_COMMA_LEN ("noavx512_vp2intersect"), CPU_ANY_SHSTK_FLAGS
},
1137 { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS
},
1141 /* Like s_lcomm_internal in gas/read.c but the alignment string
1142 is allowed to be optional. */
1145 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1152 && *input_line_pointer
== ',')
1154 align
= parse_align (needs_align
- 1);
1156 if (align
== (addressT
) -1)
1171 bss_alloc (symbolP
, size
, align
);
1176 pe_lcomm (int needs_align
)
1178 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1182 const pseudo_typeS md_pseudo_table
[] =
1184 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1185 {"align", s_align_bytes
, 0},
1187 {"align", s_align_ptwo
, 0},
1189 {"arch", set_cpu_arch
, 0},
1193 {"lcomm", pe_lcomm
, 1},
1195 {"ffloat", float_cons
, 'f'},
1196 {"dfloat", float_cons
, 'd'},
1197 {"tfloat", float_cons
, 'x'},
1199 {"slong", signed_cons
, 4},
1200 {"noopt", s_ignore
, 0},
1201 {"optim", s_ignore
, 0},
1202 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1203 {"code16", set_code_flag
, CODE_16BIT
},
1204 {"code32", set_code_flag
, CODE_32BIT
},
1206 {"code64", set_code_flag
, CODE_64BIT
},
1208 {"intel_syntax", set_intel_syntax
, 1},
1209 {"att_syntax", set_intel_syntax
, 0},
1210 {"intel_mnemonic", set_intel_mnemonic
, 1},
1211 {"att_mnemonic", set_intel_mnemonic
, 0},
1212 {"allow_index_reg", set_allow_index_reg
, 1},
1213 {"disallow_index_reg", set_allow_index_reg
, 0},
1214 {"sse_check", set_check
, 0},
1215 {"operand_check", set_check
, 1},
1216 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1217 {"largecomm", handle_large_common
, 0},
1219 {"file", dwarf2_directive_file
, 0},
1220 {"loc", dwarf2_directive_loc
, 0},
1221 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1224 {"secrel32", pe_directive_secrel
, 0},
1229 /* For interface with expression (). */
1230 extern char *input_line_pointer
;
1232 /* Hash table for instruction mnemonic lookup. */
1233 static struct hash_control
*op_hash
;
1235 /* Hash table for register lookup. */
1236 static struct hash_control
*reg_hash
;
1238 /* Various efficient no-op patterns for aligning code labels.
1239 Note: Don't try to assemble the instructions in the comments.
1240 0L and 0w are not legal. */
1241 static const unsigned char f32_1
[] =
1243 static const unsigned char f32_2
[] =
1244 {0x66,0x90}; /* xchg %ax,%ax */
1245 static const unsigned char f32_3
[] =
1246 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1247 static const unsigned char f32_4
[] =
1248 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1249 static const unsigned char f32_6
[] =
1250 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1251 static const unsigned char f32_7
[] =
1252 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1253 static const unsigned char f16_3
[] =
1254 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1255 static const unsigned char f16_4
[] =
1256 {0x8d,0xb4,0x00,0x00}; /* lea 0W(%si),%si */
1257 static const unsigned char jump_disp8
[] =
1258 {0xeb}; /* jmp disp8 */
1259 static const unsigned char jump32_disp32
[] =
1260 {0xe9}; /* jmp disp32 */
1261 static const unsigned char jump16_disp32
[] =
1262 {0x66,0xe9}; /* jmp disp32 */
1263 /* 32-bit NOPs patterns. */
1264 static const unsigned char *const f32_patt
[] = {
1265 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1267 /* 16-bit NOPs patterns. */
1268 static const unsigned char *const f16_patt
[] = {
1269 f32_1
, f32_2
, f16_3
, f16_4
1271 /* nopl (%[re]ax) */
1272 static const unsigned char alt_3
[] =
1274 /* nopl 0(%[re]ax) */
1275 static const unsigned char alt_4
[] =
1276 {0x0f,0x1f,0x40,0x00};
1277 /* nopl 0(%[re]ax,%[re]ax,1) */
1278 static const unsigned char alt_5
[] =
1279 {0x0f,0x1f,0x44,0x00,0x00};
1280 /* nopw 0(%[re]ax,%[re]ax,1) */
1281 static const unsigned char alt_6
[] =
1282 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1283 /* nopl 0L(%[re]ax) */
1284 static const unsigned char alt_7
[] =
1285 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1286 /* nopl 0L(%[re]ax,%[re]ax,1) */
1287 static const unsigned char alt_8
[] =
1288 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1289 /* nopw 0L(%[re]ax,%[re]ax,1) */
1290 static const unsigned char alt_9
[] =
1291 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1292 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1293 static const unsigned char alt_10
[] =
1294 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1295 /* data16 nopw %cs:0L(%eax,%eax,1) */
1296 static const unsigned char alt_11
[] =
1297 {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1298 /* 32-bit and 64-bit NOPs patterns. */
1299 static const unsigned char *const alt_patt
[] = {
1300 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1301 alt_9
, alt_10
, alt_11
1304 /* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
1305 size of a single NOP instruction MAX_SINGLE_NOP_SIZE. */
/* NOTE(review): this chunk is a lossy extraction -- the declarations of
   OFFSET and LAST, the loop/branch bodies' braces, and several guard
   conditions are missing below.  Restore from upstream before building;
   comments only are added here.  */
1308 i386_output_nops (char *where
, const unsigned char *const *patt
,
1309 int count
, int max_single_nop_size
)
1312 /* Place the longer NOP first. */
1315 const unsigned char *nops
;
/* A non-positive maximum NOP size is a caller bug: abort assembly.  */
1317 if (max_single_nop_size
< 1)
1319 as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
1320 max_single_nop_size
);
/* Select the longest available pattern; PATT[n-1] is the n-byte NOP.  */
1324 nops
= patt
[max_single_nop_size
- 1];
1326 /* Use the smaller one if the requested one isn't available. */
1329 max_single_nop_size
--;
1330 nops
= patt
[max_single_nop_size
- 1];
/* LAST is the residue that won't fill a whole max-size NOP.  */
1333 last
= count
% max_single_nop_size
;
/* Emit as many max-size NOPs as fit.  */
1336 for (offset
= 0; offset
< count
; offset
+= max_single_nop_size
)
1337 memcpy (where
+ offset
, nops
, max_single_nop_size
);
1341 nops
= patt
[last
- 1];
1344 /* Use the smaller one plus one-byte NOP if the needed one
/* NOTE(review): the tail handling below presumably pads with a
   (LAST-1)-byte NOP plus a one-byte NOP when no LAST-byte pattern
   exists -- confirm against upstream; surrounding lines are missing.  */
1347 nops
= patt
[last
- 1];
1348 memcpy (where
+ offset
, nops
, last
);
1349 where
[offset
+ last
] = *patt
[0];
1352 memcpy (where
+ offset
, nops
, last
);
1357 fits_in_imm7 (offsetT num
)
1359 return (num
& 0x7f) == num
;
1363 fits_in_imm31 (offsetT num
)
1365 return (num
& 0x7fffffff) == num
;
1368 /* Genenerate COUNT bytes of NOPs to WHERE with the maximum size of a
1369 single NOP instruction LIMIT. */
1372 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1374 const unsigned char *const *patt
= NULL
;
1375 int max_single_nop_size
;
1376 /* Maximum number of NOPs before switching to jump over NOPs. */
1377 int max_number_of_nops
;
1379 switch (fragP
->fr_type
)
1388 /* We need to decide which NOP sequence to use for 32bit and
1389 64bit. When -mtune= is used:
1391 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1392 PROCESSOR_GENERIC32, f32_patt will be used.
1393 2. For the rest, alt_patt will be used.
1395 When -mtune= isn't used, alt_patt will be used if
1396 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1399 When -march= or .arch is used, we can't use anything beyond
1400 cpu_arch_isa_flags. */
1402 if (flag_code
== CODE_16BIT
)
1405 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1406 /* Limit number of NOPs to 2 in 16-bit mode. */
1407 max_number_of_nops
= 2;
1411 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1413 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1414 switch (cpu_arch_tune
)
1416 case PROCESSOR_UNKNOWN
:
1417 /* We use cpu_arch_isa_flags to check if we SHOULD
1418 optimize with nops. */
1419 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1424 case PROCESSOR_PENTIUM4
:
1425 case PROCESSOR_NOCONA
:
1426 case PROCESSOR_CORE
:
1427 case PROCESSOR_CORE2
:
1428 case PROCESSOR_COREI7
:
1429 case PROCESSOR_L1OM
:
1430 case PROCESSOR_K1OM
:
1431 case PROCESSOR_GENERIC64
:
1433 case PROCESSOR_ATHLON
:
1435 case PROCESSOR_AMDFAM10
:
1437 case PROCESSOR_ZNVER
:
1441 case PROCESSOR_I386
:
1442 case PROCESSOR_I486
:
1443 case PROCESSOR_PENTIUM
:
1444 case PROCESSOR_PENTIUMPRO
:
1445 case PROCESSOR_IAMCU
:
1446 case PROCESSOR_GENERIC32
:
1453 switch (fragP
->tc_frag_data
.tune
)
1455 case PROCESSOR_UNKNOWN
:
1456 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1457 PROCESSOR_UNKNOWN. */
1461 case PROCESSOR_I386
:
1462 case PROCESSOR_I486
:
1463 case PROCESSOR_PENTIUM
:
1464 case PROCESSOR_IAMCU
:
1466 case PROCESSOR_ATHLON
:
1468 case PROCESSOR_AMDFAM10
:
1470 case PROCESSOR_ZNVER
:
1472 case PROCESSOR_GENERIC32
:
1473 /* We use cpu_arch_isa_flags to check if we CAN optimize
1475 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1480 case PROCESSOR_PENTIUMPRO
:
1481 case PROCESSOR_PENTIUM4
:
1482 case PROCESSOR_NOCONA
:
1483 case PROCESSOR_CORE
:
1484 case PROCESSOR_CORE2
:
1485 case PROCESSOR_COREI7
:
1486 case PROCESSOR_L1OM
:
1487 case PROCESSOR_K1OM
:
1488 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1493 case PROCESSOR_GENERIC64
:
1499 if (patt
== f32_patt
)
1501 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1502 /* Limit number of NOPs to 2 for older processors. */
1503 max_number_of_nops
= 2;
1507 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1508 /* Limit number of NOPs to 7 for newer processors. */
1509 max_number_of_nops
= 7;
1514 limit
= max_single_nop_size
;
1516 if (fragP
->fr_type
== rs_fill_nop
)
1518 /* Output NOPs for .nop directive. */
1519 if (limit
> max_single_nop_size
)
1521 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1522 _("invalid single nop size: %d "
1523 "(expect within [0, %d])"),
1524 limit
, max_single_nop_size
);
1529 fragP
->fr_var
= count
;
1531 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1533 /* Generate jump over NOPs. */
1534 offsetT disp
= count
- 2;
1535 if (fits_in_imm7 (disp
))
1537 /* Use "jmp disp8" if possible. */
1539 where
[0] = jump_disp8
[0];
1545 unsigned int size_of_jump
;
1547 if (flag_code
== CODE_16BIT
)
1549 where
[0] = jump16_disp32
[0];
1550 where
[1] = jump16_disp32
[1];
1555 where
[0] = jump32_disp32
[0];
1559 count
-= size_of_jump
+ 4;
1560 if (!fits_in_imm31 (count
))
1562 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1563 _("jump over nop padding out of range"));
1567 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1568 where
+= size_of_jump
+ 4;
1572 /* Generate multiple NOPs. */
1573 i386_output_nops (where
, patt
, count
, limit
);
/* Return non-zero when every element of X->array is zero, i.e. the
   operand type carries no bits.
   NOTE(review): the extraction dropped the switch case labels and the
   fall-through tests of array[2]/array[1]; only the final array[0]
   test survives below.  Restore from upstream before building.  */
1577 operand_type_all_zero (const union i386_operand_type
*x
)
1579 switch (ARRAY_SIZE(x
->array
))
1590 return !x
->array
[0];
1597 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1599 switch (ARRAY_SIZE(x
->array
))
1615 x
->bitfield
.class = ClassNone
;
1616 x
->bitfield
.instance
= InstanceNone
;
1620 operand_type_equal (const union i386_operand_type
*x
,
1621 const union i386_operand_type
*y
)
1623 switch (ARRAY_SIZE(x
->array
))
1626 if (x
->array
[2] != y
->array
[2])
1630 if (x
->array
[1] != y
->array
[1])
1634 return x
->array
[0] == y
->array
[0];
/* Return non-zero when every element of X->array is zero, i.e. the
   CPU feature set is empty.
   NOTE(review): the switch case labels and the fall-through tests of
   the higher array elements were lost in extraction; only the final
   array[0] test survives below.  Restore from upstream.  */
1644 switch (ARRAY_SIZE(x
->array
))
1659 return !x
->array
[0];
1666 cpu_flags_equal (const union i386_cpu_flags
*x
,
1667 const union i386_cpu_flags
*y
)
1669 switch (ARRAY_SIZE(x
->array
))
1672 if (x
->array
[3] != y
->array
[3])
1676 if (x
->array
[2] != y
->array
[2])
1680 if (x
->array
[1] != y
->array
[1])
1684 return x
->array
[0] == y
->array
[0];
1692 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1694 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1695 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1698 static INLINE i386_cpu_flags
1699 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1701 switch (ARRAY_SIZE (x
.array
))
1704 x
.array
[3] &= y
.array
[3];
1707 x
.array
[2] &= y
.array
[2];
1710 x
.array
[1] &= y
.array
[1];
1713 x
.array
[0] &= y
.array
[0];
1721 static INLINE i386_cpu_flags
1722 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1724 switch (ARRAY_SIZE (x
.array
))
1727 x
.array
[3] |= y
.array
[3];
1730 x
.array
[2] |= y
.array
[2];
1733 x
.array
[1] |= y
.array
[1];
1736 x
.array
[0] |= y
.array
[0];
1744 static INLINE i386_cpu_flags
1745 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1747 switch (ARRAY_SIZE (x
.array
))
1750 x
.array
[3] &= ~y
.array
[3];
1753 x
.array
[2] &= ~y
.array
[2];
1756 x
.array
[1] &= ~y
.array
[1];
1759 x
.array
[0] &= ~y
.array
[0];
1767 #define CPU_FLAGS_ARCH_MATCH 0x1
1768 #define CPU_FLAGS_64BIT_MATCH 0x2
1770 #define CPU_FLAGS_PERFECT_MATCH \
1771 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1773 /* Return CPU flags match bits. */
1776 cpu_flags_match (const insn_template
*t
)
1778 i386_cpu_flags x
= t
->cpu_flags
;
1779 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1781 x
.bitfield
.cpu64
= 0;
1782 x
.bitfield
.cpuno64
= 0;
1784 if (cpu_flags_all_zero (&x
))
1786 /* This instruction is available on all archs. */
1787 match
|= CPU_FLAGS_ARCH_MATCH
;
1791 /* This instruction is available only on some archs. */
1792 i386_cpu_flags cpu
= cpu_arch_flags
;
1794 /* AVX512VL is no standalone feature - match it and then strip it. */
1795 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1797 x
.bitfield
.cpuavx512vl
= 0;
1799 cpu
= cpu_flags_and (x
, cpu
);
1800 if (!cpu_flags_all_zero (&cpu
))
1802 if (x
.bitfield
.cpuavx
)
1804 /* We need to check a few extra flags with AVX. */
1805 if (cpu
.bitfield
.cpuavx
1806 && (!t
->opcode_modifier
.sse2avx
|| sse2avx
)
1807 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1808 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1809 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1810 match
|= CPU_FLAGS_ARCH_MATCH
;
1812 else if (x
.bitfield
.cpuavx512f
)
1814 /* We need to check a few extra flags with AVX512F. */
1815 if (cpu
.bitfield
.cpuavx512f
1816 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1817 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1818 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1819 match
|= CPU_FLAGS_ARCH_MATCH
;
1822 match
|= CPU_FLAGS_ARCH_MATCH
;
1828 static INLINE i386_operand_type
1829 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1831 if (x
.bitfield
.class != y
.bitfield
.class)
1832 x
.bitfield
.class = ClassNone
;
1833 if (x
.bitfield
.instance
!= y
.bitfield
.instance
)
1834 x
.bitfield
.instance
= InstanceNone
;
1836 switch (ARRAY_SIZE (x
.array
))
1839 x
.array
[2] &= y
.array
[2];
1842 x
.array
[1] &= y
.array
[1];
1845 x
.array
[0] &= y
.array
[0];
1853 static INLINE i386_operand_type
1854 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
1856 gas_assert (y
.bitfield
.class == ClassNone
);
1857 gas_assert (y
.bitfield
.instance
== InstanceNone
);
1859 switch (ARRAY_SIZE (x
.array
))
1862 x
.array
[2] &= ~y
.array
[2];
1865 x
.array
[1] &= ~y
.array
[1];
1868 x
.array
[0] &= ~y
.array
[0];
1876 static INLINE i386_operand_type
1877 operand_type_or (i386_operand_type x
, i386_operand_type y
)
1879 gas_assert (x
.bitfield
.class == ClassNone
||
1880 y
.bitfield
.class == ClassNone
||
1881 x
.bitfield
.class == y
.bitfield
.class);
1882 gas_assert (x
.bitfield
.instance
== InstanceNone
||
1883 y
.bitfield
.instance
== InstanceNone
||
1884 x
.bitfield
.instance
== y
.bitfield
.instance
);
1886 switch (ARRAY_SIZE (x
.array
))
1889 x
.array
[2] |= y
.array
[2];
1892 x
.array
[1] |= y
.array
[1];
1895 x
.array
[0] |= y
.array
[0];
1903 static INLINE i386_operand_type
1904 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
1906 gas_assert (y
.bitfield
.class == ClassNone
);
1907 gas_assert (y
.bitfield
.instance
== InstanceNone
);
1909 switch (ARRAY_SIZE (x
.array
))
1912 x
.array
[2] ^= y
.array
[2];
1915 x
.array
[1] ^= y
.array
[1];
1918 x
.array
[0] ^= y
.array
[0];
1926 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
1927 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
1928 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
1929 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
1930 static const i386_operand_type anydisp
= OPERAND_TYPE_ANYDISP
;
1931 static const i386_operand_type anyimm
= OPERAND_TYPE_ANYIMM
;
1932 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
1933 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
1934 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
1935 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
1936 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
1937 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
1938 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
1939 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
1940 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
1941 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
1942 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
1953 operand_type_check (i386_operand_type t
, enum operand_type c
)
1958 return t
.bitfield
.class == Reg
;
1961 return (t
.bitfield
.imm8
1965 || t
.bitfield
.imm32s
1966 || t
.bitfield
.imm64
);
1969 return (t
.bitfield
.disp8
1970 || t
.bitfield
.disp16
1971 || t
.bitfield
.disp32
1972 || t
.bitfield
.disp32s
1973 || t
.bitfield
.disp64
);
1976 return (t
.bitfield
.disp8
1977 || t
.bitfield
.disp16
1978 || t
.bitfield
.disp32
1979 || t
.bitfield
.disp32s
1980 || t
.bitfield
.disp64
1981 || t
.bitfield
.baseindex
);
1990 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
1991 between operand GIVEN and opeand WANTED for instruction template T. */
1994 match_operand_size (const insn_template
*t
, unsigned int wanted
,
1997 return !((i
.types
[given
].bitfield
.byte
1998 && !t
->operand_types
[wanted
].bitfield
.byte
)
1999 || (i
.types
[given
].bitfield
.word
2000 && !t
->operand_types
[wanted
].bitfield
.word
)
2001 || (i
.types
[given
].bitfield
.dword
2002 && !t
->operand_types
[wanted
].bitfield
.dword
)
2003 || (i
.types
[given
].bitfield
.qword
2004 && !t
->operand_types
[wanted
].bitfield
.qword
)
2005 || (i
.types
[given
].bitfield
.tbyte
2006 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
2009 /* Return 1 if there is no conflict in SIMD register between operand
2010 GIVEN and opeand WANTED for instruction template T. */
2013 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2016 return !((i
.types
[given
].bitfield
.xmmword
2017 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2018 || (i
.types
[given
].bitfield
.ymmword
2019 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2020 || (i
.types
[given
].bitfield
.zmmword
2021 && !t
->operand_types
[wanted
].bitfield
.zmmword
));
2024 /* Return 1 if there is no conflict in any size between operand GIVEN
2025 and opeand WANTED for instruction template T. */
2028 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2031 return (match_operand_size (t
, wanted
, given
)
2032 && !((i
.types
[given
].bitfield
.unspecified
2034 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2035 || (i
.types
[given
].bitfield
.fword
2036 && !t
->operand_types
[wanted
].bitfield
.fword
)
2037 /* For scalar opcode templates to allow register and memory
2038 operands at the same time, some special casing is needed
2039 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2040 down-conversion vpmov*. */
2041 || ((t
->operand_types
[wanted
].bitfield
.class == RegSIMD
2042 && !t
->opcode_modifier
.broadcast
2043 && (t
->operand_types
[wanted
].bitfield
.byte
2044 || t
->operand_types
[wanted
].bitfield
.word
2045 || t
->operand_types
[wanted
].bitfield
.dword
2046 || t
->operand_types
[wanted
].bitfield
.qword
))
2047 ? (i
.types
[given
].bitfield
.xmmword
2048 || i
.types
[given
].bitfield
.ymmword
2049 || i
.types
[given
].bitfield
.zmmword
)
2050 : !match_simd_size(t
, wanted
, given
))));
2053 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2054 operands for instruction template T, and it has MATCH_REVERSE set if there
2055 is no size conflict on any operands for the template with operands reversed
2056 (and the template allows for reversing in the first place). */
2058 #define MATCH_STRAIGHT 1
2059 #define MATCH_REVERSE 2
2061 static INLINE
unsigned int
2062 operand_size_match (const insn_template
*t
)
2064 unsigned int j
, match
= MATCH_STRAIGHT
;
2066 /* Don't check jump instructions. */
2067 if (t
->opcode_modifier
.jump
2068 || t
->opcode_modifier
.jumpbyte
2069 || t
->opcode_modifier
.jumpdword
2070 || t
->opcode_modifier
.jumpintersegment
)
2073 /* Check memory and accumulator operand size. */
2074 for (j
= 0; j
< i
.operands
; j
++)
2076 if (i
.types
[j
].bitfield
.class != Reg
2077 && i
.types
[j
].bitfield
.class != RegSIMD
2078 && t
->operand_types
[j
].bitfield
.anysize
)
2081 if (t
->operand_types
[j
].bitfield
.class == Reg
2082 && !match_operand_size (t
, j
, j
))
2088 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2089 && !match_simd_size (t
, j
, j
))
2095 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2096 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2102 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
2109 if (!t
->opcode_modifier
.d
)
2113 i
.error
= operand_size_mismatch
;
2117 /* Check reverse. */
2118 gas_assert (i
.operands
>= 2 && i
.operands
<= 3);
2120 for (j
= 0; j
< i
.operands
; j
++)
2122 unsigned int given
= i
.operands
- j
- 1;
2124 if (t
->operand_types
[j
].bitfield
.class == Reg
2125 && !match_operand_size (t
, j
, given
))
2128 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2129 && !match_simd_size (t
, j
, given
))
2132 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2133 && (!match_operand_size (t
, j
, given
)
2134 || !match_simd_size (t
, j
, given
)))
2137 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2141 return match
| MATCH_REVERSE
;
2145 operand_type_match (i386_operand_type overlap
,
2146 i386_operand_type given
)
2148 i386_operand_type temp
= overlap
;
2150 temp
.bitfield
.jumpabsolute
= 0;
2151 temp
.bitfield
.unspecified
= 0;
2152 temp
.bitfield
.byte
= 0;
2153 temp
.bitfield
.word
= 0;
2154 temp
.bitfield
.dword
= 0;
2155 temp
.bitfield
.fword
= 0;
2156 temp
.bitfield
.qword
= 0;
2157 temp
.bitfield
.tbyte
= 0;
2158 temp
.bitfield
.xmmword
= 0;
2159 temp
.bitfield
.ymmword
= 0;
2160 temp
.bitfield
.zmmword
= 0;
2161 if (operand_type_all_zero (&temp
))
2164 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
2165 && given
.bitfield
.jumpabsolute
== overlap
.bitfield
.jumpabsolute
)
2169 i
.error
= operand_type_mismatch
;
2173 /* If given types g0 and g1 are registers they must be of the same type
2174 unless the expected operand type register overlap is null.
2175 Memory operand size of certain SIMD instructions is also being checked
2179 operand_type_register_match (i386_operand_type g0
,
2180 i386_operand_type t0
,
2181 i386_operand_type g1
,
2182 i386_operand_type t1
)
2184 if (g0
.bitfield
.class != Reg
2185 && g0
.bitfield
.class != RegSIMD
2186 && (!operand_type_check (g0
, anymem
)
2187 || g0
.bitfield
.unspecified
2188 || t0
.bitfield
.class != RegSIMD
))
2191 if (g1
.bitfield
.class != Reg
2192 && g1
.bitfield
.class != RegSIMD
2193 && (!operand_type_check (g1
, anymem
)
2194 || g1
.bitfield
.unspecified
2195 || t1
.bitfield
.class != RegSIMD
))
2198 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2199 && g0
.bitfield
.word
== g1
.bitfield
.word
2200 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2201 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2202 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2203 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2204 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2207 if (!(t0
.bitfield
.byte
& t1
.bitfield
.byte
)
2208 && !(t0
.bitfield
.word
& t1
.bitfield
.word
)
2209 && !(t0
.bitfield
.dword
& t1
.bitfield
.dword
)
2210 && !(t0
.bitfield
.qword
& t1
.bitfield
.qword
)
2211 && !(t0
.bitfield
.xmmword
& t1
.bitfield
.xmmword
)
2212 && !(t0
.bitfield
.ymmword
& t1
.bitfield
.ymmword
)
2213 && !(t0
.bitfield
.zmmword
& t1
.bitfield
.zmmword
))
2216 i
.error
= register_type_mismatch
;
2221 static INLINE
unsigned int
2222 register_number (const reg_entry
*r
)
2224 unsigned int nr
= r
->reg_num
;
2226 if (r
->reg_flags
& RegRex
)
2229 if (r
->reg_flags
& RegVRex
)
2235 static INLINE
unsigned int
2236 mode_from_disp_size (i386_operand_type t
)
2238 if (t
.bitfield
.disp8
)
2240 else if (t
.bitfield
.disp16
2241 || t
.bitfield
.disp32
2242 || t
.bitfield
.disp32s
)
2249 fits_in_signed_byte (addressT num
)
2251 return num
+ 0x80 <= 0xff;
/* Return non-zero if NUM fits in an unsigned byte.
   NOTE(review): the function body was dropped by the extraction --
   presumably "return num <= 0xff;" as in upstream; confirm before
   building.  */
2255 fits_in_unsigned_byte (addressT num
)
2261 fits_in_unsigned_word (addressT num
)
2263 return num
<= 0xffff;
2267 fits_in_signed_word (addressT num
)
2269 return num
+ 0x8000 <= 0xffff;
/* Return non-zero if NUM, viewed as a signed value, fits in 32 bits.
   NOTE(review): the ATTRIBUTE_UNUSED on NUM indicates a dropped
   "#ifndef BFD64" branch (which returns 1 unconditionally on 32-bit
   hosts) -- restore from upstream before building.  */
2273 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2278 return num
+ 0x80000000 <= 0xffffffff;
2280 } /* fits_in_signed_long() */
/* Return non-zero if NUM fits in an unsigned 32-bit value.
   NOTE(review): as with fits_in_signed_long, the ATTRIBUTE_UNUSED
   indicates a dropped "#ifndef BFD64" branch -- restore from upstream
   before building.  */
2283 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2288 return num
<= 0xffffffff;
2290 } /* fits_in_unsigned_long() */
/* Return non-zero if NUM can be encoded as a compressed (EVEX)
   disp8, i.e. it is a multiple of 1 << i.memshift and the shifted
   value fits in a signed byte.
   NOTE(review): the declaration of MASK and the alignment rejection
   test ("if ((num & mask) != 0) return 0;" upstream) were dropped by
   the extraction; restore before building.  */
2293 fits_in_disp8 (offsetT num
)
2295 int shift
= i
.memshift
;
2301 mask
= (1 << shift
) - 1;
2303 /* Return 0 if NUM isn't properly aligned. */
2307 /* Check if NUM will fit in 8bit after shift. */
2308 return fits_in_signed_byte (num
>> shift
);
2312 fits_in_imm4 (offsetT num
)
2314 return (num
& 0xf) == num
;
2317 static i386_operand_type
2318 smallest_imm_type (offsetT num
)
2320 i386_operand_type t
;
2322 operand_type_set (&t
, 0);
2323 t
.bitfield
.imm64
= 1;
2325 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2327 /* This code is disabled on the 486 because all the Imm1 forms
2328 in the opcode table are slower on the i486. They're the
2329 versions with the implicitly specified single-position
2330 displacement, which has another syntax if you really want to
2332 t
.bitfield
.imm1
= 1;
2333 t
.bitfield
.imm8
= 1;
2334 t
.bitfield
.imm8s
= 1;
2335 t
.bitfield
.imm16
= 1;
2336 t
.bitfield
.imm32
= 1;
2337 t
.bitfield
.imm32s
= 1;
2339 else if (fits_in_signed_byte (num
))
2341 t
.bitfield
.imm8
= 1;
2342 t
.bitfield
.imm8s
= 1;
2343 t
.bitfield
.imm16
= 1;
2344 t
.bitfield
.imm32
= 1;
2345 t
.bitfield
.imm32s
= 1;
2347 else if (fits_in_unsigned_byte (num
))
2349 t
.bitfield
.imm8
= 1;
2350 t
.bitfield
.imm16
= 1;
2351 t
.bitfield
.imm32
= 1;
2352 t
.bitfield
.imm32s
= 1;
2354 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2356 t
.bitfield
.imm16
= 1;
2357 t
.bitfield
.imm32
= 1;
2358 t
.bitfield
.imm32s
= 1;
2360 else if (fits_in_signed_long (num
))
2362 t
.bitfield
.imm32
= 1;
2363 t
.bitfield
.imm32s
= 1;
2365 else if (fits_in_unsigned_long (num
))
2366 t
.bitfield
.imm32
= 1;
2372 offset_in_range (offsetT val
, int size
)
2378 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2379 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2380 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2382 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2388 /* If BFD64, sign extend val for 32bit address mode. */
2389 if (flag_code
!= CODE_64BIT
2390 || i
.prefix
[ADDR_PREFIX
])
2391 if ((val
& ~(((addressT
) 2 << 31) - 1)) == 0)
2392 val
= (val
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2395 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2397 char buf1
[40], buf2
[40];
2399 sprint_value (buf1
, val
);
2400 sprint_value (buf2
, val
& mask
);
2401 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2416 a. PREFIX_EXIST if attempting to add a prefix where one from the
2417 same class already exists.
2418 b. PREFIX_LOCK if lock prefix is added.
2419 c. PREFIX_REP if rep/repne prefix is added.
2420 d. PREFIX_DS if ds prefix is added.
2421 e. PREFIX_OTHER if other prefix is added.
2424 static enum PREFIX_GROUP
2425 add_prefix (unsigned int prefix
)
2427 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2430 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2431 && flag_code
== CODE_64BIT
)
2433 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2434 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2435 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2436 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2447 case DS_PREFIX_OPCODE
:
2450 case CS_PREFIX_OPCODE
:
2451 case ES_PREFIX_OPCODE
:
2452 case FS_PREFIX_OPCODE
:
2453 case GS_PREFIX_OPCODE
:
2454 case SS_PREFIX_OPCODE
:
2458 case REPNE_PREFIX_OPCODE
:
2459 case REPE_PREFIX_OPCODE
:
2464 case LOCK_PREFIX_OPCODE
:
2473 case ADDR_PREFIX_OPCODE
:
2477 case DATA_PREFIX_OPCODE
:
2481 if (i
.prefix
[q
] != 0)
2489 i
.prefix
[q
] |= prefix
;
2492 as_bad (_("same type of prefix used twice"));
2498 update_code_flag (int value
, int check
)
2500 PRINTF_LIKE ((*as_error
));
2502 flag_code
= (enum flag_code
) value
;
2503 if (flag_code
== CODE_64BIT
)
2505 cpu_arch_flags
.bitfield
.cpu64
= 1;
2506 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2510 cpu_arch_flags
.bitfield
.cpu64
= 0;
2511 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2513 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2516 as_error
= as_fatal
;
2519 (*as_error
) (_("64bit mode not supported on `%s'."),
2520 cpu_arch_name
? cpu_arch_name
: default_arch
);
2522 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2525 as_error
= as_fatal
;
2528 (*as_error
) (_("32bit mode not supported on `%s'."),
2529 cpu_arch_name
? cpu_arch_name
: default_arch
);
2531 stackop_size
= '\0';
/* Handler for .code16/.code32/.code64: switch the code size without
   the extra arch-compatibility diagnostics (CHECK == 0).  */
static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2541 set_16bit_gcc_code_flag (int new_code_flag
)
2543 flag_code
= (enum flag_code
) new_code_flag
;
2544 if (flag_code
!= CODE_16BIT
)
2546 cpu_arch_flags
.bitfield
.cpu64
= 0;
2547 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2548 stackop_size
= LONG_MNEM_SUFFIX
;
2552 set_intel_syntax (int syntax_flag
)
2554 /* Find out if register prefixing is specified. */
2555 int ask_naked_reg
= 0;
2558 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2561 int e
= get_symbol_name (&string
);
2563 if (strcmp (string
, "prefix") == 0)
2565 else if (strcmp (string
, "noprefix") == 0)
2568 as_bad (_("bad argument to syntax directive."));
2569 (void) restore_line_pointer (e
);
2571 demand_empty_rest_of_line ();
2573 intel_syntax
= syntax_flag
;
2575 if (ask_naked_reg
== 0)
2576 allow_naked_reg
= (intel_syntax
2577 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2579 allow_naked_reg
= (ask_naked_reg
< 0);
2581 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2583 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2584 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2585 register_prefix
= allow_naked_reg
? "" : "%";
2589 set_intel_mnemonic (int mnemonic_flag
)
2591 intel_mnemonic
= mnemonic_flag
;
2595 set_allow_index_reg (int flag
)
2597 allow_index_reg
= flag
;
2601 set_check (int what
)
2603 enum check_kind
*kind
;
2608 kind
= &operand_check
;
2619 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2622 int e
= get_symbol_name (&string
);
2624 if (strcmp (string
, "none") == 0)
2626 else if (strcmp (string
, "warning") == 0)
2627 *kind
= check_warning
;
2628 else if (strcmp (string
, "error") == 0)
2629 *kind
= check_error
;
2631 as_bad (_("bad argument to %s_check directive."), str
);
2632 (void) restore_line_pointer (e
);
2635 as_bad (_("missing argument for %s_check directive"), str
);
2637 demand_empty_rest_of_line ();
2641 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2642 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2644 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2645 static const char *arch
;
2647 /* Intel LIOM is only supported on ELF. */
2653 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2654 use default_arch. */
2655 arch
= cpu_arch_name
;
2657 arch
= default_arch
;
2660 /* If we are targeting Intel MCU, we must enable it. */
2661 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_IAMCU
2662 || new_flag
.bitfield
.cpuiamcu
)
2665 /* If we are targeting Intel L1OM, we must enable it. */
2666 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2667 || new_flag
.bitfield
.cpul1om
)
2670 /* If we are targeting Intel K1OM, we must enable it. */
2671 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2672 || new_flag
.bitfield
.cpuk1om
)
2675 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2680 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2684 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2687 int e
= get_symbol_name (&string
);
2689 i386_cpu_flags flags
;
2691 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2693 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2695 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2699 cpu_arch_name
= cpu_arch
[j
].name
;
2700 cpu_sub_arch_name
= NULL
;
2701 cpu_arch_flags
= cpu_arch
[j
].flags
;
2702 if (flag_code
== CODE_64BIT
)
2704 cpu_arch_flags
.bitfield
.cpu64
= 1;
2705 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2709 cpu_arch_flags
.bitfield
.cpu64
= 0;
2710 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2712 cpu_arch_isa
= cpu_arch
[j
].type
;
2713 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2714 if (!cpu_arch_tune_set
)
2716 cpu_arch_tune
= cpu_arch_isa
;
2717 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2722 flags
= cpu_flags_or (cpu_arch_flags
,
2725 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2727 if (cpu_sub_arch_name
)
2729 char *name
= cpu_sub_arch_name
;
2730 cpu_sub_arch_name
= concat (name
,
2732 (const char *) NULL
);
2736 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2737 cpu_arch_flags
= flags
;
2738 cpu_arch_isa_flags
= flags
;
2742 = cpu_flags_or (cpu_arch_isa_flags
,
2744 (void) restore_line_pointer (e
);
2745 demand_empty_rest_of_line ();
2750 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2752 /* Disable an ISA extension. */
2753 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2754 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2756 flags
= cpu_flags_and_not (cpu_arch_flags
,
2757 cpu_noarch
[j
].flags
);
2758 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2760 if (cpu_sub_arch_name
)
2762 char *name
= cpu_sub_arch_name
;
2763 cpu_sub_arch_name
= concat (name
, string
,
2764 (const char *) NULL
);
2768 cpu_sub_arch_name
= xstrdup (string
);
2769 cpu_arch_flags
= flags
;
2770 cpu_arch_isa_flags
= flags
;
2772 (void) restore_line_pointer (e
);
2773 demand_empty_rest_of_line ();
2777 j
= ARRAY_SIZE (cpu_arch
);
2780 if (j
>= ARRAY_SIZE (cpu_arch
))
2781 as_bad (_("no such architecture: `%s'"), string
);
2783 *input_line_pointer
= e
;
2786 as_bad (_("missing cpu architecture"));
2788 no_cond_jump_promotion
= 0;
2789 if (*input_line_pointer
== ','
2790 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2795 ++input_line_pointer
;
2796 e
= get_symbol_name (&string
);
2798 if (strcmp (string
, "nojumps") == 0)
2799 no_cond_jump_promotion
= 1;
2800 else if (strcmp (string
, "jumps") == 0)
2803 as_bad (_("no such architecture modifier: `%s'"), string
);
2805 (void) restore_line_pointer (e
);
2808 demand_empty_rest_of_line ();
2811 enum bfd_architecture
2814 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2816 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2817 || flag_code
!= CODE_64BIT
)
2818 as_fatal (_("Intel L1OM is 64bit ELF only"));
2819 return bfd_arch_l1om
;
2821 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2823 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2824 || flag_code
!= CODE_64BIT
)
2825 as_fatal (_("Intel K1OM is 64bit ELF only"));
2826 return bfd_arch_k1om
;
2828 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2830 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2831 || flag_code
== CODE_64BIT
)
2832 as_fatal (_("Intel MCU is 32bit ELF only"));
2833 return bfd_arch_iamcu
;
2836 return bfd_arch_i386
;
2842 if (!strncmp (default_arch
, "x86_64", 6))
2844 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2846 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2847 || default_arch
[6] != '\0')
2848 as_fatal (_("Intel L1OM is 64bit ELF only"));
2849 return bfd_mach_l1om
;
2851 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2853 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2854 || default_arch
[6] != '\0')
2855 as_fatal (_("Intel K1OM is 64bit ELF only"));
2856 return bfd_mach_k1om
;
2858 else if (default_arch
[6] == '\0')
2859 return bfd_mach_x86_64
;
2861 return bfd_mach_x64_32
;
2863 else if (!strcmp (default_arch
, "i386")
2864 || !strcmp (default_arch
, "iamcu"))
2866 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2868 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
2869 as_fatal (_("Intel MCU is 32bit ELF only"));
2870 return bfd_mach_i386_iamcu
;
2873 return bfd_mach_i386_i386
;
2876 as_fatal (_("unknown architecture"));
2882 const char *hash_err
;
2884 /* Support pseudo prefixes like {disp32}. */
2885 lex_type
['{'] = LEX_BEGIN_NAME
;
2887 /* Initialize op_hash hash table. */
2888 op_hash
= hash_new ();
2891 const insn_template
*optab
;
2892 templates
*core_optab
;
2894 /* Setup for loop. */
2896 core_optab
= XNEW (templates
);
2897 core_optab
->start
= optab
;
2902 if (optab
->name
== NULL
2903 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
2905 /* different name --> ship out current template list;
2906 add to hash table; & begin anew. */
2907 core_optab
->end
= optab
;
2908 hash_err
= hash_insert (op_hash
,
2910 (void *) core_optab
);
2913 as_fatal (_("can't hash %s: %s"),
2917 if (optab
->name
== NULL
)
2919 core_optab
= XNEW (templates
);
2920 core_optab
->start
= optab
;
2925 /* Initialize reg_hash hash table. */
2926 reg_hash
= hash_new ();
2928 const reg_entry
*regtab
;
2929 unsigned int regtab_size
= i386_regtab_size
;
2931 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
2933 hash_err
= hash_insert (reg_hash
, regtab
->reg_name
, (void *) regtab
);
2935 as_fatal (_("can't hash %s: %s"),
2941 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2946 for (c
= 0; c
< 256; c
++)
2951 mnemonic_chars
[c
] = c
;
2952 register_chars
[c
] = c
;
2953 operand_chars
[c
] = c
;
2955 else if (ISLOWER (c
))
2957 mnemonic_chars
[c
] = c
;
2958 register_chars
[c
] = c
;
2959 operand_chars
[c
] = c
;
2961 else if (ISUPPER (c
))
2963 mnemonic_chars
[c
] = TOLOWER (c
);
2964 register_chars
[c
] = mnemonic_chars
[c
];
2965 operand_chars
[c
] = c
;
2967 else if (c
== '{' || c
== '}')
2969 mnemonic_chars
[c
] = c
;
2970 operand_chars
[c
] = c
;
2973 if (ISALPHA (c
) || ISDIGIT (c
))
2974 identifier_chars
[c
] = c
;
2977 identifier_chars
[c
] = c
;
2978 operand_chars
[c
] = c
;
2983 identifier_chars
['@'] = '@';
2986 identifier_chars
['?'] = '?';
2987 operand_chars
['?'] = '?';
2989 digit_chars
['-'] = '-';
2990 mnemonic_chars
['_'] = '_';
2991 mnemonic_chars
['-'] = '-';
2992 mnemonic_chars
['.'] = '.';
2993 identifier_chars
['_'] = '_';
2994 identifier_chars
['.'] = '.';
2996 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
2997 operand_chars
[(unsigned char) *p
] = *p
;
3000 if (flag_code
== CODE_64BIT
)
3002 #if defined (OBJ_COFF) && defined (TE_PE)
3003 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
3006 x86_dwarf2_return_column
= 16;
3008 x86_cie_data_alignment
= -8;
3012 x86_dwarf2_return_column
= 8;
3013 x86_cie_data_alignment
= -4;
3018 i386_print_statistics (FILE *file
)
3020 hash_print_statistics (file
, "i386 opcode", op_hash
);
3021 hash_print_statistics (file
, "i386 register", reg_hash
);
#ifdef DEBUG386

/* Debugging routines for md_assemble.  */
static void pte (insn_template *);
static void pt (i386_operand_type);
static void pe (expressionS *);
static void ps (symbolS *);

/* Print the full state of instruction X, labelled with LINE.  */

static void
pi (const char *line, i386_insn *x)
{
  unsigned int j;

  fprintf (stdout, "%s: template ", line);
  pte (&x->tm);
  fprintf (stdout, "  address: base %s  index %s  scale %x\n",
	   x->base_reg ? x->base_reg->reg_name : "none",
	   x->index_reg ? x->index_reg->reg_name : "none",
	   x->log2_scale_factor);
  fprintf (stdout, "  modrm:  mode %x  reg %x  reg/mem %x\n",
	   x->rm.mode, x->rm.reg, x->rm.regmem);
  fprintf (stdout, "  sib:  base %x  index %x  scale %x\n",
	   x->sib.base, x->sib.index, x->sib.scale);
  fprintf (stdout, "  rex: 64bit %x  extX %x  extY %x  extZ %x\n",
	   (x->rex & REX_W) != 0,
	   (x->rex & REX_R) != 0,
	   (x->rex & REX_X) != 0,
	   (x->rex & REX_B) != 0);
  for (j = 0; j < x->operands; j++)
    {
      fprintf (stdout, "    #%d:  ", j + 1);
      pt (x->types[j]);
      fprintf (stdout, "\n");
      if (x->types[j].bitfield.class == Reg
	  || x->types[j].bitfield.class == RegMMX
	  || x->types[j].bitfield.class == RegSIMD
	  || x->types[j].bitfield.class == SReg
	  || x->types[j].bitfield.class == RegCR
	  || x->types[j].bitfield.class == RegDR
	  || x->types[j].bitfield.class == RegTR)
	fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
      if (operand_type_check (x->types[j], imm))
	pe (x->op[j].imms);
      if (operand_type_check (x->types[j], disp))
	pe (x->op[j].disps);
    }
}

/* Print the opcode template T.  */

static void
pte (insn_template *t)
{
  unsigned int j;

  fprintf (stdout, " %d operands ", t->operands);
  fprintf (stdout, "opcode %x ", t->base_opcode);
  if (t->extension_opcode != None)
    fprintf (stdout, "ext %x ", t->extension_opcode);
  if (t->opcode_modifier.d)
    fprintf (stdout, "D");
  if (t->opcode_modifier.w)
    fprintf (stdout, "W");
  fprintf (stdout, "\n");
  for (j = 0; j < t->operands; j++)
    {
      fprintf (stdout, "    #%d type ", j + 1);
      pt (t->operand_types[j]);
      fprintf (stdout, "\n");
    }
}

/* Print the expression E.  */

static void
pe (expressionS *e)
{
  fprintf (stdout, "    operation     %d\n", e->X_op);
  fprintf (stdout, "    add_number    %ld (%lx)\n",
	   (long) e->X_add_number, (long) e->X_add_number);
  if (e->X_add_symbol)
    {
      fprintf (stdout, "    add_symbol    ");
      ps (e->X_add_symbol);
      fprintf (stdout, "\n");
    }
  if (e->X_op_symbol)
    {
      fprintf (stdout, "    op_symbol    ");
      ps (e->X_op_symbol);
      fprintf (stdout, "\n");
    }
}

/* Print the symbol S.  */

static void
ps (symbolS *s)
{
  fprintf (stdout, "%s type %s%s",
	   S_GET_NAME (s),
	   S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
	   segment_name (S_GET_SEGMENT (s)));
}

/* Table mapping operand-type masks to printable names, used by pt.  */

static struct type_name
  {
    i386_operand_type mask;
    const char *name;
  }
const type_names[] =
{
  { OPERAND_TYPE_REG8, "r8" },
  { OPERAND_TYPE_REG16, "r16" },
  { OPERAND_TYPE_REG32, "r32" },
  { OPERAND_TYPE_REG64, "r64" },
  { OPERAND_TYPE_ACC8, "acc8" },
  { OPERAND_TYPE_ACC16, "acc16" },
  { OPERAND_TYPE_ACC32, "acc32" },
  { OPERAND_TYPE_ACC64, "acc64" },
  { OPERAND_TYPE_IMM8, "i8" },
  { OPERAND_TYPE_IMM8, "i8s" },
  { OPERAND_TYPE_IMM16, "i16" },
  { OPERAND_TYPE_IMM32, "i32" },
  { OPERAND_TYPE_IMM32S, "i32s" },
  { OPERAND_TYPE_IMM64, "i64" },
  { OPERAND_TYPE_IMM1, "i1" },
  { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
  { OPERAND_TYPE_DISP8, "d8" },
  { OPERAND_TYPE_DISP16, "d16" },
  { OPERAND_TYPE_DISP32, "d32" },
  { OPERAND_TYPE_DISP32S, "d32s" },
  { OPERAND_TYPE_DISP64, "d64" },
  { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
  { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
  { OPERAND_TYPE_CONTROL, "control reg" },
  { OPERAND_TYPE_TEST, "test reg" },
  { OPERAND_TYPE_DEBUG, "debug reg" },
  { OPERAND_TYPE_FLOATREG, "FReg" },
  { OPERAND_TYPE_FLOATACC, "FAcc" },
  { OPERAND_TYPE_SREG, "SReg" },
  { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
  { OPERAND_TYPE_REGMMX, "rMMX" },
  { OPERAND_TYPE_REGXMM, "rXMM" },
  { OPERAND_TYPE_REGYMM, "rYMM" },
  { OPERAND_TYPE_REGZMM, "rZMM" },
  { OPERAND_TYPE_REGMASK, "Mask reg" },
  { OPERAND_TYPE_ESSEG, "es" },
};

/* Print every type name whose mask is fully contained in T.  */

static void
pt (i386_operand_type t)
{
  unsigned int j;
  i386_operand_type a;

  for (j = 0; j < ARRAY_SIZE (type_names); j++)
    {
      a = operand_type_and (t, type_names[j].mask);
      if (operand_type_equal (&a, &type_names[j].mask))
	fprintf (stdout, "%s, ", type_names[j].name);
    }
  fflush (stdout);
}

#endif /* DEBUG386 */
3184 static bfd_reloc_code_real_type
3185 reloc (unsigned int size
,
3188 bfd_reloc_code_real_type other
)
3190 if (other
!= NO_RELOC
)
3192 reloc_howto_type
*rel
;
3197 case BFD_RELOC_X86_64_GOT32
:
3198 return BFD_RELOC_X86_64_GOT64
;
3200 case BFD_RELOC_X86_64_GOTPLT64
:
3201 return BFD_RELOC_X86_64_GOTPLT64
;
3203 case BFD_RELOC_X86_64_PLTOFF64
:
3204 return BFD_RELOC_X86_64_PLTOFF64
;
3206 case BFD_RELOC_X86_64_GOTPC32
:
3207 other
= BFD_RELOC_X86_64_GOTPC64
;
3209 case BFD_RELOC_X86_64_GOTPCREL
:
3210 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3212 case BFD_RELOC_X86_64_TPOFF32
:
3213 other
= BFD_RELOC_X86_64_TPOFF64
;
3215 case BFD_RELOC_X86_64_DTPOFF32
:
3216 other
= BFD_RELOC_X86_64_DTPOFF64
;
3222 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3223 if (other
== BFD_RELOC_SIZE32
)
3226 other
= BFD_RELOC_SIZE64
;
3229 as_bad (_("there are no pc-relative size relocations"));
3235 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3236 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3239 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3241 as_bad (_("unknown relocation (%u)"), other
);
3242 else if (size
!= bfd_get_reloc_size (rel
))
3243 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3244 bfd_get_reloc_size (rel
),
3246 else if (pcrel
&& !rel
->pc_relative
)
3247 as_bad (_("non-pc-relative relocation for pc-relative field"));
3248 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3250 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3252 as_bad (_("relocated field and relocation type differ in signedness"));
3261 as_bad (_("there are no unsigned pc-relative relocations"));
3264 case 1: return BFD_RELOC_8_PCREL
;
3265 case 2: return BFD_RELOC_16_PCREL
;
3266 case 4: return BFD_RELOC_32_PCREL
;
3267 case 8: return BFD_RELOC_64_PCREL
;
3269 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3276 case 4: return BFD_RELOC_X86_64_32S
;
3281 case 1: return BFD_RELOC_8
;
3282 case 2: return BFD_RELOC_16
;
3283 case 4: return BFD_RELOC_32
;
3284 case 8: return BFD_RELOC_64
;
3286 as_bad (_("cannot do %s %u byte relocation"),
3287 sign
> 0 ? "signed" : "unsigned", size
);
3293 /* Here we decide which fixups can be adjusted to make them relative to
3294 the beginning of the section instead of the symbol. Basically we need
3295 to make sure that the dynamic relocations are done correctly, so in
3296 some cases we force the original symbol to be used. */
3299 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3301 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3305 /* Don't adjust pc-relative references to merge sections in 64-bit
3307 if (use_rela_relocations
3308 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3312 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3313 and changed later by validate_fix. */
3314 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3315 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3318 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3319 for size relocations. */
3320 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3321 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3322 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3323 || fixP
->fx_r_type
== BFD_RELOC_386_PLT32
3324 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3325 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3326 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3327 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3328 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3329 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3330 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3331 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3332 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3333 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3334 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3335 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3336 || fixP
->fx_r_type
== BFD_RELOC_X86_64_PLT32
3337 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3338 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3339 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3340 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3341 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3342 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3343 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3344 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3345 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3346 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3347 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3348 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3349 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3350 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3351 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3352 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
/* Classify an x87 mnemonic for Intel-syntax operand sizing:
   0 = not a math op (or fxsave/fxrstor), 1 = ordinary FP op,
   2 = integer op (fi...), 3 = control/state op (fldcw, fnstenv, ...).  */

static int
intel_float_operand (const char *mnemonic)
{
  /* Note that the value returned is meaningful only for opcodes with (memory)
     operands, hence the code here is free to improperly handle opcodes that
     have no operands (for better performance and smaller code). */

  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  switch (mnemonic[1])
    {
      /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
	 the fs segment override prefix not currently handled because no
	 call path can make opcodes without operands get here */
    case 'i':
      return 2 /* integer op */;
    case 'l':
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3; /* fldcw/fldenv */
      break;
    case 'n':
      if (mnemonic[2] != 'o' /* fnop */)
	return 3; /* non-waiting control op */
      break;
    case 'r':
      if (mnemonic[2] == 's')
	return 3; /* frstor/frstpm */
      break;
    case 's':
      if (mnemonic[2] == 'a')
	return 3; /* fsave */
      if (mnemonic[2] == 't')
	{
	  switch (mnemonic[3])
	    {
	    case 'c': /* fstcw */
	    case 'd': /* fstdw */
	    case 'e': /* fstenv */
	    case 's': /* fsts[gw] */
	      return 3;
	    default:
	      break;
	    }
	}
      break;
    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0; /* fxsave/fxrstor are not really math ops */
      break;
    }

  return 1;
}
3411 /* Build the VEX prefix. */
3414 build_vex_prefix (const insn_template
*t
)
3416 unsigned int register_specifier
;
3417 unsigned int implied_prefix
;
3418 unsigned int vector_length
;
3421 /* Check register specifier. */
3422 if (i
.vex
.register_specifier
)
3424 register_specifier
=
3425 ~register_number (i
.vex
.register_specifier
) & 0xf;
3426 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3429 register_specifier
= 0xf;
3431 /* Use 2-byte VEX prefix by swapping destination and source operand
3432 if there are more than 1 register operand. */
3433 if (i
.reg_operands
> 1
3434 && i
.vec_encoding
!= vex_encoding_vex3
3435 && i
.dir_encoding
== dir_encoding_default
3436 && i
.operands
== i
.reg_operands
3437 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3438 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3439 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3442 unsigned int xchg
= i
.operands
- 1;
3443 union i386_op temp_op
;
3444 i386_operand_type temp_type
;
3446 temp_type
= i
.types
[xchg
];
3447 i
.types
[xchg
] = i
.types
[0];
3448 i
.types
[0] = temp_type
;
3449 temp_op
= i
.op
[xchg
];
3450 i
.op
[xchg
] = i
.op
[0];
3453 gas_assert (i
.rm
.mode
== 3);
3457 i
.rm
.regmem
= i
.rm
.reg
;
3460 if (i
.tm
.opcode_modifier
.d
)
3461 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3462 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
3463 else /* Use the next insn. */
3467 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3468 are no memory operands and at least 3 register ones. */
3469 if (i
.reg_operands
>= 3
3470 && i
.vec_encoding
!= vex_encoding_vex3
3471 && i
.reg_operands
== i
.operands
- i
.imm_operands
3472 && i
.tm
.opcode_modifier
.vex
3473 && i
.tm
.opcode_modifier
.commutative
3474 && (i
.tm
.opcode_modifier
.sse2avx
|| optimize
> 1)
3476 && i
.vex
.register_specifier
3477 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3479 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3480 union i386_op temp_op
;
3481 i386_operand_type temp_type
;
3483 gas_assert (i
.tm
.opcode_modifier
.vexopcode
== VEX0F
);
3484 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3485 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3486 &i
.types
[i
.operands
- 3]));
3487 gas_assert (i
.rm
.mode
== 3);
3489 temp_type
= i
.types
[xchg
];
3490 i
.types
[xchg
] = i
.types
[xchg
+ 1];
3491 i
.types
[xchg
+ 1] = temp_type
;
3492 temp_op
= i
.op
[xchg
];
3493 i
.op
[xchg
] = i
.op
[xchg
+ 1];
3494 i
.op
[xchg
+ 1] = temp_op
;
3497 xchg
= i
.rm
.regmem
| 8;
3498 i
.rm
.regmem
= ~register_specifier
& 0xf;
3499 gas_assert (!(i
.rm
.regmem
& 8));
3500 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3501 register_specifier
= ~xchg
& 0xf;
3504 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3505 vector_length
= avxscalar
;
3506 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3512 /* Determine vector length from the last multi-length vector
3515 for (op
= t
->operands
; op
--;)
3516 if (t
->operand_types
[op
].bitfield
.xmmword
3517 && t
->operand_types
[op
].bitfield
.ymmword
3518 && i
.types
[op
].bitfield
.ymmword
)
3525 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3530 case DATA_PREFIX_OPCODE
:
3533 case REPE_PREFIX_OPCODE
:
3536 case REPNE_PREFIX_OPCODE
:
3543 /* Check the REX.W bit and VEXW. */
3544 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3545 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3546 else if (i
.tm
.opcode_modifier
.vexw
)
3547 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3549 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3551 /* Use 2-byte VEX prefix if possible. */
3553 && i
.vec_encoding
!= vex_encoding_vex3
3554 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3555 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3557 /* 2-byte VEX prefix. */
3561 i
.vex
.bytes
[0] = 0xc5;
3563 /* Check the REX.R bit. */
3564 r
= (i
.rex
& REX_R
) ? 0 : 1;
3565 i
.vex
.bytes
[1] = (r
<< 7
3566 | register_specifier
<< 3
3567 | vector_length
<< 2
3572 /* 3-byte VEX prefix. */
3577 switch (i
.tm
.opcode_modifier
.vexopcode
)
3581 i
.vex
.bytes
[0] = 0xc4;
3585 i
.vex
.bytes
[0] = 0xc4;
3589 i
.vex
.bytes
[0] = 0xc4;
3593 i
.vex
.bytes
[0] = 0x8f;
3597 i
.vex
.bytes
[0] = 0x8f;
3601 i
.vex
.bytes
[0] = 0x8f;
3607 /* The high 3 bits of the second VEX byte are 1's compliment
3608 of RXB bits from REX. */
3609 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3611 i
.vex
.bytes
[2] = (w
<< 7
3612 | register_specifier
<< 3
3613 | vector_length
<< 2
3618 static INLINE bfd_boolean
3619 is_evex_encoding (const insn_template
*t
)
3621 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3622 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3623 || t
->opcode_modifier
.sae
;
3626 static INLINE bfd_boolean
3627 is_any_vex_encoding (const insn_template
*t
)
3629 return t
->opcode_modifier
.vex
|| t
->opcode_modifier
.vexopcode
3630 || is_evex_encoding (t
);
3633 /* Build the EVEX prefix. */
3636 build_evex_prefix (void)
3638 unsigned int register_specifier
;
3639 unsigned int implied_prefix
;
3641 rex_byte vrex_used
= 0;
3643 /* Check register specifier. */
3644 if (i
.vex
.register_specifier
)
3646 gas_assert ((i
.vrex
& REX_X
) == 0);
3648 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3649 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3650 register_specifier
+= 8;
3651 /* The upper 16 registers are encoded in the fourth byte of the
3653 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3654 i
.vex
.bytes
[3] = 0x8;
3655 register_specifier
= ~register_specifier
& 0xf;
3659 register_specifier
= 0xf;
3661 /* Encode upper 16 vector index register in the fourth byte of
3663 if (!(i
.vrex
& REX_X
))
3664 i
.vex
.bytes
[3] = 0x8;
3669 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3674 case DATA_PREFIX_OPCODE
:
3677 case REPE_PREFIX_OPCODE
:
3680 case REPNE_PREFIX_OPCODE
:
3687 /* 4 byte EVEX prefix. */
3689 i
.vex
.bytes
[0] = 0x62;
3692 switch (i
.tm
.opcode_modifier
.vexopcode
)
3708 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3710 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3712 /* The fifth bit of the second EVEX byte is 1's compliment of the
3713 REX_R bit in VREX. */
3714 if (!(i
.vrex
& REX_R
))
3715 i
.vex
.bytes
[1] |= 0x10;
3719 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3721 /* When all operands are registers, the REX_X bit in REX is not
3722 used. We reuse it to encode the upper 16 registers, which is
3723 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3724 as 1's compliment. */
3725 if ((i
.vrex
& REX_B
))
3728 i
.vex
.bytes
[1] &= ~0x40;
3732 /* EVEX instructions shouldn't need the REX prefix. */
3733 i
.vrex
&= ~vrex_used
;
3734 gas_assert (i
.vrex
== 0);
3736 /* Check the REX.W bit and VEXW. */
3737 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3738 w
= (evexwig
== evexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3739 else if (i
.tm
.opcode_modifier
.vexw
)
3740 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3742 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: evexwig
== evexw1
) ? 1 : 0;
3744 /* Encode the U bit. */
3745 implied_prefix
|= 0x4;
3747 /* The third byte of the EVEX prefix. */
3748 i
.vex
.bytes
[2] = (w
<< 7 | register_specifier
<< 3 | implied_prefix
);
3750 /* The fourth byte of the EVEX prefix. */
3751 /* The zeroing-masking bit. */
3752 if (i
.mask
&& i
.mask
->zeroing
)
3753 i
.vex
.bytes
[3] |= 0x80;
3755 /* Don't always set the broadcast bit if there is no RC. */
3758 /* Encode the vector length. */
3759 unsigned int vec_length
;
3761 if (!i
.tm
.opcode_modifier
.evex
3762 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
3766 /* Determine vector length from the last multi-length vector
3769 for (op
= i
.operands
; op
--;)
3770 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
3771 + i
.tm
.operand_types
[op
].bitfield
.ymmword
3772 + i
.tm
.operand_types
[op
].bitfield
.zmmword
> 1)
3774 if (i
.types
[op
].bitfield
.zmmword
)
3776 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3779 else if (i
.types
[op
].bitfield
.ymmword
)
3781 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3784 else if (i
.types
[op
].bitfield
.xmmword
)
3786 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3789 else if (i
.broadcast
&& (int) op
== i
.broadcast
->operand
)
3791 switch (i
.broadcast
->bytes
)
3794 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3797 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3800 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3809 if (op
>= MAX_OPERANDS
)
3813 switch (i
.tm
.opcode_modifier
.evex
)
3815 case EVEXLIG
: /* LL' is ignored */
3816 vec_length
= evexlig
<< 5;
3819 vec_length
= 0 << 5;
3822 vec_length
= 1 << 5;
3825 vec_length
= 2 << 5;
3831 i
.vex
.bytes
[3] |= vec_length
;
3832 /* Encode the broadcast bit. */
3834 i
.vex
.bytes
[3] |= 0x10;
3838 if (i
.rounding
->type
!= saeonly
)
3839 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
->type
<< 5);
3841 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3844 if (i
.mask
&& i
.mask
->mask
)
3845 i
.vex
.bytes
[3] |= i
.mask
->mask
->reg_num
;
3849 process_immext (void)
3853 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3854 which is coded in the same place as an 8-bit immediate field
3855 would be. Here we fake an 8-bit immediate operand from the
3856 opcode suffix stored in tm.extension_opcode.
3858 AVX instructions also use this encoding, for some of
3859 3 argument instructions. */
3861 gas_assert (i
.imm_operands
<= 1
3863 || (is_any_vex_encoding (&i
.tm
)
3864 && i
.operands
<= 4)));
3866 exp
= &im_expressions
[i
.imm_operands
++];
3867 i
.op
[i
.operands
].imms
= exp
;
3868 i
.types
[i
.operands
] = imm8
;
3870 exp
->X_op
= O_constant
;
3871 exp
->X_add_number
= i
.tm
.extension_opcode
;
3872 i
.tm
.extension_opcode
= None
;
3879 switch (i
.tm
.opcode_modifier
.hleprefixok
)
3884 as_bad (_("invalid instruction `%s' after `%s'"),
3885 i
.tm
.name
, i
.hle_prefix
);
3888 if (i
.prefix
[LOCK_PREFIX
])
3890 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
3894 case HLEPrefixRelease
:
3895 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
3897 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3901 if (i
.mem_operands
== 0 || !(i
.flags
[i
.operands
- 1] & Operand_Mem
))
3903 as_bad (_("memory destination needed for instruction `%s'"
3904 " after `xrelease'"), i
.tm
.name
);
3911 /* Try the shortest encoding by shortening operand size. */
3914 optimize_encoding (void)
3918 if (optimize_for_space
3919 && i
.reg_operands
== 1
3920 && i
.imm_operands
== 1
3921 && !i
.types
[1].bitfield
.byte
3922 && i
.op
[0].imms
->X_op
== O_constant
3923 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
3924 && ((i
.tm
.base_opcode
== 0xa8
3925 && i
.tm
.extension_opcode
== None
)
3926 || (i
.tm
.base_opcode
== 0xf6
3927 && i
.tm
.extension_opcode
== 0x0)))
3930 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
3932 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
3933 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
3935 i
.types
[1].bitfield
.byte
= 1;
3936 /* Ignore the suffix. */
3938 if (base_regnum
>= 4
3939 && !(i
.op
[1].regs
->reg_flags
& RegRex
))
3941 /* Handle SP, BP, SI and DI registers. */
3942 if (i
.types
[1].bitfield
.word
)
3944 else if (i
.types
[1].bitfield
.dword
)
3952 else if (flag_code
== CODE_64BIT
3953 && ((i
.types
[1].bitfield
.qword
3954 && i
.reg_operands
== 1
3955 && i
.imm_operands
== 1
3956 && i
.op
[0].imms
->X_op
== O_constant
3957 && ((i
.tm
.base_opcode
== 0xb8
3958 && i
.tm
.extension_opcode
== None
3959 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
3960 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
3961 && (((i
.tm
.base_opcode
== 0x24
3962 || i
.tm
.base_opcode
== 0xa8)
3963 && i
.tm
.extension_opcode
== None
)
3964 || (i
.tm
.base_opcode
== 0x80
3965 && i
.tm
.extension_opcode
== 0x4)
3966 || ((i
.tm
.base_opcode
== 0xf6
3967 || (i
.tm
.base_opcode
| 1) == 0xc7)
3968 && i
.tm
.extension_opcode
== 0x0)))
3969 || (fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
3970 && i
.tm
.base_opcode
== 0x83
3971 && i
.tm
.extension_opcode
== 0x4)))
3972 || (i
.types
[0].bitfield
.qword
3973 && ((i
.reg_operands
== 2
3974 && i
.op
[0].regs
== i
.op
[1].regs
3975 && ((i
.tm
.base_opcode
== 0x30
3976 || i
.tm
.base_opcode
== 0x28)
3977 && i
.tm
.extension_opcode
== None
))
3978 || (i
.reg_operands
== 1
3980 && i
.tm
.base_opcode
== 0x30
3981 && i
.tm
.extension_opcode
== None
)))))
3984 andq $imm31, %r64 -> andl $imm31, %r32
3985 andq $imm7, %r64 -> andl $imm7, %r32
3986 testq $imm31, %r64 -> testl $imm31, %r32
3987 xorq %r64, %r64 -> xorl %r32, %r32
3988 subq %r64, %r64 -> subl %r32, %r32
3989 movq $imm31, %r64 -> movl $imm31, %r32
3990 movq $imm32, %r64 -> movl $imm32, %r32
3992 i
.tm
.opcode_modifier
.norex64
= 1;
3993 if (i
.tm
.base_opcode
== 0xb8 || (i
.tm
.base_opcode
| 1) == 0xc7)
3996 movq $imm31, %r64 -> movl $imm31, %r32
3997 movq $imm32, %r64 -> movl $imm32, %r32
3999 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
4000 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
4001 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
4002 i
.types
[0].bitfield
.imm32
= 1;
4003 i
.types
[0].bitfield
.imm32s
= 0;
4004 i
.types
[0].bitfield
.imm64
= 0;
4005 i
.types
[1].bitfield
.dword
= 1;
4006 i
.types
[1].bitfield
.qword
= 0;
4007 if ((i
.tm
.base_opcode
| 1) == 0xc7)
4010 movq $imm31, %r64 -> movl $imm31, %r32
4012 i
.tm
.base_opcode
= 0xb8;
4013 i
.tm
.extension_opcode
= None
;
4014 i
.tm
.opcode_modifier
.w
= 0;
4015 i
.tm
.opcode_modifier
.shortform
= 1;
4016 i
.tm
.opcode_modifier
.modrm
= 0;
4020 else if (optimize
> 1
4021 && !optimize_for_space
4022 && i
.reg_operands
== 2
4023 && i
.op
[0].regs
== i
.op
[1].regs
4024 && ((i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x8
4025 || (i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x20)
4026 && (flag_code
!= CODE_64BIT
|| !i
.types
[0].bitfield
.dword
))
4029 andb %rN, %rN -> testb %rN, %rN
4030 andw %rN, %rN -> testw %rN, %rN
4031 andq %rN, %rN -> testq %rN, %rN
4032 orb %rN, %rN -> testb %rN, %rN
4033 orw %rN, %rN -> testw %rN, %rN
4034 orq %rN, %rN -> testq %rN, %rN
4036 and outside of 64-bit mode
4038 andl %rN, %rN -> testl %rN, %rN
4039 orl %rN, %rN -> testl %rN, %rN
4041 i
.tm
.base_opcode
= 0x84 | (i
.tm
.base_opcode
& 1);
4043 else if (i
.reg_operands
== 3
4044 && i
.op
[0].regs
== i
.op
[1].regs
4045 && !i
.types
[2].bitfield
.xmmword
4046 && (i
.tm
.opcode_modifier
.vex
4047 || ((!i
.mask
|| i
.mask
->zeroing
)
4049 && is_evex_encoding (&i
.tm
)
4050 && (i
.vec_encoding
!= vex_encoding_evex
4051 || cpu_arch_isa_flags
.bitfield
.cpuavx512vl
4052 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
4053 || (i
.tm
.operand_types
[2].bitfield
.zmmword
4054 && i
.types
[2].bitfield
.ymmword
))))
4055 && ((i
.tm
.base_opcode
== 0x55
4056 || i
.tm
.base_opcode
== 0x6655
4057 || i
.tm
.base_opcode
== 0x66df
4058 || i
.tm
.base_opcode
== 0x57
4059 || i
.tm
.base_opcode
== 0x6657
4060 || i
.tm
.base_opcode
== 0x66ef
4061 || i
.tm
.base_opcode
== 0x66f8
4062 || i
.tm
.base_opcode
== 0x66f9
4063 || i
.tm
.base_opcode
== 0x66fa
4064 || i
.tm
.base_opcode
== 0x66fb
4065 || i
.tm
.base_opcode
== 0x42
4066 || i
.tm
.base_opcode
== 0x6642
4067 || i
.tm
.base_opcode
== 0x47
4068 || i
.tm
.base_opcode
== 0x6647)
4069 && i
.tm
.extension_opcode
== None
))
4072 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4074 EVEX VOP %zmmM, %zmmM, %zmmN
4075 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4076 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4077 EVEX VOP %ymmM, %ymmM, %ymmN
4078 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4079 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4080 VEX VOP %ymmM, %ymmM, %ymmN
4081 -> VEX VOP %xmmM, %xmmM, %xmmN
4082 VOP, one of vpandn and vpxor:
4083 VEX VOP %ymmM, %ymmM, %ymmN
4084 -> VEX VOP %xmmM, %xmmM, %xmmN
4085 VOP, one of vpandnd and vpandnq:
4086 EVEX VOP %zmmM, %zmmM, %zmmN
4087 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4088 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4089 EVEX VOP %ymmM, %ymmM, %ymmN
4090 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4091 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4092 VOP, one of vpxord and vpxorq:
4093 EVEX VOP %zmmM, %zmmM, %zmmN
4094 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4095 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4096 EVEX VOP %ymmM, %ymmM, %ymmN
4097 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4098 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4099 VOP, one of kxord and kxorq:
4100 VEX VOP %kM, %kM, %kN
4101 -> VEX kxorw %kM, %kM, %kN
4102 VOP, one of kandnd and kandnq:
4103 VEX VOP %kM, %kM, %kN
4104 -> VEX kandnw %kM, %kM, %kN
4106 if (is_evex_encoding (&i
.tm
))
4108 if (i
.vec_encoding
!= vex_encoding_evex
)
4110 i
.tm
.opcode_modifier
.vex
= VEX128
;
4111 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4112 i
.tm
.opcode_modifier
.evex
= 0;
4114 else if (optimize
> 1)
4115 i
.tm
.opcode_modifier
.evex
= EVEX128
;
4119 else if (i
.tm
.operand_types
[0].bitfield
.class == RegMask
)
4121 i
.tm
.base_opcode
&= 0xff;
4122 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4125 i
.tm
.opcode_modifier
.vex
= VEX128
;
4127 if (i
.tm
.opcode_modifier
.vex
)
4128 for (j
= 0; j
< 3; j
++)
4130 i
.types
[j
].bitfield
.xmmword
= 1;
4131 i
.types
[j
].bitfield
.ymmword
= 0;
4134 else if (i
.vec_encoding
!= vex_encoding_evex
4135 && !i
.types
[0].bitfield
.zmmword
4136 && !i
.types
[1].bitfield
.zmmword
4139 && is_evex_encoding (&i
.tm
)
4140 && ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x666f
4141 || (i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf36f
4142 || (i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf26f
4143 || (i
.tm
.base_opcode
& ~4) == 0x66db
4144 || (i
.tm
.base_opcode
& ~4) == 0x66eb)
4145 && i
.tm
.extension_opcode
== None
)
4148 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4149 vmovdqu32 and vmovdqu64:
4150 EVEX VOP %xmmM, %xmmN
4151 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4152 EVEX VOP %ymmM, %ymmN
4153 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4155 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4157 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4159 -> VEX mvmovdqa|vmovdquem, %xmmN (N < 16)
4161 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4162 VOP, one of vpand, vpandn, vpor, vpxor:
4163 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4164 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4165 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4166 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4167 EVEX VOP{d,q} mem, %xmmM, %xmmN
4168 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4169 EVEX VOP{d,q} mem, %ymmM, %ymmN
4170 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4172 for (j
= 0; j
< i
.operands
; j
++)
4173 if (operand_type_check (i
.types
[j
], disp
)
4174 && i
.op
[j
].disps
->X_op
== O_constant
)
4176 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4177 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4178 bytes, we choose EVEX Disp8 over VEX Disp32. */
4179 int evex_disp8
, vex_disp8
;
4180 unsigned int memshift
= i
.memshift
;
4181 offsetT n
= i
.op
[j
].disps
->X_add_number
;
4183 evex_disp8
= fits_in_disp8 (n
);
4185 vex_disp8
= fits_in_disp8 (n
);
4186 if (evex_disp8
!= vex_disp8
)
4188 i
.memshift
= memshift
;
4192 i
.types
[j
].bitfield
.disp8
= vex_disp8
;
4195 if ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf26f)
4196 i
.tm
.base_opcode
^= 0xf36f ^ 0xf26f;
4197 i
.tm
.opcode_modifier
.vex
4198 = i
.types
[0].bitfield
.ymmword
? VEX256
: VEX128
;
4199 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4200 /* VPAND, VPOR, and VPXOR are commutative. */
4201 if (i
.reg_operands
== 3 && i
.tm
.base_opcode
!= 0x66df)
4202 i
.tm
.opcode_modifier
.commutative
= 1;
4203 i
.tm
.opcode_modifier
.evex
= 0;
4204 i
.tm
.opcode_modifier
.masking
= 0;
4205 i
.tm
.opcode_modifier
.broadcast
= 0;
4206 i
.tm
.opcode_modifier
.disp8memshift
= 0;
4209 i
.types
[j
].bitfield
.disp8
4210 = fits_in_disp8 (i
.op
[j
].disps
->X_add_number
);
4214 /* This is the guts of the machine-dependent assembler. LINE points to a
4215 machine dependent instruction. This function is supposed to emit
4216 the frags/bytes it assembles to. */
4219 md_assemble (char *line
)
4222 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
4223 const insn_template
*t
;
4225 /* Initialize globals. */
4226 memset (&i
, '\0', sizeof (i
));
4227 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4228 i
.reloc
[j
] = NO_RELOC
;
4229 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
4230 memset (im_expressions
, '\0', sizeof (im_expressions
));
4231 save_stack_p
= save_stack
;
4233 /* First parse an instruction mnemonic & call i386_operand for the operands.
4234 We assume that the scrubber has arranged it so that line[0] is the valid
4235 start of a (possibly prefixed) mnemonic. */
4237 line
= parse_insn (line
, mnemonic
);
4240 mnem_suffix
= i
.suffix
;
4242 line
= parse_operands (line
, mnemonic
);
4244 xfree (i
.memop1_string
);
4245 i
.memop1_string
= NULL
;
4249 /* Now we've parsed the mnemonic into a set of templates, and have the
4250 operands at hand. */
4252 /* All intel opcodes have reversed operands except for "bound" and
4253 "enter". We also don't reverse intersegment "jmp" and "call"
4254 instructions with 2 immediate operands so that the immediate segment
4255 precedes the offset, as it does when in AT&T mode. */
4258 && (strcmp (mnemonic
, "bound") != 0)
4259 && (strcmp (mnemonic
, "invlpga") != 0)
4260 && !(operand_type_check (i
.types
[0], imm
)
4261 && operand_type_check (i
.types
[1], imm
)))
4264 /* The order of the immediates should be reversed
4265 for 2 immediates extrq and insertq instructions */
4266 if (i
.imm_operands
== 2
4267 && (strcmp (mnemonic
, "extrq") == 0
4268 || strcmp (mnemonic
, "insertq") == 0))
4269 swap_2_operands (0, 1);
4274 /* Don't optimize displacement for movabs since it only takes 64bit
4277 && i
.disp_encoding
!= disp_encoding_32bit
4278 && (flag_code
!= CODE_64BIT
4279 || strcmp (mnemonic
, "movabs") != 0))
4282 /* Next, we find a template that matches the given insn,
4283 making sure the overlap of the given operands types is consistent
4284 with the template operand types. */
4286 if (!(t
= match_template (mnem_suffix
)))
4289 if (sse_check
!= check_none
4290 && !i
.tm
.opcode_modifier
.noavx
4291 && !i
.tm
.cpu_flags
.bitfield
.cpuavx
4292 && (i
.tm
.cpu_flags
.bitfield
.cpusse
4293 || i
.tm
.cpu_flags
.bitfield
.cpusse2
4294 || i
.tm
.cpu_flags
.bitfield
.cpusse3
4295 || i
.tm
.cpu_flags
.bitfield
.cpussse3
4296 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
4297 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
4298 || i
.tm
.cpu_flags
.bitfield
.cpupclmul
4299 || i
.tm
.cpu_flags
.bitfield
.cpuaes
4300 || i
.tm
.cpu_flags
.bitfield
.cpugfni
))
4302 (sse_check
== check_warning
4304 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
4307 /* Zap movzx and movsx suffix. The suffix has been set from
4308 "word ptr" or "byte ptr" on the source operand in Intel syntax
4309 or extracted from mnemonic in AT&T syntax. But we'll use
4310 the destination register to choose the suffix for encoding. */
4311 if ((i
.tm
.base_opcode
& ~9) == 0x0fb6)
4313 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
4314 there is no suffix, the default will be byte extension. */
4315 if (i
.reg_operands
!= 2
4318 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
4323 if (i
.tm
.opcode_modifier
.fwait
)
4324 if (!add_prefix (FWAIT_OPCODE
))
4327 /* Check if REP prefix is OK. */
4328 if (i
.rep_prefix
&& !i
.tm
.opcode_modifier
.repprefixok
)
4330 as_bad (_("invalid instruction `%s' after `%s'"),
4331 i
.tm
.name
, i
.rep_prefix
);
4335 /* Check for lock without a lockable instruction. Destination operand
4336 must be memory unless it is xchg (0x86). */
4337 if (i
.prefix
[LOCK_PREFIX
]
4338 && (!i
.tm
.opcode_modifier
.islockable
4339 || i
.mem_operands
== 0
4340 || (i
.tm
.base_opcode
!= 0x86
4341 && !(i
.flags
[i
.operands
- 1] & Operand_Mem
))))
4343 as_bad (_("expecting lockable instruction after `lock'"));
4347 /* Check for data size prefix on VEX/XOP/EVEX encoded insns. */
4348 if (i
.prefix
[DATA_PREFIX
] && is_any_vex_encoding (&i
.tm
))
4350 as_bad (_("data size prefix invalid with `%s'"), i
.tm
.name
);
4354 /* Check if HLE prefix is OK. */
4355 if (i
.hle_prefix
&& !check_hle ())
4358 /* Check BND prefix. */
4359 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
4360 as_bad (_("expecting valid branch instruction after `bnd'"));
4362 /* Check NOTRACK prefix. */
4363 if (i
.notrack_prefix
&& !i
.tm
.opcode_modifier
.notrackprefixok
)
4364 as_bad (_("expecting indirect branch instruction after `notrack'"));
4366 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
4368 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4369 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
4370 else if (flag_code
!= CODE_16BIT
4371 ? i
.prefix
[ADDR_PREFIX
]
4372 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
4373 as_bad (_("16-bit address isn't allowed in MPX instructions"));
4376 /* Insert BND prefix. */
4377 if (add_bnd_prefix
&& i
.tm
.opcode_modifier
.bndprefixok
)
4379 if (!i
.prefix
[BND_PREFIX
])
4380 add_prefix (BND_PREFIX_OPCODE
);
4381 else if (i
.prefix
[BND_PREFIX
] != BND_PREFIX_OPCODE
)
4383 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
4384 i
.prefix
[BND_PREFIX
] = BND_PREFIX_OPCODE
;
4388 /* Check string instruction segment overrides. */
4389 if (i
.tm
.opcode_modifier
.isstring
&& i
.mem_operands
!= 0)
4391 if (!check_string ())
4393 i
.disp_operands
= 0;
4396 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
4397 optimize_encoding ();
4399 if (!process_suffix ())
4402 /* Update operand types. */
4403 for (j
= 0; j
< i
.operands
; j
++)
4404 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
4406 /* Make still unresolved immediate matches conform to size of immediate
4407 given in i.suffix. */
4408 if (!finalize_imm ())
4411 if (i
.types
[0].bitfield
.imm1
)
4412 i
.imm_operands
= 0; /* kludge for shift insns. */
4414 /* We only need to check those implicit registers for instructions
4415 with 3 operands or less. */
4416 if (i
.operands
<= 3)
4417 for (j
= 0; j
< i
.operands
; j
++)
4418 if (i
.types
[j
].bitfield
.instance
!= InstanceNone
4419 && !i
.types
[j
].bitfield
.xmmword
)
4422 /* ImmExt should be processed after SSE2AVX. */
4423 if (!i
.tm
.opcode_modifier
.sse2avx
4424 && i
.tm
.opcode_modifier
.immext
)
4427 /* For insns with operands there are more diddles to do to the opcode. */
4430 if (!process_operands ())
4433 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
4435 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
4436 as_warn (_("translating to `%sp'"), i
.tm
.name
);
4439 if (is_any_vex_encoding (&i
.tm
))
4441 if (!cpu_arch_flags
.bitfield
.cpui286
)
4443 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
4448 if (i
.tm
.opcode_modifier
.vex
)
4449 build_vex_prefix (t
);
4451 build_evex_prefix ();
4454 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
4455 instructions may define INT_OPCODE as well, so avoid this corner
4456 case for those instructions that use MODRM. */
4457 if (i
.tm
.base_opcode
== INT_OPCODE
4458 && !i
.tm
.opcode_modifier
.modrm
4459 && i
.op
[0].imms
->X_add_number
== 3)
4461 i
.tm
.base_opcode
= INT3_OPCODE
;
4465 if ((i
.tm
.opcode_modifier
.jump
4466 || i
.tm
.opcode_modifier
.jumpbyte
4467 || i
.tm
.opcode_modifier
.jumpdword
)
4468 && i
.op
[0].disps
->X_op
== O_constant
)
4470 /* Convert "jmp constant" (and "call constant") to a jump (call) to
4471 the absolute address given by the constant. Since ix86 jumps and
4472 calls are pc relative, we need to generate a reloc. */
4473 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
4474 i
.op
[0].disps
->X_op
= O_symbol
;
4477 if (i
.tm
.opcode_modifier
.rex64
)
4480 /* For 8 bit registers we need an empty rex prefix. Also if the
4481 instruction already has a prefix, we need to convert old
4482 registers to new ones. */
4484 if ((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
4485 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
4486 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
4487 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
4488 || (((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
)
4489 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
))
4494 i
.rex
|= REX_OPCODE
;
4495 for (x
= 0; x
< 2; x
++)
4497 /* Look for 8 bit operand that uses old registers. */
4498 if (i
.types
[x
].bitfield
.class == Reg
&& i
.types
[x
].bitfield
.byte
4499 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
4501 /* In case it is "hi" register, give up. */
4502 if (i
.op
[x
].regs
->reg_num
> 3)
4503 as_bad (_("can't encode register '%s%s' in an "
4504 "instruction requiring REX prefix."),
4505 register_prefix
, i
.op
[x
].regs
->reg_name
);
4507 /* Otherwise it is equivalent to the extended register.
4508 Since the encoding doesn't change this is merely
4509 cosmetic cleanup for debug output. */
4511 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
4516 if (i
.rex
== 0 && i
.rex_encoding
)
4518 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
4519 that uses legacy register. If it is "hi" register, don't add
4520 the REX_OPCODE byte. */
4522 for (x
= 0; x
< 2; x
++)
4523 if (i
.types
[x
].bitfield
.class == Reg
4524 && i
.types
[x
].bitfield
.byte
4525 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
4526 && i
.op
[x
].regs
->reg_num
> 3)
4528 i
.rex_encoding
= FALSE
;
4537 add_prefix (REX_OPCODE
| i
.rex
);
4539 /* We are ready to output the insn. */
4544 parse_insn (char *line
, char *mnemonic
)
4547 char *token_start
= l
;
4550 const insn_template
*t
;
4556 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
4561 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
4563 as_bad (_("no such instruction: `%s'"), token_start
);
4568 if (!is_space_char (*l
)
4569 && *l
!= END_OF_INSN
4571 || (*l
!= PREFIX_SEPARATOR
4574 as_bad (_("invalid character %s in mnemonic"),
4575 output_invalid (*l
));
4578 if (token_start
== l
)
4580 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
4581 as_bad (_("expecting prefix; got nothing"));
4583 as_bad (_("expecting mnemonic; got nothing"));
4587 /* Look up instruction (or prefix) via hash table. */
4588 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
4590 if (*l
!= END_OF_INSN
4591 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
4592 && current_templates
4593 && current_templates
->start
->opcode_modifier
.isprefix
)
4595 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
4597 as_bad ((flag_code
!= CODE_64BIT
4598 ? _("`%s' is only supported in 64-bit mode")
4599 : _("`%s' is not supported in 64-bit mode")),
4600 current_templates
->start
->name
);
4603 /* If we are in 16-bit mode, do not allow addr16 or data16.
4604 Similarly, in 32-bit mode, do not allow addr32 or data32. */
4605 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
4606 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
4607 && flag_code
!= CODE_64BIT
4608 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
4609 ^ (flag_code
== CODE_16BIT
)))
4611 as_bad (_("redundant %s prefix"),
4612 current_templates
->start
->name
);
4615 if (current_templates
->start
->opcode_length
== 0)
4617 /* Handle pseudo prefixes. */
4618 switch (current_templates
->start
->base_opcode
)
4622 i
.disp_encoding
= disp_encoding_8bit
;
4626 i
.disp_encoding
= disp_encoding_32bit
;
4630 i
.dir_encoding
= dir_encoding_load
;
4634 i
.dir_encoding
= dir_encoding_store
;
4638 i
.vec_encoding
= vex_encoding_vex2
;
4642 i
.vec_encoding
= vex_encoding_vex3
;
4646 i
.vec_encoding
= vex_encoding_evex
;
4650 i
.rex_encoding
= TRUE
;
4654 i
.no_optimize
= TRUE
;
4662 /* Add prefix, checking for repeated prefixes. */
4663 switch (add_prefix (current_templates
->start
->base_opcode
))
4668 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
4669 i
.notrack_prefix
= current_templates
->start
->name
;
4672 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
4673 i
.hle_prefix
= current_templates
->start
->name
;
4674 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
4675 i
.bnd_prefix
= current_templates
->start
->name
;
4677 i
.rep_prefix
= current_templates
->start
->name
;
4683 /* Skip past PREFIX_SEPARATOR and reset token_start. */
4690 if (!current_templates
)
4692 /* Deprecated functionality (new code should use pseudo-prefixes instead):
4693 Check if we should swap operand or force 32bit displacement in
4695 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
4696 i
.dir_encoding
= dir_encoding_swap
;
4697 else if (mnem_p
- 3 == dot_p
4700 i
.disp_encoding
= disp_encoding_8bit
;
4701 else if (mnem_p
- 4 == dot_p
4705 i
.disp_encoding
= disp_encoding_32bit
;
4710 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
4713 if (!current_templates
)
4716 if (mnem_p
> mnemonic
)
4718 /* See if we can get a match by trimming off a suffix. */
4721 case WORD_MNEM_SUFFIX
:
4722 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
4723 i
.suffix
= SHORT_MNEM_SUFFIX
;
4726 case BYTE_MNEM_SUFFIX
:
4727 case QWORD_MNEM_SUFFIX
:
4728 i
.suffix
= mnem_p
[-1];
4730 current_templates
= (const templates
*) hash_find (op_hash
,
4733 case SHORT_MNEM_SUFFIX
:
4734 case LONG_MNEM_SUFFIX
:
4737 i
.suffix
= mnem_p
[-1];
4739 current_templates
= (const templates
*) hash_find (op_hash
,
4748 if (intel_float_operand (mnemonic
) == 1)
4749 i
.suffix
= SHORT_MNEM_SUFFIX
;
4751 i
.suffix
= LONG_MNEM_SUFFIX
;
4753 current_templates
= (const templates
*) hash_find (op_hash
,
4760 if (!current_templates
)
4762 as_bad (_("no such instruction: `%s'"), token_start
);
4767 if (current_templates
->start
->opcode_modifier
.jump
4768 || current_templates
->start
->opcode_modifier
.jumpbyte
)
4770 /* Check for a branch hint. We allow ",pt" and ",pn" for
4771 predict taken and predict not taken respectively.
4772 I'm not sure that branch hints actually do anything on loop
4773 and jcxz insns (JumpByte) for current Pentium4 chips. They
4774 may work in the future and it doesn't hurt to accept them
4776 if (l
[0] == ',' && l
[1] == 'p')
4780 if (!add_prefix (DS_PREFIX_OPCODE
))
4784 else if (l
[2] == 'n')
4786 if (!add_prefix (CS_PREFIX_OPCODE
))
4792 /* Any other comma loses. */
4795 as_bad (_("invalid character %s in mnemonic"),
4796 output_invalid (*l
));
4800 /* Check if instruction is supported on specified architecture. */
4802 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
4804 supported
|= cpu_flags_match (t
);
4805 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
4807 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
))
4808 as_warn (_("use .code16 to ensure correct addressing mode"));
4814 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
4815 as_bad (flag_code
== CODE_64BIT
4816 ? _("`%s' is not supported in 64-bit mode")
4817 : _("`%s' is only supported in 64-bit mode"),
4818 current_templates
->start
->name
);
4820 as_bad (_("`%s' is not supported on `%s%s'"),
4821 current_templates
->start
->name
,
4822 cpu_arch_name
? cpu_arch_name
: default_arch
,
4823 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
4829 parse_operands (char *l
, const char *mnemonic
)
4833 /* 1 if operand is pending after ','. */
4834 unsigned int expecting_operand
= 0;
4836 /* Non-zero if operand parens not balanced. */
4837 unsigned int paren_not_balanced
;
4839 while (*l
!= END_OF_INSN
)
4841 /* Skip optional white space before operand. */
4842 if (is_space_char (*l
))
4844 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
4846 as_bad (_("invalid character %s before operand %d"),
4847 output_invalid (*l
),
4851 token_start
= l
; /* After white space. */
4852 paren_not_balanced
= 0;
4853 while (paren_not_balanced
|| *l
!= ',')
4855 if (*l
== END_OF_INSN
)
4857 if (paren_not_balanced
)
4860 as_bad (_("unbalanced parenthesis in operand %d."),
4863 as_bad (_("unbalanced brackets in operand %d."),
4868 break; /* we are done */
4870 else if (!is_operand_char (*l
) && !is_space_char (*l
) && *l
!= '"')
4872 as_bad (_("invalid character %s in operand %d"),
4873 output_invalid (*l
),
4880 ++paren_not_balanced
;
4882 --paren_not_balanced
;
4887 ++paren_not_balanced
;
4889 --paren_not_balanced
;
4893 if (l
!= token_start
)
4894 { /* Yes, we've read in another operand. */
4895 unsigned int operand_ok
;
4896 this_operand
= i
.operands
++;
4897 if (i
.operands
> MAX_OPERANDS
)
4899 as_bad (_("spurious operands; (%d operands/instruction max)"),
4903 i
.types
[this_operand
].bitfield
.unspecified
= 1;
4904 /* Now parse operand adding info to 'i' as we go along. */
4905 END_STRING_AND_SAVE (l
);
4907 if (i
.mem_operands
> 1)
4909 as_bad (_("too many memory references for `%s'"),
4916 i386_intel_operand (token_start
,
4917 intel_float_operand (mnemonic
));
4919 operand_ok
= i386_att_operand (token_start
);
4921 RESTORE_END_STRING (l
);
4927 if (expecting_operand
)
4929 expecting_operand_after_comma
:
4930 as_bad (_("expecting operand after ','; got nothing"));
4935 as_bad (_("expecting operand before ','; got nothing"));
4940 /* Now *l must be either ',' or END_OF_INSN. */
4943 if (*++l
== END_OF_INSN
)
4945 /* Just skip it, if it's \n complain. */
4946 goto expecting_operand_after_comma
;
4948 expecting_operand
= 1;
4955 swap_2_operands (int xchg1
, int xchg2
)
4957 union i386_op temp_op
;
4958 i386_operand_type temp_type
;
4959 unsigned int temp_flags
;
4960 enum bfd_reloc_code_real temp_reloc
;
4962 temp_type
= i
.types
[xchg2
];
4963 i
.types
[xchg2
] = i
.types
[xchg1
];
4964 i
.types
[xchg1
] = temp_type
;
4966 temp_flags
= i
.flags
[xchg2
];
4967 i
.flags
[xchg2
] = i
.flags
[xchg1
];
4968 i
.flags
[xchg1
] = temp_flags
;
4970 temp_op
= i
.op
[xchg2
];
4971 i
.op
[xchg2
] = i
.op
[xchg1
];
4972 i
.op
[xchg1
] = temp_op
;
4974 temp_reloc
= i
.reloc
[xchg2
];
4975 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
4976 i
.reloc
[xchg1
] = temp_reloc
;
4980 if (i
.mask
->operand
== xchg1
)
4981 i
.mask
->operand
= xchg2
;
4982 else if (i
.mask
->operand
== xchg2
)
4983 i
.mask
->operand
= xchg1
;
4987 if (i
.broadcast
->operand
== xchg1
)
4988 i
.broadcast
->operand
= xchg2
;
4989 else if (i
.broadcast
->operand
== xchg2
)
4990 i
.broadcast
->operand
= xchg1
;
4994 if (i
.rounding
->operand
== xchg1
)
4995 i
.rounding
->operand
= xchg2
;
4996 else if (i
.rounding
->operand
== xchg2
)
4997 i
.rounding
->operand
= xchg1
;
5002 swap_operands (void)
5008 swap_2_operands (1, i
.operands
- 2);
5012 swap_2_operands (0, i
.operands
- 1);
5018 if (i
.mem_operands
== 2)
5020 const seg_entry
*temp_seg
;
5021 temp_seg
= i
.seg
[0];
5022 i
.seg
[0] = i
.seg
[1];
5023 i
.seg
[1] = temp_seg
;
5027 /* Try to ensure constant immediates are represented in the smallest
5032 char guess_suffix
= 0;
5036 guess_suffix
= i
.suffix
;
5037 else if (i
.reg_operands
)
5039 /* Figure out a suffix from the last register operand specified.
5040 We can't do this properly yet, i.e. excluding special register
5041 instances, but the following works for instructions with
5042 immediates. In any case, we can't set i.suffix yet. */
5043 for (op
= i
.operands
; --op
>= 0;)
5044 if (i
.types
[op
].bitfield
.class != Reg
)
5046 else if (i
.types
[op
].bitfield
.byte
)
5048 guess_suffix
= BYTE_MNEM_SUFFIX
;
5051 else if (i
.types
[op
].bitfield
.word
)
5053 guess_suffix
= WORD_MNEM_SUFFIX
;
5056 else if (i
.types
[op
].bitfield
.dword
)
5058 guess_suffix
= LONG_MNEM_SUFFIX
;
5061 else if (i
.types
[op
].bitfield
.qword
)
5063 guess_suffix
= QWORD_MNEM_SUFFIX
;
5067 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5068 guess_suffix
= WORD_MNEM_SUFFIX
;
5070 for (op
= i
.operands
; --op
>= 0;)
5071 if (operand_type_check (i
.types
[op
], imm
))
5073 switch (i
.op
[op
].imms
->X_op
)
5076 /* If a suffix is given, this operand may be shortened. */
5077 switch (guess_suffix
)
5079 case LONG_MNEM_SUFFIX
:
5080 i
.types
[op
].bitfield
.imm32
= 1;
5081 i
.types
[op
].bitfield
.imm64
= 1;
5083 case WORD_MNEM_SUFFIX
:
5084 i
.types
[op
].bitfield
.imm16
= 1;
5085 i
.types
[op
].bitfield
.imm32
= 1;
5086 i
.types
[op
].bitfield
.imm32s
= 1;
5087 i
.types
[op
].bitfield
.imm64
= 1;
5089 case BYTE_MNEM_SUFFIX
:
5090 i
.types
[op
].bitfield
.imm8
= 1;
5091 i
.types
[op
].bitfield
.imm8s
= 1;
5092 i
.types
[op
].bitfield
.imm16
= 1;
5093 i
.types
[op
].bitfield
.imm32
= 1;
5094 i
.types
[op
].bitfield
.imm32s
= 1;
5095 i
.types
[op
].bitfield
.imm64
= 1;
5099 /* If this operand is at most 16 bits, convert it
5100 to a signed 16 bit number before trying to see
5101 whether it will fit in an even smaller size.
5102 This allows a 16-bit operand such as $0xffe0 to
5103 be recognised as within Imm8S range. */
5104 if ((i
.types
[op
].bitfield
.imm16
)
5105 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
5107 i
.op
[op
].imms
->X_add_number
=
5108 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
5111 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5112 if ((i
.types
[op
].bitfield
.imm32
)
5113 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
5116 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5117 ^ ((offsetT
) 1 << 31))
5118 - ((offsetT
) 1 << 31));
5122 = operand_type_or (i
.types
[op
],
5123 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
5125 /* We must avoid matching of Imm32 templates when 64bit
5126 only immediate is available. */
5127 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
5128 i
.types
[op
].bitfield
.imm32
= 0;
5135 /* Symbols and expressions. */
5137 /* Convert symbolic operand to proper sizes for matching, but don't
5138 prevent matching a set of insns that only supports sizes other
5139 than those matching the insn suffix. */
5141 i386_operand_type mask
, allowed
;
5142 const insn_template
*t
;
5144 operand_type_set (&mask
, 0);
5145 operand_type_set (&allowed
, 0);
5147 for (t
= current_templates
->start
;
5148 t
< current_templates
->end
;
5151 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
5152 allowed
= operand_type_and (allowed
, anyimm
);
5154 switch (guess_suffix
)
5156 case QWORD_MNEM_SUFFIX
:
5157 mask
.bitfield
.imm64
= 1;
5158 mask
.bitfield
.imm32s
= 1;
5160 case LONG_MNEM_SUFFIX
:
5161 mask
.bitfield
.imm32
= 1;
5163 case WORD_MNEM_SUFFIX
:
5164 mask
.bitfield
.imm16
= 1;
5166 case BYTE_MNEM_SUFFIX
:
5167 mask
.bitfield
.imm8
= 1;
5172 allowed
= operand_type_and (mask
, allowed
);
5173 if (!operand_type_all_zero (&allowed
))
5174 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
5181 /* Try to use the smallest displacement type too. */
5183 optimize_disp (void)
5187 for (op
= i
.operands
; --op
>= 0;)
5188 if (operand_type_check (i
.types
[op
], disp
))
5190 if (i
.op
[op
].disps
->X_op
== O_constant
)
5192 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
5194 if (i
.types
[op
].bitfield
.disp16
5195 && (op_disp
& ~(offsetT
) 0xffff) == 0)
5197 /* If this operand is at most 16 bits, convert
5198 to a signed 16 bit number and don't use 64bit
5200 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
5201 i
.types
[op
].bitfield
.disp64
= 0;
5204 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5205 if (i
.types
[op
].bitfield
.disp32
5206 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
5208 /* If this operand is at most 32 bits, convert
5209 to a signed 32 bit number and don't use 64bit
5211 op_disp
&= (((offsetT
) 2 << 31) - 1);
5212 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
5213 i
.types
[op
].bitfield
.disp64
= 0;
5216 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
5218 i
.types
[op
].bitfield
.disp8
= 0;
5219 i
.types
[op
].bitfield
.disp16
= 0;
5220 i
.types
[op
].bitfield
.disp32
= 0;
5221 i
.types
[op
].bitfield
.disp32s
= 0;
5222 i
.types
[op
].bitfield
.disp64
= 0;
5226 else if (flag_code
== CODE_64BIT
)
5228 if (fits_in_signed_long (op_disp
))
5230 i
.types
[op
].bitfield
.disp64
= 0;
5231 i
.types
[op
].bitfield
.disp32s
= 1;
5233 if (i
.prefix
[ADDR_PREFIX
]
5234 && fits_in_unsigned_long (op_disp
))
5235 i
.types
[op
].bitfield
.disp32
= 1;
5237 if ((i
.types
[op
].bitfield
.disp32
5238 || i
.types
[op
].bitfield
.disp32s
5239 || i
.types
[op
].bitfield
.disp16
)
5240 && fits_in_disp8 (op_disp
))
5241 i
.types
[op
].bitfield
.disp8
= 1;
5243 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5244 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
5246 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
5247 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
5248 i
.types
[op
].bitfield
.disp8
= 0;
5249 i
.types
[op
].bitfield
.disp16
= 0;
5250 i
.types
[op
].bitfield
.disp32
= 0;
5251 i
.types
[op
].bitfield
.disp32s
= 0;
5252 i
.types
[op
].bitfield
.disp64
= 0;
5255 /* We only support 64bit displacement on constants. */
5256 i
.types
[op
].bitfield
.disp64
= 0;
5260 /* Return 1 if there is a match in broadcast bytes between operand
5261 GIVEN and instruction template T. */
5264 match_broadcast_size (const insn_template
*t
, unsigned int given
)
5266 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
5267 && i
.types
[given
].bitfield
.byte
)
5268 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
5269 && i
.types
[given
].bitfield
.word
)
5270 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
5271 && i
.types
[given
].bitfield
.dword
)
5272 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
5273 && i
.types
[given
].bitfield
.qword
));
5276 /* Check if operands are valid for the instruction. */
5279 check_VecOperands (const insn_template
*t
)
5283 static const i386_cpu_flags avx512
= CPU_ANY_AVX512F_FLAGS
;
5285 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5286 any one operand are implicity requiring AVX512VL support if the actual
5287 operand size is YMMword or XMMword. Since this function runs after
5288 template matching, there's no need to check for YMMword/XMMword in
5290 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
5291 if (!cpu_flags_all_zero (&cpu
)
5292 && !t
->cpu_flags
.bitfield
.cpuavx512vl
5293 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
5295 for (op
= 0; op
< t
->operands
; ++op
)
5297 if (t
->operand_types
[op
].bitfield
.zmmword
5298 && (i
.types
[op
].bitfield
.ymmword
5299 || i
.types
[op
].bitfield
.xmmword
))
5301 i
.error
= unsupported
;
5307 /* Without VSIB byte, we can't have a vector register for index. */
5308 if (!t
->opcode_modifier
.vecsib
5310 && (i
.index_reg
->reg_type
.bitfield
.xmmword
5311 || i
.index_reg
->reg_type
.bitfield
.ymmword
5312 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
5314 i
.error
= unsupported_vector_index_register
;
5318 /* Check if default mask is allowed. */
5319 if (t
->opcode_modifier
.nodefmask
5320 && (!i
.mask
|| i
.mask
->mask
->reg_num
== 0))
5322 i
.error
= no_default_mask
;
5326 /* For VSIB byte, we need a vector register for index, and all vector
5327 registers must be distinct. */
5328 if (t
->opcode_modifier
.vecsib
)
5331 || !((t
->opcode_modifier
.vecsib
== VecSIB128
5332 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
5333 || (t
->opcode_modifier
.vecsib
== VecSIB256
5334 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
5335 || (t
->opcode_modifier
.vecsib
== VecSIB512
5336 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
5338 i
.error
= invalid_vsib_address
;
5342 gas_assert (i
.reg_operands
== 2 || i
.mask
);
5343 if (i
.reg_operands
== 2 && !i
.mask
)
5345 gas_assert (i
.types
[0].bitfield
.class == RegSIMD
);
5346 gas_assert (i
.types
[0].bitfield
.xmmword
5347 || i
.types
[0].bitfield
.ymmword
);
5348 gas_assert (i
.types
[2].bitfield
.class == RegSIMD
);
5349 gas_assert (i
.types
[2].bitfield
.xmmword
5350 || i
.types
[2].bitfield
.ymmword
);
5351 if (operand_check
== check_none
)
5353 if (register_number (i
.op
[0].regs
)
5354 != register_number (i
.index_reg
)
5355 && register_number (i
.op
[2].regs
)
5356 != register_number (i
.index_reg
)
5357 && register_number (i
.op
[0].regs
)
5358 != register_number (i
.op
[2].regs
))
5360 if (operand_check
== check_error
)
5362 i
.error
= invalid_vector_register_set
;
5365 as_warn (_("mask, index, and destination registers should be distinct"));
5367 else if (i
.reg_operands
== 1 && i
.mask
)
5369 if (i
.types
[1].bitfield
.class == RegSIMD
5370 && (i
.types
[1].bitfield
.xmmword
5371 || i
.types
[1].bitfield
.ymmword
5372 || i
.types
[1].bitfield
.zmmword
)
5373 && (register_number (i
.op
[1].regs
)
5374 == register_number (i
.index_reg
)))
5376 if (operand_check
== check_error
)
5378 i
.error
= invalid_vector_register_set
;
5381 if (operand_check
!= check_none
)
5382 as_warn (_("index and destination registers should be distinct"));
5387 /* Check if broadcast is supported by the instruction and is applied
5388 to the memory operand. */
5391 i386_operand_type type
, overlap
;
5393 /* Check if specified broadcast is supported in this instruction,
5394 and its broadcast bytes match the memory operand. */
5395 op
= i
.broadcast
->operand
;
5396 if (!t
->opcode_modifier
.broadcast
5397 || !(i
.flags
[op
] & Operand_Mem
)
5398 || (!i
.types
[op
].bitfield
.unspecified
5399 && !match_broadcast_size (t
, op
)))
5402 i
.error
= unsupported_broadcast
;
5406 i
.broadcast
->bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
5407 * i
.broadcast
->type
);
5408 operand_type_set (&type
, 0);
5409 switch (i
.broadcast
->bytes
)
5412 type
.bitfield
.word
= 1;
5415 type
.bitfield
.dword
= 1;
5418 type
.bitfield
.qword
= 1;
5421 type
.bitfield
.xmmword
= 1;
5424 type
.bitfield
.ymmword
= 1;
5427 type
.bitfield
.zmmword
= 1;
5433 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
5434 if (operand_type_all_zero (&overlap
))
5437 if (t
->opcode_modifier
.checkregsize
)
5441 type
.bitfield
.baseindex
= 1;
5442 for (j
= 0; j
< i
.operands
; ++j
)
5445 && !operand_type_register_match(i
.types
[j
],
5446 t
->operand_types
[j
],
5448 t
->operand_types
[op
]))
5453 /* If broadcast is supported in this instruction, we need to check if
5454 operand of one-element size isn't specified without broadcast. */
5455 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
5457 /* Find memory operand. */
5458 for (op
= 0; op
< i
.operands
; op
++)
5459 if (i
.flags
[op
] & Operand_Mem
)
5461 gas_assert (op
< i
.operands
);
5462 /* Check size of the memory operand. */
5463 if (match_broadcast_size (t
, op
))
5465 i
.error
= broadcast_needed
;
5470 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
5472 /* Check if requested masking is supported. */
5475 switch (t
->opcode_modifier
.masking
)
5479 case MERGING_MASKING
:
5480 if (i
.mask
->zeroing
)
5483 i
.error
= unsupported_masking
;
5487 case DYNAMIC_MASKING
:
5488 /* Memory destinations allow only merging masking. */
5489 if (i
.mask
->zeroing
&& i
.mem_operands
)
5491 /* Find memory operand. */
5492 for (op
= 0; op
< i
.operands
; op
++)
5493 if (i
.flags
[op
] & Operand_Mem
)
5495 gas_assert (op
< i
.operands
);
5496 if (op
== i
.operands
- 1)
5498 i
.error
= unsupported_masking
;
5508 /* Check if masking is applied to dest operand. */
5509 if (i
.mask
&& (i
.mask
->operand
!= (int) (i
.operands
- 1)))
5511 i
.error
= mask_not_on_destination
;
5518 if (!t
->opcode_modifier
.sae
5519 || (i
.rounding
->type
!= saeonly
&& !t
->opcode_modifier
.staticrounding
))
5521 i
.error
= unsupported_rc_sae
;
5524 /* If the instruction has several immediate operands and one of
5525 them is rounding, the rounding operand should be the last
5526 immediate operand. */
5527 if (i
.imm_operands
> 1
5528 && i
.rounding
->operand
!= (int) (i
.imm_operands
- 1))
5530 i
.error
= rc_sae_operand_not_last_imm
;
5535 /* Check vector Disp8 operand. */
5536 if (t
->opcode_modifier
.disp8memshift
5537 && i
.disp_encoding
!= disp_encoding_32bit
)
5540 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
5541 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
5542 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
5545 const i386_operand_type
*type
= NULL
;
5548 for (op
= 0; op
< i
.operands
; op
++)
5549 if (i
.flags
[op
] & Operand_Mem
)
5551 if (t
->opcode_modifier
.evex
== EVEXLIG
)
5552 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
5553 else if (t
->operand_types
[op
].bitfield
.xmmword
5554 + t
->operand_types
[op
].bitfield
.ymmword
5555 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
5556 type
= &t
->operand_types
[op
];
5557 else if (!i
.types
[op
].bitfield
.unspecified
)
5558 type
= &i
.types
[op
];
5560 else if (i
.types
[op
].bitfield
.class == RegSIMD
5561 && t
->opcode_modifier
.evex
!= EVEXLIG
)
5563 if (i
.types
[op
].bitfield
.zmmword
)
5565 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
5567 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
5573 if (type
->bitfield
.zmmword
)
5575 else if (type
->bitfield
.ymmword
)
5577 else if (type
->bitfield
.xmmword
)
5581 /* For the check in fits_in_disp8(). */
5582 if (i
.memshift
== 0)
5586 for (op
= 0; op
< i
.operands
; op
++)
5587 if (operand_type_check (i
.types
[op
], disp
)
5588 && i
.op
[op
].disps
->X_op
== O_constant
)
5590 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
5592 i
.types
[op
].bitfield
.disp8
= 1;
5595 i
.types
[op
].bitfield
.disp8
= 0;
5604 /* Check if operands are valid for the instruction. Update VEX
5608 VEX_check_operands (const insn_template
*t
)
5610 if (i
.vec_encoding
== vex_encoding_evex
)
5612 /* This instruction must be encoded with EVEX prefix. */
5613 if (!is_evex_encoding (t
))
5615 i
.error
= unsupported
;
5621 if (!t
->opcode_modifier
.vex
)
5623 /* This instruction template doesn't have VEX prefix. */
5624 if (i
.vec_encoding
!= vex_encoding_default
)
5626 i
.error
= unsupported
;
5632 /* Check the special Imm4 cases; must be the first operand. */
5633 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
5635 if (i
.op
[0].imms
->X_op
!= O_constant
5636 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
5642 /* Turn off Imm<N> so that update_imm won't complain. */
5643 operand_type_set (&i
.types
[0], 0);
5649 static const insn_template
*
5650 match_template (char mnem_suffix
)
5652 /* Points to template once we've found it. */
5653 const insn_template
*t
;
5654 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
5655 i386_operand_type overlap4
;
5656 unsigned int found_reverse_match
;
5657 i386_opcode_modifier suffix_check
;
5658 i386_operand_type operand_types
[MAX_OPERANDS
];
5659 int addr_prefix_disp
;
5661 unsigned int found_cpu_match
, size_match
;
5662 unsigned int check_register
;
5663 enum i386_error specific_error
= 0;
5665 #if MAX_OPERANDS != 5
5666 # error "MAX_OPERANDS must be 5."
5669 found_reverse_match
= 0;
5670 addr_prefix_disp
= -1;
5672 /* Prepare for mnemonic suffix check. */
5673 memset (&suffix_check
, 0, sizeof (suffix_check
));
5674 switch (mnem_suffix
)
5676 case BYTE_MNEM_SUFFIX
:
5677 suffix_check
.no_bsuf
= 1;
5679 case WORD_MNEM_SUFFIX
:
5680 suffix_check
.no_wsuf
= 1;
5682 case SHORT_MNEM_SUFFIX
:
5683 suffix_check
.no_ssuf
= 1;
5685 case LONG_MNEM_SUFFIX
:
5686 suffix_check
.no_lsuf
= 1;
5688 case QWORD_MNEM_SUFFIX
:
5689 suffix_check
.no_qsuf
= 1;
5692 /* NB: In Intel syntax, normally we can check for memory operand
5693 size when there is no mnemonic suffix. But jmp and call have
5694 2 different encodings with Dword memory operand size, one with
5695 No_ldSuf and the other without. i.suffix is set to
5696 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
5697 if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
5698 suffix_check
.no_ldsuf
= 1;
5701 /* Must have right number of operands. */
5702 i
.error
= number_of_operands_mismatch
;
5704 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
5706 addr_prefix_disp
= -1;
5707 found_reverse_match
= 0;
5709 if (i
.operands
!= t
->operands
)
5712 /* Check processor support. */
5713 i
.error
= unsupported
;
5714 found_cpu_match
= (cpu_flags_match (t
)
5715 == CPU_FLAGS_PERFECT_MATCH
);
5716 if (!found_cpu_match
)
5719 /* Check AT&T mnemonic. */
5720 i
.error
= unsupported_with_intel_mnemonic
;
5721 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
5724 /* Check AT&T/Intel syntax and Intel64/AMD64 ISA. */
5725 i
.error
= unsupported_syntax
;
5726 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
5727 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
)
5728 || (intel64
&& t
->opcode_modifier
.amd64
)
5729 || (!intel64
&& t
->opcode_modifier
.intel64
))
5732 /* Check the suffix. */
5733 i
.error
= invalid_instruction_suffix
;
5734 if ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
5735 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
5736 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
5737 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
5738 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
5739 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
))
5742 size_match
= operand_size_match (t
);
5746 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5747 operand_types
[j
] = t
->operand_types
[j
];
5749 /* In general, don't allow 64-bit operands in 32-bit mode. */
5750 if (i
.suffix
== QWORD_MNEM_SUFFIX
5751 && flag_code
!= CODE_64BIT
5753 ? (!t
->opcode_modifier
.ignoresize
5754 && !t
->opcode_modifier
.broadcast
5755 && !intel_float_operand (t
->name
))
5756 : intel_float_operand (t
->name
) != 2)
5757 && ((operand_types
[0].bitfield
.class != RegMMX
5758 && operand_types
[0].bitfield
.class != RegSIMD
)
5759 || (operand_types
[t
->operands
> 1].bitfield
.class != RegMMX
5760 && operand_types
[t
->operands
> 1].bitfield
.class != RegSIMD
))
5761 && (t
->base_opcode
!= 0x0fc7
5762 || t
->extension_opcode
!= 1 /* cmpxchg8b */))
5765 /* In general, don't allow 32-bit operands on pre-386. */
5766 else if (i
.suffix
== LONG_MNEM_SUFFIX
5767 && !cpu_arch_flags
.bitfield
.cpui386
5769 ? (!t
->opcode_modifier
.ignoresize
5770 && !intel_float_operand (t
->name
))
5771 : intel_float_operand (t
->name
) != 2)
5772 && ((operand_types
[0].bitfield
.class != RegMMX
5773 && operand_types
[0].bitfield
.class != RegSIMD
)
5774 || (operand_types
[t
->operands
> 1].bitfield
.class != RegMMX
5775 && operand_types
[t
->operands
> 1].bitfield
.class
5779 /* Do not verify operands when there are none. */
5783 /* We've found a match; break out of loop. */
5787 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
5788 into Disp32/Disp16/Disp32 operand. */
5789 if (i
.prefix
[ADDR_PREFIX
] != 0)
5791 /* There should be only one Disp operand. */
5795 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5797 if (operand_types
[j
].bitfield
.disp16
)
5799 addr_prefix_disp
= j
;
5800 operand_types
[j
].bitfield
.disp32
= 1;
5801 operand_types
[j
].bitfield
.disp16
= 0;
5807 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5809 if (operand_types
[j
].bitfield
.disp32
)
5811 addr_prefix_disp
= j
;
5812 operand_types
[j
].bitfield
.disp32
= 0;
5813 operand_types
[j
].bitfield
.disp16
= 1;
5819 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5821 if (operand_types
[j
].bitfield
.disp64
)
5823 addr_prefix_disp
= j
;
5824 operand_types
[j
].bitfield
.disp64
= 0;
5825 operand_types
[j
].bitfield
.disp32
= 1;
5833 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
5834 if (i
.reloc
[0] == BFD_RELOC_386_GOT32
&& t
->base_opcode
== 0xa0)
5837 /* We check register size if needed. */
5838 if (t
->opcode_modifier
.checkregsize
)
5840 check_register
= (1 << t
->operands
) - 1;
5842 check_register
&= ~(1 << i
.broadcast
->operand
);
5847 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
5848 switch (t
->operands
)
5851 if (!operand_type_match (overlap0
, i
.types
[0]))
5855 /* xchg %eax, %eax is a special case. It is an alias for nop
5856 only in 32bit mode and we can use opcode 0x90. In 64bit
5857 mode, we can't use 0x90 for xchg %eax, %eax since it should
5858 zero-extend %eax to %rax. */
5859 if (flag_code
== CODE_64BIT
5860 && t
->base_opcode
== 0x90
5861 && i
.types
[0].bitfield
.instance
== Accum
5862 && i
.types
[0].bitfield
.dword
5863 && i
.types
[1].bitfield
.instance
== Accum
5864 && i
.types
[1].bitfield
.dword
)
5866 /* xrelease mov %eax, <disp> is another special case. It must not
5867 match the accumulator-only encoding of mov. */
5868 if (flag_code
!= CODE_64BIT
5870 && t
->base_opcode
== 0xa0
5871 && i
.types
[0].bitfield
.instance
== Accum
5872 && (i
.flags
[1] & Operand_Mem
))
5877 if (!(size_match
& MATCH_STRAIGHT
))
5879 /* Reverse direction of operands if swapping is possible in the first
5880 place (operands need to be symmetric) and
5881 - the load form is requested, and the template is a store form,
5882 - the store form is requested, and the template is a load form,
5883 - the non-default (swapped) form is requested. */
5884 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
5885 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
5886 && !operand_type_all_zero (&overlap1
))
5887 switch (i
.dir_encoding
)
5889 case dir_encoding_load
:
5890 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
5891 || t
->opcode_modifier
.regmem
)
5895 case dir_encoding_store
:
5896 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
5897 && !t
->opcode_modifier
.regmem
)
5901 case dir_encoding_swap
:
5904 case dir_encoding_default
:
5907 /* If we want store form, we skip the current load. */
5908 if ((i
.dir_encoding
== dir_encoding_store
5909 || i
.dir_encoding
== dir_encoding_swap
)
5910 && i
.mem_operands
== 0
5911 && t
->opcode_modifier
.load
)
5916 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
5917 if (!operand_type_match (overlap0
, i
.types
[0])
5918 || !operand_type_match (overlap1
, i
.types
[1])
5919 || ((check_register
& 3) == 3
5920 && !operand_type_register_match (i
.types
[0],
5925 /* Check if other direction is valid ... */
5926 if (!t
->opcode_modifier
.d
)
5930 if (!(size_match
& MATCH_REVERSE
))
5932 /* Try reversing direction of operands. */
5933 overlap0
= operand_type_and (i
.types
[0], operand_types
[i
.operands
- 1]);
5934 overlap1
= operand_type_and (i
.types
[i
.operands
- 1], operand_types
[0]);
5935 if (!operand_type_match (overlap0
, i
.types
[0])
5936 || !operand_type_match (overlap1
, i
.types
[i
.operands
- 1])
5938 && !operand_type_register_match (i
.types
[0],
5939 operand_types
[i
.operands
- 1],
5940 i
.types
[i
.operands
- 1],
5943 /* Does not match either direction. */
5946 /* found_reverse_match holds which of D or FloatR
5948 if (!t
->opcode_modifier
.d
)
5949 found_reverse_match
= 0;
5950 else if (operand_types
[0].bitfield
.tbyte
)
5951 found_reverse_match
= Opcode_FloatD
;
5952 else if (operand_types
[0].bitfield
.xmmword
5953 || operand_types
[i
.operands
- 1].bitfield
.xmmword
5954 || operand_types
[0].bitfield
.class == RegMMX
5955 || operand_types
[i
.operands
- 1].bitfield
.class == RegMMX
5956 || is_any_vex_encoding(t
))
5957 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
5958 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
5960 found_reverse_match
= Opcode_D
;
5961 if (t
->opcode_modifier
.floatr
)
5962 found_reverse_match
|= Opcode_FloatR
;
5966 /* Found a forward 2 operand match here. */
5967 switch (t
->operands
)
5970 overlap4
= operand_type_and (i
.types
[4],
5974 overlap3
= operand_type_and (i
.types
[3],
5978 overlap2
= operand_type_and (i
.types
[2],
5983 switch (t
->operands
)
5986 if (!operand_type_match (overlap4
, i
.types
[4])
5987 || !operand_type_register_match (i
.types
[3],
5994 if (!operand_type_match (overlap3
, i
.types
[3])
5995 || ((check_register
& 0xa) == 0xa
5996 && !operand_type_register_match (i
.types
[1],
6000 || ((check_register
& 0xc) == 0xc
6001 && !operand_type_register_match (i
.types
[2],
6008 /* Here we make use of the fact that there are no
6009 reverse match 3 operand instructions. */
6010 if (!operand_type_match (overlap2
, i
.types
[2])
6011 || ((check_register
& 5) == 5
6012 && !operand_type_register_match (i
.types
[0],
6016 || ((check_register
& 6) == 6
6017 && !operand_type_register_match (i
.types
[1],
6025 /* Found either forward/reverse 2, 3 or 4 operand match here:
6026 slip through to break. */
6028 if (!found_cpu_match
)
6031 /* Check if vector and VEX operands are valid. */
6032 if (check_VecOperands (t
) || VEX_check_operands (t
))
6034 specific_error
= i
.error
;
6038 /* We've found a match; break out of loop. */
6042 if (t
== current_templates
->end
)
6044 /* We found no match. */
6045 const char *err_msg
;
6046 switch (specific_error
? specific_error
: i
.error
)
6050 case operand_size_mismatch
:
6051 err_msg
= _("operand size mismatch");
6053 case operand_type_mismatch
:
6054 err_msg
= _("operand type mismatch");
6056 case register_type_mismatch
:
6057 err_msg
= _("register type mismatch");
6059 case number_of_operands_mismatch
:
6060 err_msg
= _("number of operands mismatch");
6062 case invalid_instruction_suffix
:
6063 err_msg
= _("invalid instruction suffix");
6066 err_msg
= _("constant doesn't fit in 4 bits");
6068 case unsupported_with_intel_mnemonic
:
6069 err_msg
= _("unsupported with Intel mnemonic");
6071 case unsupported_syntax
:
6072 err_msg
= _("unsupported syntax");
6075 as_bad (_("unsupported instruction `%s'"),
6076 current_templates
->start
->name
);
6078 case invalid_vsib_address
:
6079 err_msg
= _("invalid VSIB address");
6081 case invalid_vector_register_set
:
6082 err_msg
= _("mask, index, and destination registers must be distinct");
6084 case unsupported_vector_index_register
:
6085 err_msg
= _("unsupported vector index register");
6087 case unsupported_broadcast
:
6088 err_msg
= _("unsupported broadcast");
6090 case broadcast_needed
:
6091 err_msg
= _("broadcast is needed for operand of such type");
6093 case unsupported_masking
:
6094 err_msg
= _("unsupported masking");
6096 case mask_not_on_destination
:
6097 err_msg
= _("mask not on destination operand");
6099 case no_default_mask
:
6100 err_msg
= _("default mask isn't allowed");
6102 case unsupported_rc_sae
:
6103 err_msg
= _("unsupported static rounding/sae");
6105 case rc_sae_operand_not_last_imm
:
6107 err_msg
= _("RC/SAE operand must precede immediate operands");
6109 err_msg
= _("RC/SAE operand must follow immediate operands");
6111 case invalid_register_operand
:
6112 err_msg
= _("invalid register operand");
6115 as_bad (_("%s for `%s'"), err_msg
,
6116 current_templates
->start
->name
);
6120 if (!quiet_warnings
)
6123 && (i
.types
[0].bitfield
.jumpabsolute
6124 != operand_types
[0].bitfield
.jumpabsolute
))
6126 as_warn (_("indirect %s without `*'"), t
->name
);
6129 if (t
->opcode_modifier
.isprefix
6130 && t
->opcode_modifier
.ignoresize
)
6132 /* Warn them that a data or address size prefix doesn't
6133 affect assembly of the next line of code. */
6134 as_warn (_("stand-alone `%s' prefix"), t
->name
);
6138 /* Copy the template we found. */
6141 if (addr_prefix_disp
!= -1)
6142 i
.tm
.operand_types
[addr_prefix_disp
]
6143 = operand_types
[addr_prefix_disp
];
6145 if (found_reverse_match
)
6147 /* If we found a reverse match we must alter the opcode direction
6148 bit and clear/flip the regmem modifier one. found_reverse_match
6149 holds bits to change (different for int & float insns). */
6151 i
.tm
.base_opcode
^= found_reverse_match
;
6153 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
6154 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
6156 /* Certain SIMD insns have their load forms specified in the opcode
6157 table, and hence we need to _set_ RegMem instead of clearing it.
6158 We need to avoid setting the bit though on insns like KMOVW. */
6159 i
.tm
.opcode_modifier
.regmem
6160 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
6161 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
6162 && !i
.tm
.opcode_modifier
.regmem
;
6171 unsigned int mem_op
= i
.flags
[0] & Operand_Mem
? 0 : 1;
6173 if (i
.tm
.operand_types
[mem_op
].bitfield
.esseg
)
6175 if (i
.seg
[0] != NULL
&& i
.seg
[0] != &es
)
6177 as_bad (_("`%s' operand %d must use `%ses' segment"),
6179 intel_syntax
? i
.tm
.operands
- mem_op
: mem_op
+ 1,
6183 /* There's only ever one segment override allowed per instruction.
6184 This instruction possibly has a legal segment override on the
6185 second operand, so copy the segment to where non-string
6186 instructions store it, allowing common code. */
6187 i
.seg
[0] = i
.seg
[1];
6189 else if (i
.tm
.operand_types
[mem_op
+ 1].bitfield
.esseg
)
6191 if (i
.seg
[1] != NULL
&& i
.seg
[1] != &es
)
6193 as_bad (_("`%s' operand %d must use `%ses' segment"),
6195 intel_syntax
? i
.tm
.operands
- mem_op
- 1 : mem_op
+ 2,
6204 process_suffix (void)
6206 /* If matched instruction specifies an explicit instruction mnemonic
6208 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
6209 i
.suffix
= WORD_MNEM_SUFFIX
;
6210 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
6211 i
.suffix
= LONG_MNEM_SUFFIX
;
6212 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
6213 i
.suffix
= QWORD_MNEM_SUFFIX
;
6214 else if (i
.reg_operands
)
6216 /* If there's no instruction mnemonic suffix we try to invent one
6217 based on register operands. */
6220 /* We take i.suffix from the last register operand specified,
6221 Destination register type is more significant than source
6222 register type. crc32 in SSE4.2 prefers source register
6224 if (i
.tm
.base_opcode
== 0xf20f38f0
6225 && i
.types
[0].bitfield
.class == Reg
)
6227 if (i
.types
[0].bitfield
.byte
)
6228 i
.suffix
= BYTE_MNEM_SUFFIX
;
6229 else if (i
.types
[0].bitfield
.word
)
6230 i
.suffix
= WORD_MNEM_SUFFIX
;
6231 else if (i
.types
[0].bitfield
.dword
)
6232 i
.suffix
= LONG_MNEM_SUFFIX
;
6233 else if (i
.types
[0].bitfield
.qword
)
6234 i
.suffix
= QWORD_MNEM_SUFFIX
;
6241 if (i
.tm
.base_opcode
== 0xf20f38f0)
6243 /* We have to know the operand size for crc32. */
6244 as_bad (_("ambiguous memory operand size for `%s`"),
6249 for (op
= i
.operands
; --op
>= 0;)
6250 if (i
.tm
.operand_types
[op
].bitfield
.instance
== InstanceNone
6251 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6253 if (i
.types
[op
].bitfield
.class != Reg
)
6255 if (i
.types
[op
].bitfield
.byte
)
6256 i
.suffix
= BYTE_MNEM_SUFFIX
;
6257 else if (i
.types
[op
].bitfield
.word
)
6258 i
.suffix
= WORD_MNEM_SUFFIX
;
6259 else if (i
.types
[op
].bitfield
.dword
)
6260 i
.suffix
= LONG_MNEM_SUFFIX
;
6261 else if (i
.types
[op
].bitfield
.qword
)
6262 i
.suffix
= QWORD_MNEM_SUFFIX
;
6269 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
6272 && i
.tm
.opcode_modifier
.ignoresize
6273 && i
.tm
.opcode_modifier
.no_bsuf
)
6275 else if (!check_byte_reg ())
6278 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
6281 && i
.tm
.opcode_modifier
.ignoresize
6282 && i
.tm
.opcode_modifier
.no_lsuf
6283 && !i
.tm
.opcode_modifier
.todword
6284 && !i
.tm
.opcode_modifier
.toqword
)
6286 else if (!check_long_reg ())
6289 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
6292 && i
.tm
.opcode_modifier
.ignoresize
6293 && i
.tm
.opcode_modifier
.no_qsuf
6294 && !i
.tm
.opcode_modifier
.todword
6295 && !i
.tm
.opcode_modifier
.toqword
)
6297 else if (!check_qword_reg ())
6300 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
6303 && i
.tm
.opcode_modifier
.ignoresize
6304 && i
.tm
.opcode_modifier
.no_wsuf
)
6306 else if (!check_word_reg ())
6309 else if (intel_syntax
&& i
.tm
.opcode_modifier
.ignoresize
)
6310 /* Do nothing if the instruction is going to ignore the prefix. */
6315 else if (i
.tm
.opcode_modifier
.defaultsize
6317 /* exclude fldenv/frstor/fsave/fstenv */
6318 && i
.tm
.opcode_modifier
.no_ssuf
)
6320 if (stackop_size
== LONG_MNEM_SUFFIX
6321 && i
.tm
.base_opcode
== 0xcf)
6323 /* stackop_size is set to LONG_MNEM_SUFFIX for the
6324 .code16gcc directive to support 16-bit mode with
6325 32-bit address. For IRET without a suffix, generate
6326 16-bit IRET (opcode 0xcf) to return from an interrupt
6328 i
.suffix
= WORD_MNEM_SUFFIX
;
6329 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
6332 i
.suffix
= stackop_size
;
6334 else if (intel_syntax
6336 && (i
.tm
.operand_types
[0].bitfield
.jumpabsolute
6337 || i
.tm
.opcode_modifier
.jumpbyte
6338 || i
.tm
.opcode_modifier
.jumpintersegment
6339 || (i
.tm
.base_opcode
== 0x0f01 /* [ls][gi]dt */
6340 && i
.tm
.extension_opcode
<= 3)))
6345 if (!i
.tm
.opcode_modifier
.no_qsuf
)
6347 i
.suffix
= QWORD_MNEM_SUFFIX
;
6352 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6353 i
.suffix
= LONG_MNEM_SUFFIX
;
6356 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6357 i
.suffix
= WORD_MNEM_SUFFIX
;
6366 if (i
.tm
.opcode_modifier
.w
)
6368 as_bad (_("no instruction mnemonic suffix given and "
6369 "no register operands; can't size instruction"));
6375 unsigned int suffixes
;
6377 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
6378 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6380 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6382 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
6384 if (!i
.tm
.opcode_modifier
.no_ssuf
)
6386 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
6389 /* There are more than suffix matches. */
6390 if (i
.tm
.opcode_modifier
.w
6391 || ((suffixes
& (suffixes
- 1))
6392 && !i
.tm
.opcode_modifier
.defaultsize
6393 && !i
.tm
.opcode_modifier
.ignoresize
))
6395 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
6401 /* Change the opcode based on the operand size given by i.suffix. */
6404 /* Size floating point instruction. */
6405 case LONG_MNEM_SUFFIX
:
6406 if (i
.tm
.opcode_modifier
.floatmf
)
6408 i
.tm
.base_opcode
^= 4;
6412 case WORD_MNEM_SUFFIX
:
6413 case QWORD_MNEM_SUFFIX
:
6414 /* It's not a byte, select word/dword operation. */
6415 if (i
.tm
.opcode_modifier
.w
)
6417 if (i
.tm
.opcode_modifier
.shortform
)
6418 i
.tm
.base_opcode
|= 8;
6420 i
.tm
.base_opcode
|= 1;
6423 case SHORT_MNEM_SUFFIX
:
6424 /* Now select between word & dword operations via the operand
6425 size prefix, except for instructions that will ignore this
6427 if (i
.reg_operands
> 0
6428 && i
.types
[0].bitfield
.class == Reg
6429 && i
.tm
.opcode_modifier
.addrprefixopreg
6430 && (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
6431 || i
.operands
== 1))
6433 /* The address size override prefix changes the size of the
6435 if ((flag_code
== CODE_32BIT
6436 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
6437 || (flag_code
!= CODE_32BIT
6438 && i
.op
[0].regs
->reg_type
.bitfield
.dword
))
6439 if (!add_prefix (ADDR_PREFIX_OPCODE
))
6442 else if (i
.suffix
!= QWORD_MNEM_SUFFIX
6443 && !i
.tm
.opcode_modifier
.ignoresize
6444 && !i
.tm
.opcode_modifier
.floatmf
6445 && !is_any_vex_encoding (&i
.tm
)
6446 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
6447 || (flag_code
== CODE_64BIT
6448 && i
.tm
.opcode_modifier
.jumpbyte
)))
6450 unsigned int prefix
= DATA_PREFIX_OPCODE
;
6452 if (i
.tm
.opcode_modifier
.jumpbyte
) /* jcxz, loop */
6453 prefix
= ADDR_PREFIX_OPCODE
;
6455 if (!add_prefix (prefix
))
6459 /* Set mode64 for an operand. */
6460 if (i
.suffix
== QWORD_MNEM_SUFFIX
6461 && flag_code
== CODE_64BIT
6462 && !i
.tm
.opcode_modifier
.norex64
6463 /* Special case for xchg %rax,%rax. It is NOP and doesn't
6465 && ! (i
.operands
== 2
6466 && i
.tm
.base_opcode
== 0x90
6467 && i
.tm
.extension_opcode
== None
6468 && i
.types
[0].bitfield
.instance
== Accum
6469 && i
.types
[0].bitfield
.qword
6470 && i
.types
[1].bitfield
.instance
== Accum
6471 && i
.types
[1].bitfield
.qword
))
6477 if (i
.reg_operands
!= 0
6479 && i
.tm
.opcode_modifier
.addrprefixopreg
6480 && i
.tm
.operand_types
[0].bitfield
.instance
!= Accum
)
6482 /* Check invalid register operand when the address size override
6483 prefix changes the size of register operands. */
6485 enum { need_word
, need_dword
, need_qword
} need
;
6487 if (flag_code
== CODE_32BIT
)
6488 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
6491 if (i
.prefix
[ADDR_PREFIX
])
6494 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
6497 for (op
= 0; op
< i
.operands
; op
++)
6498 if (i
.types
[op
].bitfield
.class == Reg
6499 && ((need
== need_word
6500 && !i
.op
[op
].regs
->reg_type
.bitfield
.word
)
6501 || (need
== need_dword
6502 && !i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
6503 || (need
== need_qword
6504 && !i
.op
[op
].regs
->reg_type
.bitfield
.qword
)))
6506 as_bad (_("invalid register operand size for `%s'"),
6516 check_byte_reg (void)
6520 for (op
= i
.operands
; --op
>= 0;)
6522 /* Skip non-register operands. */
6523 if (i
.types
[op
].bitfield
.class != Reg
)
6526 /* If this is an eight bit register, it's OK. If it's the 16 or
6527 32 bit version of an eight bit register, we will just use the
6528 low portion, and that's OK too. */
6529 if (i
.types
[op
].bitfield
.byte
)
6532 /* I/O port address operands are OK too. */
6533 if (i
.tm
.operand_types
[op
].bitfield
.instance
== RegD
6534 && i
.tm
.operand_types
[op
].bitfield
.word
)
6537 /* crc32 doesn't generate this warning. */
6538 if (i
.tm
.base_opcode
== 0xf20f38f0)
6541 if ((i
.types
[op
].bitfield
.word
6542 || i
.types
[op
].bitfield
.dword
6543 || i
.types
[op
].bitfield
.qword
)
6544 && i
.op
[op
].regs
->reg_num
< 4
6545 /* Prohibit these changes in 64bit mode, since the lowering
6546 would be more complicated. */
6547 && flag_code
!= CODE_64BIT
)
6549 #if REGISTER_WARNINGS
6550 if (!quiet_warnings
)
6551 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6553 (i
.op
[op
].regs
+ (i
.types
[op
].bitfield
.word
6554 ? REGNAM_AL
- REGNAM_AX
6555 : REGNAM_AL
- REGNAM_EAX
))->reg_name
,
6557 i
.op
[op
].regs
->reg_name
,
6562 /* Any other register is bad. */
6563 if (i
.types
[op
].bitfield
.class == Reg
6564 || i
.types
[op
].bitfield
.class == RegMMX
6565 || i
.types
[op
].bitfield
.class == RegSIMD
6566 || i
.types
[op
].bitfield
.class == SReg
6567 || i
.types
[op
].bitfield
.class == RegCR
6568 || i
.types
[op
].bitfield
.class == RegDR
6569 || i
.types
[op
].bitfield
.class == RegTR
)
6571 as_bad (_("`%s%s' not allowed with `%s%c'"),
6573 i
.op
[op
].regs
->reg_name
,
6583 check_long_reg (void)
6587 for (op
= i
.operands
; --op
>= 0;)
6588 /* Skip non-register operands. */
6589 if (i
.types
[op
].bitfield
.class != Reg
)
6591 /* Reject eight bit registers, except where the template requires
6592 them. (eg. movzb) */
6593 else if (i
.types
[op
].bitfield
.byte
6594 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6595 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6596 && (i
.tm
.operand_types
[op
].bitfield
.word
6597 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6599 as_bad (_("`%s%s' not allowed with `%s%c'"),
6601 i
.op
[op
].regs
->reg_name
,
6606 /* Warn if the e prefix on a general reg is missing. */
6607 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
6608 && i
.types
[op
].bitfield
.word
6609 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6610 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6611 && i
.tm
.operand_types
[op
].bitfield
.dword
)
6613 /* Prohibit these changes in the 64bit mode, since the
6614 lowering is more complicated. */
6615 if (flag_code
== CODE_64BIT
)
6617 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6618 register_prefix
, i
.op
[op
].regs
->reg_name
,
6622 #if REGISTER_WARNINGS
6623 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6625 (i
.op
[op
].regs
+ REGNAM_EAX
- REGNAM_AX
)->reg_name
,
6626 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
6629 /* Warn if the r prefix on a general reg is present. */
6630 else if (i
.types
[op
].bitfield
.qword
6631 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6632 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6633 && i
.tm
.operand_types
[op
].bitfield
.dword
)
6636 && i
.tm
.opcode_modifier
.toqword
6637 && i
.types
[0].bitfield
.class != RegSIMD
)
6639 /* Convert to QWORD. We want REX byte. */
6640 i
.suffix
= QWORD_MNEM_SUFFIX
;
6644 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6645 register_prefix
, i
.op
[op
].regs
->reg_name
,
6654 check_qword_reg (void)
6658 for (op
= i
.operands
; --op
>= 0; )
6659 /* Skip non-register operands. */
6660 if (i
.types
[op
].bitfield
.class != Reg
)
6662 /* Reject eight bit registers, except where the template requires
6663 them. (eg. movzb) */
6664 else if (i
.types
[op
].bitfield
.byte
6665 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6666 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6667 && (i
.tm
.operand_types
[op
].bitfield
.word
6668 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6670 as_bad (_("`%s%s' not allowed with `%s%c'"),
6672 i
.op
[op
].regs
->reg_name
,
6677 /* Warn if the r prefix on a general reg is missing. */
6678 else if ((i
.types
[op
].bitfield
.word
6679 || i
.types
[op
].bitfield
.dword
)
6680 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6681 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6682 && i
.tm
.operand_types
[op
].bitfield
.qword
)
6684 /* Prohibit these changes in the 64bit mode, since the
6685 lowering is more complicated. */
6687 && i
.tm
.opcode_modifier
.todword
6688 && i
.types
[0].bitfield
.class != RegSIMD
)
6690 /* Convert to DWORD. We don't want REX byte. */
6691 i
.suffix
= LONG_MNEM_SUFFIX
;
6695 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6696 register_prefix
, i
.op
[op
].regs
->reg_name
,
6705 check_word_reg (void)
6708 for (op
= i
.operands
; --op
>= 0;)
6709 /* Skip non-register operands. */
6710 if (i
.types
[op
].bitfield
.class != Reg
)
6712 /* Reject eight bit registers, except where the template requires
6713 them. (eg. movzb) */
6714 else if (i
.types
[op
].bitfield
.byte
6715 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6716 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6717 && (i
.tm
.operand_types
[op
].bitfield
.word
6718 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6720 as_bad (_("`%s%s' not allowed with `%s%c'"),
6722 i
.op
[op
].regs
->reg_name
,
6727 /* Warn if the e or r prefix on a general reg is present. */
6728 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
6729 && (i
.types
[op
].bitfield
.dword
6730 || i
.types
[op
].bitfield
.qword
)
6731 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6732 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6733 && i
.tm
.operand_types
[op
].bitfield
.word
)
6735 /* Prohibit these changes in the 64bit mode, since the
6736 lowering is more complicated. */
6737 if (flag_code
== CODE_64BIT
)
6739 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6740 register_prefix
, i
.op
[op
].regs
->reg_name
,
6744 #if REGISTER_WARNINGS
6745 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6747 (i
.op
[op
].regs
+ REGNAM_AX
- REGNAM_EAX
)->reg_name
,
6748 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
6755 update_imm (unsigned int j
)
6757 i386_operand_type overlap
= i
.types
[j
];
6758 if ((overlap
.bitfield
.imm8
6759 || overlap
.bitfield
.imm8s
6760 || overlap
.bitfield
.imm16
6761 || overlap
.bitfield
.imm32
6762 || overlap
.bitfield
.imm32s
6763 || overlap
.bitfield
.imm64
)
6764 && !operand_type_equal (&overlap
, &imm8
)
6765 && !operand_type_equal (&overlap
, &imm8s
)
6766 && !operand_type_equal (&overlap
, &imm16
)
6767 && !operand_type_equal (&overlap
, &imm32
)
6768 && !operand_type_equal (&overlap
, &imm32s
)
6769 && !operand_type_equal (&overlap
, &imm64
))
6773 i386_operand_type temp
;
6775 operand_type_set (&temp
, 0);
6776 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
6778 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
6779 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
6781 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
6782 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
6783 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
6785 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
6786 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
6789 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
6792 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
6793 || operand_type_equal (&overlap
, &imm16_32
)
6794 || operand_type_equal (&overlap
, &imm16_32s
))
6796 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
6801 if (!operand_type_equal (&overlap
, &imm8
)
6802 && !operand_type_equal (&overlap
, &imm8s
)
6803 && !operand_type_equal (&overlap
, &imm16
)
6804 && !operand_type_equal (&overlap
, &imm32
)
6805 && !operand_type_equal (&overlap
, &imm32s
)
6806 && !operand_type_equal (&overlap
, &imm64
))
6808 as_bad (_("no instruction mnemonic suffix given; "
6809 "can't determine immediate size"));
6813 i
.types
[j
] = overlap
;
6823 /* Update the first 2 immediate operands. */
6824 n
= i
.operands
> 2 ? 2 : i
.operands
;
6827 for (j
= 0; j
< n
; j
++)
6828 if (update_imm (j
) == 0)
6831 /* The 3rd operand can't be immediate operand. */
6832 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
6839 process_operands (void)
6841 /* Default segment register this instruction will use for memory
6842 accesses. 0 means unknown. This is only for optimizing out
6843 unnecessary segment overrides. */
6844 const seg_entry
*default_seg
= 0;
6846 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
6848 unsigned int dupl
= i
.operands
;
6849 unsigned int dest
= dupl
- 1;
6852 /* The destination must be an xmm register. */
6853 gas_assert (i
.reg_operands
6854 && MAX_OPERANDS
> dupl
6855 && operand_type_equal (&i
.types
[dest
], ®xmm
));
6857 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
6858 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
6860 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
6862 /* Keep xmm0 for instructions with VEX prefix and 3
6864 i
.tm
.operand_types
[0].bitfield
.instance
= InstanceNone
;
6865 i
.tm
.operand_types
[0].bitfield
.class = RegSIMD
;
6870 /* We remove the first xmm0 and keep the number of
6871 operands unchanged, which in fact duplicates the
6873 for (j
= 1; j
< i
.operands
; j
++)
6875 i
.op
[j
- 1] = i
.op
[j
];
6876 i
.types
[j
- 1] = i
.types
[j
];
6877 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
6878 i
.flags
[j
- 1] = i
.flags
[j
];
6882 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
6884 gas_assert ((MAX_OPERANDS
- 1) > dupl
6885 && (i
.tm
.opcode_modifier
.vexsources
6888 /* Add the implicit xmm0 for instructions with VEX prefix
6890 for (j
= i
.operands
; j
> 0; j
--)
6892 i
.op
[j
] = i
.op
[j
- 1];
6893 i
.types
[j
] = i
.types
[j
- 1];
6894 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
6895 i
.flags
[j
] = i
.flags
[j
- 1];
6898 = (const reg_entry
*) hash_find (reg_hash
, "xmm0");
6899 i
.types
[0] = regxmm
;
6900 i
.tm
.operand_types
[0] = regxmm
;
6903 i
.reg_operands
+= 2;
6908 i
.op
[dupl
] = i
.op
[dest
];
6909 i
.types
[dupl
] = i
.types
[dest
];
6910 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
6911 i
.flags
[dupl
] = i
.flags
[dest
];
6920 i
.op
[dupl
] = i
.op
[dest
];
6921 i
.types
[dupl
] = i
.types
[dest
];
6922 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
6923 i
.flags
[dupl
] = i
.flags
[dest
];
6926 if (i
.tm
.opcode_modifier
.immext
)
6929 else if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
6930 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
6934 for (j
= 1; j
< i
.operands
; j
++)
6936 i
.op
[j
- 1] = i
.op
[j
];
6937 i
.types
[j
- 1] = i
.types
[j
];
6939 /* We need to adjust fields in i.tm since they are used by
6940 build_modrm_byte. */
6941 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
6943 i
.flags
[j
- 1] = i
.flags
[j
];
6950 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
6952 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
6954 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
6955 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.class == RegSIMD
);
6956 regnum
= register_number (i
.op
[1].regs
);
6957 first_reg_in_group
= regnum
& ~3;
6958 last_reg_in_group
= first_reg_in_group
+ 3;
6959 if (regnum
!= first_reg_in_group
)
6960 as_warn (_("source register `%s%s' implicitly denotes"
6961 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
6962 register_prefix
, i
.op
[1].regs
->reg_name
,
6963 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
6964 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
6967 else if (i
.tm
.opcode_modifier
.regkludge
)
6969 /* The imul $imm, %reg instruction is converted into
6970 imul $imm, %reg, %reg, and the clr %reg instruction
6971 is converted into xor %reg, %reg. */
6973 unsigned int first_reg_op
;
6975 if (operand_type_check (i
.types
[0], reg
))
6979 /* Pretend we saw the extra register operand. */
6980 gas_assert (i
.reg_operands
== 1
6981 && i
.op
[first_reg_op
+ 1].regs
== 0);
6982 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
6983 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
6988 if (i
.tm
.opcode_modifier
.modrm
)
6990 /* The opcode is completed (modulo i.tm.extension_opcode which
6991 must be put into the modrm byte). Now, we make the modrm and
6992 index base bytes based on all the info we've collected. */
6994 default_seg
= build_modrm_byte ();
6996 else if (i
.types
[0].bitfield
.class == SReg
)
6998 if (flag_code
!= CODE_64BIT
6999 ? i
.tm
.base_opcode
== POP_SEG_SHORT
7000 && i
.op
[0].regs
->reg_num
== 1
7001 : (i
.tm
.base_opcode
| 1) == POP_SEG386_SHORT
7002 && i
.op
[0].regs
->reg_num
< 4)
7004 as_bad (_("you can't `%s %s%s'"),
7005 i
.tm
.name
, register_prefix
, i
.op
[0].regs
->reg_name
);
7008 if ( i
.op
[0].regs
->reg_num
> 3 && i
.tm
.opcode_length
== 1 )
7010 i
.tm
.base_opcode
^= POP_SEG_SHORT
^ POP_SEG386_SHORT
;
7011 i
.tm
.opcode_length
= 2;
7013 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
7015 else if ((i
.tm
.base_opcode
& ~0x3) == MOV_AX_DISP32
)
7019 else if (i
.tm
.opcode_modifier
.isstring
)
7021 /* For the string instructions that allow a segment override
7022 on one of their operands, the default segment is ds. */
7025 else if (i
.tm
.opcode_modifier
.shortform
)
7027 /* The register or float register operand is in operand
7029 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.class != Reg
;
7031 /* Register goes in low 3 bits of opcode. */
7032 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
7033 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7035 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
7037 /* Warn about some common errors, but press on regardless.
7038 The first case can be generated by gcc (<= 2.8.1). */
7039 if (i
.operands
== 2)
7041 /* Reversed arguments on faddp, fsubp, etc. */
7042 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
7043 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
7044 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
7048 /* Extraneous `l' suffix on fp insn. */
7049 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
7050 register_prefix
, i
.op
[0].regs
->reg_name
);
7055 if (i
.tm
.base_opcode
== 0x8d /* lea */
7058 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
7060 /* If a segment was explicitly specified, and the specified segment
7061 is not the default, use an opcode prefix to select it. If we
7062 never figured out what the default segment is, then default_seg
7063 will be zero at this point, and the specified segment prefix will
7065 if ((i
.seg
[0]) && (i
.seg
[0] != default_seg
))
7067 if (!add_prefix (i
.seg
[0]->seg_prefix
))
7073 static const seg_entry
*
7074 build_modrm_byte (void)
7076 const seg_entry
*default_seg
= 0;
7077 unsigned int source
, dest
;
7080 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
7083 unsigned int nds
, reg_slot
;
7086 dest
= i
.operands
- 1;
7089 /* There are 2 kinds of instructions:
7090 1. 5 operands: 4 register operands or 3 register operands
7091 plus 1 memory operand plus one Imm4 operand, VexXDS, and
7092 VexW0 or VexW1. The destination must be either XMM, YMM or
7094 2. 4 operands: 4 register operands or 3 register operands
7095 plus 1 memory operand, with VexXDS. */
7096 gas_assert ((i
.reg_operands
== 4
7097 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
7098 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7099 && i
.tm
.opcode_modifier
.vexw
7100 && i
.tm
.operand_types
[dest
].bitfield
.class == RegSIMD
);
7102 /* If VexW1 is set, the first non-immediate operand is the source and
7103 the second non-immediate one is encoded in the immediate operand. */
7104 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
7106 source
= i
.imm_operands
;
7107 reg_slot
= i
.imm_operands
+ 1;
7111 source
= i
.imm_operands
+ 1;
7112 reg_slot
= i
.imm_operands
;
7115 if (i
.imm_operands
== 0)
7117 /* When there is no immediate operand, generate an 8bit
7118 immediate operand to encode the first operand. */
7119 exp
= &im_expressions
[i
.imm_operands
++];
7120 i
.op
[i
.operands
].imms
= exp
;
7121 i
.types
[i
.operands
] = imm8
;
7124 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7125 exp
->X_op
= O_constant
;
7126 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
7127 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7131 gas_assert (i
.imm_operands
== 1);
7132 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
7133 gas_assert (!i
.tm
.opcode_modifier
.immext
);
7135 /* Turn on Imm8 again so that output_imm will generate it. */
7136 i
.types
[0].bitfield
.imm8
= 1;
7138 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7139 i
.op
[0].imms
->X_add_number
7140 |= register_number (i
.op
[reg_slot
].regs
) << 4;
7141 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7144 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.class == RegSIMD
);
7145 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
7150 /* i.reg_operands MUST be the number of real register operands;
7151 implicit registers do not count. If there are 3 register
7152 operands, it must be a instruction with VexNDS. For a
7153 instruction with VexNDD, the destination register is encoded
7154 in VEX prefix. If there are 4 register operands, it must be
7155 a instruction with VEX prefix and 3 sources. */
7156 if (i
.mem_operands
== 0
7157 && ((i
.reg_operands
== 2
7158 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
7159 || (i
.reg_operands
== 3
7160 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7161 || (i
.reg_operands
== 4 && vex_3_sources
)))
7169 /* When there are 3 operands, one of them may be immediate,
7170 which may be the first or the last operand. Otherwise,
7171 the first operand must be shift count register (cl) or it
7172 is an instruction with VexNDS. */
7173 gas_assert (i
.imm_operands
== 1
7174 || (i
.imm_operands
== 0
7175 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7176 || (i
.types
[0].bitfield
.instance
== RegC
7177 && i
.types
[0].bitfield
.byte
))));
7178 if (operand_type_check (i
.types
[0], imm
)
7179 || (i
.types
[0].bitfield
.instance
== RegC
7180 && i
.types
[0].bitfield
.byte
))
7186 /* When there are 4 operands, the first two must be 8bit
7187 immediate operands. The source operand will be the 3rd
7190 For instructions with VexNDS, if the first operand
7191 an imm8, the source operand is the 2nd one. If the last
7192 operand is imm8, the source operand is the first one. */
7193 gas_assert ((i
.imm_operands
== 2
7194 && i
.types
[0].bitfield
.imm8
7195 && i
.types
[1].bitfield
.imm8
)
7196 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7197 && i
.imm_operands
== 1
7198 && (i
.types
[0].bitfield
.imm8
7199 || i
.types
[i
.operands
- 1].bitfield
.imm8
7201 if (i
.imm_operands
== 2)
7205 if (i
.types
[0].bitfield
.imm8
)
7212 if (is_evex_encoding (&i
.tm
))
7214 /* For EVEX instructions, when there are 5 operands, the
7215 first one must be immediate operand. If the second one
7216 is immediate operand, the source operand is the 3th
7217 one. If the last one is immediate operand, the source
7218 operand is the 2nd one. */
7219 gas_assert (i
.imm_operands
== 2
7220 && i
.tm
.opcode_modifier
.sae
7221 && operand_type_check (i
.types
[0], imm
));
7222 if (operand_type_check (i
.types
[1], imm
))
7224 else if (operand_type_check (i
.types
[4], imm
))
7238 /* RC/SAE operand could be between DEST and SRC. That happens
7239 when one operand is GPR and the other one is XMM/YMM/ZMM
7241 if (i
.rounding
&& i
.rounding
->operand
== (int) dest
)
7244 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7246 /* For instructions with VexNDS, the register-only source
7247 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
7248 register. It is encoded in VEX prefix. */
7250 i386_operand_type op
;
7253 /* Check register-only source operand when two source
7254 operands are swapped. */
7255 if (!i
.tm
.operand_types
[source
].bitfield
.baseindex
7256 && i
.tm
.operand_types
[dest
].bitfield
.baseindex
)
7264 op
= i
.tm
.operand_types
[vvvv
];
7265 if ((dest
+ 1) >= i
.operands
7266 || ((op
.bitfield
.class != Reg
7267 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
7268 && op
.bitfield
.class != RegSIMD
7269 && !operand_type_equal (&op
, ®mask
)))
7271 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
7277 /* One of the register operands will be encoded in the i.rm.reg
7278 field, the other in the combined i.rm.mode and i.rm.regmem
7279 fields. If no form of this instruction supports a memory
7280 destination operand, then we assume the source operand may
7281 sometimes be a memory operand and so we need to store the
7282 destination in the i.rm.reg field. */
7283 if (!i
.tm
.opcode_modifier
.regmem
7284 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
7286 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
7287 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
7288 if (i
.op
[dest
].regs
->reg_type
.bitfield
.class == RegMMX
7289 || i
.op
[source
].regs
->reg_type
.bitfield
.class == RegMMX
)
7290 i
.has_regmmx
= TRUE
;
7291 else if (i
.op
[dest
].regs
->reg_type
.bitfield
.class == RegSIMD
7292 || i
.op
[source
].regs
->reg_type
.bitfield
.class == RegSIMD
)
7294 if (i
.types
[dest
].bitfield
.zmmword
7295 || i
.types
[source
].bitfield
.zmmword
)
7296 i
.has_regzmm
= TRUE
;
7297 else if (i
.types
[dest
].bitfield
.ymmword
7298 || i
.types
[source
].bitfield
.ymmword
)
7299 i
.has_regymm
= TRUE
;
7301 i
.has_regxmm
= TRUE
;
7303 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
7305 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
7307 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
7309 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
7314 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
7315 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
7316 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
7318 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
7320 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
7322 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
7325 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
7327 if (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class != RegCR
)
7330 add_prefix (LOCK_PREFIX_OPCODE
);
7334 { /* If it's not 2 reg operands... */
7339 unsigned int fake_zero_displacement
= 0;
7342 for (op
= 0; op
< i
.operands
; op
++)
7343 if (i
.flags
[op
] & Operand_Mem
)
7345 gas_assert (op
< i
.operands
);
7347 if (i
.tm
.opcode_modifier
.vecsib
)
7349 if (i
.index_reg
->reg_num
== RegIZ
)
7352 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7355 i
.sib
.base
= NO_BASE_REGISTER
;
7356 i
.sib
.scale
= i
.log2_scale_factor
;
7357 i
.types
[op
].bitfield
.disp8
= 0;
7358 i
.types
[op
].bitfield
.disp16
= 0;
7359 i
.types
[op
].bitfield
.disp64
= 0;
7360 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
7362 /* Must be 32 bit */
7363 i
.types
[op
].bitfield
.disp32
= 1;
7364 i
.types
[op
].bitfield
.disp32s
= 0;
7368 i
.types
[op
].bitfield
.disp32
= 0;
7369 i
.types
[op
].bitfield
.disp32s
= 1;
7372 i
.sib
.index
= i
.index_reg
->reg_num
;
7373 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7375 if ((i
.index_reg
->reg_flags
& RegVRex
) != 0)
7381 if (i
.base_reg
== 0)
7384 if (!i
.disp_operands
)
7385 fake_zero_displacement
= 1;
7386 if (i
.index_reg
== 0)
7388 i386_operand_type newdisp
;
7390 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7391 /* Operand is just <disp> */
7392 if (flag_code
== CODE_64BIT
)
7394 /* 64bit mode overwrites the 32bit absolute
7395 addressing by RIP relative addressing and
7396 absolute addressing is encoded by one of the
7397 redundant SIB forms. */
7398 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7399 i
.sib
.base
= NO_BASE_REGISTER
;
7400 i
.sib
.index
= NO_INDEX_REGISTER
;
7401 newdisp
= (!i
.prefix
[ADDR_PREFIX
] ? disp32s
: disp32
);
7403 else if ((flag_code
== CODE_16BIT
)
7404 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
7406 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
7411 i
.rm
.regmem
= NO_BASE_REGISTER
;
7414 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
7415 i
.types
[op
] = operand_type_or (i
.types
[op
], newdisp
);
7417 else if (!i
.tm
.opcode_modifier
.vecsib
)
7419 /* !i.base_reg && i.index_reg */
7420 if (i
.index_reg
->reg_num
== RegIZ
)
7421 i
.sib
.index
= NO_INDEX_REGISTER
;
7423 i
.sib
.index
= i
.index_reg
->reg_num
;
7424 i
.sib
.base
= NO_BASE_REGISTER
;
7425 i
.sib
.scale
= i
.log2_scale_factor
;
7426 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7427 i
.types
[op
].bitfield
.disp8
= 0;
7428 i
.types
[op
].bitfield
.disp16
= 0;
7429 i
.types
[op
].bitfield
.disp64
= 0;
7430 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
7432 /* Must be 32 bit */
7433 i
.types
[op
].bitfield
.disp32
= 1;
7434 i
.types
[op
].bitfield
.disp32s
= 0;
7438 i
.types
[op
].bitfield
.disp32
= 0;
7439 i
.types
[op
].bitfield
.disp32s
= 1;
7441 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7445 /* RIP addressing for 64bit mode. */
7446 else if (i
.base_reg
->reg_num
== RegIP
)
7448 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7449 i
.rm
.regmem
= NO_BASE_REGISTER
;
7450 i
.types
[op
].bitfield
.disp8
= 0;
7451 i
.types
[op
].bitfield
.disp16
= 0;
7452 i
.types
[op
].bitfield
.disp32
= 0;
7453 i
.types
[op
].bitfield
.disp32s
= 1;
7454 i
.types
[op
].bitfield
.disp64
= 0;
7455 i
.flags
[op
] |= Operand_PCrel
;
7456 if (! i
.disp_operands
)
7457 fake_zero_displacement
= 1;
7459 else if (i
.base_reg
->reg_type
.bitfield
.word
)
7461 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7462 switch (i
.base_reg
->reg_num
)
7465 if (i
.index_reg
== 0)
7467 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
7468 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
7472 if (i
.index_reg
== 0)
7475 if (operand_type_check (i
.types
[op
], disp
) == 0)
7477 /* fake (%bp) into 0(%bp) */
7478 i
.types
[op
].bitfield
.disp8
= 1;
7479 fake_zero_displacement
= 1;
7482 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
7483 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
7485 default: /* (%si) -> 4 or (%di) -> 5 */
7486 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
7488 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
7490 else /* i.base_reg and 32/64 bit mode */
7492 if (flag_code
== CODE_64BIT
7493 && operand_type_check (i
.types
[op
], disp
))
7495 i
.types
[op
].bitfield
.disp16
= 0;
7496 i
.types
[op
].bitfield
.disp64
= 0;
7497 if (i
.prefix
[ADDR_PREFIX
] == 0)
7499 i
.types
[op
].bitfield
.disp32
= 0;
7500 i
.types
[op
].bitfield
.disp32s
= 1;
7504 i
.types
[op
].bitfield
.disp32
= 1;
7505 i
.types
[op
].bitfield
.disp32s
= 0;
7509 if (!i
.tm
.opcode_modifier
.vecsib
)
7510 i
.rm
.regmem
= i
.base_reg
->reg_num
;
7511 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
7513 i
.sib
.base
= i
.base_reg
->reg_num
;
7514 /* x86-64 ignores REX prefix bit here to avoid decoder
7516 if (!(i
.base_reg
->reg_flags
& RegRex
)
7517 && (i
.base_reg
->reg_num
== EBP_REG_NUM
7518 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
7520 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
7522 fake_zero_displacement
= 1;
7523 i
.types
[op
].bitfield
.disp8
= 1;
7525 i
.sib
.scale
= i
.log2_scale_factor
;
7526 if (i
.index_reg
== 0)
7528 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7529 /* <disp>(%esp) becomes two byte modrm with no index
7530 register. We've already stored the code for esp
7531 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
7532 Any base register besides %esp will not use the
7533 extra modrm byte. */
7534 i
.sib
.index
= NO_INDEX_REGISTER
;
7536 else if (!i
.tm
.opcode_modifier
.vecsib
)
7538 if (i
.index_reg
->reg_num
== RegIZ
)
7539 i
.sib
.index
= NO_INDEX_REGISTER
;
7541 i
.sib
.index
= i
.index_reg
->reg_num
;
7542 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7543 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7548 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
7549 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
7553 if (!fake_zero_displacement
7557 fake_zero_displacement
= 1;
7558 if (i
.disp_encoding
== disp_encoding_8bit
)
7559 i
.types
[op
].bitfield
.disp8
= 1;
7561 i
.types
[op
].bitfield
.disp32
= 1;
7563 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
7567 if (fake_zero_displacement
)
7569 /* Fakes a zero displacement assuming that i.types[op]
7570 holds the correct displacement size. */
7573 gas_assert (i
.op
[op
].disps
== 0);
7574 exp
= &disp_expressions
[i
.disp_operands
++];
7575 i
.op
[op
].disps
= exp
;
7576 exp
->X_op
= O_constant
;
7577 exp
->X_add_number
= 0;
7578 exp
->X_add_symbol
= (symbolS
*) 0;
7579 exp
->X_op_symbol
= (symbolS
*) 0;
7587 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
7589 if (operand_type_check (i
.types
[0], imm
))
7590 i
.vex
.register_specifier
= NULL
;
7593 /* VEX.vvvv encodes one of the sources when the first
7594 operand is not an immediate. */
7595 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
7596 i
.vex
.register_specifier
= i
.op
[0].regs
;
7598 i
.vex
.register_specifier
= i
.op
[1].regs
;
7601 /* Destination is a XMM register encoded in the ModRM.reg
7603 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
7604 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
7607 /* ModRM.rm and VEX.B encodes the other source. */
7608 if (!i
.mem_operands
)
7612 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
7613 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
7615 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
7617 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
7621 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
7623 i
.vex
.register_specifier
= i
.op
[2].regs
;
7624 if (!i
.mem_operands
)
7627 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
7628 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
7632 /* Fill in i.rm.reg or i.rm.regmem field with register operand
7633 (if any) based on i.tm.extension_opcode. Again, we must be
7634 careful to make sure that segment/control/debug/test/MMX
7635 registers are coded into the i.rm.reg field. */
7636 else if (i
.reg_operands
)
7639 unsigned int vex_reg
= ~0;
7641 for (op
= 0; op
< i
.operands
; op
++)
7643 if (i
.types
[op
].bitfield
.class == Reg
7644 || i
.types
[op
].bitfield
.class == RegBND
7645 || i
.types
[op
].bitfield
.class == RegMask
7646 || i
.types
[op
].bitfield
.class == SReg
7647 || i
.types
[op
].bitfield
.class == RegCR
7648 || i
.types
[op
].bitfield
.class == RegDR
7649 || i
.types
[op
].bitfield
.class == RegTR
)
7651 if (i
.types
[op
].bitfield
.class == RegSIMD
)
7653 if (i
.types
[op
].bitfield
.zmmword
)
7654 i
.has_regzmm
= TRUE
;
7655 else if (i
.types
[op
].bitfield
.ymmword
)
7656 i
.has_regymm
= TRUE
;
7658 i
.has_regxmm
= TRUE
;
7661 if (i
.types
[op
].bitfield
.class == RegMMX
)
7663 i
.has_regmmx
= TRUE
;
7670 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7672 /* For instructions with VexNDS, the register-only
7673 source operand is encoded in VEX prefix. */
7674 gas_assert (mem
!= (unsigned int) ~0);
7679 gas_assert (op
< i
.operands
);
7683 /* Check register-only source operand when two source
7684 operands are swapped. */
7685 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
7686 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
7690 gas_assert (mem
== (vex_reg
+ 1)
7691 && op
< i
.operands
);
7696 gas_assert (vex_reg
< i
.operands
);
7700 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
7702 /* For instructions with VexNDD, the register destination
7703 is encoded in VEX prefix. */
7704 if (i
.mem_operands
== 0)
7706 /* There is no memory operand. */
7707 gas_assert ((op
+ 2) == i
.operands
);
7712 /* There are only 2 non-immediate operands. */
7713 gas_assert (op
< i
.imm_operands
+ 2
7714 && i
.operands
== i
.imm_operands
+ 2);
7715 vex_reg
= i
.imm_operands
+ 1;
7719 gas_assert (op
< i
.operands
);
7721 if (vex_reg
!= (unsigned int) ~0)
7723 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
7725 if ((type
->bitfield
.class != Reg
7726 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
7727 && type
->bitfield
.class != RegSIMD
7728 && !operand_type_equal (type
, ®mask
))
7731 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
7734 /* Don't set OP operand twice. */
7737 /* If there is an extension opcode to put here, the
7738 register number must be put into the regmem field. */
7739 if (i
.tm
.extension_opcode
!= None
)
7741 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
7742 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7744 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
7749 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
7750 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7752 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
7757 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
7758 must set it to 3 to indicate this is a register operand
7759 in the regmem field. */
7760 if (!i
.mem_operands
)
7764 /* Fill in i.rm.reg field with extension opcode (if any). */
7765 if (i
.tm
.extension_opcode
!= None
)
7766 i
.rm
.reg
= i
.tm
.extension_opcode
;
7772 output_branch (void)
7778 relax_substateT subtype
;
7782 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
7783 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
7786 if (i
.prefix
[DATA_PREFIX
] != 0)
7792 /* Pentium4 branch hints. */
7793 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
7794 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
7799 if (i
.prefix
[REX_PREFIX
] != 0)
7805 /* BND prefixed jump. */
7806 if (i
.prefix
[BND_PREFIX
] != 0)
7808 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
7812 if (i
.prefixes
!= 0 && !intel_syntax
)
7813 as_warn (_("skipping prefixes on this instruction"));
7815 /* It's always a symbol; End frag & setup for relax.
7816 Make sure there is enough room in this frag for the largest
7817 instruction we may generate in md_convert_frag. This is 2
7818 bytes for the opcode and room for the prefix and largest
7820 frag_grow (prefix
+ 2 + 4);
7821 /* Prefix and 1 opcode byte go in fr_fix. */
7822 p
= frag_more (prefix
+ 1);
7823 if (i
.prefix
[DATA_PREFIX
] != 0)
7824 *p
++ = DATA_PREFIX_OPCODE
;
7825 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
7826 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
7827 *p
++ = i
.prefix
[SEG_PREFIX
];
7828 if (i
.prefix
[REX_PREFIX
] != 0)
7829 *p
++ = i
.prefix
[REX_PREFIX
];
7830 *p
= i
.tm
.base_opcode
;
7832 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
7833 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
7834 else if (cpu_arch_flags
.bitfield
.cpui386
)
7835 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
7837 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
7840 sym
= i
.op
[0].disps
->X_add_symbol
;
7841 off
= i
.op
[0].disps
->X_add_number
;
7843 if (i
.op
[0].disps
->X_op
!= O_constant
7844 && i
.op
[0].disps
->X_op
!= O_symbol
)
7846 /* Handle complex expressions. */
7847 sym
= make_expr_symbol (i
.op
[0].disps
);
7851 /* 1 possible extra opcode + 4 byte displacement go in var part.
7852 Pass reloc in fr_var. */
7853 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Return TRUE iff PLT32 relocation should be used for branching to
   symbol S.  */

static bfd_boolean
need_plt32_p (symbolS *s)
{
  /* PLT32 relocation is ELF only.  */
  if (!IS_ELF)
    return FALSE;

#ifdef TE_SOLARIS
  /* Don't emit PLT32 relocation on Solaris: neither native linker nor
     krtld support it.  */
  return FALSE;
#endif

  /* Since there is no need to prepare for PLT branch on x86-64, we
     can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
     be used as a marker for 32-bit PC-relative branches.  */
  if (!object_64bit)
    return FALSE;

  /* Weak or undefined symbol need PLT32 relocation.  */
  if (S_IS_WEAK (s) || !S_IS_DEFINED (s))
    return TRUE;

  /* Non-global symbol doesn't need PLT32 relocation.  */
  if (! S_IS_EXTERNAL (s))
    return FALSE;

  /* Other global symbols need PLT32 relocation.  NB: Symbol with
     non-default visibilities are treated as normal global symbol
     so that PLT32 relocation can be used as a marker for 32-bit
     PC-relative branches.  It is useful for linker relaxation.  */
  return TRUE;
}
#endif
7901 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
7903 if (i
.tm
.opcode_modifier
.jumpbyte
)
7905 /* This is a loop or jecxz type instruction. */
7907 if (i
.prefix
[ADDR_PREFIX
] != 0)
7909 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE
);
7912 /* Pentium4 branch hints. */
7913 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
7914 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
7916 FRAG_APPEND_1_CHAR (i
.prefix
[SEG_PREFIX
]);
7925 if (flag_code
== CODE_16BIT
)
7928 if (i
.prefix
[DATA_PREFIX
] != 0)
7930 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE
);
7940 if (i
.prefix
[REX_PREFIX
] != 0)
7942 FRAG_APPEND_1_CHAR (i
.prefix
[REX_PREFIX
]);
7946 /* BND prefixed jump. */
7947 if (i
.prefix
[BND_PREFIX
] != 0)
7949 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
7953 if (i
.prefixes
!= 0 && !intel_syntax
)
7954 as_warn (_("skipping prefixes on this instruction"));
7956 p
= frag_more (i
.tm
.opcode_length
+ size
);
7957 switch (i
.tm
.opcode_length
)
7960 *p
++ = i
.tm
.base_opcode
>> 8;
7963 *p
++ = i
.tm
.base_opcode
;
7969 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7971 && jump_reloc
== NO_RELOC
7972 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
7973 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
7976 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
7978 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
7979 i
.op
[0].disps
, 1, jump_reloc
);
7981 /* All jumps handled here are signed, but don't use a signed limit
7982 check for 32 and 16 bit jumps as we want to allow wrap around at
7983 4G and 64k respectively. */
7985 fixP
->fx_signed
= 1;
7989 output_interseg_jump (void)
7997 if (flag_code
== CODE_16BIT
)
8001 if (i
.prefix
[DATA_PREFIX
] != 0)
8007 if (i
.prefix
[REX_PREFIX
] != 0)
8017 if (i
.prefixes
!= 0 && !intel_syntax
)
8018 as_warn (_("skipping prefixes on this instruction"));
8020 /* 1 opcode; 2 segment; offset */
8021 p
= frag_more (prefix
+ 1 + 2 + size
);
8023 if (i
.prefix
[DATA_PREFIX
] != 0)
8024 *p
++ = DATA_PREFIX_OPCODE
;
8026 if (i
.prefix
[REX_PREFIX
] != 0)
8027 *p
++ = i
.prefix
[REX_PREFIX
];
8029 *p
++ = i
.tm
.base_opcode
;
8030 if (i
.op
[1].imms
->X_op
== O_constant
)
8032 offsetT n
= i
.op
[1].imms
->X_add_number
;
8035 && !fits_in_unsigned_word (n
)
8036 && !fits_in_signed_word (n
))
8038 as_bad (_("16-bit jump out of range"));
8041 md_number_to_chars (p
, n
, size
);
8044 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8045 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
8046 if (i
.op
[0].imms
->X_op
!= O_constant
)
8047 as_bad (_("can't handle non absolute segment in `%s'"),
8049 md_number_to_chars (p
+ size
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* At the end of assembly, emit the .note.gnu.property section
   describing the ISA and features used (GNU_PROPERTY_X86_ISA_1_USED /
   GNU_PROPERTY_X86_FEATURE_2_USED), accumulated per-insn in
   x86_isa_1_used / x86_feature_2_used.  */

void
x86_cleanup (void)
{
  char *p;
  asection *sec;
  segT seg = now_seg;
  subsegT subseg = now_subseg;
  unsigned int alignment, align_size_1;
  unsigned int isa_1_descsz, feature_2_descsz, descsz;
  unsigned int isa_1_descsz_raw, feature_2_descsz_raw;
  unsigned int padding;

  if (!IS_ELF || !x86_used_note)
    return;

  x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_X86;

  /* The .note.gnu.property section layout:

     Field	Length		Contents
     ----	----		----
     n_namsz	4		4
     n_descsz	4		The note descriptor size
     n_type	4		NT_GNU_PROPERTY_TYPE_0
     n_name	4		"GNU"
     n_desc	n_descsz	The program property array
     ....	....		....
   */

  /* Create the .note.gnu.property section.  */
  sec = subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME, 0);
  bfd_set_section_flags (sec,
			 (SEC_ALLOC
			  | SEC_LOAD
			  | SEC_DATA
			  | SEC_HAS_CONTENTS
			  | SEC_READONLY));

  if (get_elf_backend_data (stdoutput)->s->elfclass == ELFCLASS64)
    {
      align_size_1 = 7;
      alignment = 3;
    }
  else
    {
      align_size_1 = 3;
      alignment = 2;
    }

  bfd_set_section_alignment (sec, alignment);
  elf_section_type (sec) = SHT_NOTE;

  /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
     + 4-byte data  */
  isa_1_descsz_raw = 4 + 4 + 4;
  /* Align GNU_PROPERTY_X86_ISA_1_USED.  */
  isa_1_descsz = (isa_1_descsz_raw + align_size_1) & ~align_size_1;

  feature_2_descsz_raw = isa_1_descsz;
  /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
     + 4-byte data  */
  feature_2_descsz_raw += 4 + 4 + 4;
  /* Align GNU_PROPERTY_X86_FEATURE_2_USED.  */
  feature_2_descsz = ((feature_2_descsz_raw + align_size_1)
		      & ~align_size_1);

  descsz = feature_2_descsz;
  /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz.  */
  p = frag_more (4 + 4 + 4 + 4 + descsz);

  /* Write n_namsz.  */
  md_number_to_chars (p, (valueT) 4, 4);

  /* Write n_descsz.  */
  md_number_to_chars (p + 4, (valueT) descsz, 4);

  /* Write n_type.  */
  md_number_to_chars (p + 4 * 2, (valueT) NT_GNU_PROPERTY_TYPE_0, 4);

  /* Write n_name.  */
  memcpy (p + 4 * 3, "GNU", 4);

  /* Write 4-byte type.  */
  md_number_to_chars (p + 4 * 4,
		      (valueT) GNU_PROPERTY_X86_ISA_1_USED, 4);

  /* Write 4-byte data size.  */
  md_number_to_chars (p + 4 * 5, (valueT) 4, 4);

  /* Write 4-byte data.  */
  md_number_to_chars (p + 4 * 6, (valueT) x86_isa_1_used, 4);

  /* Zero out paddings.  */
  padding = isa_1_descsz - isa_1_descsz_raw;
  if (padding)
    memset (p + 4 * 7, 0, padding);

  /* Write 4-byte type.  */
  md_number_to_chars (p + isa_1_descsz + 4 * 4,
		      (valueT) GNU_PROPERTY_X86_FEATURE_2_USED, 4);

  /* Write 4-byte data size.  */
  md_number_to_chars (p + isa_1_descsz + 4 * 5, (valueT) 4, 4);

  /* Write 4-byte data.  */
  md_number_to_chars (p + isa_1_descsz + 4 * 6,
		      (valueT) x86_feature_2_used, 4);

  /* Zero out paddings.  */
  padding = feature_2_descsz - feature_2_descsz_raw;
  if (padding)
    memset (p + isa_1_descsz + 4 * 7, 0, padding);

  /* We probably can't restore the current segment, for there likely
     isn't one yet...  */
  if (seg && subseg)
    subseg_set (seg, subseg);
}
#endif
8174 encoding_length (const fragS
*start_frag
, offsetT start_off
,
8175 const char *frag_now_ptr
)
8177 unsigned int len
= 0;
8179 if (start_frag
!= frag_now
)
8181 const fragS
*fr
= start_frag
;
8186 } while (fr
&& fr
!= frag_now
);
8189 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
8195 fragS
*insn_start_frag
;
8196 offsetT insn_start_off
;
8198 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8199 if (IS_ELF
&& x86_used_note
)
8201 if (i
.tm
.cpu_flags
.bitfield
.cpucmov
)
8202 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_CMOV
;
8203 if (i
.tm
.cpu_flags
.bitfield
.cpusse
)
8204 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE
;
8205 if (i
.tm
.cpu_flags
.bitfield
.cpusse2
)
8206 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE2
;
8207 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
)
8208 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE3
;
8209 if (i
.tm
.cpu_flags
.bitfield
.cpussse3
)
8210 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSSE3
;
8211 if (i
.tm
.cpu_flags
.bitfield
.cpusse4_1
)
8212 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE4_1
;
8213 if (i
.tm
.cpu_flags
.bitfield
.cpusse4_2
)
8214 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE4_2
;
8215 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
)
8216 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX
;
8217 if (i
.tm
.cpu_flags
.bitfield
.cpuavx2
)
8218 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX2
;
8219 if (i
.tm
.cpu_flags
.bitfield
.cpufma
)
8220 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_FMA
;
8221 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
)
8222 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512F
;
8223 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512cd
)
8224 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512CD
;
8225 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512er
)
8226 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512ER
;
8227 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
)
8228 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512PF
;
8229 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
)
8230 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512VL
;
8231 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
)
8232 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512DQ
;
8233 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
)
8234 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512BW
;
8235 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_4fmaps
)
8236 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_4FMAPS
;
8237 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
)
8238 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_4VNNIW
;
8239 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_bitalg
)
8240 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_BITALG
;
8241 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512ifma
)
8242 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_IFMA
;
8243 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512vbmi
)
8244 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI
;
8245 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_vbmi2
)
8246 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI2
;
8247 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_vnni
)
8248 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VNNI
;
8249 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_bf16
)
8250 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_BF16
;
8252 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
8253 || i
.tm
.cpu_flags
.bitfield
.cpu287
8254 || i
.tm
.cpu_flags
.bitfield
.cpu387
8255 || i
.tm
.cpu_flags
.bitfield
.cpu687
8256 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
8257 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
8258 /* Don't set GNU_PROPERTY_X86_FEATURE_2_MMX for prefetchtXXX nor
8259 Xfence instructions. */
8260 if (i
.tm
.base_opcode
!= 0xf18
8261 && i
.tm
.base_opcode
!= 0xf0d
8262 && i
.tm
.base_opcode
!= 0xfaef8
8264 || i
.tm
.cpu_flags
.bitfield
.cpummx
8265 || i
.tm
.cpu_flags
.bitfield
.cpua3dnow
8266 || i
.tm
.cpu_flags
.bitfield
.cpua3dnowa
))
8267 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
8269 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
8271 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
8273 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
8274 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
8275 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
8276 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
8277 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
8278 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
8279 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
8280 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
8281 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
8285 /* Tie dwarf2 debug info to the address at the start of the insn.
8286 We can't do this after the insn has been output as the current
8287 frag may have been closed off. eg. by frag_var. */
8288 dwarf2_emit_insn (0);
8290 insn_start_frag
= frag_now
;
8291 insn_start_off
= frag_now_fix ();
8294 if (i
.tm
.opcode_modifier
.jump
)
8296 else if (i
.tm
.opcode_modifier
.jumpbyte
8297 || i
.tm
.opcode_modifier
.jumpdword
)
8299 else if (i
.tm
.opcode_modifier
.jumpintersegment
)
8300 output_interseg_jump ();
8303 /* Output normal instructions here. */
8307 unsigned int prefix
;
8310 && (i
.tm
.base_opcode
== 0xfaee8
8311 || i
.tm
.base_opcode
== 0xfaef0
8312 || i
.tm
.base_opcode
== 0xfaef8))
8314 /* Encode lfence, mfence, and sfence as
8315 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
8316 offsetT val
= 0x240483f0ULL
;
8318 md_number_to_chars (p
, val
, 5);
8322 /* Some processors fail on LOCK prefix. This options makes
8323 assembler ignore LOCK prefix and serves as a workaround. */
8324 if (omit_lock_prefix
)
8326 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
)
8328 i
.prefix
[LOCK_PREFIX
] = 0;
8331 /* Since the VEX/EVEX prefix contains the implicit prefix, we
8332 don't need the explicit prefix. */
8333 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
8335 switch (i
.tm
.opcode_length
)
8338 if (i
.tm
.base_opcode
& 0xff000000)
8340 prefix
= (i
.tm
.base_opcode
>> 24) & 0xff;
8341 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
8342 || prefix
!= REPE_PREFIX_OPCODE
8343 || (i
.prefix
[REP_PREFIX
] != REPE_PREFIX_OPCODE
))
8344 add_prefix (prefix
);
8348 if ((i
.tm
.base_opcode
& 0xff0000) != 0)
8350 prefix
= (i
.tm
.base_opcode
>> 16) & 0xff;
8351 add_prefix (prefix
);
8357 /* Check for pseudo prefixes. */
8358 as_bad_where (insn_start_frag
->fr_file
,
8359 insn_start_frag
->fr_line
,
8360 _("pseudo prefix without instruction"));
8366 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8367 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
8368 R_X86_64_GOTTPOFF relocation so that linker can safely
8369 perform IE->LE optimization. */
8370 if (x86_elf_abi
== X86_64_X32_ABI
8372 && i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
8373 && i
.prefix
[REX_PREFIX
] == 0)
8374 add_prefix (REX_OPCODE
);
8377 /* The prefix bytes. */
8378 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
8380 FRAG_APPEND_1_CHAR (*q
);
8384 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
8389 /* REX byte is encoded in VEX prefix. */
8393 FRAG_APPEND_1_CHAR (*q
);
8396 /* There should be no other prefixes for instructions
8401 /* For EVEX instructions i.vrex should become 0 after
8402 build_evex_prefix. For VEX instructions upper 16 registers
8403 aren't available, so VREX should be 0. */
8406 /* Now the VEX prefix. */
8407 p
= frag_more (i
.vex
.length
);
8408 for (j
= 0; j
< i
.vex
.length
; j
++)
8409 p
[j
] = i
.vex
.bytes
[j
];
8412 /* Now the opcode; be careful about word order here! */
8413 if (i
.tm
.opcode_length
== 1)
8415 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
8419 switch (i
.tm
.opcode_length
)
8423 *p
++ = (i
.tm
.base_opcode
>> 24) & 0xff;
8424 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
8428 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
8438 /* Put out high byte first: can't use md_number_to_chars! */
8439 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
8440 *p
= i
.tm
.base_opcode
& 0xff;
8443 /* Now the modrm byte and sib byte (if present). */
8444 if (i
.tm
.opcode_modifier
.modrm
)
8446 FRAG_APPEND_1_CHAR ((i
.rm
.regmem
<< 0
8449 /* If i.rm.regmem == ESP (4)
8450 && i.rm.mode != (Register mode)
8452 ==> need second modrm byte. */
8453 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
8455 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
8456 FRAG_APPEND_1_CHAR ((i
.sib
.base
<< 0
8458 | i
.sib
.scale
<< 6));
8461 if (i
.disp_operands
)
8462 output_disp (insn_start_frag
, insn_start_off
);
8465 output_imm (insn_start_frag
, insn_start_off
);
8468 * frag_now_fix () returning plain abs_section_offset when we're in the
8469 * absolute section, and abs_section_offset not getting updated as data
8470 * gets added to the frag breaks the logic below.
8472 if (now_seg
!= absolute_section
)
8474 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
8476 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
8484 pi ("" /*line*/, &i
);
8486 #endif /* DEBUG386 */
8489 /* Return the size of the displacement operand N. */
8492 disp_size (unsigned int n
)
8496 if (i
.types
[n
].bitfield
.disp64
)
8498 else if (i
.types
[n
].bitfield
.disp8
)
8500 else if (i
.types
[n
].bitfield
.disp16
)
8505 /* Return the size of the immediate operand N. */
8508 imm_size (unsigned int n
)
8511 if (i
.types
[n
].bitfield
.imm64
)
8513 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
8515 else if (i
.types
[n
].bitfield
.imm16
)
8521 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
8526 for (n
= 0; n
< i
.operands
; n
++)
8528 if (operand_type_check (i
.types
[n
], disp
))
8530 if (i
.op
[n
].disps
->X_op
== O_constant
)
8532 int size
= disp_size (n
);
8533 offsetT val
= i
.op
[n
].disps
->X_add_number
;
8535 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
8537 p
= frag_more (size
);
8538 md_number_to_chars (p
, val
, size
);
8542 enum bfd_reloc_code_real reloc_type
;
8543 int size
= disp_size (n
);
8544 int sign
= i
.types
[n
].bitfield
.disp32s
;
8545 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
8548 /* We can't have 8 bit displacement here. */
8549 gas_assert (!i
.types
[n
].bitfield
.disp8
);
8551 /* The PC relative address is computed relative
8552 to the instruction boundary, so in case immediate
8553 fields follows, we need to adjust the value. */
8554 if (pcrel
&& i
.imm_operands
)
8559 for (n1
= 0; n1
< i
.operands
; n1
++)
8560 if (operand_type_check (i
.types
[n1
], imm
))
8562 /* Only one immediate is allowed for PC
8563 relative address. */
8564 gas_assert (sz
== 0);
8566 i
.op
[n
].disps
->X_add_number
-= sz
;
8568 /* We should find the immediate. */
8569 gas_assert (sz
!= 0);
8572 p
= frag_more (size
);
8573 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
8575 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
8576 && (((reloc_type
== BFD_RELOC_32
8577 || reloc_type
== BFD_RELOC_X86_64_32S
8578 || (reloc_type
== BFD_RELOC_64
8580 && (i
.op
[n
].disps
->X_op
== O_symbol
8581 || (i
.op
[n
].disps
->X_op
== O_add
8582 && ((symbol_get_value_expression
8583 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
8585 || reloc_type
== BFD_RELOC_32_PCREL
))
8589 reloc_type
= BFD_RELOC_386_GOTPC
;
8590 i
.op
[n
].imms
->X_add_number
+=
8591 encoding_length (insn_start_frag
, insn_start_off
, p
);
8593 else if (reloc_type
== BFD_RELOC_64
)
8594 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
8596 /* Don't do the adjustment for x86-64, as there
8597 the pcrel addressing is relative to the _next_
8598 insn, and that is taken care of in other code. */
8599 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
8601 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
8602 size
, i
.op
[n
].disps
, pcrel
,
8604 /* Check for "call/jmp *mem", "mov mem, %reg",
8605 "test %reg, mem" and "binop mem, %reg" where binop
8606 is one of adc, add, and, cmp, or, sbb, sub, xor
8607 instructions without data prefix. Always generate
8608 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
8609 if (i
.prefix
[DATA_PREFIX
] == 0
8610 && (generate_relax_relocations
8613 && i
.rm
.regmem
== 5))
8615 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
8616 && ((i
.operands
== 1
8617 && i
.tm
.base_opcode
== 0xff
8618 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
8620 && (i
.tm
.base_opcode
== 0x8b
8621 || i
.tm
.base_opcode
== 0x85
8622 || (i
.tm
.base_opcode
& 0xc7) == 0x03))))
8626 fixP
->fx_tcbit
= i
.rex
!= 0;
8628 && (i
.base_reg
->reg_num
== RegIP
))
8629 fixP
->fx_tcbit2
= 1;
8632 fixP
->fx_tcbit2
= 1;
8640 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
8645 for (n
= 0; n
< i
.operands
; n
++)
8647 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
8648 if (i
.rounding
&& (int) n
== i
.rounding
->operand
)
8651 if (operand_type_check (i
.types
[n
], imm
))
8653 if (i
.op
[n
].imms
->X_op
== O_constant
)
8655 int size
= imm_size (n
);
8658 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
8660 p
= frag_more (size
);
8661 md_number_to_chars (p
, val
, size
);
8665 /* Not absolute_section.
8666 Need a 32-bit fixup (don't support 8bit
8667 non-absolute imms). Try to support other
8669 enum bfd_reloc_code_real reloc_type
;
8670 int size
= imm_size (n
);
8673 if (i
.types
[n
].bitfield
.imm32s
8674 && (i
.suffix
== QWORD_MNEM_SUFFIX
8675 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
8680 p
= frag_more (size
);
8681 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
8683 /* This is tough to explain. We end up with this one if we
8684 * have operands that look like
8685 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
8686 * obtain the absolute address of the GOT, and it is strongly
8687 * preferable from a performance point of view to avoid using
8688 * a runtime relocation for this. The actual sequence of
8689 * instructions often look something like:
8694 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
8696 * The call and pop essentially return the absolute address
8697 * of the label .L66 and store it in %ebx. The linker itself
8698 * will ultimately change the first operand of the addl so
8699 * that %ebx points to the GOT, but to keep things simple, the
8700 * .o file must have this operand set so that it generates not
8701 * the absolute address of .L66, but the absolute address of
8702 * itself. This allows the linker itself simply treat a GOTPC
8703 * relocation as asking for a pcrel offset to the GOT to be
8704 * added in, and the addend of the relocation is stored in the
8705 * operand field for the instruction itself.
8707 * Our job here is to fix the operand so that it would add
8708 * the correct offset so that %ebx would point to itself. The
8709 * thing that is tricky is that .-.L66 will point to the
8710 * beginning of the instruction, so we need to further modify
8711 * the operand so that it will point to itself. There are
8712 * other cases where you have something like:
8714 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
8716 * and here no correction would be required. Internally in
8717 * the assembler we treat operands of this form as not being
8718 * pcrel since the '.' is explicitly mentioned, and I wonder
8719 * whether it would simplify matters to do it this way. Who
8720 * knows. In earlier versions of the PIC patches, the
8721 * pcrel_adjust field was used to store the correction, but
8722 * since the expression is not pcrel, I felt it would be
8723 * confusing to do it this way. */
8725 if ((reloc_type
== BFD_RELOC_32
8726 || reloc_type
== BFD_RELOC_X86_64_32S
8727 || reloc_type
== BFD_RELOC_64
)
8729 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
8730 && (i
.op
[n
].imms
->X_op
== O_symbol
8731 || (i
.op
[n
].imms
->X_op
== O_add
8732 && ((symbol_get_value_expression
8733 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
8737 reloc_type
= BFD_RELOC_386_GOTPC
;
8739 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
8741 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
8742 i
.op
[n
].imms
->X_add_number
+=
8743 encoding_length (insn_start_frag
, insn_start_off
, p
);
8745 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8746 i
.op
[n
].imms
, 0, reloc_type
);
8752 /* x86_cons_fix_new is called via the expression parsing code when a
8753 reloc is needed. We use this hook to get the correct .got reloc. */
8754 static int cons_sign
= -1;
8757 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
8758 expressionS
*exp
, bfd_reloc_code_real_type r
)
8760 r
= reloc (len
, 0, cons_sign
, r
);
8763 if (exp
->X_op
== O_secrel
)
8765 exp
->X_op
= O_symbol
;
8766 r
= BFD_RELOC_32_SECREL
;
8770 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
8773 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
8774 purpose of the `.dc.a' internal pseudo-op. */
8777 x86_address_bytes (void)
8779 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
8781 return stdoutput
->arch_info
->bits_per_address
/ 8;
8784 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
8786 # define lex_got(reloc, adjust, types) NULL
8788 /* Parse operands of the form
8789 <symbol>@GOTOFF+<nnn>
8790 and similar .plt or .got references.
8792 If we find one, set up the correct relocation in RELOC and copy the
8793 input string, minus the `@GOTOFF' into a malloc'd buffer for
8794 parsing by the calling routine. Return this buffer, and if ADJUST
8795 is non-null set it to the length of the string we removed from the
8796 input line. Otherwise return NULL. */
8798 lex_got (enum bfd_reloc_code_real
*rel
,
8800 i386_operand_type
*types
)
8802 /* Some of the relocations depend on the size of what field is to
8803 be relocated. But in our callers i386_immediate and i386_displacement
8804 we don't yet know the operand size (this will be set by insn
8805 matching). Hence we record the word32 relocation here,
8806 and adjust the reloc according to the real size in reloc(). */
8807 static const struct {
8810 const enum bfd_reloc_code_real rel
[2];
8811 const i386_operand_type types64
;
8813 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8814 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
8816 OPERAND_TYPE_IMM32_64
},
8818 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
8819 BFD_RELOC_X86_64_PLTOFF64
},
8820 OPERAND_TYPE_IMM64
},
8821 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
8822 BFD_RELOC_X86_64_PLT32
},
8823 OPERAND_TYPE_IMM32_32S_DISP32
},
8824 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
8825 BFD_RELOC_X86_64_GOTPLT64
},
8826 OPERAND_TYPE_IMM64_DISP64
},
8827 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
8828 BFD_RELOC_X86_64_GOTOFF64
},
8829 OPERAND_TYPE_IMM64_DISP64
},
8830 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
8831 BFD_RELOC_X86_64_GOTPCREL
},
8832 OPERAND_TYPE_IMM32_32S_DISP32
},
8833 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
8834 BFD_RELOC_X86_64_TLSGD
},
8835 OPERAND_TYPE_IMM32_32S_DISP32
},
8836 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
8837 _dummy_first_bfd_reloc_code_real
},
8838 OPERAND_TYPE_NONE
},
8839 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
8840 BFD_RELOC_X86_64_TLSLD
},
8841 OPERAND_TYPE_IMM32_32S_DISP32
},
8842 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
8843 BFD_RELOC_X86_64_GOTTPOFF
},
8844 OPERAND_TYPE_IMM32_32S_DISP32
},
8845 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
8846 BFD_RELOC_X86_64_TPOFF32
},
8847 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
8848 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
8849 _dummy_first_bfd_reloc_code_real
},
8850 OPERAND_TYPE_NONE
},
8851 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
8852 BFD_RELOC_X86_64_DTPOFF32
},
8853 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
8854 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
8855 _dummy_first_bfd_reloc_code_real
},
8856 OPERAND_TYPE_NONE
},
8857 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
8858 _dummy_first_bfd_reloc_code_real
},
8859 OPERAND_TYPE_NONE
},
8860 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
8861 BFD_RELOC_X86_64_GOT32
},
8862 OPERAND_TYPE_IMM32_32S_64_DISP32
},
8863 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
8864 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
8865 OPERAND_TYPE_IMM32_32S_DISP32
},
8866 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
8867 BFD_RELOC_X86_64_TLSDESC_CALL
},
8868 OPERAND_TYPE_IMM32_32S_DISP32
},
8873 #if defined (OBJ_MAYBE_ELF)
8878 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
8879 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
8882 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
8884 int len
= gotrel
[j
].len
;
8885 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
8887 if (gotrel
[j
].rel
[object_64bit
] != 0)
8890 char *tmpbuf
, *past_reloc
;
8892 *rel
= gotrel
[j
].rel
[object_64bit
];
8896 if (flag_code
!= CODE_64BIT
)
8898 types
->bitfield
.imm32
= 1;
8899 types
->bitfield
.disp32
= 1;
8902 *types
= gotrel
[j
].types64
;
8905 if (j
!= 0 && GOT_symbol
== NULL
)
8906 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
8908 /* The length of the first part of our input line. */
8909 first
= cp
- input_line_pointer
;
8911 /* The second part goes from after the reloc token until
8912 (and including) an end_of_line char or comma. */
8913 past_reloc
= cp
+ 1 + len
;
8915 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
8917 second
= cp
+ 1 - past_reloc
;
8919 /* Allocate and copy string. The trailing NUL shouldn't
8920 be necessary, but be safe. */
8921 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
8922 memcpy (tmpbuf
, input_line_pointer
, first
);
8923 if (second
!= 0 && *past_reloc
!= ' ')
8924 /* Replace the relocation token with ' ', so that
8925 errors like foo@GOTOFF1 will be detected. */
8926 tmpbuf
[first
++] = ' ';
8928 /* Increment length by 1 if the relocation token is
8933 memcpy (tmpbuf
+ first
, past_reloc
, second
);
8934 tmpbuf
[first
+ second
] = '\0';
8938 as_bad (_("@%s reloc is not supported with %d-bit output format"),
8939 gotrel
[j
].str
, 1 << (5 + object_64bit
));
8944 /* Might be a symbol version string. Don't as_bad here. */
/* Parse operands of the form
   <symbol>@SECREL32+<nnn>

   If we find one, set up the correct relocation in RELOC and copy the
   input string, minus the `@SECREL32' into a malloc'd buffer for
   parsing by the calling routine.  Return this buffer, and if ADJUST
   is non-null set it to the length of the string we removed from the
   input line.  Otherwise return NULL.

   This function is copied from the ELF version above adjusted for PE targets.
   NOTE(review): upstream wraps this definition in #ifdef TE_PE (with an
   #undef lex_got) — the extraction dropped those lines; confirm the guard
   survives in the real file.  */

static char *
lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
         int *adjust ATTRIBUTE_UNUSED,
         i386_operand_type *types)
{
  /* Single-entry table: only @SECREL32 is recognized for PE.  */
  static const struct
  {
    const char *str;
    int len;
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  }
  gotrel[] =
  {
    { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
                                       BFD_RELOC_32_SECREL },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
  };

  char *cp;
  unsigned j;

  /* Find the '@' introducing the reloc token; bail at EOL or ','.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;

      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
        {
          if (gotrel[j].rel[object_64bit] != 0)
            {
              int first, second;
              char *tmpbuf, *past_reloc;

              *rel = gotrel[j].rel[object_64bit];
              if (adjust)
                *adjust = len;

              if (types)
                {
                  if (flag_code != CODE_64BIT)
                    {
                      types->bitfield.imm32 = 1;
                      types->bitfield.disp32 = 1;
                    }
                  else
                    *types = gotrel[j].types64;
                }

              /* The length of the first part of our input line.  */
              first = cp - input_line_pointer;

              /* The second part goes from after the reloc token until
                 (and including) an end_of_line char or comma.  */
              past_reloc = cp + 1 + len;
              cp = past_reloc;
              while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
                ++cp;
              second = cp + 1 - past_reloc;

              /* Allocate and copy string.  The trailing NUL shouldn't
                 be necessary, but be safe.  */
              tmpbuf = XNEWVEC (char, first + second + 2);
              memcpy (tmpbuf, input_line_pointer, first);
              if (second != 0 && *past_reloc != ' ')
                /* Replace the relocation token with ' ', so that
                   errors like foo@SECLREL321 will be detected.  */
                tmpbuf[first++] = ' ';
              memcpy (tmpbuf + first, past_reloc, second);
              tmpbuf[first + second] = '\0';
              return tmpbuf;
            }

          as_bad (_("@%s reloc is not supported with %d-bit output format"),
                  gotrel[j].str, 1 << (5 + object_64bit));
          return NULL;
        }
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
/* Target hook for cons (.long/.quad etc.): parse an expression of SIZE
   bytes, handling @GOTOFF-style reloc suffixes via lex_got.  Returns the
   relocation selected by the suffix, or NO_RELOC.  */

bfd_reloc_code_real_type
x86_cons (expressionS *exp, int size)
{
  bfd_reloc_code_real_type got_reloc = NO_RELOC;

  /* Disable Intel-syntax handling inside the expression parser.  */
  intel_syntax = -intel_syntax;

  exp->X_md = 0;
  if (size == 4 || (object_64bit && size == 8))
    {
      /* Handle @GOTOFF and the like in an expression.  */
      char *save;
      char *gotfree_input_line;
      int adjust = 0;

      save = input_line_pointer;
      gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
      if (gotfree_input_line)
        input_line_pointer = gotfree_input_line;

      expression (exp);

      if (gotfree_input_line)
        {
          /* expression () has merrily parsed up to the end of line,
             or a comma - in the wrong buffer.  Transfer how far
             input_line_pointer has moved to the right buffer.  */
          input_line_pointer = (save
                                + (input_line_pointer - gotfree_input_line)
                                + adjust);
          free (gotfree_input_line);
          if (exp->X_op == O_constant
              || exp->X_op == O_absent
              || exp->X_op == O_illegal
              || exp->X_op == O_register
              || exp->X_op == O_big)
            {
              char c = *input_line_pointer;
              *input_line_pointer = 0;
              as_bad (_("missing or invalid expression `%s'"), save);
              *input_line_pointer = c;
            }
          else if ((got_reloc == BFD_RELOC_386_PLT32
                    || got_reloc == BFD_RELOC_X86_64_PLT32)
                   && exp->X_op != O_symbol)
            {
              /* @PLT only makes sense against a symbol.  */
              char c = *input_line_pointer;
              *input_line_pointer = 0;
              as_bad (_("invalid PLT expression `%s'"), save);
              *input_line_pointer = c;
            }
        }
    }
  else
    expression (exp);

  /* Restore Intel-syntax flag.  */
  intel_syntax = -intel_syntax;

  if (intel_syntax)
    i386_intel_simplify (exp);

  return got_reloc;
}
9117 signed_cons (int size
)
9119 if (flag_code
== CODE_64BIT
)
/* Handle the PE-specific .secrel32 directive: emit one or more 4-byte
   section-relative values, comma separated.
   NOTE(review): upstream guards this with #ifdef TE_PE; the extraction
   dropped the guard lines — confirm in the real file.  */

static void
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      /* Convert plain symbol references into section-relative ones.  */
      if (exp.X_op == O_symbol)
        exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  input_line_pointer--;
  demand_empty_rest_of_line ();
}
/* Handle Vector operations.

   Parse AVX512 operand decorations ("{1toN}" broadcast, "{%kN}" write
   mask, "{z}" zeroing) starting at OP_STRING, stopping at OP_END (or at
   NUL when OP_END is NULL).  Records the result in i.broadcast / i.mask.
   Returns the updated scan position, or NULL after issuing a diagnostic.  */

static char *
check_VecOperations (char *op_string, char *op_end)
{
  const reg_entry *mask;
  const char *saved;
  char *end_op;

  while (*op_string
         && (op_end == NULL || op_string < op_end))
    {
      saved = op_string;
      if (*op_string == '{')
        {
          op_string++;

          /* Check broadcasts.  */
          if (strncmp (op_string, "1to", 3) == 0)
            {
              int bcst_type;

              if (i.broadcast)
                goto duplicated_vec_op;

              op_string += 3;
              if (*op_string == '8')
                bcst_type = 8;
              else if (*op_string == '4')
                bcst_type = 4;
              else if (*op_string == '2')
                bcst_type = 2;
              else if (*op_string == '1'
                       && *(op_string+1) == '6')
                {
                  bcst_type = 16;
                  op_string++;
                }
              else
                {
                  as_bad (_("Unsupported broadcast: `%s'"), saved);
                  return NULL;
                }
              op_string++;

              broadcast_op.type = bcst_type;
              broadcast_op.operand = this_operand;
              broadcast_op.bytes = 0;
              i.broadcast = &broadcast_op;
            }
          /* Check masking operation.  */
          else if ((mask = parse_register (op_string, &end_op)) != NULL)
            {
              /* k0 can't be used for write mask.  */
              if (mask->reg_type.bitfield.class != RegMask || !mask->reg_num)
                {
                  as_bad (_("`%s%s' can't be used for write mask"),
                          register_prefix, mask->reg_name);
                  return NULL;
                }

              if (!i.mask)
                {
                  mask_op.mask = mask;
                  mask_op.zeroing = 0;
                  mask_op.operand = this_operand;
                  i.mask = &mask_op;
                }
              else
                {
                  if (i.mask->mask)
                    goto duplicated_vec_op;

                  i.mask->mask = mask;

                  /* Only "{z}" is allowed here.  No need to check
                     zeroing mask explicitly.  */
                  if (i.mask->operand != this_operand)
                    {
                      as_bad (_("invalid write mask `%s'"), saved);
                      return NULL;
                    }
                }

              op_string = end_op;
            }
          /* Check zeroing-flag for masking operation.  */
          else if (*op_string == 'z')
            {
              if (!i.mask)
                {
                  mask_op.mask = NULL;
                  mask_op.zeroing = 1;
                  mask_op.operand = this_operand;
                  i.mask = &mask_op;
                }
              else
                {
                  if (i.mask->zeroing)
                    {
                    duplicated_vec_op:
                      as_bad (_("duplicated `%s'"), saved);
                      return NULL;
                    }

                  i.mask->zeroing = 1;

                  /* Only "{%k}" is allowed here.  No need to check mask
                     register explicitly.  */
                  if (i.mask->operand != this_operand)
                    {
                      as_bad (_("invalid zeroing-masking `%s'"),
                              saved);
                      return NULL;
                    }
                }

              op_string++;
            }
          else
            goto unknown_vec_op;

          if (*op_string != '}')
            {
              as_bad (_("missing `}' in `%s'"), saved);
              return NULL;
            }
          op_string++;

          /* Strip whitespace since the addition of pseudo prefixes
             changed how the scrubber treats '{'.  */
          if (is_space_char (*op_string))
            op_string++;

          continue;
        }
    unknown_vec_op:
      /* We don't know this one.  */
      as_bad (_("unknown vector operation: `%s'"), saved);
      return NULL;
    }

  /* "{z}" without a preceding write mask is invalid.  */
  if (i.mask && i.mask->zeroing && !i.mask->mask)
    {
      as_bad (_("zeroing-masking only allowed with write mask"));
      return NULL;
    }

  return op_string;
}
/* Parse an immediate operand starting at IMM_START into the next slot of
   i.op[].imms.  Returns 1 on success, 0 after a diagnostic.  */

static int
i386_immediate (char *imm_start)
{
  char *save_input_line_pointer;
  char *gotfree_input_line;
  segT exp_seg = 0;
  expressionS *exp;
  i386_operand_type types;

  operand_type_set (&types, ~0);

  if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
    {
      as_bad (_("at most %d immediate operands are allowed"),
              MAX_IMMEDIATE_OPERANDS);
      return 0;
    }

  exp = &im_expressions[i.imm_operands++];
  i.op[this_operand].imms = exp;

  if (is_space_char (*imm_start))
    ++imm_start;

  /* Parse in a private buffer so reloc suffixes can be stripped.  */
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = imm_start;

  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();

  /* Handle vector operations.  */
  if (*input_line_pointer == '{')
    {
      input_line_pointer = check_VecOperations (input_line_pointer,
                                                NULL);
      if (input_line_pointer == NULL)
        return 0;
    }

  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);

  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A bare constant/register after a reloc suffix is malformed.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
        exp->X_op = O_illegal;
    }

  return i386_finalize_immediate (exp_seg, exp, types, imm_start);
}
/* Validate the parsed immediate EXP and record its possible sizes in
   i.types[this_operand], intersected with TYPES (constraints derived
   from any reloc suffix).  IMM_START is only used for diagnostics and
   may be NULL.  Returns 1 on success, 0 after a diagnostic.  */

static int
i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
                         i386_operand_type types, const char *imm_start)
{
  if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
    {
      if (imm_start)
        as_bad (_("missing or invalid immediate expression `%s'"),
                imm_start);
      return 0;
    }
  else if (exp->X_op == O_constant)
    {
      /* Size it properly later.  */
      i.types[this_operand].bitfield.imm64 = 1;
      /* If not 64bit, sign extend val.  */
      if (flag_code != CODE_64BIT
          && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
        exp->X_add_number
          = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
    }
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
           && exp_seg != absolute_section
           && exp_seg != text_section
           && exp_seg != data_section
           && exp_seg != bss_section
           && exp_seg != undefined_section
           && !bfd_is_com_section (exp_seg))
    {
      as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
      return 0;
    }
#endif
  else if (!intel_syntax && exp_seg == reg_section)
    {
      if (imm_start)
        as_bad (_("illegal immediate register operand %s"), imm_start);
      return 0;
    }
  else
    {
      /* This is an address.  The size of the address will be
         determined later, depending on destination register,
         suffix, or the default for the section.  */
      i.types[this_operand].bitfield.imm8 = 1;
      i.types[this_operand].bitfield.imm16 = 1;
      i.types[this_operand].bitfield.imm32 = 1;
      i.types[this_operand].bitfield.imm32s = 1;
      i.types[this_operand].bitfield.imm64 = 1;
      i.types[this_operand] = operand_type_and (i.types[this_operand],
                                                types);
    }

  return 1;
}
/* Parse the scale factor of a base/index memory operand starting at
   SCALE.  Accepts 1, 2, 4 or 8 and stores log2 of it in
   i.log2_scale_factor.  Returns the scan position past the factor, or
   NULL after a diagnostic.  */

static char *
i386_scale (char *scale)
{
  offsetT val;
  char *save = input_line_pointer;

  input_line_pointer = scale;
  val = get_absolute_expression ();

  switch (val)
    {
    case 1:
      i.log2_scale_factor = 0;
      break;
    case 2:
      i.log2_scale_factor = 1;
      break;
    case 4:
      i.log2_scale_factor = 2;
      break;
    case 8:
      i.log2_scale_factor = 3;
      break;
    default:
      {
        char sep = *input_line_pointer;

        /* Temporarily NUL-terminate so the message shows only the bad
           scale text.  */
        *input_line_pointer = '\0';
        as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
                scale);
        *input_line_pointer = sep;
        input_line_pointer = save;
        return NULL;
      }
    }
  if (i.log2_scale_factor != 0 && i.index_reg == 0)
    {
      /* A scale without an index register has no effect; warn and drop.  */
      as_warn (_("scale factor of %d without an index register"),
               1 << i.log2_scale_factor);
      i.log2_scale_factor = 0;
    }
  scale = input_line_pointer;
  input_line_pointer = save;
  return scale;
}
/* Parse the displacement part [DISP_START, DISP_END) of a memory or
   branch operand into the next slot of i.op[].disps, and record which
   displacement widths are admissible in i.types[this_operand].
   Returns 1 on success, 0 after a diagnostic.  */

static int
i386_displacement (char *disp_start, char *disp_end)
{
  expressionS *exp;
  segT exp_seg = 0;
  char *save_input_line_pointer;
  char *gotfree_input_line;
  int override;
  i386_operand_type bigdisp, types = anydisp;
  int ret;

  if (i.disp_operands == MAX_MEMORY_OPERANDS)
    {
      as_bad (_("at most %d displacement operands are allowed"),
              MAX_MEMORY_OPERANDS);
      return 0;
    }

  operand_type_set (&bigdisp, 0);
  if ((i.types[this_operand].bitfield.jumpabsolute)
      || (!current_templates->start->opcode_modifier.jump
          && !current_templates->start->opcode_modifier.jumpdword))
    {
      /* Memory-style displacement: width follows the address size.  */
      bigdisp.bitfield.disp32 = 1;
      override = (i.prefix[ADDR_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
        {
          if (!override)
            {
              bigdisp.bitfield.disp32s = 1;
              bigdisp.bitfield.disp64 = 1;
            }
        }
      else if ((flag_code == CODE_16BIT) ^ override)
        {
          bigdisp.bitfield.disp32 = 0;
          bigdisp.bitfield.disp16 = 1;
        }
    }
  else
    {
      /* For PC-relative branches, the width of the displacement
         is dependent upon data size, not address size.  */
      override = (i.prefix[DATA_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
        {
          if (override || i.suffix == WORD_MNEM_SUFFIX)
            bigdisp.bitfield.disp16 = 1;
          else
            {
              bigdisp.bitfield.disp32 = 1;
              bigdisp.bitfield.disp32s = 1;
            }
        }
      else
        {
          if (!override)
            override = (i.suffix == (flag_code != CODE_16BIT
                                     ? WORD_MNEM_SUFFIX
                                     : LONG_MNEM_SUFFIX));
          bigdisp.bitfield.disp32 = 1;
          if ((flag_code == CODE_16BIT) ^ override)
            {
              bigdisp.bitfield.disp32 = 0;
              bigdisp.bitfield.disp16 = 1;
            }
        }
    }
  i.types[this_operand] = operand_type_or (i.types[this_operand],
                                           bigdisp);

  exp = &disp_expressions[i.disp_operands];
  i.op[this_operand].disps = exp;
  i.disp_operands++;
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = disp_start;
  END_STRING_AND_SAVE (disp_end);

#ifndef GCC_ASM_O_HACK
#define GCC_ASM_O_HACK 0
#endif
#if GCC_ASM_O_HACK
  END_STRING_AND_SAVE (disp_end + 1);
  if (i.types[this_operand].bitfield.baseIndex
      && displacement_string_end[-1] == '+')
    {
      /* This hack is to avoid a warning when using the "o"
         constraint within gcc asm statements.
         For instance:

         #define _set_tssldt_desc(n,addr,limit,type) \
         __asm__ __volatile__ ( \
         "movw %w2,%0\n\t" \
         "movw %w1,2+%0\n\t" \
         "rorl $16,%1\n\t" \
         "movb %b1,4+%0\n\t" \
         "movb %4,5+%0\n\t" \
         "movb $0,6+%0\n\t" \
         "movb %h1,7+%0\n\t" \
         "rorl $16,%1" \
         : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))

         This works great except that the output assembler ends
         up looking a bit weird if it turns out that there is
         no offset.  You end up producing code that looks like:

         #APP
         movw $235,(%eax)
         movw %dx,2+(%eax)
         ...

         So here we provide the missing zero.  */

      *displacement_string_end = '0';
    }
#endif
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();
  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);
#if GCC_ASM_O_HACK
  RESTORE_END_STRING (disp_end + 1);
#endif
  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A bare constant/register after a reloc suffix is malformed.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
        exp->X_op = O_illegal;
    }

  ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);

  RESTORE_END_STRING (disp_end);

  return ret;
}
/* Validate the parsed displacement EXP, rewrite GOT-relative forms as
   subtraction from GOT_symbol, prune impossible widths in 64-bit mode,
   and intersect i.types[this_operand] with TYPES when the operand turns
   out to be displacement-only.  Returns 1 on success, 0 after a
   diagnostic.  */

static int
i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
                            i386_operand_type types, const char *disp_start)
{
  i386_operand_type bigdisp;
  int ret = 1;

  /* We do this to make sure that the section symbol is in
     the symbol table.  We will ultimately change the relocation
     to be relative to the beginning of the section.  */
  if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
      || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
      || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
    {
      if (exp->X_op != O_symbol)
        goto inv_disp;

      if (S_IS_LOCAL (exp->X_add_symbol)
          && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
          && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
        section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
      exp->X_op = O_subtract;
      exp->X_op_symbol = GOT_symbol;
      if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
        i.reloc[this_operand] = BFD_RELOC_32_PCREL;
      else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
        i.reloc[this_operand] = BFD_RELOC_64;
      else
        i.reloc[this_operand] = BFD_RELOC_32;
    }

  else if (exp->X_op == O_absent
           || exp->X_op == O_illegal
           || exp->X_op == O_big)
    {
    inv_disp:
      as_bad (_("missing or invalid displacement expression `%s'"),
              disp_start);
      ret = 0;
    }

  else if (flag_code == CODE_64BIT
           && !i.prefix[ADDR_PREFIX]
           && exp->X_op == O_constant)
    {
      /* Since displacement is signed extended to 64bit, don't allow
         disp32 and turn off disp32s if they are out of range.  */
      i.types[this_operand].bitfield.disp32 = 0;
      if (!fits_in_signed_long (exp->X_add_number))
        {
          i.types[this_operand].bitfield.disp32s = 0;
          if (i.types[this_operand].bitfield.baseindex)
            {
              as_bad (_("0x%lx out range of signed 32bit displacement"),
                      (long) exp->X_add_number);
              ret = 0;
            }
        }
    }

#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  else if (exp->X_op != O_constant
           && OUTPUT_FLAVOR == bfd_target_aout_flavour
           && exp_seg != absolute_section
           && exp_seg != text_section
           && exp_seg != data_section
           && exp_seg != bss_section
           && exp_seg != undefined_section
           && !bfd_is_com_section (exp_seg))
    {
      as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
      ret = 0;
    }
#endif

  /* Check if this is a displacement only operand.  */
  bigdisp = i.types[this_operand];
  bigdisp.bitfield.disp8 = 0;
  bigdisp.bitfield.disp16 = 0;
  bigdisp.bitfield.disp32 = 0;
  bigdisp.bitfield.disp32s = 0;
  bigdisp.bitfield.disp64 = 0;
  if (operand_type_all_zero (&bigdisp))
    i.types[this_operand] = operand_type_and (i.types[this_operand],
                                              types);

  return ret;
}
/* Return the active addressing mode, taking address override and
   registers forming the address into consideration.  Update the
   address override prefix if necessary.  */

static enum flag_code
i386_addressing_mode (void)
{
  enum flag_code addr_mode;

  if (i.prefix[ADDR_PREFIX])
    /* An explicit 0x67 prefix toggles between 32- and 16-bit addressing.  */
    addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
  else
    {
      addr_mode = flag_code;

#if INFER_ADDR_PREFIX
      if (i.mem_operands == 0)
        {
          /* Infer address prefix from the first memory operand.  */
          const reg_entry *addr_reg = i.base_reg;

          if (addr_reg == NULL)
            addr_reg = i.index_reg;

          if (addr_reg)
            {
              if (addr_reg->reg_type.bitfield.dword)
                addr_mode = CODE_32BIT;
              else if (flag_code != CODE_64BIT
                       && addr_reg->reg_type.bitfield.word)
                addr_mode = CODE_16BIT;

              if (addr_mode != flag_code)
                {
                  i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
                  i.prefixes += 1;
                  /* Change the size of any displacement too.  At most one
                     of Disp16 or Disp32 is set.
                     FIXME.  There doesn't seem to be any real need for
                     separate Disp16 and Disp32 flags.  The same goes for
                     Imm16 and Imm32.  Removing them would probably clean
                     up the code quite a lot.  */
                  if (flag_code != CODE_64BIT
                      && (i.types[this_operand].bitfield.disp16
                          || i.types[this_operand].bitfield.disp32))
                    i.types[this_operand]
                      = operand_type_xor (i.types[this_operand], disp16_32);
                }
            }
        }
#endif
    }

  return addr_mode;
}
9754 /* Make sure the memory operand we've been dealt is valid.
9755 Return 1 on success, 0 on a failure. */
9758 i386_index_check (const char *operand_string
)
9760 const char *kind
= "base/index";
9761 enum flag_code addr_mode
= i386_addressing_mode ();
9763 if (current_templates
->start
->opcode_modifier
.isstring
9764 && !current_templates
->start
->cpu_flags
.bitfield
.cpupadlock
9765 && (current_templates
->end
[-1].opcode_modifier
.isstring
9768 /* Memory operands of string insns are special in that they only allow
9769 a single register (rDI, rSI, or rBX) as their memory address. */
9770 const reg_entry
*expected_reg
;
9771 static const char *di_si
[][2] =
9777 static const char *bx
[] = { "ebx", "bx", "rbx" };
9779 kind
= "string address";
9781 if (current_templates
->start
->opcode_modifier
.repprefixok
)
9783 i386_operand_type type
= current_templates
->end
[-1].operand_types
[0];
9785 if (!type
.bitfield
.baseindex
9786 || ((!i
.mem_operands
!= !intel_syntax
)
9787 && current_templates
->end
[-1].operand_types
[1]
9788 .bitfield
.baseindex
))
9789 type
= current_templates
->end
[-1].operand_types
[1];
9790 expected_reg
= hash_find (reg_hash
,
9791 di_si
[addr_mode
][type
.bitfield
.esseg
]);
9795 expected_reg
= hash_find (reg_hash
, bx
[addr_mode
]);
9797 if (i
.base_reg
!= expected_reg
9799 || operand_type_check (i
.types
[this_operand
], disp
))
9801 /* The second memory operand must have the same size as
9805 && !((addr_mode
== CODE_64BIT
9806 && i
.base_reg
->reg_type
.bitfield
.qword
)
9807 || (addr_mode
== CODE_32BIT
9808 ? i
.base_reg
->reg_type
.bitfield
.dword
9809 : i
.base_reg
->reg_type
.bitfield
.word
)))
9812 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
9814 intel_syntax
? '[' : '(',
9816 expected_reg
->reg_name
,
9817 intel_syntax
? ']' : ')');
9824 as_bad (_("`%s' is not a valid %s expression"),
9825 operand_string
, kind
);
9830 if (addr_mode
!= CODE_16BIT
)
9832 /* 32-bit/64-bit checks. */
9834 && ((addr_mode
== CODE_64BIT
9835 ? !i
.base_reg
->reg_type
.bitfield
.qword
9836 : !i
.base_reg
->reg_type
.bitfield
.dword
)
9837 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
9838 || i
.base_reg
->reg_num
== RegIZ
))
9840 && !i
.index_reg
->reg_type
.bitfield
.xmmword
9841 && !i
.index_reg
->reg_type
.bitfield
.ymmword
9842 && !i
.index_reg
->reg_type
.bitfield
.zmmword
9843 && ((addr_mode
== CODE_64BIT
9844 ? !i
.index_reg
->reg_type
.bitfield
.qword
9845 : !i
.index_reg
->reg_type
.bitfield
.dword
)
9846 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
9849 /* bndmk, bndldx, and bndstx have special restrictions. */
9850 if (current_templates
->start
->base_opcode
== 0xf30f1b
9851 || (current_templates
->start
->base_opcode
& ~1) == 0x0f1a)
9853 /* They cannot use RIP-relative addressing. */
9854 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
9856 as_bad (_("`%s' cannot be used here"), operand_string
);
9860 /* bndldx and bndstx ignore their scale factor. */
9861 if (current_templates
->start
->base_opcode
!= 0xf30f1b
9862 && i
.log2_scale_factor
)
9863 as_warn (_("register scaling is being ignored here"));
9868 /* 16-bit checks. */
9870 && (!i
.base_reg
->reg_type
.bitfield
.word
9871 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
9873 && (!i
.index_reg
->reg_type
.bitfield
.word
9874 || !i
.index_reg
->reg_type
.bitfield
.baseindex
9876 && i
.base_reg
->reg_num
< 6
9877 && i
.index_reg
->reg_num
>= 6
9878 && i
.log2_scale_factor
== 0))))
/* Handle vector immediates.

   Parse an AVX512 rounding-control / SAE pseudo-operand such as
   "{rn-sae}" starting at IMM_START.  On a match, records it in
   i.rounding and synthesizes a zero imm8 operand.  Returns 1 if the
   operand was consumed, 0 if it is not an RC/SAE form (or after a
   diagnostic).  */

static int
RC_SAE_immediate (const char *imm_start)
{
  unsigned int match_found, j;
  const char *pstr = imm_start;
  expressionS *exp;

  if (*pstr != '{')
    return 0;

  pstr++;
  match_found = 0;
  for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
    {
      if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
        {
          if (!i.rounding)
            {
              rc_op.type = RC_NamesTable[j].type;
              rc_op.operand = this_operand;
              i.rounding = &rc_op;
            }
          else
            {
              as_bad (_("duplicated `%s'"), imm_start);
              return 0;
            }
          pstr += RC_NamesTable[j].len;
          match_found = 1;
          break;
        }
    }
  if (!match_found)
    return 0;

  if (*pstr++ != '}')
    {
      as_bad (_("Missing '}': '%s'"), imm_start);
      return 0;
    }
  /* RC/SAE immediate string should contain nothing more.  */;
  if (*pstr != 0)
    {
      as_bad (_("Junk after '}': '%s'"), imm_start);
      return 0;
    }

  /* Synthesize a constant-zero imm8 operand for the encoder.  */
  exp = &im_expressions[i.imm_operands++];
  i.op[this_operand].imms = exp;

  exp->X_op = O_constant;
  exp->X_add_number = 0;
  exp->X_add_symbol = (symbolS *) 0;
  exp->X_op_symbol = (symbolS *) 0;

  i.types[this_operand].bitfield.imm8 = 1;
  return 1;
}
/* Only string instructions can have a second memory operand, so
   reduce current_templates to just those if it contains any.  */

static int
maybe_adjust_templates (void)
{
  const insn_template *t;

  gas_assert (i.mem_operands == 1);

  /* Find the first string-insn template, if any.  */
  for (t = current_templates->start; t < current_templates->end; ++t)
    if (t->opcode_modifier.isstring)
      break;

  if (t < current_templates->end)
    {
      static templates aux_templates;
      bfd_boolean recheck;

      aux_templates.start = t;
      for (; t < current_templates->end; ++t)
        if (!t->opcode_modifier.isstring)
          break;
      aux_templates.end = t;

      /* Determine whether to re-check the first memory operand.  */
      recheck = (aux_templates.start != current_templates->start
                 || t != current_templates->end);

      current_templates = &aux_templates;

      if (recheck)
        {
          /* Re-validate the first memory operand against the narrowed
             template set.  */
          i.mem_operands = 0;
          if (i.memop1_string != NULL
              && i386_index_check (i.memop1_string) == 0)
            return 0;
          i.mem_operands = 1;
        }
    }

  return 1;
}
/* Parse OPERAND_STRING into the i386_insn structure I.  Returns zero
   on error.  Handles registers (with optional segment override and
   vector decorations), immediates, RC/SAE pseudo-immediates, and
   memory references in AT&T disp(base,index,scale) form.  */

static int
i386_att_operand (char *operand_string)
{
  const reg_entry *r;
  char *end_op;
  char *op_string = operand_string;

  if (is_space_char (*op_string))
    ++op_string;

  /* We check for an absolute prefix (differentiating,
     for example, 'jmp pc_relative_label' from 'jmp *absolute_label'.  */
  if (*op_string == ABSOLUTE_PREFIX)
    {
      ++op_string;
      if (is_space_char (*op_string))
        ++op_string;
      i.types[this_operand].bitfield.jumpabsolute = 1;
    }

  /* Check if operand is a register.  */
  if ((r = parse_register (op_string, &end_op)) != NULL)
    {
      i386_operand_type temp;

      /* Check for a segment override by searching for ':' after a
         segment register.  */
      op_string = end_op;
      if (is_space_char (*op_string))
        ++op_string;
      if (*op_string == ':' && r->reg_type.bitfield.class == SReg)
        {
          switch (r->reg_num)
            {
            case 0:
              i.seg[i.mem_operands] = &es;
              break;
            case 1:
              i.seg[i.mem_operands] = &cs;
              break;
            case 2:
              i.seg[i.mem_operands] = &ss;
              break;
            case 3:
              i.seg[i.mem_operands] = &ds;
              break;
            case 4:
              i.seg[i.mem_operands] = &fs;
              break;
            case 5:
              i.seg[i.mem_operands] = &gs;
              break;
            }

          /* Skip the ':' and whitespace.  */
          ++op_string;
          if (is_space_char (*op_string))
            ++op_string;

          if (!is_digit_char (*op_string)
              && !is_identifier_char (*op_string)
              && *op_string != '('
              && *op_string != ABSOLUTE_PREFIX)
            {
              as_bad (_("bad memory operand `%s'"), op_string);
              return 0;
            }
          /* Handle case of %es:*foo.  */
          if (*op_string == ABSOLUTE_PREFIX)
            {
              ++op_string;
              if (is_space_char (*op_string))
                ++op_string;
              i.types[this_operand].bitfield.jumpabsolute = 1;
            }
          goto do_memory_reference;
        }

      /* Handle vector operations.  */
      if (*op_string == '{')
        {
          op_string = check_VecOperations (op_string, NULL);
          if (op_string == NULL)
            return 0;
        }

      if (*op_string)
        {
          as_bad (_("junk `%s' after register"), op_string);
          return 0;
        }
      temp = r->reg_type;
      temp.bitfield.baseindex = 0;
      i.types[this_operand] = operand_type_or (i.types[this_operand],
                                               temp);
      i.types[this_operand].bitfield.unspecified = 0;
      i.op[this_operand].regs = r;
      i.reg_operands++;
    }
  else if (*op_string == REGISTER_PREFIX)
    {
      as_bad (_("bad register name `%s'"), op_string);
      return 0;
    }
  else if (*op_string == IMMEDIATE_PREFIX)
    {
      ++op_string;
      if (i.types[this_operand].bitfield.jumpabsolute)
        {
          as_bad (_("immediate operand illegal with absolute jump"));
          return 0;
        }
      if (!i386_immediate (op_string))
        return 0;
    }
  else if (RC_SAE_immediate (operand_string))
    {
      /* If it is a RC or SAE immediate, do nothing.  */
      ;
    }
  else if (is_digit_char (*op_string)
           || is_identifier_char (*op_string)
           || *op_string == '"'
           || *op_string == '(')
    {
      /* This is a memory reference of some sort.  */
      char *base_string;

      /* Start and end of displacement string expression (if found).  */
      char *displacement_string_start;
      char *displacement_string_end;
      char *vop_start;

    do_memory_reference:
      if (i.mem_operands == 1 && !maybe_adjust_templates ())
        return 0;
      if ((i.mem_operands == 1
           && !current_templates->start->opcode_modifier.isstring)
          || i.mem_operands == 2)
        {
          as_bad (_("too many memory references for `%s'"),
                  current_templates->start->name);
          return 0;
        }

      /* Check for base index form.  We detect the base index form by
         looking for an ')' at the end of the operand, searching
         for the '(' matching it, and finding a REGISTER_PREFIX or ','
         after the '('.  */
      base_string = op_string + strlen (op_string);

      /* Handle vector operations.  */
      vop_start = strchr (op_string, '{');
      if (vop_start && vop_start < base_string)
        {
          if (check_VecOperations (vop_start, base_string) == NULL)
            return 0;
          base_string = vop_start;
        }

      --base_string;
      if (is_space_char (*base_string))
        --base_string;

      /* If we only have a displacement, set-up for it to be parsed later.  */
      displacement_string_start = op_string;
      displacement_string_end = base_string + 1;

      if (*base_string == ')')
        {
          char *temp_string;
          unsigned int parens_balanced = 1;
          /* We've already checked that the number of left & right ()'s are
             equal, so this loop will not be infinite.  */
          do
            {
              base_string--;
              if (*base_string == ')')
                parens_balanced++;
              if (*base_string == '(')
                parens_balanced--;
            }
          while (parens_balanced);

          temp_string = base_string;

          /* Skip past '(' and whitespace.  */
          ++base_string;
          if (is_space_char (*base_string))
            ++base_string;

          if (*base_string == ','
              || ((i.base_reg = parse_register (base_string, &end_op))
                  != NULL))
            {
              displacement_string_end = temp_string;

              i.types[this_operand].bitfield.baseindex = 1;

              if (i.base_reg)
                {
                  base_string = end_op;
                  if (is_space_char (*base_string))
                    ++base_string;
                }

              /* There may be an index reg or scale factor here.  */
              if (*base_string == ',')
                {
                  ++base_string;
                  if (is_space_char (*base_string))
                    ++base_string;

                  if ((i.index_reg = parse_register (base_string, &end_op))
                      != NULL)
                    {
                      base_string = end_op;
                      if (is_space_char (*base_string))
                        ++base_string;
                      if (*base_string == ',')
                        {
                          ++base_string;
                          if (is_space_char (*base_string))
                            ++base_string;
                        }
                      else if (*base_string != ')')
                        {
                          as_bad (_("expecting `,' or `)' "
                                    "after index register in `%s'"),
                                  operand_string);
                          return 0;
                        }
                    }
                  else if (*base_string == REGISTER_PREFIX)
                    {
                      end_op = strchr (base_string, ',');
                      if (end_op)
                        *end_op = '\0';
                      as_bad (_("bad register name `%s'"), base_string);
                      return 0;
                    }

                  /* Check for scale factor.  */
                  if (*base_string != ')')
                    {
                      char *end_scale = i386_scale (base_string);

                      if (!end_scale)
                        return 0;

                      base_string = end_scale;
                      if (is_space_char (*base_string))
                        ++base_string;
                      if (*base_string != ')')
                        {
                          as_bad (_("expecting `)' "
                                    "after scale factor in `%s'"),
                                  operand_string);
                          return 0;
                        }
                    }
                  else if (!i.index_reg)
                    {
                      as_bad (_("expecting index register or scale factor "
                                "after `,'; got '%c'"),
                              *base_string);
                      return 0;
                    }
                }
              else if (*base_string != ')')
                {
                  as_bad (_("expecting `,' or `)' "
                            "after base register in `%s'"),
                          operand_string);
                  return 0;
                }
            }
          else if (*base_string == REGISTER_PREFIX)
            {
              end_op = strchr (base_string, ',');
              if (end_op)
                *end_op = '\0';
              as_bad (_("bad register name `%s'"), base_string);
              return 0;
            }
        }

      /* If there's an expression beginning the operand, parse it,
         assuming displacement_string_start and
         displacement_string_end are meaningful.  */
      if (displacement_string_start != displacement_string_end)
        {
          if (!i386_displacement (displacement_string_start,
                                  displacement_string_end))
            return 0;
        }

      /* Special case for (%dx) while doing input/output op.  */
      if (i.base_reg
          && i.base_reg->reg_type.bitfield.instance == RegD
          && i.base_reg->reg_type.bitfield.word
          && i.index_reg == 0
          && i.log2_scale_factor == 0
          && i.seg[i.mem_operands] == 0
          && !operand_type_check (i.types[this_operand], disp))
        {
          i.types[this_operand] = i.base_reg->reg_type;
          return 1;
        }

      if (i386_index_check (operand_string) == 0)
        return 0;
      i.flags[this_operand] |= Operand_Mem;
      if (i.mem_operands == 0)
        i.memop1_string = xstrdup (operand_string);
      i.mem_operands++;
    }
  else
    {
      /* It's not a memory operand; argh!  */
      as_bad (_("invalid char %s beginning operand %d `%s'"),
              output_invalid (*op_string),
              this_operand + 1,
              op_string);
      return 0;
    }
  return 1;			/* Normal return.  */
}
/* Calculate the maximum variable size (i.e., excluding fr_fix)
   that an rs_machine_dependent frag may reach.  */

unsigned int
i386_frag_max_var (fragS *frag)
{
  /* The only relaxable frags are for jumps.
     Unconditional jumps can grow by 4 bytes and others by 5 bytes.  */
  gas_assert (frag->fr_type == rs_machine_dependent);
  return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
}
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Return non-zero if branch target FR_SYMBOL is known to resolve within
   the current segment (so the branch may be relaxed); FR_VAR is the
   frag's relocation, if any.  */

static int
elf_symbol_resolved_in_segment_p (symbolS *fr_symbol, offsetT fr_var)
{
  /* STT_GNU_IFUNC symbol must go through PLT.  */
  if ((symbol_get_bfdsym (fr_symbol)->flags
       & BSF_GNU_INDIRECT_FUNCTION) != 0)
    return 0;

  if (!S_IS_EXTERNAL (fr_symbol))
    /* Symbol may be weak or local.  */
    return !S_IS_WEAK (fr_symbol);

  /* Global symbols with non-default visibility can't be preempted. */
  if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol)) != STV_DEFAULT)
    return 1;

  if (fr_var != NO_RELOC)
    switch ((enum bfd_reloc_code_real) fr_var)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
        /* Symbol with PLT relocation may be preempted. */
        return 0;
      default:
        abort ();
      }

  /* Global symbols with default visibility in a shared library may be
     preempted by another definition.  */
  return !shared;
}
#endif
/* md_estimate_size_before_relax()

   Called just before relax() for rs_machine_dependent frags.  The x86
   assembler uses these frags to handle variable size jump
   instructions.

   Any symbol that is now undefined will not become defined.
   Return the correct fr_subtype in the frag.
   Return the initial "guess for variable size of frag" to caller.
   The guess is actually the growth beyond the fixed part.  Whatever
   we do to grow the fixed or variable part contributes to our
   returned value.  */

int
md_estimate_size_before_relax (fragS *fragP, segT segment)
{
  /* We've already got fragP->fr_subtype right;  all we have to do is
     check for un-relaxable symbols.  On an ELF system, we can't relax
     an externally visible symbol, because it may be overridden by a
     shared library.  */
  if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      || (IS_ELF
          && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol,
                                                fragP->fr_var))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      || (OUTPUT_FLAVOR == bfd_target_coff_flavour
          && S_IS_WEAK (fragP->fr_symbol))
#endif
      )
    {
      /* Symbol is undefined in this segment, or we need to keep a
         reloc so that weak symbols can be overridden.  */
      int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
      enum bfd_reloc_code_real reloc_type;
      unsigned char *opcode;
      int old_fr_fix;

      if (fragP->fr_var != NO_RELOC)
        reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
      else if (size == 2)
        reloc_type = BFD_RELOC_16_PCREL;
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      else if (need_plt32_p (fragP->fr_symbol))
        reloc_type = BFD_RELOC_X86_64_PLT32;
#endif
      else
        reloc_type = BFD_RELOC_32_PCREL;

      old_fr_fix = fragP->fr_fix;
      opcode = (unsigned char *) fragP->fr_opcode;

      switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
        {
        case UNCOND_JUMP:
          /* Make jmp (0xeb) a (d)word displacement jump.  */
          opcode[0] = 0xe9;
          fragP->fr_fix += size;
          fix_new (fragP, old_fr_fix, size,
                   fragP->fr_symbol,
                   fragP->fr_offset, 1,
                   reloc_type);
          break;

        case COND_JUMP86:
          if (size == 2
              && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
            {
              /* Negate the condition, and branch past an
                 unconditional jump.  */
              opcode[0] ^= 1;
              opcode[1] = 3;
              /* Insert an unconditional jump.  */
              opcode[2] = 0xe9;
              /* We added two extra opcode bytes, and have a two byte
                 offset.  */
              fragP->fr_fix += 2 + 2;
              fix_new (fragP, old_fr_fix + 2, 2,
                       fragP->fr_symbol,
                       fragP->fr_offset, 1,
                       reloc_type);
              break;
            }
          /* Fall through.  */

        case COND_JUMP:
          if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
            {
              fixS *fixP;

              fragP->fr_fix += 1;
              fixP = fix_new (fragP, old_fr_fix, 1,
                              fragP->fr_symbol,
                              fragP->fr_offset, 1,
                              BFD_RELOC_8_PCREL);
              fixP->fx_signed = 1;
              break;
            }

          /* This changes the byte-displacement jump 0x7N
             to the (d)word-displacement jump 0x0f,0x8N.  */
          opcode[1] = opcode[0] + 0x10;
          opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
          /* We've added an opcode byte.  */
          fragP->fr_fix += 1 + size;
          fix_new (fragP, old_fr_fix + 1, size,
                   fragP->fr_symbol,
                   fragP->fr_offset, 1,
                   reloc_type);
          break;

        default:
          BAD_CASE (fragP->fr_subtype);
          break;
        }
      frag_wane (fragP);
      return fragP->fr_fix - old_fr_fix;
    }

  /* Guess size depending on current relax state.  Initially the relax
     state will correspond to a short jump and we return 1, because
     the variable part of the frag (the branch offset) is one byte
     long.  However, we can relax a section more than once and in that
     case we must either set fr_subtype back to the unrelaxed state,
     or return the value for the appropriate branch.  */
  return md_relax_table[fragP->fr_subtype].rlx_length;
}
10496 /* Called after relax() is finished.
10498 In: Address of frag.
10499 fr_type == rs_machine_dependent.
10500 fr_subtype is what the address relaxed to.
10502 Out: Any fixSs and constants are set up.
10503 Caller will turn frag into a ".space 0". */
10506 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
10509 unsigned char *opcode
;
10510 unsigned char *where_to_put_displacement
= NULL
;
10511 offsetT target_address
;
10512 offsetT opcode_address
;
10513 unsigned int extension
= 0;
10514 offsetT displacement_from_opcode_start
;
10516 opcode
= (unsigned char *) fragP
->fr_opcode
;
10518 /* Address we want to reach in file space. */
10519 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
10521 /* Address opcode resides at in file space. */
10522 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
10524 /* Displacement from opcode start to fill into instruction. */
10525 displacement_from_opcode_start
= target_address
- opcode_address
;
10527 if ((fragP
->fr_subtype
& BIG
) == 0)
10529 /* Don't have to change opcode. */
10530 extension
= 1; /* 1 opcode + 1 displacement */
10531 where_to_put_displacement
= &opcode
[1];
10535 if (no_cond_jump_promotion
10536 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
10537 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
10538 _("long jump required"));
10540 switch (fragP
->fr_subtype
)
10542 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
10543 extension
= 4; /* 1 opcode + 4 displacement */
10545 where_to_put_displacement
= &opcode
[1];
10548 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
10549 extension
= 2; /* 1 opcode + 2 displacement */
10551 where_to_put_displacement
= &opcode
[1];
10554 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
10555 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
10556 extension
= 5; /* 2 opcode + 4 displacement */
10557 opcode
[1] = opcode
[0] + 0x10;
10558 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
10559 where_to_put_displacement
= &opcode
[2];
10562 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
10563 extension
= 3; /* 2 opcode + 2 displacement */
10564 opcode
[1] = opcode
[0] + 0x10;
10565 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
10566 where_to_put_displacement
= &opcode
[2];
10569 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
10574 where_to_put_displacement
= &opcode
[3];
10578 BAD_CASE (fragP
->fr_subtype
);
10583 /* If size if less then four we are sure that the operand fits,
10584 but if it's 4, then it could be that the displacement is larger
10586 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
10588 && ((addressT
) (displacement_from_opcode_start
- extension
10589 + ((addressT
) 1 << 31))
10590 > (((addressT
) 2 << 31) - 1)))
10592 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
10593 _("jump target out of range"));
10594 /* Make us emit 0. */
10595 displacement_from_opcode_start
= extension
;
10597 /* Now put displacement after opcode. */
10598 md_number_to_chars ((char *) where_to_put_displacement
,
10599 (valueT
) (displacement_from_opcode_start
- extension
),
10600 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
10601 fragP
->fr_fix
+= extension
;
10604 /* Apply a fixup (fixP) to segment data, once it has been determined
10605 by our caller that we have all the info we need to fix it up.
10607 Parameter valP is the pointer to the value of the bits.
10609 On the 386, immediates, displacements, and data pointers are all in
10610 the same (little-endian) format, so we don't need to care about which
10611 we are handling. */
10614 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
10616 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
10617 valueT value
= *valP
;
10619 #if !defined (TE_Mach)
10620 if (fixP
->fx_pcrel
)
10622 switch (fixP
->fx_r_type
)
10628 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
10631 case BFD_RELOC_X86_64_32S
:
10632 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
10635 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
10638 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
10643 if (fixP
->fx_addsy
!= NULL
10644 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
10645 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
10646 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
10647 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
10648 && !use_rela_relocations
)
10650 /* This is a hack. There should be a better way to handle this.
10651 This covers for the fact that bfd_install_relocation will
10652 subtract the current location (for partial_inplace, PC relative
10653 relocations); see more below. */
10657 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
10660 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
10662 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10665 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
10667 if ((sym_seg
== seg
10668 || (symbol_section_p (fixP
->fx_addsy
)
10669 && sym_seg
!= absolute_section
))
10670 && !generic_force_reloc (fixP
))
10672 /* Yes, we add the values in twice. This is because
10673 bfd_install_relocation subtracts them out again. I think
10674 bfd_install_relocation is broken, but I don't dare change
10676 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
10680 #if defined (OBJ_COFF) && defined (TE_PE)
10681 /* For some reason, the PE format does not store a
10682 section address offset for a PC relative symbol. */
10683 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
10684 || S_IS_WEAK (fixP
->fx_addsy
))
10685 value
+= md_pcrel_from (fixP
);
10688 #if defined (OBJ_COFF) && defined (TE_PE)
10689 if (fixP
->fx_addsy
!= NULL
10690 && S_IS_WEAK (fixP
->fx_addsy
)
10691 /* PR 16858: Do not modify weak function references. */
10692 && ! fixP
->fx_pcrel
)
10694 #if !defined (TE_PEP)
10695 /* For x86 PE weak function symbols are neither PC-relative
10696 nor do they set S_IS_FUNCTION. So the only reliable way
10697 to detect them is to check the flags of their containing
10699 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
10700 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
10704 value
-= S_GET_VALUE (fixP
->fx_addsy
);
10708 /* Fix a few things - the dynamic linker expects certain values here,
10709 and we must not disappoint it. */
10710 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10711 if (IS_ELF
&& fixP
->fx_addsy
)
10712 switch (fixP
->fx_r_type
)
10714 case BFD_RELOC_386_PLT32
:
10715 case BFD_RELOC_X86_64_PLT32
:
10716 /* Make the jump instruction point to the address of the operand.
10717 At runtime we merely add the offset to the actual PLT entry.
10718 NB: Subtract the offset size only for jump instructions. */
10719 if (fixP
->fx_pcrel
)
10723 case BFD_RELOC_386_TLS_GD
:
10724 case BFD_RELOC_386_TLS_LDM
:
10725 case BFD_RELOC_386_TLS_IE_32
:
10726 case BFD_RELOC_386_TLS_IE
:
10727 case BFD_RELOC_386_TLS_GOTIE
:
10728 case BFD_RELOC_386_TLS_GOTDESC
:
10729 case BFD_RELOC_X86_64_TLSGD
:
10730 case BFD_RELOC_X86_64_TLSLD
:
10731 case BFD_RELOC_X86_64_GOTTPOFF
:
10732 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10733 value
= 0; /* Fully resolved at runtime. No addend. */
10735 case BFD_RELOC_386_TLS_LE
:
10736 case BFD_RELOC_386_TLS_LDO_32
:
10737 case BFD_RELOC_386_TLS_LE_32
:
10738 case BFD_RELOC_X86_64_DTPOFF32
:
10739 case BFD_RELOC_X86_64_DTPOFF64
:
10740 case BFD_RELOC_X86_64_TPOFF32
:
10741 case BFD_RELOC_X86_64_TPOFF64
:
10742 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
10745 case BFD_RELOC_386_TLS_DESC_CALL
:
10746 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10747 value
= 0; /* Fully resolved at runtime. No addend. */
10748 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
10752 case BFD_RELOC_VTABLE_INHERIT
:
10753 case BFD_RELOC_VTABLE_ENTRY
:
10760 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
10762 #endif /* !defined (TE_Mach) */
10764 /* Are we finished with this relocation now? */
10765 if (fixP
->fx_addsy
== NULL
)
10767 #if defined (OBJ_COFF) && defined (TE_PE)
10768 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
10771 /* Remember value for tc_gen_reloc. */
10772 fixP
->fx_addnumber
= value
;
10773 /* Clear out the frag for now. */
10777 else if (use_rela_relocations
)
10779 fixP
->fx_no_overflow
= 1;
10780 /* Remember value for tc_gen_reloc. */
10781 fixP
->fx_addnumber
= value
;
10785 md_number_to_chars (p
, value
, fixP
->fx_size
);
10789 md_atof (int type
, char *litP
, int *sizeP
)
10791 /* This outputs the LITTLENUMs in REVERSE order;
10792 in accord with the bigendian 386. */
10793 return ieee_md_atof (type
, litP
, sizeP
, FALSE
);
/* Scratch buffer for output_invalid: large enough for "'c'" or
   "(0xNN)" plus the terminating NUL.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render character C for an "invalid character" diagnostic: printable
   characters are quoted, everything else is shown as a hex escape.
   Returns a pointer to a static buffer overwritten on each call.  */

static char *
output_invalid (int c)
{
  if (ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  return output_invalid_buf;
}
10810 /* REG_STRING starts *before* REGISTER_PREFIX. */
10812 static const reg_entry
*
10813 parse_real_register (char *reg_string
, char **end_op
)
10815 char *s
= reg_string
;
10817 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
10818 const reg_entry
*r
;
10820 /* Skip possible REGISTER_PREFIX and possible whitespace. */
10821 if (*s
== REGISTER_PREFIX
)
10824 if (is_space_char (*s
))
10827 p
= reg_name_given
;
10828 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
10830 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
10831 return (const reg_entry
*) NULL
;
10835 /* For naked regs, make sure that we are not dealing with an identifier.
10836 This prevents confusing an identifier like `eax_var' with register
10838 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
10839 return (const reg_entry
*) NULL
;
10843 r
= (const reg_entry
*) hash_find (reg_hash
, reg_name_given
);
10845 /* Handle floating point regs, allowing spaces in the (i) part. */
10846 if (r
== i386_regtab
/* %st is first entry of table */)
10848 if (!cpu_arch_flags
.bitfield
.cpu8087
10849 && !cpu_arch_flags
.bitfield
.cpu287
10850 && !cpu_arch_flags
.bitfield
.cpu387
)
10851 return (const reg_entry
*) NULL
;
10853 if (is_space_char (*s
))
10858 if (is_space_char (*s
))
10860 if (*s
>= '0' && *s
<= '7')
10862 int fpr
= *s
- '0';
10864 if (is_space_char (*s
))
10869 r
= (const reg_entry
*) hash_find (reg_hash
, "st(0)");
10874 /* We have "%st(" then garbage. */
10875 return (const reg_entry
*) NULL
;
10879 if (r
== NULL
|| allow_pseudo_reg
)
10882 if (operand_type_all_zero (&r
->reg_type
))
10883 return (const reg_entry
*) NULL
;
10885 if ((r
->reg_type
.bitfield
.dword
10886 || (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
> 3)
10887 || r
->reg_type
.bitfield
.class == RegCR
10888 || r
->reg_type
.bitfield
.class == RegDR
10889 || r
->reg_type
.bitfield
.class == RegTR
)
10890 && !cpu_arch_flags
.bitfield
.cpui386
)
10891 return (const reg_entry
*) NULL
;
10893 if (r
->reg_type
.bitfield
.class == RegMMX
&& !cpu_arch_flags
.bitfield
.cpummx
)
10894 return (const reg_entry
*) NULL
;
10896 if (!cpu_arch_flags
.bitfield
.cpuavx512f
)
10898 if (r
->reg_type
.bitfield
.zmmword
10899 || r
->reg_type
.bitfield
.class == RegMask
)
10900 return (const reg_entry
*) NULL
;
10902 if (!cpu_arch_flags
.bitfield
.cpuavx
)
10904 if (r
->reg_type
.bitfield
.ymmword
)
10905 return (const reg_entry
*) NULL
;
10907 if (!cpu_arch_flags
.bitfield
.cpusse
&& r
->reg_type
.bitfield
.xmmword
)
10908 return (const reg_entry
*) NULL
;
10912 if (r
->reg_type
.bitfield
.class == RegBND
&& !cpu_arch_flags
.bitfield
.cpumpx
)
10913 return (const reg_entry
*) NULL
;
10915 /* Don't allow fake index register unless allow_index_reg isn't 0. */
10916 if (!allow_index_reg
&& r
->reg_num
== RegIZ
)
10917 return (const reg_entry
*) NULL
;
10919 /* Upper 16 vector registers are only available with VREX in 64bit
10920 mode, and require EVEX encoding. */
10921 if (r
->reg_flags
& RegVRex
)
10923 if (!cpu_arch_flags
.bitfield
.cpuavx512f
10924 || flag_code
!= CODE_64BIT
)
10925 return (const reg_entry
*) NULL
;
10927 i
.vec_encoding
= vex_encoding_evex
;
10930 if (((r
->reg_flags
& (RegRex64
| RegRex
)) || r
->reg_type
.bitfield
.qword
)
10931 && (!cpu_arch_flags
.bitfield
.cpulm
|| r
->reg_type
.bitfield
.class != RegCR
)
10932 && flag_code
!= CODE_64BIT
)
10933 return (const reg_entry
*) NULL
;
10935 if (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
== RegFlat
10937 return (const reg_entry
*) NULL
;
10942 /* REG_STRING starts *before* REGISTER_PREFIX. */
10944 static const reg_entry
*
10945 parse_register (char *reg_string
, char **end_op
)
10947 const reg_entry
*r
;
10949 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
10950 r
= parse_real_register (reg_string
, end_op
);
10955 char *save
= input_line_pointer
;
10959 input_line_pointer
= reg_string
;
10960 c
= get_symbol_name (®_string
);
10961 symbolP
= symbol_find (reg_string
);
10962 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
10964 const expressionS
*e
= symbol_get_value_expression (symbolP
);
10966 know (e
->X_op
== O_register
);
10967 know (e
->X_add_number
>= 0
10968 && (valueT
) e
->X_add_number
< i386_regtab_size
);
10969 r
= i386_regtab
+ e
->X_add_number
;
10970 if ((r
->reg_flags
& RegVRex
))
10971 i
.vec_encoding
= vex_encoding_evex
;
10972 *end_op
= input_line_pointer
;
10974 *input_line_pointer
= c
;
10975 input_line_pointer
= save
;
10981 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
10983 const reg_entry
*r
;
10984 char *end
= input_line_pointer
;
10987 r
= parse_register (name
, &input_line_pointer
);
10988 if (r
&& end
<= input_line_pointer
)
10990 *nextcharP
= *input_line_pointer
;
10991 *input_line_pointer
= 0;
10992 e
->X_op
= O_register
;
10993 e
->X_add_number
= r
- i386_regtab
;
10996 input_line_pointer
= end
;
10998 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
11002 md_operand (expressionS
*e
)
11005 const reg_entry
*r
;
11007 switch (*input_line_pointer
)
11009 case REGISTER_PREFIX
:
11010 r
= parse_real_register (input_line_pointer
, &end
);
11013 e
->X_op
= O_register
;
11014 e
->X_add_number
= r
- i386_regtab
;
11015 input_line_pointer
= end
;
11020 gas_assert (intel_syntax
);
11021 end
= input_line_pointer
++;
11023 if (*input_line_pointer
== ']')
11025 ++input_line_pointer
;
11026 e
->X_op_symbol
= make_expr_symbol (e
);
11027 e
->X_add_symbol
= NULL
;
11028 e
->X_add_number
= 0;
11033 e
->X_op
= O_absent
;
11034 input_line_pointer
= end
;
/* Short command-line options.  ELF targets additionally accept the
   SVR4 compatibility letters -k, -V, -Q: and -s.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
const char *md_shortopts = "kVQ:sqnO::";
#else
const char *md_shortopts = "qnO::";
#endif

/* Identifiers for the long options handled in md_parse_option.  */
#define OPTION_32 (OPTION_MD_BASE + 0)
#define OPTION_64 (OPTION_MD_BASE + 1)
#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
#define OPTION_MARCH (OPTION_MD_BASE + 3)
#define OPTION_MTUNE (OPTION_MD_BASE + 4)
#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
#define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
#define OPTION_X32 (OPTION_MD_BASE + 14)
#define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
#define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
#define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
#define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
#define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
#define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
#define OPTION_MSHARED (OPTION_MD_BASE + 21)
#define OPTION_MAMD64 (OPTION_MD_BASE + 22)
#define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
#define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
#define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
#define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
11075 struct option md_longopts
[] =
11077 {"32", no_argument
, NULL
, OPTION_32
},
11078 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
11079 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
11080 {"64", no_argument
, NULL
, OPTION_64
},
11082 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11083 {"x32", no_argument
, NULL
, OPTION_X32
},
11084 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
11085 {"mx86-used-note", required_argument
, NULL
, OPTION_X86_USED_NOTE
},
11087 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
11088 {"march", required_argument
, NULL
, OPTION_MARCH
},
11089 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
11090 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
11091 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
11092 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
11093 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
11094 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
11095 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
11096 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
11097 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
11098 {"mvexwig", required_argument
, NULL
, OPTION_MVEXWIG
},
11099 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
11100 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
11101 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
11102 # if defined (TE_PE) || defined (TE_PEP)
11103 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
11105 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
11106 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
11107 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
11108 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
11109 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
11110 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
11111 {NULL
, no_argument
, NULL
, 0}
11113 size_t md_longopts_size
= sizeof (md_longopts
);
11116 md_parse_option (int c
, const char *arg
)
11119 char *arch
, *next
, *saved
;
11124 optimize_align_code
= 0;
11128 quiet_warnings
= 1;
11131 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11132 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
11133 should be emitted or not. FIXME: Not implemented. */
11135 if ((arg
[0] != 'y' && arg
[0] != 'n') || arg
[1])
11139 /* -V: SVR4 argument to print version ID. */
11141 print_version_id ();
11144 /* -k: Ignore for FreeBSD compatibility. */
11149 /* -s: On i386 Solaris, this tells the native assembler to use
11150 .stab instead of .stab.excl. We always use .stab anyhow. */
11153 case OPTION_MSHARED
:
11157 case OPTION_X86_USED_NOTE
:
11158 if (strcasecmp (arg
, "yes") == 0)
11160 else if (strcasecmp (arg
, "no") == 0)
11163 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg
);
11168 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
11169 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
11172 const char **list
, **l
;
11174 list
= bfd_target_list ();
11175 for (l
= list
; *l
!= NULL
; l
++)
11176 if (CONST_STRNEQ (*l
, "elf64-x86-64")
11177 || strcmp (*l
, "coff-x86-64") == 0
11178 || strcmp (*l
, "pe-x86-64") == 0
11179 || strcmp (*l
, "pei-x86-64") == 0
11180 || strcmp (*l
, "mach-o-x86-64") == 0)
11182 default_arch
= "x86_64";
11186 as_fatal (_("no compiled in support for x86_64"));
11192 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11196 const char **list
, **l
;
11198 list
= bfd_target_list ();
11199 for (l
= list
; *l
!= NULL
; l
++)
11200 if (CONST_STRNEQ (*l
, "elf32-x86-64"))
11202 default_arch
= "x86_64:32";
11206 as_fatal (_("no compiled in support for 32bit x86_64"));
11210 as_fatal (_("32bit x86_64 is only supported for ELF"));
11215 default_arch
= "i386";
11218 case OPTION_DIVIDE
:
11219 #ifdef SVR4_COMMENT_CHARS
11224 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
11226 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
11230 i386_comment_chars
= n
;
11236 saved
= xstrdup (arg
);
11238 /* Allow -march=+nosse. */
11244 as_fatal (_("invalid -march= option: `%s'"), arg
);
11245 next
= strchr (arch
, '+');
11248 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
11250 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
11253 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
11256 cpu_arch_name
= cpu_arch
[j
].name
;
11257 cpu_sub_arch_name
= NULL
;
11258 cpu_arch_flags
= cpu_arch
[j
].flags
;
11259 cpu_arch_isa
= cpu_arch
[j
].type
;
11260 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
11261 if (!cpu_arch_tune_set
)
11263 cpu_arch_tune
= cpu_arch_isa
;
11264 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
11268 else if (*cpu_arch
[j
].name
== '.'
11269 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
11271 /* ISA extension. */
11272 i386_cpu_flags flags
;
11274 flags
= cpu_flags_or (cpu_arch_flags
,
11275 cpu_arch
[j
].flags
);
11277 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
11279 if (cpu_sub_arch_name
)
11281 char *name
= cpu_sub_arch_name
;
11282 cpu_sub_arch_name
= concat (name
,
11284 (const char *) NULL
);
11288 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
11289 cpu_arch_flags
= flags
;
11290 cpu_arch_isa_flags
= flags
;
11294 = cpu_flags_or (cpu_arch_isa_flags
,
11295 cpu_arch
[j
].flags
);
11300 if (j
>= ARRAY_SIZE (cpu_arch
))
11302 /* Disable an ISA extension. */
11303 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
11304 if (strcmp (arch
, cpu_noarch
[j
].name
) == 0)
11306 i386_cpu_flags flags
;
11308 flags
= cpu_flags_and_not (cpu_arch_flags
,
11309 cpu_noarch
[j
].flags
);
11310 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
11312 if (cpu_sub_arch_name
)
11314 char *name
= cpu_sub_arch_name
;
11315 cpu_sub_arch_name
= concat (arch
,
11316 (const char *) NULL
);
11320 cpu_sub_arch_name
= xstrdup (arch
);
11321 cpu_arch_flags
= flags
;
11322 cpu_arch_isa_flags
= flags
;
11327 if (j
>= ARRAY_SIZE (cpu_noarch
))
11328 j
= ARRAY_SIZE (cpu_arch
);
11331 if (j
>= ARRAY_SIZE (cpu_arch
))
11332 as_fatal (_("invalid -march= option: `%s'"), arg
);
11336 while (next
!= NULL
);
11342 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
11343 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
11345 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
11347 cpu_arch_tune_set
= 1;
11348 cpu_arch_tune
= cpu_arch
[j
].type
;
11349 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
11353 if (j
>= ARRAY_SIZE (cpu_arch
))
11354 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
11357 case OPTION_MMNEMONIC
:
11358 if (strcasecmp (arg
, "att") == 0)
11359 intel_mnemonic
= 0;
11360 else if (strcasecmp (arg
, "intel") == 0)
11361 intel_mnemonic
= 1;
11363 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
11366 case OPTION_MSYNTAX
:
11367 if (strcasecmp (arg
, "att") == 0)
11369 else if (strcasecmp (arg
, "intel") == 0)
11372 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
11375 case OPTION_MINDEX_REG
:
11376 allow_index_reg
= 1;
11379 case OPTION_MNAKED_REG
:
11380 allow_naked_reg
= 1;
11383 case OPTION_MSSE2AVX
:
11387 case OPTION_MSSE_CHECK
:
11388 if (strcasecmp (arg
, "error") == 0)
11389 sse_check
= check_error
;
11390 else if (strcasecmp (arg
, "warning") == 0)
11391 sse_check
= check_warning
;
11392 else if (strcasecmp (arg
, "none") == 0)
11393 sse_check
= check_none
;
11395 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
11398 case OPTION_MOPERAND_CHECK
:
11399 if (strcasecmp (arg
, "error") == 0)
11400 operand_check
= check_error
;
11401 else if (strcasecmp (arg
, "warning") == 0)
11402 operand_check
= check_warning
;
11403 else if (strcasecmp (arg
, "none") == 0)
11404 operand_check
= check_none
;
11406 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
11409 case OPTION_MAVXSCALAR
:
11410 if (strcasecmp (arg
, "128") == 0)
11411 avxscalar
= vex128
;
11412 else if (strcasecmp (arg
, "256") == 0)
11413 avxscalar
= vex256
;
11415 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
11418 case OPTION_MVEXWIG
:
11419 if (strcmp (arg
, "0") == 0)
11421 else if (strcmp (arg
, "1") == 0)
11424 as_fatal (_("invalid -mvexwig= option: `%s'"), arg
);
11427 case OPTION_MADD_BND_PREFIX
:
11428 add_bnd_prefix
= 1;
11431 case OPTION_MEVEXLIG
:
11432 if (strcmp (arg
, "128") == 0)
11433 evexlig
= evexl128
;
11434 else if (strcmp (arg
, "256") == 0)
11435 evexlig
= evexl256
;
11436 else if (strcmp (arg
, "512") == 0)
11437 evexlig
= evexl512
;
11439 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
11442 case OPTION_MEVEXRCIG
:
11443 if (strcmp (arg
, "rne") == 0)
11445 else if (strcmp (arg
, "rd") == 0)
11447 else if (strcmp (arg
, "ru") == 0)
11449 else if (strcmp (arg
, "rz") == 0)
11452 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
11455 case OPTION_MEVEXWIG
:
11456 if (strcmp (arg
, "0") == 0)
11458 else if (strcmp (arg
, "1") == 0)
11461 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
11464 # if defined (TE_PE) || defined (TE_PEP)
11465 case OPTION_MBIG_OBJ
:
11470 case OPTION_MOMIT_LOCK_PREFIX
:
11471 if (strcasecmp (arg
, "yes") == 0)
11472 omit_lock_prefix
= 1;
11473 else if (strcasecmp (arg
, "no") == 0)
11474 omit_lock_prefix
= 0;
11476 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
11479 case OPTION_MFENCE_AS_LOCK_ADD
:
11480 if (strcasecmp (arg
, "yes") == 0)
11482 else if (strcasecmp (arg
, "no") == 0)
11485 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
11488 case OPTION_MRELAX_RELOCATIONS
:
11489 if (strcasecmp (arg
, "yes") == 0)
11490 generate_relax_relocations
= 1;
11491 else if (strcasecmp (arg
, "no") == 0)
11492 generate_relax_relocations
= 0;
11494 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
11497 case OPTION_MAMD64
:
11501 case OPTION_MINTEL64
:
11509 /* Turn off -Os. */
11510 optimize_for_space
= 0;
11512 else if (*arg
== 's')
11514 optimize_for_space
= 1;
11515 /* Turn on all encoding optimizations. */
11516 optimize
= INT_MAX
;
11520 optimize
= atoi (arg
);
11521 /* Turn off -Os. */
11522 optimize_for_space
= 0;
/* Blank template whose size bounds one output line of show_arch.
   NOTE(review): the template body was lost in extraction; upstream
   binutils uses a line of spaces here -- confirm the exact width.  */
#define MESSAGE_TEMPLATE \
"                                                                                "

/* Append NAME (LEN bytes) at P inside MESSAGE, flushing MESSAGE to
   STREAM and restarting at START when the line is full.  *LEFT_P
   tracks the space remaining; returns the new write position.  */

static char *
output_message (FILE *stream, char *p, char *message, char *start,
		int *left_p, const char *name, int len)
{
  int size = sizeof (MESSAGE_TEMPLATE);
  int left = *left_p;

  /* Reserve 2 spaces for ", " or ",\0" */
  left -= len + 2;

  /* Check if there is any room.  */
  if (left >= 0)
    {
      if (p != start)
	{
	  *p++ = ',';
	  *p++ = ' ';
	}
      p = mempcpy (p, name, len);
    }
  else
    {
      /* Output the current message now and start a new one.  */
      *p++ = ',';
      *p = '\0';
      fprintf (stream, "%s\n", message);
      p = start;
      left = size - (start - message) - len - 2;

      gas_assert (left >= 0);

      p = mempcpy (p, name, len);
    }

  *left_p = left;
  return p;
}
11574 show_arch (FILE *stream
, int ext
, int check
)
11576 static char message
[] = MESSAGE_TEMPLATE
;
11577 char *start
= message
+ 27;
11579 int size
= sizeof (MESSAGE_TEMPLATE
);
11586 left
= size
- (start
- message
);
11587 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
11589 /* Should it be skipped? */
11590 if (cpu_arch
[j
].skip
)
11593 name
= cpu_arch
[j
].name
;
11594 len
= cpu_arch
[j
].len
;
11597 /* It is an extension. Skip if we aren't asked to show it. */
11608 /* It is an processor. Skip if we show only extension. */
11611 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
11613 /* It is an impossible processor - skip. */
11617 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
11620 /* Display disabled extensions. */
11622 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
11624 name
= cpu_noarch
[j
].name
;
11625 len
= cpu_noarch
[j
].len
;
11626 p
= output_message (stream
, p
, message
, start
, &left
, name
,
11631 fprintf (stream
, "%s\n", message
);
11635 md_show_usage (FILE *stream
)
11637 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11638 fprintf (stream
, _("\
11639 -Qy, -Qn ignored\n\
11640 -V print assembler version number\n\
11643 fprintf (stream
, _("\
11644 -n Do not optimize code alignment\n\
11645 -q quieten some warnings\n"));
11646 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11647 fprintf (stream
, _("\
11650 #if defined BFD64 && (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
11651 || defined (TE_PE) || defined (TE_PEP))
11652 fprintf (stream
, _("\
11653 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
11655 #ifdef SVR4_COMMENT_CHARS
11656 fprintf (stream
, _("\
11657 --divide do not treat `/' as a comment character\n"));
11659 fprintf (stream
, _("\
11660 --divide ignored\n"));
11662 fprintf (stream
, _("\
11663 -march=CPU[,+EXTENSION...]\n\
11664 generate code for CPU and EXTENSION, CPU is one of:\n"));
11665 show_arch (stream
, 0, 1);
11666 fprintf (stream
, _("\
11667 EXTENSION is combination of:\n"));
11668 show_arch (stream
, 1, 0);
11669 fprintf (stream
, _("\
11670 -mtune=CPU optimize for CPU, CPU is one of:\n"));
11671 show_arch (stream
, 0, 0);
11672 fprintf (stream
, _("\
11673 -msse2avx encode SSE instructions with VEX prefix\n"));
11674 fprintf (stream
, _("\
11675 -msse-check=[none|error|warning] (default: warning)\n\
11676 check SSE instructions\n"));
11677 fprintf (stream
, _("\
11678 -moperand-check=[none|error|warning] (default: warning)\n\
11679 check operand combinations for validity\n"));
11680 fprintf (stream
, _("\
11681 -mavxscalar=[128|256] (default: 128)\n\
11682 encode scalar AVX instructions with specific vector\n\
11684 fprintf (stream
, _("\
11685 -mvexwig=[0|1] (default: 0)\n\
11686 encode VEX instructions with specific VEX.W value\n\
11687 for VEX.W bit ignored instructions\n"));
11688 fprintf (stream
, _("\
11689 -mevexlig=[128|256|512] (default: 128)\n\
11690 encode scalar EVEX instructions with specific vector\n\
11692 fprintf (stream
, _("\
11693 -mevexwig=[0|1] (default: 0)\n\
11694 encode EVEX instructions with specific EVEX.W value\n\
11695 for EVEX.W bit ignored instructions\n"));
11696 fprintf (stream
, _("\
11697 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
11698 encode EVEX instructions with specific EVEX.RC value\n\
11699 for SAE-only ignored instructions\n"));
11700 fprintf (stream
, _("\
11701 -mmnemonic=[att|intel] "));
11702 if (SYSV386_COMPAT
)
11703 fprintf (stream
, _("(default: att)\n"));
11705 fprintf (stream
, _("(default: intel)\n"));
11706 fprintf (stream
, _("\
11707 use AT&T/Intel mnemonic\n"));
11708 fprintf (stream
, _("\
11709 -msyntax=[att|intel] (default: att)\n\
11710 use AT&T/Intel syntax\n"));
11711 fprintf (stream
, _("\
11712 -mindex-reg support pseudo index registers\n"));
11713 fprintf (stream
, _("\
11714 -mnaked-reg don't require `%%' prefix for registers\n"));
11715 fprintf (stream
, _("\
11716 -madd-bnd-prefix add BND prefix for all valid branches\n"));
11717 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11718 fprintf (stream
, _("\
11719 -mshared disable branch optimization for shared code\n"));
11720 fprintf (stream
, _("\
11721 -mx86-used-note=[no|yes] "));
11722 if (DEFAULT_X86_USED_NOTE
)
11723 fprintf (stream
, _("(default: yes)\n"));
11725 fprintf (stream
, _("(default: no)\n"));
11726 fprintf (stream
, _("\
11727 generate x86 used ISA and feature properties\n"));
11729 #if defined (TE_PE) || defined (TE_PEP)
11730 fprintf (stream
, _("\
11731 -mbig-obj generate big object files\n"));
11733 fprintf (stream
, _("\
11734 -momit-lock-prefix=[no|yes] (default: no)\n\
11735 strip all lock prefixes\n"));
11736 fprintf (stream
, _("\
11737 -mfence-as-lock-add=[no|yes] (default: no)\n\
11738 encode lfence, mfence and sfence as\n\
11739 lock addl $0x0, (%%{re}sp)\n"));
11740 fprintf (stream
, _("\
11741 -mrelax-relocations=[no|yes] "));
11742 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
)
11743 fprintf (stream
, _("(default: yes)\n"));
11745 fprintf (stream
, _("(default: no)\n"));
11746 fprintf (stream
, _("\
11747 generate relax relocations\n"));
11748 fprintf (stream
, _("\
11749 -mamd64 accept only AMD64 ISA [default]\n"));
11750 fprintf (stream
, _("\
11751 -mintel64 accept only Intel64 ISA\n"));
#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* Pick the target format to use.  Decides 32/64-bit code mode from
   DEFAULT_ARCH, fills in the default ISA/tune flags when none were
   given on the command line, and maps OUTPUT_FLAVOR onto a BFD
   target name string.  */

const char *
i386_target_format (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      /* "x86_64" selects the LP64 ABI, "x86_64:..." (x32) the ILP32 one.  */
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else if (!strcmp (default_arch, "iamcu"))
    {
      update_code_flag (CODE_32BIT, 1);
      if (cpu_arch_isa == PROCESSOR_UNKNOWN)
	{
	  /* No -march given: force the Intel MCU ISA set.  */
	  static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
	  cpu_arch_name = "iamcu";
	  cpu_sub_arch_name = NULL;
	  cpu_arch_flags = iamcu_flags;
	  cpu_arch_isa = PROCESSOR_IAMCU;
	  cpu_arch_isa_flags = iamcu_flags;
	  if (!cpu_arch_tune_set)
	    {
	      cpu_arch_tune = cpu_arch_isa;
	      cpu_arch_tune_flags = cpu_arch_isa_flags;
	    }
	}
      else if (cpu_arch_isa != PROCESSOR_IAMCU)
	as_fatal (_("Intel MCU doesn't support `%s' architecture"),
		  cpu_arch_name);
    }
  else
    as_fatal (_("unknown architecture"));

  /* Fall back to the generic 32- or 64-bit flag set when nothing was
     selected explicitly.  */
  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
      else
	return "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    /* x32 objects cannot carry 64-bit relocations.  */
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_IAMCU)
	  {
	    if (x86_elf_abi != I386_ABI)
	      as_fatal (_("Intel MCU is 32bit only"));
	    return ELF_TARGET_IAMCU_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}

#endif /* OBJ_MAYBE_ more than one */
11887 md_undefined_symbol (char *name
)
11889 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
11890 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
11891 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
11892 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
11896 if (symbol_find (name
))
11897 as_bad (_("GOT already in symbol table"));
11898 GOT_symbol
= symbol_new (name
, undefined_section
,
11899 (valueT
) 0, &zero_address_frag
);
11906 /* Round up a section size to the appropriate boundary. */
11909 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
11911 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
11912 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
11914 /* For a.out, force the section size to be aligned. If we don't do
11915 this, BFD will align it for us, but it will not write out the
11916 final bytes of the section. This may be a bug in BFD, but it is
11917 easier to fix it here since that is how the other a.out targets
11921 align
= bfd_section_alignment (segment
);
11922 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
11929 /* On the i386, PC-relative offsets are relative to the start of the
11930 next instruction. That is, the address of the offset, plus its
11931 size, since the offset is always the last part of the insn. */
11934 md_pcrel_from (fixS
*fixP
)
11936 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
11942 s_bss (int ignore ATTRIBUTE_UNUSED
)
11946 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11948 obj_elf_section_change_hook ();
11950 temp
= get_absolute_expression ();
11951 subseg_set (bss_section
, (subsegT
) temp
);
11952 demand_empty_rest_of_line ();
11958 i386_validate_fix (fixS
*fixp
)
11960 if (fixp
->fx_subsy
)
11962 if (fixp
->fx_subsy
== GOT_symbol
)
11964 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
11968 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11969 if (fixp
->fx_tcbit2
)
11970 fixp
->fx_r_type
= (fixp
->fx_tcbit
11971 ? BFD_RELOC_X86_64_REX_GOTPCRELX
11972 : BFD_RELOC_X86_64_GOTPCRELX
);
11975 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
11980 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
11982 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
11984 fixp
->fx_subsy
= 0;
11987 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11988 else if (!object_64bit
)
11990 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
11991 && fixp
->fx_tcbit2
)
11992 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
11998 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
12001 bfd_reloc_code_real_type code
;
12003 switch (fixp
->fx_r_type
)
12005 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12006 case BFD_RELOC_SIZE32
:
12007 case BFD_RELOC_SIZE64
:
12008 if (S_IS_DEFINED (fixp
->fx_addsy
)
12009 && !S_IS_EXTERNAL (fixp
->fx_addsy
))
12011 /* Resolve size relocation against local symbol to size of
12012 the symbol plus addend. */
12013 valueT value
= S_GET_SIZE (fixp
->fx_addsy
) + fixp
->fx_offset
;
12014 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
12015 && !fits_in_unsigned_long (value
))
12016 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12017 _("symbol size computation overflow"));
12018 fixp
->fx_addsy
= NULL
;
12019 fixp
->fx_subsy
= NULL
;
12020 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
12024 /* Fall through. */
12026 case BFD_RELOC_X86_64_PLT32
:
12027 case BFD_RELOC_X86_64_GOT32
:
12028 case BFD_RELOC_X86_64_GOTPCREL
:
12029 case BFD_RELOC_X86_64_GOTPCRELX
:
12030 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
12031 case BFD_RELOC_386_PLT32
:
12032 case BFD_RELOC_386_GOT32
:
12033 case BFD_RELOC_386_GOT32X
:
12034 case BFD_RELOC_386_GOTOFF
:
12035 case BFD_RELOC_386_GOTPC
:
12036 case BFD_RELOC_386_TLS_GD
:
12037 case BFD_RELOC_386_TLS_LDM
:
12038 case BFD_RELOC_386_TLS_LDO_32
:
12039 case BFD_RELOC_386_TLS_IE_32
:
12040 case BFD_RELOC_386_TLS_IE
:
12041 case BFD_RELOC_386_TLS_GOTIE
:
12042 case BFD_RELOC_386_TLS_LE_32
:
12043 case BFD_RELOC_386_TLS_LE
:
12044 case BFD_RELOC_386_TLS_GOTDESC
:
12045 case BFD_RELOC_386_TLS_DESC_CALL
:
12046 case BFD_RELOC_X86_64_TLSGD
:
12047 case BFD_RELOC_X86_64_TLSLD
:
12048 case BFD_RELOC_X86_64_DTPOFF32
:
12049 case BFD_RELOC_X86_64_DTPOFF64
:
12050 case BFD_RELOC_X86_64_GOTTPOFF
:
12051 case BFD_RELOC_X86_64_TPOFF32
:
12052 case BFD_RELOC_X86_64_TPOFF64
:
12053 case BFD_RELOC_X86_64_GOTOFF64
:
12054 case BFD_RELOC_X86_64_GOTPC32
:
12055 case BFD_RELOC_X86_64_GOT64
:
12056 case BFD_RELOC_X86_64_GOTPCREL64
:
12057 case BFD_RELOC_X86_64_GOTPC64
:
12058 case BFD_RELOC_X86_64_GOTPLT64
:
12059 case BFD_RELOC_X86_64_PLTOFF64
:
12060 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12061 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12062 case BFD_RELOC_RVA
:
12063 case BFD_RELOC_VTABLE_ENTRY
:
12064 case BFD_RELOC_VTABLE_INHERIT
:
12066 case BFD_RELOC_32_SECREL
:
12068 code
= fixp
->fx_r_type
;
12070 case BFD_RELOC_X86_64_32S
:
12071 if (!fixp
->fx_pcrel
)
12073 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
12074 code
= fixp
->fx_r_type
;
12077 /* Fall through. */
12079 if (fixp
->fx_pcrel
)
12081 switch (fixp
->fx_size
)
12084 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12085 _("can not do %d byte pc-relative relocation"),
12087 code
= BFD_RELOC_32_PCREL
;
12089 case 1: code
= BFD_RELOC_8_PCREL
; break;
12090 case 2: code
= BFD_RELOC_16_PCREL
; break;
12091 case 4: code
= BFD_RELOC_32_PCREL
; break;
12093 case 8: code
= BFD_RELOC_64_PCREL
; break;
12099 switch (fixp
->fx_size
)
12102 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12103 _("can not do %d byte relocation"),
12105 code
= BFD_RELOC_32
;
12107 case 1: code
= BFD_RELOC_8
; break;
12108 case 2: code
= BFD_RELOC_16
; break;
12109 case 4: code
= BFD_RELOC_32
; break;
12111 case 8: code
= BFD_RELOC_64
; break;
12118 if ((code
== BFD_RELOC_32
12119 || code
== BFD_RELOC_32_PCREL
12120 || code
== BFD_RELOC_X86_64_32S
)
12122 && fixp
->fx_addsy
== GOT_symbol
)
12125 code
= BFD_RELOC_386_GOTPC
;
12127 code
= BFD_RELOC_X86_64_GOTPC32
;
12129 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
12131 && fixp
->fx_addsy
== GOT_symbol
)
12133 code
= BFD_RELOC_X86_64_GOTPC64
;
12136 rel
= XNEW (arelent
);
12137 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
12138 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
12140 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
12142 if (!use_rela_relocations
)
12144 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
12145 vtable entry to be used in the relocation's section offset. */
12146 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
12147 rel
->address
= fixp
->fx_offset
;
12148 #if defined (OBJ_COFF) && defined (TE_PE)
12149 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
12150 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
12155 /* Use the rela in 64bit mode. */
12158 if (disallow_64bit_reloc
)
12161 case BFD_RELOC_X86_64_DTPOFF64
:
12162 case BFD_RELOC_X86_64_TPOFF64
:
12163 case BFD_RELOC_64_PCREL
:
12164 case BFD_RELOC_X86_64_GOTOFF64
:
12165 case BFD_RELOC_X86_64_GOT64
:
12166 case BFD_RELOC_X86_64_GOTPCREL64
:
12167 case BFD_RELOC_X86_64_GOTPC64
:
12168 case BFD_RELOC_X86_64_GOTPLT64
:
12169 case BFD_RELOC_X86_64_PLTOFF64
:
12170 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12171 _("cannot represent relocation type %s in x32 mode"),
12172 bfd_get_reloc_code_name (code
));
12178 if (!fixp
->fx_pcrel
)
12179 rel
->addend
= fixp
->fx_offset
;
12183 case BFD_RELOC_X86_64_PLT32
:
12184 case BFD_RELOC_X86_64_GOT32
:
12185 case BFD_RELOC_X86_64_GOTPCREL
:
12186 case BFD_RELOC_X86_64_GOTPCRELX
:
12187 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
12188 case BFD_RELOC_X86_64_TLSGD
:
12189 case BFD_RELOC_X86_64_TLSLD
:
12190 case BFD_RELOC_X86_64_GOTTPOFF
:
12191 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12192 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12193 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
12196 rel
->addend
= (section
->vma
12198 + fixp
->fx_addnumber
12199 + md_pcrel_from (fixp
));
12204 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
12205 if (rel
->howto
== NULL
)
12207 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
12208 _("cannot represent relocation type %s"),
12209 bfd_get_reloc_code_name (code
));
12210 /* Set howto to a garbage value so that we can keep going. */
12211 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
12212 gas_assert (rel
->howto
!= NULL
);
12218 #include "tc-i386-intel.c"
12221 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
12223 int saved_naked_reg
;
12224 char saved_register_dot
;
12226 saved_naked_reg
= allow_naked_reg
;
12227 allow_naked_reg
= 1;
12228 saved_register_dot
= register_chars
['.'];
12229 register_chars
['.'] = '.';
12230 allow_pseudo_reg
= 1;
12231 expression_and_evaluate (exp
);
12232 allow_pseudo_reg
= 0;
12233 register_chars
['.'] = saved_register_dot
;
12234 allow_naked_reg
= saved_naked_reg
;
12236 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
12238 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
12240 exp
->X_op
= O_constant
;
12241 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
12242 .dw2_regnum
[flag_code
>> 1];
12245 exp
->X_op
= O_illegal
;
12250 tc_x86_frame_initial_instructions (void)
12252 static unsigned int sp_regno
[2];
12254 if (!sp_regno
[flag_code
>> 1])
12256 char *saved_input
= input_line_pointer
;
12257 char sp
[][4] = {"esp", "rsp"};
12260 input_line_pointer
= sp
[flag_code
>> 1];
12261 tc_x86_parse_to_dw2regnum (&exp
);
12262 gas_assert (exp
.X_op
== O_constant
);
12263 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
12264 input_line_pointer
= saved_input
;
12267 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
12268 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
12272 x86_dwarf2_addr_size (void)
12274 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
12275 if (x86_elf_abi
== X86_64_X32_ABI
)
12278 return bfd_arch_bits_per_address (stdoutput
) / 8;
12282 i386_elf_section_type (const char *str
, size_t len
)
12284 if (flag_code
== CODE_64BIT
12285 && len
== sizeof ("unwind") - 1
12286 && strncmp (str
, "unwind", 6) == 0)
12287 return SHT_X86_64_UNWIND
;
12294 i386_solaris_fix_up_eh_frame (segT sec
)
12296 if (flag_code
== CODE_64BIT
)
12297 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
12303 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
12307 exp
.X_op
= O_secrel
;
12308 exp
.X_add_symbol
= symbol
;
12309 exp
.X_add_number
= 0;
12310 emit_expr (&exp
, size
);
12314 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12315 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
12318 x86_64_section_letter (int letter
, const char **ptr_msg
)
12320 if (flag_code
== CODE_64BIT
)
12323 return SHF_X86_64_LARGE
;
12325 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
12328 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
12333 x86_64_section_word (char *str
, size_t len
)
12335 if (len
== 5 && flag_code
== CODE_64BIT
&& CONST_STRNEQ (str
, "large"))
12336 return SHF_X86_64_LARGE
;
12342 handle_large_common (int small ATTRIBUTE_UNUSED
)
12344 if (flag_code
!= CODE_64BIT
)
12346 s_comm_internal (0, elf_common_parse
);
12347 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
12351 static segT lbss_section
;
12352 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
12353 asection
*saved_bss_section
= bss_section
;
12355 if (lbss_section
== NULL
)
12357 flagword applicable
;
12358 segT seg
= now_seg
;
12359 subsegT subseg
= now_subseg
;
12361 /* The .lbss section is for local .largecomm symbols. */
12362 lbss_section
= subseg_new (".lbss", 0);
12363 applicable
= bfd_applicable_section_flags (stdoutput
);
12364 bfd_set_section_flags (lbss_section
, applicable
& SEC_ALLOC
);
12365 seg_info (lbss_section
)->bss
= 1;
12367 subseg_set (seg
, subseg
);
12370 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
12371 bss_section
= lbss_section
;
12373 s_comm_internal (0, elf_common_parse
);
12375 elf_com_section_ptr
= saved_com_section_ptr
;
12376 bss_section
= saved_bss_section
;
12379 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */