1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2021 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
19 02110-1301, USA.  */
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
37 #ifndef INFER_ADDR_PREFIX
38 #define INFER_ADDR_PREFIX 1
42 #define DEFAULT_ARCH "i386"
47 #define INLINE __inline__
53 /* Prefixes will be emitted in the order defined below.
54 WAIT_PREFIX must be the first prefix since FWAIT is really an
55 instruction, and so must come before any prefixes.
56 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
57 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
63 #define HLE_PREFIX REP_PREFIX
64 #define BND_PREFIX REP_PREFIX
66 #define REX_PREFIX 6 /* must come last. */
67 #define MAX_PREFIXES 7 /* max prefixes per opcode */
/* We define the syntax here (modulo base,index,scale syntax).
   These single characters introduce registers, immediates, and
   absolute (indirect) branch targets in AT&T syntax.  */
#define REGISTER_PREFIX '%'
#define IMMEDIATE_PREFIX '$'
#define ABSOLUTE_PREFIX '*'
/* These are the instruction mnemonic suffixes in AT&T syntax or
   memory operand size in Intel syntax.  */
#define WORD_MNEM_SUFFIX  'w'
#define BYTE_MNEM_SUFFIX  'b'
#define SHORT_MNEM_SUFFIX 's'
#define LONG_MNEM_SUFFIX  'l'
#define QWORD_MNEM_SUFFIX 'q'
81 /* Intel Syntax.  Use a non-ascii letter since it never appears
82 in operand strings.  */
83 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
85 #define END_OF_INSN '\0'
87 /* This matches the C -> StaticRounding alias in the opcode table. */
88 #define commutative staticrounding
91 'templates' is for grouping together 'template' structures for opcodes
92 of the same name. This is only used for storing the insns in the grand
93 ole hash table of insns.
94 The templates themselves start at START and range up to (but not including)
99 const insn_template
*start
;
100 const insn_template
*end
;
104 /* 386 operand encoding bytes: see 386 book for details of this. */
107 unsigned int regmem
; /* codes register or memory operand */
108 unsigned int reg
; /* codes register operand (or extended opcode) */
109 unsigned int mode
; /* how to interpret regmem & reg */
113 /* x86-64 extension prefix. */
114 typedef int rex_byte
;
116 /* 386 opcode byte to code indirect addressing. */
125 /* x86 arch names, types and features */
128 const char *name
; /* arch name */
129 unsigned int len
; /* arch string length */
130 enum processor_type type
; /* arch type */
131 i386_cpu_flags flags
; /* cpu feature flags */
132 unsigned int skip
; /* show_arch should skip this. */
136 /* Used to turn off indicated flags. */
139 const char *name
; /* arch name */
140 unsigned int len
; /* arch string length */
141 i386_cpu_flags flags
; /* cpu feature flags */
145 static void update_code_flag (int, int);
146 static void set_code_flag (int);
147 static void set_16bit_gcc_code_flag (int);
148 static void set_intel_syntax (int);
149 static void set_intel_mnemonic (int);
150 static void set_allow_index_reg (int);
151 static void set_check (int);
152 static void set_cpu_arch (int);
154 static void pe_directive_secrel (int);
156 static void signed_cons (int);
157 static char *output_invalid (int c
);
158 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
160 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
162 static int i386_att_operand (char *);
163 static int i386_intel_operand (char *, int);
164 static int i386_intel_simplify (expressionS
*);
165 static int i386_intel_parse_name (const char *, expressionS
*);
166 static const reg_entry
*parse_register (char *, char **);
167 static char *parse_insn (char *, char *);
168 static char *parse_operands (char *, const char *);
169 static void swap_operands (void);
170 static void swap_2_operands (unsigned int, unsigned int);
171 static enum flag_code
i386_addressing_mode (void);
172 static void optimize_imm (void);
173 static void optimize_disp (void);
174 static const insn_template
*match_template (char);
175 static int check_string (void);
176 static int process_suffix (void);
177 static int check_byte_reg (void);
178 static int check_long_reg (void);
179 static int check_qword_reg (void);
180 static int check_word_reg (void);
181 static int finalize_imm (void);
182 static int process_operands (void);
183 static const reg_entry
*build_modrm_byte (void);
184 static void output_insn (void);
185 static void output_imm (fragS
*, offsetT
);
186 static void output_disp (fragS
*, offsetT
);
188 static void s_bss (int);
190 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
191 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
193 /* GNU_PROPERTY_X86_ISA_1_USED. */
194 static unsigned int x86_isa_1_used
;
195 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
196 static unsigned int x86_feature_2_used
;
197 /* Generate x86 used ISA and feature properties. */
198 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
201 static const char *default_arch
= DEFAULT_ARCH
;
203 /* parse_register() returns this when a register alias cannot be used. */
204 static const reg_entry bad_reg
= { "<bad>", OPERAND_TYPE_NONE
, 0, 0,
205 { Dw2Inval
, Dw2Inval
} };
207 static const reg_entry
*reg_eax
;
208 static const reg_entry
*reg_ds
;
209 static const reg_entry
*reg_es
;
210 static const reg_entry
*reg_ss
;
211 static const reg_entry
*reg_st0
;
212 static const reg_entry
*reg_k0
;
217 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
218 unsigned char bytes
[4];
220 /* Destination or source register specifier. */
221 const reg_entry
*register_specifier
;
224 /* 'md_assemble ()' gathers together information and puts it into a
231 const reg_entry
*regs
;
236 operand_size_mismatch
,
237 operand_type_mismatch
,
238 register_type_mismatch
,
239 number_of_operands_mismatch
,
240 invalid_instruction_suffix
,
242 unsupported_with_intel_mnemonic
,
246 invalid_vsib_address
,
247 invalid_vector_register_set
,
248 invalid_tmm_register_set
,
249 unsupported_vector_index_register
,
250 unsupported_broadcast
,
253 mask_not_on_destination
,
256 rc_sae_operand_not_last_imm
,
257 invalid_register_operand
,
262 /* TM holds the template for the insn were currently assembling. */
265 /* SUFFIX holds the instruction size suffix for byte, word, dword
266 or qword, if given. */
269 /* OPCODE_LENGTH holds the number of base opcode bytes. */
270 unsigned char opcode_length
;
272 /* OPERANDS gives the number of given operands. */
273 unsigned int operands
;
275 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
276 of given register, displacement, memory operands and immediate
278 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
280 /* TYPES [i] is the type (see above #defines) which tells us how to
281 use OP[i] for the corresponding operand. */
282 i386_operand_type types
[MAX_OPERANDS
];
284 /* Displacement expression, immediate expression, or register for each
286 union i386_op op
[MAX_OPERANDS
];
288 /* Flags for operands. */
289 unsigned int flags
[MAX_OPERANDS
];
290 #define Operand_PCrel 1
291 #define Operand_Mem 2
293 /* Relocation type for operand */
294 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
296 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
297 the base index byte below. */
298 const reg_entry
*base_reg
;
299 const reg_entry
*index_reg
;
300 unsigned int log2_scale_factor
;
302 /* SEG gives the seg_entries of this insn. They are zero unless
303 explicit segment overrides are given. */
304 const reg_entry
*seg
[2];
306 /* Copied first memory operand string, for re-checking. */
309 /* PREFIX holds all the given prefix opcodes (usually null).
310 PREFIXES is the number of prefix opcodes. */
311 unsigned int prefixes
;
312 unsigned char prefix
[MAX_PREFIXES
];
314 /* Register is in low 3 bits of opcode. */
317 /* The operand to a branch insn indicates an absolute branch. */
320 /* Extended states. */
328 xstate_ymm
= 1 << 2 | xstate_xmm
,
330 xstate_zmm
= 1 << 3 | xstate_ymm
,
333 /* Use MASK state. */
337 /* Has GOTPC or TLS relocation. */
338 bool has_gotpc_tls_reloc
;
340 /* RM and SIB are the modrm byte and the sib byte where the
341 addressing modes of this insn are encoded. */
348 /* Masking attributes.
350 The struct describes masking, applied to OPERAND in the instruction.
351 REG is a pointer to the corresponding mask register. ZEROING tells
352 whether merging or zeroing mask is used. */
353 struct Mask_Operation
355 const reg_entry
*reg
;
356 unsigned int zeroing
;
357 /* The operand where this operation is associated. */
358 unsigned int operand
;
361 /* Rounding control and SAE attributes. */
374 unsigned int operand
;
377 /* Broadcasting attributes.
379 The struct describes broadcasting, applied to OPERAND. TYPE is
380 expresses the broadcast factor. */
381 struct Broadcast_Operation
383 /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}. */
386 /* Index of broadcasted operand. */
387 unsigned int operand
;
389 /* Number of bytes to broadcast. */
393 /* Compressed disp8*N attribute. */
394 unsigned int memshift
;
396 /* Prefer load or store in encoding. */
399 dir_encoding_default
= 0,
405 /* Prefer 8bit, 16bit, 32bit displacement in encoding. */
408 disp_encoding_default
= 0,
414 /* Prefer the REX byte in encoding. */
417 /* Disable instruction size optimization. */
420 /* How to encode vector instructions. */
423 vex_encoding_default
= 0,
431 const char *rep_prefix
;
434 const char *hle_prefix
;
436 /* Have BND prefix. */
437 const char *bnd_prefix
;
439 /* Have NOTRACK prefix. */
440 const char *notrack_prefix
;
443 enum i386_error error
;
446 typedef struct _i386_insn i386_insn
;
448 /* Link RC type with corresponding string, that'll be looked for in
457 static const struct RC_name RC_NamesTable
[] =
459 { rne
, STRING_COMMA_LEN ("rn-sae") },
460 { rd
, STRING_COMMA_LEN ("rd-sae") },
461 { ru
, STRING_COMMA_LEN ("ru-sae") },
462 { rz
, STRING_COMMA_LEN ("rz-sae") },
463 { saeonly
, STRING_COMMA_LEN ("sae") },
466 /* List of chars besides those in app.c:symbol_chars that can start an
467 operand. Used to prevent the scrubber eating vital white-space. */
468 const char extra_symbol_chars
[] = "*%-([{}"
477 #if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
478 && !defined (TE_GNU) \
479 && !defined (TE_LINUX) \
480 && !defined (TE_FreeBSD) \
481 && !defined (TE_DragonFly) \
482 && !defined (TE_NetBSD))
483 /* This array holds the chars that always start a comment. If the
484 pre-processor is disabled, these aren't very useful. The option
485 --divide will remove '/' from this list. */
486 const char *i386_comment_chars
= "#/";
487 #define SVR4_COMMENT_CHARS 1
488 #define PREFIX_SEPARATOR '\\'
491 const char *i386_comment_chars
= "#";
492 #define PREFIX_SEPARATOR '/'
495 /* This array holds the chars that only start a comment at the beginning of
496 a line. If the line seems to have the form '# 123 filename'
497 .line and .file directives will appear in the pre-processed output.
498 Note that input_file.c hand checks for '#' at the beginning of the
499 first line of the input file. This is because the compiler outputs
500 #NO_APP at the beginning of its output.
501 Also note that comments started like this one will always work if
502 '/' isn't otherwise defined. */
503 const char line_comment_chars
[] = "#/";
505 const char line_separator_chars
[] = ";";
/* Chars that can be used to separate mant from exp in floating point
   numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant,
   e.g. 0f12.456 or 0d1.2345e12.  */
const char FLT_CHARS[] = "fFdDxX";
/* Tables for lexical analysis: one 256-entry membership table per
   character class, indexed by (unsigned char) and filled in at
   initialization time elsewhere in this file.  */
static char mnemonic_chars[256];	/* chars valid in a mnemonic */
static char register_chars[256];	/* chars valid in a register name */
static char operand_chars[256];		/* chars valid in an operand */
static char identifier_chars[256];	/* chars valid in an identifier */
static char digit_chars[256];		/* decimal digit chars */
/* Lexical macros: classify a character by consulting the lookup
   tables; is_space_char needs no table since only ' ' qualifies.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
#define is_operand_char(x) (operand_chars[(unsigned char) x])
#define is_register_char(x) (register_chars[(unsigned char) x])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) x])
#define is_digit_char(x) (digit_chars[(unsigned char) x])
/* All non-digit non-letter characters that may occur in an operand.  */
static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).  */
static char save_stack[32];
static char *save_stack_p;	/* points one past the last saved char */
/* Terminate the string at S, remembering the clobbered character.  */
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
/* Undo the most recent END_STRING_AND_SAVE.  */
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
545 /* The instruction we're assembling. */
548 /* Possible templates for current insn. */
549 static const templates
*current_templates
;
551 /* Per instruction expressionS buffers: max displacements & immediates. */
552 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
553 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
555 /* Current operand we are working on. */
556 static int this_operand
= -1;
558 /* We support four different modes. FLAG_CODE variable is used to distinguish
566 static enum flag_code flag_code
;
567 static unsigned int object_64bit
;
568 static unsigned int disallow_64bit_reloc
;
569 static int use_rela_relocations
= 0;
570 /* __tls_get_addr/___tls_get_addr symbol for TLS. */
571 static const char *tls_get_addr
;
573 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
574 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
575 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
577 /* The ELF ABI to use. */
585 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
588 #if defined (TE_PE) || defined (TE_PEP)
589 /* Use big object file format. */
590 static int use_big_obj
= 0;
593 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
594 /* 1 if generating code for a shared library. */
595 static int shared
= 0;
598 /* 1 for intel syntax,
600 static int intel_syntax
= 0;
602 static enum x86_64_isa
604 amd64
= 1, /* AMD64 ISA. */
605 intel64
/* Intel64 ISA. */
608 /* 1 for intel mnemonic,
609 0 if att mnemonic. */
610 static int intel_mnemonic
= !SYSV386_COMPAT
;
612 /* 1 if pseudo registers are permitted. */
613 static int allow_pseudo_reg
= 0;
615 /* 1 if register prefix % not required. */
616 static int allow_naked_reg
= 0;
618 /* 1 if the assembler should add BND prefix for all control-transferring
619 instructions supporting it, even if this prefix wasn't specified
621 static int add_bnd_prefix
= 0;
623 /* 1 if pseudo index register, eiz/riz, is allowed . */
624 static int allow_index_reg
= 0;
626 /* 1 if the assembler should ignore LOCK prefix, even if it was
627 specified explicitly. */
628 static int omit_lock_prefix
= 0;
630 /* 1 if the assembler should encode lfence, mfence, and sfence as
631 "lock addl $0, (%{re}sp)". */
632 static int avoid_fence
= 0;
634 /* 1 if lfence should be inserted after every load. */
635 static int lfence_after_load
= 0;
637 /* Non-zero if lfence should be inserted before indirect branch. */
638 static enum lfence_before_indirect_branch_kind
640 lfence_branch_none
= 0,
641 lfence_branch_register
,
642 lfence_branch_memory
,
645 lfence_before_indirect_branch
;
647 /* Non-zero if lfence should be inserted before ret. */
648 static enum lfence_before_ret_kind
650 lfence_before_ret_none
= 0,
651 lfence_before_ret_not
,
652 lfence_before_ret_or
,
653 lfence_before_ret_shl
657 /* Types of previous instruction is .byte or prefix. */
672 /* 1 if the assembler should generate relax relocations. */
674 static int generate_relax_relocations
675 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
677 static enum check_kind
683 sse_check
, operand_check
= check_warning
;
685 /* Non-zero if branches should be aligned within power of 2 boundary. */
686 static int align_branch_power
= 0;
688 /* Types of branches to align. */
689 enum align_branch_kind
691 align_branch_none
= 0,
692 align_branch_jcc
= 1,
693 align_branch_fused
= 2,
694 align_branch_jmp
= 3,
695 align_branch_call
= 4,
696 align_branch_indirect
= 5,
700 /* Type bits of branches to align. */
701 enum align_branch_bit
703 align_branch_jcc_bit
= 1 << align_branch_jcc
,
704 align_branch_fused_bit
= 1 << align_branch_fused
,
705 align_branch_jmp_bit
= 1 << align_branch_jmp
,
706 align_branch_call_bit
= 1 << align_branch_call
,
707 align_branch_indirect_bit
= 1 << align_branch_indirect
,
708 align_branch_ret_bit
= 1 << align_branch_ret
711 static unsigned int align_branch
= (align_branch_jcc_bit
712 | align_branch_fused_bit
713 | align_branch_jmp_bit
);
715 /* Types of condition jump used by macro-fusion. */
718 mf_jcc_jo
= 0, /* base opcode 0x70 */
719 mf_jcc_jc
, /* base opcode 0x72 */
720 mf_jcc_je
, /* base opcode 0x74 */
721 mf_jcc_jna
, /* base opcode 0x76 */
722 mf_jcc_js
, /* base opcode 0x78 */
723 mf_jcc_jp
, /* base opcode 0x7a */
724 mf_jcc_jl
, /* base opcode 0x7c */
725 mf_jcc_jle
, /* base opcode 0x7e */
728 /* Types of compare flag-modifying instructions used by macro-fusion. */
731 mf_cmp_test_and
, /* test/cmp */
732 mf_cmp_alu_cmp
, /* add/sub/cmp */
733 mf_cmp_incdec
/* inc/dec */
736 /* The maximum padding size for fused jcc. CMP like instruction can
737 be 9 bytes and jcc can be 6 bytes. Leave room just in case for
739 #define MAX_FUSED_JCC_PADDING_SIZE 20
741 /* The maximum number of prefixes added for an instruction. */
742 static unsigned int align_branch_prefix_size
= 5;
745 1. Clear the REX_W bit with register operand if possible.
746 2. Above plus use 128bit vector instruction to clear the full vector
749 static int optimize
= 0;
752 1. Clear the REX_W bit with register operand if possible.
753 2. Above plus use 128bit vector instruction to clear the full vector
755 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
758 static int optimize_for_space
= 0;
760 /* Register prefix used for error message. */
761 static const char *register_prefix
= "%";
763 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
764 leave, push, and pop instructions so that gcc has the same stack
765 frame as in 32 bit mode. */
766 static char stackop_size
= '\0';
768 /* Non-zero to optimize code alignment. */
769 int optimize_align_code
= 1;
771 /* Non-zero to quieten some warnings. */
772 static int quiet_warnings
= 0;
775 static const char *cpu_arch_name
= NULL
;
776 static char *cpu_sub_arch_name
= NULL
;
778 /* CPU feature flags. */
779 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
781 /* If we have selected a cpu we are generating instructions for. */
782 static int cpu_arch_tune_set
= 0;
784 /* Cpu we are generating instructions for. */
785 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
787 /* CPU feature flags of cpu we are generating instructions for. */
788 static i386_cpu_flags cpu_arch_tune_flags
;
790 /* CPU instruction set architecture used. */
791 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
793 /* CPU feature flags of instruction set architecture used. */
794 i386_cpu_flags cpu_arch_isa_flags
;
796 /* If set, conditional jumps are not automatically promoted to handle
797 larger than a byte offset. */
798 static unsigned int no_cond_jump_promotion
= 0;
800 /* Encode SSE instructions with VEX prefix. */
801 static unsigned int sse2avx
;
803 /* Encode scalar AVX instructions with specific vector length. */
810 /* Encode VEX WIG instructions with specific vex.w. */
817 /* Encode scalar EVEX LIG instructions with specific vector length. */
825 /* Encode EVEX WIG instructions with specific evex.w. */
832 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
833 static enum rc_type evexrcig
= rne
;
835 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
836 static symbolS
*GOT_symbol
;
838 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
839 unsigned int x86_dwarf2_return_column
;
841 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
842 int x86_cie_data_alignment
;
844 /* Interface to relax_segment.
845 There are 3 major relax states for 386 jump insns because the
846 different types of jumps add different sizes to frags when we're
847 figuring out what sort of jump to choose to reach a given label.
849 BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
850 branches which are handled by md_estimate_size_before_relax() and
851 i386_generic_table_relax_frag(). */
854 #define UNCOND_JUMP 0
856 #define COND_JUMP86 2
857 #define BRANCH_PADDING 3
858 #define BRANCH_PREFIX 4
859 #define FUSED_JCC_PADDING 5
864 #define SMALL16 (SMALL | CODE16)
866 #define BIG16 (BIG | CODE16)
870 #define INLINE __inline__
876 #define ENCODE_RELAX_STATE(type, size) \
877 ((relax_substateT) (((type) << 2) | (size)))
878 #define TYPE_FROM_RELAX_STATE(s) \
880 #define DISP_SIZE_FROM_RELAX_STATE(s) \
881 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
883 /* This table is used by relax_frag to promote short jumps to long
884 ones where necessary. SMALL (short) jumps may be promoted to BIG
885 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
886 don't allow a short jump in a 32 bit code segment to be promoted to
887 a 16 bit offset jump because it's slower (requires data size
888 prefix), and doesn't work, unless the destination is in the bottom
889 64k of the code segment (The top 16 bits of eip are zeroed). */
891 const relax_typeS md_relax_table
[] =
894 1) most positive reach of this state,
895 2) most negative reach of this state,
896 3) how many bytes this mode will have in the variable part of the frag
897 4) which index into the table to try if we can't fit into this one. */
899 /* UNCOND_JUMP states. */
900 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
901 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
902 /* dword jmp adds 4 bytes to frag:
903 0 extra opcode bytes, 4 displacement bytes. */
905 /* word jmp adds 2 bytes to frag:
906 0 extra opcode bytes, 2 displacement bytes. */
909 /* COND_JUMP states. */
910 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
911 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
912 /* dword conditionals adds 5 bytes to frag:
913 1 extra opcode byte, 4 displacement bytes. */
915 /* word conditionals add 3 bytes to frag:
916 1 extra opcode byte, 2 displacement bytes. */
919 /* COND_JUMP86 states. */
920 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
921 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
922 /* dword conditionals adds 5 bytes to frag:
923 1 extra opcode byte, 4 displacement bytes. */
925 /* word conditionals add 4 bytes to frag:
926 1 displacement byte and a 3 byte long branch insn. */
930 static const arch_entry cpu_arch
[] =
932 /* Do not replace the first two entries - i386_target_format()
933 relies on them being there in this order. */
934 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
935 CPU_GENERIC32_FLAGS
, 0 },
936 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
937 CPU_GENERIC64_FLAGS
, 0 },
938 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
940 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
942 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
944 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
946 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
948 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
950 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
952 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
954 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
955 CPU_PENTIUMPRO_FLAGS
, 0 },
956 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
958 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
960 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
962 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
964 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
965 CPU_NOCONA_FLAGS
, 0 },
966 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
968 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
970 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
971 CPU_CORE2_FLAGS
, 1 },
972 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
973 CPU_CORE2_FLAGS
, 0 },
974 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
975 CPU_COREI7_FLAGS
, 0 },
976 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
978 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
980 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
981 CPU_IAMCU_FLAGS
, 0 },
982 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
984 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
986 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
987 CPU_ATHLON_FLAGS
, 0 },
988 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
990 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
992 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
994 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
995 CPU_AMDFAM10_FLAGS
, 0 },
996 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
997 CPU_BDVER1_FLAGS
, 0 },
998 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
999 CPU_BDVER2_FLAGS
, 0 },
1000 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
1001 CPU_BDVER3_FLAGS
, 0 },
1002 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
1003 CPU_BDVER4_FLAGS
, 0 },
1004 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
1005 CPU_ZNVER1_FLAGS
, 0 },
1006 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER
,
1007 CPU_ZNVER2_FLAGS
, 0 },
1008 { STRING_COMMA_LEN ("znver3"), PROCESSOR_ZNVER
,
1009 CPU_ZNVER3_FLAGS
, 0 },
1010 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
1011 CPU_BTVER1_FLAGS
, 0 },
1012 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
1013 CPU_BTVER2_FLAGS
, 0 },
1014 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
1015 CPU_8087_FLAGS
, 0 },
1016 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
1018 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
1020 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
1022 { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN
,
1023 CPU_CMOV_FLAGS
, 0 },
1024 { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN
,
1025 CPU_FXSR_FLAGS
, 0 },
1026 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
1028 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
1030 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
1031 CPU_SSE2_FLAGS
, 0 },
1032 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
1033 CPU_SSE3_FLAGS
, 0 },
1034 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1035 CPU_SSE4A_FLAGS
, 0 },
1036 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
1037 CPU_SSSE3_FLAGS
, 0 },
1038 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
1039 CPU_SSE4_1_FLAGS
, 0 },
1040 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
1041 CPU_SSE4_2_FLAGS
, 0 },
1042 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
1043 CPU_SSE4_2_FLAGS
, 0 },
1044 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
1046 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
1047 CPU_AVX2_FLAGS
, 0 },
1048 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
1049 CPU_AVX512F_FLAGS
, 0 },
1050 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
1051 CPU_AVX512CD_FLAGS
, 0 },
1052 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
1053 CPU_AVX512ER_FLAGS
, 0 },
1054 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
1055 CPU_AVX512PF_FLAGS
, 0 },
1056 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
1057 CPU_AVX512DQ_FLAGS
, 0 },
1058 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
1059 CPU_AVX512BW_FLAGS
, 0 },
1060 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
1061 CPU_AVX512VL_FLAGS
, 0 },
1062 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
1064 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
1065 CPU_VMFUNC_FLAGS
, 0 },
1066 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
1068 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
1069 CPU_XSAVE_FLAGS
, 0 },
1070 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
1071 CPU_XSAVEOPT_FLAGS
, 0 },
1072 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
1073 CPU_XSAVEC_FLAGS
, 0 },
1074 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
1075 CPU_XSAVES_FLAGS
, 0 },
1076 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
1078 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
1079 CPU_PCLMUL_FLAGS
, 0 },
1080 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
1081 CPU_PCLMUL_FLAGS
, 1 },
1082 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
1083 CPU_FSGSBASE_FLAGS
, 0 },
1084 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
1085 CPU_RDRND_FLAGS
, 0 },
1086 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
1087 CPU_F16C_FLAGS
, 0 },
1088 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
1089 CPU_BMI2_FLAGS
, 0 },
1090 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
1092 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
1093 CPU_FMA4_FLAGS
, 0 },
1094 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
1096 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
1098 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
1099 CPU_MOVBE_FLAGS
, 0 },
1100 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
1101 CPU_CX16_FLAGS
, 0 },
1102 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
1104 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
1105 CPU_LZCNT_FLAGS
, 0 },
1106 { STRING_COMMA_LEN (".popcnt"), PROCESSOR_UNKNOWN
,
1107 CPU_POPCNT_FLAGS
, 0 },
1108 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
1110 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
1112 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
1113 CPU_INVPCID_FLAGS
, 0 },
1114 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
1115 CPU_CLFLUSH_FLAGS
, 0 },
1116 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
1118 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
1119 CPU_SYSCALL_FLAGS
, 0 },
1120 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
1121 CPU_RDTSCP_FLAGS
, 0 },
1122 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
1123 CPU_3DNOW_FLAGS
, 0 },
1124 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
1125 CPU_3DNOWA_FLAGS
, 0 },
1126 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
1127 CPU_PADLOCK_FLAGS
, 0 },
1128 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
1129 CPU_SVME_FLAGS
, 1 },
1130 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
1131 CPU_SVME_FLAGS
, 0 },
1132 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1133 CPU_SSE4A_FLAGS
, 0 },
1134 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
1136 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
1138 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
1140 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
1142 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
1143 CPU_RDSEED_FLAGS
, 0 },
1144 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
1145 CPU_PRFCHW_FLAGS
, 0 },
1146 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
1147 CPU_SMAP_FLAGS
, 0 },
1148 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
1150 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
1152 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
1153 CPU_CLFLUSHOPT_FLAGS
, 0 },
1154 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
1155 CPU_PREFETCHWT1_FLAGS
, 0 },
1156 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
1158 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
1159 CPU_CLWB_FLAGS
, 0 },
1160 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
1161 CPU_AVX512IFMA_FLAGS
, 0 },
1162 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
1163 CPU_AVX512VBMI_FLAGS
, 0 },
1164 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
1165 CPU_AVX512_4FMAPS_FLAGS
, 0 },
1166 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
1167 CPU_AVX512_4VNNIW_FLAGS
, 0 },
1168 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
1169 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
1170 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
1171 CPU_AVX512_VBMI2_FLAGS
, 0 },
1172 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN
,
1173 CPU_AVX512_VNNI_FLAGS
, 0 },
1174 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN
,
1175 CPU_AVX512_BITALG_FLAGS
, 0 },
1176 { STRING_COMMA_LEN (".avx_vnni"), PROCESSOR_UNKNOWN
,
1177 CPU_AVX_VNNI_FLAGS
, 0 },
1178 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
1179 CPU_CLZERO_FLAGS
, 0 },
1180 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
1181 CPU_MWAITX_FLAGS
, 0 },
1182 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
1183 CPU_OSPKE_FLAGS
, 0 },
1184 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
1185 CPU_RDPID_FLAGS
, 0 },
1186 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
1187 CPU_PTWRITE_FLAGS
, 0 },
1188 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN
,
1190 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN
,
1191 CPU_SHSTK_FLAGS
, 0 },
1192 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1193 CPU_GFNI_FLAGS
, 0 },
1194 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN
,
1195 CPU_VAES_FLAGS
, 0 },
1196 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN
,
1197 CPU_VPCLMULQDQ_FLAGS
, 0 },
1198 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN
,
1199 CPU_WBNOINVD_FLAGS
, 0 },
1200 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN
,
1201 CPU_PCONFIG_FLAGS
, 0 },
1202 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN
,
1203 CPU_WAITPKG_FLAGS
, 0 },
1204 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN
,
1205 CPU_CLDEMOTE_FLAGS
, 0 },
1206 { STRING_COMMA_LEN (".amx_int8"), PROCESSOR_UNKNOWN
,
1207 CPU_AMX_INT8_FLAGS
, 0 },
1208 { STRING_COMMA_LEN (".amx_bf16"), PROCESSOR_UNKNOWN
,
1209 CPU_AMX_BF16_FLAGS
, 0 },
1210 { STRING_COMMA_LEN (".amx_tile"), PROCESSOR_UNKNOWN
,
1211 CPU_AMX_TILE_FLAGS
, 0 },
1212 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN
,
1213 CPU_MOVDIRI_FLAGS
, 0 },
1214 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN
,
1215 CPU_MOVDIR64B_FLAGS
, 0 },
1216 { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN
,
1217 CPU_AVX512_BF16_FLAGS
, 0 },
1218 { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN
,
1219 CPU_AVX512_VP2INTERSECT_FLAGS
, 0 },
1220 { STRING_COMMA_LEN (".tdx"), PROCESSOR_UNKNOWN
,
1222 { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN
,
1223 CPU_ENQCMD_FLAGS
, 0 },
1224 { STRING_COMMA_LEN (".serialize"), PROCESSOR_UNKNOWN
,
1225 CPU_SERIALIZE_FLAGS
, 0 },
1226 { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN
,
1227 CPU_RDPRU_FLAGS
, 0 },
1228 { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN
,
1229 CPU_MCOMMIT_FLAGS
, 0 },
1230 { STRING_COMMA_LEN (".sev_es"), PROCESSOR_UNKNOWN
,
1231 CPU_SEV_ES_FLAGS
, 0 },
1232 { STRING_COMMA_LEN (".tsxldtrk"), PROCESSOR_UNKNOWN
,
1233 CPU_TSXLDTRK_FLAGS
, 0 },
1234 { STRING_COMMA_LEN (".kl"), PROCESSOR_UNKNOWN
,
1236 { STRING_COMMA_LEN (".widekl"), PROCESSOR_UNKNOWN
,
1237 CPU_WIDEKL_FLAGS
, 0 },
1238 { STRING_COMMA_LEN (".uintr"), PROCESSOR_UNKNOWN
,
1239 CPU_UINTR_FLAGS
, 0 },
1240 { STRING_COMMA_LEN (".hreset"), PROCESSOR_UNKNOWN
,
1241 CPU_HRESET_FLAGS
, 0 },
1244 static const noarch_entry cpu_noarch
[] =
1246 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1247 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1248 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1249 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1250 { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS
},
1251 { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS
},
1252 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1253 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1254 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1255 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1256 { STRING_COMMA_LEN ("nosse4a"), CPU_ANY_SSE4A_FLAGS
},
1257 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1258 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1259 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1260 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1261 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1262 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1263 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1264 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1265 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1266 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1267 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1268 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1269 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1270 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1271 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1272 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1273 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1274 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1275 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1276 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS
},
1277 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS
},
1278 { STRING_COMMA_LEN ("noavx_vnni"), CPU_ANY_AVX_VNNI_FLAGS
},
1279 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS
},
1280 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS
},
1281 { STRING_COMMA_LEN ("noamx_int8"), CPU_ANY_AMX_INT8_FLAGS
},
1282 { STRING_COMMA_LEN ("noamx_bf16"), CPU_ANY_AMX_BF16_FLAGS
},
1283 { STRING_COMMA_LEN ("noamx_tile"), CPU_ANY_AMX_TILE_FLAGS
},
1284 { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS
},
1285 { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS
},
1286 { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS
},
1287 { STRING_COMMA_LEN ("noavx512_vp2intersect"),
1288 CPU_ANY_AVX512_VP2INTERSECT_FLAGS
},
1289 { STRING_COMMA_LEN ("notdx"), CPU_ANY_TDX_FLAGS
},
1290 { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS
},
1291 { STRING_COMMA_LEN ("noserialize"), CPU_ANY_SERIALIZE_FLAGS
},
1292 { STRING_COMMA_LEN ("notsxldtrk"), CPU_ANY_TSXLDTRK_FLAGS
},
1293 { STRING_COMMA_LEN ("nokl"), CPU_ANY_KL_FLAGS
},
1294 { STRING_COMMA_LEN ("nowidekl"), CPU_ANY_WIDEKL_FLAGS
},
1295 { STRING_COMMA_LEN ("nouintr"), CPU_ANY_UINTR_FLAGS
},
1296 { STRING_COMMA_LEN ("nohreset"), CPU_ANY_HRESET_FLAGS
},
1300 /* Like s_lcomm_internal in gas/read.c but the alignment string
1301 is allowed to be optional. */
1304 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1311 && *input_line_pointer
== ',')
1313 align
= parse_align (needs_align
- 1);
1315 if (align
== (addressT
) -1)
1330 bss_alloc (symbolP
, size
, align
);
1335 pe_lcomm (int needs_align
)
1337 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1341 const pseudo_typeS md_pseudo_table
[] =
1343 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1344 {"align", s_align_bytes
, 0},
1346 {"align", s_align_ptwo
, 0},
1348 {"arch", set_cpu_arch
, 0},
1352 {"lcomm", pe_lcomm
, 1},
1354 {"ffloat", float_cons
, 'f'},
1355 {"dfloat", float_cons
, 'd'},
1356 {"tfloat", float_cons
, 'x'},
1358 {"slong", signed_cons
, 4},
1359 {"noopt", s_ignore
, 0},
1360 {"optim", s_ignore
, 0},
1361 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1362 {"code16", set_code_flag
, CODE_16BIT
},
1363 {"code32", set_code_flag
, CODE_32BIT
},
1365 {"code64", set_code_flag
, CODE_64BIT
},
1367 {"intel_syntax", set_intel_syntax
, 1},
1368 {"att_syntax", set_intel_syntax
, 0},
1369 {"intel_mnemonic", set_intel_mnemonic
, 1},
1370 {"att_mnemonic", set_intel_mnemonic
, 0},
1371 {"allow_index_reg", set_allow_index_reg
, 1},
1372 {"disallow_index_reg", set_allow_index_reg
, 0},
1373 {"sse_check", set_check
, 0},
1374 {"operand_check", set_check
, 1},
1375 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1376 {"largecomm", handle_large_common
, 0},
1378 {"file", dwarf2_directive_file
, 0},
1379 {"loc", dwarf2_directive_loc
, 0},
1380 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1383 {"secrel32", pe_directive_secrel
, 0},
1388 /* For interface with expression (). */
1389 extern char *input_line_pointer
;
1391 /* Hash table for instruction mnemonic lookup. */
1392 static htab_t op_hash
;
1394 /* Hash table for register lookup. */
1395 static htab_t reg_hash
;
1397 /* Various efficient no-op patterns for aligning code labels.
1398 Note: Don't try to assemble the instructions in the comments.
1399 0L and 0w are not legal. */
1400 static const unsigned char f32_1
[] =
1402 static const unsigned char f32_2
[] =
1403 {0x66,0x90}; /* xchg %ax,%ax */
1404 static const unsigned char f32_3
[] =
1405 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1406 static const unsigned char f32_4
[] =
1407 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1408 static const unsigned char f32_6
[] =
1409 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1410 static const unsigned char f32_7
[] =
1411 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1412 static const unsigned char f16_3
[] =
1413 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1414 static const unsigned char f16_4
[] =
1415 {0x8d,0xb4,0x00,0x00}; /* lea 0W(%si),%si */
1416 static const unsigned char jump_disp8
[] =
1417 {0xeb}; /* jmp disp8 */
1418 static const unsigned char jump32_disp32
[] =
1419 {0xe9}; /* jmp disp32 */
1420 static const unsigned char jump16_disp32
[] =
1421 {0x66,0xe9}; /* jmp disp32 */
1422 /* 32-bit NOPs patterns. */
1423 static const unsigned char *const f32_patt
[] = {
1424 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1426 /* 16-bit NOPs patterns. */
1427 static const unsigned char *const f16_patt
[] = {
1428 f32_1
, f32_2
, f16_3
, f16_4
1430 /* nopl (%[re]ax) */
1431 static const unsigned char alt_3
[] =
1433 /* nopl 0(%[re]ax) */
1434 static const unsigned char alt_4
[] =
1435 {0x0f,0x1f,0x40,0x00};
1436 /* nopl 0(%[re]ax,%[re]ax,1) */
1437 static const unsigned char alt_5
[] =
1438 {0x0f,0x1f,0x44,0x00,0x00};
1439 /* nopw 0(%[re]ax,%[re]ax,1) */
1440 static const unsigned char alt_6
[] =
1441 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1442 /* nopl 0L(%[re]ax) */
1443 static const unsigned char alt_7
[] =
1444 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1445 /* nopl 0L(%[re]ax,%[re]ax,1) */
1446 static const unsigned char alt_8
[] =
1447 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1448 /* nopw 0L(%[re]ax,%[re]ax,1) */
1449 static const unsigned char alt_9
[] =
1450 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1451 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1452 static const unsigned char alt_10
[] =
1453 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1454 /* data16 nopw %cs:0L(%eax,%eax,1) */
1455 static const unsigned char alt_11
[] =
1456 {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1457 /* 32-bit and 64-bit NOPs patterns. */
1458 static const unsigned char *const alt_patt
[] = {
1459 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1460 alt_9
, alt_10
, alt_11
1463 /* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
1464 size of a single NOP instruction MAX_SINGLE_NOP_SIZE. */
1467 i386_output_nops (char *where
, const unsigned char *const *patt
,
1468 int count
, int max_single_nop_size
)
1471 /* Place the longer NOP first. */
1474 const unsigned char *nops
;
1476 if (max_single_nop_size
< 1)
1478 as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
1479 max_single_nop_size
);
1483 nops
= patt
[max_single_nop_size
- 1];
1485 /* Use the smaller one if the requested one isn't available. */
1488 max_single_nop_size
--;
1489 nops
= patt
[max_single_nop_size
- 1];
1492 last
= count
% max_single_nop_size
;
1495 for (offset
= 0; offset
< count
; offset
+= max_single_nop_size
)
1496 memcpy (where
+ offset
, nops
, max_single_nop_size
);
1500 nops
= patt
[last
- 1];
1503 /* Use the smaller one plus one-byte NOP if the needed one
1506 nops
= patt
[last
- 1];
1507 memcpy (where
+ offset
, nops
, last
);
1508 where
[offset
+ last
] = *patt
[0];
1511 memcpy (where
+ offset
, nops
, last
);
1516 fits_in_imm7 (offsetT num
)
1518 return (num
& 0x7f) == num
;
1522 fits_in_imm31 (offsetT num
)
1524 return (num
& 0x7fffffff) == num
;
1527 /* Generate COUNT bytes of NOPs to WHERE with the maximum size of a
1528 single NOP instruction LIMIT. */
1531 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1533 const unsigned char *const *patt
= NULL
;
1534 int max_single_nop_size
;
1535 /* Maximum number of NOPs before switching to jump over NOPs. */
1536 int max_number_of_nops
;
1538 switch (fragP
->fr_type
)
1543 case rs_machine_dependent
:
1544 /* Allow NOP padding for jumps and calls. */
1545 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
1546 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
1553 /* We need to decide which NOP sequence to use for 32bit and
1554 64bit. When -mtune= is used:
1556 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1557 PROCESSOR_GENERIC32, f32_patt will be used.
1558 2. For the rest, alt_patt will be used.
1560 When -mtune= isn't used, alt_patt will be used if
1561 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1564 When -march= or .arch is used, we can't use anything beyond
1565 cpu_arch_isa_flags. */
1567 if (flag_code
== CODE_16BIT
)
1570 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1571 /* Limit number of NOPs to 2 in 16-bit mode. */
1572 max_number_of_nops
= 2;
1576 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1578 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1579 switch (cpu_arch_tune
)
1581 case PROCESSOR_UNKNOWN
:
1582 /* We use cpu_arch_isa_flags to check if we SHOULD
1583 optimize with nops. */
1584 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1589 case PROCESSOR_PENTIUM4
:
1590 case PROCESSOR_NOCONA
:
1591 case PROCESSOR_CORE
:
1592 case PROCESSOR_CORE2
:
1593 case PROCESSOR_COREI7
:
1594 case PROCESSOR_L1OM
:
1595 case PROCESSOR_K1OM
:
1596 case PROCESSOR_GENERIC64
:
1598 case PROCESSOR_ATHLON
:
1600 case PROCESSOR_AMDFAM10
:
1602 case PROCESSOR_ZNVER
:
1606 case PROCESSOR_I386
:
1607 case PROCESSOR_I486
:
1608 case PROCESSOR_PENTIUM
:
1609 case PROCESSOR_PENTIUMPRO
:
1610 case PROCESSOR_IAMCU
:
1611 case PROCESSOR_GENERIC32
:
1618 switch (fragP
->tc_frag_data
.tune
)
1620 case PROCESSOR_UNKNOWN
:
1621 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1622 PROCESSOR_UNKNOWN. */
1626 case PROCESSOR_I386
:
1627 case PROCESSOR_I486
:
1628 case PROCESSOR_PENTIUM
:
1629 case PROCESSOR_IAMCU
:
1631 case PROCESSOR_ATHLON
:
1633 case PROCESSOR_AMDFAM10
:
1635 case PROCESSOR_ZNVER
:
1637 case PROCESSOR_GENERIC32
:
1638 /* We use cpu_arch_isa_flags to check if we CAN optimize
1640 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1645 case PROCESSOR_PENTIUMPRO
:
1646 case PROCESSOR_PENTIUM4
:
1647 case PROCESSOR_NOCONA
:
1648 case PROCESSOR_CORE
:
1649 case PROCESSOR_CORE2
:
1650 case PROCESSOR_COREI7
:
1651 case PROCESSOR_L1OM
:
1652 case PROCESSOR_K1OM
:
1653 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1658 case PROCESSOR_GENERIC64
:
1664 if (patt
== f32_patt
)
1666 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1667 /* Limit number of NOPs to 2 for older processors. */
1668 max_number_of_nops
= 2;
1672 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1673 /* Limit number of NOPs to 7 for newer processors. */
1674 max_number_of_nops
= 7;
1679 limit
= max_single_nop_size
;
1681 if (fragP
->fr_type
== rs_fill_nop
)
1683 /* Output NOPs for .nop directive. */
1684 if (limit
> max_single_nop_size
)
1686 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1687 _("invalid single nop size: %d "
1688 "(expect within [0, %d])"),
1689 limit
, max_single_nop_size
);
1693 else if (fragP
->fr_type
!= rs_machine_dependent
)
1694 fragP
->fr_var
= count
;
1696 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1698 /* Generate jump over NOPs. */
1699 offsetT disp
= count
- 2;
1700 if (fits_in_imm7 (disp
))
1702 /* Use "jmp disp8" if possible. */
1704 where
[0] = jump_disp8
[0];
1710 unsigned int size_of_jump
;
1712 if (flag_code
== CODE_16BIT
)
1714 where
[0] = jump16_disp32
[0];
1715 where
[1] = jump16_disp32
[1];
1720 where
[0] = jump32_disp32
[0];
1724 count
-= size_of_jump
+ 4;
1725 if (!fits_in_imm31 (count
))
1727 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1728 _("jump over nop padding out of range"));
1732 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1733 where
+= size_of_jump
+ 4;
1737 /* Generate multiple NOPs. */
1738 i386_output_nops (where
, patt
, count
, limit
);
1742 operand_type_all_zero (const union i386_operand_type
*x
)
1744 switch (ARRAY_SIZE(x
->array
))
1755 return !x
->array
[0];
1762 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1764 switch (ARRAY_SIZE(x
->array
))
1780 x
->bitfield
.class = ClassNone
;
1781 x
->bitfield
.instance
= InstanceNone
;
1785 operand_type_equal (const union i386_operand_type
*x
,
1786 const union i386_operand_type
*y
)
1788 switch (ARRAY_SIZE(x
->array
))
1791 if (x
->array
[2] != y
->array
[2])
1795 if (x
->array
[1] != y
->array
[1])
1799 return x
->array
[0] == y
->array
[0];
1807 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1809 switch (ARRAY_SIZE(x
->array
))
1824 return !x
->array
[0];
1831 cpu_flags_equal (const union i386_cpu_flags
*x
,
1832 const union i386_cpu_flags
*y
)
1834 switch (ARRAY_SIZE(x
->array
))
1837 if (x
->array
[3] != y
->array
[3])
1841 if (x
->array
[2] != y
->array
[2])
1845 if (x
->array
[1] != y
->array
[1])
1849 return x
->array
[0] == y
->array
[0];
1857 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1859 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1860 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1863 static INLINE i386_cpu_flags
1864 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1866 switch (ARRAY_SIZE (x
.array
))
1869 x
.array
[3] &= y
.array
[3];
1872 x
.array
[2] &= y
.array
[2];
1875 x
.array
[1] &= y
.array
[1];
1878 x
.array
[0] &= y
.array
[0];
1886 static INLINE i386_cpu_flags
1887 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1889 switch (ARRAY_SIZE (x
.array
))
1892 x
.array
[3] |= y
.array
[3];
1895 x
.array
[2] |= y
.array
[2];
1898 x
.array
[1] |= y
.array
[1];
1901 x
.array
[0] |= y
.array
[0];
1909 static INLINE i386_cpu_flags
1910 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1912 switch (ARRAY_SIZE (x
.array
))
1915 x
.array
[3] &= ~y
.array
[3];
1918 x
.array
[2] &= ~y
.array
[2];
1921 x
.array
[1] &= ~y
.array
[1];
1924 x
.array
[0] &= ~y
.array
[0];
1932 static const i386_cpu_flags avx512
= CPU_ANY_AVX512F_FLAGS
;
1934 #define CPU_FLAGS_ARCH_MATCH 0x1
1935 #define CPU_FLAGS_64BIT_MATCH 0x2
1937 #define CPU_FLAGS_PERFECT_MATCH \
1938 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1940 /* Return CPU flags match bits. */
1943 cpu_flags_match (const insn_template
*t
)
1945 i386_cpu_flags x
= t
->cpu_flags
;
1946 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1948 x
.bitfield
.cpu64
= 0;
1949 x
.bitfield
.cpuno64
= 0;
1951 if (cpu_flags_all_zero (&x
))
1953 /* This instruction is available on all archs. */
1954 match
|= CPU_FLAGS_ARCH_MATCH
;
1958 /* This instruction is available only on some archs. */
1959 i386_cpu_flags cpu
= cpu_arch_flags
;
1961 /* AVX512VL is no standalone feature - match it and then strip it. */
1962 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1964 x
.bitfield
.cpuavx512vl
= 0;
1966 cpu
= cpu_flags_and (x
, cpu
);
1967 if (!cpu_flags_all_zero (&cpu
))
1969 if (x
.bitfield
.cpuavx
)
1971 /* We need to check a few extra flags with AVX. */
1972 if (cpu
.bitfield
.cpuavx
1973 && (!t
->opcode_modifier
.sse2avx
1974 || (sse2avx
&& !i
.prefix
[DATA_PREFIX
]))
1975 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1976 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1977 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1978 match
|= CPU_FLAGS_ARCH_MATCH
;
1980 else if (x
.bitfield
.cpuavx512f
)
1982 /* We need to check a few extra flags with AVX512F. */
1983 if (cpu
.bitfield
.cpuavx512f
1984 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1985 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1986 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1987 match
|= CPU_FLAGS_ARCH_MATCH
;
1990 match
|= CPU_FLAGS_ARCH_MATCH
;
1996 static INLINE i386_operand_type
1997 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1999 if (x
.bitfield
.class != y
.bitfield
.class)
2000 x
.bitfield
.class = ClassNone
;
2001 if (x
.bitfield
.instance
!= y
.bitfield
.instance
)
2002 x
.bitfield
.instance
= InstanceNone
;
2004 switch (ARRAY_SIZE (x
.array
))
2007 x
.array
[2] &= y
.array
[2];
2010 x
.array
[1] &= y
.array
[1];
2013 x
.array
[0] &= y
.array
[0];
2021 static INLINE i386_operand_type
2022 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
2024 gas_assert (y
.bitfield
.class == ClassNone
);
2025 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2027 switch (ARRAY_SIZE (x
.array
))
2030 x
.array
[2] &= ~y
.array
[2];
2033 x
.array
[1] &= ~y
.array
[1];
2036 x
.array
[0] &= ~y
.array
[0];
2044 static INLINE i386_operand_type
2045 operand_type_or (i386_operand_type x
, i386_operand_type y
)
2047 gas_assert (x
.bitfield
.class == ClassNone
||
2048 y
.bitfield
.class == ClassNone
||
2049 x
.bitfield
.class == y
.bitfield
.class);
2050 gas_assert (x
.bitfield
.instance
== InstanceNone
||
2051 y
.bitfield
.instance
== InstanceNone
||
2052 x
.bitfield
.instance
== y
.bitfield
.instance
);
2054 switch (ARRAY_SIZE (x
.array
))
2057 x
.array
[2] |= y
.array
[2];
2060 x
.array
[1] |= y
.array
[1];
2063 x
.array
[0] |= y
.array
[0];
2071 static INLINE i386_operand_type
2072 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
2074 gas_assert (y
.bitfield
.class == ClassNone
);
2075 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2077 switch (ARRAY_SIZE (x
.array
))
2080 x
.array
[2] ^= y
.array
[2];
2083 x
.array
[1] ^= y
.array
[1];
2086 x
.array
[0] ^= y
.array
[0];
2094 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
2095 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
2096 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
2097 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
2098 static const i386_operand_type anydisp
= OPERAND_TYPE_ANYDISP
;
2099 static const i386_operand_type anyimm
= OPERAND_TYPE_ANYIMM
;
2100 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
2101 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
2102 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
2103 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
2104 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
2105 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
2106 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
2107 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
2108 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
2109 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
2110 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
2121 operand_type_check (i386_operand_type t
, enum operand_type c
)
2126 return t
.bitfield
.class == Reg
;
2129 return (t
.bitfield
.imm8
2133 || t
.bitfield
.imm32s
2134 || t
.bitfield
.imm64
);
2137 return (t
.bitfield
.disp8
2138 || t
.bitfield
.disp16
2139 || t
.bitfield
.disp32
2140 || t
.bitfield
.disp32s
2141 || t
.bitfield
.disp64
);
2144 return (t
.bitfield
.disp8
2145 || t
.bitfield
.disp16
2146 || t
.bitfield
.disp32
2147 || t
.bitfield
.disp32s
2148 || t
.bitfield
.disp64
2149 || t
.bitfield
.baseindex
);
2158 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
2159 between operand GIVEN and operand WANTED for instruction template T. */
2162 match_operand_size (const insn_template
*t
, unsigned int wanted
,
2165 return !((i
.types
[given
].bitfield
.byte
2166 && !t
->operand_types
[wanted
].bitfield
.byte
)
2167 || (i
.types
[given
].bitfield
.word
2168 && !t
->operand_types
[wanted
].bitfield
.word
)
2169 || (i
.types
[given
].bitfield
.dword
2170 && !t
->operand_types
[wanted
].bitfield
.dword
)
2171 || (i
.types
[given
].bitfield
.qword
2172 && !t
->operand_types
[wanted
].bitfield
.qword
)
2173 || (i
.types
[given
].bitfield
.tbyte
2174 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
2177 /* Return 1 if there is no conflict in SIMD register between operand
2178 GIVEN and operand WANTED for instruction template T. */
2181 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2184 return !((i
.types
[given
].bitfield
.xmmword
2185 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2186 || (i
.types
[given
].bitfield
.ymmword
2187 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2188 || (i
.types
[given
].bitfield
.zmmword
2189 && !t
->operand_types
[wanted
].bitfield
.zmmword
)
2190 || (i
.types
[given
].bitfield
.tmmword
2191 && !t
->operand_types
[wanted
].bitfield
.tmmword
));
2194 /* Return 1 if there is no conflict in any size between operand GIVEN
2195 and operand WANTED for instruction template T. */
2198 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2201 return (match_operand_size (t
, wanted
, given
)
2202 && !((i
.types
[given
].bitfield
.unspecified
2203 && !i
.broadcast
.type
2204 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2205 || (i
.types
[given
].bitfield
.fword
2206 && !t
->operand_types
[wanted
].bitfield
.fword
)
2207 /* For scalar opcode templates to allow register and memory
2208 operands at the same time, some special casing is needed
2209 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2210 down-conversion vpmov*. */
2211 || ((t
->operand_types
[wanted
].bitfield
.class == RegSIMD
2212 && t
->operand_types
[wanted
].bitfield
.byte
2213 + t
->operand_types
[wanted
].bitfield
.word
2214 + t
->operand_types
[wanted
].bitfield
.dword
2215 + t
->operand_types
[wanted
].bitfield
.qword
2216 > !!t
->opcode_modifier
.broadcast
)
2217 ? (i
.types
[given
].bitfield
.xmmword
2218 || i
.types
[given
].bitfield
.ymmword
2219 || i
.types
[given
].bitfield
.zmmword
)
2220 : !match_simd_size(t
, wanted
, given
))));
2223 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2224 operands for instruction template T, and it has MATCH_REVERSE set if there
2225 is no size conflict on any operands for the template with operands reversed
2226 (and the template allows for reversing in the first place). */
2228 #define MATCH_STRAIGHT 1
2229 #define MATCH_REVERSE 2
2231 static INLINE
unsigned int
2232 operand_size_match (const insn_template
*t
)
2234 unsigned int j
, match
= MATCH_STRAIGHT
;
2236 /* Don't check non-absolute jump instructions. */
2237 if (t
->opcode_modifier
.jump
2238 && t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
2241 /* Check memory and accumulator operand size. */
2242 for (j
= 0; j
< i
.operands
; j
++)
2244 if (i
.types
[j
].bitfield
.class != Reg
2245 && i
.types
[j
].bitfield
.class != RegSIMD
2246 && t
->opcode_modifier
.anysize
)
2249 if (t
->operand_types
[j
].bitfield
.class == Reg
2250 && !match_operand_size (t
, j
, j
))
2256 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2257 && !match_simd_size (t
, j
, j
))
2263 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2264 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2270 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
2277 if (!t
->opcode_modifier
.d
)
2281 i
.error
= operand_size_mismatch
;
2285 /* Check reverse. */
2286 gas_assert (i
.operands
>= 2 && i
.operands
<= 3);
2288 for (j
= 0; j
< i
.operands
; j
++)
2290 unsigned int given
= i
.operands
- j
- 1;
2292 if (t
->operand_types
[j
].bitfield
.class == Reg
2293 && !match_operand_size (t
, j
, given
))
2296 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2297 && !match_simd_size (t
, j
, given
))
2300 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2301 && (!match_operand_size (t
, j
, given
)
2302 || !match_simd_size (t
, j
, given
)))
2305 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2309 return match
| MATCH_REVERSE
;
2313 operand_type_match (i386_operand_type overlap
,
2314 i386_operand_type given
)
2316 i386_operand_type temp
= overlap
;
2318 temp
.bitfield
.unspecified
= 0;
2319 temp
.bitfield
.byte
= 0;
2320 temp
.bitfield
.word
= 0;
2321 temp
.bitfield
.dword
= 0;
2322 temp
.bitfield
.fword
= 0;
2323 temp
.bitfield
.qword
= 0;
2324 temp
.bitfield
.tbyte
= 0;
2325 temp
.bitfield
.xmmword
= 0;
2326 temp
.bitfield
.ymmword
= 0;
2327 temp
.bitfield
.zmmword
= 0;
2328 temp
.bitfield
.tmmword
= 0;
2329 if (operand_type_all_zero (&temp
))
2332 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
)
2336 i
.error
= operand_type_mismatch
;
2340 /* If given types g0 and g1 are registers they must be of the same type
2341 unless the expected operand type register overlap is null.
2342 Some Intel syntax memory operand size checking also happens here. */
2345 operand_type_register_match (i386_operand_type g0
,
2346 i386_operand_type t0
,
2347 i386_operand_type g1
,
2348 i386_operand_type t1
)
2350 if (g0
.bitfield
.class != Reg
2351 && g0
.bitfield
.class != RegSIMD
2352 && (!operand_type_check (g0
, anymem
)
2353 || g0
.bitfield
.unspecified
2354 || (t0
.bitfield
.class != Reg
2355 && t0
.bitfield
.class != RegSIMD
)))
2358 if (g1
.bitfield
.class != Reg
2359 && g1
.bitfield
.class != RegSIMD
2360 && (!operand_type_check (g1
, anymem
)
2361 || g1
.bitfield
.unspecified
2362 || (t1
.bitfield
.class != Reg
2363 && t1
.bitfield
.class != RegSIMD
)))
2366 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2367 && g0
.bitfield
.word
== g1
.bitfield
.word
2368 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2369 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2370 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2371 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2372 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2375 if (!(t0
.bitfield
.byte
& t1
.bitfield
.byte
)
2376 && !(t0
.bitfield
.word
& t1
.bitfield
.word
)
2377 && !(t0
.bitfield
.dword
& t1
.bitfield
.dword
)
2378 && !(t0
.bitfield
.qword
& t1
.bitfield
.qword
)
2379 && !(t0
.bitfield
.xmmword
& t1
.bitfield
.xmmword
)
2380 && !(t0
.bitfield
.ymmword
& t1
.bitfield
.ymmword
)
2381 && !(t0
.bitfield
.zmmword
& t1
.bitfield
.zmmword
))
2384 i
.error
= register_type_mismatch
;
2389 static INLINE
unsigned int
2390 register_number (const reg_entry
*r
)
2392 unsigned int nr
= r
->reg_num
;
2394 if (r
->reg_flags
& RegRex
)
2397 if (r
->reg_flags
& RegVRex
)
2403 static INLINE
unsigned int
2404 mode_from_disp_size (i386_operand_type t
)
2406 if (t
.bitfield
.disp8
)
2408 else if (t
.bitfield
.disp16
2409 || t
.bitfield
.disp32
2410 || t
.bitfield
.disp32s
)
2417 fits_in_signed_byte (addressT num
)
2419 return num
+ 0x80 <= 0xff;
2423 fits_in_unsigned_byte (addressT num
)
2429 fits_in_unsigned_word (addressT num
)
2431 return num
<= 0xffff;
2435 fits_in_signed_word (addressT num
)
2437 return num
+ 0x8000 <= 0xffff;
2441 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2446 return num
+ 0x80000000 <= 0xffffffff;
2448 } /* fits_in_signed_long() */
2451 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2456 return num
<= 0xffffffff;
2458 } /* fits_in_unsigned_long() */
2460 static INLINE valueT
extend_to_32bit_address (addressT num
)
2463 if (fits_in_unsigned_long(num
))
2464 return (num
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2466 if (!fits_in_signed_long (num
))
2467 return num
& 0xffffffff;
2474 fits_in_disp8 (offsetT num
)
2476 int shift
= i
.memshift
;
2482 mask
= (1 << shift
) - 1;
2484 /* Return 0 if NUM isn't properly aligned. */
2488 /* Check if NUM will fit in 8bit after shift. */
2489 return fits_in_signed_byte (num
>> shift
);
2493 fits_in_imm4 (offsetT num
)
2495 return (num
& 0xf) == num
;
2498 static i386_operand_type
2499 smallest_imm_type (offsetT num
)
2501 i386_operand_type t
;
2503 operand_type_set (&t
, 0);
2504 t
.bitfield
.imm64
= 1;
2506 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2508 /* This code is disabled on the 486 because all the Imm1 forms
2509 in the opcode table are slower on the i486. They're the
2510 versions with the implicitly specified single-position
2511 displacement, which has another syntax if you really want to
2513 t
.bitfield
.imm1
= 1;
2514 t
.bitfield
.imm8
= 1;
2515 t
.bitfield
.imm8s
= 1;
2516 t
.bitfield
.imm16
= 1;
2517 t
.bitfield
.imm32
= 1;
2518 t
.bitfield
.imm32s
= 1;
2520 else if (fits_in_signed_byte (num
))
2522 t
.bitfield
.imm8
= 1;
2523 t
.bitfield
.imm8s
= 1;
2524 t
.bitfield
.imm16
= 1;
2525 t
.bitfield
.imm32
= 1;
2526 t
.bitfield
.imm32s
= 1;
2528 else if (fits_in_unsigned_byte (num
))
2530 t
.bitfield
.imm8
= 1;
2531 t
.bitfield
.imm16
= 1;
2532 t
.bitfield
.imm32
= 1;
2533 t
.bitfield
.imm32s
= 1;
2535 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2537 t
.bitfield
.imm16
= 1;
2538 t
.bitfield
.imm32
= 1;
2539 t
.bitfield
.imm32s
= 1;
2541 else if (fits_in_signed_long (num
))
2543 t
.bitfield
.imm32
= 1;
2544 t
.bitfield
.imm32s
= 1;
2546 else if (fits_in_unsigned_long (num
))
2547 t
.bitfield
.imm32
= 1;
2553 offset_in_range (offsetT val
, int size
)
2559 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2560 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2561 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2563 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2568 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2570 char buf1
[40], buf2
[40];
2572 bfd_sprintf_vma (stdoutput
, buf1
, val
);
2573 bfd_sprintf_vma (stdoutput
, buf2
, val
& mask
);
2574 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2589 a. PREFIX_EXIST if attempting to add a prefix where one from the
2590 same class already exists.
2591 b. PREFIX_LOCK if lock prefix is added.
2592 c. PREFIX_REP if rep/repne prefix is added.
2593 d. PREFIX_DS if ds prefix is added.
2594 e. PREFIX_OTHER if other prefix is added.
2597 static enum PREFIX_GROUP
2598 add_prefix (unsigned int prefix
)
2600 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2603 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2604 && flag_code
== CODE_64BIT
)
2606 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2607 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2608 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2609 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2620 case DS_PREFIX_OPCODE
:
2623 case CS_PREFIX_OPCODE
:
2624 case ES_PREFIX_OPCODE
:
2625 case FS_PREFIX_OPCODE
:
2626 case GS_PREFIX_OPCODE
:
2627 case SS_PREFIX_OPCODE
:
2631 case REPNE_PREFIX_OPCODE
:
2632 case REPE_PREFIX_OPCODE
:
2637 case LOCK_PREFIX_OPCODE
:
2646 case ADDR_PREFIX_OPCODE
:
2650 case DATA_PREFIX_OPCODE
:
2654 if (i
.prefix
[q
] != 0)
2662 i
.prefix
[q
] |= prefix
;
2665 as_bad (_("same type of prefix used twice"));
2671 update_code_flag (int value
, int check
)
2673 PRINTF_LIKE ((*as_error
));
2675 flag_code
= (enum flag_code
) value
;
2676 if (flag_code
== CODE_64BIT
)
2678 cpu_arch_flags
.bitfield
.cpu64
= 1;
2679 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2683 cpu_arch_flags
.bitfield
.cpu64
= 0;
2684 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2686 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2689 as_error
= as_fatal
;
2692 (*as_error
) (_("64bit mode not supported on `%s'."),
2693 cpu_arch_name
? cpu_arch_name
: default_arch
);
2695 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2698 as_error
= as_fatal
;
2701 (*as_error
) (_("32bit mode not supported on `%s'."),
2702 cpu_arch_name
? cpu_arch_name
: default_arch
);
2704 stackop_size
= '\0';
2708 set_code_flag (int value
)
2710 update_code_flag (value
, 0);
2714 set_16bit_gcc_code_flag (int new_code_flag
)
2716 flag_code
= (enum flag_code
) new_code_flag
;
2717 if (flag_code
!= CODE_16BIT
)
2719 cpu_arch_flags
.bitfield
.cpu64
= 0;
2720 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2721 stackop_size
= LONG_MNEM_SUFFIX
;
2725 set_intel_syntax (int syntax_flag
)
2727 /* Find out if register prefixing is specified. */
2728 int ask_naked_reg
= 0;
2731 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2734 int e
= get_symbol_name (&string
);
2736 if (strcmp (string
, "prefix") == 0)
2738 else if (strcmp (string
, "noprefix") == 0)
2741 as_bad (_("bad argument to syntax directive."));
2742 (void) restore_line_pointer (e
);
2744 demand_empty_rest_of_line ();
2746 intel_syntax
= syntax_flag
;
2748 if (ask_naked_reg
== 0)
2749 allow_naked_reg
= (intel_syntax
2750 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2752 allow_naked_reg
= (ask_naked_reg
< 0);
2754 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2756 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2757 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2758 register_prefix
= allow_naked_reg
? "" : "%";
2762 set_intel_mnemonic (int mnemonic_flag
)
2764 intel_mnemonic
= mnemonic_flag
;
2768 set_allow_index_reg (int flag
)
2770 allow_index_reg
= flag
;
2774 set_check (int what
)
2776 enum check_kind
*kind
;
2781 kind
= &operand_check
;
2792 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2795 int e
= get_symbol_name (&string
);
2797 if (strcmp (string
, "none") == 0)
2799 else if (strcmp (string
, "warning") == 0)
2800 *kind
= check_warning
;
2801 else if (strcmp (string
, "error") == 0)
2802 *kind
= check_error
;
2804 as_bad (_("bad argument to %s_check directive."), str
);
2805 (void) restore_line_pointer (e
);
2808 as_bad (_("missing argument for %s_check directive"), str
);
2810 demand_empty_rest_of_line ();
2814 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2815 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2817 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2818 static const char *arch
;
2820 /* Intel LIOM is only supported on ELF. */
2826 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2827 use default_arch. */
2828 arch
= cpu_arch_name
;
2830 arch
= default_arch
;
2833 /* If we are targeting Intel MCU, we must enable it. */
2834 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_IAMCU
2835 || new_flag
.bitfield
.cpuiamcu
)
2838 /* If we are targeting Intel L1OM, we must enable it. */
2839 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2840 || new_flag
.bitfield
.cpul1om
)
2843 /* If we are targeting Intel K1OM, we must enable it. */
2844 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2845 || new_flag
.bitfield
.cpuk1om
)
2848 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2853 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2857 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2860 int e
= get_symbol_name (&string
);
2862 i386_cpu_flags flags
;
2864 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2866 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2868 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2872 cpu_arch_name
= cpu_arch
[j
].name
;
2873 cpu_sub_arch_name
= NULL
;
2874 cpu_arch_flags
= cpu_arch
[j
].flags
;
2875 if (flag_code
== CODE_64BIT
)
2877 cpu_arch_flags
.bitfield
.cpu64
= 1;
2878 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2882 cpu_arch_flags
.bitfield
.cpu64
= 0;
2883 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2885 cpu_arch_isa
= cpu_arch
[j
].type
;
2886 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2887 if (!cpu_arch_tune_set
)
2889 cpu_arch_tune
= cpu_arch_isa
;
2890 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2895 flags
= cpu_flags_or (cpu_arch_flags
,
2898 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2900 if (cpu_sub_arch_name
)
2902 char *name
= cpu_sub_arch_name
;
2903 cpu_sub_arch_name
= concat (name
,
2905 (const char *) NULL
);
2909 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2910 cpu_arch_flags
= flags
;
2911 cpu_arch_isa_flags
= flags
;
2915 = cpu_flags_or (cpu_arch_isa_flags
,
2917 (void) restore_line_pointer (e
);
2918 demand_empty_rest_of_line ();
2923 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2925 /* Disable an ISA extension. */
2926 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2927 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2929 flags
= cpu_flags_and_not (cpu_arch_flags
,
2930 cpu_noarch
[j
].flags
);
2931 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2933 if (cpu_sub_arch_name
)
2935 char *name
= cpu_sub_arch_name
;
2936 cpu_sub_arch_name
= concat (name
, string
,
2937 (const char *) NULL
);
2941 cpu_sub_arch_name
= xstrdup (string
);
2942 cpu_arch_flags
= flags
;
2943 cpu_arch_isa_flags
= flags
;
2945 (void) restore_line_pointer (e
);
2946 demand_empty_rest_of_line ();
2950 j
= ARRAY_SIZE (cpu_arch
);
2953 if (j
>= ARRAY_SIZE (cpu_arch
))
2954 as_bad (_("no such architecture: `%s'"), string
);
2956 *input_line_pointer
= e
;
2959 as_bad (_("missing cpu architecture"));
2961 no_cond_jump_promotion
= 0;
2962 if (*input_line_pointer
== ','
2963 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2968 ++input_line_pointer
;
2969 e
= get_symbol_name (&string
);
2971 if (strcmp (string
, "nojumps") == 0)
2972 no_cond_jump_promotion
= 1;
2973 else if (strcmp (string
, "jumps") == 0)
2976 as_bad (_("no such architecture modifier: `%s'"), string
);
2978 (void) restore_line_pointer (e
);
2981 demand_empty_rest_of_line ();
2984 enum bfd_architecture
2987 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2989 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2990 || flag_code
!= CODE_64BIT
)
2991 as_fatal (_("Intel L1OM is 64bit ELF only"));
2992 return bfd_arch_l1om
;
2994 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2996 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2997 || flag_code
!= CODE_64BIT
)
2998 as_fatal (_("Intel K1OM is 64bit ELF only"));
2999 return bfd_arch_k1om
;
3001 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
3003 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
3004 || flag_code
== CODE_64BIT
)
3005 as_fatal (_("Intel MCU is 32bit ELF only"));
3006 return bfd_arch_iamcu
;
3009 return bfd_arch_i386
;
3015 if (startswith (default_arch
, "x86_64"))
3017 if (cpu_arch_isa
== PROCESSOR_L1OM
)
3019 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
3020 || default_arch
[6] != '\0')
3021 as_fatal (_("Intel L1OM is 64bit ELF only"));
3022 return bfd_mach_l1om
;
3024 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
3026 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
3027 || default_arch
[6] != '\0')
3028 as_fatal (_("Intel K1OM is 64bit ELF only"));
3029 return bfd_mach_k1om
;
3031 else if (default_arch
[6] == '\0')
3032 return bfd_mach_x86_64
;
3034 return bfd_mach_x64_32
;
3036 else if (!strcmp (default_arch
, "i386")
3037 || !strcmp (default_arch
, "iamcu"))
3039 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
3041 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
3042 as_fatal (_("Intel MCU is 32bit ELF only"));
3043 return bfd_mach_i386_iamcu
;
3046 return bfd_mach_i386_i386
;
3049 as_fatal (_("unknown architecture"));
3055 /* Support pseudo prefixes like {disp32}. */
3056 lex_type
['{'] = LEX_BEGIN_NAME
;
3058 /* Initialize op_hash hash table. */
3059 op_hash
= str_htab_create ();
3062 const insn_template
*optab
;
3063 templates
*core_optab
;
3065 /* Setup for loop. */
3067 core_optab
= XNEW (templates
);
3068 core_optab
->start
= optab
;
3073 if (optab
->name
== NULL
3074 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
3076 /* different name --> ship out current template list;
3077 add to hash table; & begin anew. */
3078 core_optab
->end
= optab
;
3079 if (str_hash_insert (op_hash
, (optab
- 1)->name
, core_optab
, 0))
3080 as_fatal (_("duplicate %s"), (optab
- 1)->name
);
3082 if (optab
->name
== NULL
)
3084 core_optab
= XNEW (templates
);
3085 core_optab
->start
= optab
;
3090 /* Initialize reg_hash hash table. */
3091 reg_hash
= str_htab_create ();
3093 const reg_entry
*regtab
;
3094 unsigned int regtab_size
= i386_regtab_size
;
3096 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
3098 switch (regtab
->reg_type
.bitfield
.class)
3101 if (regtab
->reg_type
.bitfield
.dword
)
3103 if (regtab
->reg_type
.bitfield
.instance
== Accum
)
3106 else if (regtab
->reg_type
.bitfield
.tbyte
)
3108 /* There's no point inserting st(<N>) in the hash table, as
3109 parentheses aren't included in register_chars[] anyway. */
3110 if (regtab
->reg_type
.bitfield
.instance
!= Accum
)
3117 switch (regtab
->reg_num
)
3119 case 0: reg_es
= regtab
; break;
3120 case 2: reg_ss
= regtab
; break;
3121 case 3: reg_ds
= regtab
; break;
3126 if (!regtab
->reg_num
)
3131 if (str_hash_insert (reg_hash
, regtab
->reg_name
, regtab
, 0) != NULL
)
3132 as_fatal (_("duplicate %s"), regtab
->reg_name
);
3136 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3141 for (c
= 0; c
< 256; c
++)
3146 mnemonic_chars
[c
] = c
;
3147 register_chars
[c
] = c
;
3148 operand_chars
[c
] = c
;
3150 else if (ISLOWER (c
))
3152 mnemonic_chars
[c
] = c
;
3153 register_chars
[c
] = c
;
3154 operand_chars
[c
] = c
;
3156 else if (ISUPPER (c
))
3158 mnemonic_chars
[c
] = TOLOWER (c
);
3159 register_chars
[c
] = mnemonic_chars
[c
];
3160 operand_chars
[c
] = c
;
3162 else if (c
== '{' || c
== '}')
3164 mnemonic_chars
[c
] = c
;
3165 operand_chars
[c
] = c
;
3167 #ifdef SVR4_COMMENT_CHARS
3168 else if (c
== '\\' && strchr (i386_comment_chars
, '/'))
3169 operand_chars
[c
] = c
;
3172 if (ISALPHA (c
) || ISDIGIT (c
))
3173 identifier_chars
[c
] = c
;
3176 identifier_chars
[c
] = c
;
3177 operand_chars
[c
] = c
;
3182 identifier_chars
['@'] = '@';
3185 identifier_chars
['?'] = '?';
3186 operand_chars
['?'] = '?';
3188 digit_chars
['-'] = '-';
3189 mnemonic_chars
['_'] = '_';
3190 mnemonic_chars
['-'] = '-';
3191 mnemonic_chars
['.'] = '.';
3192 identifier_chars
['_'] = '_';
3193 identifier_chars
['.'] = '.';
3195 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
3196 operand_chars
[(unsigned char) *p
] = *p
;
3199 if (flag_code
== CODE_64BIT
)
3201 #if defined (OBJ_COFF) && defined (TE_PE)
3202 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
3205 x86_dwarf2_return_column
= 16;
3207 x86_cie_data_alignment
= -8;
3211 x86_dwarf2_return_column
= 8;
3212 x86_cie_data_alignment
= -4;
3215 /* NB: FUSED_JCC_PADDING frag must have sufficient room so that it
3216 can be turned into BRANCH_PREFIX frag. */
3217 if (align_branch_prefix_size
> MAX_FUSED_JCC_PADDING_SIZE
)
3222 i386_print_statistics (FILE *file
)
3224 htab_print_statistics (file
, "i386 opcode", op_hash
);
3225 htab_print_statistics (file
, "i386 register", reg_hash
);
3230 /* Debugging routines for md_assemble. */
3231 static void pte (insn_template
*);
3232 static void pt (i386_operand_type
);
3233 static void pe (expressionS
*);
3234 static void ps (symbolS
*);
3237 pi (const char *line
, i386_insn
*x
)
3241 fprintf (stdout
, "%s: template ", line
);
3243 fprintf (stdout
, " address: base %s index %s scale %x\n",
3244 x
->base_reg
? x
->base_reg
->reg_name
: "none",
3245 x
->index_reg
? x
->index_reg
->reg_name
: "none",
3246 x
->log2_scale_factor
);
3247 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
3248 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
3249 fprintf (stdout
, " sib: base %x index %x scale %x\n",
3250 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
3251 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
3252 (x
->rex
& REX_W
) != 0,
3253 (x
->rex
& REX_R
) != 0,
3254 (x
->rex
& REX_X
) != 0,
3255 (x
->rex
& REX_B
) != 0);
3256 for (j
= 0; j
< x
->operands
; j
++)
3258 fprintf (stdout
, " #%d: ", j
+ 1);
3260 fprintf (stdout
, "\n");
3261 if (x
->types
[j
].bitfield
.class == Reg
3262 || x
->types
[j
].bitfield
.class == RegMMX
3263 || x
->types
[j
].bitfield
.class == RegSIMD
3264 || x
->types
[j
].bitfield
.class == RegMask
3265 || x
->types
[j
].bitfield
.class == SReg
3266 || x
->types
[j
].bitfield
.class == RegCR
3267 || x
->types
[j
].bitfield
.class == RegDR
3268 || x
->types
[j
].bitfield
.class == RegTR
3269 || x
->types
[j
].bitfield
.class == RegBND
)
3270 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
3271 if (operand_type_check (x
->types
[j
], imm
))
3273 if (operand_type_check (x
->types
[j
], disp
))
3274 pe (x
->op
[j
].disps
);
3279 pte (insn_template
*t
)
3281 static const unsigned char opc_pfx
[] = { 0, 0x66, 0xf3, 0xf2 };
3282 static const char *const opc_spc
[] = {
3283 NULL
, "0f", "0f38", "0f3a", NULL
, NULL
, NULL
, NULL
,
3284 "XOP08", "XOP09", "XOP0A",
3288 fprintf (stdout
, " %d operands ", t
->operands
);
3289 if (opc_pfx
[t
->opcode_modifier
.opcodeprefix
])
3290 fprintf (stdout
, "pfx %x ", opc_pfx
[t
->opcode_modifier
.opcodeprefix
]);
3291 if (opc_spc
[t
->opcode_modifier
.opcodespace
])
3292 fprintf (stdout
, "space %s ", opc_spc
[t
->opcode_modifier
.opcodespace
]);
3293 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
3294 if (t
->extension_opcode
!= None
)
3295 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
3296 if (t
->opcode_modifier
.d
)
3297 fprintf (stdout
, "D");
3298 if (t
->opcode_modifier
.w
)
3299 fprintf (stdout
, "W");
3300 fprintf (stdout
, "\n");
3301 for (j
= 0; j
< t
->operands
; j
++)
3303 fprintf (stdout
, " #%d type ", j
+ 1);
3304 pt (t
->operand_types
[j
]);
3305 fprintf (stdout
, "\n");
3312 fprintf (stdout
, " operation %d\n", e
->X_op
);
3313 fprintf (stdout
, " add_number %" BFD_VMA_FMT
"d (%" BFD_VMA_FMT
"x)\n",
3314 e
->X_add_number
, e
->X_add_number
);
3315 if (e
->X_add_symbol
)
3317 fprintf (stdout
, " add_symbol ");
3318 ps (e
->X_add_symbol
);
3319 fprintf (stdout
, "\n");
3323 fprintf (stdout
, " op_symbol ");
3324 ps (e
->X_op_symbol
);
3325 fprintf (stdout
, "\n");
3332 fprintf (stdout
, "%s type %s%s",
3334 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3335 segment_name (S_GET_SEGMENT (s
)));
3338 static struct type_name
3340 i386_operand_type mask
;
3343 const type_names
[] =
3345 { OPERAND_TYPE_REG8
, "r8" },
3346 { OPERAND_TYPE_REG16
, "r16" },
3347 { OPERAND_TYPE_REG32
, "r32" },
3348 { OPERAND_TYPE_REG64
, "r64" },
3349 { OPERAND_TYPE_ACC8
, "acc8" },
3350 { OPERAND_TYPE_ACC16
, "acc16" },
3351 { OPERAND_TYPE_ACC32
, "acc32" },
3352 { OPERAND_TYPE_ACC64
, "acc64" },
3353 { OPERAND_TYPE_IMM8
, "i8" },
3354 { OPERAND_TYPE_IMM8
, "i8s" },
3355 { OPERAND_TYPE_IMM16
, "i16" },
3356 { OPERAND_TYPE_IMM32
, "i32" },
3357 { OPERAND_TYPE_IMM32S
, "i32s" },
3358 { OPERAND_TYPE_IMM64
, "i64" },
3359 { OPERAND_TYPE_IMM1
, "i1" },
3360 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
3361 { OPERAND_TYPE_DISP8
, "d8" },
3362 { OPERAND_TYPE_DISP16
, "d16" },
3363 { OPERAND_TYPE_DISP32
, "d32" },
3364 { OPERAND_TYPE_DISP32S
, "d32s" },
3365 { OPERAND_TYPE_DISP64
, "d64" },
3366 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
3367 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
3368 { OPERAND_TYPE_CONTROL
, "control reg" },
3369 { OPERAND_TYPE_TEST
, "test reg" },
3370 { OPERAND_TYPE_DEBUG
, "debug reg" },
3371 { OPERAND_TYPE_FLOATREG
, "FReg" },
3372 { OPERAND_TYPE_FLOATACC
, "FAcc" },
3373 { OPERAND_TYPE_SREG
, "SReg" },
3374 { OPERAND_TYPE_REGMMX
, "rMMX" },
3375 { OPERAND_TYPE_REGXMM
, "rXMM" },
3376 { OPERAND_TYPE_REGYMM
, "rYMM" },
3377 { OPERAND_TYPE_REGZMM
, "rZMM" },
3378 { OPERAND_TYPE_REGTMM
, "rTMM" },
3379 { OPERAND_TYPE_REGMASK
, "Mask reg" },
3383 pt (i386_operand_type t
)
3386 i386_operand_type a
;
3388 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3390 a
= operand_type_and (t
, type_names
[j
].mask
);
3391 if (operand_type_equal (&a
, &type_names
[j
].mask
))
3392 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3397 #endif /* DEBUG386 */
3399 static bfd_reloc_code_real_type
3400 reloc (unsigned int size
,
3403 bfd_reloc_code_real_type other
)
3405 if (other
!= NO_RELOC
)
3407 reloc_howto_type
*rel
;
3412 case BFD_RELOC_X86_64_GOT32
:
3413 return BFD_RELOC_X86_64_GOT64
;
3415 case BFD_RELOC_X86_64_GOTPLT64
:
3416 return BFD_RELOC_X86_64_GOTPLT64
;
3418 case BFD_RELOC_X86_64_PLTOFF64
:
3419 return BFD_RELOC_X86_64_PLTOFF64
;
3421 case BFD_RELOC_X86_64_GOTPC32
:
3422 other
= BFD_RELOC_X86_64_GOTPC64
;
3424 case BFD_RELOC_X86_64_GOTPCREL
:
3425 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3427 case BFD_RELOC_X86_64_TPOFF32
:
3428 other
= BFD_RELOC_X86_64_TPOFF64
;
3430 case BFD_RELOC_X86_64_DTPOFF32
:
3431 other
= BFD_RELOC_X86_64_DTPOFF64
;
3437 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3438 if (other
== BFD_RELOC_SIZE32
)
3441 other
= BFD_RELOC_SIZE64
;
3444 as_bad (_("there are no pc-relative size relocations"));
3450 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3451 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3454 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3456 as_bad (_("unknown relocation (%u)"), other
);
3457 else if (size
!= bfd_get_reloc_size (rel
))
3458 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3459 bfd_get_reloc_size (rel
),
3461 else if (pcrel
&& !rel
->pc_relative
)
3462 as_bad (_("non-pc-relative relocation for pc-relative field"));
3463 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3465 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3467 as_bad (_("relocated field and relocation type differ in signedness"));
3476 as_bad (_("there are no unsigned pc-relative relocations"));
3479 case 1: return BFD_RELOC_8_PCREL
;
3480 case 2: return BFD_RELOC_16_PCREL
;
3481 case 4: return BFD_RELOC_32_PCREL
;
3482 case 8: return BFD_RELOC_64_PCREL
;
3484 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3491 case 4: return BFD_RELOC_X86_64_32S
;
3496 case 1: return BFD_RELOC_8
;
3497 case 2: return BFD_RELOC_16
;
3498 case 4: return BFD_RELOC_32
;
3499 case 8: return BFD_RELOC_64
;
3501 as_bad (_("cannot do %s %u byte relocation"),
3502 sign
> 0 ? "signed" : "unsigned", size
);
3508 /* Here we decide which fixups can be adjusted to make them relative to
3509 the beginning of the section instead of the symbol. Basically we need
3510 to make sure that the dynamic relocations are done correctly, so in
3511 some cases we force the original symbol to be used. */
3514 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3516 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3520 /* Don't adjust pc-relative references to merge sections in 64-bit
3522 if (use_rela_relocations
3523 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3527 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3528 and changed later by validate_fix. */
3529 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3530 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3533 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3534 for size relocations. */
3535 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3536 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3537 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3538 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3539 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3540 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3541 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3542 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3543 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3544 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3545 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3546 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3547 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3548 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3549 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3550 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3551 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3552 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3553 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3554 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3555 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3556 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3557 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3558 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3559 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3560 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3561 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3562 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3563 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3564 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3565 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
3572 want_disp32 (const insn_template
*t
)
3574 return flag_code
!= CODE_64BIT
3575 || i
.prefix
[ADDR_PREFIX
]
3576 || (t
->base_opcode
== 0x8d
3577 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
3578 && (!i
.types
[1].bitfield
.qword
3579 || t
->opcode_modifier
.size
== SIZE32
));
3583 intel_float_operand (const char *mnemonic
)
3585 /* Note that the value returned is meaningful only for opcodes with (memory)
3586 operands, hence the code here is free to improperly handle opcodes that
3587 have no operands (for better performance and smaller code). */
3589 if (mnemonic
[0] != 'f')
3590 return 0; /* non-math */
3592 switch (mnemonic
[1])
3594 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
3595 the fs segment override prefix not currently handled because no
3596 call path can make opcodes without operands get here */
3598 return 2 /* integer op */;
3600 if (mnemonic
[2] == 'd' && (mnemonic
[3] == 'c' || mnemonic
[3] == 'e'))
3601 return 3; /* fldcw/fldenv */
3604 if (mnemonic
[2] != 'o' /* fnop */)
3605 return 3; /* non-waiting control op */
3608 if (mnemonic
[2] == 's')
3609 return 3; /* frstor/frstpm */
3612 if (mnemonic
[2] == 'a')
3613 return 3; /* fsave */
3614 if (mnemonic
[2] == 't')
3616 switch (mnemonic
[3])
3618 case 'c': /* fstcw */
3619 case 'd': /* fstdw */
3620 case 'e': /* fstenv */
3621 case 's': /* fsts[gw] */
3627 if (mnemonic
[2] == 'r' || mnemonic
[2] == 's')
3628 return 0; /* fxsave/fxrstor are not really math ops */
3636 install_template (const insn_template
*t
)
3642 /* Note that for pseudo prefixes this produces a length of 1. But for them
3643 the length isn't interesting at all. */
3644 for (l
= 1; l
< 4; ++l
)
3645 if (!(t
->base_opcode
>> (8 * l
)))
3648 i
.opcode_length
= l
;
3651 /* Build the VEX prefix. */
3654 build_vex_prefix (const insn_template
*t
)
3656 unsigned int register_specifier
;
3657 unsigned int vector_length
;
3660 /* Check register specifier. */
3661 if (i
.vex
.register_specifier
)
3663 register_specifier
=
3664 ~register_number (i
.vex
.register_specifier
) & 0xf;
3665 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3668 register_specifier
= 0xf;
3670 /* Use 2-byte VEX prefix by swapping destination and source operand
3671 if there are more than 1 register operand. */
3672 if (i
.reg_operands
> 1
3673 && i
.vec_encoding
!= vex_encoding_vex3
3674 && i
.dir_encoding
== dir_encoding_default
3675 && i
.operands
== i
.reg_operands
3676 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3677 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
3678 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3681 unsigned int xchg
= i
.operands
- 1;
3682 union i386_op temp_op
;
3683 i386_operand_type temp_type
;
3685 temp_type
= i
.types
[xchg
];
3686 i
.types
[xchg
] = i
.types
[0];
3687 i
.types
[0] = temp_type
;
3688 temp_op
= i
.op
[xchg
];
3689 i
.op
[xchg
] = i
.op
[0];
3692 gas_assert (i
.rm
.mode
== 3);
3696 i
.rm
.regmem
= i
.rm
.reg
;
3699 if (i
.tm
.opcode_modifier
.d
)
3700 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3701 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
3702 else /* Use the next insn. */
3703 install_template (&t
[1]);
3706 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3707 are no memory operands and at least 3 register ones. */
3708 if (i
.reg_operands
>= 3
3709 && i
.vec_encoding
!= vex_encoding_vex3
3710 && i
.reg_operands
== i
.operands
- i
.imm_operands
3711 && i
.tm
.opcode_modifier
.vex
3712 && i
.tm
.opcode_modifier
.commutative
3713 && (i
.tm
.opcode_modifier
.sse2avx
|| optimize
> 1)
3715 && i
.vex
.register_specifier
3716 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3718 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3719 union i386_op temp_op
;
3720 i386_operand_type temp_type
;
3722 gas_assert (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
);
3723 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3724 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3725 &i
.types
[i
.operands
- 3]));
3726 gas_assert (i
.rm
.mode
== 3);
3728 temp_type
= i
.types
[xchg
];
3729 i
.types
[xchg
] = i
.types
[xchg
+ 1];
3730 i
.types
[xchg
+ 1] = temp_type
;
3731 temp_op
= i
.op
[xchg
];
3732 i
.op
[xchg
] = i
.op
[xchg
+ 1];
3733 i
.op
[xchg
+ 1] = temp_op
;
3736 xchg
= i
.rm
.regmem
| 8;
3737 i
.rm
.regmem
= ~register_specifier
& 0xf;
3738 gas_assert (!(i
.rm
.regmem
& 8));
3739 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3740 register_specifier
= ~xchg
& 0xf;
3743 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3744 vector_length
= avxscalar
;
3745 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3751 /* Determine vector length from the last multi-length vector
3754 for (op
= t
->operands
; op
--;)
3755 if (t
->operand_types
[op
].bitfield
.xmmword
3756 && t
->operand_types
[op
].bitfield
.ymmword
3757 && i
.types
[op
].bitfield
.ymmword
)
3764 /* Check the REX.W bit and VEXW. */
3765 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3766 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3767 else if (i
.tm
.opcode_modifier
.vexw
)
3768 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3770 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3772 /* Use 2-byte VEX prefix if possible. */
3774 && i
.vec_encoding
!= vex_encoding_vex3
3775 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
3776 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3778 /* 2-byte VEX prefix. */
3782 i
.vex
.bytes
[0] = 0xc5;
3784 /* Check the REX.R bit. */
3785 r
= (i
.rex
& REX_R
) ? 0 : 1;
3786 i
.vex
.bytes
[1] = (r
<< 7
3787 | register_specifier
<< 3
3788 | vector_length
<< 2
3789 | i
.tm
.opcode_modifier
.opcodeprefix
);
3793 /* 3-byte VEX prefix. */
3796 switch (i
.tm
.opcode_modifier
.opcodespace
)
3801 i
.vex
.bytes
[0] = 0xc4;
3806 i
.vex
.bytes
[0] = 0x8f;
3812 /* The high 3 bits of the second VEX byte are 1's compliment
3813 of RXB bits from REX. */
3814 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | i
.tm
.opcode_modifier
.opcodespace
;
3816 i
.vex
.bytes
[2] = (w
<< 7
3817 | register_specifier
<< 3
3818 | vector_length
<< 2
3819 | i
.tm
.opcode_modifier
.opcodeprefix
);
3824 is_evex_encoding (const insn_template
*t
)
3826 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3827 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3828 || t
->opcode_modifier
.sae
;
3832 is_any_vex_encoding (const insn_template
*t
)
3834 return t
->opcode_modifier
.vex
|| is_evex_encoding (t
);
3837 /* Build the EVEX prefix. */
3840 build_evex_prefix (void)
3842 unsigned int register_specifier
, w
;
3843 rex_byte vrex_used
= 0;
3845 /* Check register specifier. */
3846 if (i
.vex
.register_specifier
)
3848 gas_assert ((i
.vrex
& REX_X
) == 0);
3850 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3851 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3852 register_specifier
+= 8;
3853 /* The upper 16 registers are encoded in the fourth byte of the
3855 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3856 i
.vex
.bytes
[3] = 0x8;
3857 register_specifier
= ~register_specifier
& 0xf;
3861 register_specifier
= 0xf;
3863 /* Encode upper 16 vector index register in the fourth byte of
3865 if (!(i
.vrex
& REX_X
))
3866 i
.vex
.bytes
[3] = 0x8;
3871 /* 4 byte EVEX prefix. */
3873 i
.vex
.bytes
[0] = 0x62;
3875 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3877 gas_assert (i
.tm
.opcode_modifier
.opcodespace
>= SPACE_0F
);
3878 gas_assert (i
.tm
.opcode_modifier
.opcodespace
<= SPACE_0F3A
);
3879 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | i
.tm
.opcode_modifier
.opcodespace
;
3881 /* The fifth bit of the second EVEX byte is 1's compliment of the
3882 REX_R bit in VREX. */
3883 if (!(i
.vrex
& REX_R
))
3884 i
.vex
.bytes
[1] |= 0x10;
3888 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3890 /* When all operands are registers, the REX_X bit in REX is not
3891 used. We reuse it to encode the upper 16 registers, which is
3892 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3893 as 1's compliment. */
3894 if ((i
.vrex
& REX_B
))
3897 i
.vex
.bytes
[1] &= ~0x40;
3901 /* EVEX instructions shouldn't need the REX prefix. */
3902 i
.vrex
&= ~vrex_used
;
3903 gas_assert (i
.vrex
== 0);
3905 /* Check the REX.W bit and VEXW. */
3906 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3907 w
= (evexwig
== evexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3908 else if (i
.tm
.opcode_modifier
.vexw
)
3909 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3911 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: evexwig
== evexw1
) ? 1 : 0;
3913 /* The third byte of the EVEX prefix. */
3914 i
.vex
.bytes
[2] = ((w
<< 7)
3915 | (register_specifier
<< 3)
3916 | 4 /* Encode the U bit. */
3917 | i
.tm
.opcode_modifier
.opcodeprefix
);
3919 /* The fourth byte of the EVEX prefix. */
3920 /* The zeroing-masking bit. */
3921 if (i
.mask
.reg
&& i
.mask
.zeroing
)
3922 i
.vex
.bytes
[3] |= 0x80;
3924 /* Don't always set the broadcast bit if there is no RC. */
3925 if (i
.rounding
.type
== rc_none
)
3927 /* Encode the vector length. */
3928 unsigned int vec_length
;
3930 if (!i
.tm
.opcode_modifier
.evex
3931 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
3935 /* Determine vector length from the last multi-length vector
3937 for (op
= i
.operands
; op
--;)
3938 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
3939 + i
.tm
.operand_types
[op
].bitfield
.ymmword
3940 + i
.tm
.operand_types
[op
].bitfield
.zmmword
> 1)
3942 if (i
.types
[op
].bitfield
.zmmword
)
3944 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3947 else if (i
.types
[op
].bitfield
.ymmword
)
3949 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3952 else if (i
.types
[op
].bitfield
.xmmword
)
3954 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3957 else if (i
.broadcast
.type
&& op
== i
.broadcast
.operand
)
3959 switch (i
.broadcast
.bytes
)
3962 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3965 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3968 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3977 if (op
>= MAX_OPERANDS
)
3981 switch (i
.tm
.opcode_modifier
.evex
)
3983 case EVEXLIG
: /* LL' is ignored */
3984 vec_length
= evexlig
<< 5;
3987 vec_length
= 0 << 5;
3990 vec_length
= 1 << 5;
3993 vec_length
= 2 << 5;
3999 i
.vex
.bytes
[3] |= vec_length
;
4000 /* Encode the broadcast bit. */
4001 if (i
.broadcast
.type
)
4002 i
.vex
.bytes
[3] |= 0x10;
4004 else if (i
.rounding
.type
!= saeonly
)
4005 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
.type
<< 5);
4007 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
4010 i
.vex
.bytes
[3] |= i
.mask
.reg
->reg_num
;
4014 process_immext (void)
4018 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
4019 which is coded in the same place as an 8-bit immediate field
4020 would be. Here we fake an 8-bit immediate operand from the
4021 opcode suffix stored in tm.extension_opcode.
4023 AVX instructions also use this encoding, for some of
4024 3 argument instructions. */
4026 gas_assert (i
.imm_operands
<= 1
4028 || (is_any_vex_encoding (&i
.tm
)
4029 && i
.operands
<= 4)));
4031 exp
= &im_expressions
[i
.imm_operands
++];
4032 i
.op
[i
.operands
].imms
= exp
;
4033 i
.types
[i
.operands
] = imm8
;
4035 exp
->X_op
= O_constant
;
4036 exp
->X_add_number
= i
.tm
.extension_opcode
;
4037 i
.tm
.extension_opcode
= None
;
4044 switch (i
.tm
.opcode_modifier
.prefixok
)
4052 as_bad (_("invalid instruction `%s' after `%s'"),
4053 i
.tm
.name
, i
.hle_prefix
);
4056 if (i
.prefix
[LOCK_PREFIX
])
4058 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
4062 case PrefixHLERelease
:
4063 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
4065 as_bad (_("instruction `%s' after `xacquire' not allowed"),
4069 if (i
.mem_operands
== 0 || !(i
.flags
[i
.operands
- 1] & Operand_Mem
))
4071 as_bad (_("memory destination needed for instruction `%s'"
4072 " after `xrelease'"), i
.tm
.name
);
4079 /* Try the shortest encoding by shortening operand size. */
4082 optimize_encoding (void)
4086 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4087 && i
.tm
.base_opcode
== 0x8d)
4090 lea symbol, %rN -> mov $symbol, %rN
4091 lea (%rM), %rN -> mov %rM, %rN
4092 lea (,%rM,1), %rN -> mov %rM, %rN
4094 and in 32-bit mode for 16-bit addressing
4096 lea (%rM), %rN -> movzx %rM, %rN
4098 and in 64-bit mode zap 32-bit addressing in favor of using a
4099 32-bit (or less) destination.
4101 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4103 if (!i
.op
[1].regs
->reg_type
.bitfield
.word
)
4104 i
.tm
.opcode_modifier
.size
= SIZE32
;
4105 i
.prefix
[ADDR_PREFIX
] = 0;
4108 if (!i
.index_reg
&& !i
.base_reg
)
4111 lea symbol, %rN -> mov $symbol, %rN
4113 if (flag_code
== CODE_64BIT
)
4115 /* Don't transform a relocation to a 16-bit one. */
4117 && i
.op
[0].disps
->X_op
!= O_constant
4118 && i
.op
[1].regs
->reg_type
.bitfield
.word
)
4121 if (!i
.op
[1].regs
->reg_type
.bitfield
.qword
4122 || i
.tm
.opcode_modifier
.size
== SIZE32
)
4124 i
.tm
.base_opcode
= 0xb8;
4125 i
.tm
.opcode_modifier
.modrm
= 0;
4126 if (!i
.op
[1].regs
->reg_type
.bitfield
.word
)
4127 i
.types
[0].bitfield
.imm32
= 1;
4130 i
.tm
.opcode_modifier
.size
= SIZE16
;
4131 i
.types
[0].bitfield
.imm16
= 1;
4136 /* Subject to further optimization below. */
4137 i
.tm
.base_opcode
= 0xc7;
4138 i
.tm
.extension_opcode
= 0;
4139 i
.types
[0].bitfield
.imm32s
= 1;
4140 i
.types
[0].bitfield
.baseindex
= 0;
4143 /* Outside of 64-bit mode address and operand sizes have to match if
4144 a relocation is involved, as otherwise we wouldn't (currently) or
4145 even couldn't express the relocation correctly. */
4146 else if (i
.op
[0].disps
4147 && i
.op
[0].disps
->X_op
!= O_constant
4148 && ((!i
.prefix
[ADDR_PREFIX
])
4149 != (flag_code
== CODE_32BIT
4150 ? i
.op
[1].regs
->reg_type
.bitfield
.dword
4151 : i
.op
[1].regs
->reg_type
.bitfield
.word
)))
4155 i
.tm
.base_opcode
= 0xb8;
4156 i
.tm
.opcode_modifier
.modrm
= 0;
4157 if (i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4158 i
.types
[0].bitfield
.imm32
= 1;
4160 i
.types
[0].bitfield
.imm16
= 1;
4163 && i
.op
[0].disps
->X_op
== O_constant
4164 && i
.op
[1].regs
->reg_type
.bitfield
.dword
4165 /* NB: Add () to !i.prefix[ADDR_PREFIX] to silence
4167 && (!i
.prefix
[ADDR_PREFIX
]) != (flag_code
== CODE_32BIT
))
4168 i
.op
[0].disps
->X_add_number
&= 0xffff;
4171 i
.tm
.operand_types
[0] = i
.types
[0];
4175 i
.op
[0].imms
= &im_expressions
[0];
4176 i
.op
[0].imms
->X_op
= O_absent
;
4179 else if (i
.op
[0].disps
4180 && (i
.op
[0].disps
->X_op
!= O_constant
4181 || i
.op
[0].disps
->X_add_number
))
4186 lea (%rM), %rN -> mov %rM, %rN
4187 lea (,%rM,1), %rN -> mov %rM, %rN
4188 lea (%rM), %rN -> movzx %rM, %rN
4190 const reg_entry
*addr_reg
;
4192 if (!i
.index_reg
&& i
.base_reg
->reg_num
!= RegIP
)
4193 addr_reg
= i
.base_reg
;
4194 else if (!i
.base_reg
4195 && i
.index_reg
->reg_num
!= RegIZ
4196 && !i
.log2_scale_factor
)
4197 addr_reg
= i
.index_reg
;
4201 if (addr_reg
->reg_type
.bitfield
.word
4202 && i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4204 if (flag_code
!= CODE_32BIT
)
4206 i
.tm
.opcode_modifier
.opcodespace
= SPACE_0F
;
4207 i
.tm
.base_opcode
= 0xb7;
4210 i
.tm
.base_opcode
= 0x8b;
4212 if (addr_reg
->reg_type
.bitfield
.dword
4213 && i
.op
[1].regs
->reg_type
.bitfield
.qword
)
4214 i
.tm
.opcode_modifier
.size
= SIZE32
;
4216 i
.op
[0].regs
= addr_reg
;
4221 i
.disp_operands
= 0;
4222 i
.prefix
[ADDR_PREFIX
] = 0;
4223 i
.prefix
[SEG_PREFIX
] = 0;
4227 if (optimize_for_space
4228 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4229 && i
.reg_operands
== 1
4230 && i
.imm_operands
== 1
4231 && !i
.types
[1].bitfield
.byte
4232 && i
.op
[0].imms
->X_op
== O_constant
4233 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4234 && (i
.tm
.base_opcode
== 0xa8
4235 || (i
.tm
.base_opcode
== 0xf6
4236 && i
.tm
.extension_opcode
== 0x0)))
4239 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
4241 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
4242 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
4244 i
.types
[1].bitfield
.byte
= 1;
4245 /* Ignore the suffix. */
4247 /* Convert to byte registers. */
4248 if (i
.types
[1].bitfield
.word
)
4250 else if (i
.types
[1].bitfield
.dword
)
4254 if (!(i
.op
[1].regs
->reg_flags
& RegRex
) && base_regnum
< 4)
4259 else if (flag_code
== CODE_64BIT
4260 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4261 && ((i
.types
[1].bitfield
.qword
4262 && i
.reg_operands
== 1
4263 && i
.imm_operands
== 1
4264 && i
.op
[0].imms
->X_op
== O_constant
4265 && ((i
.tm
.base_opcode
== 0xb8
4266 && i
.tm
.extension_opcode
== None
4267 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
4268 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
4269 && ((i
.tm
.base_opcode
== 0x24
4270 || i
.tm
.base_opcode
== 0xa8)
4271 || (i
.tm
.base_opcode
== 0x80
4272 && i
.tm
.extension_opcode
== 0x4)
4273 || ((i
.tm
.base_opcode
== 0xf6
4274 || (i
.tm
.base_opcode
| 1) == 0xc7)
4275 && i
.tm
.extension_opcode
== 0x0)))
4276 || (fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4277 && i
.tm
.base_opcode
== 0x83
4278 && i
.tm
.extension_opcode
== 0x4)))
4279 || (i
.types
[0].bitfield
.qword
4280 && ((i
.reg_operands
== 2
4281 && i
.op
[0].regs
== i
.op
[1].regs
4282 && (i
.tm
.base_opcode
== 0x30
4283 || i
.tm
.base_opcode
== 0x28))
4284 || (i
.reg_operands
== 1
4286 && i
.tm
.base_opcode
== 0x30)))))
4289 andq $imm31, %r64 -> andl $imm31, %r32
4290 andq $imm7, %r64 -> andl $imm7, %r32
4291 testq $imm31, %r64 -> testl $imm31, %r32
4292 xorq %r64, %r64 -> xorl %r32, %r32
4293 subq %r64, %r64 -> subl %r32, %r32
4294 movq $imm31, %r64 -> movl $imm31, %r32
4295 movq $imm32, %r64 -> movl $imm32, %r32
4297 i
.tm
.opcode_modifier
.norex64
= 1;
4298 if (i
.tm
.base_opcode
== 0xb8 || (i
.tm
.base_opcode
| 1) == 0xc7)
4301 movq $imm31, %r64 -> movl $imm31, %r32
4302 movq $imm32, %r64 -> movl $imm32, %r32
4304 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
4305 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
4306 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
4307 i
.types
[0].bitfield
.imm32
= 1;
4308 i
.types
[0].bitfield
.imm32s
= 0;
4309 i
.types
[0].bitfield
.imm64
= 0;
4310 i
.types
[1].bitfield
.dword
= 1;
4311 i
.types
[1].bitfield
.qword
= 0;
4312 if ((i
.tm
.base_opcode
| 1) == 0xc7)
4315 movq $imm31, %r64 -> movl $imm31, %r32
4317 i
.tm
.base_opcode
= 0xb8;
4318 i
.tm
.extension_opcode
= None
;
4319 i
.tm
.opcode_modifier
.w
= 0;
4320 i
.tm
.opcode_modifier
.modrm
= 0;
4324 else if (optimize
> 1
4325 && !optimize_for_space
4326 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4327 && i
.reg_operands
== 2
4328 && i
.op
[0].regs
== i
.op
[1].regs
4329 && ((i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x8
4330 || (i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x20)
4331 && (flag_code
!= CODE_64BIT
|| !i
.types
[0].bitfield
.dword
))
4334 andb %rN, %rN -> testb %rN, %rN
4335 andw %rN, %rN -> testw %rN, %rN
4336 andq %rN, %rN -> testq %rN, %rN
4337 orb %rN, %rN -> testb %rN, %rN
4338 orw %rN, %rN -> testw %rN, %rN
4339 orq %rN, %rN -> testq %rN, %rN
4341 and outside of 64-bit mode
4343 andl %rN, %rN -> testl %rN, %rN
4344 orl %rN, %rN -> testl %rN, %rN
4346 i
.tm
.base_opcode
= 0x84 | (i
.tm
.base_opcode
& 1);
4348 else if (i
.reg_operands
== 3
4349 && i
.op
[0].regs
== i
.op
[1].regs
4350 && !i
.types
[2].bitfield
.xmmword
4351 && (i
.tm
.opcode_modifier
.vex
4352 || ((!i
.mask
.reg
|| i
.mask
.zeroing
)
4353 && i
.rounding
.type
== rc_none
4354 && is_evex_encoding (&i
.tm
)
4355 && (i
.vec_encoding
!= vex_encoding_evex
4356 || cpu_arch_isa_flags
.bitfield
.cpuavx512vl
4357 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
4358 || (i
.tm
.operand_types
[2].bitfield
.zmmword
4359 && i
.types
[2].bitfield
.ymmword
))))
4360 && ((i
.tm
.base_opcode
== 0x55
4361 || i
.tm
.base_opcode
== 0x57
4362 || i
.tm
.base_opcode
== 0xdf
4363 || i
.tm
.base_opcode
== 0xef
4364 || i
.tm
.base_opcode
== 0xf8
4365 || i
.tm
.base_opcode
== 0xf9
4366 || i
.tm
.base_opcode
== 0xfa
4367 || i
.tm
.base_opcode
== 0xfb
4368 || i
.tm
.base_opcode
== 0x42
4369 || i
.tm
.base_opcode
== 0x47)
4370 && i
.tm
.extension_opcode
== None
))
4373 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4375 EVEX VOP %zmmM, %zmmM, %zmmN
4376 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4377 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4378 EVEX VOP %ymmM, %ymmM, %ymmN
4379 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4380 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4381 VEX VOP %ymmM, %ymmM, %ymmN
4382 -> VEX VOP %xmmM, %xmmM, %xmmN
4383 VOP, one of vpandn and vpxor:
4384 VEX VOP %ymmM, %ymmM, %ymmN
4385 -> VEX VOP %xmmM, %xmmM, %xmmN
4386 VOP, one of vpandnd and vpandnq:
4387 EVEX VOP %zmmM, %zmmM, %zmmN
4388 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4389 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4390 EVEX VOP %ymmM, %ymmM, %ymmN
4391 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4392 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4393 VOP, one of vpxord and vpxorq:
4394 EVEX VOP %zmmM, %zmmM, %zmmN
4395 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4396 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4397 EVEX VOP %ymmM, %ymmM, %ymmN
4398 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4399 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4400 VOP, one of kxord and kxorq:
4401 VEX VOP %kM, %kM, %kN
4402 -> VEX kxorw %kM, %kM, %kN
4403 VOP, one of kandnd and kandnq:
4404 VEX VOP %kM, %kM, %kN
4405 -> VEX kandnw %kM, %kM, %kN
4407 if (is_evex_encoding (&i
.tm
))
4409 if (i
.vec_encoding
!= vex_encoding_evex
)
4411 i
.tm
.opcode_modifier
.vex
= VEX128
;
4412 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4413 i
.tm
.opcode_modifier
.evex
= 0;
4415 else if (optimize
> 1)
4416 i
.tm
.opcode_modifier
.evex
= EVEX128
;
4420 else if (i
.tm
.operand_types
[0].bitfield
.class == RegMask
)
4422 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_NONE
;
4423 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4426 i
.tm
.opcode_modifier
.vex
= VEX128
;
4428 if (i
.tm
.opcode_modifier
.vex
)
4429 for (j
= 0; j
< 3; j
++)
4431 i
.types
[j
].bitfield
.xmmword
= 1;
4432 i
.types
[j
].bitfield
.ymmword
= 0;
4435 else if (i
.vec_encoding
!= vex_encoding_evex
4436 && !i
.types
[0].bitfield
.zmmword
4437 && !i
.types
[1].bitfield
.zmmword
4439 && !i
.broadcast
.type
4440 && is_evex_encoding (&i
.tm
)
4441 && ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4442 || (i
.tm
.base_opcode
& ~4) == 0xdb
4443 || (i
.tm
.base_opcode
& ~4) == 0xeb)
4444 && i
.tm
.extension_opcode
== None
)
4447 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4448 vmovdqu32 and vmovdqu64:
4449 EVEX VOP %xmmM, %xmmN
4450 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4451 EVEX VOP %ymmM, %ymmN
4452 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4454 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4456 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4458 -> VEX mvmovdqa|vmovdquem, %xmmN (N < 16)
4460 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4461 VOP, one of vpand, vpandn, vpor, vpxor:
4462 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4463 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4464 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4465 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4466 EVEX VOP{d,q} mem, %xmmM, %xmmN
4467 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4468 EVEX VOP{d,q} mem, %ymmM, %ymmN
4469 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4471 for (j
= 0; j
< i
.operands
; j
++)
4472 if (operand_type_check (i
.types
[j
], disp
)
4473 && i
.op
[j
].disps
->X_op
== O_constant
)
4475 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4476 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4477 bytes, we choose EVEX Disp8 over VEX Disp32. */
4478 int evex_disp8
, vex_disp8
;
4479 unsigned int memshift
= i
.memshift
;
4480 offsetT n
= i
.op
[j
].disps
->X_add_number
;
4482 evex_disp8
= fits_in_disp8 (n
);
4484 vex_disp8
= fits_in_disp8 (n
);
4485 if (evex_disp8
!= vex_disp8
)
4487 i
.memshift
= memshift
;
4491 i
.types
[j
].bitfield
.disp8
= vex_disp8
;
4494 if ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4495 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
)
4496 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF3
;
4497 i
.tm
.opcode_modifier
.vex
4498 = i
.types
[0].bitfield
.ymmword
? VEX256
: VEX128
;
4499 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4500 /* VPAND, VPOR, and VPXOR are commutative. */
4501 if (i
.reg_operands
== 3 && i
.tm
.base_opcode
!= 0xdf)
4502 i
.tm
.opcode_modifier
.commutative
= 1;
4503 i
.tm
.opcode_modifier
.evex
= 0;
4504 i
.tm
.opcode_modifier
.masking
= 0;
4505 i
.tm
.opcode_modifier
.broadcast
= 0;
4506 i
.tm
.opcode_modifier
.disp8memshift
= 0;
4509 i
.types
[j
].bitfield
.disp8
4510 = fits_in_disp8 (i
.op
[j
].disps
->X_add_number
);
4514 /* Return non-zero for load instruction. */
4520 int any_vex_p
= is_any_vex_encoding (&i
.tm
);
4521 unsigned int base_opcode
= i
.tm
.base_opcode
| 1;
4525 /* Anysize insns: lea, invlpg, clflush, prefetchnta, prefetcht0,
4526 prefetcht1, prefetcht2, prefetchtw, bndmk, bndcl, bndcu, bndcn,
4527 bndstx, bndldx, prefetchwt1, clflushopt, clwb, cldemote. */
4528 if (i
.tm
.opcode_modifier
.anysize
)
4532 if (strcmp (i
.tm
.name
, "pop") == 0)
4536 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
4539 if (i
.tm
.base_opcode
== 0x9d
4540 || i
.tm
.base_opcode
== 0x61)
4543 /* movs, cmps, lods, scas. */
4544 if ((i
.tm
.base_opcode
| 0xb) == 0xaf)
4548 if (base_opcode
== 0x6f
4549 || i
.tm
.base_opcode
== 0xd7)
4551 /* NB: For AMD-specific insns with implicit memory operands,
4552 they're intentionally not covered. */
4555 /* No memory operand. */
4556 if (!i
.mem_operands
)
4562 if (i
.tm
.base_opcode
== 0xae
4563 && i
.tm
.opcode_modifier
.vex
4564 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4565 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4566 && i
.tm
.extension_opcode
== 2)
4569 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
4571 /* test, not, neg, mul, imul, div, idiv. */
4572 if ((i
.tm
.base_opcode
== 0xf6 || i
.tm
.base_opcode
== 0xf7)
4573 && i
.tm
.extension_opcode
!= 1)
4577 if (base_opcode
== 0xff && i
.tm
.extension_opcode
<= 1)
4580 /* add, or, adc, sbb, and, sub, xor, cmp. */
4581 if (i
.tm
.base_opcode
>= 0x80 && i
.tm
.base_opcode
<= 0x83)
4584 /* rol, ror, rcl, rcr, shl/sal, shr, sar. */
4585 if ((base_opcode
== 0xc1
4586 || (i
.tm
.base_opcode
>= 0xd0 && i
.tm
.base_opcode
<= 0xd3))
4587 && i
.tm
.extension_opcode
!= 6)
4590 /* Check for x87 instructions. */
4591 if (base_opcode
>= 0xd8 && base_opcode
<= 0xdf)
4593 /* Skip fst, fstp, fstenv, fstcw. */
4594 if (i
.tm
.base_opcode
== 0xd9
4595 && (i
.tm
.extension_opcode
== 2
4596 || i
.tm
.extension_opcode
== 3
4597 || i
.tm
.extension_opcode
== 6
4598 || i
.tm
.extension_opcode
== 7))
4601 /* Skip fisttp, fist, fistp, fstp. */
4602 if (i
.tm
.base_opcode
== 0xdb
4603 && (i
.tm
.extension_opcode
== 1
4604 || i
.tm
.extension_opcode
== 2
4605 || i
.tm
.extension_opcode
== 3
4606 || i
.tm
.extension_opcode
== 7))
4609 /* Skip fisttp, fst, fstp, fsave, fstsw. */
4610 if (i
.tm
.base_opcode
== 0xdd
4611 && (i
.tm
.extension_opcode
== 1
4612 || i
.tm
.extension_opcode
== 2
4613 || i
.tm
.extension_opcode
== 3
4614 || i
.tm
.extension_opcode
== 6
4615 || i
.tm
.extension_opcode
== 7))
4618 /* Skip fisttp, fist, fistp, fbstp, fistp. */
4619 if (i
.tm
.base_opcode
== 0xdf
4620 && (i
.tm
.extension_opcode
== 1
4621 || i
.tm
.extension_opcode
== 2
4622 || i
.tm
.extension_opcode
== 3
4623 || i
.tm
.extension_opcode
== 6
4624 || i
.tm
.extension_opcode
== 7))
4630 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
)
4632 /* bt, bts, btr, btc. */
4633 if (i
.tm
.base_opcode
== 0xba
4634 && (i
.tm
.extension_opcode
>= 4 && i
.tm
.extension_opcode
<= 7))
4637 /* cmpxchg8b, cmpxchg16b, xrstors, vmptrld. */
4638 if (i
.tm
.base_opcode
== 0xc7
4639 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4640 && (i
.tm
.extension_opcode
== 1 || i
.tm
.extension_opcode
== 3
4641 || i
.tm
.extension_opcode
== 6))
4644 /* fxrstor, ldmxcsr, xrstor. */
4645 if (i
.tm
.base_opcode
== 0xae
4646 && (i
.tm
.extension_opcode
== 1
4647 || i
.tm
.extension_opcode
== 2
4648 || i
.tm
.extension_opcode
== 5))
4651 /* lgdt, lidt, lmsw. */
4652 if (i
.tm
.base_opcode
== 0x01
4653 && (i
.tm
.extension_opcode
== 2
4654 || i
.tm
.extension_opcode
== 3
4655 || i
.tm
.extension_opcode
== 6))
4659 dest
= i
.operands
- 1;
4661 /* Check fake imm8 operand and 3 source operands. */
4662 if ((i
.tm
.opcode_modifier
.immext
4663 || i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
4664 && i
.types
[dest
].bitfield
.imm8
)
4667 /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg. */
4668 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4669 && (base_opcode
== 0x1
4670 || base_opcode
== 0x9
4671 || base_opcode
== 0x11
4672 || base_opcode
== 0x19
4673 || base_opcode
== 0x21
4674 || base_opcode
== 0x29
4675 || base_opcode
== 0x31
4676 || base_opcode
== 0x39
4677 || (base_opcode
| 2) == 0x87))
4681 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4682 && base_opcode
== 0xc1)
4685 /* Check for load instruction. */
4686 return (i
.types
[dest
].bitfield
.class != ClassNone
4687 || i
.types
[dest
].bitfield
.instance
== Accum
);
4690 /* Output lfence, 0xfaee8, after instruction. */
4693 insert_lfence_after (void)
4695 if (lfence_after_load
&& load_insn_p ())
4697 /* There are also two REP string instructions that require
4698 special treatment. Specifically, the compare string (CMPS)
4699 and scan string (SCAS) instructions set EFLAGS in a manner
4700 that depends on the data being compared/scanned. When used
4701 with a REP prefix, the number of iterations may therefore
4702 vary depending on this data. If the data is a program secret
4703 chosen by the adversary using an LVI method,
4704 then this data-dependent behavior may leak some aspect
4706 if (((i
.tm
.base_opcode
| 0x1) == 0xa7
4707 || (i
.tm
.base_opcode
| 0x1) == 0xaf)
4708 && i
.prefix
[REP_PREFIX
])
4710 as_warn (_("`%s` changes flags which would affect control flow behavior"),
4713 char *p
= frag_more (3);
4720 /* Output lfence, 0xfaee8, before instruction. */
4723 insert_lfence_before (void)
4727 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
4730 if (i
.tm
.base_opcode
== 0xff
4731 && (i
.tm
.extension_opcode
== 2 || i
.tm
.extension_opcode
== 4))
4733 /* Insert lfence before indirect branch if needed. */
4735 if (lfence_before_indirect_branch
== lfence_branch_none
)
4738 if (i
.operands
!= 1)
4741 if (i
.reg_operands
== 1)
4743 /* Indirect branch via register. Don't insert lfence with
4744 -mlfence-after-load=yes. */
4745 if (lfence_after_load
4746 || lfence_before_indirect_branch
== lfence_branch_memory
)
4749 else if (i
.mem_operands
== 1
4750 && lfence_before_indirect_branch
!= lfence_branch_register
)
4752 as_warn (_("indirect `%s` with memory operand should be avoided"),
4759 if (last_insn
.kind
!= last_insn_other
4760 && last_insn
.seg
== now_seg
)
4762 as_warn_where (last_insn
.file
, last_insn
.line
,
4763 _("`%s` skips -mlfence-before-indirect-branch on `%s`"),
4764 last_insn
.name
, i
.tm
.name
);
4775 /* Output or/not/shl and lfence before near ret. */
4776 if (lfence_before_ret
!= lfence_before_ret_none
4777 && (i
.tm
.base_opcode
== 0xc2
4778 || i
.tm
.base_opcode
== 0xc3))
4780 if (last_insn
.kind
!= last_insn_other
4781 && last_insn
.seg
== now_seg
)
4783 as_warn_where (last_insn
.file
, last_insn
.line
,
4784 _("`%s` skips -mlfence-before-ret on `%s`"),
4785 last_insn
.name
, i
.tm
.name
);
4789 /* Near ret ingore operand size override under CPU64. */
4790 char prefix
= flag_code
== CODE_64BIT
4792 : i
.prefix
[DATA_PREFIX
] ? 0x66 : 0x0;
4794 if (lfence_before_ret
== lfence_before_ret_not
)
4796 /* not: 0xf71424, may add prefix
4797 for operand size override or 64-bit code. */
4798 p
= frag_more ((prefix
? 2 : 0) + 6 + 3);
4812 p
= frag_more ((prefix
? 1 : 0) + 4 + 3);
4815 if (lfence_before_ret
== lfence_before_ret_or
)
4817 /* or: 0x830c2400, may add prefix
4818 for operand size override or 64-bit code. */
4824 /* shl: 0xc1242400, may add prefix
4825 for operand size override or 64-bit code. */
4840 /* This is the guts of the machine-dependent assembler. LINE points to a
4841 machine dependent instruction. This function is supposed to emit
4842 the frags/bytes it assembles to. */
4845 md_assemble (char *line
)
4848 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
4849 const insn_template
*t
;
4851 /* Initialize globals. */
4852 memset (&i
, '\0', sizeof (i
));
4853 i
.rounding
.type
= rc_none
;
4854 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4855 i
.reloc
[j
] = NO_RELOC
;
4856 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
4857 memset (im_expressions
, '\0', sizeof (im_expressions
));
4858 save_stack_p
= save_stack
;
4860 /* First parse an instruction mnemonic & call i386_operand for the operands.
4861 We assume that the scrubber has arranged it so that line[0] is the valid
4862 start of a (possibly prefixed) mnemonic. */
4864 line
= parse_insn (line
, mnemonic
);
4867 mnem_suffix
= i
.suffix
;
4869 line
= parse_operands (line
, mnemonic
);
4871 xfree (i
.memop1_string
);
4872 i
.memop1_string
= NULL
;
4876 /* Now we've parsed the mnemonic into a set of templates, and have the
4877 operands at hand. */
4879 /* All Intel opcodes have reversed operands except for "bound", "enter",
4880 "invlpg*", "monitor*", "mwait*", "tpause", "umwait", "pvalidate",
4881 "rmpadjust", and "rmpupdate". We also don't reverse intersegment "jmp"
4882 and "call" instructions with 2 immediate operands so that the immediate
4883 segment precedes the offset consistently in Intel and AT&T modes. */
4886 && (strcmp (mnemonic
, "bound") != 0)
4887 && (strncmp (mnemonic
, "invlpg", 6) != 0)
4888 && !startswith (mnemonic
, "monitor")
4889 && !startswith (mnemonic
, "mwait")
4890 && (strcmp (mnemonic
, "pvalidate") != 0)
4891 && !startswith (mnemonic
, "rmp")
4892 && (strcmp (mnemonic
, "tpause") != 0)
4893 && (strcmp (mnemonic
, "umwait") != 0)
4894 && !(operand_type_check (i
.types
[0], imm
)
4895 && operand_type_check (i
.types
[1], imm
)))
4898 /* The order of the immediates should be reversed
4899 for 2 immediates extrq and insertq instructions */
4900 if (i
.imm_operands
== 2
4901 && (strcmp (mnemonic
, "extrq") == 0
4902 || strcmp (mnemonic
, "insertq") == 0))
4903 swap_2_operands (0, 1);
4908 if (i
.disp_operands
&& !want_disp32 (current_templates
->start
))
4910 for (j
= 0; j
< i
.operands
; ++j
)
4912 const expressionS
*exp
= i
.op
[j
].disps
;
4914 if (!operand_type_check (i
.types
[j
], disp
))
4917 if (exp
->X_op
!= O_constant
)
4920 /* Since displacement is signed extended to 64bit, don't allow
4921 disp32 and turn off disp32s if they are out of range. */
4922 i
.types
[j
].bitfield
.disp32
= 0;
4923 if (fits_in_signed_long (exp
->X_add_number
))
4926 i
.types
[j
].bitfield
.disp32s
= 0;
4927 if (i
.types
[j
].bitfield
.baseindex
)
4929 as_bad (_("0x%" BFD_VMA_FMT
"x out of range of signed 32bit displacement"),
4936 /* Don't optimize displacement for movabs since it only takes 64bit
4939 && i
.disp_encoding
!= disp_encoding_32bit
4940 && (flag_code
!= CODE_64BIT
4941 || strcmp (mnemonic
, "movabs") != 0))
4944 /* Next, we find a template that matches the given insn,
4945 making sure the overlap of the given operands types is consistent
4946 with the template operand types. */
4948 if (!(t
= match_template (mnem_suffix
)))
4951 if (sse_check
!= check_none
4952 && !i
.tm
.opcode_modifier
.noavx
4953 && !i
.tm
.cpu_flags
.bitfield
.cpuavx
4954 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
4955 && (i
.tm
.cpu_flags
.bitfield
.cpusse
4956 || i
.tm
.cpu_flags
.bitfield
.cpusse2
4957 || i
.tm
.cpu_flags
.bitfield
.cpusse3
4958 || i
.tm
.cpu_flags
.bitfield
.cpussse3
4959 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
4960 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
4961 || i
.tm
.cpu_flags
.bitfield
.cpupclmul
4962 || i
.tm
.cpu_flags
.bitfield
.cpuaes
4963 || i
.tm
.cpu_flags
.bitfield
.cpusha
4964 || i
.tm
.cpu_flags
.bitfield
.cpugfni
))
4966 (sse_check
== check_warning
4968 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
4971 if (i
.tm
.opcode_modifier
.fwait
)
4972 if (!add_prefix (FWAIT_OPCODE
))
4975 /* Check if REP prefix is OK. */
4976 if (i
.rep_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixRep
)
4978 as_bad (_("invalid instruction `%s' after `%s'"),
4979 i
.tm
.name
, i
.rep_prefix
);
4983 /* Check for lock without a lockable instruction. Destination operand
4984 must be memory unless it is xchg (0x86). */
4985 if (i
.prefix
[LOCK_PREFIX
]
4986 && (i
.tm
.opcode_modifier
.prefixok
< PrefixLock
4987 || i
.mem_operands
== 0
4988 || (i
.tm
.base_opcode
!= 0x86
4989 && !(i
.flags
[i
.operands
- 1] & Operand_Mem
))))
4991 as_bad (_("expecting lockable instruction after `lock'"));
4995 /* Check for data size prefix on VEX/XOP/EVEX encoded and SIMD insns. */
4996 if (i
.prefix
[DATA_PREFIX
]
4997 && (is_any_vex_encoding (&i
.tm
)
4998 || i
.tm
.operand_types
[i
.imm_operands
].bitfield
.class >= RegMMX
4999 || i
.tm
.operand_types
[i
.imm_operands
+ 1].bitfield
.class >= RegMMX
))
5001 as_bad (_("data size prefix invalid with `%s'"), i
.tm
.name
);
5005 /* Check if HLE prefix is OK. */
5006 if (i
.hle_prefix
&& !check_hle ())
5009 /* Check BND prefix. */
5010 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
5011 as_bad (_("expecting valid branch instruction after `bnd'"));
5013 /* Check NOTRACK prefix. */
5014 if (i
.notrack_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixNoTrack
)
5015 as_bad (_("expecting indirect branch instruction after `notrack'"));
5017 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
5019 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
5020 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
5021 else if (flag_code
!= CODE_16BIT
5022 ? i
.prefix
[ADDR_PREFIX
]
5023 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
5024 as_bad (_("16-bit address isn't allowed in MPX instructions"));
5027 /* Insert BND prefix. */
5028 if (add_bnd_prefix
&& i
.tm
.opcode_modifier
.bndprefixok
)
5030 if (!i
.prefix
[BND_PREFIX
])
5031 add_prefix (BND_PREFIX_OPCODE
);
5032 else if (i
.prefix
[BND_PREFIX
] != BND_PREFIX_OPCODE
)
5034 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
5035 i
.prefix
[BND_PREFIX
] = BND_PREFIX_OPCODE
;
5039 /* Check string instruction segment overrides. */
5040 if (i
.tm
.opcode_modifier
.isstring
>= IS_STRING_ES_OP0
)
5042 gas_assert (i
.mem_operands
);
5043 if (!check_string ())
5045 i
.disp_operands
= 0;
5048 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
5049 optimize_encoding ();
5051 if (!process_suffix ())
5054 /* Update operand types and check extended states. */
5055 for (j
= 0; j
< i
.operands
; j
++)
5057 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
5058 switch (i
.tm
.operand_types
[j
].bitfield
.class)
5063 i
.xstate
|= xstate_mmx
;
5066 i
.xstate
|= xstate_mask
;
5069 if (i
.tm
.operand_types
[j
].bitfield
.tmmword
)
5070 i
.xstate
|= xstate_tmm
;
5071 else if (i
.tm
.operand_types
[j
].bitfield
.zmmword
)
5072 i
.xstate
|= xstate_zmm
;
5073 else if (i
.tm
.operand_types
[j
].bitfield
.ymmword
)
5074 i
.xstate
|= xstate_ymm
;
5075 else if (i
.tm
.operand_types
[j
].bitfield
.xmmword
)
5076 i
.xstate
|= xstate_xmm
;
5081 /* Make still unresolved immediate matches conform to size of immediate
5082 given in i.suffix. */
5083 if (!finalize_imm ())
5086 if (i
.types
[0].bitfield
.imm1
)
5087 i
.imm_operands
= 0; /* kludge for shift insns. */
5089 /* We only need to check those implicit registers for instructions
5090 with 3 operands or less. */
5091 if (i
.operands
<= 3)
5092 for (j
= 0; j
< i
.operands
; j
++)
5093 if (i
.types
[j
].bitfield
.instance
!= InstanceNone
5094 && !i
.types
[j
].bitfield
.xmmword
)
5097 /* For insns with operands there are more diddles to do to the opcode. */
5100 if (!process_operands ())
5103 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
5105 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
5106 as_warn (_("translating to `%sp'"), i
.tm
.name
);
5109 if (is_any_vex_encoding (&i
.tm
))
5111 if (!cpu_arch_flags
.bitfield
.cpui286
)
5113 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
5118 /* Check for explicit REX prefix. */
5119 if (i
.prefix
[REX_PREFIX
] || i
.rex_encoding
)
5121 as_bad (_("REX prefix invalid with `%s'"), i
.tm
.name
);
5125 if (i
.tm
.opcode_modifier
.vex
)
5126 build_vex_prefix (t
);
5128 build_evex_prefix ();
5130 /* The individual REX.RXBW bits got consumed. */
5131 i
.rex
&= REX_OPCODE
;
5134 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
5135 instructions may define INT_OPCODE as well, so avoid this corner
5136 case for those instructions that use MODRM. */
5137 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
5138 && i
.tm
.base_opcode
== INT_OPCODE
5139 && !i
.tm
.opcode_modifier
.modrm
5140 && i
.op
[0].imms
->X_add_number
== 3)
5142 i
.tm
.base_opcode
= INT3_OPCODE
;
5146 if ((i
.tm
.opcode_modifier
.jump
== JUMP
5147 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
5148 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
5149 && i
.op
[0].disps
->X_op
== O_constant
)
5151 /* Convert "jmp constant" (and "call constant") to a jump (call) to
5152 the absolute address given by the constant. Since ix86 jumps and
5153 calls are pc relative, we need to generate a reloc. */
5154 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
5155 i
.op
[0].disps
->X_op
= O_symbol
;
5158 /* For 8 bit registers we need an empty rex prefix. Also if the
5159 instruction already has a prefix, we need to convert old
5160 registers to new ones. */
5162 if ((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
5163 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
5164 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
5165 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
5166 || (((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
)
5167 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
))
5172 i
.rex
|= REX_OPCODE
;
5173 for (x
= 0; x
< 2; x
++)
5175 /* Look for 8 bit operand that uses old registers. */
5176 if (i
.types
[x
].bitfield
.class == Reg
&& i
.types
[x
].bitfield
.byte
5177 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
5179 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
5180 /* In case it is "hi" register, give up. */
5181 if (i
.op
[x
].regs
->reg_num
> 3)
5182 as_bad (_("can't encode register '%s%s' in an "
5183 "instruction requiring REX prefix."),
5184 register_prefix
, i
.op
[x
].regs
->reg_name
);
5186 /* Otherwise it is equivalent to the extended register.
5187 Since the encoding doesn't change this is merely
5188 cosmetic cleanup for debug output. */
5190 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
5195 if (i
.rex
== 0 && i
.rex_encoding
)
5197 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
5198 that uses legacy register. If it is "hi" register, don't add
5199 the REX_OPCODE byte. */
5201 for (x
= 0; x
< 2; x
++)
5202 if (i
.types
[x
].bitfield
.class == Reg
5203 && i
.types
[x
].bitfield
.byte
5204 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
5205 && i
.op
[x
].regs
->reg_num
> 3)
5207 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
5208 i
.rex_encoding
= false;
5217 add_prefix (REX_OPCODE
| i
.rex
);
5219 insert_lfence_before ();
5221 /* We are ready to output the insn. */
5224 insert_lfence_after ();
5226 last_insn
.seg
= now_seg
;
5228 if (i
.tm
.opcode_modifier
.isprefix
)
5230 last_insn
.kind
= last_insn_prefix
;
5231 last_insn
.name
= i
.tm
.name
;
5232 last_insn
.file
= as_where (&last_insn
.line
);
5235 last_insn
.kind
= last_insn_other
;
5239 parse_insn (char *line
, char *mnemonic
)
5242 char *token_start
= l
;
5245 const insn_template
*t
;
5251 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
5256 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
5258 as_bad (_("no such instruction: `%s'"), token_start
);
5263 if (!is_space_char (*l
)
5264 && *l
!= END_OF_INSN
5266 || (*l
!= PREFIX_SEPARATOR
5269 as_bad (_("invalid character %s in mnemonic"),
5270 output_invalid (*l
));
5273 if (token_start
== l
)
5275 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
5276 as_bad (_("expecting prefix; got nothing"));
5278 as_bad (_("expecting mnemonic; got nothing"));
5282 /* Look up instruction (or prefix) via hash table. */
5283 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5285 if (*l
!= END_OF_INSN
5286 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
5287 && current_templates
5288 && current_templates
->start
->opcode_modifier
.isprefix
)
5290 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
5292 as_bad ((flag_code
!= CODE_64BIT
5293 ? _("`%s' is only supported in 64-bit mode")
5294 : _("`%s' is not supported in 64-bit mode")),
5295 current_templates
->start
->name
);
5298 /* If we are in 16-bit mode, do not allow addr16 or data16.
5299 Similarly, in 32-bit mode, do not allow addr32 or data32. */
5300 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
5301 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5302 && flag_code
!= CODE_64BIT
5303 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5304 ^ (flag_code
== CODE_16BIT
)))
5306 as_bad (_("redundant %s prefix"),
5307 current_templates
->start
->name
);
5311 if (current_templates
->start
->base_opcode
== PSEUDO_PREFIX
)
5313 /* Handle pseudo prefixes. */
5314 switch (current_templates
->start
->extension_opcode
)
5318 i
.disp_encoding
= disp_encoding_8bit
;
5322 i
.disp_encoding
= disp_encoding_16bit
;
5326 i
.disp_encoding
= disp_encoding_32bit
;
5330 i
.dir_encoding
= dir_encoding_load
;
5334 i
.dir_encoding
= dir_encoding_store
;
5338 i
.vec_encoding
= vex_encoding_vex
;
5342 i
.vec_encoding
= vex_encoding_vex3
;
5346 i
.vec_encoding
= vex_encoding_evex
;
5350 i
.rex_encoding
= true;
5352 case Prefix_NoOptimize
:
5354 i
.no_optimize
= true;
5362 /* Add prefix, checking for repeated prefixes. */
5363 switch (add_prefix (current_templates
->start
->base_opcode
))
5368 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
5369 i
.notrack_prefix
= current_templates
->start
->name
;
5372 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
5373 i
.hle_prefix
= current_templates
->start
->name
;
5374 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
5375 i
.bnd_prefix
= current_templates
->start
->name
;
5377 i
.rep_prefix
= current_templates
->start
->name
;
5383 /* Skip past PREFIX_SEPARATOR and reset token_start. */
5390 if (!current_templates
)
5392 /* Deprecated functionality (new code should use pseudo-prefixes instead):
5393 Check if we should swap operand or force 32bit displacement in
5395 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
5396 i
.dir_encoding
= dir_encoding_swap
;
5397 else if (mnem_p
- 3 == dot_p
5400 i
.disp_encoding
= disp_encoding_8bit
;
5401 else if (mnem_p
- 4 == dot_p
5405 i
.disp_encoding
= disp_encoding_32bit
;
5410 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5413 if (!current_templates
)
5416 if (mnem_p
> mnemonic
)
5418 /* See if we can get a match by trimming off a suffix. */
5421 case WORD_MNEM_SUFFIX
:
5422 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
5423 i
.suffix
= SHORT_MNEM_SUFFIX
;
5426 case BYTE_MNEM_SUFFIX
:
5427 case QWORD_MNEM_SUFFIX
:
5428 i
.suffix
= mnem_p
[-1];
5431 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5433 case SHORT_MNEM_SUFFIX
:
5434 case LONG_MNEM_SUFFIX
:
5437 i
.suffix
= mnem_p
[-1];
5440 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5448 if (intel_float_operand (mnemonic
) == 1)
5449 i
.suffix
= SHORT_MNEM_SUFFIX
;
5451 i
.suffix
= LONG_MNEM_SUFFIX
;
5454 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5460 if (!current_templates
)
5462 as_bad (_("no such instruction: `%s'"), token_start
);
5467 if (current_templates
->start
->opcode_modifier
.jump
== JUMP
5468 || current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
5470 /* Check for a branch hint. We allow ",pt" and ",pn" for
5471 predict taken and predict not taken respectively.
5472 I'm not sure that branch hints actually do anything on loop
5473 and jcxz insns (JumpByte) for current Pentium4 chips. They
5474 may work in the future and it doesn't hurt to accept them
5476 if (l
[0] == ',' && l
[1] == 'p')
5480 if (!add_prefix (DS_PREFIX_OPCODE
))
5484 else if (l
[2] == 'n')
5486 if (!add_prefix (CS_PREFIX_OPCODE
))
5492 /* Any other comma loses. */
5495 as_bad (_("invalid character %s in mnemonic"),
5496 output_invalid (*l
));
5500 /* Check if instruction is supported on specified architecture. */
5502 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
5504 supported
|= cpu_flags_match (t
);
5505 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
5507 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
))
5508 as_warn (_("use .code16 to ensure correct addressing mode"));
5514 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
5515 as_bad (flag_code
== CODE_64BIT
5516 ? _("`%s' is not supported in 64-bit mode")
5517 : _("`%s' is only supported in 64-bit mode"),
5518 current_templates
->start
->name
);
5520 as_bad (_("`%s' is not supported on `%s%s'"),
5521 current_templates
->start
->name
,
5522 cpu_arch_name
? cpu_arch_name
: default_arch
,
5523 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
5529 parse_operands (char *l
, const char *mnemonic
)
5533 /* 1 if operand is pending after ','. */
5534 unsigned int expecting_operand
= 0;
5536 /* Non-zero if operand parens not balanced. */
5537 unsigned int paren_not_balanced
;
5539 while (*l
!= END_OF_INSN
)
5541 /* Skip optional white space before operand. */
5542 if (is_space_char (*l
))
5544 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
5546 as_bad (_("invalid character %s before operand %d"),
5547 output_invalid (*l
),
5551 token_start
= l
; /* After white space. */
5552 paren_not_balanced
= 0;
5553 while (paren_not_balanced
|| *l
!= ',')
5555 if (*l
== END_OF_INSN
)
5557 if (paren_not_balanced
)
5559 know (!intel_syntax
);
5560 as_bad (_("unbalanced parenthesis in operand %d."),
5565 break; /* we are done */
5567 else if (!is_operand_char (*l
) && !is_space_char (*l
) && *l
!= '"')
5569 as_bad (_("invalid character %s in operand %d"),
5570 output_invalid (*l
),
5577 ++paren_not_balanced
;
5579 --paren_not_balanced
;
5583 if (l
!= token_start
)
5584 { /* Yes, we've read in another operand. */
5585 unsigned int operand_ok
;
5586 this_operand
= i
.operands
++;
5587 if (i
.operands
> MAX_OPERANDS
)
5589 as_bad (_("spurious operands; (%d operands/instruction max)"),
5593 i
.types
[this_operand
].bitfield
.unspecified
= 1;
5594 /* Now parse operand adding info to 'i' as we go along. */
5595 END_STRING_AND_SAVE (l
);
5597 if (i
.mem_operands
> 1)
5599 as_bad (_("too many memory references for `%s'"),
5606 i386_intel_operand (token_start
,
5607 intel_float_operand (mnemonic
));
5609 operand_ok
= i386_att_operand (token_start
);
5611 RESTORE_END_STRING (l
);
5617 if (expecting_operand
)
5619 expecting_operand_after_comma
:
5620 as_bad (_("expecting operand after ','; got nothing"));
5625 as_bad (_("expecting operand before ','; got nothing"));
5630 /* Now *l must be either ',' or END_OF_INSN. */
5633 if (*++l
== END_OF_INSN
)
5635 /* Just skip it, if it's \n complain. */
5636 goto expecting_operand_after_comma
;
5638 expecting_operand
= 1;
5645 swap_2_operands (unsigned int xchg1
, unsigned int xchg2
)
5647 union i386_op temp_op
;
5648 i386_operand_type temp_type
;
5649 unsigned int temp_flags
;
5650 enum bfd_reloc_code_real temp_reloc
;
5652 temp_type
= i
.types
[xchg2
];
5653 i
.types
[xchg2
] = i
.types
[xchg1
];
5654 i
.types
[xchg1
] = temp_type
;
5656 temp_flags
= i
.flags
[xchg2
];
5657 i
.flags
[xchg2
] = i
.flags
[xchg1
];
5658 i
.flags
[xchg1
] = temp_flags
;
5660 temp_op
= i
.op
[xchg2
];
5661 i
.op
[xchg2
] = i
.op
[xchg1
];
5662 i
.op
[xchg1
] = temp_op
;
5664 temp_reloc
= i
.reloc
[xchg2
];
5665 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
5666 i
.reloc
[xchg1
] = temp_reloc
;
5670 if (i
.mask
.operand
== xchg1
)
5671 i
.mask
.operand
= xchg2
;
5672 else if (i
.mask
.operand
== xchg2
)
5673 i
.mask
.operand
= xchg1
;
5675 if (i
.broadcast
.type
)
5677 if (i
.broadcast
.operand
== xchg1
)
5678 i
.broadcast
.operand
= xchg2
;
5679 else if (i
.broadcast
.operand
== xchg2
)
5680 i
.broadcast
.operand
= xchg1
;
5682 if (i
.rounding
.type
!= rc_none
)
5684 if (i
.rounding
.operand
== xchg1
)
5685 i
.rounding
.operand
= xchg2
;
5686 else if (i
.rounding
.operand
== xchg2
)
5687 i
.rounding
.operand
= xchg1
;
5692 swap_operands (void)
5698 swap_2_operands (1, i
.operands
- 2);
5702 swap_2_operands (0, i
.operands
- 1);
5708 if (i
.mem_operands
== 2)
5710 const reg_entry
*temp_seg
;
5711 temp_seg
= i
.seg
[0];
5712 i
.seg
[0] = i
.seg
[1];
5713 i
.seg
[1] = temp_seg
;
5717 /* Try to ensure constant immediates are represented in the smallest
5722 char guess_suffix
= 0;
5726 guess_suffix
= i
.suffix
;
5727 else if (i
.reg_operands
)
5729 /* Figure out a suffix from the last register operand specified.
5730 We can't do this properly yet, i.e. excluding special register
5731 instances, but the following works for instructions with
5732 immediates. In any case, we can't set i.suffix yet. */
5733 for (op
= i
.operands
; --op
>= 0;)
5734 if (i
.types
[op
].bitfield
.class != Reg
)
5736 else if (i
.types
[op
].bitfield
.byte
)
5738 guess_suffix
= BYTE_MNEM_SUFFIX
;
5741 else if (i
.types
[op
].bitfield
.word
)
5743 guess_suffix
= WORD_MNEM_SUFFIX
;
5746 else if (i
.types
[op
].bitfield
.dword
)
5748 guess_suffix
= LONG_MNEM_SUFFIX
;
5751 else if (i
.types
[op
].bitfield
.qword
)
5753 guess_suffix
= QWORD_MNEM_SUFFIX
;
5757 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5758 guess_suffix
= WORD_MNEM_SUFFIX
;
5760 for (op
= i
.operands
; --op
>= 0;)
5761 if (operand_type_check (i
.types
[op
], imm
))
5763 switch (i
.op
[op
].imms
->X_op
)
5766 /* If a suffix is given, this operand may be shortened. */
5767 switch (guess_suffix
)
5769 case LONG_MNEM_SUFFIX
:
5770 i
.types
[op
].bitfield
.imm32
= 1;
5771 i
.types
[op
].bitfield
.imm64
= 1;
5773 case WORD_MNEM_SUFFIX
:
5774 i
.types
[op
].bitfield
.imm16
= 1;
5775 i
.types
[op
].bitfield
.imm32
= 1;
5776 i
.types
[op
].bitfield
.imm32s
= 1;
5777 i
.types
[op
].bitfield
.imm64
= 1;
5779 case BYTE_MNEM_SUFFIX
:
5780 i
.types
[op
].bitfield
.imm8
= 1;
5781 i
.types
[op
].bitfield
.imm8s
= 1;
5782 i
.types
[op
].bitfield
.imm16
= 1;
5783 i
.types
[op
].bitfield
.imm32
= 1;
5784 i
.types
[op
].bitfield
.imm32s
= 1;
5785 i
.types
[op
].bitfield
.imm64
= 1;
5789 /* If this operand is at most 16 bits, convert it
5790 to a signed 16 bit number before trying to see
5791 whether it will fit in an even smaller size.
5792 This allows a 16-bit operand such as $0xffe0 to
5793 be recognised as within Imm8S range. */
5794 if ((i
.types
[op
].bitfield
.imm16
)
5795 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
5797 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5798 ^ 0x8000) - 0x8000);
5801 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5802 if ((i
.types
[op
].bitfield
.imm32
)
5803 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
5806 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5807 ^ ((offsetT
) 1 << 31))
5808 - ((offsetT
) 1 << 31));
5812 = operand_type_or (i
.types
[op
],
5813 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
5815 /* We must avoid matching of Imm32 templates when 64bit
5816 only immediate is available. */
5817 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
5818 i
.types
[op
].bitfield
.imm32
= 0;
5825 /* Symbols and expressions. */
5827 /* Convert symbolic operand to proper sizes for matching, but don't
5828 prevent matching a set of insns that only supports sizes other
5829 than those matching the insn suffix. */
5831 i386_operand_type mask
, allowed
;
5832 const insn_template
*t
= current_templates
->start
;
5834 operand_type_set (&mask
, 0);
5835 allowed
= t
->operand_types
[op
];
5837 while (++t
< current_templates
->end
)
5839 allowed
= operand_type_and (allowed
, anyimm
);
5840 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
5842 switch (guess_suffix
)
5844 case QWORD_MNEM_SUFFIX
:
5845 mask
.bitfield
.imm64
= 1;
5846 mask
.bitfield
.imm32s
= 1;
5848 case LONG_MNEM_SUFFIX
:
5849 mask
.bitfield
.imm32
= 1;
5851 case WORD_MNEM_SUFFIX
:
5852 mask
.bitfield
.imm16
= 1;
5854 case BYTE_MNEM_SUFFIX
:
5855 mask
.bitfield
.imm8
= 1;
5860 allowed
= operand_type_and (mask
, allowed
);
5861 if (!operand_type_all_zero (&allowed
))
5862 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
5869 /* Try to use the smallest displacement type too. */
5871 optimize_disp (void)
5875 for (op
= i
.operands
; --op
>= 0;)
5876 if (operand_type_check (i
.types
[op
], disp
))
5878 if (i
.op
[op
].disps
->X_op
== O_constant
)
5880 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
5882 if (i
.types
[op
].bitfield
.disp16
5883 && (op_disp
& ~(offsetT
) 0xffff) == 0)
5885 /* If this operand is at most 16 bits, convert
5886 to a signed 16 bit number and don't use 64bit
5888 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
5889 i
.types
[op
].bitfield
.disp64
= 0;
5891 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
5893 i
.types
[op
].bitfield
.disp8
= 0;
5894 i
.types
[op
].bitfield
.disp16
= 0;
5895 i
.types
[op
].bitfield
.disp32
= 0;
5896 i
.types
[op
].bitfield
.disp32s
= 0;
5897 i
.types
[op
].bitfield
.disp64
= 0;
5902 else if (flag_code
== CODE_64BIT
)
5904 if (want_disp32 (current_templates
->start
)
5905 && fits_in_unsigned_long (op_disp
))
5906 i
.types
[op
].bitfield
.disp32
= 1;
5908 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5909 if (i
.types
[op
].bitfield
.disp32
5910 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
5912 /* If this operand is at most 32 bits, convert
5913 to a signed 32 bit number and don't use 64bit
5915 op_disp
&= (((offsetT
) 2 << 31) - 1);
5916 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
5917 i
.types
[op
].bitfield
.disp64
= 0;
5920 if (fits_in_signed_long (op_disp
))
5922 i
.types
[op
].bitfield
.disp64
= 0;
5923 i
.types
[op
].bitfield
.disp32s
= 1;
5927 if ((i
.types
[op
].bitfield
.disp32
5928 || i
.types
[op
].bitfield
.disp32s
5929 || i
.types
[op
].bitfield
.disp16
)
5930 && fits_in_disp8 (op_disp
))
5931 i
.types
[op
].bitfield
.disp8
= 1;
5933 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5934 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
5936 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
5937 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
5938 i
.types
[op
].bitfield
.disp8
= 0;
5939 i
.types
[op
].bitfield
.disp16
= 0;
5940 i
.types
[op
].bitfield
.disp32
= 0;
5941 i
.types
[op
].bitfield
.disp32s
= 0;
5942 i
.types
[op
].bitfield
.disp64
= 0;
5945 /* We only support 64bit displacement on constants. */
5946 i
.types
[op
].bitfield
.disp64
= 0;
5950 /* Return 1 if there is a match in broadcast bytes between operand
5951 GIVEN and instruction template T. */
5954 match_broadcast_size (const insn_template
*t
, unsigned int given
)
5956 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
5957 && i
.types
[given
].bitfield
.byte
)
5958 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
5959 && i
.types
[given
].bitfield
.word
)
5960 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
5961 && i
.types
[given
].bitfield
.dword
)
5962 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
5963 && i
.types
[given
].bitfield
.qword
));
5966 /* Check if operands are valid for the instruction. */
5969 check_VecOperands (const insn_template
*t
)
5974 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5975 any one operand are implicity requiring AVX512VL support if the actual
5976 operand size is YMMword or XMMword. Since this function runs after
5977 template matching, there's no need to check for YMMword/XMMword in
5979 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
5980 if (!cpu_flags_all_zero (&cpu
)
5981 && !t
->cpu_flags
.bitfield
.cpuavx512vl
5982 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
5984 for (op
= 0; op
< t
->operands
; ++op
)
5986 if (t
->operand_types
[op
].bitfield
.zmmword
5987 && (i
.types
[op
].bitfield
.ymmword
5988 || i
.types
[op
].bitfield
.xmmword
))
5990 i
.error
= unsupported
;
5996 /* Without VSIB byte, we can't have a vector register for index. */
5997 if (!t
->opcode_modifier
.sib
5999 && (i
.index_reg
->reg_type
.bitfield
.xmmword
6000 || i
.index_reg
->reg_type
.bitfield
.ymmword
6001 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
6003 i
.error
= unsupported_vector_index_register
;
6007 /* Check if default mask is allowed. */
6008 if (t
->opcode_modifier
.nodefmask
6009 && (!i
.mask
.reg
|| i
.mask
.reg
->reg_num
== 0))
6011 i
.error
= no_default_mask
;
6015 /* For VSIB byte, we need a vector register for index, and all vector
6016 registers must be distinct. */
6017 if (t
->opcode_modifier
.sib
&& t
->opcode_modifier
.sib
!= SIBMEM
)
6020 || !((t
->opcode_modifier
.sib
== VECSIB128
6021 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
6022 || (t
->opcode_modifier
.sib
== VECSIB256
6023 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
6024 || (t
->opcode_modifier
.sib
== VECSIB512
6025 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
6027 i
.error
= invalid_vsib_address
;
6031 gas_assert (i
.reg_operands
== 2 || i
.mask
.reg
);
6032 if (i
.reg_operands
== 2 && !i
.mask
.reg
)
6034 gas_assert (i
.types
[0].bitfield
.class == RegSIMD
);
6035 gas_assert (i
.types
[0].bitfield
.xmmword
6036 || i
.types
[0].bitfield
.ymmword
);
6037 gas_assert (i
.types
[2].bitfield
.class == RegSIMD
);
6038 gas_assert (i
.types
[2].bitfield
.xmmword
6039 || i
.types
[2].bitfield
.ymmword
);
6040 if (operand_check
== check_none
)
6042 if (register_number (i
.op
[0].regs
)
6043 != register_number (i
.index_reg
)
6044 && register_number (i
.op
[2].regs
)
6045 != register_number (i
.index_reg
)
6046 && register_number (i
.op
[0].regs
)
6047 != register_number (i
.op
[2].regs
))
6049 if (operand_check
== check_error
)
6051 i
.error
= invalid_vector_register_set
;
6054 as_warn (_("mask, index, and destination registers should be distinct"));
6056 else if (i
.reg_operands
== 1 && i
.mask
.reg
)
6058 if (i
.types
[1].bitfield
.class == RegSIMD
6059 && (i
.types
[1].bitfield
.xmmword
6060 || i
.types
[1].bitfield
.ymmword
6061 || i
.types
[1].bitfield
.zmmword
)
6062 && (register_number (i
.op
[1].regs
)
6063 == register_number (i
.index_reg
)))
6065 if (operand_check
== check_error
)
6067 i
.error
= invalid_vector_register_set
;
6070 if (operand_check
!= check_none
)
6071 as_warn (_("index and destination registers should be distinct"));
6076 /* For AMX instructions with three tmmword operands, all tmmword operand must be
6078 if (t
->operand_types
[0].bitfield
.tmmword
6079 && i
.reg_operands
== 3)
6081 if (register_number (i
.op
[0].regs
)
6082 == register_number (i
.op
[1].regs
)
6083 || register_number (i
.op
[0].regs
)
6084 == register_number (i
.op
[2].regs
)
6085 || register_number (i
.op
[1].regs
)
6086 == register_number (i
.op
[2].regs
))
6088 i
.error
= invalid_tmm_register_set
;
6093 /* Check if broadcast is supported by the instruction and is applied
6094 to the memory operand. */
6095 if (i
.broadcast
.type
)
6097 i386_operand_type type
, overlap
;
6099 /* Check if specified broadcast is supported in this instruction,
6100 and its broadcast bytes match the memory operand. */
6101 op
= i
.broadcast
.operand
;
6102 if (!t
->opcode_modifier
.broadcast
6103 || !(i
.flags
[op
] & Operand_Mem
)
6104 || (!i
.types
[op
].bitfield
.unspecified
6105 && !match_broadcast_size (t
, op
)))
6108 i
.error
= unsupported_broadcast
;
6112 i
.broadcast
.bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
6113 * i
.broadcast
.type
);
6114 operand_type_set (&type
, 0);
6115 switch (i
.broadcast
.bytes
)
6118 type
.bitfield
.word
= 1;
6121 type
.bitfield
.dword
= 1;
6124 type
.bitfield
.qword
= 1;
6127 type
.bitfield
.xmmword
= 1;
6130 type
.bitfield
.ymmword
= 1;
6133 type
.bitfield
.zmmword
= 1;
6139 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
6140 if (t
->operand_types
[op
].bitfield
.class == RegSIMD
6141 && t
->operand_types
[op
].bitfield
.byte
6142 + t
->operand_types
[op
].bitfield
.word
6143 + t
->operand_types
[op
].bitfield
.dword
6144 + t
->operand_types
[op
].bitfield
.qword
> 1)
6146 overlap
.bitfield
.xmmword
= 0;
6147 overlap
.bitfield
.ymmword
= 0;
6148 overlap
.bitfield
.zmmword
= 0;
6150 if (operand_type_all_zero (&overlap
))
6153 if (t
->opcode_modifier
.checkregsize
)
6157 type
.bitfield
.baseindex
= 1;
6158 for (j
= 0; j
< i
.operands
; ++j
)
6161 && !operand_type_register_match(i
.types
[j
],
6162 t
->operand_types
[j
],
6164 t
->operand_types
[op
]))
6169 /* If broadcast is supported in this instruction, we need to check if
6170 operand of one-element size isn't specified without broadcast. */
6171 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
6173 /* Find memory operand. */
6174 for (op
= 0; op
< i
.operands
; op
++)
6175 if (i
.flags
[op
] & Operand_Mem
)
6177 gas_assert (op
< i
.operands
);
6178 /* Check size of the memory operand. */
6179 if (match_broadcast_size (t
, op
))
6181 i
.error
= broadcast_needed
;
6186 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
6188 /* Check if requested masking is supported. */
6191 switch (t
->opcode_modifier
.masking
)
6195 case MERGING_MASKING
:
6199 i
.error
= unsupported_masking
;
6203 case DYNAMIC_MASKING
:
6204 /* Memory destinations allow only merging masking. */
6205 if (i
.mask
.zeroing
&& i
.mem_operands
)
6207 /* Find memory operand. */
6208 for (op
= 0; op
< i
.operands
; op
++)
6209 if (i
.flags
[op
] & Operand_Mem
)
6211 gas_assert (op
< i
.operands
);
6212 if (op
== i
.operands
- 1)
6214 i
.error
= unsupported_masking
;
6224 /* Check if masking is applied to dest operand. */
6225 if (i
.mask
.reg
&& (i
.mask
.operand
!= i
.operands
- 1))
6227 i
.error
= mask_not_on_destination
;
6232 if (i
.rounding
.type
!= rc_none
)
6234 if (!t
->opcode_modifier
.sae
6235 || (i
.rounding
.type
!= saeonly
&& !t
->opcode_modifier
.staticrounding
))
6237 i
.error
= unsupported_rc_sae
;
6240 /* If the instruction has several immediate operands and one of
6241 them is rounding, the rounding operand should be the last
6242 immediate operand. */
6243 if (i
.imm_operands
> 1
6244 && i
.rounding
.operand
!= i
.imm_operands
- 1)
6246 i
.error
= rc_sae_operand_not_last_imm
;
6251 /* Check the special Imm4 cases; must be the first operand. */
6252 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
6254 if (i
.op
[0].imms
->X_op
!= O_constant
6255 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
6261 /* Turn off Imm<N> so that update_imm won't complain. */
6262 operand_type_set (&i
.types
[0], 0);
6265 /* Check vector Disp8 operand. */
6266 if (t
->opcode_modifier
.disp8memshift
6267 && i
.disp_encoding
!= disp_encoding_32bit
)
6269 if (i
.broadcast
.type
)
6270 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
6271 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
6272 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
6275 const i386_operand_type
*type
= NULL
;
6278 for (op
= 0; op
< i
.operands
; op
++)
6279 if (i
.flags
[op
] & Operand_Mem
)
6281 if (t
->opcode_modifier
.evex
== EVEXLIG
)
6282 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
6283 else if (t
->operand_types
[op
].bitfield
.xmmword
6284 + t
->operand_types
[op
].bitfield
.ymmword
6285 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
6286 type
= &t
->operand_types
[op
];
6287 else if (!i
.types
[op
].bitfield
.unspecified
)
6288 type
= &i
.types
[op
];
6290 else if (i
.types
[op
].bitfield
.class == RegSIMD
6291 && t
->opcode_modifier
.evex
!= EVEXLIG
)
6293 if (i
.types
[op
].bitfield
.zmmword
)
6295 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
6297 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
6303 if (type
->bitfield
.zmmword
)
6305 else if (type
->bitfield
.ymmword
)
6307 else if (type
->bitfield
.xmmword
)
6311 /* For the check in fits_in_disp8(). */
6312 if (i
.memshift
== 0)
6316 for (op
= 0; op
< i
.operands
; op
++)
6317 if (operand_type_check (i
.types
[op
], disp
)
6318 && i
.op
[op
].disps
->X_op
== O_constant
)
6320 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
6322 i
.types
[op
].bitfield
.disp8
= 1;
6325 i
.types
[op
].bitfield
.disp8
= 0;
6334 /* Check if encoding requirements are met by the instruction. */
6337 VEX_check_encoding (const insn_template
*t
)
6339 if (i
.vec_encoding
== vex_encoding_error
)
6341 i
.error
= unsupported
;
6345 if (i
.vec_encoding
== vex_encoding_evex
)
6347 /* This instruction must be encoded with EVEX prefix. */
6348 if (!is_evex_encoding (t
))
6350 i
.error
= unsupported
;
6356 if (!t
->opcode_modifier
.vex
)
6358 /* This instruction template doesn't have VEX prefix. */
6359 if (i
.vec_encoding
!= vex_encoding_default
)
6361 i
.error
= unsupported
;
/* NOTE(review): mangled extraction — lines split/elided as described at the
   top of this chunk.  Code kept byte-identical; only comments added.

   Purpose: walk the candidate templates in [current_templates->start,
   current_templates->end) and find the one matching the parsed instruction
   in `i` (operand count, CPU support, syntax flavor, suffix, operand
   types, direction/reversal, vector constraints).  Each rejection records
   a reason in i.error; after the loop, failure is reported via as_bad()
   using the most specific recorded error.  On success the template is
   copied with install_template(t) and post-processed (address-prefix
   displacement fixup, reverse-match opcode adjustment, ES-segment check
   for string insns).  Several braces, `continue`s and `break`s are among
   the elided lines — do not infer exact control flow from what is
   visible here alone.  */
6370 static const insn_template
*
6371 match_template (char mnem_suffix
)
6373 /* Points to template once we've found it. */
6374 const insn_template
*t
;
6375 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
6376 i386_operand_type overlap4
;
6377 unsigned int found_reverse_match
;
6378 i386_opcode_modifier suffix_check
;
6379 i386_operand_type operand_types
[MAX_OPERANDS
];
6380 int addr_prefix_disp
;
6381 unsigned int j
, size_match
, check_register
;
6382 enum i386_error specific_error
= 0;
6384 #if MAX_OPERANDS != 5
6385 # error "MAX_OPERANDS must be 5."
6388 found_reverse_match
= 0;
6389 addr_prefix_disp
= -1;
6391 /* Prepare for mnemonic suffix check. */
6392 memset (&suffix_check
, 0, sizeof (suffix_check
));
/* Translate the mnemonic suffix into the matching no_?suf template bit
   so templates excluding that suffix can be rejected below.  */
6393 switch (mnem_suffix
)
6395 case BYTE_MNEM_SUFFIX
:
6396 suffix_check
.no_bsuf
= 1;
6398 case WORD_MNEM_SUFFIX
:
6399 suffix_check
.no_wsuf
= 1;
6401 case SHORT_MNEM_SUFFIX
:
6402 suffix_check
.no_ssuf
= 1;
6404 case LONG_MNEM_SUFFIX
:
6405 suffix_check
.no_lsuf
= 1;
6407 case QWORD_MNEM_SUFFIX
:
6408 suffix_check
.no_qsuf
= 1;
6411 /* NB: In Intel syntax, normally we can check for memory operand
6412 size when there is no mnemonic suffix. But jmp and call have
6413 2 different encodings with Dword memory operand size, one with
6414 No_ldSuf and the other without. i.suffix is set to
6415 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
6416 if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
6417 suffix_check
.no_ldsuf
= 1;
6420 /* Must have right number of operands. */
6421 i
.error
= number_of_operands_mismatch
;
6423 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
6425 addr_prefix_disp
= -1;
6426 found_reverse_match
= 0;
6428 if (i
.operands
!= t
->operands
)
6431 /* Check processor support. */
6432 i
.error
= unsupported
;
6433 if (cpu_flags_match (t
) != CPU_FLAGS_PERFECT_MATCH
)
6436 /* Check Pseudo Prefix. */
6437 i
.error
= unsupported
;
6438 if (t
->opcode_modifier
.pseudovexprefix
6439 && !(i
.vec_encoding
== vex_encoding_vex
6440 || i
.vec_encoding
== vex_encoding_vex3
))
6443 /* Check AT&T mnemonic. */
6444 i
.error
= unsupported_with_intel_mnemonic
;
6445 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
6448 /* Check AT&T/Intel syntax. */
6449 i
.error
= unsupported_syntax
;
6450 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
6451 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
6454 /* Check Intel64/AMD64 ISA. */
6458 /* Default: Don't accept Intel64. */
6459 if (t
->opcode_modifier
.isa64
== INTEL64
)
6463 /* -mamd64: Don't accept Intel64 and Intel64 only. */
6464 if (t
->opcode_modifier
.isa64
>= INTEL64
)
6468 /* -mintel64: Don't accept AMD64. */
6469 if (t
->opcode_modifier
.isa64
== AMD64
&& flag_code
== CODE_64BIT
)
6474 /* Check the suffix. */
6475 i
.error
= invalid_instruction_suffix
;
6476 if ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
6477 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
6478 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
6479 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
6480 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
6481 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
))
6484 size_match
= operand_size_match (t
);
6488 /* This is intentionally not
6490 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
6492 as the case of a missing * on the operand is accepted (perhaps with
6493 a warning, issued further down). */
6494 if (i
.jumpabsolute
&& t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
6496 i
.error
= operand_type_mismatch
;
6500 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6501 operand_types
[j
] = t
->operand_types
[j
];
6503 /* In general, don't allow
6504 - 64-bit operands outside of 64-bit mode,
6505 - 32-bit operands on pre-386. */
6506 j
= i
.imm_operands
+ (t
->operands
> i
.imm_operands
+ 1);
6507 if (((i
.suffix
== QWORD_MNEM_SUFFIX
6508 && flag_code
!= CODE_64BIT
6509 && !(t
->opcode_modifier
.opcodespace
== SPACE_0F
6510 && t
->base_opcode
== 0xc7
6511 && t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
6512 && t
->extension_opcode
== 1) /* cmpxchg8b */)
6513 || (i
.suffix
== LONG_MNEM_SUFFIX
6514 && !cpu_arch_flags
.bitfield
.cpui386
))
6516 ? (t
->opcode_modifier
.mnemonicsize
!= IGNORESIZE
6517 && !intel_float_operand (t
->name
))
6518 : intel_float_operand (t
->name
) != 2)
6519 && (t
->operands
== i
.imm_operands
6520 || (operand_types
[i
.imm_operands
].bitfield
.class != RegMMX
6521 && operand_types
[i
.imm_operands
].bitfield
.class != RegSIMD
6522 && operand_types
[i
.imm_operands
].bitfield
.class != RegMask
)
6523 || (operand_types
[j
].bitfield
.class != RegMMX
6524 && operand_types
[j
].bitfield
.class != RegSIMD
6525 && operand_types
[j
].bitfield
.class != RegMask
))
6526 && !t
->opcode_modifier
.sib
)
6529 /* Do not verify operands when there are none. */
6532 if (VEX_check_encoding (t
))
6534 specific_error
= i
.error
;
6538 /* We've found a match; break out of loop. */
6542 if (!t
->opcode_modifier
.jump
6543 || t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)
6545 /* There should be only one Disp operand. */
6546 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6547 if (operand_type_check (operand_types
[j
], disp
))
6549 if (j
< MAX_OPERANDS
)
6551 bool override
= (i
.prefix
[ADDR_PREFIX
] != 0);
6553 addr_prefix_disp
= j
;
6555 /* Address size prefix will turn Disp64/Disp32S/Disp32/Disp16
6556 operand into Disp32/Disp32/Disp16/Disp32 operand. */
6560 override
= !override
;
6563 if (operand_types
[j
].bitfield
.disp32
6564 && operand_types
[j
].bitfield
.disp16
)
6566 operand_types
[j
].bitfield
.disp16
= override
;
6567 operand_types
[j
].bitfield
.disp32
= !override
;
6569 operand_types
[j
].bitfield
.disp32s
= 0;
6570 operand_types
[j
].bitfield
.disp64
= 0;
6574 if (operand_types
[j
].bitfield
.disp32s
6575 || operand_types
[j
].bitfield
.disp64
)
6577 operand_types
[j
].bitfield
.disp64
&= !override
;
6578 operand_types
[j
].bitfield
.disp32s
&= !override
;
6579 operand_types
[j
].bitfield
.disp32
= override
;
6581 operand_types
[j
].bitfield
.disp16
= 0;
6587 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
6588 if (i
.reloc
[0] == BFD_RELOC_386_GOT32
6589 && t
->base_opcode
== 0xa0
6590 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
)
6593 /* We check register size if needed. */
6594 if (t
->opcode_modifier
.checkregsize
)
6596 check_register
= (1 << t
->operands
) - 1;
6597 if (i
.broadcast
.type
)
6598 check_register
&= ~(1 << i
.broadcast
.operand
);
6603 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
6604 switch (t
->operands
)
6607 if (!operand_type_match (overlap0
, i
.types
[0]))
6611 /* xchg %eax, %eax is a special case. It is an alias for nop
6612 only in 32bit mode and we can use opcode 0x90. In 64bit
6613 mode, we can't use 0x90 for xchg %eax, %eax since it should
6614 zero-extend %eax to %rax. */
6615 if (flag_code
== CODE_64BIT
6616 && t
->base_opcode
== 0x90
6617 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
6618 && i
.types
[0].bitfield
.instance
== Accum
6619 && i
.types
[0].bitfield
.dword
6620 && i
.types
[1].bitfield
.instance
== Accum
6621 && i
.types
[1].bitfield
.dword
)
6623 /* xrelease mov %eax, <disp> is another special case. It must not
6624 match the accumulator-only encoding of mov. */
6625 if (flag_code
!= CODE_64BIT
6627 && t
->base_opcode
== 0xa0
6628 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
6629 && i
.types
[0].bitfield
.instance
== Accum
6630 && (i
.flags
[1] & Operand_Mem
))
6635 if (!(size_match
& MATCH_STRAIGHT
))
6637 /* Reverse direction of operands if swapping is possible in the first
6638 place (operands need to be symmetric) and
6639 - the load form is requested, and the template is a store form,
6640 - the store form is requested, and the template is a load form,
6641 - the non-default (swapped) form is requested. */
6642 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
6643 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
6644 && !operand_type_all_zero (&overlap1
))
6645 switch (i
.dir_encoding
)
6647 case dir_encoding_load
:
6648 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6649 || t
->opcode_modifier
.regmem
)
6653 case dir_encoding_store
:
6654 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6655 && !t
->opcode_modifier
.regmem
)
6659 case dir_encoding_swap
:
6662 case dir_encoding_default
:
6665 /* If we want store form, we skip the current load. */
6666 if ((i
.dir_encoding
== dir_encoding_store
6667 || i
.dir_encoding
== dir_encoding_swap
)
6668 && i
.mem_operands
== 0
6669 && t
->opcode_modifier
.load
)
6674 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
6675 if (!operand_type_match (overlap0
, i
.types
[0])
6676 || !operand_type_match (overlap1
, i
.types
[1])
6677 || ((check_register
& 3) == 3
6678 && !operand_type_register_match (i
.types
[0],
6683 /* Check if other direction is valid ... */
6684 if (!t
->opcode_modifier
.d
)
6688 if (!(size_match
& MATCH_REVERSE
))
6690 /* Try reversing direction of operands. */
6691 overlap0
= operand_type_and (i
.types
[0], operand_types
[i
.operands
- 1]);
6692 overlap1
= operand_type_and (i
.types
[i
.operands
- 1], operand_types
[0]);
6693 if (!operand_type_match (overlap0
, i
.types
[0])
6694 || !operand_type_match (overlap1
, i
.types
[i
.operands
- 1])
6696 && !operand_type_register_match (i
.types
[0],
6697 operand_types
[i
.operands
- 1],
6698 i
.types
[i
.operands
- 1],
6701 /* Does not match either direction. */
6704 /* found_reverse_match holds which of D or FloatR
6706 if (!t
->opcode_modifier
.d
)
6707 found_reverse_match
= 0;
6708 else if (operand_types
[0].bitfield
.tbyte
)
6709 found_reverse_match
= Opcode_FloatD
;
6710 else if (operand_types
[0].bitfield
.xmmword
6711 || operand_types
[i
.operands
- 1].bitfield
.xmmword
6712 || operand_types
[0].bitfield
.class == RegMMX
6713 || operand_types
[i
.operands
- 1].bitfield
.class == RegMMX
6714 || is_any_vex_encoding(t
))
6715 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
6716 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
6718 found_reverse_match
= Opcode_D
;
6719 if (t
->opcode_modifier
.floatr
)
6720 found_reverse_match
|= Opcode_FloatR
;
6724 /* Found a forward 2 operand match here. */
6725 switch (t
->operands
)
6728 overlap4
= operand_type_and (i
.types
[4],
6732 overlap3
= operand_type_and (i
.types
[3],
6736 overlap2
= operand_type_and (i
.types
[2],
6741 switch (t
->operands
)
6744 if (!operand_type_match (overlap4
, i
.types
[4])
6745 || !operand_type_register_match (i
.types
[3],
6752 if (!operand_type_match (overlap3
, i
.types
[3])
6753 || ((check_register
& 0xa) == 0xa
6754 && !operand_type_register_match (i
.types
[1],
6758 || ((check_register
& 0xc) == 0xc
6759 && !operand_type_register_match (i
.types
[2],
6766 /* Here we make use of the fact that there are no
6767 reverse match 3 operand instructions. */
6768 if (!operand_type_match (overlap2
, i
.types
[2])
6769 || ((check_register
& 5) == 5
6770 && !operand_type_register_match (i
.types
[0],
6774 || ((check_register
& 6) == 6
6775 && !operand_type_register_match (i
.types
[1],
6783 /* Found either forward/reverse 2, 3 or 4 operand match here:
6784 slip through to break. */
6787 /* Check if vector operands are valid. */
6788 if (check_VecOperands (t
))
6790 specific_error
= i
.error
;
6794 /* Check if VEX/EVEX encoding requirements can be satisfied. */
6795 if (VEX_check_encoding (t
))
6797 specific_error
= i
.error
;
6801 /* We've found a match; break out of loop. */
/* Loop exhausted without a match: report the most specific error seen.  */
6805 if (t
== current_templates
->end
)
6807 /* We found no match. */
6808 const char *err_msg
;
6809 switch (specific_error
? specific_error
: i
.error
)
6813 case operand_size_mismatch
:
6814 err_msg
= _("operand size mismatch");
6816 case operand_type_mismatch
:
6817 err_msg
= _("operand type mismatch");
6819 case register_type_mismatch
:
6820 err_msg
= _("register type mismatch");
6822 case number_of_operands_mismatch
:
6823 err_msg
= _("number of operands mismatch");
6825 case invalid_instruction_suffix
:
6826 err_msg
= _("invalid instruction suffix");
6829 err_msg
= _("constant doesn't fit in 4 bits");
6831 case unsupported_with_intel_mnemonic
:
6832 err_msg
= _("unsupported with Intel mnemonic");
6834 case unsupported_syntax
:
6835 err_msg
= _("unsupported syntax");
6838 as_bad (_("unsupported instruction `%s'"),
6839 current_templates
->start
->name
);
6841 case invalid_sib_address
:
6842 err_msg
= _("invalid SIB address");
6844 case invalid_vsib_address
:
6845 err_msg
= _("invalid VSIB address");
6847 case invalid_vector_register_set
:
6848 err_msg
= _("mask, index, and destination registers must be distinct");
6850 case invalid_tmm_register_set
:
6851 err_msg
= _("all tmm registers must be distinct");
6853 case unsupported_vector_index_register
:
6854 err_msg
= _("unsupported vector index register");
6856 case unsupported_broadcast
:
6857 err_msg
= _("unsupported broadcast");
6859 case broadcast_needed
:
6860 err_msg
= _("broadcast is needed for operand of such type");
6862 case unsupported_masking
:
6863 err_msg
= _("unsupported masking");
6865 case mask_not_on_destination
:
6866 err_msg
= _("mask not on destination operand");
6868 case no_default_mask
:
6869 err_msg
= _("default mask isn't allowed");
6871 case unsupported_rc_sae
:
6872 err_msg
= _("unsupported static rounding/sae");
6874 case rc_sae_operand_not_last_imm
:
6876 err_msg
= _("RC/SAE operand must precede immediate operands");
6878 err_msg
= _("RC/SAE operand must follow immediate operands");
6880 case invalid_register_operand
:
6881 err_msg
= _("invalid register operand");
6884 as_bad (_("%s for `%s'"), err_msg
,
6885 current_templates
->start
->name
);
6889 if (!quiet_warnings
)
6892 && (i
.jumpabsolute
!= (t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)))
6893 as_warn (_("indirect %s without `*'"), t
->name
);
6895 if (t
->opcode_modifier
.isprefix
6896 && t
->opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6898 /* Warn them that a data or address size prefix doesn't
6899 affect assembly of the next line of code. */
6900 as_warn (_("stand-alone `%s' prefix"), t
->name
);
6904 /* Copy the template we found. */
6905 install_template (t
);
6907 if (addr_prefix_disp
!= -1)
6908 i
.tm
.operand_types
[addr_prefix_disp
]
6909 = operand_types
[addr_prefix_disp
];
6911 if (found_reverse_match
)
6913 /* If we found a reverse match we must alter the opcode direction
6914 bit and clear/flip the regmem modifier one. found_reverse_match
6915 holds bits to change (different for int & float insns). */
6917 i
.tm
.base_opcode
^= found_reverse_match
;
6919 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
6920 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
6922 /* Certain SIMD insns have their load forms specified in the opcode
6923 table, and hence we need to _set_ RegMem instead of clearing it.
6924 We need to avoid setting the bit though on insns like KMOVW. */
6925 i
.tm
.opcode_modifier
.regmem
6926 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
6927 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
6928 && !i
.tm
.opcode_modifier
.regmem
;
/* String instructions: operand `op' must use the %es segment; any other
   override is diagnosed, then the segment is normalized into i.seg[op].  */
6937 unsigned int es_op
= i
.tm
.opcode_modifier
.isstring
- IS_STRING_ES_OP0
;
6938 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.baseindex
? es_op
: 0;
6940 if (i
.seg
[op
] != NULL
&& i
.seg
[op
] != reg_es
)
6942 as_bad (_("`%s' operand %u must use `%ses' segment"),
6944 intel_syntax
? i
.tm
.operands
- es_op
: es_op
+ 1,
6949 /* There's only ever one segment override allowed per instruction.
6950 This instruction possibly has a legal segment override on the
6951 second operand, so copy the segment to where non-string
6952 instructions store it, allowing common code. */
6953 i
.seg
[op
] = i
.seg
[1];
/* NOTE(review): mangled extraction — lines split/elided as described at the
   top of this chunk.  Code kept byte-identical; only comments added.

   Purpose: determine or validate the operand-size suffix (i.suffix) for
   the matched template: take it from an explicit Size16/32/64 modifier,
   invent it from GPR operands, or default it per mode; diagnose ambiguous
   sizes; then fold the size into the encoding (set the W bit via
   `i.tm.base_opcode |= 1` / `|= 8`, add data/address size prefixes via
   add_prefix, handle REX.W for 'q', and the addrprefixopreg register-size
   checks).  Return statements and several braces are among the elided
   lines — exact success/failure values cannot be confirmed from this
   view.  */
6959 process_suffix (void)
6961 bool is_crc32
= false, is_movx
= false;
6963 /* If matched instruction specifies an explicit instruction mnemonic
6965 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
6966 i
.suffix
= WORD_MNEM_SUFFIX
;
6967 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
6968 i
.suffix
= LONG_MNEM_SUFFIX
;
6969 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
6970 i
.suffix
= QWORD_MNEM_SUFFIX
;
6971 else if (i
.reg_operands
6972 && (i
.operands
> 1 || i
.types
[0].bitfield
.class == Reg
)
6973 && !i
.tm
.opcode_modifier
.addrprefixopreg
)
6975 unsigned int numop
= i
.operands
;
/* Identify movsx/movzx (0F BE/BF/B6/B7 or movsxd 0x63) and crc32
   (F2 0F 38 F0/F1), which get special suffix treatment below.  */
6978 is_movx
= (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
6979 && (i
.tm
.base_opcode
| 8) == 0xbe)
6980 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
6981 && i
.tm
.base_opcode
== 0x63
6982 && i
.tm
.cpu_flags
.bitfield
.cpu64
);
6985 is_crc32
= (i
.tm
.base_opcode
== 0xf0
6986 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
6987 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
);
6989 /* movsx/movzx want only their source operand considered here, for the
6990 ambiguity checking below. The suffix will be replaced afterwards
6991 to represent the destination (register). */
6992 if (is_movx
&& (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63))
6995 /* crc32 needs REX.W set regardless of suffix / source operand size. */
6996 if (is_crc32
&& i
.tm
.operand_types
[1].bitfield
.qword
)
6999 /* If there's no instruction mnemonic suffix we try to invent one
7000 based on GPR operands. */
7003 /* We take i.suffix from the last register operand specified,
7004 Destination register type is more significant than source
7005 register type. crc32 in SSE4.2 prefers source register
7007 unsigned int op
= is_crc32
? 1 : i
.operands
;
7010 if (i
.tm
.operand_types
[op
].bitfield
.instance
== InstanceNone
7011 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7013 if (i
.types
[op
].bitfield
.class != Reg
)
7015 if (i
.types
[op
].bitfield
.byte
)
7016 i
.suffix
= BYTE_MNEM_SUFFIX
;
7017 else if (i
.types
[op
].bitfield
.word
)
7018 i
.suffix
= WORD_MNEM_SUFFIX
;
7019 else if (i
.types
[op
].bitfield
.dword
)
7020 i
.suffix
= LONG_MNEM_SUFFIX
;
7021 else if (i
.types
[op
].bitfield
.qword
)
7022 i
.suffix
= QWORD_MNEM_SUFFIX
;
7028 /* As an exception, movsx/movzx silently default to a byte source
7030 if (is_movx
&& i
.tm
.opcode_modifier
.w
&& !i
.suffix
&& !intel_syntax
)
7031 i
.suffix
= BYTE_MNEM_SUFFIX
;
/* An explicit suffix was given: validate the register operands against
   it via the check_*_reg helpers.  */
7033 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7036 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7037 && i
.tm
.opcode_modifier
.no_bsuf
)
7039 else if (!check_byte_reg ())
7042 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
7045 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7046 && i
.tm
.opcode_modifier
.no_lsuf
7047 && !i
.tm
.opcode_modifier
.todword
7048 && !i
.tm
.opcode_modifier
.toqword
)
7050 else if (!check_long_reg ())
7053 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7056 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7057 && i
.tm
.opcode_modifier
.no_qsuf
7058 && !i
.tm
.opcode_modifier
.todword
7059 && !i
.tm
.opcode_modifier
.toqword
)
7061 else if (!check_qword_reg ())
7064 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7067 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7068 && i
.tm
.opcode_modifier
.no_wsuf
)
7070 else if (!check_word_reg ())
7073 else if (intel_syntax
7074 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
)
7075 /* Do nothing if the instruction is going to ignore the prefix. */
7080 /* Undo the movsx/movzx change done above. */
7083 else if (i
.tm
.opcode_modifier
.mnemonicsize
== DEFAULTSIZE
7086 i
.suffix
= stackop_size
;
7087 if (stackop_size
== LONG_MNEM_SUFFIX
)
7089 /* stackop_size is set to LONG_MNEM_SUFFIX for the
7090 .code16gcc directive to support 16-bit mode with
7091 32-bit address. For IRET without a suffix, generate
7092 16-bit IRET (opcode 0xcf) to return from an interrupt
7094 if (i
.tm
.base_opcode
== 0xcf)
7096 i
.suffix
= WORD_MNEM_SUFFIX
;
7097 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
7099 /* Warn about changed behavior for segment register push/pop. */
7100 else if ((i
.tm
.base_opcode
| 1) == 0x07)
7101 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
/* Suffix-less branch/jump and [ls][gi]dt handling: pick the suffix the
   current code size dictates, honoring the template's no_?suf bits.  */
7106 && (i
.tm
.opcode_modifier
.jump
== JUMP_ABSOLUTE
7107 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
7108 || i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
7109 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
7110 && i
.tm
.base_opcode
== 0x01 /* [ls][gi]dt */
7111 && i
.tm
.extension_opcode
<= 3)))
7116 if (!i
.tm
.opcode_modifier
.no_qsuf
)
7118 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
7119 || i
.tm
.opcode_modifier
.no_lsuf
)
7120 i
.suffix
= QWORD_MNEM_SUFFIX
;
7125 if (!i
.tm
.opcode_modifier
.no_lsuf
)
7126 i
.suffix
= LONG_MNEM_SUFFIX
;
7129 if (!i
.tm
.opcode_modifier
.no_wsuf
)
7130 i
.suffix
= WORD_MNEM_SUFFIX
;
/* Still no suffix: collect the set of suffixes the template permits and
   diagnose if more than one remains (ambiguous operand size).  */
7136 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
7137 /* Also cover lret/retf/iret in 64-bit mode. */
7138 || (flag_code
== CODE_64BIT
7139 && !i
.tm
.opcode_modifier
.no_lsuf
7140 && !i
.tm
.opcode_modifier
.no_qsuf
))
7141 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7142 /* Explicit sizing prefixes are assumed to disambiguate insns. */
7143 && !i
.prefix
[DATA_PREFIX
] && !(i
.prefix
[REX_PREFIX
] & REX_W
)
7144 /* Accept FLDENV et al without suffix. */
7145 && (i
.tm
.opcode_modifier
.no_ssuf
|| i
.tm
.opcode_modifier
.floatmf
))
7147 unsigned int suffixes
, evex
= 0;
7149 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
7150 if (!i
.tm
.opcode_modifier
.no_wsuf
)
7152 if (!i
.tm
.opcode_modifier
.no_lsuf
)
7154 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
7156 if (!i
.tm
.opcode_modifier
.no_ssuf
)
7158 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
7161 /* For [XYZ]MMWORD operands inspect operand sizes. While generally
7162 also suitable for AT&T syntax mode, it was requested that this be
7163 restricted to just Intel syntax. */
7164 if (intel_syntax
&& is_any_vex_encoding (&i
.tm
) && !i
.broadcast
.type
)
7168 for (op
= 0; op
< i
.tm
.operands
; ++op
)
7170 if (is_evex_encoding (&i
.tm
)
7171 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
7173 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7174 i
.tm
.operand_types
[op
].bitfield
.xmmword
= 0;
7175 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7176 i
.tm
.operand_types
[op
].bitfield
.ymmword
= 0;
7177 if (!i
.tm
.opcode_modifier
.evex
7178 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
7179 i
.tm
.opcode_modifier
.evex
= EVEX512
;
7182 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
7183 + i
.tm
.operand_types
[op
].bitfield
.ymmword
7184 + i
.tm
.operand_types
[op
].bitfield
.zmmword
< 2)
7187 /* Any properly sized operand disambiguates the insn. */
7188 if (i
.types
[op
].bitfield
.xmmword
7189 || i
.types
[op
].bitfield
.ymmword
7190 || i
.types
[op
].bitfield
.zmmword
)
7192 suffixes
&= ~(7 << 6);
7197 if ((i
.flags
[op
] & Operand_Mem
)
7198 && i
.tm
.operand_types
[op
].bitfield
.unspecified
)
7200 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
)
7202 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7204 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7206 if (is_evex_encoding (&i
.tm
))
7212 /* Are multiple suffixes / operand sizes allowed? */
7213 if (suffixes
& (suffixes
- 1))
7216 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
7217 || operand_check
== check_error
))
7219 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
7222 if (operand_check
== check_error
)
7224 as_bad (_("no instruction mnemonic suffix given and "
7225 "no register operands; can't size `%s'"), i
.tm
.name
);
7228 if (operand_check
== check_warning
)
7229 as_warn (_("%s; using default for `%s'"),
7231 ? _("ambiguous operand size")
7232 : _("no instruction mnemonic suffix given and "
7233 "no register operands"),
7236 if (i
.tm
.opcode_modifier
.floatmf
)
7237 i
.suffix
= SHORT_MNEM_SUFFIX
;
7239 /* handled below */;
7241 i
.tm
.opcode_modifier
.evex
= evex
;
7242 else if (flag_code
== CODE_16BIT
)
7243 i
.suffix
= WORD_MNEM_SUFFIX
;
7244 else if (!i
.tm
.opcode_modifier
.no_lsuf
)
7245 i
.suffix
= LONG_MNEM_SUFFIX
;
7247 i
.suffix
= QWORD_MNEM_SUFFIX
;
7253 /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
7254 In AT&T syntax, if there is no suffix (warned about above), the default
7255 will be byte extension. */
7256 if (i
.tm
.opcode_modifier
.w
&& i
.suffix
&& i
.suffix
!= BYTE_MNEM_SUFFIX
)
7257 i
.tm
.base_opcode
|= 1;
7259 /* For further processing, the suffix should represent the destination
7260 (register). This is already the case when one was used with
7261 mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
7262 no suffix to begin with. */
7263 if (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63 || !i
.suffix
)
7265 if (i
.types
[1].bitfield
.word
)
7266 i
.suffix
= WORD_MNEM_SUFFIX
;
7267 else if (i
.types
[1].bitfield
.qword
)
7268 i
.suffix
= QWORD_MNEM_SUFFIX
;
7270 i
.suffix
= LONG_MNEM_SUFFIX
;
7272 i
.tm
.opcode_modifier
.w
= 0;
7276 if (!i
.tm
.opcode_modifier
.modrm
&& i
.reg_operands
&& i
.tm
.operands
< 3)
7277 i
.short_form
= (i
.tm
.operand_types
[0].bitfield
.class == Reg
)
7278 != (i
.tm
.operand_types
[1].bitfield
.class == Reg
);
7280 /* Change the opcode based on the operand size given by i.suffix. */
7283 /* Size floating point instruction. */
7284 case LONG_MNEM_SUFFIX
:
7285 if (i
.tm
.opcode_modifier
.floatmf
)
7287 i
.tm
.base_opcode
^= 4;
7291 case WORD_MNEM_SUFFIX
:
7292 case QWORD_MNEM_SUFFIX
:
7293 /* It's not a byte, select word/dword operation. */
7294 if (i
.tm
.opcode_modifier
.w
)
7297 i
.tm
.base_opcode
|= 8;
7299 i
.tm
.base_opcode
|= 1;
7302 case SHORT_MNEM_SUFFIX
:
7303 /* Now select between word & dword operations via the operand
7304 size prefix, except for instructions that will ignore this
7306 if (i
.suffix
!= QWORD_MNEM_SUFFIX
7307 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7308 && !i
.tm
.opcode_modifier
.floatmf
7309 && !is_any_vex_encoding (&i
.tm
)
7310 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
7311 || (flag_code
== CODE_64BIT
7312 && i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)))
7314 unsigned int prefix
= DATA_PREFIX_OPCODE
;
7316 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
) /* jcxz, loop */
7317 prefix
= ADDR_PREFIX_OPCODE
;
7319 if (!add_prefix (prefix
))
7323 /* Set mode64 for an operand. */
7324 if (i
.suffix
== QWORD_MNEM_SUFFIX
7325 && flag_code
== CODE_64BIT
7326 && !i
.tm
.opcode_modifier
.norex64
7327 && !i
.tm
.opcode_modifier
.vexw
7328 /* Special case for xchg %rax,%rax. It is NOP and doesn't
7330 && ! (i
.operands
== 2
7331 && i
.tm
.base_opcode
== 0x90
7332 && i
.tm
.extension_opcode
== None
7333 && i
.types
[0].bitfield
.instance
== Accum
7334 && i
.types
[0].bitfield
.qword
7335 && i
.types
[1].bitfield
.instance
== Accum
7336 && i
.types
[1].bitfield
.qword
))
7342 /* Select word/dword/qword operation with explicit data sizing prefix
7343 when there are no suitable register operands. */
7344 if (i
.tm
.opcode_modifier
.w
7345 && (i
.prefix
[DATA_PREFIX
] || (i
.prefix
[REX_PREFIX
] & REX_W
))
7347 || (i
.reg_operands
== 1
7349 && (i
.tm
.operand_types
[0].bitfield
.instance
== RegC
7351 || i
.tm
.operand_types
[0].bitfield
.instance
== RegD
7352 || i
.tm
.operand_types
[1].bitfield
.instance
== RegD
7355 i
.tm
.base_opcode
|= 1;
/* addrprefixopreg insns: the register operand's size is governed by the
   address size prefix; validate it and add the prefix when needed.  */
7359 if (i
.tm
.opcode_modifier
.addrprefixopreg
)
7361 gas_assert (!i
.suffix
);
7362 gas_assert (i
.reg_operands
);
7364 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7367 /* The address size override prefix changes the size of the
7369 if (flag_code
== CODE_64BIT
7370 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
7372 as_bad (_("16-bit addressing unavailable for `%s'"),
7377 if ((flag_code
== CODE_32BIT
7378 ? i
.op
[0].regs
->reg_type
.bitfield
.word
7379 : i
.op
[0].regs
->reg_type
.bitfield
.dword
)
7380 && !add_prefix (ADDR_PREFIX_OPCODE
))
7385 /* Check invalid register operand when the address size override
7386 prefix changes the size of register operands. */
7388 enum { need_word
, need_dword
, need_qword
} need
;
7390 /* Check the register operand for the address size prefix if
7391 the memory operand has no real registers, like symbol, DISP
7392 or bogus (x32-only) symbol(%rip) when symbol(%eip) is meant. */
7393 if (i
.mem_operands
== 1
7394 && i
.reg_operands
== 1
7396 && i
.types
[1].bitfield
.class == Reg
7397 && (flag_code
== CODE_32BIT
7398 ? i
.op
[1].regs
->reg_type
.bitfield
.word
7399 : i
.op
[1].regs
->reg_type
.bitfield
.dword
)
7400 && ((i
.base_reg
== NULL
&& i
.index_reg
== NULL
)
7401 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7402 || (x86_elf_abi
== X86_64_X32_ABI
7404 && i
.base_reg
->reg_num
== RegIP
7405 && i
.base_reg
->reg_type
.bitfield
.qword
))
7409 && !add_prefix (ADDR_PREFIX_OPCODE
))
7412 if (flag_code
== CODE_32BIT
)
7413 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
7414 else if (i
.prefix
[ADDR_PREFIX
])
7417 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
7419 for (op
= 0; op
< i
.operands
; op
++)
7421 if (i
.types
[op
].bitfield
.class != Reg
)
7427 if (i
.op
[op
].regs
->reg_type
.bitfield
.word
)
7431 if (i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
7435 if (i
.op
[op
].regs
->reg_type
.bitfield
.qword
)
7440 as_bad (_("invalid register operand size for `%s'"),
/* NOTE(review): mangled extraction — lines split/elided; code kept
   byte-identical, only comments added.

   Purpose: with a 'b' suffix, walk the register operands (last to first)
   and reject any register the byte operation cannot use, via as_bad().
   The declaration of `op` and the return statements are elided —
   presumably returns 0 on failure, 1 on success (caller does
   `if (!check_byte_reg ())`); confirm against the full file.  */
7451 check_byte_reg (void)
7455 for (op
= i
.operands
; --op
>= 0;)
7457 /* Skip non-register operands. */
7458 if (i
.types
[op
].bitfield
.class != Reg
)
7461 /* If this is an eight bit register, it's OK. If it's the 16 or
7462 32 bit version of an eight bit register, we will just use the
7463 low portion, and that's OK too. */
7464 if (i
.types
[op
].bitfield
.byte
)
7467 /* I/O port address operands are OK too. */
7468 if (i
.tm
.operand_types
[op
].bitfield
.instance
== RegD
7469 && i
.tm
.operand_types
[op
].bitfield
.word
)
7472 /* crc32 only wants its source operand checked here. */
7473 if (i
.tm
.base_opcode
== 0xf0
7474 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
7475 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
7479 /* Any other register is bad. */
7480 as_bad (_("`%s%s' not allowed with `%s%c'"),
7481 register_prefix
, i
.op
[op
].regs
->reg_name
,
7482 i
.tm
.name
, i
.suffix
);
/* NOTE(review): mangled extraction — lines split/elided; code kept
   byte-identical, only comments added.

   Purpose: with an 'l' suffix, validate each register operand against the
   template: reject byte registers where a word/dword is required, reject
   word registers where a dword is required, and for qword registers on a
   dword template either upgrade the suffix to 'q' (toqword templates,
   non-SIMD first operand — REX wanted) or reject.  Diagnostics via
   as_bad(); return statements are elided — presumably 0 on failure,
   1 on success (caller does `if (!check_long_reg ())`).  */
7489 check_long_reg (void)
7493 for (op
= i
.operands
; --op
>= 0;)
7494 /* Skip non-register operands. */
7495 if (i
.types
[op
].bitfield
.class != Reg
)
7497 /* Reject eight bit registers, except where the template requires
7498 them. (eg. movzb) */
7499 else if (i
.types
[op
].bitfield
.byte
7500 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7501 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7502 && (i
.tm
.operand_types
[op
].bitfield
.word
7503 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7505 as_bad (_("`%s%s' not allowed with `%s%c'"),
7507 i
.op
[op
].regs
->reg_name
,
7512 /* Error if the e prefix on a general reg is missing. */
7513 else if (i
.types
[op
].bitfield
.word
7514 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7515 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7516 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7518 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7519 register_prefix
, i
.op
[op
].regs
->reg_name
,
7523 /* Warn if the r prefix on a general reg is present. */
7524 else if (i
.types
[op
].bitfield
.qword
7525 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7526 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7527 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7530 && i
.tm
.opcode_modifier
.toqword
7531 && i
.types
[0].bitfield
.class != RegSIMD
)
7533 /* Convert to QWORD. We want REX byte. */
7534 i
.suffix
= QWORD_MNEM_SUFFIX
;
7538 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7539 register_prefix
, i
.op
[op
].regs
->reg_name
,
/* NOTE(review): mangled extraction — lines split/elided; code kept
   byte-identical, only comments added.

   Purpose: with a 'q' suffix, validate each register operand: reject byte
   registers where word/dword is required, and for word/dword registers on
   a qword template either downgrade the suffix to 'l' (todword templates,
   non-SIMD first operand — REX not wanted) or reject.  Diagnostics via
   as_bad(); return statements are elided — presumably 0 on failure,
   1 on success (caller does `if (!check_qword_reg ())`).  */
7548 check_qword_reg (void)
7552 for (op
= i
.operands
; --op
>= 0; )
7553 /* Skip non-register operands. */
7554 if (i
.types
[op
].bitfield
.class != Reg
)
7556 /* Reject eight bit registers, except where the template requires
7557 them. (eg. movzb) */
7558 else if (i
.types
[op
].bitfield
.byte
7559 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7560 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7561 && (i
.tm
.operand_types
[op
].bitfield
.word
7562 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7564 as_bad (_("`%s%s' not allowed with `%s%c'"),
7566 i
.op
[op
].regs
->reg_name
,
7571 /* Warn if the r prefix on a general reg is missing. */
7572 else if ((i
.types
[op
].bitfield
.word
7573 || i
.types
[op
].bitfield
.dword
)
7574 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7575 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7576 && i
.tm
.operand_types
[op
].bitfield
.qword
)
7578 /* Prohibit these changes in the 64bit mode, since the
7579 lowering is more complicated. */
7581 && i
.tm
.opcode_modifier
.todword
7582 && i
.types
[0].bitfield
.class != RegSIMD
)
7584 /* Convert to DWORD. We don't want REX byte. */
7585 i
.suffix
= LONG_MNEM_SUFFIX
;
7589 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7590 register_prefix
, i
.op
[op
].regs
->reg_name
,
/* NOTE(review): mangled extraction — lines split/elided; code kept
   byte-identical, only comments added.

   Purpose: with a 'w' suffix, validate each register operand: reject byte
   registers where word/dword is required, and reject dword/qword
   registers on a word template.  Diagnostics via as_bad(); return
   statements are elided — presumably 0 on failure, 1 on success (caller
   does `if (!check_word_reg ())`).  */
7599 check_word_reg (void)
7602 for (op
= i
.operands
; --op
>= 0;)
7603 /* Skip non-register operands. */
7604 if (i
.types
[op
].bitfield
.class != Reg
)
7606 /* Reject eight bit registers, except where the template requires
7607 them. (eg. movzb) */
7608 else if (i
.types
[op
].bitfield
.byte
7609 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7610 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7611 && (i
.tm
.operand_types
[op
].bitfield
.word
7612 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7614 as_bad (_("`%s%s' not allowed with `%s%c'"),
7616 i
.op
[op
].regs
->reg_name
,
7621 /* Error if the e or r prefix on a general reg is present. */
7622 else if ((i
.types
[op
].bitfield
.dword
7623 || i
.types
[op
].bitfield
.qword
)
7624 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7625 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7626 && i
.tm
.operand_types
[op
].bitfield
.word
)
7628 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7629 register_prefix
, i
.op
[op
].regs
->reg_name
,
7637 update_imm (unsigned int j
)
7639 i386_operand_type overlap
= i
.types
[j
];
7640 if ((overlap
.bitfield
.imm8
7641 || overlap
.bitfield
.imm8s
7642 || overlap
.bitfield
.imm16
7643 || overlap
.bitfield
.imm32
7644 || overlap
.bitfield
.imm32s
7645 || overlap
.bitfield
.imm64
)
7646 && !operand_type_equal (&overlap
, &imm8
)
7647 && !operand_type_equal (&overlap
, &imm8s
)
7648 && !operand_type_equal (&overlap
, &imm16
)
7649 && !operand_type_equal (&overlap
, &imm32
)
7650 && !operand_type_equal (&overlap
, &imm32s
)
7651 && !operand_type_equal (&overlap
, &imm64
))
7655 i386_operand_type temp
;
7657 operand_type_set (&temp
, 0);
7658 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7660 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
7661 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
7663 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7664 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
7665 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7667 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
7668 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
7671 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
7674 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
7675 || operand_type_equal (&overlap
, &imm16_32
)
7676 || operand_type_equal (&overlap
, &imm16_32s
))
7678 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
7683 else if (i
.prefix
[REX_PREFIX
] & REX_W
)
7684 overlap
= operand_type_and (overlap
, imm32s
);
7685 else if (i
.prefix
[DATA_PREFIX
])
7686 overlap
= operand_type_and (overlap
,
7687 flag_code
!= CODE_16BIT
? imm16
: imm32
);
7688 if (!operand_type_equal (&overlap
, &imm8
)
7689 && !operand_type_equal (&overlap
, &imm8s
)
7690 && !operand_type_equal (&overlap
, &imm16
)
7691 && !operand_type_equal (&overlap
, &imm32
)
7692 && !operand_type_equal (&overlap
, &imm32s
)
7693 && !operand_type_equal (&overlap
, &imm64
))
7695 as_bad (_("no instruction mnemonic suffix given; "
7696 "can't determine immediate size"));
7700 i
.types
[j
] = overlap
;
7710 /* Update the first 2 immediate operands. */
7711 n
= i
.operands
> 2 ? 2 : i
.operands
;
7714 for (j
= 0; j
< n
; j
++)
7715 if (update_imm (j
) == 0)
7718 /* The 3rd operand can't be immediate operand. */
7719 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
7726 process_operands (void)
7728 /* Default segment register this instruction will use for memory
7729 accesses. 0 means unknown. This is only for optimizing out
7730 unnecessary segment overrides. */
7731 const reg_entry
*default_seg
= NULL
;
7733 if (i
.tm
.opcode_modifier
.sse2avx
)
7735 /* Legacy encoded insns allow explicit REX prefixes, so these prefixes
7737 i
.rex
|= i
.prefix
[REX_PREFIX
] & (REX_W
| REX_R
| REX_X
| REX_B
);
7738 i
.prefix
[REX_PREFIX
] = 0;
7741 /* ImmExt should be processed after SSE2AVX. */
7742 else if (i
.tm
.opcode_modifier
.immext
)
7745 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
7747 unsigned int dupl
= i
.operands
;
7748 unsigned int dest
= dupl
- 1;
7751 /* The destination must be an xmm register. */
7752 gas_assert (i
.reg_operands
7753 && MAX_OPERANDS
> dupl
7754 && operand_type_equal (&i
.types
[dest
], ®xmm
));
7756 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7757 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7759 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
7761 /* Keep xmm0 for instructions with VEX prefix and 3
7763 i
.tm
.operand_types
[0].bitfield
.instance
= InstanceNone
;
7764 i
.tm
.operand_types
[0].bitfield
.class = RegSIMD
;
7769 /* We remove the first xmm0 and keep the number of
7770 operands unchanged, which in fact duplicates the
7772 for (j
= 1; j
< i
.operands
; j
++)
7774 i
.op
[j
- 1] = i
.op
[j
];
7775 i
.types
[j
- 1] = i
.types
[j
];
7776 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7777 i
.flags
[j
- 1] = i
.flags
[j
];
7781 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
7783 gas_assert ((MAX_OPERANDS
- 1) > dupl
7784 && (i
.tm
.opcode_modifier
.vexsources
7787 /* Add the implicit xmm0 for instructions with VEX prefix
7789 for (j
= i
.operands
; j
> 0; j
--)
7791 i
.op
[j
] = i
.op
[j
- 1];
7792 i
.types
[j
] = i
.types
[j
- 1];
7793 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
7794 i
.flags
[j
] = i
.flags
[j
- 1];
7797 = (const reg_entry
*) str_hash_find (reg_hash
, "xmm0");
7798 i
.types
[0] = regxmm
;
7799 i
.tm
.operand_types
[0] = regxmm
;
7802 i
.reg_operands
+= 2;
7807 i
.op
[dupl
] = i
.op
[dest
];
7808 i
.types
[dupl
] = i
.types
[dest
];
7809 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7810 i
.flags
[dupl
] = i
.flags
[dest
];
7819 i
.op
[dupl
] = i
.op
[dest
];
7820 i
.types
[dupl
] = i
.types
[dest
];
7821 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7822 i
.flags
[dupl
] = i
.flags
[dest
];
7825 if (i
.tm
.opcode_modifier
.immext
)
7828 else if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7829 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7833 for (j
= 1; j
< i
.operands
; j
++)
7835 i
.op
[j
- 1] = i
.op
[j
];
7836 i
.types
[j
- 1] = i
.types
[j
];
7838 /* We need to adjust fields in i.tm since they are used by
7839 build_modrm_byte. */
7840 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7842 i
.flags
[j
- 1] = i
.flags
[j
];
7849 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
7851 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
7853 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7854 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.class == RegSIMD
);
7855 regnum
= register_number (i
.op
[1].regs
);
7856 first_reg_in_group
= regnum
& ~3;
7857 last_reg_in_group
= first_reg_in_group
+ 3;
7858 if (regnum
!= first_reg_in_group
)
7859 as_warn (_("source register `%s%s' implicitly denotes"
7860 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7861 register_prefix
, i
.op
[1].regs
->reg_name
,
7862 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
7863 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
7866 else if (i
.tm
.opcode_modifier
.regkludge
)
7868 /* The imul $imm, %reg instruction is converted into
7869 imul $imm, %reg, %reg, and the clr %reg instruction
7870 is converted into xor %reg, %reg. */
7872 unsigned int first_reg_op
;
7874 if (operand_type_check (i
.types
[0], reg
))
7878 /* Pretend we saw the extra register operand. */
7879 gas_assert (i
.reg_operands
== 1
7880 && i
.op
[first_reg_op
+ 1].regs
== 0);
7881 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
7882 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
7887 if (i
.tm
.opcode_modifier
.modrm
)
7889 /* The opcode is completed (modulo i.tm.extension_opcode which
7890 must be put into the modrm byte). Now, we make the modrm and
7891 index base bytes based on all the info we've collected. */
7893 default_seg
= build_modrm_byte ();
7895 else if (i
.types
[0].bitfield
.class == SReg
)
7897 if (flag_code
!= CODE_64BIT
7898 ? i
.tm
.base_opcode
== POP_SEG_SHORT
7899 && i
.op
[0].regs
->reg_num
== 1
7900 : (i
.tm
.base_opcode
| 1) == (POP_SEG386_SHORT
& 0xff)
7901 && i
.op
[0].regs
->reg_num
< 4)
7903 as_bad (_("you can't `%s %s%s'"),
7904 i
.tm
.name
, register_prefix
, i
.op
[0].regs
->reg_name
);
7907 if (i
.op
[0].regs
->reg_num
> 3
7908 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
7910 i
.tm
.base_opcode
^= (POP_SEG_SHORT
^ POP_SEG386_SHORT
) & 0xff;
7911 i
.tm
.opcode_modifier
.opcodespace
= SPACE_0F
;
7913 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
7915 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
7916 && (i
.tm
.base_opcode
& ~3) == MOV_AX_DISP32
)
7918 default_seg
= reg_ds
;
7920 else if (i
.tm
.opcode_modifier
.isstring
)
7922 /* For the string instructions that allow a segment override
7923 on one of their operands, the default segment is ds. */
7924 default_seg
= reg_ds
;
7926 else if (i
.short_form
)
7928 /* The register or float register operand is in operand
7930 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.class != Reg
;
7932 /* Register goes in low 3 bits of opcode. */
7933 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
7934 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7936 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
7938 /* Warn about some common errors, but press on regardless.
7939 The first case can be generated by gcc (<= 2.8.1). */
7940 if (i
.operands
== 2)
7942 /* Reversed arguments on faddp, fsubp, etc. */
7943 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
7944 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
7945 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
7949 /* Extraneous `l' suffix on fp insn. */
7950 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
7951 register_prefix
, i
.op
[0].regs
->reg_name
);
7956 if ((i
.seg
[0] || i
.prefix
[SEG_PREFIX
])
7957 && i
.tm
.base_opcode
== 0x8d /* lea */
7958 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
7959 && !is_any_vex_encoding(&i
.tm
))
7961 if (!quiet_warnings
)
7962 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
7966 i
.prefix
[SEG_PREFIX
] = 0;
7970 /* If a segment was explicitly specified, and the specified segment
7971 is neither the default nor the one already recorded from a prefix,
7972 use an opcode prefix to select it. If we never figured out what
7973 the default segment is, then default_seg will be zero at this
7974 point, and the specified segment prefix will always be used. */
7976 && i
.seg
[0] != default_seg
7977 && i386_seg_prefixes
[i
.seg
[0]->reg_num
] != i
.prefix
[SEG_PREFIX
])
7979 if (!add_prefix (i386_seg_prefixes
[i
.seg
[0]->reg_num
]))
7985 static INLINE
void set_rex_vrex (const reg_entry
*r
, unsigned int rex_bit
,
7988 if (r
->reg_flags
& RegRex
)
7990 if (i
.rex
& rex_bit
)
7991 as_bad (_("same type of prefix used twice"));
7994 else if (do_sse2avx
&& (i
.rex
& rex_bit
) && i
.vex
.register_specifier
)
7996 gas_assert (i
.vex
.register_specifier
== r
);
7997 i
.vex
.register_specifier
+= 8;
8000 if (r
->reg_flags
& RegVRex
)
8004 static const reg_entry
*
8005 build_modrm_byte (void)
8007 const reg_entry
*default_seg
= NULL
;
8008 unsigned int source
, dest
;
8011 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
8014 unsigned int nds
, reg_slot
;
8017 dest
= i
.operands
- 1;
8020 /* There are 2 kinds of instructions:
8021 1. 5 operands: 4 register operands or 3 register operands
8022 plus 1 memory operand plus one Imm4 operand, VexXDS, and
8023 VexW0 or VexW1. The destination must be either XMM, YMM or
8025 2. 4 operands: 4 register operands or 3 register operands
8026 plus 1 memory operand, with VexXDS. */
8027 gas_assert ((i
.reg_operands
== 4
8028 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
8029 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8030 && i
.tm
.opcode_modifier
.vexw
8031 && i
.tm
.operand_types
[dest
].bitfield
.class == RegSIMD
);
8033 /* If VexW1 is set, the first non-immediate operand is the source and
8034 the second non-immediate one is encoded in the immediate operand. */
8035 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
8037 source
= i
.imm_operands
;
8038 reg_slot
= i
.imm_operands
+ 1;
8042 source
= i
.imm_operands
+ 1;
8043 reg_slot
= i
.imm_operands
;
8046 if (i
.imm_operands
== 0)
8048 /* When there is no immediate operand, generate an 8bit
8049 immediate operand to encode the first operand. */
8050 exp
= &im_expressions
[i
.imm_operands
++];
8051 i
.op
[i
.operands
].imms
= exp
;
8052 i
.types
[i
.operands
] = imm8
;
8055 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
8056 exp
->X_op
= O_constant
;
8057 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
8058 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
8062 gas_assert (i
.imm_operands
== 1);
8063 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
8064 gas_assert (!i
.tm
.opcode_modifier
.immext
);
8066 /* Turn on Imm8 again so that output_imm will generate it. */
8067 i
.types
[0].bitfield
.imm8
= 1;
8069 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
8070 i
.op
[0].imms
->X_add_number
8071 |= register_number (i
.op
[reg_slot
].regs
) << 4;
8072 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
8075 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.class == RegSIMD
);
8076 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
8081 /* i.reg_operands MUST be the number of real register operands;
8082 implicit registers do not count. If there are 3 register
8083 operands, it must be a instruction with VexNDS. For a
8084 instruction with VexNDD, the destination register is encoded
8085 in VEX prefix. If there are 4 register operands, it must be
8086 a instruction with VEX prefix and 3 sources. */
8087 if (i
.mem_operands
== 0
8088 && ((i
.reg_operands
== 2
8089 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
8090 || (i
.reg_operands
== 3
8091 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8092 || (i
.reg_operands
== 4 && vex_3_sources
)))
8100 /* When there are 3 operands, one of them may be immediate,
8101 which may be the first or the last operand. Otherwise,
8102 the first operand must be shift count register (cl) or it
8103 is an instruction with VexNDS. */
8104 gas_assert (i
.imm_operands
== 1
8105 || (i
.imm_operands
== 0
8106 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8107 || (i
.types
[0].bitfield
.instance
== RegC
8108 && i
.types
[0].bitfield
.byte
))));
8109 if (operand_type_check (i
.types
[0], imm
)
8110 || (i
.types
[0].bitfield
.instance
== RegC
8111 && i
.types
[0].bitfield
.byte
))
8117 /* When there are 4 operands, the first two must be 8bit
8118 immediate operands. The source operand will be the 3rd
8121 For instructions with VexNDS, if the first operand
8122 an imm8, the source operand is the 2nd one. If the last
8123 operand is imm8, the source operand is the first one. */
8124 gas_assert ((i
.imm_operands
== 2
8125 && i
.types
[0].bitfield
.imm8
8126 && i
.types
[1].bitfield
.imm8
)
8127 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8128 && i
.imm_operands
== 1
8129 && (i
.types
[0].bitfield
.imm8
8130 || i
.types
[i
.operands
- 1].bitfield
.imm8
8131 || i
.rounding
.type
!= rc_none
)));
8132 if (i
.imm_operands
== 2)
8136 if (i
.types
[0].bitfield
.imm8
)
8143 if (is_evex_encoding (&i
.tm
))
8145 /* For EVEX instructions, when there are 5 operands, the
8146 first one must be immediate operand. If the second one
8147 is immediate operand, the source operand is the 3th
8148 one. If the last one is immediate operand, the source
8149 operand is the 2nd one. */
8150 gas_assert (i
.imm_operands
== 2
8151 && i
.tm
.opcode_modifier
.sae
8152 && operand_type_check (i
.types
[0], imm
));
8153 if (operand_type_check (i
.types
[1], imm
))
8155 else if (operand_type_check (i
.types
[4], imm
))
8169 /* RC/SAE operand could be between DEST and SRC. That happens
8170 when one operand is GPR and the other one is XMM/YMM/ZMM
8172 if (i
.rounding
.type
!= rc_none
&& i
.rounding
.operand
== dest
)
8175 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8177 /* For instructions with VexNDS, the register-only source
8178 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
8179 register. It is encoded in VEX prefix. */
8181 i386_operand_type op
;
8184 /* Swap two source operands if needed. */
8185 if (i
.tm
.opcode_modifier
.swapsources
)
8193 op
= i
.tm
.operand_types
[vvvv
];
8194 if ((dest
+ 1) >= i
.operands
8195 || ((op
.bitfield
.class != Reg
8196 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
8197 && op
.bitfield
.class != RegSIMD
8198 && !operand_type_equal (&op
, ®mask
)))
8200 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
8206 /* One of the register operands will be encoded in the i.rm.reg
8207 field, the other in the combined i.rm.mode and i.rm.regmem
8208 fields. If no form of this instruction supports a memory
8209 destination operand, then we assume the source operand may
8210 sometimes be a memory operand and so we need to store the
8211 destination in the i.rm.reg field. */
8212 if (!i
.tm
.opcode_modifier
.regmem
8213 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
8215 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
8216 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
8217 set_rex_vrex (i
.op
[dest
].regs
, REX_R
, i
.tm
.opcode_modifier
.sse2avx
);
8218 set_rex_vrex (i
.op
[source
].regs
, REX_B
, false);
8222 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
8223 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
8224 set_rex_vrex (i
.op
[dest
].regs
, REX_B
, i
.tm
.opcode_modifier
.sse2avx
);
8225 set_rex_vrex (i
.op
[source
].regs
, REX_R
, false);
8227 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
8229 if (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class != RegCR
)
8232 add_prefix (LOCK_PREFIX_OPCODE
);
8236 { /* If it's not 2 reg operands... */
8241 unsigned int fake_zero_displacement
= 0;
8244 for (op
= 0; op
< i
.operands
; op
++)
8245 if (i
.flags
[op
] & Operand_Mem
)
8247 gas_assert (op
< i
.operands
);
8249 if (i
.tm
.opcode_modifier
.sib
)
8251 /* The index register of VSIB shouldn't be RegIZ. */
8252 if (i
.tm
.opcode_modifier
.sib
!= SIBMEM
8253 && i
.index_reg
->reg_num
== RegIZ
)
8256 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8259 i
.sib
.base
= NO_BASE_REGISTER
;
8260 i
.sib
.scale
= i
.log2_scale_factor
;
8261 i
.types
[op
].bitfield
.disp8
= 0;
8262 i
.types
[op
].bitfield
.disp16
= 0;
8263 i
.types
[op
].bitfield
.disp64
= 0;
8264 if (want_disp32 (&i
.tm
))
8266 /* Must be 32 bit */
8267 i
.types
[op
].bitfield
.disp32
= 1;
8268 i
.types
[op
].bitfield
.disp32s
= 0;
8272 i
.types
[op
].bitfield
.disp32
= 0;
8273 i
.types
[op
].bitfield
.disp32s
= 1;
8277 /* Since the mandatory SIB always has index register, so
8278 the code logic remains unchanged. The non-mandatory SIB
8279 without index register is allowed and will be handled
8283 if (i
.index_reg
->reg_num
== RegIZ
)
8284 i
.sib
.index
= NO_INDEX_REGISTER
;
8286 i
.sib
.index
= i
.index_reg
->reg_num
;
8287 set_rex_vrex (i
.index_reg
, REX_X
, false);
8291 default_seg
= reg_ds
;
8293 if (i
.base_reg
== 0)
8296 if (!i
.disp_operands
)
8297 fake_zero_displacement
= 1;
8298 if (i
.index_reg
== 0)
8300 i386_operand_type newdisp
;
8302 /* Both check for VSIB and mandatory non-vector SIB. */
8303 gas_assert (!i
.tm
.opcode_modifier
.sib
8304 || i
.tm
.opcode_modifier
.sib
== SIBMEM
);
8305 /* Operand is just <disp> */
8306 if (flag_code
== CODE_64BIT
)
8308 /* 64bit mode overwrites the 32bit absolute
8309 addressing by RIP relative addressing and
8310 absolute addressing is encoded by one of the
8311 redundant SIB forms. */
8312 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8313 i
.sib
.base
= NO_BASE_REGISTER
;
8314 i
.sib
.index
= NO_INDEX_REGISTER
;
8315 newdisp
= (want_disp32(&i
.tm
) ? disp32
: disp32s
);
8317 else if ((flag_code
== CODE_16BIT
)
8318 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
8320 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
8325 i
.rm
.regmem
= NO_BASE_REGISTER
;
8328 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8329 i
.types
[op
] = operand_type_or (i
.types
[op
], newdisp
);
8331 else if (!i
.tm
.opcode_modifier
.sib
)
8333 /* !i.base_reg && i.index_reg */
8334 if (i
.index_reg
->reg_num
== RegIZ
)
8335 i
.sib
.index
= NO_INDEX_REGISTER
;
8337 i
.sib
.index
= i
.index_reg
->reg_num
;
8338 i
.sib
.base
= NO_BASE_REGISTER
;
8339 i
.sib
.scale
= i
.log2_scale_factor
;
8340 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8341 i
.types
[op
].bitfield
.disp8
= 0;
8342 i
.types
[op
].bitfield
.disp16
= 0;
8343 i
.types
[op
].bitfield
.disp64
= 0;
8344 if (want_disp32 (&i
.tm
))
8346 /* Must be 32 bit */
8347 i
.types
[op
].bitfield
.disp32
= 1;
8348 i
.types
[op
].bitfield
.disp32s
= 0;
8352 i
.types
[op
].bitfield
.disp32
= 0;
8353 i
.types
[op
].bitfield
.disp32s
= 1;
8355 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8359 /* RIP addressing for 64bit mode. */
8360 else if (i
.base_reg
->reg_num
== RegIP
)
8362 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8363 i
.rm
.regmem
= NO_BASE_REGISTER
;
8364 i
.types
[op
].bitfield
.disp8
= 0;
8365 i
.types
[op
].bitfield
.disp16
= 0;
8366 i
.types
[op
].bitfield
.disp32
= 0;
8367 i
.types
[op
].bitfield
.disp32s
= 1;
8368 i
.types
[op
].bitfield
.disp64
= 0;
8369 i
.flags
[op
] |= Operand_PCrel
;
8370 if (! i
.disp_operands
)
8371 fake_zero_displacement
= 1;
8373 else if (i
.base_reg
->reg_type
.bitfield
.word
)
8375 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8376 switch (i
.base_reg
->reg_num
)
8379 if (i
.index_reg
== 0)
8381 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
8382 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
8385 default_seg
= reg_ss
;
8386 if (i
.index_reg
== 0)
8389 if (operand_type_check (i
.types
[op
], disp
) == 0)
8391 /* fake (%bp) into 0(%bp) */
8392 if (i
.disp_encoding
== disp_encoding_16bit
)
8393 i
.types
[op
].bitfield
.disp16
= 1;
8395 i
.types
[op
].bitfield
.disp8
= 1;
8396 fake_zero_displacement
= 1;
8399 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
8400 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
8402 default: /* (%si) -> 4 or (%di) -> 5 */
8403 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
8405 if (!fake_zero_displacement
8409 fake_zero_displacement
= 1;
8410 if (i
.disp_encoding
== disp_encoding_8bit
)
8411 i
.types
[op
].bitfield
.disp8
= 1;
8413 i
.types
[op
].bitfield
.disp16
= 1;
8415 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8417 else /* i.base_reg and 32/64 bit mode */
8419 if (operand_type_check (i
.types
[op
], disp
))
8421 i
.types
[op
].bitfield
.disp16
= 0;
8422 i
.types
[op
].bitfield
.disp64
= 0;
8423 if (!want_disp32 (&i
.tm
))
8425 i
.types
[op
].bitfield
.disp32
= 0;
8426 i
.types
[op
].bitfield
.disp32s
= 1;
8430 i
.types
[op
].bitfield
.disp32
= 1;
8431 i
.types
[op
].bitfield
.disp32s
= 0;
8435 if (!i
.tm
.opcode_modifier
.sib
)
8436 i
.rm
.regmem
= i
.base_reg
->reg_num
;
8437 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
8439 i
.sib
.base
= i
.base_reg
->reg_num
;
8440 /* x86-64 ignores REX prefix bit here to avoid decoder
8442 if (!(i
.base_reg
->reg_flags
& RegRex
)
8443 && (i
.base_reg
->reg_num
== EBP_REG_NUM
8444 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
8445 default_seg
= reg_ss
;
8446 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
8448 fake_zero_displacement
= 1;
8449 if (i
.disp_encoding
== disp_encoding_32bit
)
8450 i
.types
[op
].bitfield
.disp32
= 1;
8452 i
.types
[op
].bitfield
.disp8
= 1;
8454 i
.sib
.scale
= i
.log2_scale_factor
;
8455 if (i
.index_reg
== 0)
8457 /* Only check for VSIB. */
8458 gas_assert (i
.tm
.opcode_modifier
.sib
!= VECSIB128
8459 && i
.tm
.opcode_modifier
.sib
!= VECSIB256
8460 && i
.tm
.opcode_modifier
.sib
!= VECSIB512
);
8462 /* <disp>(%esp) becomes two byte modrm with no index
8463 register. We've already stored the code for esp
8464 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
8465 Any base register besides %esp will not use the
8466 extra modrm byte. */
8467 i
.sib
.index
= NO_INDEX_REGISTER
;
8469 else if (!i
.tm
.opcode_modifier
.sib
)
8471 if (i
.index_reg
->reg_num
== RegIZ
)
8472 i
.sib
.index
= NO_INDEX_REGISTER
;
8474 i
.sib
.index
= i
.index_reg
->reg_num
;
8475 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8476 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8481 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
8482 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
8486 if (!fake_zero_displacement
8490 fake_zero_displacement
= 1;
8491 if (i
.disp_encoding
== disp_encoding_8bit
)
8492 i
.types
[op
].bitfield
.disp8
= 1;
8494 i
.types
[op
].bitfield
.disp32
= 1;
8496 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8500 if (fake_zero_displacement
)
8502 /* Fakes a zero displacement assuming that i.types[op]
8503 holds the correct displacement size. */
8506 gas_assert (i
.op
[op
].disps
== 0);
8507 exp
= &disp_expressions
[i
.disp_operands
++];
8508 i
.op
[op
].disps
= exp
;
8509 exp
->X_op
= O_constant
;
8510 exp
->X_add_number
= 0;
8511 exp
->X_add_symbol
= (symbolS
*) 0;
8512 exp
->X_op_symbol
= (symbolS
*) 0;
8520 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
8522 if (operand_type_check (i
.types
[0], imm
))
8523 i
.vex
.register_specifier
= NULL
;
8526 /* VEX.vvvv encodes one of the sources when the first
8527 operand is not an immediate. */
8528 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8529 i
.vex
.register_specifier
= i
.op
[0].regs
;
8531 i
.vex
.register_specifier
= i
.op
[1].regs
;
8534 /* Destination is a XMM register encoded in the ModRM.reg
8536 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
8537 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
8540 /* ModRM.rm and VEX.B encodes the other source. */
8541 if (!i
.mem_operands
)
8545 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8546 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8548 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
8550 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8554 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
8556 i
.vex
.register_specifier
= i
.op
[2].regs
;
8557 if (!i
.mem_operands
)
8560 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8561 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8565 /* Fill in i.rm.reg or i.rm.regmem field with register operand
8566 (if any) based on i.tm.extension_opcode. Again, we must be
8567 careful to make sure that segment/control/debug/test/MMX
8568 registers are coded into the i.rm.reg field. */
8569 else if (i
.reg_operands
)
8572 unsigned int vex_reg
= ~0;
8574 for (op
= 0; op
< i
.operands
; op
++)
8575 if (i
.types
[op
].bitfield
.class == Reg
8576 || i
.types
[op
].bitfield
.class == RegBND
8577 || i
.types
[op
].bitfield
.class == RegMask
8578 || i
.types
[op
].bitfield
.class == SReg
8579 || i
.types
[op
].bitfield
.class == RegCR
8580 || i
.types
[op
].bitfield
.class == RegDR
8581 || i
.types
[op
].bitfield
.class == RegTR
8582 || i
.types
[op
].bitfield
.class == RegSIMD
8583 || i
.types
[op
].bitfield
.class == RegMMX
)
8588 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8590 /* For instructions with VexNDS, the register-only
8591 source operand is encoded in VEX prefix. */
8592 gas_assert (mem
!= (unsigned int) ~0);
8597 gas_assert (op
< i
.operands
);
8601 /* Check register-only source operand when two source
8602 operands are swapped. */
8603 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
8604 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
8608 gas_assert (mem
== (vex_reg
+ 1)
8609 && op
< i
.operands
);
8614 gas_assert (vex_reg
< i
.operands
);
8618 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
8620 /* For instructions with VexNDD, the register destination
8621 is encoded in VEX prefix. */
8622 if (i
.mem_operands
== 0)
8624 /* There is no memory operand. */
8625 gas_assert ((op
+ 2) == i
.operands
);
8630 /* There are only 2 non-immediate operands. */
8631 gas_assert (op
< i
.imm_operands
+ 2
8632 && i
.operands
== i
.imm_operands
+ 2);
8633 vex_reg
= i
.imm_operands
+ 1;
8637 gas_assert (op
< i
.operands
);
8639 if (vex_reg
!= (unsigned int) ~0)
8641 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
8643 if ((type
->bitfield
.class != Reg
8644 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
8645 && type
->bitfield
.class != RegSIMD
8646 && !operand_type_equal (type
, ®mask
))
8649 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
8652 /* Don't set OP operand twice. */
8655 /* If there is an extension opcode to put here, the
8656 register number must be put into the regmem field. */
8657 if (i
.tm
.extension_opcode
!= None
)
8659 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
8660 set_rex_vrex (i
.op
[op
].regs
, REX_B
,
8661 i
.tm
.opcode_modifier
.sse2avx
);
8665 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
8666 set_rex_vrex (i
.op
[op
].regs
, REX_R
,
8667 i
.tm
.opcode_modifier
.sse2avx
);
8671 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
8672 must set it to 3 to indicate this is a register operand
8673 in the regmem field. */
8674 if (!i
.mem_operands
)
8678 /* Fill in i.rm.reg field with extension opcode (if any). */
8679 if (i
.tm
.extension_opcode
!= None
)
8680 i
.rm
.reg
= i
.tm
.extension_opcode
;
8686 frag_opcode_byte (unsigned char byte
)
8688 if (now_seg
!= absolute_section
)
8689 FRAG_APPEND_1_CHAR (byte
);
8691 ++abs_section_offset
;
8695 flip_code16 (unsigned int code16
)
8697 gas_assert (i
.tm
.operands
== 1);
8699 return !(i
.prefix
[REX_PREFIX
] & REX_W
)
8700 && (code16
? i
.tm
.operand_types
[0].bitfield
.disp32
8701 || i
.tm
.operand_types
[0].bitfield
.disp32s
8702 : i
.tm
.operand_types
[0].bitfield
.disp16
)
8707 output_branch (void)
8713 relax_substateT subtype
;
8717 if (now_seg
== absolute_section
)
8719 as_bad (_("relaxable branches not supported in absolute section"));
8723 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
8724 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
8727 if (i
.prefix
[DATA_PREFIX
] != 0)
8731 code16
^= flip_code16(code16
);
8733 /* Pentium4 branch hints. */
8734 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8735 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8740 if (i
.prefix
[REX_PREFIX
] != 0)
8746 /* BND prefixed jump. */
8747 if (i
.prefix
[BND_PREFIX
] != 0)
8753 if (i
.prefixes
!= 0)
8754 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8756 /* It's always a symbol; End frag & setup for relax.
8757 Make sure there is enough room in this frag for the largest
8758 instruction we may generate in md_convert_frag. This is 2
8759 bytes for the opcode and room for the prefix and largest
8761 frag_grow (prefix
+ 2 + 4);
8762 /* Prefix and 1 opcode byte go in fr_fix. */
8763 p
= frag_more (prefix
+ 1);
8764 if (i
.prefix
[DATA_PREFIX
] != 0)
8765 *p
++ = DATA_PREFIX_OPCODE
;
8766 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
8767 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
8768 *p
++ = i
.prefix
[SEG_PREFIX
];
8769 if (i
.prefix
[BND_PREFIX
] != 0)
8770 *p
++ = BND_PREFIX_OPCODE
;
8771 if (i
.prefix
[REX_PREFIX
] != 0)
8772 *p
++ = i
.prefix
[REX_PREFIX
];
8773 *p
= i
.tm
.base_opcode
;
8775 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
8776 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
8777 else if (cpu_arch_flags
.bitfield
.cpui386
)
8778 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
8780 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
8783 sym
= i
.op
[0].disps
->X_add_symbol
;
8784 off
= i
.op
[0].disps
->X_add_number
;
8786 if (i
.op
[0].disps
->X_op
!= O_constant
8787 && i
.op
[0].disps
->X_op
!= O_symbol
)
8789 /* Handle complex expressions. */
8790 sym
= make_expr_symbol (i
.op
[0].disps
);
8794 /* 1 possible extra opcode + 4 byte displacement go in var part.
8795 Pass reloc in fr_var. */
8796 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
8799 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8800 /* Return TRUE iff PLT32 relocation should be used for branching to
8804 need_plt32_p (symbolS
*s
)
8806 /* PLT32 relocation is ELF only. */
8811 /* Don't emit PLT32 relocation on Solaris: neither native linker nor
8812 krtld support it. */
8816 /* Since there is no need to prepare for PLT branch on x86-64, we
8817 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
8818 be used as a marker for 32-bit PC-relative branches. */
8825 /* Weak or undefined symbol need PLT32 relocation. */
8826 if (S_IS_WEAK (s
) || !S_IS_DEFINED (s
))
8829 /* Non-global symbol doesn't need PLT32 relocation. */
8830 if (! S_IS_EXTERNAL (s
))
8833 /* Other global symbols need PLT32 relocation. NB: Symbol with
8834 non-default visibilities are treated as normal global symbol
8835 so that PLT32 relocation can be used as a marker for 32-bit
8836 PC-relative branches. It is useful for linker relaxation. */
8847 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
8849 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)
8851 /* This is a loop or jecxz type instruction. */
8853 if (i
.prefix
[ADDR_PREFIX
] != 0)
8855 frag_opcode_byte (ADDR_PREFIX_OPCODE
);
8858 /* Pentium4 branch hints. */
8859 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8860 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8862 frag_opcode_byte (i
.prefix
[SEG_PREFIX
]);
8871 if (flag_code
== CODE_16BIT
)
8874 if (i
.prefix
[DATA_PREFIX
] != 0)
8876 frag_opcode_byte (DATA_PREFIX_OPCODE
);
8878 code16
^= flip_code16(code16
);
8886 /* BND prefixed jump. */
8887 if (i
.prefix
[BND_PREFIX
] != 0)
8889 frag_opcode_byte (i
.prefix
[BND_PREFIX
]);
8893 if (i
.prefix
[REX_PREFIX
] != 0)
8895 frag_opcode_byte (i
.prefix
[REX_PREFIX
]);
8899 if (i
.prefixes
!= 0)
8900 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8902 if (now_seg
== absolute_section
)
8904 abs_section_offset
+= i
.opcode_length
+ size
;
8908 p
= frag_more (i
.opcode_length
+ size
);
8909 switch (i
.opcode_length
)
8912 *p
++ = i
.tm
.base_opcode
>> 8;
8915 *p
++ = i
.tm
.base_opcode
;
8921 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8923 && jump_reloc
== NO_RELOC
8924 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
8925 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
8928 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
8930 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8931 i
.op
[0].disps
, 1, jump_reloc
);
8933 /* All jumps handled here are signed, but don't unconditionally use a
8934 signed limit check for 32 and 16 bit jumps as we want to allow wrap
8935 around at 4G (outside of 64-bit mode) and 64k (except for XBEGIN)
8940 fixP
->fx_signed
= 1;
8944 if (i
.tm
.base_opcode
== 0xc7f8)
8945 fixP
->fx_signed
= 1;
8949 if (flag_code
== CODE_64BIT
)
8950 fixP
->fx_signed
= 1;
8956 output_interseg_jump (void)
8964 if (flag_code
== CODE_16BIT
)
8968 if (i
.prefix
[DATA_PREFIX
] != 0)
8975 gas_assert (!i
.prefix
[REX_PREFIX
]);
8981 if (i
.prefixes
!= 0)
8982 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8984 if (now_seg
== absolute_section
)
8986 abs_section_offset
+= prefix
+ 1 + 2 + size
;
8990 /* 1 opcode; 2 segment; offset */
8991 p
= frag_more (prefix
+ 1 + 2 + size
);
8993 if (i
.prefix
[DATA_PREFIX
] != 0)
8994 *p
++ = DATA_PREFIX_OPCODE
;
8996 if (i
.prefix
[REX_PREFIX
] != 0)
8997 *p
++ = i
.prefix
[REX_PREFIX
];
8999 *p
++ = i
.tm
.base_opcode
;
9000 if (i
.op
[1].imms
->X_op
== O_constant
)
9002 offsetT n
= i
.op
[1].imms
->X_add_number
;
9005 && !fits_in_unsigned_word (n
)
9006 && !fits_in_signed_word (n
))
9008 as_bad (_("16-bit jump out of range"));
9011 md_number_to_chars (p
, n
, size
);
9014 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
9015 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
9018 if (i
.op
[0].imms
->X_op
== O_constant
)
9019 md_number_to_chars (p
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
9021 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, 2,
9022 i
.op
[0].imms
, 0, reloc (2, 0, 0, i
.reloc
[0]));
9025 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9030 asection
*seg
= now_seg
;
9031 subsegT subseg
= now_subseg
;
9033 unsigned int alignment
, align_size_1
;
9034 unsigned int isa_1_descsz
, feature_2_descsz
, descsz
;
9035 unsigned int isa_1_descsz_raw
, feature_2_descsz_raw
;
9036 unsigned int padding
;
9038 if (!IS_ELF
|| !x86_used_note
)
9041 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X86
;
9043 /* The .note.gnu.property section layout:
9045 Field Length Contents
9048 n_descsz 4 The note descriptor size
9049 n_type 4 NT_GNU_PROPERTY_TYPE_0
9051 n_desc n_descsz The program property array
9055 /* Create the .note.gnu.property section. */
9056 sec
= subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME
, 0);
9057 bfd_set_section_flags (sec
,
9064 if (get_elf_backend_data (stdoutput
)->s
->elfclass
== ELFCLASS64
)
9075 bfd_set_section_alignment (sec
, alignment
);
9076 elf_section_type (sec
) = SHT_NOTE
;
9078 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
9080 isa_1_descsz_raw
= 4 + 4 + 4;
9081 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
9082 isa_1_descsz
= (isa_1_descsz_raw
+ align_size_1
) & ~align_size_1
;
9084 feature_2_descsz_raw
= isa_1_descsz
;
9085 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
9087 feature_2_descsz_raw
+= 4 + 4 + 4;
9088 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
9089 feature_2_descsz
= ((feature_2_descsz_raw
+ align_size_1
)
9092 descsz
= feature_2_descsz
;
9093 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
9094 p
= frag_more (4 + 4 + 4 + 4 + descsz
);
9096 /* Write n_namsz. */
9097 md_number_to_chars (p
, (valueT
) 4, 4);
9099 /* Write n_descsz. */
9100 md_number_to_chars (p
+ 4, (valueT
) descsz
, 4);
9103 md_number_to_chars (p
+ 4 * 2, (valueT
) NT_GNU_PROPERTY_TYPE_0
, 4);
9106 memcpy (p
+ 4 * 3, "GNU", 4);
9108 /* Write 4-byte type. */
9109 md_number_to_chars (p
+ 4 * 4,
9110 (valueT
) GNU_PROPERTY_X86_ISA_1_USED
, 4);
9112 /* Write 4-byte data size. */
9113 md_number_to_chars (p
+ 4 * 5, (valueT
) 4, 4);
9115 /* Write 4-byte data. */
9116 md_number_to_chars (p
+ 4 * 6, (valueT
) x86_isa_1_used
, 4);
9118 /* Zero out paddings. */
9119 padding
= isa_1_descsz
- isa_1_descsz_raw
;
9121 memset (p
+ 4 * 7, 0, padding
);
9123 /* Write 4-byte type. */
9124 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 4,
9125 (valueT
) GNU_PROPERTY_X86_FEATURE_2_USED
, 4);
9127 /* Write 4-byte data size. */
9128 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 5, (valueT
) 4, 4);
9130 /* Write 4-byte data. */
9131 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 6,
9132 (valueT
) x86_feature_2_used
, 4);
9134 /* Zero out paddings. */
9135 padding
= feature_2_descsz
- feature_2_descsz_raw
;
9137 memset (p
+ isa_1_descsz
+ 4 * 7, 0, padding
);
9139 /* We probably can't restore the current segment, for there likely
9142 subseg_set (seg
, subseg
);
9147 encoding_length (const fragS
*start_frag
, offsetT start_off
,
9148 const char *frag_now_ptr
)
9150 unsigned int len
= 0;
9152 if (start_frag
!= frag_now
)
9154 const fragS
*fr
= start_frag
;
9159 } while (fr
&& fr
!= frag_now
);
9162 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
9165 /* Return 1 for test, and, cmp, add, sub, inc and dec which may
9166 be macro-fused with conditional jumps.
9167 NB: If TEST/AND/CMP/ADD/SUB/INC/DEC is of RIP relative address,
9168 or is one of the following format:
9181 maybe_fused_with_jcc_p (enum mf_cmp_kind
* mf_cmp_p
)
9183 /* No RIP address. */
9184 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
9187 /* No opcodes outside of base encoding space. */
9188 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9191 /* add, sub without add/sub m, imm. */
9192 if (i
.tm
.base_opcode
<= 5
9193 || (i
.tm
.base_opcode
>= 0x28 && i
.tm
.base_opcode
<= 0x2d)
9194 || ((i
.tm
.base_opcode
| 3) == 0x83
9195 && (i
.tm
.extension_opcode
== 0x5
9196 || i
.tm
.extension_opcode
== 0x0)))
9198 *mf_cmp_p
= mf_cmp_alu_cmp
;
9199 return !(i
.mem_operands
&& i
.imm_operands
);
9202 /* and without and m, imm. */
9203 if ((i
.tm
.base_opcode
>= 0x20 && i
.tm
.base_opcode
<= 0x25)
9204 || ((i
.tm
.base_opcode
| 3) == 0x83
9205 && i
.tm
.extension_opcode
== 0x4))
9207 *mf_cmp_p
= mf_cmp_test_and
;
9208 return !(i
.mem_operands
&& i
.imm_operands
);
9211 /* test without test m imm. */
9212 if ((i
.tm
.base_opcode
| 1) == 0x85
9213 || (i
.tm
.base_opcode
| 1) == 0xa9
9214 || ((i
.tm
.base_opcode
| 1) == 0xf7
9215 && i
.tm
.extension_opcode
== 0))
9217 *mf_cmp_p
= mf_cmp_test_and
;
9218 return !(i
.mem_operands
&& i
.imm_operands
);
9221 /* cmp without cmp m, imm. */
9222 if ((i
.tm
.base_opcode
>= 0x38 && i
.tm
.base_opcode
<= 0x3d)
9223 || ((i
.tm
.base_opcode
| 3) == 0x83
9224 && (i
.tm
.extension_opcode
== 0x7)))
9226 *mf_cmp_p
= mf_cmp_alu_cmp
;
9227 return !(i
.mem_operands
&& i
.imm_operands
);
9230 /* inc, dec without inc/dec m. */
9231 if ((i
.tm
.cpu_flags
.bitfield
.cpuno64
9232 && (i
.tm
.base_opcode
| 0xf) == 0x4f)
9233 || ((i
.tm
.base_opcode
| 1) == 0xff
9234 && i
.tm
.extension_opcode
<= 0x1))
9236 *mf_cmp_p
= mf_cmp_incdec
;
9237 return !i
.mem_operands
;
9243 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
9246 add_fused_jcc_padding_frag_p (enum mf_cmp_kind
* mf_cmp_p
)
9248 /* NB: Don't work with COND_JUMP86 without i386. */
9249 if (!align_branch_power
9250 || now_seg
== absolute_section
9251 || !cpu_arch_flags
.bitfield
.cpui386
9252 || !(align_branch
& align_branch_fused_bit
))
9255 if (maybe_fused_with_jcc_p (mf_cmp_p
))
9257 if (last_insn
.kind
== last_insn_other
9258 || last_insn
.seg
!= now_seg
)
9261 as_warn_where (last_insn
.file
, last_insn
.line
,
9262 _("`%s` skips -malign-branch-boundary on `%s`"),
9263 last_insn
.name
, i
.tm
.name
);
9269 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
9272 add_branch_prefix_frag_p (void)
9274 /* NB: Don't work with COND_JUMP86 without i386. Don't add prefix
9275 to PadLock instructions since they include prefixes in opcode. */
9276 if (!align_branch_power
9277 || !align_branch_prefix_size
9278 || now_seg
== absolute_section
9279 || i
.tm
.cpu_flags
.bitfield
.cpupadlock
9280 || !cpu_arch_flags
.bitfield
.cpui386
)
9283 /* Don't add prefix if it is a prefix or there is no operand in case
9284 that segment prefix is special. */
9285 if (!i
.operands
|| i
.tm
.opcode_modifier
.isprefix
)
9288 if (last_insn
.kind
== last_insn_other
9289 || last_insn
.seg
!= now_seg
)
9293 as_warn_where (last_insn
.file
, last_insn
.line
,
9294 _("`%s` skips -malign-branch-boundary on `%s`"),
9295 last_insn
.name
, i
.tm
.name
);
9300 /* Return 1 if a BRANCH_PADDING frag should be generated. */
9303 add_branch_padding_frag_p (enum align_branch_kind
*branch_p
,
9304 enum mf_jcc_kind
*mf_jcc_p
)
9308 /* NB: Don't work with COND_JUMP86 without i386. */
9309 if (!align_branch_power
9310 || now_seg
== absolute_section
9311 || !cpu_arch_flags
.bitfield
.cpui386
9312 || i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9317 /* Check for jcc and direct jmp. */
9318 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9320 if (i
.tm
.base_opcode
== JUMP_PC_RELATIVE
)
9322 *branch_p
= align_branch_jmp
;
9323 add_padding
= align_branch
& align_branch_jmp_bit
;
9327 /* Because J<cc> and JN<cc> share same group in macro-fusible table,
9328 igore the lowest bit. */
9329 *mf_jcc_p
= (i
.tm
.base_opcode
& 0x0e) >> 1;
9330 *branch_p
= align_branch_jcc
;
9331 if ((align_branch
& align_branch_jcc_bit
))
9335 else if ((i
.tm
.base_opcode
| 1) == 0xc3)
9338 *branch_p
= align_branch_ret
;
9339 if ((align_branch
& align_branch_ret_bit
))
9344 /* Check for indirect jmp, direct and indirect calls. */
9345 if (i
.tm
.base_opcode
== 0xe8)
9348 *branch_p
= align_branch_call
;
9349 if ((align_branch
& align_branch_call_bit
))
9352 else if (i
.tm
.base_opcode
== 0xff
9353 && (i
.tm
.extension_opcode
== 2
9354 || i
.tm
.extension_opcode
== 4))
9356 /* Indirect call and jmp. */
9357 *branch_p
= align_branch_indirect
;
9358 if ((align_branch
& align_branch_indirect_bit
))
9365 && (i
.op
[0].disps
->X_op
== O_symbol
9366 || (i
.op
[0].disps
->X_op
== O_subtract
9367 && i
.op
[0].disps
->X_op_symbol
== GOT_symbol
)))
9369 symbolS
*s
= i
.op
[0].disps
->X_add_symbol
;
9370 /* No padding to call to global or undefined tls_get_addr. */
9371 if ((S_IS_EXTERNAL (s
) || !S_IS_DEFINED (s
))
9372 && strcmp (S_GET_NAME (s
), tls_get_addr
) == 0)
9378 && last_insn
.kind
!= last_insn_other
9379 && last_insn
.seg
== now_seg
)
9382 as_warn_where (last_insn
.file
, last_insn
.line
,
9383 _("`%s` skips -malign-branch-boundary on `%s`"),
9384 last_insn
.name
, i
.tm
.name
);
9394 fragS
*insn_start_frag
;
9395 offsetT insn_start_off
;
9396 fragS
*fragP
= NULL
;
9397 enum align_branch_kind branch
= align_branch_none
;
9398 /* The initializer is arbitrary just to avoid uninitialized error.
9399 it's actually either assigned in add_branch_padding_frag_p
9400 or never be used. */
9401 enum mf_jcc_kind mf_jcc
= mf_jcc_jo
;
9403 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9404 if (IS_ELF
&& x86_used_note
&& now_seg
!= absolute_section
)
9406 if ((i
.xstate
& xstate_tmm
) == xstate_tmm
9407 || i
.tm
.cpu_flags
.bitfield
.cpuamx_tile
)
9408 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_TMM
;
9410 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
9411 || i
.tm
.cpu_flags
.bitfield
.cpu287
9412 || i
.tm
.cpu_flags
.bitfield
.cpu387
9413 || i
.tm
.cpu_flags
.bitfield
.cpu687
9414 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
9415 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
9417 if ((i
.xstate
& xstate_mmx
)
9418 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
9419 && !is_any_vex_encoding (&i
.tm
)
9420 && (i
.tm
.base_opcode
== 0x77 /* emms */
9421 || i
.tm
.base_opcode
== 0x0e /* femms */)))
9422 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
9426 if (i
.index_reg
->reg_type
.bitfield
.zmmword
)
9427 i
.xstate
|= xstate_zmm
;
9428 else if (i
.index_reg
->reg_type
.bitfield
.ymmword
)
9429 i
.xstate
|= xstate_ymm
;
9430 else if (i
.index_reg
->reg_type
.bitfield
.xmmword
)
9431 i
.xstate
|= xstate_xmm
;
9434 /* vzeroall / vzeroupper */
9435 if (i
.tm
.base_opcode
== 0x77 && i
.tm
.cpu_flags
.bitfield
.cpuavx
)
9436 i
.xstate
|= xstate_ymm
;
9438 if ((i
.xstate
& xstate_xmm
)
9439 /* ldmxcsr / stmxcsr / vldmxcsr / vstmxcsr */
9440 || (i
.tm
.base_opcode
== 0xae
9441 && (i
.tm
.cpu_flags
.bitfield
.cpusse
9442 || i
.tm
.cpu_flags
.bitfield
.cpuavx
))
9443 || i
.tm
.cpu_flags
.bitfield
.cpuwidekl
9444 || i
.tm
.cpu_flags
.bitfield
.cpukl
)
9445 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
9447 if ((i
.xstate
& xstate_ymm
) == xstate_ymm
)
9448 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
9449 if ((i
.xstate
& xstate_zmm
) == xstate_zmm
)
9450 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
9451 if (i
.mask
.reg
|| (i
.xstate
& xstate_mask
) == xstate_mask
)
9452 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MASK
;
9453 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
9454 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
9455 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
9456 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
9457 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
9458 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
9459 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
9460 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
9462 if (x86_feature_2_used
9463 || i
.tm
.cpu_flags
.bitfield
.cpucmov
9464 || i
.tm
.cpu_flags
.bitfield
.cpusyscall
9465 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
9466 && i
.tm
.base_opcode
== 0xc7
9467 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
9468 && i
.tm
.extension_opcode
== 1) /* cmpxchg8b */)
9469 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_BASELINE
;
9470 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
9471 || i
.tm
.cpu_flags
.bitfield
.cpussse3
9472 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
9473 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
9474 || i
.tm
.cpu_flags
.bitfield
.cpucx16
9475 || i
.tm
.cpu_flags
.bitfield
.cpupopcnt
9476 /* LAHF-SAHF insns in 64-bit mode. */
9477 || (flag_code
== CODE_64BIT
9478 && (i
.tm
.base_opcode
| 1) == 0x9f
9479 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
))
9480 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V2
;
9481 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
9482 || i
.tm
.cpu_flags
.bitfield
.cpuavx2
9483 /* Any VEX encoded insns execpt for CpuAVX512F, CpuAVX512BW,
9484 CpuAVX512DQ, LPW, TBM and AMX. */
9485 || (i
.tm
.opcode_modifier
.vex
9486 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9487 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9488 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9489 && !i
.tm
.cpu_flags
.bitfield
.cpulwp
9490 && !i
.tm
.cpu_flags
.bitfield
.cputbm
9491 && !(x86_feature_2_used
& GNU_PROPERTY_X86_FEATURE_2_TMM
))
9492 || i
.tm
.cpu_flags
.bitfield
.cpuf16c
9493 || i
.tm
.cpu_flags
.bitfield
.cpufma
9494 || i
.tm
.cpu_flags
.bitfield
.cpulzcnt
9495 || i
.tm
.cpu_flags
.bitfield
.cpumovbe
9496 || i
.tm
.cpu_flags
.bitfield
.cpuxsaves
9497 || (x86_feature_2_used
9498 & (GNU_PROPERTY_X86_FEATURE_2_XSAVE
9499 | GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
9500 | GNU_PROPERTY_X86_FEATURE_2_XSAVEC
)) != 0)
9501 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V3
;
9502 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9503 || i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9504 || i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9505 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
9506 /* Any EVEX encoded insns except for AVX512ER, AVX512PF and
9508 || (i
.tm
.opcode_modifier
.evex
9509 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512er
9510 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
9511 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
))
9512 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V4
;
9516 /* Tie dwarf2 debug info to the address at the start of the insn.
9517 We can't do this after the insn has been output as the current
9518 frag may have been closed off. eg. by frag_var. */
9519 dwarf2_emit_insn (0);
9521 insn_start_frag
= frag_now
;
9522 insn_start_off
= frag_now_fix ();
9524 if (add_branch_padding_frag_p (&branch
, &mf_jcc
))
9527 /* Branch can be 8 bytes. Leave some room for prefixes. */
9528 unsigned int max_branch_padding_size
= 14;
9530 /* Align section to boundary. */
9531 record_alignment (now_seg
, align_branch_power
);
9533 /* Make room for padding. */
9534 frag_grow (max_branch_padding_size
);
9536 /* Start of the padding. */
9541 frag_var (rs_machine_dependent
, max_branch_padding_size
, 0,
9542 ENCODE_RELAX_STATE (BRANCH_PADDING
, 0),
9545 fragP
->tc_frag_data
.mf_type
= mf_jcc
;
9546 fragP
->tc_frag_data
.branch_type
= branch
;
9547 fragP
->tc_frag_data
.max_bytes
= max_branch_padding_size
;
9551 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9553 else if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
9554 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
9556 else if (i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
)
9557 output_interseg_jump ();
9560 /* Output normal instructions here. */
9564 enum mf_cmp_kind mf_cmp
;
9567 && (i
.tm
.base_opcode
== 0xaee8
9568 || i
.tm
.base_opcode
== 0xaef0
9569 || i
.tm
.base_opcode
== 0xaef8))
9571 /* Encode lfence, mfence, and sfence as
9572 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
9573 if (now_seg
!= absolute_section
)
9575 offsetT val
= 0x240483f0ULL
;
9578 md_number_to_chars (p
, val
, 5);
9581 abs_section_offset
+= 5;
9585 /* Some processors fail on LOCK prefix. This options makes
9586 assembler ignore LOCK prefix and serves as a workaround. */
9587 if (omit_lock_prefix
)
9589 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
9590 && i
.tm
.opcode_modifier
.isprefix
)
9592 i
.prefix
[LOCK_PREFIX
] = 0;
9596 /* Skip if this is a branch. */
9598 else if (add_fused_jcc_padding_frag_p (&mf_cmp
))
9600 /* Make room for padding. */
9601 frag_grow (MAX_FUSED_JCC_PADDING_SIZE
);
9606 frag_var (rs_machine_dependent
, MAX_FUSED_JCC_PADDING_SIZE
, 0,
9607 ENCODE_RELAX_STATE (FUSED_JCC_PADDING
, 0),
9610 fragP
->tc_frag_data
.mf_type
= mf_cmp
;
9611 fragP
->tc_frag_data
.branch_type
= align_branch_fused
;
9612 fragP
->tc_frag_data
.max_bytes
= MAX_FUSED_JCC_PADDING_SIZE
;
9614 else if (add_branch_prefix_frag_p ())
9616 unsigned int max_prefix_size
= align_branch_prefix_size
;
9618 /* Make room for padding. */
9619 frag_grow (max_prefix_size
);
9624 frag_var (rs_machine_dependent
, max_prefix_size
, 0,
9625 ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0),
9628 fragP
->tc_frag_data
.max_bytes
= max_prefix_size
;
9631 /* Since the VEX/EVEX prefix contains the implicit prefix, we
9632 don't need the explicit prefix. */
9633 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
9635 switch (i
.tm
.opcode_modifier
.opcodeprefix
)
9644 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
9645 || (i
.prefix
[REP_PREFIX
] != 0xf3))
9649 switch (i
.opcode_length
)
9654 /* Check for pseudo prefixes. */
9655 if (!i
.tm
.opcode_modifier
.isprefix
|| i
.tm
.base_opcode
)
9657 as_bad_where (insn_start_frag
->fr_file
,
9658 insn_start_frag
->fr_line
,
9659 _("pseudo prefix without instruction"));
9669 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9670 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
9671 R_X86_64_GOTTPOFF relocation so that linker can safely
9672 perform IE->LE optimization. A dummy REX_OPCODE prefix
9673 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
9674 relocation for GDesc -> IE/LE optimization. */
9675 if (x86_elf_abi
== X86_64_X32_ABI
9677 && (i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
9678 || i
.reloc
[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC
)
9679 && i
.prefix
[REX_PREFIX
] == 0)
9680 add_prefix (REX_OPCODE
);
9683 /* The prefix bytes. */
9684 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
9686 frag_opcode_byte (*q
);
9690 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
9696 frag_opcode_byte (*q
);
9699 /* There should be no other prefixes for instructions
9704 /* For EVEX instructions i.vrex should become 0 after
9705 build_evex_prefix. For VEX instructions upper 16 registers
9706 aren't available, so VREX should be 0. */
9709 /* Now the VEX prefix. */
9710 if (now_seg
!= absolute_section
)
9712 p
= frag_more (i
.vex
.length
);
9713 for (j
= 0; j
< i
.vex
.length
; j
++)
9714 p
[j
] = i
.vex
.bytes
[j
];
9717 abs_section_offset
+= i
.vex
.length
;
9720 /* Now the opcode; be careful about word order here! */
9721 j
= i
.opcode_length
;
9723 switch (i
.tm
.opcode_modifier
.opcodespace
)
9738 if (now_seg
== absolute_section
)
9739 abs_section_offset
+= j
;
9742 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
9748 && i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9751 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_0F
)
9752 *p
++ = i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
9756 switch (i
.opcode_length
)
9759 /* Put out high byte first: can't use md_number_to_chars! */
9760 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
9763 *p
= i
.tm
.base_opcode
& 0xff;
9772 /* Now the modrm byte and sib byte (if present). */
9773 if (i
.tm
.opcode_modifier
.modrm
)
9775 frag_opcode_byte ((i
.rm
.regmem
<< 0)
9777 | (i
.rm
.mode
<< 6));
9778 /* If i.rm.regmem == ESP (4)
9779 && i.rm.mode != (Register mode)
9781 ==> need second modrm byte. */
9782 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
9784 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
9785 frag_opcode_byte ((i
.sib
.base
<< 0)
9786 | (i
.sib
.index
<< 3)
9787 | (i
.sib
.scale
<< 6));
9790 if (i
.disp_operands
)
9791 output_disp (insn_start_frag
, insn_start_off
);
9794 output_imm (insn_start_frag
, insn_start_off
);
9797 * frag_now_fix () returning plain abs_section_offset when we're in the
9798 * absolute section, and abs_section_offset not getting updated as data
9799 * gets added to the frag breaks the logic below.
9801 if (now_seg
!= absolute_section
)
9803 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
9805 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
9809 /* NB: Don't add prefix with GOTPC relocation since
9810 output_disp() above depends on the fixed encoding
9811 length. Can't add prefix with TLS relocation since
9812 it breaks TLS linker optimization. */
9813 unsigned int max
= i
.has_gotpc_tls_reloc
? 0 : 15 - j
;
9814 /* Prefix count on the current instruction. */
9815 unsigned int count
= i
.vex
.length
;
9817 for (k
= 0; k
< ARRAY_SIZE (i
.prefix
); k
++)
9818 /* REX byte is encoded in VEX/EVEX prefix. */
9819 if (i
.prefix
[k
] && (k
!= REX_PREFIX
|| !i
.vex
.length
))
9822 /* Count prefixes for extended opcode maps. */
9824 switch (i
.tm
.opcode_modifier
.opcodespace
)
9839 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
9842 /* Set the maximum prefix size in BRANCH_PREFIX
9844 if (fragP
->tc_frag_data
.max_bytes
> max
)
9845 fragP
->tc_frag_data
.max_bytes
= max
;
9846 if (fragP
->tc_frag_data
.max_bytes
> count
)
9847 fragP
->tc_frag_data
.max_bytes
-= count
;
9849 fragP
->tc_frag_data
.max_bytes
= 0;
9853 /* Remember the maximum prefix size in FUSED_JCC_PADDING
9855 unsigned int max_prefix_size
;
9856 if (align_branch_prefix_size
> max
)
9857 max_prefix_size
= max
;
9859 max_prefix_size
= align_branch_prefix_size
;
9860 if (max_prefix_size
> count
)
9861 fragP
->tc_frag_data
.max_prefix_length
9862 = max_prefix_size
- count
;
9865 /* Use existing segment prefix if possible. Use CS
9866 segment prefix in 64-bit mode. In 32-bit mode, use SS
9867 segment prefix with ESP/EBP base register and use DS
9868 segment prefix without ESP/EBP base register. */
9869 if (i
.prefix
[SEG_PREFIX
])
9870 fragP
->tc_frag_data
.default_prefix
= i
.prefix
[SEG_PREFIX
];
9871 else if (flag_code
== CODE_64BIT
)
9872 fragP
->tc_frag_data
.default_prefix
= CS_PREFIX_OPCODE
;
9874 && (i
.base_reg
->reg_num
== 4
9875 || i
.base_reg
->reg_num
== 5))
9876 fragP
->tc_frag_data
.default_prefix
= SS_PREFIX_OPCODE
;
9878 fragP
->tc_frag_data
.default_prefix
= DS_PREFIX_OPCODE
;
9883 /* NB: Don't work with COND_JUMP86 without i386. */
9884 if (align_branch_power
9885 && now_seg
!= absolute_section
9886 && cpu_arch_flags
.bitfield
.cpui386
)
9888 /* Terminate each frag so that we can add prefix and check for
9890 frag_wane (frag_now
);
9897 pi ("" /*line*/, &i
);
9899 #endif /* DEBUG386 */
9902 /* Return the size of the displacement operand N. */
9905 disp_size (unsigned int n
)
9909 if (i
.types
[n
].bitfield
.disp64
)
9911 else if (i
.types
[n
].bitfield
.disp8
)
9913 else if (i
.types
[n
].bitfield
.disp16
)
9918 /* Return the size of the immediate operand N. */
9921 imm_size (unsigned int n
)
9924 if (i
.types
[n
].bitfield
.imm64
)
9926 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
9928 else if (i
.types
[n
].bitfield
.imm16
)
9934 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
9939 for (n
= 0; n
< i
.operands
; n
++)
9941 if (operand_type_check (i
.types
[n
], disp
))
9943 int size
= disp_size (n
);
9945 if (now_seg
== absolute_section
)
9946 abs_section_offset
+= size
;
9947 else if (i
.op
[n
].disps
->X_op
== O_constant
)
9949 offsetT val
= i
.op
[n
].disps
->X_add_number
;
9951 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
9953 p
= frag_more (size
);
9954 md_number_to_chars (p
, val
, size
);
9958 enum bfd_reloc_code_real reloc_type
;
9959 int sign
= i
.types
[n
].bitfield
.disp32s
;
9960 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
9963 /* We can't have 8 bit displacement here. */
9964 gas_assert (!i
.types
[n
].bitfield
.disp8
);
9966 /* The PC relative address is computed relative
9967 to the instruction boundary, so in case immediate
9968 fields follows, we need to adjust the value. */
9969 if (pcrel
&& i
.imm_operands
)
9974 for (n1
= 0; n1
< i
.operands
; n1
++)
9975 if (operand_type_check (i
.types
[n1
], imm
))
9977 /* Only one immediate is allowed for PC
9978 relative address. */
9979 gas_assert (sz
== 0);
9981 i
.op
[n
].disps
->X_add_number
-= sz
;
9983 /* We should find the immediate. */
9984 gas_assert (sz
!= 0);
9987 p
= frag_more (size
);
9988 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
9990 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
9991 && (((reloc_type
== BFD_RELOC_32
9992 || reloc_type
== BFD_RELOC_X86_64_32S
9993 || (reloc_type
== BFD_RELOC_64
9995 && (i
.op
[n
].disps
->X_op
== O_symbol
9996 || (i
.op
[n
].disps
->X_op
== O_add
9997 && ((symbol_get_value_expression
9998 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
10000 || reloc_type
== BFD_RELOC_32_PCREL
))
10004 reloc_type
= BFD_RELOC_386_GOTPC
;
10005 i
.has_gotpc_tls_reloc
= true;
10006 i
.op
[n
].disps
->X_add_number
+=
10007 encoding_length (insn_start_frag
, insn_start_off
, p
);
10009 else if (reloc_type
== BFD_RELOC_64
)
10010 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
10012 /* Don't do the adjustment for x86-64, as there
10013 the pcrel addressing is relative to the _next_
10014 insn, and that is taken care of in other code. */
10015 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
10017 else if (align_branch_power
)
10019 switch (reloc_type
)
10021 case BFD_RELOC_386_TLS_GD
:
10022 case BFD_RELOC_386_TLS_LDM
:
10023 case BFD_RELOC_386_TLS_IE
:
10024 case BFD_RELOC_386_TLS_IE_32
:
10025 case BFD_RELOC_386_TLS_GOTIE
:
10026 case BFD_RELOC_386_TLS_GOTDESC
:
10027 case BFD_RELOC_386_TLS_DESC_CALL
:
10028 case BFD_RELOC_X86_64_TLSGD
:
10029 case BFD_RELOC_X86_64_TLSLD
:
10030 case BFD_RELOC_X86_64_GOTTPOFF
:
10031 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10032 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10033 i
.has_gotpc_tls_reloc
= true;
10038 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
10039 size
, i
.op
[n
].disps
, pcrel
,
10042 if (flag_code
== CODE_64BIT
&& size
== 4 && pcrel
10043 && !i
.prefix
[ADDR_PREFIX
])
10044 fixP
->fx_signed
= 1;
10046 /* Check for "call/jmp *mem", "mov mem, %reg",
10047 "test %reg, mem" and "binop mem, %reg" where binop
10048 is one of adc, add, and, cmp, or, sbb, sub, xor
10049 instructions without data prefix. Always generate
10050 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
10051 if (i
.prefix
[DATA_PREFIX
] == 0
10052 && (generate_relax_relocations
10055 && i
.rm
.regmem
== 5))
10057 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
10058 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
10059 && ((i
.operands
== 1
10060 && i
.tm
.base_opcode
== 0xff
10061 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
10062 || (i
.operands
== 2
10063 && (i
.tm
.base_opcode
== 0x8b
10064 || i
.tm
.base_opcode
== 0x85
10065 || (i
.tm
.base_opcode
& ~0x38) == 0x03))))
10069 fixP
->fx_tcbit
= i
.rex
!= 0;
10071 && (i
.base_reg
->reg_num
== RegIP
))
10072 fixP
->fx_tcbit2
= 1;
10075 fixP
->fx_tcbit2
= 1;
10083 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
10088 for (n
= 0; n
< i
.operands
; n
++)
10090 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
10091 if (i
.rounding
.type
!= rc_none
&& n
== i
.rounding
.operand
)
10094 if (operand_type_check (i
.types
[n
], imm
))
10096 int size
= imm_size (n
);
10098 if (now_seg
== absolute_section
)
10099 abs_section_offset
+= size
;
10100 else if (i
.op
[n
].imms
->X_op
== O_constant
)
10104 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
10106 p
= frag_more (size
);
10107 md_number_to_chars (p
, val
, size
);
10111 /* Not absolute_section.
10112 Need a 32-bit fixup (don't support 8bit
10113 non-absolute imms). Try to support other
10115 enum bfd_reloc_code_real reloc_type
;
10118 if (i
.types
[n
].bitfield
.imm32s
10119 && (i
.suffix
== QWORD_MNEM_SUFFIX
10120 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
10125 p
= frag_more (size
);
10126 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
10128 /* This is tough to explain. We end up with this one if we
10129 * have operands that look like
10130 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
10131 * obtain the absolute address of the GOT, and it is strongly
10132 * preferable from a performance point of view to avoid using
10133 * a runtime relocation for this. The actual sequence of
10134 * instructions often look something like:
10139 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
10141 * The call and pop essentially return the absolute address
10142 * of the label .L66 and store it in %ebx. The linker itself
10143 * will ultimately change the first operand of the addl so
10144 * that %ebx points to the GOT, but to keep things simple, the
10145 * .o file must have this operand set so that it generates not
10146 * the absolute address of .L66, but the absolute address of
10147 * itself. This allows the linker itself simply treat a GOTPC
10148 * relocation as asking for a pcrel offset to the GOT to be
10149 * added in, and the addend of the relocation is stored in the
10150 * operand field for the instruction itself.
10152 * Our job here is to fix the operand so that it would add
10153 * the correct offset so that %ebx would point to itself. The
10154 * thing that is tricky is that .-.L66 will point to the
10155 * beginning of the instruction, so we need to further modify
10156 * the operand so that it will point to itself. There are
10157 * other cases where you have something like:
10159 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
10161 * and here no correction would be required. Internally in
10162 * the assembler we treat operands of this form as not being
10163 * pcrel since the '.' is explicitly mentioned, and I wonder
10164 * whether it would simplify matters to do it this way. Who
10165 * knows. In earlier versions of the PIC patches, the
10166 * pcrel_adjust field was used to store the correction, but
10167 * since the expression is not pcrel, I felt it would be
10168 * confusing to do it this way. */
10170 if ((reloc_type
== BFD_RELOC_32
10171 || reloc_type
== BFD_RELOC_X86_64_32S
10172 || reloc_type
== BFD_RELOC_64
)
10174 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
10175 && (i
.op
[n
].imms
->X_op
== O_symbol
10176 || (i
.op
[n
].imms
->X_op
== O_add
10177 && ((symbol_get_value_expression
10178 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
10182 reloc_type
= BFD_RELOC_386_GOTPC
;
10183 else if (size
== 4)
10184 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
10185 else if (size
== 8)
10186 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
10187 i
.has_gotpc_tls_reloc
= true;
10188 i
.op
[n
].imms
->X_add_number
+=
10189 encoding_length (insn_start_frag
, insn_start_off
, p
);
10191 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
10192 i
.op
[n
].imms
, 0, reloc_type
);
10198 /* x86_cons_fix_new is called via the expression parsing code when a
10199 reloc is needed. We use this hook to get the correct .got reloc. */
10200 static int cons_sign
= -1;
10203 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
10204 expressionS
*exp
, bfd_reloc_code_real_type r
)
10206 r
= reloc (len
, 0, cons_sign
, r
);
10209 if (exp
->X_op
== O_secrel
)
10211 exp
->X_op
= O_symbol
;
10212 r
= BFD_RELOC_32_SECREL
;
10216 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
10219 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
10220 purpose of the `.dc.a' internal pseudo-op. */
10223 x86_address_bytes (void)
10225 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
10227 return stdoutput
->arch_info
->bits_per_address
/ 8;
10230 #if (!(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
10231 || defined (LEX_AT)) && !defined (TE_PE)
10232 # define lex_got(reloc, adjust, types) NULL
10234 /* Parse operands of the form
10235 <symbol>@GOTOFF+<nnn>
10236 and similar .plt or .got references.
10238 If we find one, set up the correct relocation in RELOC and copy the
10239 input string, minus the `@GOTOFF' into a malloc'd buffer for
10240 parsing by the calling routine. Return this buffer, and if ADJUST
10241 is non-null set it to the length of the string we removed from the
10242 input line. Otherwise return NULL. */
10244 lex_got (enum bfd_reloc_code_real
*rel
,
10246 i386_operand_type
*types
)
10248 /* Some of the relocations depend on the size of what field is to
10249 be relocated. But in our callers i386_immediate and i386_displacement
10250 we don't yet know the operand size (this will be set by insn
10251 matching). Hence we record the word32 relocation here,
10252 and adjust the reloc according to the real size in reloc(). */
10253 static const struct {
10256 const enum bfd_reloc_code_real rel
[2];
10257 const i386_operand_type types64
;
10258 bool need_GOT_symbol
;
10261 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10262 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
10263 BFD_RELOC_SIZE32
},
10264 OPERAND_TYPE_IMM32_64
, false },
10266 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
10267 BFD_RELOC_X86_64_PLTOFF64
},
10268 OPERAND_TYPE_IMM64
, true },
10269 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
10270 BFD_RELOC_X86_64_PLT32
},
10271 OPERAND_TYPE_IMM32_32S_DISP32
, false },
10272 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
10273 BFD_RELOC_X86_64_GOTPLT64
},
10274 OPERAND_TYPE_IMM64_DISP64
, true },
10275 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
10276 BFD_RELOC_X86_64_GOTOFF64
},
10277 OPERAND_TYPE_IMM64_DISP64
, true },
10278 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
10279 BFD_RELOC_X86_64_GOTPCREL
},
10280 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10281 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
10282 BFD_RELOC_X86_64_TLSGD
},
10283 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10284 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
10285 _dummy_first_bfd_reloc_code_real
},
10286 OPERAND_TYPE_NONE
, true },
10287 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
10288 BFD_RELOC_X86_64_TLSLD
},
10289 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10290 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
10291 BFD_RELOC_X86_64_GOTTPOFF
},
10292 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10293 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
10294 BFD_RELOC_X86_64_TPOFF32
},
10295 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, true },
10296 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
10297 _dummy_first_bfd_reloc_code_real
},
10298 OPERAND_TYPE_NONE
, true },
10299 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
10300 BFD_RELOC_X86_64_DTPOFF32
},
10301 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, true },
10302 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
10303 _dummy_first_bfd_reloc_code_real
},
10304 OPERAND_TYPE_NONE
, true },
10305 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
10306 _dummy_first_bfd_reloc_code_real
},
10307 OPERAND_TYPE_NONE
, true },
10308 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
10309 BFD_RELOC_X86_64_GOT32
},
10310 OPERAND_TYPE_IMM32_32S_64_DISP32
, true },
10311 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
10312 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
10313 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10314 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
10315 BFD_RELOC_X86_64_TLSDESC_CALL
},
10316 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10318 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
10319 BFD_RELOC_32_SECREL
},
10320 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, false },
10326 #if defined (OBJ_MAYBE_ELF) && !defined (TE_PE)
10331 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
10332 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
10335 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
10337 int len
= gotrel
[j
].len
;
10338 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
10340 if (gotrel
[j
].rel
[object_64bit
] != 0)
10343 char *tmpbuf
, *past_reloc
;
10345 *rel
= gotrel
[j
].rel
[object_64bit
];
10349 if (flag_code
!= CODE_64BIT
)
10351 types
->bitfield
.imm32
= 1;
10352 types
->bitfield
.disp32
= 1;
10355 *types
= gotrel
[j
].types64
;
10358 if (gotrel
[j
].need_GOT_symbol
&& GOT_symbol
== NULL
)
10359 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
10361 /* The length of the first part of our input line. */
10362 first
= cp
- input_line_pointer
;
10364 /* The second part goes from after the reloc token until
10365 (and including) an end_of_line char or comma. */
10366 past_reloc
= cp
+ 1 + len
;
10368 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
10370 second
= cp
+ 1 - past_reloc
;
10372 /* Allocate and copy string. The trailing NUL shouldn't
10373 be necessary, but be safe. */
10374 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
10375 memcpy (tmpbuf
, input_line_pointer
, first
);
10376 if (second
!= 0 && *past_reloc
!= ' ')
10377 /* Replace the relocation token with ' ', so that
10378 errors like foo@GOTOFF1 will be detected. */
10379 tmpbuf
[first
++] = ' ';
10381 /* Increment length by 1 if the relocation token is
10386 memcpy (tmpbuf
+ first
, past_reloc
, second
);
10387 tmpbuf
[first
+ second
] = '\0';
10391 as_bad (_("@%s reloc is not supported with %d-bit output format"),
10392 gotrel
[j
].str
, 1 << (5 + object_64bit
));
10397 /* Might be a symbol version string. Don't as_bad here. */
10402 bfd_reloc_code_real_type
10403 x86_cons (expressionS
*exp
, int size
)
10405 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
10407 intel_syntax
= -intel_syntax
;
10410 if (size
== 4 || (object_64bit
&& size
== 8))
10412 /* Handle @GOTOFF and the like in an expression. */
10414 char *gotfree_input_line
;
10417 save
= input_line_pointer
;
10418 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
10419 if (gotfree_input_line
)
10420 input_line_pointer
= gotfree_input_line
;
10424 if (gotfree_input_line
)
10426 /* expression () has merrily parsed up to the end of line,
10427 or a comma - in the wrong buffer. Transfer how far
10428 input_line_pointer has moved to the right buffer. */
10429 input_line_pointer
= (save
10430 + (input_line_pointer
- gotfree_input_line
)
10432 free (gotfree_input_line
);
10433 if (exp
->X_op
== O_constant
10434 || exp
->X_op
== O_absent
10435 || exp
->X_op
== O_illegal
10436 || exp
->X_op
== O_register
10437 || exp
->X_op
== O_big
)
10439 char c
= *input_line_pointer
;
10440 *input_line_pointer
= 0;
10441 as_bad (_("missing or invalid expression `%s'"), save
);
10442 *input_line_pointer
= c
;
10444 else if ((got_reloc
== BFD_RELOC_386_PLT32
10445 || got_reloc
== BFD_RELOC_X86_64_PLT32
)
10446 && exp
->X_op
!= O_symbol
)
10448 char c
= *input_line_pointer
;
10449 *input_line_pointer
= 0;
10450 as_bad (_("invalid PLT expression `%s'"), save
);
10451 *input_line_pointer
= c
;
10458 intel_syntax
= -intel_syntax
;
10461 i386_intel_simplify (exp
);
10463 /* If not 64bit, massage value, to account for wraparound when !BFD64. */
10464 if (size
== 4 && exp
->X_op
== O_constant
&& !object_64bit
)
10465 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
10471 signed_cons (int size
)
10481 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
10488 if (exp
.X_op
== O_symbol
)
10489 exp
.X_op
= O_secrel
;
10491 emit_expr (&exp
, 4);
10493 while (*input_line_pointer
++ == ',');
10495 input_line_pointer
--;
10496 demand_empty_rest_of_line ();
10500 /* Handle Vector operations. */
10503 check_VecOperations (char *op_string
)
10505 const reg_entry
*mask
;
10512 if (*op_string
== '{')
10516 /* Check broadcasts. */
10517 if (startswith (op_string
, "1to"))
10519 unsigned int bcst_type
;
10521 if (i
.broadcast
.type
)
10522 goto duplicated_vec_op
;
10525 if (*op_string
== '8')
10527 else if (*op_string
== '4')
10529 else if (*op_string
== '2')
10531 else if (*op_string
== '1'
10532 && *(op_string
+1) == '6')
10539 as_bad (_("Unsupported broadcast: `%s'"), saved
);
10544 i
.broadcast
.type
= bcst_type
;
10545 i
.broadcast
.operand
= this_operand
;
10547 /* Check masking operation. */
10548 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
10550 if (mask
== &bad_reg
)
10553 /* k0 can't be used for write mask. */
10554 if (mask
->reg_type
.bitfield
.class != RegMask
|| !mask
->reg_num
)
10556 as_bad (_("`%s%s' can't be used for write mask"),
10557 register_prefix
, mask
->reg_name
);
10564 i
.mask
.operand
= this_operand
;
10566 else if (i
.mask
.reg
->reg_num
)
10567 goto duplicated_vec_op
;
10572 /* Only "{z}" is allowed here. No need to check
10573 zeroing mask explicitly. */
10574 if (i
.mask
.operand
!= (unsigned int) this_operand
)
10576 as_bad (_("invalid write mask `%s'"), saved
);
10581 op_string
= end_op
;
10583 /* Check zeroing-flag for masking operation. */
10584 else if (*op_string
== 'z')
10588 i
.mask
.reg
= reg_k0
;
10589 i
.mask
.zeroing
= 1;
10590 i
.mask
.operand
= this_operand
;
10594 if (i
.mask
.zeroing
)
10597 as_bad (_("duplicated `%s'"), saved
);
10601 i
.mask
.zeroing
= 1;
10603 /* Only "{%k}" is allowed here. No need to check mask
10604 register explicitly. */
10605 if (i
.mask
.operand
!= (unsigned int) this_operand
)
10607 as_bad (_("invalid zeroing-masking `%s'"),
10616 goto unknown_vec_op
;
10618 if (*op_string
!= '}')
10620 as_bad (_("missing `}' in `%s'"), saved
);
10625 /* Strip whitespace since the addition of pseudo prefixes
10626 changed how the scrubber treats '{'. */
10627 if (is_space_char (*op_string
))
10633 /* We don't know this one. */
10634 as_bad (_("unknown vector operation: `%s'"), saved
);
10638 if (i
.mask
.reg
&& i
.mask
.zeroing
&& !i
.mask
.reg
->reg_num
)
10640 as_bad (_("zeroing-masking only allowed with write mask"));
10648 i386_immediate (char *imm_start
)
10650 char *save_input_line_pointer
;
10651 char *gotfree_input_line
;
10654 i386_operand_type types
;
10656 operand_type_set (&types
, ~0);
10658 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
10660 as_bad (_("at most %d immediate operands are allowed"),
10661 MAX_IMMEDIATE_OPERANDS
);
10665 exp
= &im_expressions
[i
.imm_operands
++];
10666 i
.op
[this_operand
].imms
= exp
;
10668 if (is_space_char (*imm_start
))
10671 save_input_line_pointer
= input_line_pointer
;
10672 input_line_pointer
= imm_start
;
10674 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10675 if (gotfree_input_line
)
10676 input_line_pointer
= gotfree_input_line
;
10678 exp_seg
= expression (exp
);
10680 SKIP_WHITESPACE ();
10681 if (*input_line_pointer
)
10682 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10684 input_line_pointer
= save_input_line_pointer
;
10685 if (gotfree_input_line
)
10687 free (gotfree_input_line
);
10689 if (exp
->X_op
== O_constant
)
10690 exp
->X_op
= O_illegal
;
10693 if (exp_seg
== reg_section
)
10695 as_bad (_("illegal immediate register operand %s"), imm_start
);
10699 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
10703 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10704 i386_operand_type types
, const char *imm_start
)
10706 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
10709 as_bad (_("missing or invalid immediate expression `%s'"),
10713 else if (exp
->X_op
== O_constant
)
10715 /* Size it properly later. */
10716 i
.types
[this_operand
].bitfield
.imm64
= 1;
10718 /* If not 64bit, sign/zero extend val, to account for wraparound
10720 if (flag_code
!= CODE_64BIT
)
10721 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
10723 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10724 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
10725 && exp_seg
!= absolute_section
10726 && exp_seg
!= text_section
10727 && exp_seg
!= data_section
10728 && exp_seg
!= bss_section
10729 && exp_seg
!= undefined_section
10730 && !bfd_is_com_section (exp_seg
))
10732 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10738 /* This is an address. The size of the address will be
10739 determined later, depending on destination register,
10740 suffix, or the default for the section. */
10741 i
.types
[this_operand
].bitfield
.imm8
= 1;
10742 i
.types
[this_operand
].bitfield
.imm16
= 1;
10743 i
.types
[this_operand
].bitfield
.imm32
= 1;
10744 i
.types
[this_operand
].bitfield
.imm32s
= 1;
10745 i
.types
[this_operand
].bitfield
.imm64
= 1;
10746 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10754 i386_scale (char *scale
)
10757 char *save
= input_line_pointer
;
10759 input_line_pointer
= scale
;
10760 val
= get_absolute_expression ();
10765 i
.log2_scale_factor
= 0;
10768 i
.log2_scale_factor
= 1;
10771 i
.log2_scale_factor
= 2;
10774 i
.log2_scale_factor
= 3;
10778 char sep
= *input_line_pointer
;
10780 *input_line_pointer
= '\0';
10781 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
10783 *input_line_pointer
= sep
;
10784 input_line_pointer
= save
;
10788 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
10790 as_warn (_("scale factor of %d without an index register"),
10791 1 << i
.log2_scale_factor
);
10792 i
.log2_scale_factor
= 0;
10794 scale
= input_line_pointer
;
10795 input_line_pointer
= save
;
10800 i386_displacement (char *disp_start
, char *disp_end
)
10804 char *save_input_line_pointer
;
10805 char *gotfree_input_line
;
10807 i386_operand_type bigdisp
, types
= anydisp
;
10810 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
10812 as_bad (_("at most %d displacement operands are allowed"),
10813 MAX_MEMORY_OPERANDS
);
10817 operand_type_set (&bigdisp
, 0);
10819 || i
.types
[this_operand
].bitfield
.baseindex
10820 || (current_templates
->start
->opcode_modifier
.jump
!= JUMP
10821 && current_templates
->start
->opcode_modifier
.jump
!= JUMP_DWORD
))
10823 i386_addressing_mode ();
10824 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
10825 if (flag_code
== CODE_64BIT
)
10829 bigdisp
.bitfield
.disp32s
= 1;
10830 bigdisp
.bitfield
.disp64
= 1;
10833 bigdisp
.bitfield
.disp32
= 1;
10835 else if ((flag_code
== CODE_16BIT
) ^ override
)
10836 bigdisp
.bitfield
.disp16
= 1;
10838 bigdisp
.bitfield
.disp32
= 1;
10842 /* For PC-relative branches, the width of the displacement may be
10843 dependent upon data size, but is never dependent upon address size.
10844 Also make sure to not unintentionally match against a non-PC-relative
10845 branch template. */
10846 static templates aux_templates
;
10847 const insn_template
*t
= current_templates
->start
;
10848 bool has_intel64
= false;
10850 aux_templates
.start
= t
;
10851 while (++t
< current_templates
->end
)
10853 if (t
->opcode_modifier
.jump
10854 != current_templates
->start
->opcode_modifier
.jump
)
10856 if ((t
->opcode_modifier
.isa64
>= INTEL64
))
10857 has_intel64
= true;
10859 if (t
< current_templates
->end
)
10861 aux_templates
.end
= t
;
10862 current_templates
= &aux_templates
;
10865 override
= (i
.prefix
[DATA_PREFIX
] != 0);
10866 if (flag_code
== CODE_64BIT
)
10868 if ((override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
10869 && (!intel64
|| !has_intel64
))
10870 bigdisp
.bitfield
.disp16
= 1;
10872 bigdisp
.bitfield
.disp32s
= 1;
10877 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
10879 : LONG_MNEM_SUFFIX
));
10880 bigdisp
.bitfield
.disp32
= 1;
10881 if ((flag_code
== CODE_16BIT
) ^ override
)
10883 bigdisp
.bitfield
.disp32
= 0;
10884 bigdisp
.bitfield
.disp16
= 1;
10888 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
10891 exp
= &disp_expressions
[i
.disp_operands
];
10892 i
.op
[this_operand
].disps
= exp
;
10894 save_input_line_pointer
= input_line_pointer
;
10895 input_line_pointer
= disp_start
;
10896 END_STRING_AND_SAVE (disp_end
);
10898 #ifndef GCC_ASM_O_HACK
10899 #define GCC_ASM_O_HACK 0
10902 END_STRING_AND_SAVE (disp_end
+ 1);
10903 if (i
.types
[this_operand
].bitfield
.baseIndex
10904 && displacement_string_end
[-1] == '+')
10906 /* This hack is to avoid a warning when using the "o"
10907 constraint within gcc asm statements.
10910 #define _set_tssldt_desc(n,addr,limit,type) \
10911 __asm__ __volatile__ ( \
10912 "movw %w2,%0\n\t" \
10913 "movw %w1,2+%0\n\t" \
10914 "rorl $16,%1\n\t" \
10915 "movb %b1,4+%0\n\t" \
10916 "movb %4,5+%0\n\t" \
10917 "movb $0,6+%0\n\t" \
10918 "movb %h1,7+%0\n\t" \
10920 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
10922 This works great except that the output assembler ends
10923 up looking a bit weird if it turns out that there is
10924 no offset. You end up producing code that looks like:
10937 So here we provide the missing zero. */
10939 *displacement_string_end
= '0';
10942 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10943 if (gotfree_input_line
)
10944 input_line_pointer
= gotfree_input_line
;
10946 exp_seg
= expression (exp
);
10948 SKIP_WHITESPACE ();
10949 if (*input_line_pointer
)
10950 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10952 RESTORE_END_STRING (disp_end
+ 1);
10954 input_line_pointer
= save_input_line_pointer
;
10955 if (gotfree_input_line
)
10957 free (gotfree_input_line
);
10959 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
10960 exp
->X_op
= O_illegal
;
10963 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
10965 RESTORE_END_STRING (disp_end
);
10971 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10972 i386_operand_type types
, const char *disp_start
)
10974 i386_operand_type bigdisp
;
10977 /* We do this to make sure that the section symbol is in
10978 the symbol table. We will ultimately change the relocation
10979 to be relative to the beginning of the section. */
10980 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
10981 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
10982 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
10984 if (exp
->X_op
!= O_symbol
)
10987 if (S_IS_LOCAL (exp
->X_add_symbol
)
10988 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
10989 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
10990 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
10991 exp
->X_op
= O_subtract
;
10992 exp
->X_op_symbol
= GOT_symbol
;
10993 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
10994 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
10995 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
10996 i
.reloc
[this_operand
] = BFD_RELOC_64
;
10998 i
.reloc
[this_operand
] = BFD_RELOC_32
;
11001 else if (exp
->X_op
== O_absent
11002 || exp
->X_op
== O_illegal
11003 || exp
->X_op
== O_big
)
11006 as_bad (_("missing or invalid displacement expression `%s'"),
11011 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
11012 else if (exp
->X_op
!= O_constant
11013 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
11014 && exp_seg
!= absolute_section
11015 && exp_seg
!= text_section
11016 && exp_seg
!= data_section
11017 && exp_seg
!= bss_section
11018 && exp_seg
!= undefined_section
11019 && !bfd_is_com_section (exp_seg
))
11021 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
11026 if (current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
11027 /* Constants get taken care of by optimize_disp(). */
11028 && exp
->X_op
!= O_constant
)
11029 i
.types
[this_operand
].bitfield
.disp8
= 1;
11031 /* Check if this is a displacement only operand. */
11032 bigdisp
= i
.types
[this_operand
];
11033 bigdisp
.bitfield
.disp8
= 0;
11034 bigdisp
.bitfield
.disp16
= 0;
11035 bigdisp
.bitfield
.disp32
= 0;
11036 bigdisp
.bitfield
.disp32s
= 0;
11037 bigdisp
.bitfield
.disp64
= 0;
11038 if (operand_type_all_zero (&bigdisp
))
11039 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
11045 /* Return the active addressing mode, taking address override and
11046 registers forming the address into consideration. Update the
11047 address override prefix if necessary. */
11049 static enum flag_code
11050 i386_addressing_mode (void)
11052 enum flag_code addr_mode
;
11054 if (i
.prefix
[ADDR_PREFIX
])
11055 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
11056 else if (flag_code
== CODE_16BIT
11057 && current_templates
->start
->cpu_flags
.bitfield
.cpumpx
11058 /* Avoid replacing the "16-bit addressing not allowed" diagnostic
11059 from md_assemble() by "is not a valid base/index expression"
11060 when there is a base and/or index. */
11061 && !i
.types
[this_operand
].bitfield
.baseindex
)
11063 /* MPX insn memory operands with neither base nor index must be forced
11064 to use 32-bit addressing in 16-bit mode. */
11065 addr_mode
= CODE_32BIT
;
11066 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
11068 gas_assert (!i
.types
[this_operand
].bitfield
.disp16
);
11069 gas_assert (!i
.types
[this_operand
].bitfield
.disp32
);
11073 addr_mode
= flag_code
;
11075 #if INFER_ADDR_PREFIX
11076 if (i
.mem_operands
== 0)
11078 /* Infer address prefix from the first memory operand. */
11079 const reg_entry
*addr_reg
= i
.base_reg
;
11081 if (addr_reg
== NULL
)
11082 addr_reg
= i
.index_reg
;
11086 if (addr_reg
->reg_type
.bitfield
.dword
)
11087 addr_mode
= CODE_32BIT
;
11088 else if (flag_code
!= CODE_64BIT
11089 && addr_reg
->reg_type
.bitfield
.word
)
11090 addr_mode
= CODE_16BIT
;
11092 if (addr_mode
!= flag_code
)
11094 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
11096 /* Change the size of any displacement too. At most one
11097 of Disp16 or Disp32 is set.
11098 FIXME. There doesn't seem to be any real need for
11099 separate Disp16 and Disp32 flags. The same goes for
11100 Imm16 and Imm32. Removing them would probably clean
11101 up the code quite a lot. */
11102 if (flag_code
!= CODE_64BIT
11103 && (i
.types
[this_operand
].bitfield
.disp16
11104 || i
.types
[this_operand
].bitfield
.disp32
))
11105 i
.types
[this_operand
]
11106 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
11116 /* Make sure the memory operand we've been dealt is valid.
11117 Return 1 on success, 0 on a failure. */
11120 i386_index_check (const char *operand_string
)
11122 const char *kind
= "base/index";
11123 enum flag_code addr_mode
= i386_addressing_mode ();
11124 const insn_template
*t
= current_templates
->start
;
11126 if (t
->opcode_modifier
.isstring
11127 && !t
->cpu_flags
.bitfield
.cpupadlock
11128 && (current_templates
->end
[-1].opcode_modifier
.isstring
11129 || i
.mem_operands
))
11131 /* Memory operands of string insns are special in that they only allow
11132 a single register (rDI, rSI, or rBX) as their memory address. */
11133 const reg_entry
*expected_reg
;
11134 static const char *di_si
[][2] =
11140 static const char *bx
[] = { "ebx", "bx", "rbx" };
11142 kind
= "string address";
11144 if (t
->opcode_modifier
.prefixok
== PrefixRep
)
11146 int es_op
= current_templates
->end
[-1].opcode_modifier
.isstring
11147 - IS_STRING_ES_OP0
;
11150 if (!current_templates
->end
[-1].operand_types
[0].bitfield
.baseindex
11151 || ((!i
.mem_operands
!= !intel_syntax
)
11152 && current_templates
->end
[-1].operand_types
[1]
11153 .bitfield
.baseindex
))
11156 = (const reg_entry
*) str_hash_find (reg_hash
,
11157 di_si
[addr_mode
][op
== es_op
]);
11161 = (const reg_entry
*)str_hash_find (reg_hash
, bx
[addr_mode
]);
11163 if (i
.base_reg
!= expected_reg
11165 || operand_type_check (i
.types
[this_operand
], disp
))
11167 /* The second memory operand must have the same size as
11171 && !((addr_mode
== CODE_64BIT
11172 && i
.base_reg
->reg_type
.bitfield
.qword
)
11173 || (addr_mode
== CODE_32BIT
11174 ? i
.base_reg
->reg_type
.bitfield
.dword
11175 : i
.base_reg
->reg_type
.bitfield
.word
)))
11178 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
11180 intel_syntax
? '[' : '(',
11182 expected_reg
->reg_name
,
11183 intel_syntax
? ']' : ')');
11190 as_bad (_("`%s' is not a valid %s expression"),
11191 operand_string
, kind
);
11196 if (addr_mode
!= CODE_16BIT
)
11198 /* 32-bit/64-bit checks. */
11199 if (i
.disp_encoding
== disp_encoding_16bit
)
11202 as_bad (_("invalid `%s' prefix"),
11203 addr_mode
== CODE_16BIT
? "{disp32}" : "{disp16}");
11208 && ((addr_mode
== CODE_64BIT
11209 ? !i
.base_reg
->reg_type
.bitfield
.qword
11210 : !i
.base_reg
->reg_type
.bitfield
.dword
)
11211 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
11212 || i
.base_reg
->reg_num
== RegIZ
))
11214 && !i
.index_reg
->reg_type
.bitfield
.xmmword
11215 && !i
.index_reg
->reg_type
.bitfield
.ymmword
11216 && !i
.index_reg
->reg_type
.bitfield
.zmmword
11217 && ((addr_mode
== CODE_64BIT
11218 ? !i
.index_reg
->reg_type
.bitfield
.qword
11219 : !i
.index_reg
->reg_type
.bitfield
.dword
)
11220 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
11223 /* bndmk, bndldx, bndstx and mandatory non-vector SIB have special restrictions. */
11224 if ((t
->opcode_modifier
.opcodeprefix
== PREFIX_0XF3
11225 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11226 && t
->base_opcode
== 0x1b)
11227 || (t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
11228 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11229 && (t
->base_opcode
& ~1) == 0x1a)
11230 || t
->opcode_modifier
.sib
== SIBMEM
)
11232 /* They cannot use RIP-relative addressing. */
11233 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
11235 as_bad (_("`%s' cannot be used here"), operand_string
);
11239 /* bndldx and bndstx ignore their scale factor. */
11240 if (t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
11241 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11242 && (t
->base_opcode
& ~1) == 0x1a
11243 && i
.log2_scale_factor
)
11244 as_warn (_("register scaling is being ignored here"));
11249 /* 16-bit checks. */
11250 if (i
.disp_encoding
== disp_encoding_32bit
)
11254 && (!i
.base_reg
->reg_type
.bitfield
.word
11255 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
11257 && (!i
.index_reg
->reg_type
.bitfield
.word
11258 || !i
.index_reg
->reg_type
.bitfield
.baseindex
11260 && i
.base_reg
->reg_num
< 6
11261 && i
.index_reg
->reg_num
>= 6
11262 && i
.log2_scale_factor
== 0))))
11269 /* Handle vector immediates. */
11272 RC_SAE_immediate (const char *imm_start
)
11274 unsigned int match_found
, j
;
11275 const char *pstr
= imm_start
;
11283 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
11285 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
11287 if (i
.rounding
.type
!= rc_none
)
11289 as_bad (_("duplicated `%s'"), imm_start
);
11293 i
.rounding
.type
= RC_NamesTable
[j
].type
;
11294 i
.rounding
.operand
= this_operand
;
11296 pstr
+= RC_NamesTable
[j
].len
;
11304 if (*pstr
++ != '}')
11306 as_bad (_("Missing '}': '%s'"), imm_start
);
11309 /* RC/SAE immediate string should contain nothing more. */;
11312 as_bad (_("Junk after '}': '%s'"), imm_start
);
11316 exp
= &im_expressions
[i
.imm_operands
++];
11317 i
.op
[this_operand
].imms
= exp
;
11319 exp
->X_op
= O_constant
;
11320 exp
->X_add_number
= 0;
11321 exp
->X_add_symbol
= (symbolS
*) 0;
11322 exp
->X_op_symbol
= (symbolS
*) 0;
11324 i
.types
[this_operand
].bitfield
.imm8
= 1;
11328 /* Only string instructions can have a second memory operand, so
11329 reduce current_templates to just those if it contains any. */
11331 maybe_adjust_templates (void)
11333 const insn_template
*t
;
11335 gas_assert (i
.mem_operands
== 1);
11337 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
11338 if (t
->opcode_modifier
.isstring
)
11341 if (t
< current_templates
->end
)
11343 static templates aux_templates
;
11346 aux_templates
.start
= t
;
11347 for (; t
< current_templates
->end
; ++t
)
11348 if (!t
->opcode_modifier
.isstring
)
11350 aux_templates
.end
= t
;
11352 /* Determine whether to re-check the first memory operand. */
11353 recheck
= (aux_templates
.start
!= current_templates
->start
11354 || t
!= current_templates
->end
);
11356 current_templates
= &aux_templates
;
11360 i
.mem_operands
= 0;
11361 if (i
.memop1_string
!= NULL
11362 && i386_index_check (i
.memop1_string
) == 0)
11364 i
.mem_operands
= 1;
11371 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
11375 i386_att_operand (char *operand_string
)
11377 const reg_entry
*r
;
11379 char *op_string
= operand_string
;
11381 if (is_space_char (*op_string
))
11384 /* We check for an absolute prefix (differentiating,
11385 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
11386 if (*op_string
== ABSOLUTE_PREFIX
)
11389 if (is_space_char (*op_string
))
11391 i
.jumpabsolute
= true;
11394 /* Check if operand is a register. */
11395 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
11397 i386_operand_type temp
;
11402 /* Check for a segment override by searching for ':' after a
11403 segment register. */
11404 op_string
= end_op
;
11405 if (is_space_char (*op_string
))
11407 if (*op_string
== ':' && r
->reg_type
.bitfield
.class == SReg
)
11409 i
.seg
[i
.mem_operands
] = r
;
11411 /* Skip the ':' and whitespace. */
11413 if (is_space_char (*op_string
))
11416 /* Handle case of %es:*foo. */
11417 if (!i
.jumpabsolute
&& *op_string
== ABSOLUTE_PREFIX
)
11420 if (is_space_char (*op_string
))
11422 i
.jumpabsolute
= true;
11425 if (!is_digit_char (*op_string
)
11426 && !is_identifier_char (*op_string
)
11427 && *op_string
!= '(')
11429 as_bad (_("bad memory operand `%s'"), op_string
);
11432 goto do_memory_reference
;
11435 /* Handle vector operations. */
11436 if (*op_string
== '{')
11438 op_string
= check_VecOperations (op_string
);
11439 if (op_string
== NULL
)
11445 as_bad (_("junk `%s' after register"), op_string
);
11448 temp
= r
->reg_type
;
11449 temp
.bitfield
.baseindex
= 0;
11450 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
11452 i
.types
[this_operand
].bitfield
.unspecified
= 0;
11453 i
.op
[this_operand
].regs
= r
;
11456 else if (*op_string
== REGISTER_PREFIX
)
11458 as_bad (_("bad register name `%s'"), op_string
);
11461 else if (*op_string
== IMMEDIATE_PREFIX
)
11464 if (i
.jumpabsolute
)
11466 as_bad (_("immediate operand illegal with absolute jump"));
11469 if (!i386_immediate (op_string
))
11472 else if (RC_SAE_immediate (operand_string
))
11474 /* If it is a RC or SAE immediate, do nothing. */
11477 else if (is_digit_char (*op_string
)
11478 || is_identifier_char (*op_string
)
11479 || *op_string
== '"'
11480 || *op_string
== '(')
11482 /* This is a memory reference of some sort. */
11485 /* Start and end of displacement string expression (if found). */
11486 char *displacement_string_start
;
11487 char *displacement_string_end
;
11490 do_memory_reference
:
11491 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
11493 if ((i
.mem_operands
== 1
11494 && !current_templates
->start
->opcode_modifier
.isstring
)
11495 || i
.mem_operands
== 2)
11497 as_bad (_("too many memory references for `%s'"),
11498 current_templates
->start
->name
);
11502 /* Check for base index form. We detect the base index form by
11503 looking for an ')' at the end of the operand, searching
11504 for the '(' matching it, and finding a REGISTER_PREFIX or ','
11506 base_string
= op_string
+ strlen (op_string
);
11508 /* Handle vector operations. */
11509 vop_start
= strchr (op_string
, '{');
11510 if (vop_start
&& vop_start
< base_string
)
11512 if (check_VecOperations (vop_start
) == NULL
)
11514 base_string
= vop_start
;
11518 if (is_space_char (*base_string
))
11521 /* If we only have a displacement, set-up for it to be parsed later. */
11522 displacement_string_start
= op_string
;
11523 displacement_string_end
= base_string
+ 1;
11525 if (*base_string
== ')')
11528 unsigned int parens_balanced
= 1;
11529 /* We've already checked that the number of left & right ()'s are
11530 equal, so this loop will not be infinite. */
11534 if (*base_string
== ')')
11536 if (*base_string
== '(')
11539 while (parens_balanced
);
11541 temp_string
= base_string
;
11543 /* Skip past '(' and whitespace. */
11545 if (is_space_char (*base_string
))
11548 if (*base_string
== ','
11549 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
11552 displacement_string_end
= temp_string
;
11554 i
.types
[this_operand
].bitfield
.baseindex
= 1;
11558 if (i
.base_reg
== &bad_reg
)
11560 base_string
= end_op
;
11561 if (is_space_char (*base_string
))
11565 /* There may be an index reg or scale factor here. */
11566 if (*base_string
== ',')
11569 if (is_space_char (*base_string
))
11572 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
11575 if (i
.index_reg
== &bad_reg
)
11577 base_string
= end_op
;
11578 if (is_space_char (*base_string
))
11580 if (*base_string
== ',')
11583 if (is_space_char (*base_string
))
11586 else if (*base_string
!= ')')
11588 as_bad (_("expecting `,' or `)' "
11589 "after index register in `%s'"),
11594 else if (*base_string
== REGISTER_PREFIX
)
11596 end_op
= strchr (base_string
, ',');
11599 as_bad (_("bad register name `%s'"), base_string
);
11603 /* Check for scale factor. */
11604 if (*base_string
!= ')')
11606 char *end_scale
= i386_scale (base_string
);
11611 base_string
= end_scale
;
11612 if (is_space_char (*base_string
))
11614 if (*base_string
!= ')')
11616 as_bad (_("expecting `)' "
11617 "after scale factor in `%s'"),
11622 else if (!i
.index_reg
)
11624 as_bad (_("expecting index register or scale factor "
11625 "after `,'; got '%c'"),
11630 else if (*base_string
!= ')')
11632 as_bad (_("expecting `,' or `)' "
11633 "after base register in `%s'"),
11638 else if (*base_string
== REGISTER_PREFIX
)
11640 end_op
= strchr (base_string
, ',');
11643 as_bad (_("bad register name `%s'"), base_string
);
11648 /* If there's an expression beginning the operand, parse it,
11649 assuming displacement_string_start and
11650 displacement_string_end are meaningful. */
11651 if (displacement_string_start
!= displacement_string_end
)
11653 if (!i386_displacement (displacement_string_start
,
11654 displacement_string_end
))
11658 /* Special case for (%dx) while doing input/output op. */
11660 && i
.base_reg
->reg_type
.bitfield
.instance
== RegD
11661 && i
.base_reg
->reg_type
.bitfield
.word
11662 && i
.index_reg
== 0
11663 && i
.log2_scale_factor
== 0
11664 && i
.seg
[i
.mem_operands
] == 0
11665 && !operand_type_check (i
.types
[this_operand
], disp
))
11667 i
.types
[this_operand
] = i
.base_reg
->reg_type
;
11671 if (i386_index_check (operand_string
) == 0)
11673 i
.flags
[this_operand
] |= Operand_Mem
;
11674 if (i
.mem_operands
== 0)
11675 i
.memop1_string
= xstrdup (operand_string
);
11680 /* It's not a memory operand; argh! */
11681 as_bad (_("invalid char %s beginning operand %d `%s'"),
11682 output_invalid (*op_string
),
11687 return 1; /* Normal return. */
11690 /* Calculate the maximum variable size (i.e., excluding fr_fix)
11691 that an rs_machine_dependent frag may reach. */
11694 i386_frag_max_var (fragS
*frag
)
11696 /* The only relaxable frags are for jumps.
11697 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
11698 gas_assert (frag
->fr_type
== rs_machine_dependent
);
11699 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Return non-zero if FR_SYMBOL is known to resolve within its own
   segment, i.e. a PC-relative branch to it can be relaxed without
   keeping a relocation.  FR_VAR is the frag's fr_var, holding either
   NO_RELOC or a bfd_reloc_code_real value.  */

static int
elf_symbol_resolved_in_segment_p (symbolS *fr_symbol, offsetT fr_var)
{
  /* STT_GNU_IFUNC symbol must go through PLT.  */
  if ((symbol_get_bfdsym (fr_symbol)->flags
       & BSF_GNU_INDIRECT_FUNCTION) != 0)
    return 0;

  if (!S_IS_EXTERNAL (fr_symbol))
    /* Symbol may be weak or local.  */
    return !S_IS_WEAK (fr_symbol);

  /* Global symbols with non-default visibility can't be preempted. */
  if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol)) != STV_DEFAULT)
    return 1;

  if (fr_var != NO_RELOC)
    switch ((enum bfd_reloc_code_real) fr_var)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
	/* Symbol with PLT relocation may be preempted. */
	return 0;
      default:
	abort ();
      }

  /* Global symbols with default visibility in a shared library may be
     preempted by another definition.  */
  return !shared;
}
#endif
11736 /* Table 3-2. Macro-Fusible Instructions in Haswell Microarchitecture
11737 Note also work for Skylake and Cascadelake.
11738 ---------------------------------------------------------------------
11739 | JCC | ADD/SUB/CMP | INC/DEC | TEST/AND |
11740 | ------ | ----------- | ------- | -------- |
11742 | Jno | N | N | Y |
11743 | Jc/Jb | Y | N | Y |
11744 | Jae/Jnb | Y | N | Y |
11745 | Je/Jz | Y | Y | Y |
11746 | Jne/Jnz | Y | Y | Y |
11747 | Jna/Jbe | Y | N | Y |
11748 | Ja/Jnbe | Y | N | Y |
11750 | Jns | N | N | Y |
11751 | Jp/Jpe | N | N | Y |
11752 | Jnp/Jpo | N | N | Y |
11753 | Jl/Jnge | Y | Y | Y |
11754 | Jge/Jnl | Y | Y | Y |
11755 | Jle/Jng | Y | Y | Y |
11756 | Jg/Jnle | Y | Y | Y |
11757 --------------------------------------------------------------------- */
11759 i386_macro_fusible_p (enum mf_cmp_kind mf_cmp
, enum mf_jcc_kind mf_jcc
)
11761 if (mf_cmp
== mf_cmp_alu_cmp
)
11762 return ((mf_jcc
>= mf_jcc_jc
&& mf_jcc
<= mf_jcc_jna
)
11763 || mf_jcc
== mf_jcc_jl
|| mf_jcc
== mf_jcc_jle
);
11764 if (mf_cmp
== mf_cmp_incdec
)
11765 return (mf_jcc
== mf_jcc_je
|| mf_jcc
== mf_jcc_jl
11766 || mf_jcc
== mf_jcc_jle
);
11767 if (mf_cmp
== mf_cmp_test_and
)
11772 /* Return the next non-empty frag. */
11775 i386_next_non_empty_frag (fragS
*fragP
)
11777 /* There may be a frag with a ".fill 0" when there is no room in
11778 the current frag for frag_grow in output_insn. */
11779 for (fragP
= fragP
->fr_next
;
11781 && fragP
->fr_type
== rs_fill
11782 && fragP
->fr_fix
== 0);
11783 fragP
= fragP
->fr_next
)
11788 /* Return the next jcc frag after BRANCH_PADDING. */
11791 i386_next_fusible_jcc_frag (fragS
*maybe_cmp_fragP
, fragS
*pad_fragP
)
11793 fragS
*branch_fragP
;
11797 if (pad_fragP
->fr_type
== rs_machine_dependent
11798 && (TYPE_FROM_RELAX_STATE (pad_fragP
->fr_subtype
)
11799 == BRANCH_PADDING
))
11801 branch_fragP
= i386_next_non_empty_frag (pad_fragP
);
11802 if (branch_fragP
->fr_type
!= rs_machine_dependent
)
11804 if (TYPE_FROM_RELAX_STATE (branch_fragP
->fr_subtype
) == COND_JUMP
11805 && i386_macro_fusible_p (maybe_cmp_fragP
->tc_frag_data
.mf_type
,
11806 pad_fragP
->tc_frag_data
.mf_type
))
11807 return branch_fragP
;
11813 /* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags. */
11816 i386_classify_machine_dependent_frag (fragS
*fragP
)
11820 fragS
*branch_fragP
;
11822 unsigned int max_prefix_length
;
11824 if (fragP
->tc_frag_data
.classified
)
11827 /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING. Convert
11828 FUSED_JCC_PADDING and merge BRANCH_PADDING. */
11829 for (next_fragP
= fragP
;
11830 next_fragP
!= NULL
;
11831 next_fragP
= next_fragP
->fr_next
)
11833 next_fragP
->tc_frag_data
.classified
= 1;
11834 if (next_fragP
->fr_type
== rs_machine_dependent
)
11835 switch (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
))
11837 case BRANCH_PADDING
:
11838 /* The BRANCH_PADDING frag must be followed by a branch
11840 branch_fragP
= i386_next_non_empty_frag (next_fragP
);
11841 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11843 case FUSED_JCC_PADDING
:
11844 /* Check if this is a fused jcc:
11846 CMP like instruction
11850 cmp_fragP
= i386_next_non_empty_frag (next_fragP
);
11851 pad_fragP
= i386_next_non_empty_frag (cmp_fragP
);
11852 branch_fragP
= i386_next_fusible_jcc_frag (next_fragP
, pad_fragP
);
11855 /* The BRANCH_PADDING frag is merged with the
11856 FUSED_JCC_PADDING frag. */
11857 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11858 /* CMP like instruction size. */
11859 next_fragP
->tc_frag_data
.cmp_size
= cmp_fragP
->fr_fix
;
11860 frag_wane (pad_fragP
);
11861 /* Skip to branch_fragP. */
11862 next_fragP
= branch_fragP
;
11864 else if (next_fragP
->tc_frag_data
.max_prefix_length
)
11866 /* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
11868 next_fragP
->fr_subtype
11869 = ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0);
11870 next_fragP
->tc_frag_data
.max_bytes
11871 = next_fragP
->tc_frag_data
.max_prefix_length
;
11872 /* This will be updated in the BRANCH_PREFIX scan. */
11873 next_fragP
->tc_frag_data
.max_prefix_length
= 0;
11876 frag_wane (next_fragP
);
11881 /* Stop if there is no BRANCH_PREFIX. */
11882 if (!align_branch_prefix_size
)
11885 /* Scan for BRANCH_PREFIX. */
11886 for (; fragP
!= NULL
; fragP
= fragP
->fr_next
)
11888 if (fragP
->fr_type
!= rs_machine_dependent
11889 || (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
11893 /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
11894 COND_JUMP_PREFIX. */
11895 max_prefix_length
= 0;
11896 for (next_fragP
= fragP
;
11897 next_fragP
!= NULL
;
11898 next_fragP
= next_fragP
->fr_next
)
11900 if (next_fragP
->fr_type
== rs_fill
)
11901 /* Skip rs_fill frags. */
11903 else if (next_fragP
->fr_type
!= rs_machine_dependent
)
11904 /* Stop for all other frags. */
11907 /* rs_machine_dependent frags. */
11908 if (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11911 /* Count BRANCH_PREFIX frags. */
11912 if (max_prefix_length
>= MAX_FUSED_JCC_PADDING_SIZE
)
11914 max_prefix_length
= MAX_FUSED_JCC_PADDING_SIZE
;
11915 frag_wane (next_fragP
);
11919 += next_fragP
->tc_frag_data
.max_bytes
;
11921 else if ((TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11923 || (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11924 == FUSED_JCC_PADDING
))
11926 /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING. */
11927 fragP
->tc_frag_data
.u
.padding_fragP
= next_fragP
;
11931 /* Stop for other rs_machine_dependent frags. */
11935 fragP
->tc_frag_data
.max_prefix_length
= max_prefix_length
;
11937 /* Skip to the next frag. */
11938 fragP
= next_fragP
;
11942 /* Compute padding size for
11945 CMP like instruction
11947 COND_JUMP/UNCOND_JUMP
11952 COND_JUMP/UNCOND_JUMP
11956 i386_branch_padding_size (fragS
*fragP
, offsetT address
)
11958 unsigned int offset
, size
, padding_size
;
11959 fragS
*branch_fragP
= fragP
->tc_frag_data
.u
.branch_fragP
;
11961 /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag. */
11963 address
= fragP
->fr_address
;
11964 address
+= fragP
->fr_fix
;
11966 /* CMP like instrunction size. */
11967 size
= fragP
->tc_frag_data
.cmp_size
;
11969 /* The base size of the branch frag. */
11970 size
+= branch_fragP
->fr_fix
;
11972 /* Add opcode and displacement bytes for the rs_machine_dependent
11974 if (branch_fragP
->fr_type
== rs_machine_dependent
)
11975 size
+= md_relax_table
[branch_fragP
->fr_subtype
].rlx_length
;
11977 /* Check if branch is within boundary and doesn't end at the last
11979 offset
= address
& ((1U << align_branch_power
) - 1);
11980 if ((offset
+ size
) >= (1U << align_branch_power
))
11981 /* Padding needed to avoid crossing boundary. */
11982 padding_size
= (1U << align_branch_power
) - offset
;
11984 /* No padding needed. */
11987 /* The return value may be saved in tc_frag_data.length which is
11989 if (!fits_in_unsigned_byte (padding_size
))
11992 return padding_size
;
11995 /* i386_generic_table_relax_frag()
11997 Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
11998 grow/shrink padding to align branch frags. Hand others to
12002 i386_generic_table_relax_frag (segT segment
, fragS
*fragP
, long stretch
)
12004 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12005 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
12007 long padding_size
= i386_branch_padding_size (fragP
, 0);
12008 long grow
= padding_size
- fragP
->tc_frag_data
.length
;
12010 /* When the BRANCH_PREFIX frag is used, the computed address
12011 must match the actual address and there should be no padding. */
12012 if (fragP
->tc_frag_data
.padding_address
12013 && (fragP
->tc_frag_data
.padding_address
!= fragP
->fr_address
12017 /* Update the padding size. */
12019 fragP
->tc_frag_data
.length
= padding_size
;
12023 else if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12025 fragS
*padding_fragP
, *next_fragP
;
12026 long padding_size
, left_size
, last_size
;
12028 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
12029 if (!padding_fragP
)
12030 /* Use the padding set by the leading BRANCH_PREFIX frag. */
12031 return (fragP
->tc_frag_data
.length
12032 - fragP
->tc_frag_data
.last_length
);
12034 /* Compute the relative address of the padding frag in the very
12035 first time where the BRANCH_PREFIX frag sizes are zero. */
12036 if (!fragP
->tc_frag_data
.padding_address
)
12037 fragP
->tc_frag_data
.padding_address
12038 = padding_fragP
->fr_address
- (fragP
->fr_address
- stretch
);
12040 /* First update the last length from the previous interation. */
12041 left_size
= fragP
->tc_frag_data
.prefix_length
;
12042 for (next_fragP
= fragP
;
12043 next_fragP
!= padding_fragP
;
12044 next_fragP
= next_fragP
->fr_next
)
12045 if (next_fragP
->fr_type
== rs_machine_dependent
12046 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12051 int max
= next_fragP
->tc_frag_data
.max_bytes
;
12055 if (max
> left_size
)
12060 next_fragP
->tc_frag_data
.last_length
= size
;
12064 next_fragP
->tc_frag_data
.last_length
= 0;
12067 /* Check the padding size for the padding frag. */
12068 padding_size
= i386_branch_padding_size
12069 (padding_fragP
, (fragP
->fr_address
12070 + fragP
->tc_frag_data
.padding_address
));
12072 last_size
= fragP
->tc_frag_data
.prefix_length
;
12073 /* Check if there is change from the last interation. */
12074 if (padding_size
== last_size
)
12076 /* Update the expected address of the padding frag. */
12077 padding_fragP
->tc_frag_data
.padding_address
12078 = (fragP
->fr_address
+ padding_size
12079 + fragP
->tc_frag_data
.padding_address
);
12083 if (padding_size
> fragP
->tc_frag_data
.max_prefix_length
)
12085 /* No padding if there is no sufficient room. Clear the
12086 expected address of the padding frag. */
12087 padding_fragP
->tc_frag_data
.padding_address
= 0;
12091 /* Store the expected address of the padding frag. */
12092 padding_fragP
->tc_frag_data
.padding_address
12093 = (fragP
->fr_address
+ padding_size
12094 + fragP
->tc_frag_data
.padding_address
);
12096 fragP
->tc_frag_data
.prefix_length
= padding_size
;
12098 /* Update the length for the current interation. */
12099 left_size
= padding_size
;
12100 for (next_fragP
= fragP
;
12101 next_fragP
!= padding_fragP
;
12102 next_fragP
= next_fragP
->fr_next
)
12103 if (next_fragP
->fr_type
== rs_machine_dependent
12104 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12109 int max
= next_fragP
->tc_frag_data
.max_bytes
;
12113 if (max
> left_size
)
12118 next_fragP
->tc_frag_data
.length
= size
;
12122 next_fragP
->tc_frag_data
.length
= 0;
12125 return (fragP
->tc_frag_data
.length
12126 - fragP
->tc_frag_data
.last_length
);
12128 return relax_frag (segment
, fragP
, stretch
);
12131 /* md_estimate_size_before_relax()
12133 Called just before relax() for rs_machine_dependent frags. The x86
12134 assembler uses these frags to handle variable size jump
12137 Any symbol that is now undefined will not become defined.
12138 Return the correct fr_subtype in the frag.
12139 Return the initial "guess for variable size of frag" to caller.
12140 The guess is actually the growth beyond the fixed part. Whatever
12141 we do to grow the fixed or variable part contributes to our
12145 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
12147 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12148 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
12149 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
12151 i386_classify_machine_dependent_frag (fragP
);
12152 return fragP
->tc_frag_data
.length
;
12155 /* We've already got fragP->fr_subtype right; all we have to do is
12156 check for un-relaxable symbols. On an ELF system, we can't relax
12157 an externally visible symbol, because it may be overridden by a
12159 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
12160 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12162 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
12165 #if defined (OBJ_COFF) && defined (TE_PE)
12166 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
12167 && S_IS_WEAK (fragP
->fr_symbol
))
12171 /* Symbol is undefined in this segment, or we need to keep a
12172 reloc so that weak symbols can be overridden. */
12173 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
12174 enum bfd_reloc_code_real reloc_type
;
12175 unsigned char *opcode
;
12179 if (fragP
->fr_var
!= NO_RELOC
)
12180 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
12181 else if (size
== 2)
12182 reloc_type
= BFD_RELOC_16_PCREL
;
12183 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12184 else if (need_plt32_p (fragP
->fr_symbol
))
12185 reloc_type
= BFD_RELOC_X86_64_PLT32
;
12188 reloc_type
= BFD_RELOC_32_PCREL
;
12190 old_fr_fix
= fragP
->fr_fix
;
12191 opcode
= (unsigned char *) fragP
->fr_opcode
;
12193 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
12196 /* Make jmp (0xeb) a (d)word displacement jump. */
12198 fragP
->fr_fix
+= size
;
12199 fixP
= fix_new (fragP
, old_fr_fix
, size
,
12201 fragP
->fr_offset
, 1,
12207 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
12209 /* Negate the condition, and branch past an
12210 unconditional jump. */
12213 /* Insert an unconditional jump. */
12215 /* We added two extra opcode bytes, and have a two byte
12217 fragP
->fr_fix
+= 2 + 2;
12218 fix_new (fragP
, old_fr_fix
+ 2, 2,
12220 fragP
->fr_offset
, 1,
12224 /* Fall through. */
12227 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
12229 fragP
->fr_fix
+= 1;
12230 fixP
= fix_new (fragP
, old_fr_fix
, 1,
12232 fragP
->fr_offset
, 1,
12233 BFD_RELOC_8_PCREL
);
12234 fixP
->fx_signed
= 1;
12238 /* This changes the byte-displacement jump 0x7N
12239 to the (d)word-displacement jump 0x0f,0x8N. */
12240 opcode
[1] = opcode
[0] + 0x10;
12241 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12242 /* We've added an opcode byte. */
12243 fragP
->fr_fix
+= 1 + size
;
12244 fixP
= fix_new (fragP
, old_fr_fix
+ 1, size
,
12246 fragP
->fr_offset
, 1,
12251 BAD_CASE (fragP
->fr_subtype
);
12255 /* All jumps handled here are signed, but don't unconditionally use a
12256 signed limit check for 32 and 16 bit jumps as we want to allow wrap
12257 around at 4G (outside of 64-bit mode) and 64k. */
12258 if (size
== 4 && flag_code
== CODE_64BIT
)
12259 fixP
->fx_signed
= 1;
12262 return fragP
->fr_fix
- old_fr_fix
;
12265 /* Guess size depending on current relax state. Initially the relax
12266 state will correspond to a short jump and we return 1, because
12267 the variable part of the frag (the branch offset) is one byte
12268 long. However, we can relax a section more than once and in that
12269 case we must either set fr_subtype back to the unrelaxed state,
12270 or return the value for the appropriate branch. */
12271 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
12274 /* Called after relax() is finished.
12276 In: Address of frag.
12277 fr_type == rs_machine_dependent.
12278 fr_subtype is what the address relaxed to.
12280 Out: Any fixSs and constants are set up.
12281 Caller will turn frag into a ".space 0". */
12284 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
12287 unsigned char *opcode
;
12288 unsigned char *where_to_put_displacement
= NULL
;
12289 offsetT target_address
;
12290 offsetT opcode_address
;
12291 unsigned int extension
= 0;
12292 offsetT displacement_from_opcode_start
;
12294 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12295 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
12296 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12298 /* Generate nop padding. */
12299 unsigned int size
= fragP
->tc_frag_data
.length
;
12302 if (size
> fragP
->tc_frag_data
.max_bytes
)
12308 const char *branch
= "branch";
12309 const char *prefix
= "";
12310 fragS
*padding_fragP
;
12311 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
12314 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
12315 switch (fragP
->tc_frag_data
.default_prefix
)
12320 case CS_PREFIX_OPCODE
:
12323 case DS_PREFIX_OPCODE
:
12326 case ES_PREFIX_OPCODE
:
12329 case FS_PREFIX_OPCODE
:
12332 case GS_PREFIX_OPCODE
:
12335 case SS_PREFIX_OPCODE
:
12340 msg
= _("%s:%u: add %d%s at 0x%llx to align "
12341 "%s within %d-byte boundary\n");
12343 msg
= _("%s:%u: add additional %d%s at 0x%llx to "
12344 "align %s within %d-byte boundary\n");
12348 padding_fragP
= fragP
;
12349 msg
= _("%s:%u: add %d%s-byte nop at 0x%llx to align "
12350 "%s within %d-byte boundary\n");
12354 switch (padding_fragP
->tc_frag_data
.branch_type
)
12356 case align_branch_jcc
:
12359 case align_branch_fused
:
12360 branch
= "fused jcc";
12362 case align_branch_jmp
:
12365 case align_branch_call
:
12368 case align_branch_indirect
:
12369 branch
= "indiret branch";
12371 case align_branch_ret
:
12378 fprintf (stdout
, msg
,
12379 fragP
->fr_file
, fragP
->fr_line
, size
, prefix
,
12380 (long long) fragP
->fr_address
, branch
,
12381 1 << align_branch_power
);
12383 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12384 memset (fragP
->fr_opcode
,
12385 fragP
->tc_frag_data
.default_prefix
, size
);
12387 i386_generate_nops (fragP
, (char *) fragP
->fr_opcode
,
12389 fragP
->fr_fix
+= size
;
12394 opcode
= (unsigned char *) fragP
->fr_opcode
;
12396 /* Address we want to reach in file space. */
12397 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
12399 /* Address opcode resides at in file space. */
12400 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
12402 /* Displacement from opcode start to fill into instruction. */
12403 displacement_from_opcode_start
= target_address
- opcode_address
;
12405 if ((fragP
->fr_subtype
& BIG
) == 0)
12407 /* Don't have to change opcode. */
12408 extension
= 1; /* 1 opcode + 1 displacement */
12409 where_to_put_displacement
= &opcode
[1];
12413 if (no_cond_jump_promotion
12414 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
12415 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
12416 _("long jump required"));
12418 switch (fragP
->fr_subtype
)
12420 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
12421 extension
= 4; /* 1 opcode + 4 displacement */
12423 where_to_put_displacement
= &opcode
[1];
12426 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
12427 extension
= 2; /* 1 opcode + 2 displacement */
12429 where_to_put_displacement
= &opcode
[1];
12432 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
12433 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
12434 extension
= 5; /* 2 opcode + 4 displacement */
12435 opcode
[1] = opcode
[0] + 0x10;
12436 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12437 where_to_put_displacement
= &opcode
[2];
12440 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
12441 extension
= 3; /* 2 opcode + 2 displacement */
12442 opcode
[1] = opcode
[0] + 0x10;
12443 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12444 where_to_put_displacement
= &opcode
[2];
12447 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
12452 where_to_put_displacement
= &opcode
[3];
12456 BAD_CASE (fragP
->fr_subtype
);
12461 /* If size if less then four we are sure that the operand fits,
12462 but if it's 4, then it could be that the displacement is larger
12464 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
12466 && ((addressT
) (displacement_from_opcode_start
- extension
12467 + ((addressT
) 1 << 31))
12468 > (((addressT
) 2 << 31) - 1)))
12470 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
12471 _("jump target out of range"));
12472 /* Make us emit 0. */
12473 displacement_from_opcode_start
= extension
;
12475 /* Now put displacement after opcode. */
12476 md_number_to_chars ((char *) where_to_put_displacement
,
12477 (valueT
) (displacement_from_opcode_start
- extension
),
12478 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
12479 fragP
->fr_fix
+= extension
;
12482 /* Apply a fixup (fixP) to segment data, once it has been determined
12483 by our caller that we have all the info we need to fix it up.
12485 Parameter valP is the pointer to the value of the bits.
12487 On the 386, immediates, displacements, and data pointers are all in
12488 the same (little-endian) format, so we don't need to care about which
12489 we are handling. */
12492 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
12494 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
12495 valueT value
= *valP
;
12497 #if !defined (TE_Mach)
12498 if (fixP
->fx_pcrel
)
12500 switch (fixP
->fx_r_type
)
12506 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
12509 case BFD_RELOC_X86_64_32S
:
12510 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
12513 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
12516 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
12521 if (fixP
->fx_addsy
!= NULL
12522 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
12523 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
12524 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
12525 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
12526 && !use_rela_relocations
)
12528 /* This is a hack. There should be a better way to handle this.
12529 This covers for the fact that bfd_install_relocation will
12530 subtract the current location (for partial_inplace, PC relative
12531 relocations); see more below. */
12535 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
12538 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12540 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12543 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
12545 if ((sym_seg
== seg
12546 || (symbol_section_p (fixP
->fx_addsy
)
12547 && sym_seg
!= absolute_section
))
12548 && !generic_force_reloc (fixP
))
12550 /* Yes, we add the values in twice. This is because
12551 bfd_install_relocation subtracts them out again. I think
12552 bfd_install_relocation is broken, but I don't dare change
12554 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12558 #if defined (OBJ_COFF) && defined (TE_PE)
12559 /* For some reason, the PE format does not store a
12560 section address offset for a PC relative symbol. */
12561 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
12562 || S_IS_WEAK (fixP
->fx_addsy
))
12563 value
+= md_pcrel_from (fixP
);
12566 #if defined (OBJ_COFF) && defined (TE_PE)
12567 if (fixP
->fx_addsy
!= NULL
12568 && S_IS_WEAK (fixP
->fx_addsy
)
12569 /* PR 16858: Do not modify weak function references. */
12570 && ! fixP
->fx_pcrel
)
12572 #if !defined (TE_PEP)
12573 /* For x86 PE weak function symbols are neither PC-relative
12574 nor do they set S_IS_FUNCTION. So the only reliable way
12575 to detect them is to check the flags of their containing
12577 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
12578 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
12582 value
-= S_GET_VALUE (fixP
->fx_addsy
);
12586 /* Fix a few things - the dynamic linker expects certain values here,
12587 and we must not disappoint it. */
12588 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12589 if (IS_ELF
&& fixP
->fx_addsy
)
12590 switch (fixP
->fx_r_type
)
12592 case BFD_RELOC_386_PLT32
:
12593 case BFD_RELOC_X86_64_PLT32
:
12594 /* Make the jump instruction point to the address of the operand.
12595 At runtime we merely add the offset to the actual PLT entry.
12596 NB: Subtract the offset size only for jump instructions. */
12597 if (fixP
->fx_pcrel
)
12601 case BFD_RELOC_386_TLS_GD
:
12602 case BFD_RELOC_386_TLS_LDM
:
12603 case BFD_RELOC_386_TLS_IE_32
:
12604 case BFD_RELOC_386_TLS_IE
:
12605 case BFD_RELOC_386_TLS_GOTIE
:
12606 case BFD_RELOC_386_TLS_GOTDESC
:
12607 case BFD_RELOC_X86_64_TLSGD
:
12608 case BFD_RELOC_X86_64_TLSLD
:
12609 case BFD_RELOC_X86_64_GOTTPOFF
:
12610 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12611 value
= 0; /* Fully resolved at runtime. No addend. */
12613 case BFD_RELOC_386_TLS_LE
:
12614 case BFD_RELOC_386_TLS_LDO_32
:
12615 case BFD_RELOC_386_TLS_LE_32
:
12616 case BFD_RELOC_X86_64_DTPOFF32
:
12617 case BFD_RELOC_X86_64_DTPOFF64
:
12618 case BFD_RELOC_X86_64_TPOFF32
:
12619 case BFD_RELOC_X86_64_TPOFF64
:
12620 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12623 case BFD_RELOC_386_TLS_DESC_CALL
:
12624 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12625 value
= 0; /* Fully resolved at runtime. No addend. */
12626 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12630 case BFD_RELOC_VTABLE_INHERIT
:
12631 case BFD_RELOC_VTABLE_ENTRY
:
12638 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
12640 /* If not 64bit, massage value, to account for wraparound when !BFD64. */
12642 value
= extend_to_32bit_address (value
);
12645 #endif /* !defined (TE_Mach) */
12647 /* Are we finished with this relocation now? */
12648 if (fixP
->fx_addsy
== NULL
)
12651 switch (fixP
->fx_r_type
)
12653 case BFD_RELOC_X86_64_32S
:
12654 fixP
->fx_signed
= 1;
12661 #if defined (OBJ_COFF) && defined (TE_PE)
12662 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
12665 /* Remember value for tc_gen_reloc. */
12666 fixP
->fx_addnumber
= value
;
12667 /* Clear out the frag for now. */
12671 else if (use_rela_relocations
)
12673 fixP
->fx_no_overflow
= 1;
12674 /* Remember value for tc_gen_reloc. */
12675 fixP
->fx_addnumber
= value
;
12679 md_number_to_chars (p
, value
, fixP
->fx_size
);
12683 md_atof (int type
, char *litP
, int *sizeP
)
12685 /* This outputs the LITTLENUMs in REVERSE order;
12686 in accord with the bigendian 386. */
12687 return ieee_md_atof (type
, litP
, sizeP
, false);
/* Scratch buffer for output_invalid: holds either 'c' or (0xNN) plus NUL.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render character C for use in diagnostics: quoted if printable,
   otherwise as a hex escape like (0x1f).  Returns a pointer to a static
   buffer that is overwritten on each call (not reentrant).  */

static char *
output_invalid (int c)
{
  if (ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  return output_invalid_buf;
}
12704 /* Verify that @r can be used in the current context. */
12706 static bool check_register (const reg_entry
*r
)
12708 if (allow_pseudo_reg
)
12711 if (operand_type_all_zero (&r
->reg_type
))
12714 if ((r
->reg_type
.bitfield
.dword
12715 || (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
> 3)
12716 || r
->reg_type
.bitfield
.class == RegCR
12717 || r
->reg_type
.bitfield
.class == RegDR
)
12718 && !cpu_arch_flags
.bitfield
.cpui386
)
12721 if (r
->reg_type
.bitfield
.class == RegTR
12722 && (flag_code
== CODE_64BIT
12723 || !cpu_arch_flags
.bitfield
.cpui386
12724 || cpu_arch_isa_flags
.bitfield
.cpui586
12725 || cpu_arch_isa_flags
.bitfield
.cpui686
))
12728 if (r
->reg_type
.bitfield
.class == RegMMX
&& !cpu_arch_flags
.bitfield
.cpummx
)
12731 if (!cpu_arch_flags
.bitfield
.cpuavx512f
)
12733 if (r
->reg_type
.bitfield
.zmmword
12734 || r
->reg_type
.bitfield
.class == RegMask
)
12737 if (!cpu_arch_flags
.bitfield
.cpuavx
)
12739 if (r
->reg_type
.bitfield
.ymmword
)
12742 if (!cpu_arch_flags
.bitfield
.cpusse
&& r
->reg_type
.bitfield
.xmmword
)
12747 if (r
->reg_type
.bitfield
.tmmword
12748 && (!cpu_arch_flags
.bitfield
.cpuamx_tile
12749 || flag_code
!= CODE_64BIT
))
12752 if (r
->reg_type
.bitfield
.class == RegBND
&& !cpu_arch_flags
.bitfield
.cpumpx
)
12755 /* Don't allow fake index register unless allow_index_reg isn't 0. */
12756 if (!allow_index_reg
&& r
->reg_num
== RegIZ
)
12759 /* Upper 16 vector registers are only available with VREX in 64bit
12760 mode, and require EVEX encoding. */
12761 if (r
->reg_flags
& RegVRex
)
12763 if (!cpu_arch_flags
.bitfield
.cpuavx512f
12764 || flag_code
!= CODE_64BIT
)
12767 if (i
.vec_encoding
== vex_encoding_default
)
12768 i
.vec_encoding
= vex_encoding_evex
;
12769 else if (i
.vec_encoding
!= vex_encoding_evex
)
12770 i
.vec_encoding
= vex_encoding_error
;
12773 if (((r
->reg_flags
& (RegRex64
| RegRex
)) || r
->reg_type
.bitfield
.qword
)
12774 && (!cpu_arch_flags
.bitfield
.cpulm
|| r
->reg_type
.bitfield
.class != RegCR
)
12775 && flag_code
!= CODE_64BIT
)
12778 if (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
== RegFlat
12785 /* REG_STRING starts *before* REGISTER_PREFIX. */
12787 static const reg_entry
*
12788 parse_real_register (char *reg_string
, char **end_op
)
12790 char *s
= reg_string
;
12792 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
12793 const reg_entry
*r
;
12795 /* Skip possible REGISTER_PREFIX and possible whitespace. */
12796 if (*s
== REGISTER_PREFIX
)
12799 if (is_space_char (*s
))
12802 p
= reg_name_given
;
12803 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
12805 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
12806 return (const reg_entry
*) NULL
;
12810 /* For naked regs, make sure that we are not dealing with an identifier.
12811 This prevents confusing an identifier like `eax_var' with register
12813 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
12814 return (const reg_entry
*) NULL
;
12818 r
= (const reg_entry
*) str_hash_find (reg_hash
, reg_name_given
);
12820 /* Handle floating point regs, allowing spaces in the (i) part. */
12823 if (!cpu_arch_flags
.bitfield
.cpu8087
12824 && !cpu_arch_flags
.bitfield
.cpu287
12825 && !cpu_arch_flags
.bitfield
.cpu387
12826 && !allow_pseudo_reg
)
12827 return (const reg_entry
*) NULL
;
12829 if (is_space_char (*s
))
12834 if (is_space_char (*s
))
12836 if (*s
>= '0' && *s
<= '7')
12838 int fpr
= *s
- '0';
12840 if (is_space_char (*s
))
12845 know (r
[fpr
].reg_num
== fpr
);
12849 /* We have "%st(" then garbage. */
12850 return (const reg_entry
*) NULL
;
12854 return r
&& check_register (r
) ? r
: NULL
;
12857 /* REG_STRING starts *before* REGISTER_PREFIX. */
12859 static const reg_entry
*
12860 parse_register (char *reg_string
, char **end_op
)
12862 const reg_entry
*r
;
12864 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
12865 r
= parse_real_register (reg_string
, end_op
);
12870 char *save
= input_line_pointer
;
12874 input_line_pointer
= reg_string
;
12875 c
= get_symbol_name (®_string
);
12876 symbolP
= symbol_find (reg_string
);
12877 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
12879 const expressionS
*e
= symbol_get_value_expression (symbolP
);
12881 know (e
->X_op
== O_register
);
12882 know (e
->X_add_number
>= 0
12883 && (valueT
) e
->X_add_number
< i386_regtab_size
);
12884 r
= i386_regtab
+ e
->X_add_number
;
12885 if (!check_register (r
))
12887 as_bad (_("register '%s%s' cannot be used here"),
12888 register_prefix
, r
->reg_name
);
12891 *end_op
= input_line_pointer
;
12893 *input_line_pointer
= c
;
12894 input_line_pointer
= save
;
12900 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
12902 const reg_entry
*r
;
12903 char *end
= input_line_pointer
;
12906 r
= parse_register (name
, &input_line_pointer
);
12907 if (r
&& end
<= input_line_pointer
)
12909 *nextcharP
= *input_line_pointer
;
12910 *input_line_pointer
= 0;
12913 e
->X_op
= O_register
;
12914 e
->X_add_number
= r
- i386_regtab
;
12917 e
->X_op
= O_illegal
;
12920 input_line_pointer
= end
;
12922 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
12926 md_operand (expressionS
*e
)
12929 const reg_entry
*r
;
12931 switch (*input_line_pointer
)
12933 case REGISTER_PREFIX
:
12934 r
= parse_real_register (input_line_pointer
, &end
);
12937 e
->X_op
= O_register
;
12938 e
->X_add_number
= r
- i386_regtab
;
12939 input_line_pointer
= end
;
12944 gas_assert (intel_syntax
);
12945 end
= input_line_pointer
++;
12947 if (*input_line_pointer
== ']')
12949 ++input_line_pointer
;
12950 e
->X_op_symbol
= make_expr_symbol (e
);
12951 e
->X_add_symbol
= NULL
;
12952 e
->X_add_number
= 0;
12957 e
->X_op
= O_absent
;
12958 input_line_pointer
= end
;
12965 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12966 const char *md_shortopts
= "kVQ:sqnO::";
12968 const char *md_shortopts
= "qnO::";
12971 #define OPTION_32 (OPTION_MD_BASE + 0)
12972 #define OPTION_64 (OPTION_MD_BASE + 1)
12973 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
12974 #define OPTION_MARCH (OPTION_MD_BASE + 3)
12975 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
12976 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
12977 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
12978 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
12979 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
12980 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
12981 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
12982 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
12983 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
12984 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
12985 #define OPTION_X32 (OPTION_MD_BASE + 14)
12986 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
12987 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
12988 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
12989 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
12990 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
12991 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
12992 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
12993 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
12994 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
12995 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
12996 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
12997 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
12998 #define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
12999 #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
13000 #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
13001 #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
13002 #define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
13003 #define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
13004 #define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
13006 struct option md_longopts
[] =
13008 {"32", no_argument
, NULL
, OPTION_32
},
13009 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13010 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13011 {"64", no_argument
, NULL
, OPTION_64
},
13013 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13014 {"x32", no_argument
, NULL
, OPTION_X32
},
13015 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
13016 {"mx86-used-note", required_argument
, NULL
, OPTION_X86_USED_NOTE
},
13018 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
13019 {"march", required_argument
, NULL
, OPTION_MARCH
},
13020 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
13021 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
13022 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
13023 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
13024 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
13025 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
13026 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
13027 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
13028 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
13029 {"mvexwig", required_argument
, NULL
, OPTION_MVEXWIG
},
13030 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
13031 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
13032 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
13033 # if defined (TE_PE) || defined (TE_PEP)
13034 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
13036 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
13037 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
13038 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
13039 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
13040 {"malign-branch-boundary", required_argument
, NULL
, OPTION_MALIGN_BRANCH_BOUNDARY
},
13041 {"malign-branch-prefix-size", required_argument
, NULL
, OPTION_MALIGN_BRANCH_PREFIX_SIZE
},
13042 {"malign-branch", required_argument
, NULL
, OPTION_MALIGN_BRANCH
},
13043 {"mbranches-within-32B-boundaries", no_argument
, NULL
, OPTION_MBRANCHES_WITH_32B_BOUNDARIES
},
13044 {"mlfence-after-load", required_argument
, NULL
, OPTION_MLFENCE_AFTER_LOAD
},
13045 {"mlfence-before-indirect-branch", required_argument
, NULL
,
13046 OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
},
13047 {"mlfence-before-ret", required_argument
, NULL
, OPTION_MLFENCE_BEFORE_RET
},
13048 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
13049 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
13050 {NULL
, no_argument
, NULL
, 0}
13052 size_t md_longopts_size
= sizeof (md_longopts
);
13055 md_parse_option (int c
, const char *arg
)
13058 char *arch
, *next
, *saved
, *type
;
13063 optimize_align_code
= 0;
13067 quiet_warnings
= 1;
13070 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13071 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
13072 should be emitted or not. FIXME: Not implemented. */
13074 if ((arg
[0] != 'y' && arg
[0] != 'n') || arg
[1])
13078 /* -V: SVR4 argument to print version ID. */
13080 print_version_id ();
13083 /* -k: Ignore for FreeBSD compatibility. */
13088 /* -s: On i386 Solaris, this tells the native assembler to use
13089 .stab instead of .stab.excl. We always use .stab anyhow. */
13092 case OPTION_MSHARED
:
13096 case OPTION_X86_USED_NOTE
:
13097 if (strcasecmp (arg
, "yes") == 0)
13099 else if (strcasecmp (arg
, "no") == 0)
13102 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg
);
13107 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13108 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13111 const char **list
, **l
;
13113 list
= bfd_target_list ();
13114 for (l
= list
; *l
!= NULL
; l
++)
13115 if (startswith (*l
, "elf64-x86-64")
13116 || strcmp (*l
, "coff-x86-64") == 0
13117 || strcmp (*l
, "pe-x86-64") == 0
13118 || strcmp (*l
, "pei-x86-64") == 0
13119 || strcmp (*l
, "mach-o-x86-64") == 0)
13121 default_arch
= "x86_64";
13125 as_fatal (_("no compiled in support for x86_64"));
13131 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13135 const char **list
, **l
;
13137 list
= bfd_target_list ();
13138 for (l
= list
; *l
!= NULL
; l
++)
13139 if (startswith (*l
, "elf32-x86-64"))
13141 default_arch
= "x86_64:32";
13145 as_fatal (_("no compiled in support for 32bit x86_64"));
13149 as_fatal (_("32bit x86_64 is only supported for ELF"));
13154 default_arch
= "i386";
13157 case OPTION_DIVIDE
:
13158 #ifdef SVR4_COMMENT_CHARS
13163 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
13165 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
13169 i386_comment_chars
= n
;
13175 saved
= xstrdup (arg
);
13177 /* Allow -march=+nosse. */
13183 as_fatal (_("invalid -march= option: `%s'"), arg
);
13184 next
= strchr (arch
, '+');
13187 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13189 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
13192 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
13195 cpu_arch_name
= cpu_arch
[j
].name
;
13196 cpu_sub_arch_name
= NULL
;
13197 cpu_arch_flags
= cpu_arch
[j
].flags
;
13198 cpu_arch_isa
= cpu_arch
[j
].type
;
13199 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
13200 if (!cpu_arch_tune_set
)
13202 cpu_arch_tune
= cpu_arch_isa
;
13203 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
13207 else if (*cpu_arch
[j
].name
== '.'
13208 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
13210 /* ISA extension. */
13211 i386_cpu_flags flags
;
13213 flags
= cpu_flags_or (cpu_arch_flags
,
13214 cpu_arch
[j
].flags
);
13216 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
13218 if (cpu_sub_arch_name
)
13220 char *name
= cpu_sub_arch_name
;
13221 cpu_sub_arch_name
= concat (name
,
13223 (const char *) NULL
);
13227 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
13228 cpu_arch_flags
= flags
;
13229 cpu_arch_isa_flags
= flags
;
13233 = cpu_flags_or (cpu_arch_isa_flags
,
13234 cpu_arch
[j
].flags
);
13239 if (j
>= ARRAY_SIZE (cpu_arch
))
13241 /* Disable an ISA extension. */
13242 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
13243 if (strcmp (arch
, cpu_noarch
[j
].name
) == 0)
13245 i386_cpu_flags flags
;
13247 flags
= cpu_flags_and_not (cpu_arch_flags
,
13248 cpu_noarch
[j
].flags
);
13249 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
13251 if (cpu_sub_arch_name
)
13253 char *name
= cpu_sub_arch_name
;
13254 cpu_sub_arch_name
= concat (arch
,
13255 (const char *) NULL
);
13259 cpu_sub_arch_name
= xstrdup (arch
);
13260 cpu_arch_flags
= flags
;
13261 cpu_arch_isa_flags
= flags
;
13266 if (j
>= ARRAY_SIZE (cpu_noarch
))
13267 j
= ARRAY_SIZE (cpu_arch
);
13270 if (j
>= ARRAY_SIZE (cpu_arch
))
13271 as_fatal (_("invalid -march= option: `%s'"), arg
);
13275 while (next
!= NULL
);
13281 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
13282 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13284 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
13286 cpu_arch_tune_set
= 1;
13287 cpu_arch_tune
= cpu_arch
[j
].type
;
13288 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
13292 if (j
>= ARRAY_SIZE (cpu_arch
))
13293 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
13296 case OPTION_MMNEMONIC
:
13297 if (strcasecmp (arg
, "att") == 0)
13298 intel_mnemonic
= 0;
13299 else if (strcasecmp (arg
, "intel") == 0)
13300 intel_mnemonic
= 1;
13302 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
13305 case OPTION_MSYNTAX
:
13306 if (strcasecmp (arg
, "att") == 0)
13308 else if (strcasecmp (arg
, "intel") == 0)
13311 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
13314 case OPTION_MINDEX_REG
:
13315 allow_index_reg
= 1;
13318 case OPTION_MNAKED_REG
:
13319 allow_naked_reg
= 1;
13322 case OPTION_MSSE2AVX
:
13326 case OPTION_MSSE_CHECK
:
13327 if (strcasecmp (arg
, "error") == 0)
13328 sse_check
= check_error
;
13329 else if (strcasecmp (arg
, "warning") == 0)
13330 sse_check
= check_warning
;
13331 else if (strcasecmp (arg
, "none") == 0)
13332 sse_check
= check_none
;
13334 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
13337 case OPTION_MOPERAND_CHECK
:
13338 if (strcasecmp (arg
, "error") == 0)
13339 operand_check
= check_error
;
13340 else if (strcasecmp (arg
, "warning") == 0)
13341 operand_check
= check_warning
;
13342 else if (strcasecmp (arg
, "none") == 0)
13343 operand_check
= check_none
;
13345 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
13348 case OPTION_MAVXSCALAR
:
13349 if (strcasecmp (arg
, "128") == 0)
13350 avxscalar
= vex128
;
13351 else if (strcasecmp (arg
, "256") == 0)
13352 avxscalar
= vex256
;
13354 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
13357 case OPTION_MVEXWIG
:
13358 if (strcmp (arg
, "0") == 0)
13360 else if (strcmp (arg
, "1") == 0)
13363 as_fatal (_("invalid -mvexwig= option: `%s'"), arg
);
13366 case OPTION_MADD_BND_PREFIX
:
13367 add_bnd_prefix
= 1;
13370 case OPTION_MEVEXLIG
:
13371 if (strcmp (arg
, "128") == 0)
13372 evexlig
= evexl128
;
13373 else if (strcmp (arg
, "256") == 0)
13374 evexlig
= evexl256
;
13375 else if (strcmp (arg
, "512") == 0)
13376 evexlig
= evexl512
;
13378 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
13381 case OPTION_MEVEXRCIG
:
13382 if (strcmp (arg
, "rne") == 0)
13384 else if (strcmp (arg
, "rd") == 0)
13386 else if (strcmp (arg
, "ru") == 0)
13388 else if (strcmp (arg
, "rz") == 0)
13391 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
13394 case OPTION_MEVEXWIG
:
13395 if (strcmp (arg
, "0") == 0)
13397 else if (strcmp (arg
, "1") == 0)
13400 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
13403 # if defined (TE_PE) || defined (TE_PEP)
13404 case OPTION_MBIG_OBJ
:
13409 case OPTION_MOMIT_LOCK_PREFIX
:
13410 if (strcasecmp (arg
, "yes") == 0)
13411 omit_lock_prefix
= 1;
13412 else if (strcasecmp (arg
, "no") == 0)
13413 omit_lock_prefix
= 0;
13415 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
13418 case OPTION_MFENCE_AS_LOCK_ADD
:
13419 if (strcasecmp (arg
, "yes") == 0)
13421 else if (strcasecmp (arg
, "no") == 0)
13424 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
13427 case OPTION_MLFENCE_AFTER_LOAD
:
13428 if (strcasecmp (arg
, "yes") == 0)
13429 lfence_after_load
= 1;
13430 else if (strcasecmp (arg
, "no") == 0)
13431 lfence_after_load
= 0;
13433 as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg
);
13436 case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
:
13437 if (strcasecmp (arg
, "all") == 0)
13439 lfence_before_indirect_branch
= lfence_branch_all
;
13440 if (lfence_before_ret
== lfence_before_ret_none
)
13441 lfence_before_ret
= lfence_before_ret_shl
;
13443 else if (strcasecmp (arg
, "memory") == 0)
13444 lfence_before_indirect_branch
= lfence_branch_memory
;
13445 else if (strcasecmp (arg
, "register") == 0)
13446 lfence_before_indirect_branch
= lfence_branch_register
;
13447 else if (strcasecmp (arg
, "none") == 0)
13448 lfence_before_indirect_branch
= lfence_branch_none
;
13450 as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"),
13454 case OPTION_MLFENCE_BEFORE_RET
:
13455 if (strcasecmp (arg
, "or") == 0)
13456 lfence_before_ret
= lfence_before_ret_or
;
13457 else if (strcasecmp (arg
, "not") == 0)
13458 lfence_before_ret
= lfence_before_ret_not
;
13459 else if (strcasecmp (arg
, "shl") == 0 || strcasecmp (arg
, "yes") == 0)
13460 lfence_before_ret
= lfence_before_ret_shl
;
13461 else if (strcasecmp (arg
, "none") == 0)
13462 lfence_before_ret
= lfence_before_ret_none
;
13464 as_fatal (_("invalid -mlfence-before-ret= option: `%s'"),
13468 case OPTION_MRELAX_RELOCATIONS
:
13469 if (strcasecmp (arg
, "yes") == 0)
13470 generate_relax_relocations
= 1;
13471 else if (strcasecmp (arg
, "no") == 0)
13472 generate_relax_relocations
= 0;
13474 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
13477 case OPTION_MALIGN_BRANCH_BOUNDARY
:
13480 long int align
= strtoul (arg
, &end
, 0);
13485 align_branch_power
= 0;
13488 else if (align
>= 16)
13491 for (align_power
= 0;
13493 align
>>= 1, align_power
++)
13495 /* Limit alignment power to 31. */
13496 if (align
== 1 && align_power
< 32)
13498 align_branch_power
= align_power
;
13503 as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg
);
13507 case OPTION_MALIGN_BRANCH_PREFIX_SIZE
:
13510 int align
= strtoul (arg
, &end
, 0);
13511 /* Some processors only support 5 prefixes. */
13512 if (*end
== '\0' && align
>= 0 && align
< 6)
13514 align_branch_prefix_size
= align
;
13517 as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),
13522 case OPTION_MALIGN_BRANCH
:
13524 saved
= xstrdup (arg
);
13528 next
= strchr (type
, '+');
13531 if (strcasecmp (type
, "jcc") == 0)
13532 align_branch
|= align_branch_jcc_bit
;
13533 else if (strcasecmp (type
, "fused") == 0)
13534 align_branch
|= align_branch_fused_bit
;
13535 else if (strcasecmp (type
, "jmp") == 0)
13536 align_branch
|= align_branch_jmp_bit
;
13537 else if (strcasecmp (type
, "call") == 0)
13538 align_branch
|= align_branch_call_bit
;
13539 else if (strcasecmp (type
, "ret") == 0)
13540 align_branch
|= align_branch_ret_bit
;
13541 else if (strcasecmp (type
, "indirect") == 0)
13542 align_branch
|= align_branch_indirect_bit
;
13544 as_fatal (_("invalid -malign-branch= option: `%s'"), arg
);
13547 while (next
!= NULL
);
13551 case OPTION_MBRANCHES_WITH_32B_BOUNDARIES
:
13552 align_branch_power
= 5;
13553 align_branch_prefix_size
= 5;
13554 align_branch
= (align_branch_jcc_bit
13555 | align_branch_fused_bit
13556 | align_branch_jmp_bit
);
13559 case OPTION_MAMD64
:
13563 case OPTION_MINTEL64
:
13571 /* Turn off -Os. */
13572 optimize_for_space
= 0;
13574 else if (*arg
== 's')
13576 optimize_for_space
= 1;
13577 /* Turn on all encoding optimizations. */
13578 optimize
= INT_MAX
;
13582 optimize
= atoi (arg
);
13583 /* Turn off -Os. */
13584 optimize_for_space
= 0;
/* NOTE(review): the macro body line was lost in extraction; upstream uses
   a line of blanks as wide as the usage column — confirm the exact width
   against the version in use.  */
#define MESSAGE_TEMPLATE \
  "                                                                                "

/* Append NAME (LEN bytes) to the usage message being built at P inside
   MESSAGE, comma-separating entries.  When the current line is full,
   flush MESSAGE to STREAM and restart at START.  *LEFT_P tracks the
   remaining room and is updated; returns the new write position.  */

static char *
output_message (FILE *stream, char *p, char *message, char *start,
		int *left_p, const char *name, int len)
{
  int size = sizeof (MESSAGE_TEMPLATE);
  int left = *left_p;

  /* Reserve 2 spaces for ", " or ",\0" */
  left -= len + 2;

  /* Check if there is any room.  */
  if (left >= 0)
    {
      if (p != start)
	{
	  *p++ = ',';
	  *p++ = ' ';
	}
      p = mempcpy (p, name, len);
    }
  else
    {
      /* Output the current message now and start a new one.  */
      *p++ = ',';
      *p = '\0';
      fprintf (stream, "%s\n", message);
      p = start;
      left = size - (start - message) - len - 2;

      gas_assert (left >= 0);

      p = mempcpy (p, name, len);
    }

  *left_p = left;
  return p;
}
13636 show_arch (FILE *stream
, int ext
, int check
)
13638 static char message
[] = MESSAGE_TEMPLATE
;
13639 char *start
= message
+ 27;
13641 int size
= sizeof (MESSAGE_TEMPLATE
);
13648 left
= size
- (start
- message
);
13649 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13651 /* Should it be skipped? */
13652 if (cpu_arch
[j
].skip
)
13655 name
= cpu_arch
[j
].name
;
13656 len
= cpu_arch
[j
].len
;
13659 /* It is an extension. Skip if we aren't asked to show it. */
13670 /* It is an processor. Skip if we show only extension. */
13673 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
13675 /* It is an impossible processor - skip. */
13679 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
13682 /* Display disabled extensions. */
13684 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
13686 name
= cpu_noarch
[j
].name
;
13687 len
= cpu_noarch
[j
].len
;
13688 p
= output_message (stream
, p
, message
, start
, &left
, name
,
13693 fprintf (stream
, "%s\n", message
);
13697 md_show_usage (FILE *stream
)
13699 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13700 fprintf (stream
, _("\
13701 -Qy, -Qn ignored\n\
13702 -V print assembler version number\n\
13705 fprintf (stream
, _("\
13706 -n Do not optimize code alignment\n\
13707 -q quieten some warnings\n"));
13708 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13709 fprintf (stream
, _("\
13712 #if defined BFD64 && (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13713 || defined (TE_PE) || defined (TE_PEP))
13714 fprintf (stream
, _("\
13715 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
13717 #ifdef SVR4_COMMENT_CHARS
13718 fprintf (stream
, _("\
13719 --divide do not treat `/' as a comment character\n"));
13721 fprintf (stream
, _("\
13722 --divide ignored\n"));
13724 fprintf (stream
, _("\
13725 -march=CPU[,+EXTENSION...]\n\
13726 generate code for CPU and EXTENSION, CPU is one of:\n"));
13727 show_arch (stream
, 0, 1);
13728 fprintf (stream
, _("\
13729 EXTENSION is combination of:\n"));
13730 show_arch (stream
, 1, 0);
13731 fprintf (stream
, _("\
13732 -mtune=CPU optimize for CPU, CPU is one of:\n"));
13733 show_arch (stream
, 0, 0);
13734 fprintf (stream
, _("\
13735 -msse2avx encode SSE instructions with VEX prefix\n"));
13736 fprintf (stream
, _("\
13737 -msse-check=[none|error|warning] (default: warning)\n\
13738 check SSE instructions\n"));
13739 fprintf (stream
, _("\
13740 -moperand-check=[none|error|warning] (default: warning)\n\
13741 check operand combinations for validity\n"));
13742 fprintf (stream
, _("\
13743 -mavxscalar=[128|256] (default: 128)\n\
13744 encode scalar AVX instructions with specific vector\n\
13746 fprintf (stream
, _("\
13747 -mvexwig=[0|1] (default: 0)\n\
13748 encode VEX instructions with specific VEX.W value\n\
13749 for VEX.W bit ignored instructions\n"));
13750 fprintf (stream
, _("\
13751 -mevexlig=[128|256|512] (default: 128)\n\
13752 encode scalar EVEX instructions with specific vector\n\
13754 fprintf (stream
, _("\
13755 -mevexwig=[0|1] (default: 0)\n\
13756 encode EVEX instructions with specific EVEX.W value\n\
13757 for EVEX.W bit ignored instructions\n"));
13758 fprintf (stream
, _("\
13759 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
13760 encode EVEX instructions with specific EVEX.RC value\n\
13761 for SAE-only ignored instructions\n"));
13762 fprintf (stream
, _("\
13763 -mmnemonic=[att|intel] "));
13764 if (SYSV386_COMPAT
)
13765 fprintf (stream
, _("(default: att)\n"));
13767 fprintf (stream
, _("(default: intel)\n"));
13768 fprintf (stream
, _("\
13769 use AT&T/Intel mnemonic\n"));
13770 fprintf (stream
, _("\
13771 -msyntax=[att|intel] (default: att)\n\
13772 use AT&T/Intel syntax\n"));
13773 fprintf (stream
, _("\
13774 -mindex-reg support pseudo index registers\n"));
13775 fprintf (stream
, _("\
13776 -mnaked-reg don't require `%%' prefix for registers\n"));
13777 fprintf (stream
, _("\
13778 -madd-bnd-prefix add BND prefix for all valid branches\n"));
13779 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13780 fprintf (stream
, _("\
13781 -mshared disable branch optimization for shared code\n"));
13782 fprintf (stream
, _("\
13783 -mx86-used-note=[no|yes] "));
13784 if (DEFAULT_X86_USED_NOTE
)
13785 fprintf (stream
, _("(default: yes)\n"));
13787 fprintf (stream
, _("(default: no)\n"));
13788 fprintf (stream
, _("\
13789 generate x86 used ISA and feature properties\n"));
13791 #if defined (TE_PE) || defined (TE_PEP)
13792 fprintf (stream
, _("\
13793 -mbig-obj generate big object files\n"));
13795 fprintf (stream
, _("\
13796 -momit-lock-prefix=[no|yes] (default: no)\n\
13797 strip all lock prefixes\n"));
13798 fprintf (stream
, _("\
13799 -mfence-as-lock-add=[no|yes] (default: no)\n\
13800 encode lfence, mfence and sfence as\n\
13801 lock addl $0x0, (%%{re}sp)\n"));
13802 fprintf (stream
, _("\
13803 -mrelax-relocations=[no|yes] "));
13804 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
)
13805 fprintf (stream
, _("(default: yes)\n"));
13807 fprintf (stream
, _("(default: no)\n"));
13808 fprintf (stream
, _("\
13809 generate relax relocations\n"));
13810 fprintf (stream
, _("\
13811 -malign-branch-boundary=NUM (default: 0)\n\
13812 align branches within NUM byte boundary\n"));
13813 fprintf (stream
, _("\
13814 -malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
13815 TYPE is combination of jcc, fused, jmp, call, ret,\n\
13817 specify types of branches to align\n"));
13818 fprintf (stream
, _("\
13819 -malign-branch-prefix-size=NUM (default: 5)\n\
13820 align branches with NUM prefixes per instruction\n"));
13821 fprintf (stream
, _("\
13822 -mbranches-within-32B-boundaries\n\
13823 align branches within 32 byte boundary\n"));
13824 fprintf (stream
, _("\
13825 -mlfence-after-load=[no|yes] (default: no)\n\
13826 generate lfence after load\n"));
13827 fprintf (stream
, _("\
13828 -mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\
13829 generate lfence before indirect near branch\n"));
13830 fprintf (stream
, _("\
13831 -mlfence-before-ret=[none|or|not|shl|yes] (default: none)\n\
13832 generate lfence before ret\n"));
13833 fprintf (stream
, _("\
13834 -mamd64 accept only AMD64 ISA [default]\n"));
13835 fprintf (stream
, _("\
13836 -mintel64 accept only Intel64 ISA\n"));
13839 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
13840 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13841 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13843 /* Pick the target format to use. */
13846 i386_target_format (void)
13848 if (startswith (default_arch
, "x86_64"))
13850 update_code_flag (CODE_64BIT
, 1);
13851 if (default_arch
[6] == '\0')
13852 x86_elf_abi
= X86_64_ABI
;
13854 x86_elf_abi
= X86_64_X32_ABI
;
13856 else if (!strcmp (default_arch
, "i386"))
13857 update_code_flag (CODE_32BIT
, 1);
13858 else if (!strcmp (default_arch
, "iamcu"))
13860 update_code_flag (CODE_32BIT
, 1);
13861 if (cpu_arch_isa
== PROCESSOR_UNKNOWN
)
13863 static const i386_cpu_flags iamcu_flags
= CPU_IAMCU_FLAGS
;
13864 cpu_arch_name
= "iamcu";
13865 cpu_sub_arch_name
= NULL
;
13866 cpu_arch_flags
= iamcu_flags
;
13867 cpu_arch_isa
= PROCESSOR_IAMCU
;
13868 cpu_arch_isa_flags
= iamcu_flags
;
13869 if (!cpu_arch_tune_set
)
13871 cpu_arch_tune
= cpu_arch_isa
;
13872 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
13875 else if (cpu_arch_isa
!= PROCESSOR_IAMCU
)
13876 as_fatal (_("Intel MCU doesn't support `%s' architecture"),
13880 as_fatal (_("unknown architecture"));
13882 if (cpu_flags_all_zero (&cpu_arch_isa_flags
))
13883 cpu_arch_isa_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
13884 if (cpu_flags_all_zero (&cpu_arch_tune_flags
))
13885 cpu_arch_tune_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
13887 switch (OUTPUT_FLAVOR
)
13889 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
13890 case bfd_target_aout_flavour
:
13891 return AOUT_TARGET_FORMAT
;
13893 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
13894 # if defined (TE_PE) || defined (TE_PEP)
13895 case bfd_target_coff_flavour
:
13896 if (flag_code
== CODE_64BIT
)
13899 return use_big_obj
? "pe-bigobj-x86-64" : "pe-x86-64";
13901 return use_big_obj
? "pe-bigobj-i386" : "pe-i386";
13902 # elif defined (TE_GO32)
13903 case bfd_target_coff_flavour
:
13904 return "coff-go32";
13906 case bfd_target_coff_flavour
:
13907 return "coff-i386";
13910 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
13911 case bfd_target_elf_flavour
:
13913 const char *format
;
13915 switch (x86_elf_abi
)
13918 format
= ELF_TARGET_FORMAT
;
13920 tls_get_addr
= "___tls_get_addr";
13924 use_rela_relocations
= 1;
13927 tls_get_addr
= "__tls_get_addr";
13929 format
= ELF_TARGET_FORMAT64
;
13931 case X86_64_X32_ABI
:
13932 use_rela_relocations
= 1;
13935 tls_get_addr
= "__tls_get_addr";
13937 disallow_64bit_reloc
= 1;
13938 format
= ELF_TARGET_FORMAT32
;
13941 if (cpu_arch_isa
== PROCESSOR_L1OM
)
13943 if (x86_elf_abi
!= X86_64_ABI
)
13944 as_fatal (_("Intel L1OM is 64bit only"));
13945 return ELF_TARGET_L1OM_FORMAT
;
13947 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
13949 if (x86_elf_abi
!= X86_64_ABI
)
13950 as_fatal (_("Intel K1OM is 64bit only"));
13951 return ELF_TARGET_K1OM_FORMAT
;
13953 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
13955 if (x86_elf_abi
!= I386_ABI
)
13956 as_fatal (_("Intel MCU is 32bit only"));
13957 return ELF_TARGET_IAMCU_FORMAT
;
13963 #if defined (OBJ_MACH_O)
13964 case bfd_target_mach_o_flavour
:
13965 if (flag_code
== CODE_64BIT
)
13967 use_rela_relocations
= 1;
13969 return "mach-o-x86-64";
13972 return "mach-o-i386";
13980 #endif /* OBJ_MAYBE_ more than one */
13983 md_undefined_symbol (char *name
)
13985 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
13986 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
13987 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
13988 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
13992 if (symbol_find (name
))
13993 as_bad (_("GOT already in symbol table"));
13994 GOT_symbol
= symbol_new (name
, undefined_section
,
13995 &zero_address_frag
, 0);
14002 /* Round up a section size to the appropriate boundary. */
14005 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
14007 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
14008 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
14010 /* For a.out, force the section size to be aligned. If we don't do
14011 this, BFD will align it for us, but it will not write out the
14012 final bytes of the section. This may be a bug in BFD, but it is
14013 easier to fix it here since that is how the other a.out targets
14017 align
= bfd_section_alignment (segment
);
14018 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
14025 /* On the i386, PC-relative offsets are relative to the start of the
14026 next instruction. That is, the address of the offset, plus its
14027 size, since the offset is always the last part of the insn. */
14030 md_pcrel_from (fixS
*fixP
)
14032 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
14038 s_bss (int ignore ATTRIBUTE_UNUSED
)
14042 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14044 obj_elf_section_change_hook ();
14046 temp
= get_absolute_expression ();
14047 subseg_set (bss_section
, (subsegT
) temp
);
14048 demand_empty_rest_of_line ();
14053 /* Remember constant directive. */
14056 i386_cons_align (int ignore ATTRIBUTE_UNUSED
)
14058 if (last_insn
.kind
!= last_insn_directive
14059 && (bfd_section_flags (now_seg
) & SEC_CODE
))
14061 last_insn
.seg
= now_seg
;
14062 last_insn
.kind
= last_insn_directive
;
14063 last_insn
.name
= "constant directive";
14064 last_insn
.file
= as_where (&last_insn
.line
);
14065 if (lfence_before_ret
!= lfence_before_ret_none
)
14067 if (lfence_before_indirect_branch
!= lfence_branch_none
)
14068 as_warn (_("constant directive skips -mlfence-before-ret "
14069 "and -mlfence-before-indirect-branch"));
14071 as_warn (_("constant directive skips -mlfence-before-ret"));
14073 else if (lfence_before_indirect_branch
!= lfence_branch_none
)
14074 as_warn (_("constant directive skips -mlfence-before-indirect-branch"));
14079 i386_validate_fix (fixS
*fixp
)
14081 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14082 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
14083 || fixp
->fx_r_type
== BFD_RELOC_SIZE64
)
14084 return IS_ELF
&& fixp
->fx_addsy
14085 && (!S_IS_DEFINED (fixp
->fx_addsy
)
14086 || S_IS_EXTERNAL (fixp
->fx_addsy
));
14089 if (fixp
->fx_subsy
)
14091 if (fixp
->fx_subsy
== GOT_symbol
)
14093 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
14097 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14098 if (fixp
->fx_tcbit2
)
14099 fixp
->fx_r_type
= (fixp
->fx_tcbit
14100 ? BFD_RELOC_X86_64_REX_GOTPCRELX
14101 : BFD_RELOC_X86_64_GOTPCRELX
);
14104 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
14109 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
14111 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
14113 fixp
->fx_subsy
= 0;
14116 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14119 /* NB: Commit 292676c1 resolved PLT32 reloc aganst local symbol
14120 to section. Since PLT32 relocation must be against symbols,
14121 turn such PLT32 relocation into PC32 relocation. */
14123 && (fixp
->fx_r_type
== BFD_RELOC_386_PLT32
14124 || fixp
->fx_r_type
== BFD_RELOC_X86_64_PLT32
)
14125 && symbol_section_p (fixp
->fx_addsy
))
14126 fixp
->fx_r_type
= BFD_RELOC_32_PCREL
;
14129 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
14130 && fixp
->fx_tcbit2
)
14131 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
14140 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
14143 bfd_reloc_code_real_type code
;
14145 switch (fixp
->fx_r_type
)
14147 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14150 case BFD_RELOC_SIZE32
:
14151 case BFD_RELOC_SIZE64
:
14153 && !bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_addsy
))
14154 && (!fixp
->fx_subsy
14155 || bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_subsy
))))
14156 sym
= fixp
->fx_addsy
;
14157 else if (fixp
->fx_subsy
14158 && !bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_subsy
))
14159 && (!fixp
->fx_addsy
14160 || bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_addsy
))))
14161 sym
= fixp
->fx_subsy
;
14164 if (IS_ELF
&& sym
&& S_IS_DEFINED (sym
) && !S_IS_EXTERNAL (sym
))
14166 /* Resolve size relocation against local symbol to size of
14167 the symbol plus addend. */
14168 valueT value
= S_GET_SIZE (sym
);
14170 if (symbol_get_bfdsym (sym
)->flags
& BSF_SECTION_SYM
)
14171 value
= bfd_section_size (S_GET_SEGMENT (sym
));
14172 if (sym
== fixp
->fx_subsy
)
14175 if (fixp
->fx_addsy
)
14176 value
+= S_GET_VALUE (fixp
->fx_addsy
);
14178 else if (fixp
->fx_subsy
)
14179 value
-= S_GET_VALUE (fixp
->fx_subsy
);
14180 value
+= fixp
->fx_offset
;
14181 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
14183 && !fits_in_unsigned_long (value
))
14184 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14185 _("symbol size computation overflow"));
14186 fixp
->fx_addsy
= NULL
;
14187 fixp
->fx_subsy
= NULL
;
14188 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
14191 if (!fixp
->fx_addsy
|| fixp
->fx_subsy
)
14193 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14194 "unsupported expression involving @size");
14198 /* Fall through. */
14200 case BFD_RELOC_X86_64_PLT32
:
14201 case BFD_RELOC_X86_64_GOT32
:
14202 case BFD_RELOC_X86_64_GOTPCREL
:
14203 case BFD_RELOC_X86_64_GOTPCRELX
:
14204 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
14205 case BFD_RELOC_386_PLT32
:
14206 case BFD_RELOC_386_GOT32
:
14207 case BFD_RELOC_386_GOT32X
:
14208 case BFD_RELOC_386_GOTOFF
:
14209 case BFD_RELOC_386_GOTPC
:
14210 case BFD_RELOC_386_TLS_GD
:
14211 case BFD_RELOC_386_TLS_LDM
:
14212 case BFD_RELOC_386_TLS_LDO_32
:
14213 case BFD_RELOC_386_TLS_IE_32
:
14214 case BFD_RELOC_386_TLS_IE
:
14215 case BFD_RELOC_386_TLS_GOTIE
:
14216 case BFD_RELOC_386_TLS_LE_32
:
14217 case BFD_RELOC_386_TLS_LE
:
14218 case BFD_RELOC_386_TLS_GOTDESC
:
14219 case BFD_RELOC_386_TLS_DESC_CALL
:
14220 case BFD_RELOC_X86_64_TLSGD
:
14221 case BFD_RELOC_X86_64_TLSLD
:
14222 case BFD_RELOC_X86_64_DTPOFF32
:
14223 case BFD_RELOC_X86_64_DTPOFF64
:
14224 case BFD_RELOC_X86_64_GOTTPOFF
:
14225 case BFD_RELOC_X86_64_TPOFF32
:
14226 case BFD_RELOC_X86_64_TPOFF64
:
14227 case BFD_RELOC_X86_64_GOTOFF64
:
14228 case BFD_RELOC_X86_64_GOTPC32
:
14229 case BFD_RELOC_X86_64_GOT64
:
14230 case BFD_RELOC_X86_64_GOTPCREL64
:
14231 case BFD_RELOC_X86_64_GOTPC64
:
14232 case BFD_RELOC_X86_64_GOTPLT64
:
14233 case BFD_RELOC_X86_64_PLTOFF64
:
14234 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
14235 case BFD_RELOC_X86_64_TLSDESC_CALL
:
14236 case BFD_RELOC_RVA
:
14237 case BFD_RELOC_VTABLE_ENTRY
:
14238 case BFD_RELOC_VTABLE_INHERIT
:
14240 case BFD_RELOC_32_SECREL
:
14242 code
= fixp
->fx_r_type
;
14244 case BFD_RELOC_X86_64_32S
:
14245 if (!fixp
->fx_pcrel
)
14247 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
14248 code
= fixp
->fx_r_type
;
14251 /* Fall through. */
14253 if (fixp
->fx_pcrel
)
14255 switch (fixp
->fx_size
)
14258 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14259 _("can not do %d byte pc-relative relocation"),
14261 code
= BFD_RELOC_32_PCREL
;
14263 case 1: code
= BFD_RELOC_8_PCREL
; break;
14264 case 2: code
= BFD_RELOC_16_PCREL
; break;
14265 case 4: code
= BFD_RELOC_32_PCREL
; break;
14267 case 8: code
= BFD_RELOC_64_PCREL
; break;
14273 switch (fixp
->fx_size
)
14276 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14277 _("can not do %d byte relocation"),
14279 code
= BFD_RELOC_32
;
14281 case 1: code
= BFD_RELOC_8
; break;
14282 case 2: code
= BFD_RELOC_16
; break;
14283 case 4: code
= BFD_RELOC_32
; break;
14285 case 8: code
= BFD_RELOC_64
; break;
14292 if ((code
== BFD_RELOC_32
14293 || code
== BFD_RELOC_32_PCREL
14294 || code
== BFD_RELOC_X86_64_32S
)
14296 && fixp
->fx_addsy
== GOT_symbol
)
14299 code
= BFD_RELOC_386_GOTPC
;
14301 code
= BFD_RELOC_X86_64_GOTPC32
;
14303 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
14305 && fixp
->fx_addsy
== GOT_symbol
)
14307 code
= BFD_RELOC_X86_64_GOTPC64
;
14310 rel
= XNEW (arelent
);
14311 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
14312 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
14314 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
14316 if (!use_rela_relocations
)
14318 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
14319 vtable entry to be used in the relocation's section offset. */
14320 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
14321 rel
->address
= fixp
->fx_offset
;
14322 #if defined (OBJ_COFF) && defined (TE_PE)
14323 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
14324 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
14329 /* Use the rela in 64bit mode. */
14332 if (disallow_64bit_reloc
)
14335 case BFD_RELOC_X86_64_DTPOFF64
:
14336 case BFD_RELOC_X86_64_TPOFF64
:
14337 case BFD_RELOC_64_PCREL
:
14338 case BFD_RELOC_X86_64_GOTOFF64
:
14339 case BFD_RELOC_X86_64_GOT64
:
14340 case BFD_RELOC_X86_64_GOTPCREL64
:
14341 case BFD_RELOC_X86_64_GOTPC64
:
14342 case BFD_RELOC_X86_64_GOTPLT64
:
14343 case BFD_RELOC_X86_64_PLTOFF64
:
14344 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14345 _("cannot represent relocation type %s in x32 mode"),
14346 bfd_get_reloc_code_name (code
));
14352 if (!fixp
->fx_pcrel
)
14353 rel
->addend
= fixp
->fx_offset
;
14357 case BFD_RELOC_X86_64_PLT32
:
14358 case BFD_RELOC_X86_64_GOT32
:
14359 case BFD_RELOC_X86_64_GOTPCREL
:
14360 case BFD_RELOC_X86_64_GOTPCRELX
:
14361 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
14362 case BFD_RELOC_X86_64_TLSGD
:
14363 case BFD_RELOC_X86_64_TLSLD
:
14364 case BFD_RELOC_X86_64_GOTTPOFF
:
14365 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
14366 case BFD_RELOC_X86_64_TLSDESC_CALL
:
14367 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
14370 rel
->addend
= (section
->vma
14372 + fixp
->fx_addnumber
14373 + md_pcrel_from (fixp
));
14378 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
14379 if (rel
->howto
== NULL
)
14381 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14382 _("cannot represent relocation type %s"),
14383 bfd_get_reloc_code_name (code
));
14384 /* Set howto to a garbage value so that we can keep going. */
14385 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
14386 gas_assert (rel
->howto
!= NULL
);
14392 #include "tc-i386-intel.c"
14395 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
14397 int saved_naked_reg
;
14398 char saved_register_dot
;
14400 saved_naked_reg
= allow_naked_reg
;
14401 allow_naked_reg
= 1;
14402 saved_register_dot
= register_chars
['.'];
14403 register_chars
['.'] = '.';
14404 allow_pseudo_reg
= 1;
14405 expression_and_evaluate (exp
);
14406 allow_pseudo_reg
= 0;
14407 register_chars
['.'] = saved_register_dot
;
14408 allow_naked_reg
= saved_naked_reg
;
14410 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
14412 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
14414 exp
->X_op
= O_constant
;
14415 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
14416 .dw2_regnum
[flag_code
>> 1];
14419 exp
->X_op
= O_illegal
;
14424 tc_x86_frame_initial_instructions (void)
14426 static unsigned int sp_regno
[2];
14428 if (!sp_regno
[flag_code
>> 1])
14430 char *saved_input
= input_line_pointer
;
14431 char sp
[][4] = {"esp", "rsp"};
14434 input_line_pointer
= sp
[flag_code
>> 1];
14435 tc_x86_parse_to_dw2regnum (&exp
);
14436 gas_assert (exp
.X_op
== O_constant
);
14437 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
14438 input_line_pointer
= saved_input
;
14441 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
14442 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
14446 x86_dwarf2_addr_size (void)
14448 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
14449 if (x86_elf_abi
== X86_64_X32_ABI
)
14452 return bfd_arch_bits_per_address (stdoutput
) / 8;
14456 i386_elf_section_type (const char *str
, size_t len
)
14458 if (flag_code
== CODE_64BIT
14459 && len
== sizeof ("unwind") - 1
14460 && startswith (str
, "unwind"))
14461 return SHT_X86_64_UNWIND
;
14468 i386_solaris_fix_up_eh_frame (segT sec
)
14470 if (flag_code
== CODE_64BIT
)
14471 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
14477 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
14481 exp
.X_op
= O_secrel
;
14482 exp
.X_add_symbol
= symbol
;
14483 exp
.X_add_number
= 0;
14484 emit_expr (&exp
, size
);
14488 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14489 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
14492 x86_64_section_letter (int letter
, const char **ptr_msg
)
14494 if (flag_code
== CODE_64BIT
)
14497 return SHF_X86_64_LARGE
;
14499 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
14502 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
14507 x86_64_section_word (char *str
, size_t len
)
14509 if (len
== 5 && flag_code
== CODE_64BIT
&& startswith (str
, "large"))
14510 return SHF_X86_64_LARGE
;
14516 handle_large_common (int small ATTRIBUTE_UNUSED
)
14518 if (flag_code
!= CODE_64BIT
)
14520 s_comm_internal (0, elf_common_parse
);
14521 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
14525 static segT lbss_section
;
14526 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
14527 asection
*saved_bss_section
= bss_section
;
14529 if (lbss_section
== NULL
)
14531 flagword applicable
;
14532 segT seg
= now_seg
;
14533 subsegT subseg
= now_subseg
;
14535 /* The .lbss section is for local .largecomm symbols. */
14536 lbss_section
= subseg_new (".lbss", 0);
14537 applicable
= bfd_applicable_section_flags (stdoutput
);
14538 bfd_set_section_flags (lbss_section
, applicable
& SEC_ALLOC
);
14539 seg_info (lbss_section
)->bss
= 1;
14541 subseg_set (seg
, subseg
);
14544 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
14545 bss_section
= lbss_section
;
14547 s_comm_internal (0, elf_common_parse
);
14549 elf_com_section_ptr
= saved_com_section_ptr
;
14550 bss_section
= saved_bss_section
;
14553 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */