1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2020 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
39 #ifdef HAVE_SYS_PARAM_H
40 #include <sys/param.h>
43 #define INT_MAX (int) (((unsigned) (-1)) >> 1)
47 #ifndef INFER_ADDR_PREFIX
48 #define INFER_ADDR_PREFIX 1
52 #define DEFAULT_ARCH "i386"
57 #define INLINE __inline__
63 /* Prefixes will be emitted in the order defined below.
64 WAIT_PREFIX must be the first prefix since FWAIT really is an
65 instruction, and so must come before any prefixes.
66 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
67 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
73 #define HLE_PREFIX REP_PREFIX
74 #define BND_PREFIX REP_PREFIX
76 #define REX_PREFIX 6 /* must come last. */
77 #define MAX_PREFIXES 7 /* max prefixes per opcode */
79 /* we define the syntax here (modulo base,index,scale syntax) */
80 #define REGISTER_PREFIX '%'
81 #define IMMEDIATE_PREFIX '$'
82 #define ABSOLUTE_PREFIX '*'
84 /* these are the instruction mnemonic suffixes in AT&T syntax or
85 memory operand size in Intel syntax. */
86 #define WORD_MNEM_SUFFIX 'w'
87 #define BYTE_MNEM_SUFFIX 'b'
88 #define SHORT_MNEM_SUFFIX 's'
89 #define LONG_MNEM_SUFFIX 'l'
90 #define QWORD_MNEM_SUFFIX 'q'
91 /* Intel Syntax. Use a non-ascii letter since it never appears
93 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
95 #define END_OF_INSN '\0'
97 /* This matches the C -> StaticRounding alias in the opcode table. */
98 #define commutative staticrounding
101 'templates' is for grouping together 'template' structures for opcodes
102 of the same name. This is only used for storing the insns in the grand
103 ole hash table of insns.
104 The templates themselves start at START and range up to (but not including)
109 const insn_template
*start
;
110 const insn_template
*end
;
114 /* 386 operand encoding bytes: see 386 book for details of this. */
117 unsigned int regmem
; /* codes register or memory operand */
118 unsigned int reg
; /* codes register operand (or extended opcode) */
119 unsigned int mode
; /* how to interpret regmem & reg */
123 /* x86-64 extension prefix. */
124 typedef int rex_byte
;
126 /* 386 opcode byte to code indirect addressing. */
135 /* x86 arch names, types and features */
138 const char *name
; /* arch name */
139 unsigned int len
; /* arch string length */
140 enum processor_type type
; /* arch type */
141 i386_cpu_flags flags
; /* cpu feature flags */
142 unsigned int skip
; /* show_arch should skip this. */
146 /* Used to turn off indicated flags. */
149 const char *name
; /* arch name */
150 unsigned int len
; /* arch string length */
151 i386_cpu_flags flags
; /* cpu feature flags */
155 static void update_code_flag (int, int);
156 static void set_code_flag (int);
157 static void set_16bit_gcc_code_flag (int);
158 static void set_intel_syntax (int);
159 static void set_intel_mnemonic (int);
160 static void set_allow_index_reg (int);
161 static void set_check (int);
162 static void set_cpu_arch (int);
164 static void pe_directive_secrel (int);
166 static void signed_cons (int);
167 static char *output_invalid (int c
);
168 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
170 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
172 static int i386_att_operand (char *);
173 static int i386_intel_operand (char *, int);
174 static int i386_intel_simplify (expressionS
*);
175 static int i386_intel_parse_name (const char *, expressionS
*);
176 static const reg_entry
*parse_register (char *, char **);
177 static char *parse_insn (char *, char *);
178 static char *parse_operands (char *, const char *);
179 static void swap_operands (void);
180 static void swap_2_operands (int, int);
181 static enum flag_code
i386_addressing_mode (void);
182 static void optimize_imm (void);
183 static void optimize_disp (void);
184 static const insn_template
*match_template (char);
185 static int check_string (void);
186 static int process_suffix (void);
187 static int check_byte_reg (void);
188 static int check_long_reg (void);
189 static int check_qword_reg (void);
190 static int check_word_reg (void);
191 static int finalize_imm (void);
192 static int process_operands (void);
193 static const seg_entry
*build_modrm_byte (void);
194 static void output_insn (void);
195 static void output_imm (fragS
*, offsetT
);
196 static void output_disp (fragS
*, offsetT
);
198 static void s_bss (int);
200 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
201 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
203 /* GNU_PROPERTY_X86_ISA_1_USED. */
204 static unsigned int x86_isa_1_used
;
205 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
206 static unsigned int x86_feature_2_used
;
207 /* Generate x86 used ISA and feature properties. */
208 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
211 static const char *default_arch
= DEFAULT_ARCH
;
213 /* parse_register() returns this when a register alias cannot be used. */
214 static const reg_entry bad_reg
= { "<bad>", OPERAND_TYPE_NONE
, 0, 0,
215 { Dw2Inval
, Dw2Inval
} };
217 /* This struct describes rounding control and SAE in the instruction. */
231 static struct RC_Operation rc_op
;
233 /* The struct describes masking, applied to OPERAND in the instruction.
234 MASK is a pointer to the corresponding mask register. ZEROING tells
235 whether merging or zeroing mask is used. */
236 struct Mask_Operation
238 const reg_entry
*mask
;
239 unsigned int zeroing
;
240 /* The operand where this operation is associated. */
244 static struct Mask_Operation mask_op
;
246 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
248 struct Broadcast_Operation
250 /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}. */
253 /* Index of broadcasted operand. */
256 /* Number of bytes to broadcast. */
260 static struct Broadcast_Operation broadcast_op
;
265 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
266 unsigned char bytes
[4];
268 /* Destination or source register specifier. */
269 const reg_entry
*register_specifier
;
272 /* 'md_assemble ()' gathers together information and puts it into a
279 const reg_entry
*regs
;
284 operand_size_mismatch
,
285 operand_type_mismatch
,
286 register_type_mismatch
,
287 number_of_operands_mismatch
,
288 invalid_instruction_suffix
,
290 unsupported_with_intel_mnemonic
,
293 invalid_vsib_address
,
294 invalid_vector_register_set
,
295 unsupported_vector_index_register
,
296 unsupported_broadcast
,
299 mask_not_on_destination
,
302 rc_sae_operand_not_last_imm
,
303 invalid_register_operand
,
308 /* TM holds the template for the insn were currently assembling. */
311 /* SUFFIX holds the instruction size suffix for byte, word, dword
312 or qword, if given. */
315 /* OPERANDS gives the number of given operands. */
316 unsigned int operands
;
318 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
319 of given register, displacement, memory operands and immediate
321 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
323 /* TYPES [i] is the type (see above #defines) which tells us how to
324 use OP[i] for the corresponding operand. */
325 i386_operand_type types
[MAX_OPERANDS
];
327 /* Displacement expression, immediate expression, or register for each
329 union i386_op op
[MAX_OPERANDS
];
331 /* Flags for operands. */
332 unsigned int flags
[MAX_OPERANDS
];
333 #define Operand_PCrel 1
334 #define Operand_Mem 2
336 /* Relocation type for operand */
337 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
339 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
340 the base index byte below. */
341 const reg_entry
*base_reg
;
342 const reg_entry
*index_reg
;
343 unsigned int log2_scale_factor
;
345 /* SEG gives the seg_entries of this insn. They are zero unless
346 explicit segment overrides are given. */
347 const seg_entry
*seg
[2];
349 /* Copied first memory operand string, for re-checking. */
352 /* PREFIX holds all the given prefix opcodes (usually null).
353 PREFIXES is the number of prefix opcodes. */
354 unsigned int prefixes
;
355 unsigned char prefix
[MAX_PREFIXES
];
357 /* Register is in low 3 bits of opcode. */
358 bfd_boolean short_form
;
360 /* The operand to a branch insn indicates an absolute branch. */
361 bfd_boolean jumpabsolute
;
363 /* Has MMX register operands. */
364 bfd_boolean has_regmmx
;
366 /* Has XMM register operands. */
367 bfd_boolean has_regxmm
;
369 /* Has YMM register operands. */
370 bfd_boolean has_regymm
;
372 /* Has ZMM register operands. */
373 bfd_boolean has_regzmm
;
375 /* Has GOTPC or TLS relocation. */
376 bfd_boolean has_gotpc_tls_reloc
;
378 /* RM and SIB are the modrm byte and the sib byte where the
379 addressing modes of this insn are encoded. */
386 /* Masking attributes. */
387 struct Mask_Operation
*mask
;
389 /* Rounding control and SAE attributes. */
390 struct RC_Operation
*rounding
;
392 /* Broadcasting attributes. */
393 struct Broadcast_Operation
*broadcast
;
395 /* Compressed disp8*N attribute. */
396 unsigned int memshift
;
398 /* Prefer load or store in encoding. */
401 dir_encoding_default
= 0,
407 /* Prefer 8bit or 32bit displacement in encoding. */
410 disp_encoding_default
= 0,
415 /* Prefer the REX byte in encoding. */
416 bfd_boolean rex_encoding
;
418 /* Disable instruction size optimization. */
419 bfd_boolean no_optimize
;
421 /* How to encode vector instructions. */
424 vex_encoding_default
= 0,
432 const char *rep_prefix
;
435 const char *hle_prefix
;
437 /* Have BND prefix. */
438 const char *bnd_prefix
;
440 /* Have NOTRACK prefix. */
441 const char *notrack_prefix
;
444 enum i386_error error
;
447 typedef struct _i386_insn i386_insn
;
449 /* Link RC type with corresponding string, that'll be looked for in
458 static const struct RC_name RC_NamesTable
[] =
460 { rne
, STRING_COMMA_LEN ("rn-sae") },
461 { rd
, STRING_COMMA_LEN ("rd-sae") },
462 { ru
, STRING_COMMA_LEN ("ru-sae") },
463 { rz
, STRING_COMMA_LEN ("rz-sae") },
464 { saeonly
, STRING_COMMA_LEN ("sae") },
467 /* List of chars besides those in app.c:symbol_chars that can start an
468 operand. Used to prevent the scrubber eating vital white-space. */
469 const char extra_symbol_chars
[] = "*%-([{}"
478 #if (defined (TE_I386AIX) \
479 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
480 && !defined (TE_GNU) \
481 && !defined (TE_LINUX) \
482 && !defined (TE_NACL) \
483 && !defined (TE_FreeBSD) \
484 && !defined (TE_DragonFly) \
485 && !defined (TE_NetBSD)))
486 /* This array holds the chars that always start a comment. If the
487 pre-processor is disabled, these aren't very useful. The option
488 --divide will remove '/' from this list. */
489 const char *i386_comment_chars
= "#/";
490 #define SVR4_COMMENT_CHARS 1
491 #define PREFIX_SEPARATOR '\\'
494 const char *i386_comment_chars
= "#";
495 #define PREFIX_SEPARATOR '/'
498 /* This array holds the chars that only start a comment at the beginning of
499 a line. If the line seems to have the form '# 123 filename'
500 .line and .file directives will appear in the pre-processed output.
501 Note that input_file.c hand checks for '#' at the beginning of the
502 first line of the input file. This is because the compiler outputs
503 #NO_APP at the beginning of its output.
504 Also note that comments started like this one will always work if
505 '/' isn't otherwise defined. */
506 const char line_comment_chars
[] = "#/";
508 const char line_separator_chars
[] = ";";
510 /* Chars that can be used to separate mant from exp in floating point
512 const char EXP_CHARS
[] = "eE";
514 /* Chars that mean this number is a floating point constant
517 const char FLT_CHARS
[] = "fFdDxX";
519 /* Tables for lexical analysis. */
520 static char mnemonic_chars
[256];
521 static char register_chars
[256];
522 static char operand_chars
[256];
523 static char identifier_chars
[256];
524 static char digit_chars
[256];
526 /* Lexical macros. */
527 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
528 #define is_operand_char(x) (operand_chars[(unsigned char) x])
529 #define is_register_char(x) (register_chars[(unsigned char) x])
530 #define is_space_char(x) ((x) == ' ')
531 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
532 #define is_digit_char(x) (digit_chars[(unsigned char) x])
534 /* All non-digit non-letter characters that may occur in an operand. */
535 static char operand_special_chars
[] = "%$-+(,)*._~/<>|&^!:[@]";
537 /* md_assemble() always leaves the strings it's passed unaltered. To
538 effect this we maintain a stack of saved characters that we've smashed
539 with '\0's (indicating end of strings for various sub-fields of the
540 assembler instruction). */
541 static char save_stack
[32];
542 static char *save_stack_p
;
543 #define END_STRING_AND_SAVE(s) \
544 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
545 #define RESTORE_END_STRING(s) \
546 do { *(s) = *--save_stack_p; } while (0)
548 /* The instruction we're assembling. */
551 /* Possible templates for current insn. */
552 static const templates
*current_templates
;
554 /* Per instruction expressionS buffers: max displacements & immediates. */
555 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
556 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
558 /* Current operand we are working on. */
559 static int this_operand
= -1;
561 /* We support four different modes. FLAG_CODE variable is used to distinguish
569 static enum flag_code flag_code
;
570 static unsigned int object_64bit
;
571 static unsigned int disallow_64bit_reloc
;
572 static int use_rela_relocations
= 0;
573 /* __tls_get_addr/___tls_get_addr symbol for TLS. */
574 static const char *tls_get_addr
;
576 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
577 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
578 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
580 /* The ELF ABI to use. */
588 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
591 #if defined (TE_PE) || defined (TE_PEP)
592 /* Use big object file format. */
593 static int use_big_obj
= 0;
596 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
597 /* 1 if generating code for a shared library. */
598 static int shared
= 0;
601 /* 1 for intel syntax,
603 static int intel_syntax
= 0;
605 static enum x86_64_isa
607 amd64
= 1, /* AMD64 ISA. */
608 intel64
/* Intel64 ISA. */
611 /* 1 for intel mnemonic,
612 0 if att mnemonic. */
613 static int intel_mnemonic
= !SYSV386_COMPAT
;
615 /* 1 if pseudo registers are permitted. */
616 static int allow_pseudo_reg
= 0;
618 /* 1 if register prefix % not required. */
619 static int allow_naked_reg
= 0;
621 /* 1 if the assembler should add BND prefix for all control-transferring
622 instructions supporting it, even if this prefix wasn't specified
624 static int add_bnd_prefix
= 0;
626 /* 1 if pseudo index register, eiz/riz, is allowed. */
627 static int allow_index_reg
= 0;
629 /* 1 if the assembler should ignore LOCK prefix, even if it was
630 specified explicitly. */
631 static int omit_lock_prefix
= 0;
633 /* 1 if the assembler should encode lfence, mfence, and sfence as
634 "lock addl $0, (%{re}sp)". */
635 static int avoid_fence
= 0;
637 /* 1 if lfence should be inserted after every load. */
638 static int lfence_after_load
= 0;
640 /* Non-zero if lfence should be inserted before indirect branch. */
641 static enum lfence_before_indirect_branch_kind
643 lfence_branch_none
= 0,
644 lfence_branch_register
,
645 lfence_branch_memory
,
648 lfence_before_indirect_branch
;
650 /* Non-zero if lfence should be inserted before ret. */
651 static enum lfence_before_ret_kind
653 lfence_before_ret_none
= 0,
654 lfence_before_ret_not
,
655 lfence_before_ret_or
,
656 lfence_before_ret_shl
660 /* Types of previous instruction is .byte or prefix. */
675 /* 1 if the assembler should generate relax relocations. */
677 static int generate_relax_relocations
678 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
680 static enum check_kind
686 sse_check
, operand_check
= check_warning
;
688 /* Non-zero if branches should be aligned within power of 2 boundary. */
689 static int align_branch_power
= 0;
691 /* Types of branches to align. */
692 enum align_branch_kind
694 align_branch_none
= 0,
695 align_branch_jcc
= 1,
696 align_branch_fused
= 2,
697 align_branch_jmp
= 3,
698 align_branch_call
= 4,
699 align_branch_indirect
= 5,
703 /* Type bits of branches to align. */
704 enum align_branch_bit
706 align_branch_jcc_bit
= 1 << align_branch_jcc
,
707 align_branch_fused_bit
= 1 << align_branch_fused
,
708 align_branch_jmp_bit
= 1 << align_branch_jmp
,
709 align_branch_call_bit
= 1 << align_branch_call
,
710 align_branch_indirect_bit
= 1 << align_branch_indirect
,
711 align_branch_ret_bit
= 1 << align_branch_ret
714 static unsigned int align_branch
= (align_branch_jcc_bit
715 | align_branch_fused_bit
716 | align_branch_jmp_bit
);
718 /* Types of condition jump used by macro-fusion. */
721 mf_jcc_jo
= 0, /* base opcode 0x70 */
722 mf_jcc_jc
, /* base opcode 0x72 */
723 mf_jcc_je
, /* base opcode 0x74 */
724 mf_jcc_jna
, /* base opcode 0x76 */
725 mf_jcc_js
, /* base opcode 0x78 */
726 mf_jcc_jp
, /* base opcode 0x7a */
727 mf_jcc_jl
, /* base opcode 0x7c */
728 mf_jcc_jle
, /* base opcode 0x7e */
731 /* Types of compare flag-modifying instructions used by macro-fusion. */
734 mf_cmp_test_and
, /* test/cmp */
735 mf_cmp_alu_cmp
, /* add/sub/cmp */
736 mf_cmp_incdec
/* inc/dec */
739 /* The maximum padding size for fused jcc. CMP like instruction can
740 be 9 bytes and jcc can be 6 bytes. Leave room just in case for
742 #define MAX_FUSED_JCC_PADDING_SIZE 20
744 /* The maximum number of prefixes added for an instruction. */
745 static unsigned int align_branch_prefix_size
= 5;
748 1. Clear the REX_W bit with register operand if possible.
749 2. Above plus use 128bit vector instruction to clear the full vector
752 static int optimize
= 0;
755 1. Clear the REX_W bit with register operand if possible.
756 2. Above plus use 128bit vector instruction to clear the full vector
758 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
761 static int optimize_for_space
= 0;
763 /* Register prefix used for error message. */
764 static const char *register_prefix
= "%";
766 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
767 leave, push, and pop instructions so that gcc has the same stack
768 frame as in 32 bit mode. */
769 static char stackop_size
= '\0';
771 /* Non-zero to optimize code alignment. */
772 int optimize_align_code
= 1;
774 /* Non-zero to quieten some warnings. */
775 static int quiet_warnings
= 0;
778 static const char *cpu_arch_name
= NULL
;
779 static char *cpu_sub_arch_name
= NULL
;
781 /* CPU feature flags. */
782 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
784 /* If we have selected a cpu we are generating instructions for. */
785 static int cpu_arch_tune_set
= 0;
787 /* Cpu we are generating instructions for. */
788 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
790 /* CPU feature flags of cpu we are generating instructions for. */
791 static i386_cpu_flags cpu_arch_tune_flags
;
793 /* CPU instruction set architecture used. */
794 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
796 /* CPU feature flags of instruction set architecture used. */
797 i386_cpu_flags cpu_arch_isa_flags
;
799 /* If set, conditional jumps are not automatically promoted to handle
800 larger than a byte offset. */
801 static unsigned int no_cond_jump_promotion
= 0;
803 /* Encode SSE instructions with VEX prefix. */
804 static unsigned int sse2avx
;
806 /* Encode scalar AVX instructions with specific vector length. */
813 /* Encode VEX WIG instructions with specific vex.w. */
820 /* Encode scalar EVEX LIG instructions with specific vector length. */
828 /* Encode EVEX WIG instructions with specific evex.w. */
835 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
836 static enum rc_type evexrcig
= rne
;
838 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
839 static symbolS
*GOT_symbol
;
841 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
842 unsigned int x86_dwarf2_return_column
;
844 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
845 int x86_cie_data_alignment
;
847 /* Interface to relax_segment.
848 There are 3 major relax states for 386 jump insns because the
849 different types of jumps add different sizes to frags when we're
850 figuring out what sort of jump to choose to reach a given label.
852 BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
853 branches which are handled by md_estimate_size_before_relax() and
854 i386_generic_table_relax_frag(). */
857 #define UNCOND_JUMP 0
859 #define COND_JUMP86 2
860 #define BRANCH_PADDING 3
861 #define BRANCH_PREFIX 4
862 #define FUSED_JCC_PADDING 5
867 #define SMALL16 (SMALL | CODE16)
869 #define BIG16 (BIG | CODE16)
873 #define INLINE __inline__
879 #define ENCODE_RELAX_STATE(type, size) \
880 ((relax_substateT) (((type) << 2) | (size)))
881 #define TYPE_FROM_RELAX_STATE(s) \
883 #define DISP_SIZE_FROM_RELAX_STATE(s) \
884 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
886 /* This table is used by relax_frag to promote short jumps to long
887 ones where necessary. SMALL (short) jumps may be promoted to BIG
888 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
889 don't allow a short jump in a 32 bit code segment to be promoted to
890 a 16 bit offset jump because it's slower (requires data size
891 prefix), and doesn't work, unless the destination is in the bottom
892 64k of the code segment (The top 16 bits of eip are zeroed). */
894 const relax_typeS md_relax_table
[] =
897 1) most positive reach of this state,
898 2) most negative reach of this state,
899 3) how many bytes this mode will have in the variable part of the frag
900 4) which index into the table to try if we can't fit into this one. */
902 /* UNCOND_JUMP states. */
903 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
904 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
905 /* dword jmp adds 4 bytes to frag:
906 0 extra opcode bytes, 4 displacement bytes. */
908 /* word jmp adds 2 bytes to frag:
909 0 extra opcode bytes, 2 displacement bytes. */
912 /* COND_JUMP states. */
913 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
914 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
915 /* dword conditionals adds 5 bytes to frag:
916 1 extra opcode byte, 4 displacement bytes. */
918 /* word conditionals add 3 bytes to frag:
919 1 extra opcode byte, 2 displacement bytes. */
922 /* COND_JUMP86 states. */
923 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
924 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
925 /* dword conditionals adds 5 bytes to frag:
926 1 extra opcode byte, 4 displacement bytes. */
928 /* word conditionals add 4 bytes to frag:
929 1 displacement byte and a 3 byte long branch insn. */
933 static const arch_entry cpu_arch
[] =
935 /* Do not replace the first two entries - i386_target_format()
936 relies on them being there in this order. */
937 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
938 CPU_GENERIC32_FLAGS
, 0 },
939 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
940 CPU_GENERIC64_FLAGS
, 0 },
941 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
943 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
945 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
947 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
949 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
951 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
953 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
955 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
957 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
958 CPU_PENTIUMPRO_FLAGS
, 0 },
959 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
961 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
963 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
965 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
967 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
968 CPU_NOCONA_FLAGS
, 0 },
969 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
971 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
973 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
974 CPU_CORE2_FLAGS
, 1 },
975 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
976 CPU_CORE2_FLAGS
, 0 },
977 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
978 CPU_COREI7_FLAGS
, 0 },
979 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
981 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
983 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
984 CPU_IAMCU_FLAGS
, 0 },
985 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
987 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
989 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
990 CPU_ATHLON_FLAGS
, 0 },
991 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
993 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
995 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
997 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
998 CPU_AMDFAM10_FLAGS
, 0 },
999 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
1000 CPU_BDVER1_FLAGS
, 0 },
1001 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
1002 CPU_BDVER2_FLAGS
, 0 },
1003 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
1004 CPU_BDVER3_FLAGS
, 0 },
1005 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
1006 CPU_BDVER4_FLAGS
, 0 },
1007 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
1008 CPU_ZNVER1_FLAGS
, 0 },
1009 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER
,
1010 CPU_ZNVER2_FLAGS
, 0 },
1011 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
1012 CPU_BTVER1_FLAGS
, 0 },
1013 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
1014 CPU_BTVER2_FLAGS
, 0 },
1015 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
1016 CPU_8087_FLAGS
, 0 },
1017 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
1019 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
1021 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
1023 { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN
,
1024 CPU_CMOV_FLAGS
, 0 },
1025 { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN
,
1026 CPU_FXSR_FLAGS
, 0 },
1027 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
1029 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
1031 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
1032 CPU_SSE2_FLAGS
, 0 },
1033 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
1034 CPU_SSE3_FLAGS
, 0 },
1035 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1036 CPU_SSE4A_FLAGS
, 0 },
1037 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
1038 CPU_SSSE3_FLAGS
, 0 },
1039 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
1040 CPU_SSE4_1_FLAGS
, 0 },
1041 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
1042 CPU_SSE4_2_FLAGS
, 0 },
1043 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
1044 CPU_SSE4_2_FLAGS
, 0 },
1045 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
1047 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
1048 CPU_AVX2_FLAGS
, 0 },
1049 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
1050 CPU_AVX512F_FLAGS
, 0 },
1051 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
1052 CPU_AVX512CD_FLAGS
, 0 },
1053 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
1054 CPU_AVX512ER_FLAGS
, 0 },
1055 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
1056 CPU_AVX512PF_FLAGS
, 0 },
1057 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
1058 CPU_AVX512DQ_FLAGS
, 0 },
1059 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
1060 CPU_AVX512BW_FLAGS
, 0 },
1061 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
1062 CPU_AVX512VL_FLAGS
, 0 },
1063 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
1065 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
1066 CPU_VMFUNC_FLAGS
, 0 },
1067 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
1069 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
1070 CPU_XSAVE_FLAGS
, 0 },
1071 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
1072 CPU_XSAVEOPT_FLAGS
, 0 },
1073 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
1074 CPU_XSAVEC_FLAGS
, 0 },
1075 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
1076 CPU_XSAVES_FLAGS
, 0 },
1077 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
1079 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
1080 CPU_PCLMUL_FLAGS
, 0 },
1081 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
1082 CPU_PCLMUL_FLAGS
, 1 },
1083 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
1084 CPU_FSGSBASE_FLAGS
, 0 },
1085 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
1086 CPU_RDRND_FLAGS
, 0 },
1087 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
1088 CPU_F16C_FLAGS
, 0 },
1089 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
1090 CPU_BMI2_FLAGS
, 0 },
1091 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
1093 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
1094 CPU_FMA4_FLAGS
, 0 },
1095 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
1097 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
1099 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
1100 CPU_MOVBE_FLAGS
, 0 },
1101 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
1102 CPU_CX16_FLAGS
, 0 },
1103 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
1105 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
1106 CPU_LZCNT_FLAGS
, 0 },
1107 { STRING_COMMA_LEN (".popcnt"), PROCESSOR_UNKNOWN
,
1108 CPU_POPCNT_FLAGS
, 0 },
1109 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
1111 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
1113 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
1114 CPU_INVPCID_FLAGS
, 0 },
1115 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
1116 CPU_CLFLUSH_FLAGS
, 0 },
1117 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
1119 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
1120 CPU_SYSCALL_FLAGS
, 0 },
1121 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
1122 CPU_RDTSCP_FLAGS
, 0 },
1123 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
1124 CPU_3DNOW_FLAGS
, 0 },
1125 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
1126 CPU_3DNOWA_FLAGS
, 0 },
1127 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
1128 CPU_PADLOCK_FLAGS
, 0 },
1129 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
1130 CPU_SVME_FLAGS
, 1 },
1131 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
1132 CPU_SVME_FLAGS
, 0 },
1133 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1134 CPU_SSE4A_FLAGS
, 0 },
1135 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
1137 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
1139 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
1141 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
1143 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
1144 CPU_RDSEED_FLAGS
, 0 },
1145 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
1146 CPU_PRFCHW_FLAGS
, 0 },
1147 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
1148 CPU_SMAP_FLAGS
, 0 },
1149 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
1151 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
1153 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
1154 CPU_CLFLUSHOPT_FLAGS
, 0 },
1155 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
1156 CPU_PREFETCHWT1_FLAGS
, 0 },
1157 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
1159 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
1160 CPU_CLWB_FLAGS
, 0 },
1161 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
1162 CPU_AVX512IFMA_FLAGS
, 0 },
1163 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
1164 CPU_AVX512VBMI_FLAGS
, 0 },
1165 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
1166 CPU_AVX512_4FMAPS_FLAGS
, 0 },
1167 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
1168 CPU_AVX512_4VNNIW_FLAGS
, 0 },
1169 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
1170 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
1171 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
1172 CPU_AVX512_VBMI2_FLAGS
, 0 },
1173 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN
,
1174 CPU_AVX512_VNNI_FLAGS
, 0 },
1175 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN
,
1176 CPU_AVX512_BITALG_FLAGS
, 0 },
1177 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
1178 CPU_CLZERO_FLAGS
, 0 },
1179 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
1180 CPU_MWAITX_FLAGS
, 0 },
1181 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
1182 CPU_OSPKE_FLAGS
, 0 },
1183 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
1184 CPU_RDPID_FLAGS
, 0 },
1185 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
1186 CPU_PTWRITE_FLAGS
, 0 },
1187 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN
,
1189 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN
,
1190 CPU_SHSTK_FLAGS
, 0 },
1191 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1192 CPU_GFNI_FLAGS
, 0 },
1193 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN
,
1194 CPU_VAES_FLAGS
, 0 },
1195 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN
,
1196 CPU_VPCLMULQDQ_FLAGS
, 0 },
1197 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN
,
1198 CPU_WBNOINVD_FLAGS
, 0 },
1199 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN
,
1200 CPU_PCONFIG_FLAGS
, 0 },
1201 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN
,
1202 CPU_WAITPKG_FLAGS
, 0 },
1203 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN
,
1204 CPU_CLDEMOTE_FLAGS
, 0 },
1205 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN
,
1206 CPU_MOVDIRI_FLAGS
, 0 },
1207 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN
,
1208 CPU_MOVDIR64B_FLAGS
, 0 },
1209 { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN
,
1210 CPU_AVX512_BF16_FLAGS
, 0 },
1211 { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN
,
1212 CPU_AVX512_VP2INTERSECT_FLAGS
, 0 },
1213 { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN
,
1214 CPU_ENQCMD_FLAGS
, 0 },
1215 { STRING_COMMA_LEN (".serialize"), PROCESSOR_UNKNOWN
,
1216 CPU_SERIALIZE_FLAGS
, 0 },
1217 { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN
,
1218 CPU_RDPRU_FLAGS
, 0 },
1219 { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN
,
1220 CPU_MCOMMIT_FLAGS
, 0 },
1221 { STRING_COMMA_LEN (".sev_es"), PROCESSOR_UNKNOWN
,
1222 CPU_SEV_ES_FLAGS
, 0 },
1223 { STRING_COMMA_LEN (".tsxldtrk"), PROCESSOR_UNKNOWN
,
1224 CPU_TSXLDTRK_FLAGS
, 0 },
1227 static const noarch_entry cpu_noarch
[] =
1229 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1230 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1231 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1232 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1233 { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS
},
1234 { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS
},
1235 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1236 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1237 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1238 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1239 { STRING_COMMA_LEN ("nosse4a"), CPU_ANY_SSE4A_FLAGS
},
1240 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1241 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1242 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1243 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1244 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1245 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1246 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1247 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1248 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1249 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1250 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1251 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1252 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1253 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1254 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1255 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1256 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1257 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1258 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1259 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS
},
1260 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS
},
1261 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS
},
1262 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS
},
1263 { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS
},
1264 { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS
},
1265 { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS
},
1266 { STRING_COMMA_LEN ("noavx512_vp2intersect"), CPU_ANY_SHSTK_FLAGS
},
1267 { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS
},
1268 { STRING_COMMA_LEN ("noserialize"), CPU_ANY_SERIALIZE_FLAGS
},
1269 { STRING_COMMA_LEN ("notsxldtrk"), CPU_ANY_TSXLDTRK_FLAGS
},
1273 /* Like s_lcomm_internal in gas/read.c but the alignment string
1274 is allowed to be optional. */
1277 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1284 && *input_line_pointer
== ',')
1286 align
= parse_align (needs_align
- 1);
1288 if (align
== (addressT
) -1)
1303 bss_alloc (symbolP
, size
, align
);
/* Handler for the .lcomm pseudo-op on PE targets: define a local common
   symbol, with the alignment argument optional (see pe_lcomm_internal
   above).  NOTE(review): the extraction dropped this definition's return
   type and braces; only comments are added here, code tokens untouched.  */
1308 pe_lcomm (int needs_align
)
/* The "* 2" presumably encodes "alignment argument is optional" for
   s_comm_internal -- confirm against s_comm_internal in gas/read.c.  */
1310 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1314 const pseudo_typeS md_pseudo_table
[] =
1316 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1317 {"align", s_align_bytes
, 0},
1319 {"align", s_align_ptwo
, 0},
1321 {"arch", set_cpu_arch
, 0},
1325 {"lcomm", pe_lcomm
, 1},
1327 {"ffloat", float_cons
, 'f'},
1328 {"dfloat", float_cons
, 'd'},
1329 {"tfloat", float_cons
, 'x'},
1331 {"slong", signed_cons
, 4},
1332 {"noopt", s_ignore
, 0},
1333 {"optim", s_ignore
, 0},
1334 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1335 {"code16", set_code_flag
, CODE_16BIT
},
1336 {"code32", set_code_flag
, CODE_32BIT
},
1338 {"code64", set_code_flag
, CODE_64BIT
},
1340 {"intel_syntax", set_intel_syntax
, 1},
1341 {"att_syntax", set_intel_syntax
, 0},
1342 {"intel_mnemonic", set_intel_mnemonic
, 1},
1343 {"att_mnemonic", set_intel_mnemonic
, 0},
1344 {"allow_index_reg", set_allow_index_reg
, 1},
1345 {"disallow_index_reg", set_allow_index_reg
, 0},
1346 {"sse_check", set_check
, 0},
1347 {"operand_check", set_check
, 1},
1348 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1349 {"largecomm", handle_large_common
, 0},
1351 {"file", dwarf2_directive_file
, 0},
1352 {"loc", dwarf2_directive_loc
, 0},
1353 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1356 {"secrel32", pe_directive_secrel
, 0},
1361 /* For interface with expression (). */
1362 extern char *input_line_pointer
;
1364 /* Hash table for instruction mnemonic lookup. */
1365 static struct hash_control
*op_hash
;
1367 /* Hash table for register lookup. */
1368 static struct hash_control
*reg_hash
;
1370 /* Various efficient no-op patterns for aligning code labels.
1371 Note: Don't try to assemble the instructions in the comments.
1372 0L and 0w are not legal. */
1373 static const unsigned char f32_1
[] =
1375 static const unsigned char f32_2
[] =
1376 {0x66,0x90}; /* xchg %ax,%ax */
1377 static const unsigned char f32_3
[] =
1378 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1379 static const unsigned char f32_4
[] =
1380 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1381 static const unsigned char f32_6
[] =
1382 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1383 static const unsigned char f32_7
[] =
1384 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1385 static const unsigned char f16_3
[] =
1386 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1387 static const unsigned char f16_4
[] =
1388 {0x8d,0xb4,0x00,0x00}; /* lea 0W(%si),%si */
1389 static const unsigned char jump_disp8
[] =
1390 {0xeb}; /* jmp disp8 */
1391 static const unsigned char jump32_disp32
[] =
1392 {0xe9}; /* jmp disp32 */
1393 static const unsigned char jump16_disp32
[] =
1394 {0x66,0xe9}; /* jmp disp32 */
1395 /* 32-bit NOPs patterns. */
1396 static const unsigned char *const f32_patt
[] = {
1397 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1399 /* 16-bit NOPs patterns. */
1400 static const unsigned char *const f16_patt
[] = {
1401 f32_1
, f32_2
, f16_3
, f16_4
1403 /* nopl (%[re]ax) */
1404 static const unsigned char alt_3
[] =
1406 /* nopl 0(%[re]ax) */
1407 static const unsigned char alt_4
[] =
1408 {0x0f,0x1f,0x40,0x00};
1409 /* nopl 0(%[re]ax,%[re]ax,1) */
1410 static const unsigned char alt_5
[] =
1411 {0x0f,0x1f,0x44,0x00,0x00};
1412 /* nopw 0(%[re]ax,%[re]ax,1) */
1413 static const unsigned char alt_6
[] =
1414 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1415 /* nopl 0L(%[re]ax) */
1416 static const unsigned char alt_7
[] =
1417 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1418 /* nopl 0L(%[re]ax,%[re]ax,1) */
1419 static const unsigned char alt_8
[] =
1420 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1421 /* nopw 0L(%[re]ax,%[re]ax,1) */
1422 static const unsigned char alt_9
[] =
1423 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1424 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1425 static const unsigned char alt_10
[] =
1426 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1427 /* data16 nopw %cs:0L(%eax,%eax,1) */
1428 static const unsigned char alt_11
[] =
1429 {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1430 /* 32-bit and 64-bit NOPs patterns. */
1431 static const unsigned char *const alt_patt
[] = {
1432 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1433 alt_9
, alt_10
, alt_11
1436 /* Genenerate COUNT bytes of NOPs to WHERE from PATT with the maximum
1437 size of a single NOP instruction MAX_SINGLE_NOP_SIZE. */
1440 i386_output_nops (char *where
, const unsigned char *const *patt
,
1441 int count
, int max_single_nop_size
)
1444 /* Place the longer NOP first. */
1447 const unsigned char *nops
;
1449 if (max_single_nop_size
< 1)
1451 as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
1452 max_single_nop_size
);
1456 nops
= patt
[max_single_nop_size
- 1];
1458 /* Use the smaller one if the requsted one isn't available. */
1461 max_single_nop_size
--;
1462 nops
= patt
[max_single_nop_size
- 1];
1465 last
= count
% max_single_nop_size
;
1468 for (offset
= 0; offset
< count
; offset
+= max_single_nop_size
)
1469 memcpy (where
+ offset
, nops
, max_single_nop_size
);
1473 nops
= patt
[last
- 1];
1476 /* Use the smaller one plus one-byte NOP if the needed one
1479 nops
= patt
[last
- 1];
1480 memcpy (where
+ offset
, nops
, last
);
1481 where
[offset
+ last
] = *patt
[0];
1484 memcpy (where
+ offset
, nops
, last
);
1489 fits_in_imm7 (offsetT num
)
1491 return (num
& 0x7f) == num
;
1495 fits_in_imm31 (offsetT num
)
1497 return (num
& 0x7fffffff) == num
;
1500 /* Genenerate COUNT bytes of NOPs to WHERE with the maximum size of a
1501 single NOP instruction LIMIT. */
1504 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1506 const unsigned char *const *patt
= NULL
;
1507 int max_single_nop_size
;
1508 /* Maximum number of NOPs before switching to jump over NOPs. */
1509 int max_number_of_nops
;
1511 switch (fragP
->fr_type
)
1516 case rs_machine_dependent
:
1517 /* Allow NOP padding for jumps and calls. */
1518 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
1519 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
1526 /* We need to decide which NOP sequence to use for 32bit and
1527 64bit. When -mtune= is used:
1529 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1530 PROCESSOR_GENERIC32, f32_patt will be used.
1531 2. For the rest, alt_patt will be used.
1533 When -mtune= isn't used, alt_patt will be used if
1534 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1537 When -march= or .arch is used, we can't use anything beyond
1538 cpu_arch_isa_flags. */
1540 if (flag_code
== CODE_16BIT
)
1543 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1544 /* Limit number of NOPs to 2 in 16-bit mode. */
1545 max_number_of_nops
= 2;
1549 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1551 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1552 switch (cpu_arch_tune
)
1554 case PROCESSOR_UNKNOWN
:
1555 /* We use cpu_arch_isa_flags to check if we SHOULD
1556 optimize with nops. */
1557 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1562 case PROCESSOR_PENTIUM4
:
1563 case PROCESSOR_NOCONA
:
1564 case PROCESSOR_CORE
:
1565 case PROCESSOR_CORE2
:
1566 case PROCESSOR_COREI7
:
1567 case PROCESSOR_L1OM
:
1568 case PROCESSOR_K1OM
:
1569 case PROCESSOR_GENERIC64
:
1571 case PROCESSOR_ATHLON
:
1573 case PROCESSOR_AMDFAM10
:
1575 case PROCESSOR_ZNVER
:
1579 case PROCESSOR_I386
:
1580 case PROCESSOR_I486
:
1581 case PROCESSOR_PENTIUM
:
1582 case PROCESSOR_PENTIUMPRO
:
1583 case PROCESSOR_IAMCU
:
1584 case PROCESSOR_GENERIC32
:
1591 switch (fragP
->tc_frag_data
.tune
)
1593 case PROCESSOR_UNKNOWN
:
1594 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1595 PROCESSOR_UNKNOWN. */
1599 case PROCESSOR_I386
:
1600 case PROCESSOR_I486
:
1601 case PROCESSOR_PENTIUM
:
1602 case PROCESSOR_IAMCU
:
1604 case PROCESSOR_ATHLON
:
1606 case PROCESSOR_AMDFAM10
:
1608 case PROCESSOR_ZNVER
:
1610 case PROCESSOR_GENERIC32
:
1611 /* We use cpu_arch_isa_flags to check if we CAN optimize
1613 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1618 case PROCESSOR_PENTIUMPRO
:
1619 case PROCESSOR_PENTIUM4
:
1620 case PROCESSOR_NOCONA
:
1621 case PROCESSOR_CORE
:
1622 case PROCESSOR_CORE2
:
1623 case PROCESSOR_COREI7
:
1624 case PROCESSOR_L1OM
:
1625 case PROCESSOR_K1OM
:
1626 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1631 case PROCESSOR_GENERIC64
:
1637 if (patt
== f32_patt
)
1639 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1640 /* Limit number of NOPs to 2 for older processors. */
1641 max_number_of_nops
= 2;
1645 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1646 /* Limit number of NOPs to 7 for newer processors. */
1647 max_number_of_nops
= 7;
1652 limit
= max_single_nop_size
;
1654 if (fragP
->fr_type
== rs_fill_nop
)
1656 /* Output NOPs for .nop directive. */
1657 if (limit
> max_single_nop_size
)
1659 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1660 _("invalid single nop size: %d "
1661 "(expect within [0, %d])"),
1662 limit
, max_single_nop_size
);
1666 else if (fragP
->fr_type
!= rs_machine_dependent
)
1667 fragP
->fr_var
= count
;
1669 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1671 /* Generate jump over NOPs. */
1672 offsetT disp
= count
- 2;
1673 if (fits_in_imm7 (disp
))
1675 /* Use "jmp disp8" if possible. */
1677 where
[0] = jump_disp8
[0];
1683 unsigned int size_of_jump
;
1685 if (flag_code
== CODE_16BIT
)
1687 where
[0] = jump16_disp32
[0];
1688 where
[1] = jump16_disp32
[1];
1693 where
[0] = jump32_disp32
[0];
1697 count
-= size_of_jump
+ 4;
1698 if (!fits_in_imm31 (count
))
1700 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1701 _("jump over nop padding out of range"));
1705 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1706 where
+= size_of_jump
+ 4;
1710 /* Generate multiple NOPs. */
1711 i386_output_nops (where
, patt
, count
, limit
);
1715 operand_type_all_zero (const union i386_operand_type
*x
)
1717 switch (ARRAY_SIZE(x
->array
))
1728 return !x
->array
[0];
1735 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1737 switch (ARRAY_SIZE(x
->array
))
1753 x
->bitfield
.class = ClassNone
;
1754 x
->bitfield
.instance
= InstanceNone
;
1758 operand_type_equal (const union i386_operand_type
*x
,
1759 const union i386_operand_type
*y
)
1761 switch (ARRAY_SIZE(x
->array
))
1764 if (x
->array
[2] != y
->array
[2])
1768 if (x
->array
[1] != y
->array
[1])
1772 return x
->array
[0] == y
->array
[0];
1780 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1782 switch (ARRAY_SIZE(x
->array
))
1797 return !x
->array
[0];
1804 cpu_flags_equal (const union i386_cpu_flags
*x
,
1805 const union i386_cpu_flags
*y
)
1807 switch (ARRAY_SIZE(x
->array
))
1810 if (x
->array
[3] != y
->array
[3])
1814 if (x
->array
[2] != y
->array
[2])
1818 if (x
->array
[1] != y
->array
[1])
1822 return x
->array
[0] == y
->array
[0];
1830 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1832 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1833 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1836 static INLINE i386_cpu_flags
1837 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1839 switch (ARRAY_SIZE (x
.array
))
1842 x
.array
[3] &= y
.array
[3];
1845 x
.array
[2] &= y
.array
[2];
1848 x
.array
[1] &= y
.array
[1];
1851 x
.array
[0] &= y
.array
[0];
1859 static INLINE i386_cpu_flags
1860 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1862 switch (ARRAY_SIZE (x
.array
))
1865 x
.array
[3] |= y
.array
[3];
1868 x
.array
[2] |= y
.array
[2];
1871 x
.array
[1] |= y
.array
[1];
1874 x
.array
[0] |= y
.array
[0];
1882 static INLINE i386_cpu_flags
1883 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1885 switch (ARRAY_SIZE (x
.array
))
1888 x
.array
[3] &= ~y
.array
[3];
1891 x
.array
[2] &= ~y
.array
[2];
1894 x
.array
[1] &= ~y
.array
[1];
1897 x
.array
[0] &= ~y
.array
[0];
1905 static const i386_cpu_flags avx512
= CPU_ANY_AVX512F_FLAGS
;
1907 #define CPU_FLAGS_ARCH_MATCH 0x1
1908 #define CPU_FLAGS_64BIT_MATCH 0x2
1910 #define CPU_FLAGS_PERFECT_MATCH \
1911 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1913 /* Return CPU flags match bits. */
1916 cpu_flags_match (const insn_template
*t
)
1918 i386_cpu_flags x
= t
->cpu_flags
;
1919 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1921 x
.bitfield
.cpu64
= 0;
1922 x
.bitfield
.cpuno64
= 0;
1924 if (cpu_flags_all_zero (&x
))
1926 /* This instruction is available on all archs. */
1927 match
|= CPU_FLAGS_ARCH_MATCH
;
1931 /* This instruction is available only on some archs. */
1932 i386_cpu_flags cpu
= cpu_arch_flags
;
1934 /* AVX512VL is no standalone feature - match it and then strip it. */
1935 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1937 x
.bitfield
.cpuavx512vl
= 0;
1939 cpu
= cpu_flags_and (x
, cpu
);
1940 if (!cpu_flags_all_zero (&cpu
))
1942 if (x
.bitfield
.cpuavx
)
1944 /* We need to check a few extra flags with AVX. */
1945 if (cpu
.bitfield
.cpuavx
1946 && (!t
->opcode_modifier
.sse2avx
|| sse2avx
)
1947 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1948 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1949 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1950 match
|= CPU_FLAGS_ARCH_MATCH
;
1952 else if (x
.bitfield
.cpuavx512f
)
1954 /* We need to check a few extra flags with AVX512F. */
1955 if (cpu
.bitfield
.cpuavx512f
1956 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1957 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1958 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1959 match
|= CPU_FLAGS_ARCH_MATCH
;
1962 match
|= CPU_FLAGS_ARCH_MATCH
;
1968 static INLINE i386_operand_type
1969 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1971 if (x
.bitfield
.class != y
.bitfield
.class)
1972 x
.bitfield
.class = ClassNone
;
1973 if (x
.bitfield
.instance
!= y
.bitfield
.instance
)
1974 x
.bitfield
.instance
= InstanceNone
;
1976 switch (ARRAY_SIZE (x
.array
))
1979 x
.array
[2] &= y
.array
[2];
1982 x
.array
[1] &= y
.array
[1];
1985 x
.array
[0] &= y
.array
[0];
1993 static INLINE i386_operand_type
1994 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
1996 gas_assert (y
.bitfield
.class == ClassNone
);
1997 gas_assert (y
.bitfield
.instance
== InstanceNone
);
1999 switch (ARRAY_SIZE (x
.array
))
2002 x
.array
[2] &= ~y
.array
[2];
2005 x
.array
[1] &= ~y
.array
[1];
2008 x
.array
[0] &= ~y
.array
[0];
2016 static INLINE i386_operand_type
2017 operand_type_or (i386_operand_type x
, i386_operand_type y
)
2019 gas_assert (x
.bitfield
.class == ClassNone
||
2020 y
.bitfield
.class == ClassNone
||
2021 x
.bitfield
.class == y
.bitfield
.class);
2022 gas_assert (x
.bitfield
.instance
== InstanceNone
||
2023 y
.bitfield
.instance
== InstanceNone
||
2024 x
.bitfield
.instance
== y
.bitfield
.instance
);
2026 switch (ARRAY_SIZE (x
.array
))
2029 x
.array
[2] |= y
.array
[2];
2032 x
.array
[1] |= y
.array
[1];
2035 x
.array
[0] |= y
.array
[0];
2043 static INLINE i386_operand_type
2044 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
2046 gas_assert (y
.bitfield
.class == ClassNone
);
2047 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2049 switch (ARRAY_SIZE (x
.array
))
2052 x
.array
[2] ^= y
.array
[2];
2055 x
.array
[1] ^= y
.array
[1];
2058 x
.array
[0] ^= y
.array
[0];
2066 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
2067 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
2068 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
2069 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
2070 static const i386_operand_type anydisp
= OPERAND_TYPE_ANYDISP
;
2071 static const i386_operand_type anyimm
= OPERAND_TYPE_ANYIMM
;
2072 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
2073 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
2074 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
2075 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
2076 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
2077 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
2078 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
2079 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
2080 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
2081 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
2082 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
2093 operand_type_check (i386_operand_type t
, enum operand_type c
)
2098 return t
.bitfield
.class == Reg
;
2101 return (t
.bitfield
.imm8
2105 || t
.bitfield
.imm32s
2106 || t
.bitfield
.imm64
);
2109 return (t
.bitfield
.disp8
2110 || t
.bitfield
.disp16
2111 || t
.bitfield
.disp32
2112 || t
.bitfield
.disp32s
2113 || t
.bitfield
.disp64
);
2116 return (t
.bitfield
.disp8
2117 || t
.bitfield
.disp16
2118 || t
.bitfield
.disp32
2119 || t
.bitfield
.disp32s
2120 || t
.bitfield
.disp64
2121 || t
.bitfield
.baseindex
);
2130 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
2131 between operand GIVEN and opeand WANTED for instruction template T. */
2134 match_operand_size (const insn_template
*t
, unsigned int wanted
,
2137 return !((i
.types
[given
].bitfield
.byte
2138 && !t
->operand_types
[wanted
].bitfield
.byte
)
2139 || (i
.types
[given
].bitfield
.word
2140 && !t
->operand_types
[wanted
].bitfield
.word
)
2141 || (i
.types
[given
].bitfield
.dword
2142 && !t
->operand_types
[wanted
].bitfield
.dword
)
2143 || (i
.types
[given
].bitfield
.qword
2144 && !t
->operand_types
[wanted
].bitfield
.qword
)
2145 || (i
.types
[given
].bitfield
.tbyte
2146 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
2149 /* Return 1 if there is no conflict in SIMD register between operand
2150 GIVEN and opeand WANTED for instruction template T. */
2153 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2156 return !((i
.types
[given
].bitfield
.xmmword
2157 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2158 || (i
.types
[given
].bitfield
.ymmword
2159 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2160 || (i
.types
[given
].bitfield
.zmmword
2161 && !t
->operand_types
[wanted
].bitfield
.zmmword
));
2164 /* Return 1 if there is no conflict in any size between operand GIVEN
2165 and opeand WANTED for instruction template T. */
2168 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2171 return (match_operand_size (t
, wanted
, given
)
2172 && !((i
.types
[given
].bitfield
.unspecified
2174 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2175 || (i
.types
[given
].bitfield
.fword
2176 && !t
->operand_types
[wanted
].bitfield
.fword
)
2177 /* For scalar opcode templates to allow register and memory
2178 operands at the same time, some special casing is needed
2179 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2180 down-conversion vpmov*. */
2181 || ((t
->operand_types
[wanted
].bitfield
.class == RegSIMD
2182 && t
->operand_types
[wanted
].bitfield
.byte
2183 + t
->operand_types
[wanted
].bitfield
.word
2184 + t
->operand_types
[wanted
].bitfield
.dword
2185 + t
->operand_types
[wanted
].bitfield
.qword
2186 > !!t
->opcode_modifier
.broadcast
)
2187 ? (i
.types
[given
].bitfield
.xmmword
2188 || i
.types
[given
].bitfield
.ymmword
2189 || i
.types
[given
].bitfield
.zmmword
)
2190 : !match_simd_size(t
, wanted
, given
))));
2193 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2194 operands for instruction template T, and it has MATCH_REVERSE set if there
2195 is no size conflict on any operands for the template with operands reversed
2196 (and the template allows for reversing in the first place). */
2198 #define MATCH_STRAIGHT 1
2199 #define MATCH_REVERSE 2
2201 static INLINE
unsigned int
2202 operand_size_match (const insn_template
*t
)
2204 unsigned int j
, match
= MATCH_STRAIGHT
;
2206 /* Don't check non-absolute jump instructions. */
2207 if (t
->opcode_modifier
.jump
2208 && t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
2211 /* Check memory and accumulator operand size. */
2212 for (j
= 0; j
< i
.operands
; j
++)
2214 if (i
.types
[j
].bitfield
.class != Reg
2215 && i
.types
[j
].bitfield
.class != RegSIMD
2216 && t
->opcode_modifier
.anysize
)
2219 if (t
->operand_types
[j
].bitfield
.class == Reg
2220 && !match_operand_size (t
, j
, j
))
2226 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2227 && !match_simd_size (t
, j
, j
))
2233 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2234 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2240 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
2247 if (!t
->opcode_modifier
.d
)
2251 i
.error
= operand_size_mismatch
;
2255 /* Check reverse. */
2256 gas_assert (i
.operands
>= 2 && i
.operands
<= 3);
2258 for (j
= 0; j
< i
.operands
; j
++)
2260 unsigned int given
= i
.operands
- j
- 1;
2262 if (t
->operand_types
[j
].bitfield
.class == Reg
2263 && !match_operand_size (t
, j
, given
))
2266 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2267 && !match_simd_size (t
, j
, given
))
2270 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2271 && (!match_operand_size (t
, j
, given
)
2272 || !match_simd_size (t
, j
, given
)))
2275 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2279 return match
| MATCH_REVERSE
;
2283 operand_type_match (i386_operand_type overlap
,
2284 i386_operand_type given
)
2286 i386_operand_type temp
= overlap
;
2288 temp
.bitfield
.unspecified
= 0;
2289 temp
.bitfield
.byte
= 0;
2290 temp
.bitfield
.word
= 0;
2291 temp
.bitfield
.dword
= 0;
2292 temp
.bitfield
.fword
= 0;
2293 temp
.bitfield
.qword
= 0;
2294 temp
.bitfield
.tbyte
= 0;
2295 temp
.bitfield
.xmmword
= 0;
2296 temp
.bitfield
.ymmword
= 0;
2297 temp
.bitfield
.zmmword
= 0;
2298 if (operand_type_all_zero (&temp
))
2301 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
)
2305 i
.error
= operand_type_mismatch
;
2309 /* If given types g0 and g1 are registers they must be of the same type
2310 unless the expected operand type register overlap is null.
2311 Some Intel syntax memory operand size checking also happens here. */
2314 operand_type_register_match (i386_operand_type g0
,
2315 i386_operand_type t0
,
2316 i386_operand_type g1
,
2317 i386_operand_type t1
)
2319 if (g0
.bitfield
.class != Reg
2320 && g0
.bitfield
.class != RegSIMD
2321 && (!operand_type_check (g0
, anymem
)
2322 || g0
.bitfield
.unspecified
2323 || (t0
.bitfield
.class != Reg
2324 && t0
.bitfield
.class != RegSIMD
)))
2327 if (g1
.bitfield
.class != Reg
2328 && g1
.bitfield
.class != RegSIMD
2329 && (!operand_type_check (g1
, anymem
)
2330 || g1
.bitfield
.unspecified
2331 || (t1
.bitfield
.class != Reg
2332 && t1
.bitfield
.class != RegSIMD
)))
2335 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2336 && g0
.bitfield
.word
== g1
.bitfield
.word
2337 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2338 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2339 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2340 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2341 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2344 if (!(t0
.bitfield
.byte
& t1
.bitfield
.byte
)
2345 && !(t0
.bitfield
.word
& t1
.bitfield
.word
)
2346 && !(t0
.bitfield
.dword
& t1
.bitfield
.dword
)
2347 && !(t0
.bitfield
.qword
& t1
.bitfield
.qword
)
2348 && !(t0
.bitfield
.xmmword
& t1
.bitfield
.xmmword
)
2349 && !(t0
.bitfield
.ymmword
& t1
.bitfield
.ymmword
)
2350 && !(t0
.bitfield
.zmmword
& t1
.bitfield
.zmmword
))
2353 i
.error
= register_type_mismatch
;
2358 static INLINE
unsigned int
2359 register_number (const reg_entry
*r
)
2361 unsigned int nr
= r
->reg_num
;
2363 if (r
->reg_flags
& RegRex
)
2366 if (r
->reg_flags
& RegVRex
)
2372 static INLINE
unsigned int
2373 mode_from_disp_size (i386_operand_type t
)
2375 if (t
.bitfield
.disp8
)
2377 else if (t
.bitfield
.disp16
2378 || t
.bitfield
.disp32
2379 || t
.bitfield
.disp32s
)
2386 fits_in_signed_byte (addressT num
)
2388 return num
+ 0x80 <= 0xff;
2392 fits_in_unsigned_byte (addressT num
)
2398 fits_in_unsigned_word (addressT num
)
2400 return num
<= 0xffff;
2404 fits_in_signed_word (addressT num
)
2406 return num
+ 0x8000 <= 0xffff;
2410 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2415 return num
+ 0x80000000 <= 0xffffffff;
2417 } /* fits_in_signed_long() */
2420 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2425 return num
<= 0xffffffff;
2427 } /* fits_in_unsigned_long() */
2430 fits_in_disp8 (offsetT num
)
2432 int shift
= i
.memshift
;
2438 mask
= (1 << shift
) - 1;
2440 /* Return 0 if NUM isn't properly aligned. */
2444 /* Check if NUM will fit in 8bit after shift. */
2445 return fits_in_signed_byte (num
>> shift
);
2449 fits_in_imm4 (offsetT num
)
2451 return (num
& 0xf) == num
;
2454 static i386_operand_type
2455 smallest_imm_type (offsetT num
)
2457 i386_operand_type t
;
2459 operand_type_set (&t
, 0);
2460 t
.bitfield
.imm64
= 1;
2462 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2464 /* This code is disabled on the 486 because all the Imm1 forms
2465 in the opcode table are slower on the i486. They're the
2466 versions with the implicitly specified single-position
2467 displacement, which has another syntax if you really want to
2469 t
.bitfield
.imm1
= 1;
2470 t
.bitfield
.imm8
= 1;
2471 t
.bitfield
.imm8s
= 1;
2472 t
.bitfield
.imm16
= 1;
2473 t
.bitfield
.imm32
= 1;
2474 t
.bitfield
.imm32s
= 1;
2476 else if (fits_in_signed_byte (num
))
2478 t
.bitfield
.imm8
= 1;
2479 t
.bitfield
.imm8s
= 1;
2480 t
.bitfield
.imm16
= 1;
2481 t
.bitfield
.imm32
= 1;
2482 t
.bitfield
.imm32s
= 1;
2484 else if (fits_in_unsigned_byte (num
))
2486 t
.bitfield
.imm8
= 1;
2487 t
.bitfield
.imm16
= 1;
2488 t
.bitfield
.imm32
= 1;
2489 t
.bitfield
.imm32s
= 1;
2491 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2493 t
.bitfield
.imm16
= 1;
2494 t
.bitfield
.imm32
= 1;
2495 t
.bitfield
.imm32s
= 1;
2497 else if (fits_in_signed_long (num
))
2499 t
.bitfield
.imm32
= 1;
2500 t
.bitfield
.imm32s
= 1;
2502 else if (fits_in_unsigned_long (num
))
2503 t
.bitfield
.imm32
= 1;
2509 offset_in_range (offsetT val
, int size
)
2515 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2516 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2517 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2519 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2525 /* If BFD64, sign extend val for 32bit address mode. */
2526 if (flag_code
!= CODE_64BIT
2527 || i
.prefix
[ADDR_PREFIX
])
2528 if ((val
& ~(((addressT
) 2 << 31) - 1)) == 0)
2529 val
= (val
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2532 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2534 char buf1
[40], buf2
[40];
2536 sprint_value (buf1
, val
);
2537 sprint_value (buf2
, val
& mask
);
2538 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2553 a. PREFIX_EXIST if attempting to add a prefix where one from the
2554 same class already exists.
2555 b. PREFIX_LOCK if lock prefix is added.
2556 c. PREFIX_REP if rep/repne prefix is added.
2557 d. PREFIX_DS if ds prefix is added.
2558 e. PREFIX_OTHER if other prefix is added.
2561 static enum PREFIX_GROUP
2562 add_prefix (unsigned int prefix
)
2564 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2567 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2568 && flag_code
== CODE_64BIT
)
2570 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2571 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2572 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2573 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2584 case DS_PREFIX_OPCODE
:
2587 case CS_PREFIX_OPCODE
:
2588 case ES_PREFIX_OPCODE
:
2589 case FS_PREFIX_OPCODE
:
2590 case GS_PREFIX_OPCODE
:
2591 case SS_PREFIX_OPCODE
:
2595 case REPNE_PREFIX_OPCODE
:
2596 case REPE_PREFIX_OPCODE
:
2601 case LOCK_PREFIX_OPCODE
:
2610 case ADDR_PREFIX_OPCODE
:
2614 case DATA_PREFIX_OPCODE
:
2618 if (i
.prefix
[q
] != 0)
2626 i
.prefix
[q
] |= prefix
;
2629 as_bad (_("same type of prefix used twice"));
2635 update_code_flag (int value
, int check
)
2637 PRINTF_LIKE ((*as_error
));
2639 flag_code
= (enum flag_code
) value
;
2640 if (flag_code
== CODE_64BIT
)
2642 cpu_arch_flags
.bitfield
.cpu64
= 1;
2643 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2647 cpu_arch_flags
.bitfield
.cpu64
= 0;
2648 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2650 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2653 as_error
= as_fatal
;
2656 (*as_error
) (_("64bit mode not supported on `%s'."),
2657 cpu_arch_name
? cpu_arch_name
: default_arch
);
2659 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2662 as_error
= as_fatal
;
2665 (*as_error
) (_("32bit mode not supported on `%s'."),
2666 cpu_arch_name
? cpu_arch_name
: default_arch
);
2668 stackop_size
= '\0';
2672 set_code_flag (int value
)
2674 update_code_flag (value
, 0);
2678 set_16bit_gcc_code_flag (int new_code_flag
)
2680 flag_code
= (enum flag_code
) new_code_flag
;
2681 if (flag_code
!= CODE_16BIT
)
2683 cpu_arch_flags
.bitfield
.cpu64
= 0;
2684 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2685 stackop_size
= LONG_MNEM_SUFFIX
;
2689 set_intel_syntax (int syntax_flag
)
2691 /* Find out if register prefixing is specified. */
2692 int ask_naked_reg
= 0;
2695 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2698 int e
= get_symbol_name (&string
);
2700 if (strcmp (string
, "prefix") == 0)
2702 else if (strcmp (string
, "noprefix") == 0)
2705 as_bad (_("bad argument to syntax directive."));
2706 (void) restore_line_pointer (e
);
2708 demand_empty_rest_of_line ();
2710 intel_syntax
= syntax_flag
;
2712 if (ask_naked_reg
== 0)
2713 allow_naked_reg
= (intel_syntax
2714 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2716 allow_naked_reg
= (ask_naked_reg
< 0);
2718 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2720 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2721 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2722 register_prefix
= allow_naked_reg
? "" : "%";
2726 set_intel_mnemonic (int mnemonic_flag
)
2728 intel_mnemonic
= mnemonic_flag
;
2732 set_allow_index_reg (int flag
)
2734 allow_index_reg
= flag
;
2738 set_check (int what
)
2740 enum check_kind
*kind
;
2745 kind
= &operand_check
;
2756 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2759 int e
= get_symbol_name (&string
);
2761 if (strcmp (string
, "none") == 0)
2763 else if (strcmp (string
, "warning") == 0)
2764 *kind
= check_warning
;
2765 else if (strcmp (string
, "error") == 0)
2766 *kind
= check_error
;
2768 as_bad (_("bad argument to %s_check directive."), str
);
2769 (void) restore_line_pointer (e
);
2772 as_bad (_("missing argument for %s_check directive"), str
);
2774 demand_empty_rest_of_line ();
2778 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2779 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2781 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2782 static const char *arch
;
2784 /* Intel LIOM is only supported on ELF. */
2790 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2791 use default_arch. */
2792 arch
= cpu_arch_name
;
2794 arch
= default_arch
;
2797 /* If we are targeting Intel MCU, we must enable it. */
2798 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_IAMCU
2799 || new_flag
.bitfield
.cpuiamcu
)
2802 /* If we are targeting Intel L1OM, we must enable it. */
2803 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2804 || new_flag
.bitfield
.cpul1om
)
2807 /* If we are targeting Intel K1OM, we must enable it. */
2808 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2809 || new_flag
.bitfield
.cpuk1om
)
2812 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2817 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2821 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2824 int e
= get_symbol_name (&string
);
2826 i386_cpu_flags flags
;
2828 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2830 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2832 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2836 cpu_arch_name
= cpu_arch
[j
].name
;
2837 cpu_sub_arch_name
= NULL
;
2838 cpu_arch_flags
= cpu_arch
[j
].flags
;
2839 if (flag_code
== CODE_64BIT
)
2841 cpu_arch_flags
.bitfield
.cpu64
= 1;
2842 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2846 cpu_arch_flags
.bitfield
.cpu64
= 0;
2847 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2849 cpu_arch_isa
= cpu_arch
[j
].type
;
2850 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2851 if (!cpu_arch_tune_set
)
2853 cpu_arch_tune
= cpu_arch_isa
;
2854 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2859 flags
= cpu_flags_or (cpu_arch_flags
,
2862 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2864 if (cpu_sub_arch_name
)
2866 char *name
= cpu_sub_arch_name
;
2867 cpu_sub_arch_name
= concat (name
,
2869 (const char *) NULL
);
2873 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2874 cpu_arch_flags
= flags
;
2875 cpu_arch_isa_flags
= flags
;
2879 = cpu_flags_or (cpu_arch_isa_flags
,
2881 (void) restore_line_pointer (e
);
2882 demand_empty_rest_of_line ();
2887 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2889 /* Disable an ISA extension. */
2890 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2891 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2893 flags
= cpu_flags_and_not (cpu_arch_flags
,
2894 cpu_noarch
[j
].flags
);
2895 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2897 if (cpu_sub_arch_name
)
2899 char *name
= cpu_sub_arch_name
;
2900 cpu_sub_arch_name
= concat (name
, string
,
2901 (const char *) NULL
);
2905 cpu_sub_arch_name
= xstrdup (string
);
2906 cpu_arch_flags
= flags
;
2907 cpu_arch_isa_flags
= flags
;
2909 (void) restore_line_pointer (e
);
2910 demand_empty_rest_of_line ();
2914 j
= ARRAY_SIZE (cpu_arch
);
2917 if (j
>= ARRAY_SIZE (cpu_arch
))
2918 as_bad (_("no such architecture: `%s'"), string
);
2920 *input_line_pointer
= e
;
2923 as_bad (_("missing cpu architecture"));
2925 no_cond_jump_promotion
= 0;
2926 if (*input_line_pointer
== ','
2927 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2932 ++input_line_pointer
;
2933 e
= get_symbol_name (&string
);
2935 if (strcmp (string
, "nojumps") == 0)
2936 no_cond_jump_promotion
= 1;
2937 else if (strcmp (string
, "jumps") == 0)
2940 as_bad (_("no such architecture modifier: `%s'"), string
);
2942 (void) restore_line_pointer (e
);
2945 demand_empty_rest_of_line ();
2948 enum bfd_architecture
2951 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2953 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2954 || flag_code
!= CODE_64BIT
)
2955 as_fatal (_("Intel L1OM is 64bit ELF only"));
2956 return bfd_arch_l1om
;
2958 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2960 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2961 || flag_code
!= CODE_64BIT
)
2962 as_fatal (_("Intel K1OM is 64bit ELF only"));
2963 return bfd_arch_k1om
;
2965 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2967 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2968 || flag_code
== CODE_64BIT
)
2969 as_fatal (_("Intel MCU is 32bit ELF only"));
2970 return bfd_arch_iamcu
;
2973 return bfd_arch_i386
;
2979 if (!strncmp (default_arch
, "x86_64", 6))
2981 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2983 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2984 || default_arch
[6] != '\0')
2985 as_fatal (_("Intel L1OM is 64bit ELF only"));
2986 return bfd_mach_l1om
;
2988 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2990 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2991 || default_arch
[6] != '\0')
2992 as_fatal (_("Intel K1OM is 64bit ELF only"));
2993 return bfd_mach_k1om
;
2995 else if (default_arch
[6] == '\0')
2996 return bfd_mach_x86_64
;
2998 return bfd_mach_x64_32
;
3000 else if (!strcmp (default_arch
, "i386")
3001 || !strcmp (default_arch
, "iamcu"))
3003 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
3005 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
3006 as_fatal (_("Intel MCU is 32bit ELF only"));
3007 return bfd_mach_i386_iamcu
;
3010 return bfd_mach_i386_i386
;
3013 as_fatal (_("unknown architecture"));
3019 const char *hash_err
;
3021 /* Support pseudo prefixes like {disp32}. */
3022 lex_type
['{'] = LEX_BEGIN_NAME
;
3024 /* Initialize op_hash hash table. */
3025 op_hash
= hash_new ();
3028 const insn_template
*optab
;
3029 templates
*core_optab
;
3031 /* Setup for loop. */
3033 core_optab
= XNEW (templates
);
3034 core_optab
->start
= optab
;
3039 if (optab
->name
== NULL
3040 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
3042 /* different name --> ship out current template list;
3043 add to hash table; & begin anew. */
3044 core_optab
->end
= optab
;
3045 hash_err
= hash_insert (op_hash
,
3047 (void *) core_optab
);
3050 as_fatal (_("can't hash %s: %s"),
3054 if (optab
->name
== NULL
)
3056 core_optab
= XNEW (templates
);
3057 core_optab
->start
= optab
;
3062 /* Initialize reg_hash hash table. */
3063 reg_hash
= hash_new ();
3065 const reg_entry
*regtab
;
3066 unsigned int regtab_size
= i386_regtab_size
;
3068 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
3070 hash_err
= hash_insert (reg_hash
, regtab
->reg_name
, (void *) regtab
);
3072 as_fatal (_("can't hash %s: %s"),
3078 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3083 for (c
= 0; c
< 256; c
++)
3088 mnemonic_chars
[c
] = c
;
3089 register_chars
[c
] = c
;
3090 operand_chars
[c
] = c
;
3092 else if (ISLOWER (c
))
3094 mnemonic_chars
[c
] = c
;
3095 register_chars
[c
] = c
;
3096 operand_chars
[c
] = c
;
3098 else if (ISUPPER (c
))
3100 mnemonic_chars
[c
] = TOLOWER (c
);
3101 register_chars
[c
] = mnemonic_chars
[c
];
3102 operand_chars
[c
] = c
;
3104 else if (c
== '{' || c
== '}')
3106 mnemonic_chars
[c
] = c
;
3107 operand_chars
[c
] = c
;
3110 if (ISALPHA (c
) || ISDIGIT (c
))
3111 identifier_chars
[c
] = c
;
3114 identifier_chars
[c
] = c
;
3115 operand_chars
[c
] = c
;
3120 identifier_chars
['@'] = '@';
3123 identifier_chars
['?'] = '?';
3124 operand_chars
['?'] = '?';
3126 digit_chars
['-'] = '-';
3127 mnemonic_chars
['_'] = '_';
3128 mnemonic_chars
['-'] = '-';
3129 mnemonic_chars
['.'] = '.';
3130 identifier_chars
['_'] = '_';
3131 identifier_chars
['.'] = '.';
3133 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
3134 operand_chars
[(unsigned char) *p
] = *p
;
3137 if (flag_code
== CODE_64BIT
)
3139 #if defined (OBJ_COFF) && defined (TE_PE)
3140 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
3143 x86_dwarf2_return_column
= 16;
3145 x86_cie_data_alignment
= -8;
3149 x86_dwarf2_return_column
= 8;
3150 x86_cie_data_alignment
= -4;
3153 /* NB: FUSED_JCC_PADDING frag must have sufficient room so that it
3154 can be turned into BRANCH_PREFIX frag. */
3155 if (align_branch_prefix_size
> MAX_FUSED_JCC_PADDING_SIZE
)
3160 i386_print_statistics (FILE *file
)
3162 hash_print_statistics (file
, "i386 opcode", op_hash
);
3163 hash_print_statistics (file
, "i386 register", reg_hash
);
3168 /* Debugging routines for md_assemble. */
3169 static void pte (insn_template
*);
3170 static void pt (i386_operand_type
);
3171 static void pe (expressionS
*);
3172 static void ps (symbolS
*);
3175 pi (const char *line
, i386_insn
*x
)
3179 fprintf (stdout
, "%s: template ", line
);
3181 fprintf (stdout
, " address: base %s index %s scale %x\n",
3182 x
->base_reg
? x
->base_reg
->reg_name
: "none",
3183 x
->index_reg
? x
->index_reg
->reg_name
: "none",
3184 x
->log2_scale_factor
);
3185 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
3186 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
3187 fprintf (stdout
, " sib: base %x index %x scale %x\n",
3188 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
3189 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
3190 (x
->rex
& REX_W
) != 0,
3191 (x
->rex
& REX_R
) != 0,
3192 (x
->rex
& REX_X
) != 0,
3193 (x
->rex
& REX_B
) != 0);
3194 for (j
= 0; j
< x
->operands
; j
++)
3196 fprintf (stdout
, " #%d: ", j
+ 1);
3198 fprintf (stdout
, "\n");
3199 if (x
->types
[j
].bitfield
.class == Reg
3200 || x
->types
[j
].bitfield
.class == RegMMX
3201 || x
->types
[j
].bitfield
.class == RegSIMD
3202 || x
->types
[j
].bitfield
.class == RegMask
3203 || x
->types
[j
].bitfield
.class == SReg
3204 || x
->types
[j
].bitfield
.class == RegCR
3205 || x
->types
[j
].bitfield
.class == RegDR
3206 || x
->types
[j
].bitfield
.class == RegTR
3207 || x
->types
[j
].bitfield
.class == RegBND
)
3208 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
3209 if (operand_type_check (x
->types
[j
], imm
))
3211 if (operand_type_check (x
->types
[j
], disp
))
3212 pe (x
->op
[j
].disps
);
3217 pte (insn_template
*t
)
3220 fprintf (stdout
, " %d operands ", t
->operands
);
3221 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
3222 if (t
->extension_opcode
!= None
)
3223 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
3224 if (t
->opcode_modifier
.d
)
3225 fprintf (stdout
, "D");
3226 if (t
->opcode_modifier
.w
)
3227 fprintf (stdout
, "W");
3228 fprintf (stdout
, "\n");
3229 for (j
= 0; j
< t
->operands
; j
++)
3231 fprintf (stdout
, " #%d type ", j
+ 1);
3232 pt (t
->operand_types
[j
]);
3233 fprintf (stdout
, "\n");
3240 fprintf (stdout
, " operation %d\n", e
->X_op
);
3241 fprintf (stdout
, " add_number %ld (%lx)\n",
3242 (long) e
->X_add_number
, (long) e
->X_add_number
);
3243 if (e
->X_add_symbol
)
3245 fprintf (stdout
, " add_symbol ");
3246 ps (e
->X_add_symbol
);
3247 fprintf (stdout
, "\n");
3251 fprintf (stdout
, " op_symbol ");
3252 ps (e
->X_op_symbol
);
3253 fprintf (stdout
, "\n");
3260 fprintf (stdout
, "%s type %s%s",
3262 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3263 segment_name (S_GET_SEGMENT (s
)));
3266 static struct type_name
3268 i386_operand_type mask
;
3271 const type_names
[] =
3273 { OPERAND_TYPE_REG8
, "r8" },
3274 { OPERAND_TYPE_REG16
, "r16" },
3275 { OPERAND_TYPE_REG32
, "r32" },
3276 { OPERAND_TYPE_REG64
, "r64" },
3277 { OPERAND_TYPE_ACC8
, "acc8" },
3278 { OPERAND_TYPE_ACC16
, "acc16" },
3279 { OPERAND_TYPE_ACC32
, "acc32" },
3280 { OPERAND_TYPE_ACC64
, "acc64" },
3281 { OPERAND_TYPE_IMM8
, "i8" },
3282 { OPERAND_TYPE_IMM8
, "i8s" },
3283 { OPERAND_TYPE_IMM16
, "i16" },
3284 { OPERAND_TYPE_IMM32
, "i32" },
3285 { OPERAND_TYPE_IMM32S
, "i32s" },
3286 { OPERAND_TYPE_IMM64
, "i64" },
3287 { OPERAND_TYPE_IMM1
, "i1" },
3288 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
3289 { OPERAND_TYPE_DISP8
, "d8" },
3290 { OPERAND_TYPE_DISP16
, "d16" },
3291 { OPERAND_TYPE_DISP32
, "d32" },
3292 { OPERAND_TYPE_DISP32S
, "d32s" },
3293 { OPERAND_TYPE_DISP64
, "d64" },
3294 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
3295 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
3296 { OPERAND_TYPE_CONTROL
, "control reg" },
3297 { OPERAND_TYPE_TEST
, "test reg" },
3298 { OPERAND_TYPE_DEBUG
, "debug reg" },
3299 { OPERAND_TYPE_FLOATREG
, "FReg" },
3300 { OPERAND_TYPE_FLOATACC
, "FAcc" },
3301 { OPERAND_TYPE_SREG
, "SReg" },
3302 { OPERAND_TYPE_REGMMX
, "rMMX" },
3303 { OPERAND_TYPE_REGXMM
, "rXMM" },
3304 { OPERAND_TYPE_REGYMM
, "rYMM" },
3305 { OPERAND_TYPE_REGZMM
, "rZMM" },
3306 { OPERAND_TYPE_REGMASK
, "Mask reg" },
3310 pt (i386_operand_type t
)
3313 i386_operand_type a
;
3315 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3317 a
= operand_type_and (t
, type_names
[j
].mask
);
3318 if (operand_type_equal (&a
, &type_names
[j
].mask
))
3319 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3324 #endif /* DEBUG386 */
3326 static bfd_reloc_code_real_type
3327 reloc (unsigned int size
,
3330 bfd_reloc_code_real_type other
)
3332 if (other
!= NO_RELOC
)
3334 reloc_howto_type
*rel
;
3339 case BFD_RELOC_X86_64_GOT32
:
3340 return BFD_RELOC_X86_64_GOT64
;
3342 case BFD_RELOC_X86_64_GOTPLT64
:
3343 return BFD_RELOC_X86_64_GOTPLT64
;
3345 case BFD_RELOC_X86_64_PLTOFF64
:
3346 return BFD_RELOC_X86_64_PLTOFF64
;
3348 case BFD_RELOC_X86_64_GOTPC32
:
3349 other
= BFD_RELOC_X86_64_GOTPC64
;
3351 case BFD_RELOC_X86_64_GOTPCREL
:
3352 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3354 case BFD_RELOC_X86_64_TPOFF32
:
3355 other
= BFD_RELOC_X86_64_TPOFF64
;
3357 case BFD_RELOC_X86_64_DTPOFF32
:
3358 other
= BFD_RELOC_X86_64_DTPOFF64
;
3364 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3365 if (other
== BFD_RELOC_SIZE32
)
3368 other
= BFD_RELOC_SIZE64
;
3371 as_bad (_("there are no pc-relative size relocations"));
3377 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3378 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3381 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3383 as_bad (_("unknown relocation (%u)"), other
);
3384 else if (size
!= bfd_get_reloc_size (rel
))
3385 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3386 bfd_get_reloc_size (rel
),
3388 else if (pcrel
&& !rel
->pc_relative
)
3389 as_bad (_("non-pc-relative relocation for pc-relative field"));
3390 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3392 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3394 as_bad (_("relocated field and relocation type differ in signedness"));
3403 as_bad (_("there are no unsigned pc-relative relocations"));
3406 case 1: return BFD_RELOC_8_PCREL
;
3407 case 2: return BFD_RELOC_16_PCREL
;
3408 case 4: return BFD_RELOC_32_PCREL
;
3409 case 8: return BFD_RELOC_64_PCREL
;
3411 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3418 case 4: return BFD_RELOC_X86_64_32S
;
3423 case 1: return BFD_RELOC_8
;
3424 case 2: return BFD_RELOC_16
;
3425 case 4: return BFD_RELOC_32
;
3426 case 8: return BFD_RELOC_64
;
3428 as_bad (_("cannot do %s %u byte relocation"),
3429 sign
> 0 ? "signed" : "unsigned", size
);
3435 /* Here we decide which fixups can be adjusted to make them relative to
3436 the beginning of the section instead of the symbol. Basically we need
3437 to make sure that the dynamic relocations are done correctly, so in
3438 some cases we force the original symbol to be used. */
3441 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3443 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3447 /* Don't adjust pc-relative references to merge sections in 64-bit
3449 if (use_rela_relocations
3450 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3454 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3455 and changed later by validate_fix. */
3456 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3457 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3460 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3461 for size relocations. */
3462 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3463 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3464 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3465 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3466 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3467 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3468 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3469 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3470 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3471 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3472 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3473 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3474 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3475 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3476 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3477 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3478 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3479 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3480 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3481 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3482 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3483 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3484 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3485 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3486 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3487 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3488 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3489 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3490 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3491 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3492 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
3499 intel_float_operand (const char *mnemonic
)
3501 /* Note that the value returned is meaningful only for opcodes with (memory)
3502 operands, hence the code here is free to improperly handle opcodes that
3503 have no operands (for better performance and smaller code). */
3505 if (mnemonic
[0] != 'f')
3506 return 0; /* non-math */
3508 switch (mnemonic
[1])
3510 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
3511 the fs segment override prefix not currently handled because no
3512 call path can make opcodes without operands get here */
3514 return 2 /* integer op */;
3516 if (mnemonic
[2] == 'd' && (mnemonic
[3] == 'c' || mnemonic
[3] == 'e'))
3517 return 3; /* fldcw/fldenv */
3520 if (mnemonic
[2] != 'o' /* fnop */)
3521 return 3; /* non-waiting control op */
3524 if (mnemonic
[2] == 's')
3525 return 3; /* frstor/frstpm */
3528 if (mnemonic
[2] == 'a')
3529 return 3; /* fsave */
3530 if (mnemonic
[2] == 't')
3532 switch (mnemonic
[3])
3534 case 'c': /* fstcw */
3535 case 'd': /* fstdw */
3536 case 'e': /* fstenv */
3537 case 's': /* fsts[gw] */
3543 if (mnemonic
[2] == 'r' || mnemonic
[2] == 's')
3544 return 0; /* fxsave/fxrstor are not really math ops */
3551 /* Build the VEX prefix. */
3554 build_vex_prefix (const insn_template
*t
)
3556 unsigned int register_specifier
;
3557 unsigned int implied_prefix
;
3558 unsigned int vector_length
;
3561 /* Check register specifier. */
3562 if (i
.vex
.register_specifier
)
3564 register_specifier
=
3565 ~register_number (i
.vex
.register_specifier
) & 0xf;
3566 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3569 register_specifier
= 0xf;
3571 /* Use 2-byte VEX prefix by swapping destination and source operand
3572 if there are more than 1 register operand. */
3573 if (i
.reg_operands
> 1
3574 && i
.vec_encoding
!= vex_encoding_vex3
3575 && i
.dir_encoding
== dir_encoding_default
3576 && i
.operands
== i
.reg_operands
3577 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3578 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3579 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3582 unsigned int xchg
= i
.operands
- 1;
3583 union i386_op temp_op
;
3584 i386_operand_type temp_type
;
3586 temp_type
= i
.types
[xchg
];
3587 i
.types
[xchg
] = i
.types
[0];
3588 i
.types
[0] = temp_type
;
3589 temp_op
= i
.op
[xchg
];
3590 i
.op
[xchg
] = i
.op
[0];
3593 gas_assert (i
.rm
.mode
== 3);
3597 i
.rm
.regmem
= i
.rm
.reg
;
3600 if (i
.tm
.opcode_modifier
.d
)
3601 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3602 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
3603 else /* Use the next insn. */
3607 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3608 are no memory operands and at least 3 register ones. */
3609 if (i
.reg_operands
>= 3
3610 && i
.vec_encoding
!= vex_encoding_vex3
3611 && i
.reg_operands
== i
.operands
- i
.imm_operands
3612 && i
.tm
.opcode_modifier
.vex
3613 && i
.tm
.opcode_modifier
.commutative
3614 && (i
.tm
.opcode_modifier
.sse2avx
|| optimize
> 1)
3616 && i
.vex
.register_specifier
3617 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3619 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3620 union i386_op temp_op
;
3621 i386_operand_type temp_type
;
3623 gas_assert (i
.tm
.opcode_modifier
.vexopcode
== VEX0F
);
3624 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3625 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3626 &i
.types
[i
.operands
- 3]));
3627 gas_assert (i
.rm
.mode
== 3);
3629 temp_type
= i
.types
[xchg
];
3630 i
.types
[xchg
] = i
.types
[xchg
+ 1];
3631 i
.types
[xchg
+ 1] = temp_type
;
3632 temp_op
= i
.op
[xchg
];
3633 i
.op
[xchg
] = i
.op
[xchg
+ 1];
3634 i
.op
[xchg
+ 1] = temp_op
;
3637 xchg
= i
.rm
.regmem
| 8;
3638 i
.rm
.regmem
= ~register_specifier
& 0xf;
3639 gas_assert (!(i
.rm
.regmem
& 8));
3640 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3641 register_specifier
= ~xchg
& 0xf;
3644 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3645 vector_length
= avxscalar
;
3646 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3652 /* Determine vector length from the last multi-length vector
3655 for (op
= t
->operands
; op
--;)
3656 if (t
->operand_types
[op
].bitfield
.xmmword
3657 && t
->operand_types
[op
].bitfield
.ymmword
3658 && i
.types
[op
].bitfield
.ymmword
)
3665 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3670 case DATA_PREFIX_OPCODE
:
3673 case REPE_PREFIX_OPCODE
:
3676 case REPNE_PREFIX_OPCODE
:
3683 /* Check the REX.W bit and VEXW. */
3684 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3685 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3686 else if (i
.tm
.opcode_modifier
.vexw
)
3687 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3689 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3691 /* Use 2-byte VEX prefix if possible. */
3693 && i
.vec_encoding
!= vex_encoding_vex3
3694 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3695 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3697 /* 2-byte VEX prefix. */
3701 i
.vex
.bytes
[0] = 0xc5;
3703 /* Check the REX.R bit. */
3704 r
= (i
.rex
& REX_R
) ? 0 : 1;
3705 i
.vex
.bytes
[1] = (r
<< 7
3706 | register_specifier
<< 3
3707 | vector_length
<< 2
3712 /* 3-byte VEX prefix. */
3717 switch (i
.tm
.opcode_modifier
.vexopcode
)
3721 i
.vex
.bytes
[0] = 0xc4;
3725 i
.vex
.bytes
[0] = 0xc4;
3729 i
.vex
.bytes
[0] = 0xc4;
3733 i
.vex
.bytes
[0] = 0x8f;
3737 i
.vex
.bytes
[0] = 0x8f;
3741 i
.vex
.bytes
[0] = 0x8f;
3747 /* The high 3 bits of the second VEX byte are 1's compliment
3748 of RXB bits from REX. */
3749 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3751 i
.vex
.bytes
[2] = (w
<< 7
3752 | register_specifier
<< 3
3753 | vector_length
<< 2
3758 static INLINE bfd_boolean
3759 is_evex_encoding (const insn_template
*t
)
3761 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3762 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3763 || t
->opcode_modifier
.sae
;
3766 static INLINE bfd_boolean
3767 is_any_vex_encoding (const insn_template
*t
)
3769 return t
->opcode_modifier
.vex
|| t
->opcode_modifier
.vexopcode
3770 || is_evex_encoding (t
);
/* Build the EVEX prefix.  Fills in the four bytes of i.vex.bytes[]
   for the instruction currently described by the global `i'.
   NOTE(review): this excerpt lost a number of structural lines in
   extraction (braces, `else' arms, switch-case bodies, declarations
   such as `w', `m' and `op'); the statements below are the surviving
   ones -- verify against the complete file before relying on flow.  */
build_evex_prefix (void)
  unsigned int register_specifier;
  unsigned int implied_prefix;
  /* Bits of i.vrex consumed while building the prefix; asserted to be
     fully consumed below.  */
  rex_byte vrex_used = 0;

  /* Check register specifier.  */
  if (i.vex.register_specifier)
      gas_assert ((i.vrex & REX_X) == 0);

      register_specifier = i.vex.register_specifier->reg_num;
      if ((i.vex.register_specifier->reg_flags & RegRex))
	register_specifier += 8;
      /* The upper 16 registers are encoded in the fourth byte of the
	 EVEX prefix.  */
      if (!(i.vex.register_specifier->reg_flags & RegVRex))
	i.vex.bytes[3] = 0x8;
      /* vvvv is stored one's-complemented.  */
      register_specifier = ~register_specifier & 0xf;
      /* NOTE(review): the following belongs to the missing `else' arm
	 (no register specifier): encode vvvv as all ones.  */
      register_specifier = 0xf;

      /* Encode upper 16 vector index register in the fourth byte of
	 the EVEX prefix.  */
      if (!(i.vrex & REX_X))
	i.vex.bytes[3] = 0x8;

  /* Derive the implied legacy prefix from the second opcode byte.
     NOTE(review): case bodies (assignments to implied_prefix) are
     missing from this excerpt.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    case DATA_PREFIX_OPCODE:
    case REPE_PREFIX_OPCODE:
    case REPNE_PREFIX_OPCODE:

  /* 4 byte EVEX prefix.  */
  i.vex.bytes[0] = 0x62;

  /* mmmm bits -- case bodies (assignments to `m') missing here.  */
  switch (i.tm.opcode_modifier.vexopcode)

  /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
     bits from REX.  */
  i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

  /* The fifth bit of the second EVEX byte is 1's compliment of the
     REX_R bit in VREX.  */
  if (!(i.vrex & REX_R))
    i.vex.bytes[1] |= 0x10;

  if ((i.reg_operands + i.imm_operands) == i.operands)
      /* When all operands are registers, the REX_X bit in REX is not
	 used.  We reuse it to encode the upper 16 registers, which is
	 indicated by the REX_B bit in VREX.  The REX_X bit is encoded
	 as 1's compliment.  */
      if ((i.vrex & REX_B))
	  i.vex.bytes[1] &= ~0x40;

  /* EVEX instructions shouldn't need the REX prefix.  */
  i.vrex &= ~vrex_used;
  gas_assert (i.vrex == 0);

  /* Check the REX.W bit and VEXW.  */
  if (i.tm.opcode_modifier.vexw == VEXWIG)
    w = (evexwig == evexw1 || (i.rex & REX_W)) ? 1 : 0;
  else if (i.tm.opcode_modifier.vexw)
    w = i.tm.opcode_modifier.vexw == VEXW1 ? 1 : 0;
  /* NOTE(review): final `else' keyword missing -- derive W from code
     size / evexwig default.  */
    w = (flag_code == CODE_64BIT
	 ? i.rex & REX_W
	 : evexwig == evexw1) ? 1 : 0;

  /* Encode the U bit.  */
  implied_prefix |= 0x4;

  /* The third byte of the EVEX prefix.  */
  i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);

  /* The fourth byte of the EVEX prefix.  */
  /* The zeroing-masking bit.  */
  if (i.mask && i.mask->zeroing)
    i.vex.bytes[3] |= 0x80;

  /* Don't always set the broadcast bit if there is no RC.  */
      /* Encode the vector length.  */
      unsigned int vec_length;

      if (!i.tm.opcode_modifier.evex
	  || i.tm.opcode_modifier.evex == EVEXDYN)
	  /* Determine vector length from the last multi-length vector
	     operand.  */
	  for (op = i.operands; op--;)
	    if (i.tm.operand_types[op].bitfield.xmmword
		+ i.tm.operand_types[op].bitfield.ymmword
		+ i.tm.operand_types[op].bitfield.zmmword > 1)
		if (i.types[op].bitfield.zmmword)
		    i.tm.opcode_modifier.evex = EVEX512;
		else if (i.types[op].bitfield.ymmword)
		    i.tm.opcode_modifier.evex = EVEX256;
		else if (i.types[op].bitfield.xmmword)
		    i.tm.opcode_modifier.evex = EVEX128;
		else if (i.broadcast && (int) op == i.broadcast->operand)
		    /* NOTE(review): case labels (byte counts) missing;
		       only the three assignments survive.  */
		    switch (i.broadcast->bytes)
			i.tm.opcode_modifier.evex = EVEX512;
			i.tm.opcode_modifier.evex = EVEX256;
			i.tm.opcode_modifier.evex = EVEX128;

	  if (op >= MAX_OPERANDS)

      /* Map the (possibly just resolved) EVEX length to the LL' bits
	 (bits 5-6 of byte 3); case labels partially missing.  */
      switch (i.tm.opcode_modifier.evex)
	case EVEXLIG: /* LL' is ignored */
	  vec_length = evexlig << 5;
	  vec_length = 0 << 5;
	  vec_length = 1 << 5;
	  vec_length = 2 << 5;

      i.vex.bytes[3] |= vec_length;
      /* Encode the broadcast bit.  */
	i.vex.bytes[3] |= 0x10;
    /* Embedded rounding control / SAE (reuses the b bit, 0x10).  */
    if (i.rounding->type != saeonly)
      i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
      i.vex.bytes[3] |= 0x10 | (evexrcig << 5);

  /* Merge-masking register (aaa bits).  */
  if (i.mask && i.mask->mask)
    i.vex.bytes[3] |= i.mask->mask->reg_num;
/* Synthesize an 8-bit immediate operand holding tm.extension_opcode
   and clear the extension opcode, so the suffix byte is emitted via
   the normal immediate path.  */
process_immext (void)
  /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
     which is coded in the same place as an 8-bit immediate field
     would be.  Here we fake an 8-bit immediate operand from the
     opcode suffix stored in tm.extension_opcode.

     AVX instructions also use this encoding, for some of
     3 argument instructions.  */

  gas_assert (i.imm_operands <= 1
	      || (is_any_vex_encoding (&i.tm)
		  && i.operands <= 4)));

  /* NOTE(review): the declaration of `exp' and the increment of
     i.operands are missing from this excerpt.  */
  exp = &im_expressions[i.imm_operands++];
  i.op[i.operands].imms = exp;
  i.types[i.operands] = imm8;

  exp->X_op = O_constant;
  exp->X_add_number = i.tm.extension_opcode;
  /* Consumed: prevent the extension opcode from also being emitted in
     ModRM.reg.  */
  i.tm.extension_opcode = None;
  /* NOTE(review): the function header (check_hle) and its case labels
     HLEPrefixNone/HLEPrefixLock/HLEPrefixAny, plus the return
     statements, are missing from this excerpt.  Validates the current
     insn against an active xacquire/xrelease (HLE) prefix, reporting
     errors via as_bad.  */
  switch (i.tm.opcode_modifier.hleprefixok)
      /* Insn may not carry an HLE prefix at all.  */
      as_bad (_("invalid instruction `%s' after `%s'"),
	      i.tm.name, i.hle_prefix);
      /* HLE requires an explicit lock prefix on lockable insns.  */
      if (i.prefix[LOCK_PREFIX])
	  as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
    case HLEPrefixRelease:
      /* Only xrelease is permitted on this insn.  */
      if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
	  as_bad (_("instruction `%s' after `xacquire' not allowed"),
      /* xrelease additionally requires a memory destination.  */
      if (i.mem_operands == 0 || !(i.flags[i.operands - 1] & Operand_Mem))
	  as_bad (_("memory destination needed for instruction `%s'"
		    " after `xrelease'"), i.tm.name);
/* Try the shortest encoding by shortening operand size.  Rewrites the
   matched template in-place (i.tm) before prefix/opcode emission.
   NOTE(review): this excerpt lost structural lines (braces, the
   byte-register conversion statements, `return's); the statements
   below are the surviving ones -- verify against the complete file.  */
optimize_encoding (void)
  if (optimize_for_space
      && !is_any_vex_encoding (&i.tm)
      && i.reg_operands == 1
      && i.imm_operands == 1
      && !i.types[1].bitfield.byte
      && i.op[0].imms->X_op == O_constant
      && fits_in_imm7 (i.op[0].imms->X_add_number)
      && (i.tm.base_opcode == 0xa8
	  || (i.tm.base_opcode == 0xf6
	      && i.tm.extension_opcode == 0x0)))
      /* Optimize: -Os:
	   test $imm7, %r64/%r32/%r16  ->  test $imm7, %r8
       */
      unsigned int base_regnum = i.op[1].regs->reg_num;
      /* Outside 64-bit mode only AL..BL have byte forms.  */
      if (flag_code == CODE_64BIT || base_regnum < 4)
	  i.types[1].bitfield.byte = 1;
	  /* Ignore the suffix.  */
	  /* Convert to byte registers.  NOTE(review): the register
	     re-pointing statements are missing from this excerpt.  */
	  if (i.types[1].bitfield.word)
	  else if (i.types[1].bitfield.dword)
	  if (!(i.op[1].regs->reg_flags & RegRex) && base_regnum < 4)
  else if (flag_code == CODE_64BIT
	   && !is_any_vex_encoding (&i.tm)
	   && ((i.types[1].bitfield.qword
		&& i.reg_operands == 1
		&& i.imm_operands == 1
		&& i.op[0].imms->X_op == O_constant
		&& ((i.tm.base_opcode == 0xb8
		     && i.tm.extension_opcode == None
		     && fits_in_unsigned_long (i.op[0].imms->X_add_number))
		    || (fits_in_imm31 (i.op[0].imms->X_add_number)
			&& ((i.tm.base_opcode == 0x24
			     || i.tm.base_opcode == 0xa8)
			    || (i.tm.base_opcode == 0x80
				&& i.tm.extension_opcode == 0x4)
			    || ((i.tm.base_opcode == 0xf6
				 || (i.tm.base_opcode | 1) == 0xc7)
				&& i.tm.extension_opcode == 0x0)))
		    || (fits_in_imm7 (i.op[0].imms->X_add_number)
			&& i.tm.base_opcode == 0x83
			&& i.tm.extension_opcode == 0x4)))
	       || (i.types[0].bitfield.qword
		   && ((i.reg_operands == 2
			&& i.op[0].regs == i.op[1].regs
			&& (i.tm.base_opcode == 0x30
			    || i.tm.base_opcode == 0x28))
		       || (i.reg_operands == 1
			   && i.tm.base_opcode == 0x30)))))
      /* Optimize: -O:
	   andq $imm31, %r64   -> andl $imm31, %r32
	   andq $imm7, %r64    -> andl $imm7, %r32
	   testq $imm31, %r64  -> testl $imm31, %r32
	   xorq %r64, %r64     -> xorl %r32, %r32
	   subq %r64, %r64     -> subl %r32, %r32
	   movq $imm31, %r64   -> movl $imm31, %r32
	   movq $imm32, %r64   -> movl $imm32, %r32
       */
      /* Drop the now-redundant REX.W.  */
      i.tm.opcode_modifier.norex64 = 1;
      if (i.tm.base_opcode == 0xb8 || (i.tm.base_opcode | 1) == 0xc7)
	  /* Handle
	       movq $imm31, %r64   -> movl $imm31, %r32
	       movq $imm32, %r64   -> movl $imm32, %r32
	   */
	  i.tm.operand_types[0].bitfield.imm32 = 1;
	  i.tm.operand_types[0].bitfield.imm32s = 0;
	  i.tm.operand_types[0].bitfield.imm64 = 0;
	  i.types[0].bitfield.imm32 = 1;
	  i.types[0].bitfield.imm32s = 0;
	  i.types[0].bitfield.imm64 = 0;
	  i.types[1].bitfield.dword = 1;
	  i.types[1].bitfield.qword = 0;
	  if ((i.tm.base_opcode | 1) == 0xc7)
	      /* Handle
		   movq $imm31, %r64   -> movl $imm31, %r32
	       */
	      i.tm.base_opcode = 0xb8;
	      i.tm.extension_opcode = None;
	      i.tm.opcode_modifier.w = 0;
	      i.tm.opcode_modifier.modrm = 0;
  else if (optimize > 1
	   && !optimize_for_space
	   && !is_any_vex_encoding (&i.tm)
	   && i.reg_operands == 2
	   && i.op[0].regs == i.op[1].regs
	   && ((i.tm.base_opcode & ~(Opcode_D | 1)) == 0x8
	       || (i.tm.base_opcode & ~(Opcode_D | 1)) == 0x20)
	   && (flag_code != CODE_64BIT || !i.types[0].bitfield.dword))
      /* Optimize: -O2:
	   andb %rN, %rN  -> testb %rN, %rN
	   andw %rN, %rN  -> testw %rN, %rN
	   andq %rN, %rN  -> testq %rN, %rN
	   orb %rN, %rN   -> testb %rN, %rN
	   orw %rN, %rN   -> testw %rN, %rN
	   orq %rN, %rN   -> testq %rN, %rN

	 and outside of 64-bit mode

	   andl %rN, %rN  -> testl %rN, %rN
	   orl %rN, %rN   -> testl %rN, %rN
       */
      i.tm.base_opcode = 0x84 | (i.tm.base_opcode & 1);
  else if (i.reg_operands == 3
	   && i.op[0].regs == i.op[1].regs
	   && !i.types[2].bitfield.xmmword
	   && (i.tm.opcode_modifier.vex
	       || ((!i.mask || i.mask->zeroing)
		   && is_evex_encoding (&i.tm)
		   && (i.vec_encoding != vex_encoding_evex
		       || cpu_arch_isa_flags.bitfield.cpuavx512vl
		       || i.tm.cpu_flags.bitfield.cpuavx512vl
		       || (i.tm.operand_types[2].bitfield.zmmword
			   && i.types[2].bitfield.ymmword))))
	   && ((i.tm.base_opcode == 0x55
		|| i.tm.base_opcode == 0x6655
		|| i.tm.base_opcode == 0x66df
		|| i.tm.base_opcode == 0x57
		|| i.tm.base_opcode == 0x6657
		|| i.tm.base_opcode == 0x66ef
		|| i.tm.base_opcode == 0x66f8
		|| i.tm.base_opcode == 0x66f9
		|| i.tm.base_opcode == 0x66fa
		|| i.tm.base_opcode == 0x66fb
		|| i.tm.base_opcode == 0x42
		|| i.tm.base_opcode == 0x6642
		|| i.tm.base_opcode == 0x47
		|| i.tm.base_opcode == 0x6647)
	       && i.tm.extension_opcode == None))
      /* Optimize: -O1:
	   VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
	   vpsubq and vpsubw (list truncated in this excerpt):
	     EVEX VOP %zmmM, %zmmM, %zmmN
	       -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
	       -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
	     EVEX VOP %ymmM, %ymmM, %ymmN
	       -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
	       -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
	     VEX VOP %ymmM, %ymmM, %ymmN
	       -> VEX VOP %xmmM, %xmmM, %xmmN
	   VOP, one of vpandn and vpxor:
	     VEX VOP %ymmM, %ymmM, %ymmN
	       -> VEX VOP %xmmM, %xmmM, %xmmN
	   VOP, one of vpandnd and vpandnq:
	     EVEX VOP %zmmM, %zmmM, %zmmN
	       -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
	       -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
	     EVEX VOP %ymmM, %ymmM, %ymmN
	       -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
	       -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
	   VOP, one of vpxord and vpxorq:
	     EVEX VOP %zmmM, %zmmM, %zmmN
	       -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
	       -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
	     EVEX VOP %ymmM, %ymmM, %ymmN
	       -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
	       -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
	   VOP, one of kxord and kxorq:
	     VEX VOP %kM, %kM, %kN
	       -> VEX kxorw %kM, %kM, %kN
	   VOP, one of kandnd and kandnq:
	     VEX VOP %kM, %kM, %kN
	       -> VEX kandnw %kM, %kM, %kN
       */
      if (is_evex_encoding (&i.tm))
	  if (i.vec_encoding != vex_encoding_evex)
	      i.tm.opcode_modifier.vex = VEX128;
	      i.tm.opcode_modifier.vexw = VEXW0;
	      i.tm.opcode_modifier.evex = 0;
	  else if (optimize > 1)
	    i.tm.opcode_modifier.evex = EVEX128;
      else if (i.tm.operand_types[0].bitfield.class == RegMask)
	  /* Mask-register forms: strip the 0x66 prefix byte and force
	     the word variant.  */
	  i.tm.base_opcode &= 0xff;
	  i.tm.opcode_modifier.vexw = VEXW0;
	i.tm.opcode_modifier.vex = VEX128;

      if (i.tm.opcode_modifier.vex)
	for (j = 0; j < 3; j++)
	    i.types[j].bitfield.xmmword = 1;
	    i.types[j].bitfield.ymmword = 0;
  else if (i.vec_encoding != vex_encoding_evex
	   && !i.types[0].bitfield.zmmword
	   && !i.types[1].bitfield.zmmword
	   && is_evex_encoding (&i.tm)
	   && ((i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0x666f
	       || (i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf36f
	       || (i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf26f
	       || (i.tm.base_opcode & ~4) == 0x66db
	       || (i.tm.base_opcode & ~4) == 0x66eb)
	   && i.tm.extension_opcode == None)
      /* Optimize: -O1:
	   VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
	   vmovdqu32 and vmovdqu64:
	     EVEX VOP %xmmM, %xmmN
	       -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
	     EVEX VOP %ymmM, %ymmN
	       -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
	     EVEX VOP %xmmM, mem
	       -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
	     EVEX VOP %ymmM, mem
	       -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
	     EVEX VOP mem, %xmmN
	       -> VEX vmovdqa|vmovdqu mem, %xmmN (N < 16)
	     EVEX VOP mem, %ymmN
	       -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
	   VOP, one of vpand, vpandn, vpor, vpxor:
	     EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
	       -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
	     EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
	       -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
	     EVEX VOP{d,q} mem, %xmmM, %xmmN
	       -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
	     EVEX VOP{d,q} mem, %ymmM, %ymmN
	       -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
       */
      for (j = 0; j < i.operands; j++)
	if (operand_type_check (i.types[j], disp)
	    && i.op[j].disps->X_op == O_constant)
	    /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
	       has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
	       bytes, we choose EVEX Disp8 over VEX Disp32.  */
	    int evex_disp8, vex_disp8;
	    unsigned int memshift = i.memshift;
	    offsetT n = i.op[j].disps->X_add_number;

	    evex_disp8 = fits_in_disp8 (n);
	    /* NOTE(review): the i.memshift reset between these two
	       probes is missing from this excerpt.  */
	    vex_disp8 = fits_in_disp8 (n);
	    if (evex_disp8 != vex_disp8)
		i.memshift = memshift;
	    i.types[j].bitfield.disp8 = vex_disp8;
      /* Prefer the F3-prefixed vmovdqu over the F2 form.  */
      if ((i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf26f)
	i.tm.base_opcode ^= 0xf36f ^ 0xf26f;
      i.tm.opcode_modifier.vex
	= i.types[0].bitfield.ymmword ? VEX256 : VEX128;
      i.tm.opcode_modifier.vexw = VEXW0;
      /* VPAND, VPOR, and VPXOR are commutative.  */
      if (i.reg_operands == 3 && i.tm.base_opcode != 0x66df)
	i.tm.opcode_modifier.commutative = 1;
      /* Scrub all EVEX-only attributes from the rewritten template.  */
      i.tm.opcode_modifier.evex = 0;
      i.tm.opcode_modifier.masking = 0;
      i.tm.opcode_modifier.broadcast = 0;
      i.tm.opcode_modifier.disp8memshift = 0;
	  i.types[j].bitfield.disp8
	    = fits_in_disp8 (i.op[j].disps->X_add_number);
/* Return non-zero for load instruction.  Used by insert_lfence_after
   to decide whether -mlfence-after-load applies.
   NOTE(review): the function header (load_insn_p) and the early
   `return' statements after each classifying test are missing from
   this excerpt; only the conditions survive.  */
  int any_vex_p = is_any_vex_encoding (&i.tm);
  /* Fold the direction/width bit so one compare covers both forms.  */
  unsigned int base_opcode = i.tm.base_opcode | 1;

  /* Anysize insns: lea, invlpg, clflush, prefetchnta, prefetcht0,
     prefetcht1, prefetcht2, prefetchtw, bndmk, bndcl, bndcu, bndcn,
     bndstx, bndldx, prefetchwt1, clflushopt, clwb, cldemote.  */
  if (i.tm.opcode_modifier.anysize)

  /* pop, popf, popa.  */
  if (strcmp (i.tm.name, "pop") == 0
      || i.tm.base_opcode == 0x9d
      || i.tm.base_opcode == 0x61)

  /* movs, cmps, lods, scas.  */
  if ((i.tm.base_opcode | 0xb) == 0xaf)

  if (base_opcode == 0x6f
      || i.tm.base_opcode == 0xd7)
  /* NB: For AMD-specific insns with implicit memory operands,
     they're intentionally not covered.  */

  /* No memory operand.  */
  if (!i.mem_operands)

  if (i.tm.base_opcode == 0xae
      && i.tm.opcode_modifier.vex
      && i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.extension_opcode == 2)

  /* test, not, neg, mul, imul, div, idiv.  */
  if ((i.tm.base_opcode == 0xf6 || i.tm.base_opcode == 0xf7)
      && i.tm.extension_opcode != 1)

  if (base_opcode == 0xff && i.tm.extension_opcode <= 1)

  /* add, or, adc, sbb, and, sub, xor, cmp.  */
  if (i.tm.base_opcode >= 0x80 && i.tm.base_opcode <= 0x83)

  /* bt, bts, btr, btc.  */
  if (i.tm.base_opcode == 0xfba
      && (i.tm.extension_opcode >= 4 && i.tm.extension_opcode <= 7))

  /* rol, ror, rcl, rcr, shl/sal, shr, sar.  */
  if ((base_opcode == 0xc1
       || (i.tm.base_opcode >= 0xd0 && i.tm.base_opcode <= 0xd3))
      && i.tm.extension_opcode != 6)

  /* cmpxchg8b, cmpxchg16b, xrstors.  */
  if (i.tm.base_opcode == 0xfc7
      && (i.tm.extension_opcode == 1 || i.tm.extension_opcode == 3))

  /* fxrstor, ldmxcsr, xrstor.  */
  if (i.tm.base_opcode == 0xfae
      && (i.tm.extension_opcode == 1
	  || i.tm.extension_opcode == 2
	  || i.tm.extension_opcode == 5))

  /* lgdt, lidt, lmsw.  */
  if (i.tm.base_opcode == 0xf01
      && (i.tm.extension_opcode == 2
	  || i.tm.extension_opcode == 3
	  || i.tm.extension_opcode == 6))

  if (i.tm.base_opcode == 0xfc7
      && i.tm.extension_opcode == 6)

  /* Check for x87 instructions.  */
  if (i.tm.base_opcode >= 0xd8 && i.tm.base_opcode <= 0xdf)
      /* Skip fst, fstp, fstenv, fstcw.  */
      if (i.tm.base_opcode == 0xd9
	  && (i.tm.extension_opcode == 2
	      || i.tm.extension_opcode == 3
	      || i.tm.extension_opcode == 6
	      || i.tm.extension_opcode == 7))

      /* Skip fisttp, fist, fistp, fstp.  */
      if (i.tm.base_opcode == 0xdb
	  && (i.tm.extension_opcode == 1
	      || i.tm.extension_opcode == 2
	      || i.tm.extension_opcode == 3
	      || i.tm.extension_opcode == 7))

      /* Skip fisttp, fst, fstp, fsave, fstsw.  */
      if (i.tm.base_opcode == 0xdd
	  && (i.tm.extension_opcode == 1
	      || i.tm.extension_opcode == 2
	      || i.tm.extension_opcode == 3
	      || i.tm.extension_opcode == 6
	      || i.tm.extension_opcode == 7))

      /* Skip fisttp, fist, fistp, fbstp, fistp.  */
      if (i.tm.base_opcode == 0xdf
	  && (i.tm.extension_opcode == 1
	      || i.tm.extension_opcode == 2
	      || i.tm.extension_opcode == 3
	      || i.tm.extension_opcode == 6
	      || i.tm.extension_opcode == 7))

  /* Last operand is the destination.  */
  dest = i.operands - 1;

  /* Check fake imm8 operand and 3 source operands.  */
  if ((i.tm.opcode_modifier.immext
       || i.tm.opcode_modifier.vexsources == VEX3SOURCES)
      && i.types[dest].bitfield.imm8)

  /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg, xadd.
     NOTE(review): the leading condition of this `if' is missing.  */
      && (base_opcode == 0x1
	  || base_opcode == 0x9
	  || base_opcode == 0x11
	  || base_opcode == 0x19
	  || base_opcode == 0x21
	  || base_opcode == 0x29
	  || base_opcode == 0x31
	  || base_opcode == 0x39
	  || (i.tm.base_opcode >= 0x84 && i.tm.base_opcode <= 0x87)
	  || base_opcode == 0xfc1))

  /* Check for load instruction.  */
  return (i.types[dest].bitfield.class != ClassNone
	  || i.types[dest].bitfield.instance == Accum);
/* Output lfence, 0xfaee8, after instruction.  LVI (load value
   injection) mitigation: emitted when -mlfence-after-load is active
   and the just-assembled insn loads from memory (load_insn_p).  */
insert_lfence_after (void)
  if (lfence_after_load && load_insn_p ())
      /* There are also two REP string instructions that require
	 special treatment.  Specifically, the compare string (CMPS)
	 and scan string (SCAS) instructions set EFLAGS in a manner
	 that depends on the data being compared/scanned.  When used
	 with a REP prefix, the number of iterations may therefore
	 vary depending on this data.  If the data is a program secret
	 chosen by the adversary using an LVI method,
	 then this data-dependent behavior may leak some aspect
	 of the secret.  */
      if (((i.tm.base_opcode | 0x1) == 0xa7
	   || (i.tm.base_opcode | 0x1) == 0xaf)
	  && i.prefix[REP_PREFIX])
	  as_warn (_("`%s` changes flags which would affect control flow behavior"),

      /* Emit the three lfence bytes (0f ae e8); the byte stores are
	 missing from this excerpt.  */
      char *p = frag_more (3);
4549 insert_lfence_before (void)
4553 if (is_any_vex_encoding (&i
.tm
))
4556 if (i
.tm
.base_opcode
== 0xff
4557 && (i
.tm
.extension_opcode
== 2 || i
.tm
.extension_opcode
== 4))
4559 /* Insert lfence before indirect branch if needed. */
4561 if (lfence_before_indirect_branch
== lfence_branch_none
)
4564 if (i
.operands
!= 1)
4567 if (i
.reg_operands
== 1)
4569 /* Indirect branch via register. Don't insert lfence with
4570 -mlfence-after-load=yes. */
4571 if (lfence_after_load
4572 || lfence_before_indirect_branch
== lfence_branch_memory
)
4575 else if (i
.mem_operands
== 1
4576 && lfence_before_indirect_branch
!= lfence_branch_register
)
4578 as_warn (_("indirect `%s` with memory operand should be avoided"),
4585 if (last_insn
.kind
!= last_insn_other
4586 && last_insn
.seg
== now_seg
)
4588 as_warn_where (last_insn
.file
, last_insn
.line
,
4589 _("`%s` skips -mlfence-before-indirect-branch on `%s`"),
4590 last_insn
.name
, i
.tm
.name
);
4601 /* Output or/not/shl and lfence before near ret. */
4602 if (lfence_before_ret
!= lfence_before_ret_none
4603 && (i
.tm
.base_opcode
== 0xc2
4604 || i
.tm
.base_opcode
== 0xc3))
4606 if (last_insn
.kind
!= last_insn_other
4607 && last_insn
.seg
== now_seg
)
4609 as_warn_where (last_insn
.file
, last_insn
.line
,
4610 _("`%s` skips -mlfence-before-ret on `%s`"),
4611 last_insn
.name
, i
.tm
.name
);
4615 /* Near ret ingore operand size override under CPU64. */
4616 char prefix
= flag_code
== CODE_64BIT
4618 : i
.prefix
[DATA_PREFIX
] ? 0x66 : 0x0;
4620 if (lfence_before_ret
== lfence_before_ret_not
)
4622 /* not: 0xf71424, may add prefix
4623 for operand size override or 64-bit code. */
4624 p
= frag_more ((prefix
? 2 : 0) + 6 + 3);
4638 p
= frag_more ((prefix
? 1 : 0) + 4 + 3);
4641 if (lfence_before_ret
== lfence_before_ret_or
)
4643 /* or: 0x830c2400, may add prefix
4644 for operand size override or 64-bit code. */
4650 /* shl: 0xc1242400, may add prefix
4651 for operand size override or 64-bit code. */
/* This is the guts of the machine-dependent assembler.  LINE points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.
   NOTE(review): this excerpt lost many structural lines (braces,
   `return' statements, intervening conditions); the statements below
   are the surviving ones -- verify against the complete file.  */
md_assemble (char *line)
  char mnemonic[MAX_MNEM_SIZE], mnem_suffix;
  const insn_template *t;

  /* Initialize globals.  */
  memset (&i, '\0', sizeof (i));
  for (j = 0; j < MAX_OPERANDS; j++)
    i.reloc[j] = NO_RELOC;
  memset (disp_expressions, '\0', sizeof (disp_expressions));
  memset (im_expressions, '\0', sizeof (im_expressions));
  save_stack_p = save_stack;

  /* First parse an instruction mnemonic & call i386_operand for the operands.
     We assume that the scrubber has arranged it so that line[0] is the valid
     start of a (possibly prefixed) mnemonic.  */

  line = parse_insn (line, mnemonic);
  /* Remember the suffix before parse_operands may alter it.  */
  mnem_suffix = i.suffix;
  line = parse_operands (line, mnemonic);
  xfree (i.memop1_string);
  i.memop1_string = NULL;

  /* Now we've parsed the mnemonic into a set of templates, and have the
     operands at hand.  */

  /* All Intel opcodes have reversed operands except for "bound", "enter",
     "monitor*", "mwait*", "tpause", and "umwait".  We also don't reverse
     intersegment "jmp" and "call" instructions with 2 immediate operands so
     that the immediate segment precedes the offset, as it does when in AT&T
     mode.  NOTE(review): the leading intel_syntax condition of this
     `if' is missing.  */
      && (strcmp (mnemonic, "bound") != 0)
      && (strcmp (mnemonic, "invlpga") != 0)
      && (strncmp (mnemonic, "monitor", 7) != 0)
      && (strncmp (mnemonic, "mwait", 5) != 0)
      && (strcmp (mnemonic, "tpause") != 0)
      && (strcmp (mnemonic, "umwait") != 0)
      && !(operand_type_check (i.types[0], imm)
	   && operand_type_check (i.types[1], imm)))

  /* The order of the immediates should be reversed
     for 2 immediates extrq and insertq instructions.  */
  if (i.imm_operands == 2
      && (strcmp (mnemonic, "extrq") == 0
	  || strcmp (mnemonic, "insertq") == 0))
      swap_2_operands (0, 1);

  /* Don't optimize displacement for movabs since it only takes 64bit
     displacement.  */
      && i.disp_encoding != disp_encoding_32bit
      && (flag_code != CODE_64BIT
	  || strcmp (mnemonic, "movabs") != 0))

  /* Next, we find a template that matches the given insn,
     making sure the overlap of the given operands types is consistent
     with the template operand types.  */

  if (!(t = match_template (mnem_suffix)))

  /* Warn (or error) about legacy SSE insns when -msse-check is on.  */
  if (sse_check != check_none
      && !i.tm.opcode_modifier.noavx
      && !i.tm.cpu_flags.bitfield.cpuavx
      && !i.tm.cpu_flags.bitfield.cpuavx512f
      && (i.tm.cpu_flags.bitfield.cpusse
	  || i.tm.cpu_flags.bitfield.cpusse2
	  || i.tm.cpu_flags.bitfield.cpusse3
	  || i.tm.cpu_flags.bitfield.cpussse3
	  || i.tm.cpu_flags.bitfield.cpusse4_1
	  || i.tm.cpu_flags.bitfield.cpusse4_2
	  || i.tm.cpu_flags.bitfield.cpusse4a
	  || i.tm.cpu_flags.bitfield.cpupclmul
	  || i.tm.cpu_flags.bitfield.cpuaes
	  || i.tm.cpu_flags.bitfield.cpusha
	  || i.tm.cpu_flags.bitfield.cpugfni))
      (sse_check == check_warning
       : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);

  if (i.tm.opcode_modifier.fwait)
    if (!add_prefix (FWAIT_OPCODE))

  /* Check if REP prefix is OK.  */
  if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
      as_bad (_("invalid instruction `%s' after `%s'"),
	      i.tm.name, i.rep_prefix);

  /* Check for lock without a lockable instruction.  Destination operand
     must be memory unless it is xchg (0x86).  */
  if (i.prefix[LOCK_PREFIX]
      && (!i.tm.opcode_modifier.islockable
	  || i.mem_operands == 0
	  || (i.tm.base_opcode != 0x86
	      && !(i.flags[i.operands - 1] & Operand_Mem))))
      as_bad (_("expecting lockable instruction after `lock'"));

  /* Check for data size prefix on VEX/XOP/EVEX encoded insns.  */
  if (i.prefix[DATA_PREFIX] && is_any_vex_encoding (&i.tm))
      as_bad (_("data size prefix invalid with `%s'"), i.tm.name);

  /* Check if HLE prefix is OK.  */
  if (i.hle_prefix && !check_hle ())

  /* Check BND prefix.  */
  if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
    as_bad (_("expecting valid branch instruction after `bnd'"));

  /* Check NOTRACK prefix.  */
  if (i.notrack_prefix && !i.tm.opcode_modifier.notrackprefixok)
    as_bad (_("expecting indirect branch instruction after `notrack'"));

  if (i.tm.cpu_flags.bitfield.cpumpx)
      if (flag_code == CODE_64BIT && i.prefix[ADDR_PREFIX])
	as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
      else if (flag_code != CODE_16BIT
	       ? i.prefix[ADDR_PREFIX]
	       : i.mem_operands && !i.prefix[ADDR_PREFIX])
	as_bad (_("16-bit address isn't allowed in MPX instructions"));

  /* Insert BND prefix.  */
  if (add_bnd_prefix && i.tm.opcode_modifier.bndprefixok)
      if (!i.prefix[BND_PREFIX])
	add_prefix (BND_PREFIX_OPCODE);
      else if (i.prefix[BND_PREFIX] != BND_PREFIX_OPCODE)
	  as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
	  i.prefix[BND_PREFIX] = BND_PREFIX_OPCODE;

  /* Check string instruction segment overrides.  */
  if (i.tm.opcode_modifier.isstring >= IS_STRING_ES_OP0)
      gas_assert (i.mem_operands);
      if (!check_string ())
      /* Implicit %es/%ds operands carry no explicit displacement.  */
      i.disp_operands = 0;

  if (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize)
    optimize_encoding ();

  if (!process_suffix ())

  /* Update operand types.  */
  for (j = 0; j < i.operands; j++)
    i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);

  /* Make still unresolved immediate matches conform to size of immediate
     given in i.suffix.  */
  if (!finalize_imm ())

  if (i.types[0].bitfield.imm1)
    i.imm_operands = 0;	/* kludge for shift insns.  */

  /* We only need to check those implicit registers for instructions
     with 3 operands or less.  */
  if (i.operands <= 3)
    for (j = 0; j < i.operands; j++)
      if (i.types[j].bitfield.instance != InstanceNone
	  && !i.types[j].bitfield.xmmword)

  /* ImmExt should be processed after SSE2AVX.  */
  if (!i.tm.opcode_modifier.sse2avx
      && i.tm.opcode_modifier.immext)

  /* For insns with operands there are more diddles to do to the opcode.  */
    if (!process_operands ())
  else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
      /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc.  */
      as_warn (_("translating to `%sp'"), i.tm.name);

  if (is_any_vex_encoding (&i.tm))
      if (!cpu_arch_flags.bitfield.cpui286)
	  as_bad (_("instruction `%s' isn't supported outside of protected mode."),
      if (i.tm.opcode_modifier.vex)
	build_vex_prefix (t);
	/* NOTE(review): `else' keyword missing before this call.  */
	build_evex_prefix ();

  /* Handle conversion of 'int $3' --> special int3 insn.  XOP or FMA4
     instructions may define INT_OPCODE as well, so avoid this corner
     case for those instructions that use MODRM.  */
  if (i.tm.base_opcode == INT_OPCODE
      && !i.tm.opcode_modifier.modrm
      && i.op[0].imms->X_add_number == 3)
      i.tm.base_opcode = INT3_OPCODE;

  if ((i.tm.opcode_modifier.jump == JUMP
       || i.tm.opcode_modifier.jump == JUMP_BYTE
       || i.tm.opcode_modifier.jump == JUMP_DWORD)
      && i.op[0].disps->X_op == O_constant)
      /* Convert "jmp constant" (and "call constant") to a jump (call) to
	 the absolute address given by the constant.  Since ix86 jumps and
	 calls are pc relative, we need to generate a reloc.  */
      i.op[0].disps->X_add_symbol = &abs_symbol;
      i.op[0].disps->X_op = O_symbol;

  /* For 8 bit registers we need an empty rex prefix.  Also if the
     instruction already has a prefix, we need to convert old
     registers to new ones.  */

  if ((i.types[0].bitfield.class == Reg && i.types[0].bitfield.byte
       && (i.op[0].regs->reg_flags & RegRex64) != 0)
      || (i.types[1].bitfield.class == Reg && i.types[1].bitfield.byte
	  && (i.op[1].regs->reg_flags & RegRex64) != 0)
      || (((i.types[0].bitfield.class == Reg && i.types[0].bitfield.byte)
	   || (i.types[1].bitfield.class == Reg && i.types[1].bitfield.byte))
      i.rex |= REX_OPCODE;
      for (x = 0; x < 2; x++)
	  /* Look for 8 bit operand that uses old registers.  */
	  if (i.types[x].bitfield.class == Reg && i.types[x].bitfield.byte
	      && (i.op[x].regs->reg_flags & RegRex64) == 0)
	      gas_assert (!(i.op[x].regs->reg_flags & RegRex));
	      /* In case it is "hi" register, give up.  */
	      if (i.op[x].regs->reg_num > 3)
		as_bad (_("can't encode register '%s%s' in an "
			  "instruction requiring REX prefix."),
			register_prefix, i.op[x].regs->reg_name);

	      /* Otherwise it is equivalent to the extended register.
		 Since the encoding doesn't change this is merely
		 cosmetic cleanup for debug output.  */
	      i.op[x].regs = i.op[x].regs + 8;

  if (i.rex == 0 && i.rex_encoding)
      /* Check if we can add a REX_OPCODE byte.  Look for 8 bit operand
	 that uses legacy register.  If it is "hi" register, don't add
	 the REX_OPCODE byte.  */
      for (x = 0; x < 2; x++)
	if (i.types[x].bitfield.class == Reg
	    && i.types[x].bitfield.byte
	    && (i.op[x].regs->reg_flags & RegRex64) == 0
	    && i.op[x].regs->reg_num > 3)
	    gas_assert (!(i.op[x].regs->reg_flags & RegRex));
	    i.rex_encoding = FALSE;

    add_prefix (REX_OPCODE | i.rex);

  insert_lfence_before ();

  /* We are ready to output the insn.  */

  insert_lfence_after ();

  /* Record this insn for the benefit of the lfence warnings above.  */
  last_insn.seg = now_seg;
  if (i.tm.opcode_modifier.isprefix)
      last_insn.kind = last_insn_prefix;
      last_insn.name = i.tm.name;
      last_insn.file = as_where (&last_insn.line);
    /* NOTE(review): `else' arm -- plain insn.  */
    last_insn.kind = last_insn_other;
5003 parse_insn (char *line
, char *mnemonic
)
5006 char *token_start
= l
;
5009 const insn_template
*t
;
5015 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
5020 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
5022 as_bad (_("no such instruction: `%s'"), token_start
);
5027 if (!is_space_char (*l
)
5028 && *l
!= END_OF_INSN
5030 || (*l
!= PREFIX_SEPARATOR
5033 as_bad (_("invalid character %s in mnemonic"),
5034 output_invalid (*l
));
5037 if (token_start
== l
)
5039 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
5040 as_bad (_("expecting prefix; got nothing"));
5042 as_bad (_("expecting mnemonic; got nothing"));
5046 /* Look up instruction (or prefix) via hash table. */
5047 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
5049 if (*l
!= END_OF_INSN
5050 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
5051 && current_templates
5052 && current_templates
->start
->opcode_modifier
.isprefix
)
5054 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
5056 as_bad ((flag_code
!= CODE_64BIT
5057 ? _("`%s' is only supported in 64-bit mode")
5058 : _("`%s' is not supported in 64-bit mode")),
5059 current_templates
->start
->name
);
5062 /* If we are in 16-bit mode, do not allow addr16 or data16.
5063 Similarly, in 32-bit mode, do not allow addr32 or data32. */
5064 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
5065 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5066 && flag_code
!= CODE_64BIT
5067 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5068 ^ (flag_code
== CODE_16BIT
)))
5070 as_bad (_("redundant %s prefix"),
5071 current_templates
->start
->name
);
5074 if (current_templates
->start
->opcode_length
== 0)
5076 /* Handle pseudo prefixes. */
5077 switch (current_templates
->start
->base_opcode
)
5081 i
.disp_encoding
= disp_encoding_8bit
;
5085 i
.disp_encoding
= disp_encoding_32bit
;
5089 i
.dir_encoding
= dir_encoding_load
;
5093 i
.dir_encoding
= dir_encoding_store
;
5097 i
.vec_encoding
= vex_encoding_vex
;
5101 i
.vec_encoding
= vex_encoding_vex3
;
5105 i
.vec_encoding
= vex_encoding_evex
;
5109 i
.rex_encoding
= TRUE
;
5113 i
.no_optimize
= TRUE
;
5121 /* Add prefix, checking for repeated prefixes. */
5122 switch (add_prefix (current_templates
->start
->base_opcode
))
5127 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
5128 i
.notrack_prefix
= current_templates
->start
->name
;
5131 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
5132 i
.hle_prefix
= current_templates
->start
->name
;
5133 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
5134 i
.bnd_prefix
= current_templates
->start
->name
;
5136 i
.rep_prefix
= current_templates
->start
->name
;
5142 /* Skip past PREFIX_SEPARATOR and reset token_start. */
5149 if (!current_templates
)
5151 /* Deprecated functionality (new code should use pseudo-prefixes instead):
5152 Check if we should swap operand or force 32bit displacement in
5154 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
5155 i
.dir_encoding
= dir_encoding_swap
;
5156 else if (mnem_p
- 3 == dot_p
5159 i
.disp_encoding
= disp_encoding_8bit
;
5160 else if (mnem_p
- 4 == dot_p
5164 i
.disp_encoding
= disp_encoding_32bit
;
5169 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
5172 if (!current_templates
)
5175 if (mnem_p
> mnemonic
)
5177 /* See if we can get a match by trimming off a suffix. */
5180 case WORD_MNEM_SUFFIX
:
5181 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
5182 i
.suffix
= SHORT_MNEM_SUFFIX
;
5185 case BYTE_MNEM_SUFFIX
:
5186 case QWORD_MNEM_SUFFIX
:
5187 i
.suffix
= mnem_p
[-1];
5189 current_templates
= (const templates
*) hash_find (op_hash
,
5192 case SHORT_MNEM_SUFFIX
:
5193 case LONG_MNEM_SUFFIX
:
5196 i
.suffix
= mnem_p
[-1];
5198 current_templates
= (const templates
*) hash_find (op_hash
,
5207 if (intel_float_operand (mnemonic
) == 1)
5208 i
.suffix
= SHORT_MNEM_SUFFIX
;
5210 i
.suffix
= LONG_MNEM_SUFFIX
;
5212 current_templates
= (const templates
*) hash_find (op_hash
,
5219 if (!current_templates
)
5221 as_bad (_("no such instruction: `%s'"), token_start
);
5226 if (current_templates
->start
->opcode_modifier
.jump
== JUMP
5227 || current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
5229 /* Check for a branch hint. We allow ",pt" and ",pn" for
5230 predict taken and predict not taken respectively.
5231 I'm not sure that branch hints actually do anything on loop
5232 and jcxz insns (JumpByte) for current Pentium4 chips. They
5233 may work in the future and it doesn't hurt to accept them
5235 if (l
[0] == ',' && l
[1] == 'p')
5239 if (!add_prefix (DS_PREFIX_OPCODE
))
5243 else if (l
[2] == 'n')
5245 if (!add_prefix (CS_PREFIX_OPCODE
))
5251 /* Any other comma loses. */
5254 as_bad (_("invalid character %s in mnemonic"),
5255 output_invalid (*l
));
5259 /* Check if instruction is supported on specified architecture. */
5261 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
5263 supported
|= cpu_flags_match (t
);
5264 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
5266 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
))
5267 as_warn (_("use .code16 to ensure correct addressing mode"));
5273 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
5274 as_bad (flag_code
== CODE_64BIT
5275 ? _("`%s' is not supported in 64-bit mode")
5276 : _("`%s' is only supported in 64-bit mode"),
5277 current_templates
->start
->name
);
5279 as_bad (_("`%s' is not supported on `%s%s'"),
5280 current_templates
->start
->name
,
5281 cpu_arch_name
? cpu_arch_name
: default_arch
,
5282 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
5288 parse_operands (char *l
, const char *mnemonic
)
5292 /* 1 if operand is pending after ','. */
5293 unsigned int expecting_operand
= 0;
5295 /* Non-zero if operand parens not balanced. */
5296 unsigned int paren_not_balanced
;
5298 while (*l
!= END_OF_INSN
)
5300 /* Skip optional white space before operand. */
5301 if (is_space_char (*l
))
5303 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
5305 as_bad (_("invalid character %s before operand %d"),
5306 output_invalid (*l
),
5310 token_start
= l
; /* After white space. */
5311 paren_not_balanced
= 0;
5312 while (paren_not_balanced
|| *l
!= ',')
5314 if (*l
== END_OF_INSN
)
5316 if (paren_not_balanced
)
5319 as_bad (_("unbalanced parenthesis in operand %d."),
5322 as_bad (_("unbalanced brackets in operand %d."),
5327 break; /* we are done */
5329 else if (!is_operand_char (*l
) && !is_space_char (*l
) && *l
!= '"')
5331 as_bad (_("invalid character %s in operand %d"),
5332 output_invalid (*l
),
5339 ++paren_not_balanced
;
5341 --paren_not_balanced
;
5346 ++paren_not_balanced
;
5348 --paren_not_balanced
;
5352 if (l
!= token_start
)
5353 { /* Yes, we've read in another operand. */
5354 unsigned int operand_ok
;
5355 this_operand
= i
.operands
++;
5356 if (i
.operands
> MAX_OPERANDS
)
5358 as_bad (_("spurious operands; (%d operands/instruction max)"),
5362 i
.types
[this_operand
].bitfield
.unspecified
= 1;
5363 /* Now parse operand adding info to 'i' as we go along. */
5364 END_STRING_AND_SAVE (l
);
5366 if (i
.mem_operands
> 1)
5368 as_bad (_("too many memory references for `%s'"),
5375 i386_intel_operand (token_start
,
5376 intel_float_operand (mnemonic
));
5378 operand_ok
= i386_att_operand (token_start
);
5380 RESTORE_END_STRING (l
);
5386 if (expecting_operand
)
5388 expecting_operand_after_comma
:
5389 as_bad (_("expecting operand after ','; got nothing"));
5394 as_bad (_("expecting operand before ','; got nothing"));
5399 /* Now *l must be either ',' or END_OF_INSN. */
5402 if (*++l
== END_OF_INSN
)
5404 /* Just skip it, if it's \n complain. */
5405 goto expecting_operand_after_comma
;
5407 expecting_operand
= 1;
5414 swap_2_operands (int xchg1
, int xchg2
)
5416 union i386_op temp_op
;
5417 i386_operand_type temp_type
;
5418 unsigned int temp_flags
;
5419 enum bfd_reloc_code_real temp_reloc
;
5421 temp_type
= i
.types
[xchg2
];
5422 i
.types
[xchg2
] = i
.types
[xchg1
];
5423 i
.types
[xchg1
] = temp_type
;
5425 temp_flags
= i
.flags
[xchg2
];
5426 i
.flags
[xchg2
] = i
.flags
[xchg1
];
5427 i
.flags
[xchg1
] = temp_flags
;
5429 temp_op
= i
.op
[xchg2
];
5430 i
.op
[xchg2
] = i
.op
[xchg1
];
5431 i
.op
[xchg1
] = temp_op
;
5433 temp_reloc
= i
.reloc
[xchg2
];
5434 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
5435 i
.reloc
[xchg1
] = temp_reloc
;
5439 if (i
.mask
->operand
== xchg1
)
5440 i
.mask
->operand
= xchg2
;
5441 else if (i
.mask
->operand
== xchg2
)
5442 i
.mask
->operand
= xchg1
;
5446 if (i
.broadcast
->operand
== xchg1
)
5447 i
.broadcast
->operand
= xchg2
;
5448 else if (i
.broadcast
->operand
== xchg2
)
5449 i
.broadcast
->operand
= xchg1
;
5453 if (i
.rounding
->operand
== xchg1
)
5454 i
.rounding
->operand
= xchg2
;
5455 else if (i
.rounding
->operand
== xchg2
)
5456 i
.rounding
->operand
= xchg1
;
5461 swap_operands (void)
5467 swap_2_operands (1, i
.operands
- 2);
5471 swap_2_operands (0, i
.operands
- 1);
5477 if (i
.mem_operands
== 2)
5479 const seg_entry
*temp_seg
;
5480 temp_seg
= i
.seg
[0];
5481 i
.seg
[0] = i
.seg
[1];
5482 i
.seg
[1] = temp_seg
;
5486 /* Try to ensure constant immediates are represented in the smallest
5491 char guess_suffix
= 0;
5495 guess_suffix
= i
.suffix
;
5496 else if (i
.reg_operands
)
5498 /* Figure out a suffix from the last register operand specified.
5499 We can't do this properly yet, i.e. excluding special register
5500 instances, but the following works for instructions with
5501 immediates. In any case, we can't set i.suffix yet. */
5502 for (op
= i
.operands
; --op
>= 0;)
5503 if (i
.types
[op
].bitfield
.class != Reg
)
5505 else if (i
.types
[op
].bitfield
.byte
)
5507 guess_suffix
= BYTE_MNEM_SUFFIX
;
5510 else if (i
.types
[op
].bitfield
.word
)
5512 guess_suffix
= WORD_MNEM_SUFFIX
;
5515 else if (i
.types
[op
].bitfield
.dword
)
5517 guess_suffix
= LONG_MNEM_SUFFIX
;
5520 else if (i
.types
[op
].bitfield
.qword
)
5522 guess_suffix
= QWORD_MNEM_SUFFIX
;
5526 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5527 guess_suffix
= WORD_MNEM_SUFFIX
;
5529 for (op
= i
.operands
; --op
>= 0;)
5530 if (operand_type_check (i
.types
[op
], imm
))
5532 switch (i
.op
[op
].imms
->X_op
)
5535 /* If a suffix is given, this operand may be shortened. */
5536 switch (guess_suffix
)
5538 case LONG_MNEM_SUFFIX
:
5539 i
.types
[op
].bitfield
.imm32
= 1;
5540 i
.types
[op
].bitfield
.imm64
= 1;
5542 case WORD_MNEM_SUFFIX
:
5543 i
.types
[op
].bitfield
.imm16
= 1;
5544 i
.types
[op
].bitfield
.imm32
= 1;
5545 i
.types
[op
].bitfield
.imm32s
= 1;
5546 i
.types
[op
].bitfield
.imm64
= 1;
5548 case BYTE_MNEM_SUFFIX
:
5549 i
.types
[op
].bitfield
.imm8
= 1;
5550 i
.types
[op
].bitfield
.imm8s
= 1;
5551 i
.types
[op
].bitfield
.imm16
= 1;
5552 i
.types
[op
].bitfield
.imm32
= 1;
5553 i
.types
[op
].bitfield
.imm32s
= 1;
5554 i
.types
[op
].bitfield
.imm64
= 1;
5558 /* If this operand is at most 16 bits, convert it
5559 to a signed 16 bit number before trying to see
5560 whether it will fit in an even smaller size.
5561 This allows a 16-bit operand such as $0xffe0 to
5562 be recognised as within Imm8S range. */
5563 if ((i
.types
[op
].bitfield
.imm16
)
5564 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
5566 i
.op
[op
].imms
->X_add_number
=
5567 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
5570 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5571 if ((i
.types
[op
].bitfield
.imm32
)
5572 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
5575 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5576 ^ ((offsetT
) 1 << 31))
5577 - ((offsetT
) 1 << 31));
5581 = operand_type_or (i
.types
[op
],
5582 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
5584 /* We must avoid matching of Imm32 templates when 64bit
5585 only immediate is available. */
5586 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
5587 i
.types
[op
].bitfield
.imm32
= 0;
5594 /* Symbols and expressions. */
5596 /* Convert symbolic operand to proper sizes for matching, but don't
5597 prevent matching a set of insns that only supports sizes other
5598 than those matching the insn suffix. */
5600 i386_operand_type mask
, allowed
;
5601 const insn_template
*t
;
5603 operand_type_set (&mask
, 0);
5604 operand_type_set (&allowed
, 0);
5606 for (t
= current_templates
->start
;
5607 t
< current_templates
->end
;
5610 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
5611 allowed
= operand_type_and (allowed
, anyimm
);
5613 switch (guess_suffix
)
5615 case QWORD_MNEM_SUFFIX
:
5616 mask
.bitfield
.imm64
= 1;
5617 mask
.bitfield
.imm32s
= 1;
5619 case LONG_MNEM_SUFFIX
:
5620 mask
.bitfield
.imm32
= 1;
5622 case WORD_MNEM_SUFFIX
:
5623 mask
.bitfield
.imm16
= 1;
5625 case BYTE_MNEM_SUFFIX
:
5626 mask
.bitfield
.imm8
= 1;
5631 allowed
= operand_type_and (mask
, allowed
);
5632 if (!operand_type_all_zero (&allowed
))
5633 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
5640 /* Try to use the smallest displacement type too. */
5642 optimize_disp (void)
5646 for (op
= i
.operands
; --op
>= 0;)
5647 if (operand_type_check (i
.types
[op
], disp
))
5649 if (i
.op
[op
].disps
->X_op
== O_constant
)
5651 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
5653 if (i
.types
[op
].bitfield
.disp16
5654 && (op_disp
& ~(offsetT
) 0xffff) == 0)
5656 /* If this operand is at most 16 bits, convert
5657 to a signed 16 bit number and don't use 64bit
5659 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
5660 i
.types
[op
].bitfield
.disp64
= 0;
5663 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5664 if (i
.types
[op
].bitfield
.disp32
5665 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
5667 /* If this operand is at most 32 bits, convert
5668 to a signed 32 bit number and don't use 64bit
5670 op_disp
&= (((offsetT
) 2 << 31) - 1);
5671 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
5672 i
.types
[op
].bitfield
.disp64
= 0;
5675 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
5677 i
.types
[op
].bitfield
.disp8
= 0;
5678 i
.types
[op
].bitfield
.disp16
= 0;
5679 i
.types
[op
].bitfield
.disp32
= 0;
5680 i
.types
[op
].bitfield
.disp32s
= 0;
5681 i
.types
[op
].bitfield
.disp64
= 0;
5685 else if (flag_code
== CODE_64BIT
)
5687 if (fits_in_signed_long (op_disp
))
5689 i
.types
[op
].bitfield
.disp64
= 0;
5690 i
.types
[op
].bitfield
.disp32s
= 1;
5692 if (i
.prefix
[ADDR_PREFIX
]
5693 && fits_in_unsigned_long (op_disp
))
5694 i
.types
[op
].bitfield
.disp32
= 1;
5696 if ((i
.types
[op
].bitfield
.disp32
5697 || i
.types
[op
].bitfield
.disp32s
5698 || i
.types
[op
].bitfield
.disp16
)
5699 && fits_in_disp8 (op_disp
))
5700 i
.types
[op
].bitfield
.disp8
= 1;
5702 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5703 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
5705 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
5706 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
5707 i
.types
[op
].bitfield
.disp8
= 0;
5708 i
.types
[op
].bitfield
.disp16
= 0;
5709 i
.types
[op
].bitfield
.disp32
= 0;
5710 i
.types
[op
].bitfield
.disp32s
= 0;
5711 i
.types
[op
].bitfield
.disp64
= 0;
5714 /* We only support 64bit displacement on constants. */
5715 i
.types
[op
].bitfield
.disp64
= 0;
5719 /* Return 1 if there is a match in broadcast bytes between operand
5720 GIVEN and instruction template T. */
5723 match_broadcast_size (const insn_template
*t
, unsigned int given
)
5725 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
5726 && i
.types
[given
].bitfield
.byte
)
5727 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
5728 && i
.types
[given
].bitfield
.word
)
5729 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
5730 && i
.types
[given
].bitfield
.dword
)
5731 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
5732 && i
.types
[given
].bitfield
.qword
));
5735 /* Check if operands are valid for the instruction. */
5738 check_VecOperands (const insn_template
*t
)
5743 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5744 any one operand are implicity requiring AVX512VL support if the actual
5745 operand size is YMMword or XMMword. Since this function runs after
5746 template matching, there's no need to check for YMMword/XMMword in
5748 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
5749 if (!cpu_flags_all_zero (&cpu
)
5750 && !t
->cpu_flags
.bitfield
.cpuavx512vl
5751 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
5753 for (op
= 0; op
< t
->operands
; ++op
)
5755 if (t
->operand_types
[op
].bitfield
.zmmword
5756 && (i
.types
[op
].bitfield
.ymmword
5757 || i
.types
[op
].bitfield
.xmmword
))
5759 i
.error
= unsupported
;
5765 /* Without VSIB byte, we can't have a vector register for index. */
5766 if (!t
->opcode_modifier
.vecsib
5768 && (i
.index_reg
->reg_type
.bitfield
.xmmword
5769 || i
.index_reg
->reg_type
.bitfield
.ymmword
5770 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
5772 i
.error
= unsupported_vector_index_register
;
5776 /* Check if default mask is allowed. */
5777 if (t
->opcode_modifier
.nodefmask
5778 && (!i
.mask
|| i
.mask
->mask
->reg_num
== 0))
5780 i
.error
= no_default_mask
;
5784 /* For VSIB byte, we need a vector register for index, and all vector
5785 registers must be distinct. */
5786 if (t
->opcode_modifier
.vecsib
)
5789 || !((t
->opcode_modifier
.vecsib
== VecSIB128
5790 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
5791 || (t
->opcode_modifier
.vecsib
== VecSIB256
5792 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
5793 || (t
->opcode_modifier
.vecsib
== VecSIB512
5794 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
5796 i
.error
= invalid_vsib_address
;
5800 gas_assert (i
.reg_operands
== 2 || i
.mask
);
5801 if (i
.reg_operands
== 2 && !i
.mask
)
5803 gas_assert (i
.types
[0].bitfield
.class == RegSIMD
);
5804 gas_assert (i
.types
[0].bitfield
.xmmword
5805 || i
.types
[0].bitfield
.ymmword
);
5806 gas_assert (i
.types
[2].bitfield
.class == RegSIMD
);
5807 gas_assert (i
.types
[2].bitfield
.xmmword
5808 || i
.types
[2].bitfield
.ymmword
);
5809 if (operand_check
== check_none
)
5811 if (register_number (i
.op
[0].regs
)
5812 != register_number (i
.index_reg
)
5813 && register_number (i
.op
[2].regs
)
5814 != register_number (i
.index_reg
)
5815 && register_number (i
.op
[0].regs
)
5816 != register_number (i
.op
[2].regs
))
5818 if (operand_check
== check_error
)
5820 i
.error
= invalid_vector_register_set
;
5823 as_warn (_("mask, index, and destination registers should be distinct"));
5825 else if (i
.reg_operands
== 1 && i
.mask
)
5827 if (i
.types
[1].bitfield
.class == RegSIMD
5828 && (i
.types
[1].bitfield
.xmmword
5829 || i
.types
[1].bitfield
.ymmword
5830 || i
.types
[1].bitfield
.zmmword
)
5831 && (register_number (i
.op
[1].regs
)
5832 == register_number (i
.index_reg
)))
5834 if (operand_check
== check_error
)
5836 i
.error
= invalid_vector_register_set
;
5839 if (operand_check
!= check_none
)
5840 as_warn (_("index and destination registers should be distinct"));
5845 /* Check if broadcast is supported by the instruction and is applied
5846 to the memory operand. */
5849 i386_operand_type type
, overlap
;
5851 /* Check if specified broadcast is supported in this instruction,
5852 and its broadcast bytes match the memory operand. */
5853 op
= i
.broadcast
->operand
;
5854 if (!t
->opcode_modifier
.broadcast
5855 || !(i
.flags
[op
] & Operand_Mem
)
5856 || (!i
.types
[op
].bitfield
.unspecified
5857 && !match_broadcast_size (t
, op
)))
5860 i
.error
= unsupported_broadcast
;
5864 i
.broadcast
->bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
5865 * i
.broadcast
->type
);
5866 operand_type_set (&type
, 0);
5867 switch (i
.broadcast
->bytes
)
5870 type
.bitfield
.word
= 1;
5873 type
.bitfield
.dword
= 1;
5876 type
.bitfield
.qword
= 1;
5879 type
.bitfield
.xmmword
= 1;
5882 type
.bitfield
.ymmword
= 1;
5885 type
.bitfield
.zmmword
= 1;
5891 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
5892 if (t
->operand_types
[op
].bitfield
.class == RegSIMD
5893 && t
->operand_types
[op
].bitfield
.byte
5894 + t
->operand_types
[op
].bitfield
.word
5895 + t
->operand_types
[op
].bitfield
.dword
5896 + t
->operand_types
[op
].bitfield
.qword
> 1)
5898 overlap
.bitfield
.xmmword
= 0;
5899 overlap
.bitfield
.ymmword
= 0;
5900 overlap
.bitfield
.zmmword
= 0;
5902 if (operand_type_all_zero (&overlap
))
5905 if (t
->opcode_modifier
.checkregsize
)
5909 type
.bitfield
.baseindex
= 1;
5910 for (j
= 0; j
< i
.operands
; ++j
)
5913 && !operand_type_register_match(i
.types
[j
],
5914 t
->operand_types
[j
],
5916 t
->operand_types
[op
]))
5921 /* If broadcast is supported in this instruction, we need to check if
5922 operand of one-element size isn't specified without broadcast. */
5923 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
5925 /* Find memory operand. */
5926 for (op
= 0; op
< i
.operands
; op
++)
5927 if (i
.flags
[op
] & Operand_Mem
)
5929 gas_assert (op
< i
.operands
);
5930 /* Check size of the memory operand. */
5931 if (match_broadcast_size (t
, op
))
5933 i
.error
= broadcast_needed
;
5938 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
5940 /* Check if requested masking is supported. */
5943 switch (t
->opcode_modifier
.masking
)
5947 case MERGING_MASKING
:
5948 if (i
.mask
->zeroing
)
5951 i
.error
= unsupported_masking
;
5955 case DYNAMIC_MASKING
:
5956 /* Memory destinations allow only merging masking. */
5957 if (i
.mask
->zeroing
&& i
.mem_operands
)
5959 /* Find memory operand. */
5960 for (op
= 0; op
< i
.operands
; op
++)
5961 if (i
.flags
[op
] & Operand_Mem
)
5963 gas_assert (op
< i
.operands
);
5964 if (op
== i
.operands
- 1)
5966 i
.error
= unsupported_masking
;
5976 /* Check if masking is applied to dest operand. */
5977 if (i
.mask
&& (i
.mask
->operand
!= (int) (i
.operands
- 1)))
5979 i
.error
= mask_not_on_destination
;
5986 if (!t
->opcode_modifier
.sae
5987 || (i
.rounding
->type
!= saeonly
&& !t
->opcode_modifier
.staticrounding
))
5989 i
.error
= unsupported_rc_sae
;
5992 /* If the instruction has several immediate operands and one of
5993 them is rounding, the rounding operand should be the last
5994 immediate operand. */
5995 if (i
.imm_operands
> 1
5996 && i
.rounding
->operand
!= (int) (i
.imm_operands
- 1))
5998 i
.error
= rc_sae_operand_not_last_imm
;
6003 /* Check the special Imm4 cases; must be the first operand. */
6004 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
6006 if (i
.op
[0].imms
->X_op
!= O_constant
6007 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
6013 /* Turn off Imm<N> so that update_imm won't complain. */
6014 operand_type_set (&i
.types
[0], 0);
6017 /* Check vector Disp8 operand. */
6018 if (t
->opcode_modifier
.disp8memshift
6019 && i
.disp_encoding
!= disp_encoding_32bit
)
6022 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
6023 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
6024 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
6027 const i386_operand_type
*type
= NULL
;
6030 for (op
= 0; op
< i
.operands
; op
++)
6031 if (i
.flags
[op
] & Operand_Mem
)
6033 if (t
->opcode_modifier
.evex
== EVEXLIG
)
6034 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
6035 else if (t
->operand_types
[op
].bitfield
.xmmword
6036 + t
->operand_types
[op
].bitfield
.ymmword
6037 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
6038 type
= &t
->operand_types
[op
];
6039 else if (!i
.types
[op
].bitfield
.unspecified
)
6040 type
= &i
.types
[op
];
6042 else if (i
.types
[op
].bitfield
.class == RegSIMD
6043 && t
->opcode_modifier
.evex
!= EVEXLIG
)
6045 if (i
.types
[op
].bitfield
.zmmword
)
6047 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
6049 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
6055 if (type
->bitfield
.zmmword
)
6057 else if (type
->bitfield
.ymmword
)
6059 else if (type
->bitfield
.xmmword
)
6063 /* For the check in fits_in_disp8(). */
6064 if (i
.memshift
== 0)
6068 for (op
= 0; op
< i
.operands
; op
++)
6069 if (operand_type_check (i
.types
[op
], disp
)
6070 && i
.op
[op
].disps
->X_op
== O_constant
)
6072 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
6074 i
.types
[op
].bitfield
.disp8
= 1;
6077 i
.types
[op
].bitfield
.disp8
= 0;
6086 /* Check if encoding requirements are met by the instruction. */
6089 VEX_check_encoding (const insn_template
*t
)
6091 if (i
.vec_encoding
== vex_encoding_error
)
6093 i
.error
= unsupported
;
6097 if (i
.vec_encoding
== vex_encoding_evex
)
6099 /* This instruction must be encoded with EVEX prefix. */
6100 if (!is_evex_encoding (t
))
6102 i
.error
= unsupported
;
6108 if (!t
->opcode_modifier
.vex
)
6110 /* This instruction template doesn't have VEX prefix. */
6111 if (i
.vec_encoding
!= vex_encoding_default
)
6113 i
.error
= unsupported
;
6122 static const insn_template
*
6123 match_template (char mnem_suffix
)
6125 /* Points to template once we've found it. */
6126 const insn_template
*t
;
6127 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
6128 i386_operand_type overlap4
;
6129 unsigned int found_reverse_match
;
6130 i386_opcode_modifier suffix_check
;
6131 i386_operand_type operand_types
[MAX_OPERANDS
];
6132 int addr_prefix_disp
;
6133 unsigned int j
, size_match
, check_register
;
6134 enum i386_error specific_error
= 0;
6136 #if MAX_OPERANDS != 5
6137 # error "MAX_OPERANDS must be 5."
6140 found_reverse_match
= 0;
6141 addr_prefix_disp
= -1;
6143 /* Prepare for mnemonic suffix check. */
6144 memset (&suffix_check
, 0, sizeof (suffix_check
));
6145 switch (mnem_suffix
)
6147 case BYTE_MNEM_SUFFIX
:
6148 suffix_check
.no_bsuf
= 1;
6150 case WORD_MNEM_SUFFIX
:
6151 suffix_check
.no_wsuf
= 1;
6153 case SHORT_MNEM_SUFFIX
:
6154 suffix_check
.no_ssuf
= 1;
6156 case LONG_MNEM_SUFFIX
:
6157 suffix_check
.no_lsuf
= 1;
6159 case QWORD_MNEM_SUFFIX
:
6160 suffix_check
.no_qsuf
= 1;
6163 /* NB: In Intel syntax, normally we can check for memory operand
6164 size when there is no mnemonic suffix. But jmp and call have
6165 2 different encodings with Dword memory operand size, one with
6166 No_ldSuf and the other without. i.suffix is set to
6167 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
6168 if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
6169 suffix_check
.no_ldsuf
= 1;
6172 /* Must have right number of operands. */
6173 i
.error
= number_of_operands_mismatch
;
6175 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
6177 addr_prefix_disp
= -1;
6178 found_reverse_match
= 0;
6180 if (i
.operands
!= t
->operands
)
6183 /* Check processor support. */
6184 i
.error
= unsupported
;
6185 if (cpu_flags_match (t
) != CPU_FLAGS_PERFECT_MATCH
)
6188 /* Check AT&T mnemonic. */
6189 i
.error
= unsupported_with_intel_mnemonic
;
6190 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
6193 /* Check AT&T/Intel syntax. */
6194 i
.error
= unsupported_syntax
;
6195 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
6196 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
6199 /* Check Intel64/AMD64 ISA. */
6203 /* Default: Don't accept Intel64. */
6204 if (t
->opcode_modifier
.isa64
== INTEL64
)
6208 /* -mamd64: Don't accept Intel64 and Intel64 only. */
6209 if (t
->opcode_modifier
.isa64
>= INTEL64
)
6213 /* -mintel64: Don't accept AMD64. */
6214 if (t
->opcode_modifier
.isa64
== AMD64
&& flag_code
== CODE_64BIT
)
6219 /* Check the suffix. */
6220 i
.error
= invalid_instruction_suffix
;
6221 if ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
6222 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
6223 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
6224 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
6225 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
6226 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
))
6229 size_match
= operand_size_match (t
);
6233 /* This is intentionally not
6235 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
6237 as the case of a missing * on the operand is accepted (perhaps with
6238 a warning, issued further down). */
6239 if (i
.jumpabsolute
&& t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
6241 i
.error
= operand_type_mismatch
;
6245 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6246 operand_types
[j
] = t
->operand_types
[j
];
6248 /* In general, don't allow
6249 - 64-bit operands outside of 64-bit mode,
6250 - 32-bit operands on pre-386. */
6251 j
= i
.imm_operands
+ (t
->operands
> i
.imm_operands
+ 1);
6252 if (((i
.suffix
== QWORD_MNEM_SUFFIX
6253 && flag_code
!= CODE_64BIT
6254 && (t
->base_opcode
!= 0x0fc7
6255 || t
->extension_opcode
!= 1 /* cmpxchg8b */))
6256 || (i
.suffix
== LONG_MNEM_SUFFIX
6257 && !cpu_arch_flags
.bitfield
.cpui386
))
6259 ? (t
->opcode_modifier
.mnemonicsize
!= IGNORESIZE
6260 && !intel_float_operand (t
->name
))
6261 : intel_float_operand (t
->name
) != 2)
6262 && (t
->operands
== i
.imm_operands
6263 || (operand_types
[i
.imm_operands
].bitfield
.class != RegMMX
6264 && operand_types
[i
.imm_operands
].bitfield
.class != RegSIMD
6265 && operand_types
[i
.imm_operands
].bitfield
.class != RegMask
)
6266 || (operand_types
[j
].bitfield
.class != RegMMX
6267 && operand_types
[j
].bitfield
.class != RegSIMD
6268 && operand_types
[j
].bitfield
.class != RegMask
))
6269 && !t
->opcode_modifier
.vecsib
)
6272 /* Do not verify operands when there are none. */
6275 if (VEX_check_encoding (t
))
6277 specific_error
= i
.error
;
6281 /* We've found a match; break out of loop. */
6285 if (!t
->opcode_modifier
.jump
6286 || t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)
6288 /* There should be only one Disp operand. */
6289 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6290 if (operand_type_check (operand_types
[j
], disp
))
6292 if (j
< MAX_OPERANDS
)
6294 bfd_boolean override
= (i
.prefix
[ADDR_PREFIX
] != 0);
6296 addr_prefix_disp
= j
;
6298 /* Address size prefix will turn Disp64/Disp32S/Disp32/Disp16
6299 operand into Disp32/Disp32/Disp16/Disp32 operand. */
6303 override
= !override
;
6306 if (operand_types
[j
].bitfield
.disp32
6307 && operand_types
[j
].bitfield
.disp16
)
6309 operand_types
[j
].bitfield
.disp16
= override
;
6310 operand_types
[j
].bitfield
.disp32
= !override
;
6312 operand_types
[j
].bitfield
.disp32s
= 0;
6313 operand_types
[j
].bitfield
.disp64
= 0;
6317 if (operand_types
[j
].bitfield
.disp32s
6318 || operand_types
[j
].bitfield
.disp64
)
6320 operand_types
[j
].bitfield
.disp64
&= !override
;
6321 operand_types
[j
].bitfield
.disp32s
&= !override
;
6322 operand_types
[j
].bitfield
.disp32
= override
;
6324 operand_types
[j
].bitfield
.disp16
= 0;
6330 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
6331 if (i
.reloc
[0] == BFD_RELOC_386_GOT32
&& t
->base_opcode
== 0xa0)
6334 /* We check register size if needed. */
6335 if (t
->opcode_modifier
.checkregsize
)
6337 check_register
= (1 << t
->operands
) - 1;
6339 check_register
&= ~(1 << i
.broadcast
->operand
);
6344 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
6345 switch (t
->operands
)
6348 if (!operand_type_match (overlap0
, i
.types
[0]))
6352 /* xchg %eax, %eax is a special case. It is an alias for nop
6353 only in 32bit mode and we can use opcode 0x90. In 64bit
6354 mode, we can't use 0x90 for xchg %eax, %eax since it should
6355 zero-extend %eax to %rax. */
6356 if (flag_code
== CODE_64BIT
6357 && t
->base_opcode
== 0x90
6358 && i
.types
[0].bitfield
.instance
== Accum
6359 && i
.types
[0].bitfield
.dword
6360 && i
.types
[1].bitfield
.instance
== Accum
6361 && i
.types
[1].bitfield
.dword
)
6363 /* xrelease mov %eax, <disp> is another special case. It must not
6364 match the accumulator-only encoding of mov. */
6365 if (flag_code
!= CODE_64BIT
6367 && t
->base_opcode
== 0xa0
6368 && i
.types
[0].bitfield
.instance
== Accum
6369 && (i
.flags
[1] & Operand_Mem
))
6374 if (!(size_match
& MATCH_STRAIGHT
))
6376 /* Reverse direction of operands if swapping is possible in the first
6377 place (operands need to be symmetric) and
6378 - the load form is requested, and the template is a store form,
6379 - the store form is requested, and the template is a load form,
6380 - the non-default (swapped) form is requested. */
6381 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
6382 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
6383 && !operand_type_all_zero (&overlap1
))
6384 switch (i
.dir_encoding
)
6386 case dir_encoding_load
:
6387 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6388 || t
->opcode_modifier
.regmem
)
6392 case dir_encoding_store
:
6393 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6394 && !t
->opcode_modifier
.regmem
)
6398 case dir_encoding_swap
:
6401 case dir_encoding_default
:
6404 /* If we want store form, we skip the current load. */
6405 if ((i
.dir_encoding
== dir_encoding_store
6406 || i
.dir_encoding
== dir_encoding_swap
)
6407 && i
.mem_operands
== 0
6408 && t
->opcode_modifier
.load
)
6413 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
6414 if (!operand_type_match (overlap0
, i
.types
[0])
6415 || !operand_type_match (overlap1
, i
.types
[1])
6416 || ((check_register
& 3) == 3
6417 && !operand_type_register_match (i
.types
[0],
6422 /* Check if other direction is valid ... */
6423 if (!t
->opcode_modifier
.d
)
6427 if (!(size_match
& MATCH_REVERSE
))
6429 /* Try reversing direction of operands. */
6430 overlap0
= operand_type_and (i
.types
[0], operand_types
[i
.operands
- 1]);
6431 overlap1
= operand_type_and (i
.types
[i
.operands
- 1], operand_types
[0]);
6432 if (!operand_type_match (overlap0
, i
.types
[0])
6433 || !operand_type_match (overlap1
, i
.types
[i
.operands
- 1])
6435 && !operand_type_register_match (i
.types
[0],
6436 operand_types
[i
.operands
- 1],
6437 i
.types
[i
.operands
- 1],
6440 /* Does not match either direction. */
6443 /* found_reverse_match holds which of D or FloatR
6445 if (!t
->opcode_modifier
.d
)
6446 found_reverse_match
= 0;
6447 else if (operand_types
[0].bitfield
.tbyte
)
6448 found_reverse_match
= Opcode_FloatD
;
6449 else if (operand_types
[0].bitfield
.xmmword
6450 || operand_types
[i
.operands
- 1].bitfield
.xmmword
6451 || operand_types
[0].bitfield
.class == RegMMX
6452 || operand_types
[i
.operands
- 1].bitfield
.class == RegMMX
6453 || is_any_vex_encoding(t
))
6454 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
6455 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
6457 found_reverse_match
= Opcode_D
;
6458 if (t
->opcode_modifier
.floatr
)
6459 found_reverse_match
|= Opcode_FloatR
;
6463 /* Found a forward 2 operand match here. */
6464 switch (t
->operands
)
6467 overlap4
= operand_type_and (i
.types
[4],
6471 overlap3
= operand_type_and (i
.types
[3],
6475 overlap2
= operand_type_and (i
.types
[2],
6480 switch (t
->operands
)
6483 if (!operand_type_match (overlap4
, i
.types
[4])
6484 || !operand_type_register_match (i
.types
[3],
6491 if (!operand_type_match (overlap3
, i
.types
[3])
6492 || ((check_register
& 0xa) == 0xa
6493 && !operand_type_register_match (i
.types
[1],
6497 || ((check_register
& 0xc) == 0xc
6498 && !operand_type_register_match (i
.types
[2],
6505 /* Here we make use of the fact that there are no
6506 reverse match 3 operand instructions. */
6507 if (!operand_type_match (overlap2
, i
.types
[2])
6508 || ((check_register
& 5) == 5
6509 && !operand_type_register_match (i
.types
[0],
6513 || ((check_register
& 6) == 6
6514 && !operand_type_register_match (i
.types
[1],
6522 /* Found either forward/reverse 2, 3 or 4 operand match here:
6523 slip through to break. */
6526 /* Check if vector operands are valid. */
6527 if (check_VecOperands (t
))
6529 specific_error
= i
.error
;
6533 /* Check if VEX/EVEX encoding requirements can be satisfied. */
6534 if (VEX_check_encoding (t
))
6536 specific_error
= i
.error
;
6540 /* We've found a match; break out of loop. */
6544 if (t
== current_templates
->end
)
6546 /* We found no match. */
6547 const char *err_msg
;
6548 switch (specific_error
? specific_error
: i
.error
)
6552 case operand_size_mismatch
:
6553 err_msg
= _("operand size mismatch");
6555 case operand_type_mismatch
:
6556 err_msg
= _("operand type mismatch");
6558 case register_type_mismatch
:
6559 err_msg
= _("register type mismatch");
6561 case number_of_operands_mismatch
:
6562 err_msg
= _("number of operands mismatch");
6564 case invalid_instruction_suffix
:
6565 err_msg
= _("invalid instruction suffix");
6568 err_msg
= _("constant doesn't fit in 4 bits");
6570 case unsupported_with_intel_mnemonic
:
6571 err_msg
= _("unsupported with Intel mnemonic");
6573 case unsupported_syntax
:
6574 err_msg
= _("unsupported syntax");
6577 as_bad (_("unsupported instruction `%s'"),
6578 current_templates
->start
->name
);
6580 case invalid_vsib_address
:
6581 err_msg
= _("invalid VSIB address");
6583 case invalid_vector_register_set
:
6584 err_msg
= _("mask, index, and destination registers must be distinct");
6586 case unsupported_vector_index_register
:
6587 err_msg
= _("unsupported vector index register");
6589 case unsupported_broadcast
:
6590 err_msg
= _("unsupported broadcast");
6592 case broadcast_needed
:
6593 err_msg
= _("broadcast is needed for operand of such type");
6595 case unsupported_masking
:
6596 err_msg
= _("unsupported masking");
6598 case mask_not_on_destination
:
6599 err_msg
= _("mask not on destination operand");
6601 case no_default_mask
:
6602 err_msg
= _("default mask isn't allowed");
6604 case unsupported_rc_sae
:
6605 err_msg
= _("unsupported static rounding/sae");
6607 case rc_sae_operand_not_last_imm
:
6609 err_msg
= _("RC/SAE operand must precede immediate operands");
6611 err_msg
= _("RC/SAE operand must follow immediate operands");
6613 case invalid_register_operand
:
6614 err_msg
= _("invalid register operand");
6617 as_bad (_("%s for `%s'"), err_msg
,
6618 current_templates
->start
->name
);
6622 if (!quiet_warnings
)
6625 && (i
.jumpabsolute
!= (t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)))
6626 as_warn (_("indirect %s without `*'"), t
->name
);
6628 if (t
->opcode_modifier
.isprefix
6629 && t
->opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6631 /* Warn them that a data or address size prefix doesn't
6632 affect assembly of the next line of code. */
6633 as_warn (_("stand-alone `%s' prefix"), t
->name
);
6637 /* Copy the template we found. */
6640 if (addr_prefix_disp
!= -1)
6641 i
.tm
.operand_types
[addr_prefix_disp
]
6642 = operand_types
[addr_prefix_disp
];
6644 if (found_reverse_match
)
6646 /* If we found a reverse match we must alter the opcode direction
6647 bit and clear/flip the regmem modifier one. found_reverse_match
6648 holds bits to change (different for int & float insns). */
6650 i
.tm
.base_opcode
^= found_reverse_match
;
6652 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
6653 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
6655 /* Certain SIMD insns have their load forms specified in the opcode
6656 table, and hence we need to _set_ RegMem instead of clearing it.
6657 We need to avoid setting the bit though on insns like KMOVW. */
6658 i
.tm
.opcode_modifier
.regmem
6659 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
6660 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
6661 && !i
.tm
.opcode_modifier
.regmem
;
6670 unsigned int es_op
= i
.tm
.opcode_modifier
.isstring
- IS_STRING_ES_OP0
;
6671 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.baseindex
? es_op
: 0;
6673 if (i
.seg
[op
] != NULL
&& i
.seg
[op
] != &es
)
6675 as_bad (_("`%s' operand %u must use `%ses' segment"),
6677 intel_syntax
? i
.tm
.operands
- es_op
: es_op
+ 1,
6682 /* There's only ever one segment override allowed per instruction.
6683 This instruction possibly has a legal segment override on the
6684 second operand, so copy the segment to where non-string
6685 instructions store it, allowing common code. */
6686 i
.seg
[op
] = i
.seg
[1];
6692 process_suffix (void)
6694 /* If matched instruction specifies an explicit instruction mnemonic
6696 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
6697 i
.suffix
= WORD_MNEM_SUFFIX
;
6698 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
6699 i
.suffix
= LONG_MNEM_SUFFIX
;
6700 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
6701 i
.suffix
= QWORD_MNEM_SUFFIX
;
6702 else if (i
.reg_operands
6703 && (i
.operands
> 1 || i
.types
[0].bitfield
.class == Reg
)
6704 && !i
.tm
.opcode_modifier
.addrprefixopreg
)
6706 unsigned int numop
= i
.operands
;
6708 /* movsx/movzx want only their source operand considered here, for the
6709 ambiguity checking below. The suffix will be replaced afterwards
6710 to represent the destination (register). */
6711 if (((i
.tm
.base_opcode
| 8) == 0xfbe && i
.tm
.opcode_modifier
.w
)
6712 || (i
.tm
.base_opcode
== 0x63 && i
.tm
.cpu_flags
.bitfield
.cpu64
))
6715 /* crc32 needs REX.W set regardless of suffix / source operand size. */
6716 if (i
.tm
.base_opcode
== 0xf20f38f0
6717 && i
.tm
.operand_types
[1].bitfield
.qword
)
6720 /* If there's no instruction mnemonic suffix we try to invent one
6721 based on GPR operands. */
6724 /* We take i.suffix from the last register operand specified,
6725 Destination register type is more significant than source
6726 register type. crc32 in SSE4.2 prefers source register
6728 unsigned int op
= i
.tm
.base_opcode
!= 0xf20f38f0 ? i
.operands
: 1;
6731 if (i
.tm
.operand_types
[op
].bitfield
.instance
== InstanceNone
6732 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6734 if (i
.types
[op
].bitfield
.class != Reg
)
6736 if (i
.types
[op
].bitfield
.byte
)
6737 i
.suffix
= BYTE_MNEM_SUFFIX
;
6738 else if (i
.types
[op
].bitfield
.word
)
6739 i
.suffix
= WORD_MNEM_SUFFIX
;
6740 else if (i
.types
[op
].bitfield
.dword
)
6741 i
.suffix
= LONG_MNEM_SUFFIX
;
6742 else if (i
.types
[op
].bitfield
.qword
)
6743 i
.suffix
= QWORD_MNEM_SUFFIX
;
6749 /* As an exception, movsx/movzx silently default to a byte source
6751 if ((i
.tm
.base_opcode
| 8) == 0xfbe && i
.tm
.opcode_modifier
.w
6752 && !i
.suffix
&& !intel_syntax
)
6753 i
.suffix
= BYTE_MNEM_SUFFIX
;
6755 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
6758 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6759 && i
.tm
.opcode_modifier
.no_bsuf
)
6761 else if (!check_byte_reg ())
6764 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
6767 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6768 && i
.tm
.opcode_modifier
.no_lsuf
6769 && !i
.tm
.opcode_modifier
.todword
6770 && !i
.tm
.opcode_modifier
.toqword
)
6772 else if (!check_long_reg ())
6775 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
6778 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6779 && i
.tm
.opcode_modifier
.no_qsuf
6780 && !i
.tm
.opcode_modifier
.todword
6781 && !i
.tm
.opcode_modifier
.toqword
)
6783 else if (!check_qword_reg ())
6786 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
6789 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6790 && i
.tm
.opcode_modifier
.no_wsuf
)
6792 else if (!check_word_reg ())
6795 else if (intel_syntax
6796 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6797 /* Do nothing if the instruction is going to ignore the prefix. */
6802 /* Undo the movsx/movzx change done above. */
6805 else if (i
.tm
.opcode_modifier
.mnemonicsize
== DEFAULTSIZE
6808 i
.suffix
= stackop_size
;
6809 if (stackop_size
== LONG_MNEM_SUFFIX
)
6811 /* stackop_size is set to LONG_MNEM_SUFFIX for the
6812 .code16gcc directive to support 16-bit mode with
6813 32-bit address. For IRET without a suffix, generate
6814 16-bit IRET (opcode 0xcf) to return from an interrupt
6816 if (i
.tm
.base_opcode
== 0xcf)
6818 i
.suffix
= WORD_MNEM_SUFFIX
;
6819 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
6821 /* Warn about changed behavior for segment register push/pop. */
6822 else if ((i
.tm
.base_opcode
| 1) == 0x07)
6823 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
6828 && (i
.tm
.opcode_modifier
.jump
== JUMP_ABSOLUTE
6829 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
6830 || i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
6831 || (i
.tm
.base_opcode
== 0x0f01 /* [ls][gi]dt */
6832 && i
.tm
.extension_opcode
<= 3)))
6837 if (!i
.tm
.opcode_modifier
.no_qsuf
)
6839 i
.suffix
= QWORD_MNEM_SUFFIX
;
6844 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6845 i
.suffix
= LONG_MNEM_SUFFIX
;
6848 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6849 i
.suffix
= WORD_MNEM_SUFFIX
;
6855 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
6856 /* Also cover lret/retf/iret in 64-bit mode. */
6857 || (flag_code
== CODE_64BIT
6858 && !i
.tm
.opcode_modifier
.no_lsuf
6859 && !i
.tm
.opcode_modifier
.no_qsuf
))
6860 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
6861 /* Accept FLDENV et al without suffix. */
6862 && (i
.tm
.opcode_modifier
.no_ssuf
|| i
.tm
.opcode_modifier
.floatmf
))
6864 unsigned int suffixes
, evex
= 0;
6866 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
6867 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6869 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6871 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
6873 if (!i
.tm
.opcode_modifier
.no_ssuf
)
6875 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
6878 /* For [XYZ]MMWORD operands inspect operand sizes. While generally
6879 also suitable for AT&T syntax mode, it was requested that this be
6880 restricted to just Intel syntax. */
6881 if (intel_syntax
&& is_any_vex_encoding (&i
.tm
) && !i
.broadcast
)
6885 for (op
= 0; op
< i
.tm
.operands
; ++op
)
6887 if (is_evex_encoding (&i
.tm
)
6888 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
6890 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
6891 i
.tm
.operand_types
[op
].bitfield
.xmmword
= 0;
6892 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
6893 i
.tm
.operand_types
[op
].bitfield
.ymmword
= 0;
6894 if (!i
.tm
.opcode_modifier
.evex
6895 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
6896 i
.tm
.opcode_modifier
.evex
= EVEX512
;
6899 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
6900 + i
.tm
.operand_types
[op
].bitfield
.ymmword
6901 + i
.tm
.operand_types
[op
].bitfield
.zmmword
< 2)
6904 /* Any properly sized operand disambiguates the insn. */
6905 if (i
.types
[op
].bitfield
.xmmword
6906 || i
.types
[op
].bitfield
.ymmword
6907 || i
.types
[op
].bitfield
.zmmword
)
6909 suffixes
&= ~(7 << 6);
6914 if ((i
.flags
[op
] & Operand_Mem
)
6915 && i
.tm
.operand_types
[op
].bitfield
.unspecified
)
6917 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
)
6919 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
6921 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
6923 if (is_evex_encoding (&i
.tm
))
6929 /* Are multiple suffixes / operand sizes allowed? */
6930 if (suffixes
& (suffixes
- 1))
6933 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
6934 || operand_check
== check_error
))
6936 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
6939 if (operand_check
== check_error
)
6941 as_bad (_("no instruction mnemonic suffix given and "
6942 "no register operands; can't size `%s'"), i
.tm
.name
);
6945 if (operand_check
== check_warning
)
6946 as_warn (_("%s; using default for `%s'"),
6948 ? _("ambiguous operand size")
6949 : _("no instruction mnemonic suffix given and "
6950 "no register operands"),
6953 if (i
.tm
.opcode_modifier
.floatmf
)
6954 i
.suffix
= SHORT_MNEM_SUFFIX
;
6955 else if ((i
.tm
.base_opcode
| 8) == 0xfbe
6956 || (i
.tm
.base_opcode
== 0x63
6957 && i
.tm
.cpu_flags
.bitfield
.cpu64
))
6958 /* handled below */;
6960 i
.tm
.opcode_modifier
.evex
= evex
;
6961 else if (flag_code
== CODE_16BIT
)
6962 i
.suffix
= WORD_MNEM_SUFFIX
;
6963 else if (!i
.tm
.opcode_modifier
.no_lsuf
)
6964 i
.suffix
= LONG_MNEM_SUFFIX
;
6966 i
.suffix
= QWORD_MNEM_SUFFIX
;
6970 if ((i
.tm
.base_opcode
| 8) == 0xfbe
6971 || (i
.tm
.base_opcode
== 0x63 && i
.tm
.cpu_flags
.bitfield
.cpu64
))
6973 /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
6974 In AT&T syntax, if there is no suffix (warned about above), the default
6975 will be byte extension. */
6976 if (i
.tm
.opcode_modifier
.w
&& i
.suffix
&& i
.suffix
!= BYTE_MNEM_SUFFIX
)
6977 i
.tm
.base_opcode
|= 1;
6979 /* For further processing, the suffix should represent the destination
6980 (register). This is already the case when one was used with
6981 mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
6982 no suffix to begin with. */
6983 if (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63 || !i
.suffix
)
6985 if (i
.types
[1].bitfield
.word
)
6986 i
.suffix
= WORD_MNEM_SUFFIX
;
6987 else if (i
.types
[1].bitfield
.qword
)
6988 i
.suffix
= QWORD_MNEM_SUFFIX
;
6990 i
.suffix
= LONG_MNEM_SUFFIX
;
6992 i
.tm
.opcode_modifier
.w
= 0;
6996 if (!i
.tm
.opcode_modifier
.modrm
&& i
.reg_operands
&& i
.tm
.operands
< 3)
6997 i
.short_form
= (i
.tm
.operand_types
[0].bitfield
.class == Reg
)
6998 != (i
.tm
.operand_types
[1].bitfield
.class == Reg
);
7000 /* Change the opcode based on the operand size given by i.suffix. */
7003 /* Size floating point instruction. */
7004 case LONG_MNEM_SUFFIX
:
7005 if (i
.tm
.opcode_modifier
.floatmf
)
7007 i
.tm
.base_opcode
^= 4;
7011 case WORD_MNEM_SUFFIX
:
7012 case QWORD_MNEM_SUFFIX
:
7013 /* It's not a byte, select word/dword operation. */
7014 if (i
.tm
.opcode_modifier
.w
)
7017 i
.tm
.base_opcode
|= 8;
7019 i
.tm
.base_opcode
|= 1;
7022 case SHORT_MNEM_SUFFIX
:
7023 /* Now select between word & dword operations via the operand
7024 size prefix, except for instructions that will ignore this
7026 if (i
.suffix
!= QWORD_MNEM_SUFFIX
7027 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7028 && !i
.tm
.opcode_modifier
.floatmf
7029 && !is_any_vex_encoding (&i
.tm
)
7030 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
7031 || (flag_code
== CODE_64BIT
7032 && i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)))
7034 unsigned int prefix
= DATA_PREFIX_OPCODE
;
7036 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
) /* jcxz, loop */
7037 prefix
= ADDR_PREFIX_OPCODE
;
7039 if (!add_prefix (prefix
))
7043 /* Set mode64 for an operand. */
7044 if (i
.suffix
== QWORD_MNEM_SUFFIX
7045 && flag_code
== CODE_64BIT
7046 && !i
.tm
.opcode_modifier
.norex64
7047 && !i
.tm
.opcode_modifier
.vexw
7048 /* Special case for xchg %rax,%rax. It is NOP and doesn't
7050 && ! (i
.operands
== 2
7051 && i
.tm
.base_opcode
== 0x90
7052 && i
.tm
.extension_opcode
== None
7053 && i
.types
[0].bitfield
.instance
== Accum
7054 && i
.types
[0].bitfield
.qword
7055 && i
.types
[1].bitfield
.instance
== Accum
7056 && i
.types
[1].bitfield
.qword
))
7062 if (i
.tm
.opcode_modifier
.addrprefixopreg
)
7064 gas_assert (!i
.suffix
);
7065 gas_assert (i
.reg_operands
);
7067 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7070 /* The address size override prefix changes the size of the
7072 if (flag_code
== CODE_64BIT
7073 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
7075 as_bad (_("16-bit addressing unavailable for `%s'"),
7080 if ((flag_code
== CODE_32BIT
7081 ? i
.op
[0].regs
->reg_type
.bitfield
.word
7082 : i
.op
[0].regs
->reg_type
.bitfield
.dword
)
7083 && !add_prefix (ADDR_PREFIX_OPCODE
))
7088 /* Check invalid register operand when the address size override
7089 prefix changes the size of register operands. */
7091 enum { need_word
, need_dword
, need_qword
} need
;
7093 if (flag_code
== CODE_32BIT
)
7094 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
7095 else if (i
.prefix
[ADDR_PREFIX
])
7098 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
7100 for (op
= 0; op
< i
.operands
; op
++)
7102 if (i
.types
[op
].bitfield
.class != Reg
)
7108 if (i
.op
[op
].regs
->reg_type
.bitfield
.word
)
7112 if (i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
7116 if (i
.op
[op
].regs
->reg_type
.bitfield
.qword
)
7121 as_bad (_("invalid register operand size for `%s'"),
7132 check_byte_reg (void)
7136 for (op
= i
.operands
; --op
>= 0;)
7138 /* Skip non-register operands. */
7139 if (i
.types
[op
].bitfield
.class != Reg
)
7142 /* If this is an eight bit register, it's OK. If it's the 16 or
7143 32 bit version of an eight bit register, we will just use the
7144 low portion, and that's OK too. */
7145 if (i
.types
[op
].bitfield
.byte
)
7148 /* I/O port address operands are OK too. */
7149 if (i
.tm
.operand_types
[op
].bitfield
.instance
== RegD
7150 && i
.tm
.operand_types
[op
].bitfield
.word
)
7153 /* crc32 only wants its source operand checked here. */
7154 if (i
.tm
.base_opcode
== 0xf20f38f0 && op
)
7157 /* Any other register is bad. */
7158 as_bad (_("`%s%s' not allowed with `%s%c'"),
7159 register_prefix
, i
.op
[op
].regs
->reg_name
,
7160 i
.tm
.name
, i
.suffix
);
7167 check_long_reg (void)
7171 for (op
= i
.operands
; --op
>= 0;)
7172 /* Skip non-register operands. */
7173 if (i
.types
[op
].bitfield
.class != Reg
)
7175 /* Reject eight bit registers, except where the template requires
7176 them. (eg. movzb) */
7177 else if (i
.types
[op
].bitfield
.byte
7178 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7179 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7180 && (i
.tm
.operand_types
[op
].bitfield
.word
7181 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7183 as_bad (_("`%s%s' not allowed with `%s%c'"),
7185 i
.op
[op
].regs
->reg_name
,
7190 /* Error if the e prefix on a general reg is missing. */
7191 else if (i
.types
[op
].bitfield
.word
7192 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7193 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7194 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7196 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7197 register_prefix
, i
.op
[op
].regs
->reg_name
,
7201 /* Warn if the r prefix on a general reg is present. */
7202 else if (i
.types
[op
].bitfield
.qword
7203 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7204 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7205 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7208 && i
.tm
.opcode_modifier
.toqword
7209 && i
.types
[0].bitfield
.class != RegSIMD
)
7211 /* Convert to QWORD. We want REX byte. */
7212 i
.suffix
= QWORD_MNEM_SUFFIX
;
7216 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7217 register_prefix
, i
.op
[op
].regs
->reg_name
,
7226 check_qword_reg (void)
7230 for (op
= i
.operands
; --op
>= 0; )
7231 /* Skip non-register operands. */
7232 if (i
.types
[op
].bitfield
.class != Reg
)
7234 /* Reject eight bit registers, except where the template requires
7235 them. (eg. movzb) */
7236 else if (i
.types
[op
].bitfield
.byte
7237 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7238 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7239 && (i
.tm
.operand_types
[op
].bitfield
.word
7240 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7242 as_bad (_("`%s%s' not allowed with `%s%c'"),
7244 i
.op
[op
].regs
->reg_name
,
7249 /* Warn if the r prefix on a general reg is missing. */
7250 else if ((i
.types
[op
].bitfield
.word
7251 || i
.types
[op
].bitfield
.dword
)
7252 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7253 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7254 && i
.tm
.operand_types
[op
].bitfield
.qword
)
7256 /* Prohibit these changes in the 64bit mode, since the
7257 lowering is more complicated. */
7259 && i
.tm
.opcode_modifier
.todword
7260 && i
.types
[0].bitfield
.class != RegSIMD
)
7262 /* Convert to DWORD. We don't want REX byte. */
7263 i
.suffix
= LONG_MNEM_SUFFIX
;
7267 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7268 register_prefix
, i
.op
[op
].regs
->reg_name
,
7277 check_word_reg (void)
7280 for (op
= i
.operands
; --op
>= 0;)
7281 /* Skip non-register operands. */
7282 if (i
.types
[op
].bitfield
.class != Reg
)
7284 /* Reject eight bit registers, except where the template requires
7285 them. (eg. movzb) */
7286 else if (i
.types
[op
].bitfield
.byte
7287 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7288 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7289 && (i
.tm
.operand_types
[op
].bitfield
.word
7290 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7292 as_bad (_("`%s%s' not allowed with `%s%c'"),
7294 i
.op
[op
].regs
->reg_name
,
7299 /* Error if the e or r prefix on a general reg is present. */
7300 else if ((i
.types
[op
].bitfield
.dword
7301 || i
.types
[op
].bitfield
.qword
)
7302 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7303 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7304 && i
.tm
.operand_types
[op
].bitfield
.word
)
7306 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7307 register_prefix
, i
.op
[op
].regs
->reg_name
,
7315 update_imm (unsigned int j
)
7317 i386_operand_type overlap
= i
.types
[j
];
7318 if ((overlap
.bitfield
.imm8
7319 || overlap
.bitfield
.imm8s
7320 || overlap
.bitfield
.imm16
7321 || overlap
.bitfield
.imm32
7322 || overlap
.bitfield
.imm32s
7323 || overlap
.bitfield
.imm64
)
7324 && !operand_type_equal (&overlap
, &imm8
)
7325 && !operand_type_equal (&overlap
, &imm8s
)
7326 && !operand_type_equal (&overlap
, &imm16
)
7327 && !operand_type_equal (&overlap
, &imm32
)
7328 && !operand_type_equal (&overlap
, &imm32s
)
7329 && !operand_type_equal (&overlap
, &imm64
))
7333 i386_operand_type temp
;
7335 operand_type_set (&temp
, 0);
7336 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7338 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
7339 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
7341 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7342 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
7343 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7345 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
7346 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
7349 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
7352 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
7353 || operand_type_equal (&overlap
, &imm16_32
)
7354 || operand_type_equal (&overlap
, &imm16_32s
))
7356 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
7361 if (!operand_type_equal (&overlap
, &imm8
)
7362 && !operand_type_equal (&overlap
, &imm8s
)
7363 && !operand_type_equal (&overlap
, &imm16
)
7364 && !operand_type_equal (&overlap
, &imm32
)
7365 && !operand_type_equal (&overlap
, &imm32s
)
7366 && !operand_type_equal (&overlap
, &imm64
))
7368 as_bad (_("no instruction mnemonic suffix given; "
7369 "can't determine immediate size"));
7373 i
.types
[j
] = overlap
;
7383 /* Update the first 2 immediate operands. */
7384 n
= i
.operands
> 2 ? 2 : i
.operands
;
7387 for (j
= 0; j
< n
; j
++)
7388 if (update_imm (j
) == 0)
7391 /* The 3rd operand can't be immediate operand. */
7392 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
7399 process_operands (void)
7401 /* Default segment register this instruction will use for memory
7402 accesses. 0 means unknown. This is only for optimizing out
7403 unnecessary segment overrides. */
7404 const seg_entry
*default_seg
= 0;
7406 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
7408 unsigned int dupl
= i
.operands
;
7409 unsigned int dest
= dupl
- 1;
7412 /* The destination must be an xmm register. */
7413 gas_assert (i
.reg_operands
7414 && MAX_OPERANDS
> dupl
7415 && operand_type_equal (&i
.types
[dest
], ®xmm
));
7417 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7418 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7420 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
7422 /* Keep xmm0 for instructions with VEX prefix and 3
7424 i
.tm
.operand_types
[0].bitfield
.instance
= InstanceNone
;
7425 i
.tm
.operand_types
[0].bitfield
.class = RegSIMD
;
7430 /* We remove the first xmm0 and keep the number of
7431 operands unchanged, which in fact duplicates the
7433 for (j
= 1; j
< i
.operands
; j
++)
7435 i
.op
[j
- 1] = i
.op
[j
];
7436 i
.types
[j
- 1] = i
.types
[j
];
7437 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7438 i
.flags
[j
- 1] = i
.flags
[j
];
7442 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
7444 gas_assert ((MAX_OPERANDS
- 1) > dupl
7445 && (i
.tm
.opcode_modifier
.vexsources
7448 /* Add the implicit xmm0 for instructions with VEX prefix
7450 for (j
= i
.operands
; j
> 0; j
--)
7452 i
.op
[j
] = i
.op
[j
- 1];
7453 i
.types
[j
] = i
.types
[j
- 1];
7454 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
7455 i
.flags
[j
] = i
.flags
[j
- 1];
7458 = (const reg_entry
*) hash_find (reg_hash
, "xmm0");
7459 i
.types
[0] = regxmm
;
7460 i
.tm
.operand_types
[0] = regxmm
;
7463 i
.reg_operands
+= 2;
7468 i
.op
[dupl
] = i
.op
[dest
];
7469 i
.types
[dupl
] = i
.types
[dest
];
7470 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7471 i
.flags
[dupl
] = i
.flags
[dest
];
7480 i
.op
[dupl
] = i
.op
[dest
];
7481 i
.types
[dupl
] = i
.types
[dest
];
7482 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7483 i
.flags
[dupl
] = i
.flags
[dest
];
7486 if (i
.tm
.opcode_modifier
.immext
)
7489 else if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7490 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7494 for (j
= 1; j
< i
.operands
; j
++)
7496 i
.op
[j
- 1] = i
.op
[j
];
7497 i
.types
[j
- 1] = i
.types
[j
];
7499 /* We need to adjust fields in i.tm since they are used by
7500 build_modrm_byte. */
7501 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7503 i
.flags
[j
- 1] = i
.flags
[j
];
7510 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
7512 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
7514 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7515 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.class == RegSIMD
);
7516 regnum
= register_number (i
.op
[1].regs
);
7517 first_reg_in_group
= regnum
& ~3;
7518 last_reg_in_group
= first_reg_in_group
+ 3;
7519 if (regnum
!= first_reg_in_group
)
7520 as_warn (_("source register `%s%s' implicitly denotes"
7521 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7522 register_prefix
, i
.op
[1].regs
->reg_name
,
7523 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
7524 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
7527 else if (i
.tm
.opcode_modifier
.regkludge
)
7529 /* The imul $imm, %reg instruction is converted into
7530 imul $imm, %reg, %reg, and the clr %reg instruction
7531 is converted into xor %reg, %reg. */
7533 unsigned int first_reg_op
;
7535 if (operand_type_check (i
.types
[0], reg
))
7539 /* Pretend we saw the extra register operand. */
7540 gas_assert (i
.reg_operands
== 1
7541 && i
.op
[first_reg_op
+ 1].regs
== 0);
7542 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
7543 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
7548 if (i
.tm
.opcode_modifier
.modrm
)
7550 /* The opcode is completed (modulo i.tm.extension_opcode which
7551 must be put into the modrm byte). Now, we make the modrm and
7552 index base bytes based on all the info we've collected. */
7554 default_seg
= build_modrm_byte ();
7556 else if (i
.types
[0].bitfield
.class == SReg
)
7558 if (flag_code
!= CODE_64BIT
7559 ? i
.tm
.base_opcode
== POP_SEG_SHORT
7560 && i
.op
[0].regs
->reg_num
== 1
7561 : (i
.tm
.base_opcode
| 1) == POP_SEG386_SHORT
7562 && i
.op
[0].regs
->reg_num
< 4)
7564 as_bad (_("you can't `%s %s%s'"),
7565 i
.tm
.name
, register_prefix
, i
.op
[0].regs
->reg_name
);
7568 if ( i
.op
[0].regs
->reg_num
> 3 && i
.tm
.opcode_length
== 1 )
7570 i
.tm
.base_opcode
^= POP_SEG_SHORT
^ POP_SEG386_SHORT
;
7571 i
.tm
.opcode_length
= 2;
7573 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
7575 else if ((i
.tm
.base_opcode
& ~0x3) == MOV_AX_DISP32
)
7579 else if (i
.tm
.opcode_modifier
.isstring
)
7581 /* For the string instructions that allow a segment override
7582 on one of their operands, the default segment is ds. */
7585 else if (i
.short_form
)
7587 /* The register or float register operand is in operand
7589 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.class != Reg
;
7591 /* Register goes in low 3 bits of opcode. */
7592 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
7593 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7595 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
7597 /* Warn about some common errors, but press on regardless.
7598 The first case can be generated by gcc (<= 2.8.1). */
7599 if (i
.operands
== 2)
7601 /* Reversed arguments on faddp, fsubp, etc. */
7602 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
7603 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
7604 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
7608 /* Extraneous `l' suffix on fp insn. */
7609 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
7610 register_prefix
, i
.op
[0].regs
->reg_name
);
7615 if ((i
.seg
[0] || i
.prefix
[SEG_PREFIX
])
7616 && i
.tm
.base_opcode
== 0x8d /* lea */
7617 && !is_any_vex_encoding(&i
.tm
))
7619 if (!quiet_warnings
)
7620 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
7624 i
.prefix
[SEG_PREFIX
] = 0;
7628 /* If a segment was explicitly specified, and the specified segment
7629 is neither the default nor the one already recorded from a prefix,
7630 use an opcode prefix to select it. If we never figured out what
7631 the default segment is, then default_seg will be zero at this
7632 point, and the specified segment prefix will always be used. */
7634 && i
.seg
[0] != default_seg
7635 && i
.seg
[0]->seg_prefix
!= i
.prefix
[SEG_PREFIX
])
7637 if (!add_prefix (i
.seg
[0]->seg_prefix
))
7643 static const seg_entry
*
7644 build_modrm_byte (void)
7646 const seg_entry
*default_seg
= 0;
7647 unsigned int source
, dest
;
7650 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
7653 unsigned int nds
, reg_slot
;
7656 dest
= i
.operands
- 1;
7659 /* There are 2 kinds of instructions:
7660 1. 5 operands: 4 register operands or 3 register operands
7661 plus 1 memory operand plus one Imm4 operand, VexXDS, and
7662 VexW0 or VexW1. The destination must be either XMM, YMM or
7664 2. 4 operands: 4 register operands or 3 register operands
7665 plus 1 memory operand, with VexXDS. */
7666 gas_assert ((i
.reg_operands
== 4
7667 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
7668 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7669 && i
.tm
.opcode_modifier
.vexw
7670 && i
.tm
.operand_types
[dest
].bitfield
.class == RegSIMD
);
7672 /* If VexW1 is set, the first non-immediate operand is the source and
7673 the second non-immediate one is encoded in the immediate operand. */
7674 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
7676 source
= i
.imm_operands
;
7677 reg_slot
= i
.imm_operands
+ 1;
7681 source
= i
.imm_operands
+ 1;
7682 reg_slot
= i
.imm_operands
;
7685 if (i
.imm_operands
== 0)
7687 /* When there is no immediate operand, generate an 8bit
7688 immediate operand to encode the first operand. */
7689 exp
= &im_expressions
[i
.imm_operands
++];
7690 i
.op
[i
.operands
].imms
= exp
;
7691 i
.types
[i
.operands
] = imm8
;
7694 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7695 exp
->X_op
= O_constant
;
7696 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
7697 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7701 gas_assert (i
.imm_operands
== 1);
7702 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
7703 gas_assert (!i
.tm
.opcode_modifier
.immext
);
7705 /* Turn on Imm8 again so that output_imm will generate it. */
7706 i
.types
[0].bitfield
.imm8
= 1;
7708 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7709 i
.op
[0].imms
->X_add_number
7710 |= register_number (i
.op
[reg_slot
].regs
) << 4;
7711 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7714 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.class == RegSIMD
);
7715 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
7720 /* i.reg_operands MUST be the number of real register operands;
7721 implicit registers do not count. If there are 3 register
7722 operands, it must be a instruction with VexNDS. For a
7723 instruction with VexNDD, the destination register is encoded
7724 in VEX prefix. If there are 4 register operands, it must be
7725 a instruction with VEX prefix and 3 sources. */
7726 if (i
.mem_operands
== 0
7727 && ((i
.reg_operands
== 2
7728 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
7729 || (i
.reg_operands
== 3
7730 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7731 || (i
.reg_operands
== 4 && vex_3_sources
)))
7739 /* When there are 3 operands, one of them may be immediate,
7740 which may be the first or the last operand. Otherwise,
7741 the first operand must be shift count register (cl) or it
7742 is an instruction with VexNDS. */
7743 gas_assert (i
.imm_operands
== 1
7744 || (i
.imm_operands
== 0
7745 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7746 || (i
.types
[0].bitfield
.instance
== RegC
7747 && i
.types
[0].bitfield
.byte
))));
7748 if (operand_type_check (i
.types
[0], imm
)
7749 || (i
.types
[0].bitfield
.instance
== RegC
7750 && i
.types
[0].bitfield
.byte
))
7756 /* When there are 4 operands, the first two must be 8bit
7757 immediate operands. The source operand will be the 3rd
7760 For instructions with VexNDS, if the first operand
7761 an imm8, the source operand is the 2nd one. If the last
7762 operand is imm8, the source operand is the first one. */
7763 gas_assert ((i
.imm_operands
== 2
7764 && i
.types
[0].bitfield
.imm8
7765 && i
.types
[1].bitfield
.imm8
)
7766 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7767 && i
.imm_operands
== 1
7768 && (i
.types
[0].bitfield
.imm8
7769 || i
.types
[i
.operands
- 1].bitfield
.imm8
7771 if (i
.imm_operands
== 2)
7775 if (i
.types
[0].bitfield
.imm8
)
7782 if (is_evex_encoding (&i
.tm
))
7784 /* For EVEX instructions, when there are 5 operands, the
7785 first one must be immediate operand. If the second one
7786 is immediate operand, the source operand is the 3th
7787 one. If the last one is immediate operand, the source
7788 operand is the 2nd one. */
7789 gas_assert (i
.imm_operands
== 2
7790 && i
.tm
.opcode_modifier
.sae
7791 && operand_type_check (i
.types
[0], imm
));
7792 if (operand_type_check (i
.types
[1], imm
))
7794 else if (operand_type_check (i
.types
[4], imm
))
7808 /* RC/SAE operand could be between DEST and SRC. That happens
7809 when one operand is GPR and the other one is XMM/YMM/ZMM
7811 if (i
.rounding
&& i
.rounding
->operand
== (int) dest
)
7814 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7816 /* For instructions with VexNDS, the register-only source
7817 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
7818 register. It is encoded in VEX prefix. */
7820 i386_operand_type op
;
7823 /* Check register-only source operand when two source
7824 operands are swapped. */
7825 if (!i
.tm
.operand_types
[source
].bitfield
.baseindex
7826 && i
.tm
.operand_types
[dest
].bitfield
.baseindex
)
7834 op
= i
.tm
.operand_types
[vvvv
];
7835 if ((dest
+ 1) >= i
.operands
7836 || ((op
.bitfield
.class != Reg
7837 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
7838 && op
.bitfield
.class != RegSIMD
7839 && !operand_type_equal (&op
, ®mask
)))
7841 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
7847 /* One of the register operands will be encoded in the i.rm.reg
7848 field, the other in the combined i.rm.mode and i.rm.regmem
7849 fields. If no form of this instruction supports a memory
7850 destination operand, then we assume the source operand may
7851 sometimes be a memory operand and so we need to store the
7852 destination in the i.rm.reg field. */
7853 if (!i
.tm
.opcode_modifier
.regmem
7854 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
7856 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
7857 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
7858 if (i
.op
[dest
].regs
->reg_type
.bitfield
.class == RegMMX
7859 || i
.op
[source
].regs
->reg_type
.bitfield
.class == RegMMX
)
7860 i
.has_regmmx
= TRUE
;
7861 else if (i
.op
[dest
].regs
->reg_type
.bitfield
.class == RegSIMD
7862 || i
.op
[source
].regs
->reg_type
.bitfield
.class == RegSIMD
)
7864 if (i
.types
[dest
].bitfield
.zmmword
7865 || i
.types
[source
].bitfield
.zmmword
)
7866 i
.has_regzmm
= TRUE
;
7867 else if (i
.types
[dest
].bitfield
.ymmword
7868 || i
.types
[source
].bitfield
.ymmword
)
7869 i
.has_regymm
= TRUE
;
7871 i
.has_regxmm
= TRUE
;
7873 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
7875 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
7877 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
7879 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
7884 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
7885 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
7886 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
7888 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
7890 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
7892 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
7895 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
7897 if (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class != RegCR
)
7900 add_prefix (LOCK_PREFIX_OPCODE
);
7904 { /* If it's not 2 reg operands... */
7909 unsigned int fake_zero_displacement
= 0;
7912 for (op
= 0; op
< i
.operands
; op
++)
7913 if (i
.flags
[op
] & Operand_Mem
)
7915 gas_assert (op
< i
.operands
);
7917 if (i
.tm
.opcode_modifier
.vecsib
)
7919 if (i
.index_reg
->reg_num
== RegIZ
)
7922 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7925 i
.sib
.base
= NO_BASE_REGISTER
;
7926 i
.sib
.scale
= i
.log2_scale_factor
;
7927 i
.types
[op
].bitfield
.disp8
= 0;
7928 i
.types
[op
].bitfield
.disp16
= 0;
7929 i
.types
[op
].bitfield
.disp64
= 0;
7930 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
7932 /* Must be 32 bit */
7933 i
.types
[op
].bitfield
.disp32
= 1;
7934 i
.types
[op
].bitfield
.disp32s
= 0;
7938 i
.types
[op
].bitfield
.disp32
= 0;
7939 i
.types
[op
].bitfield
.disp32s
= 1;
7942 i
.sib
.index
= i
.index_reg
->reg_num
;
7943 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7945 if ((i
.index_reg
->reg_flags
& RegVRex
) != 0)
7951 if (i
.base_reg
== 0)
7954 if (!i
.disp_operands
)
7955 fake_zero_displacement
= 1;
7956 if (i
.index_reg
== 0)
7958 i386_operand_type newdisp
;
7960 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7961 /* Operand is just <disp> */
7962 if (flag_code
== CODE_64BIT
)
7964 /* 64bit mode overwrites the 32bit absolute
7965 addressing by RIP relative addressing and
7966 absolute addressing is encoded by one of the
7967 redundant SIB forms. */
7968 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7969 i
.sib
.base
= NO_BASE_REGISTER
;
7970 i
.sib
.index
= NO_INDEX_REGISTER
;
7971 newdisp
= (!i
.prefix
[ADDR_PREFIX
] ? disp32s
: disp32
);
7973 else if ((flag_code
== CODE_16BIT
)
7974 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
7976 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
7981 i
.rm
.regmem
= NO_BASE_REGISTER
;
7984 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
7985 i
.types
[op
] = operand_type_or (i
.types
[op
], newdisp
);
7987 else if (!i
.tm
.opcode_modifier
.vecsib
)
7989 /* !i.base_reg && i.index_reg */
7990 if (i
.index_reg
->reg_num
== RegIZ
)
7991 i
.sib
.index
= NO_INDEX_REGISTER
;
7993 i
.sib
.index
= i
.index_reg
->reg_num
;
7994 i
.sib
.base
= NO_BASE_REGISTER
;
7995 i
.sib
.scale
= i
.log2_scale_factor
;
7996 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7997 i
.types
[op
].bitfield
.disp8
= 0;
7998 i
.types
[op
].bitfield
.disp16
= 0;
7999 i
.types
[op
].bitfield
.disp64
= 0;
8000 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
8002 /* Must be 32 bit */
8003 i
.types
[op
].bitfield
.disp32
= 1;
8004 i
.types
[op
].bitfield
.disp32s
= 0;
8008 i
.types
[op
].bitfield
.disp32
= 0;
8009 i
.types
[op
].bitfield
.disp32s
= 1;
8011 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8015 /* RIP addressing for 64bit mode. */
8016 else if (i
.base_reg
->reg_num
== RegIP
)
8018 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
8019 i
.rm
.regmem
= NO_BASE_REGISTER
;
8020 i
.types
[op
].bitfield
.disp8
= 0;
8021 i
.types
[op
].bitfield
.disp16
= 0;
8022 i
.types
[op
].bitfield
.disp32
= 0;
8023 i
.types
[op
].bitfield
.disp32s
= 1;
8024 i
.types
[op
].bitfield
.disp64
= 0;
8025 i
.flags
[op
] |= Operand_PCrel
;
8026 if (! i
.disp_operands
)
8027 fake_zero_displacement
= 1;
8029 else if (i
.base_reg
->reg_type
.bitfield
.word
)
8031 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
8032 switch (i
.base_reg
->reg_num
)
8035 if (i
.index_reg
== 0)
8037 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
8038 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
8042 if (i
.index_reg
== 0)
8045 if (operand_type_check (i
.types
[op
], disp
) == 0)
8047 /* fake (%bp) into 0(%bp) */
8048 i
.types
[op
].bitfield
.disp8
= 1;
8049 fake_zero_displacement
= 1;
8052 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
8053 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
8055 default: /* (%si) -> 4 or (%di) -> 5 */
8056 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
8058 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8060 else /* i.base_reg and 32/64 bit mode */
8062 if (flag_code
== CODE_64BIT
8063 && operand_type_check (i
.types
[op
], disp
))
8065 i
.types
[op
].bitfield
.disp16
= 0;
8066 i
.types
[op
].bitfield
.disp64
= 0;
8067 if (i
.prefix
[ADDR_PREFIX
] == 0)
8069 i
.types
[op
].bitfield
.disp32
= 0;
8070 i
.types
[op
].bitfield
.disp32s
= 1;
8074 i
.types
[op
].bitfield
.disp32
= 1;
8075 i
.types
[op
].bitfield
.disp32s
= 0;
8079 if (!i
.tm
.opcode_modifier
.vecsib
)
8080 i
.rm
.regmem
= i
.base_reg
->reg_num
;
8081 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
8083 i
.sib
.base
= i
.base_reg
->reg_num
;
8084 /* x86-64 ignores REX prefix bit here to avoid decoder
8086 if (!(i
.base_reg
->reg_flags
& RegRex
)
8087 && (i
.base_reg
->reg_num
== EBP_REG_NUM
8088 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
8090 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
8092 fake_zero_displacement
= 1;
8093 i
.types
[op
].bitfield
.disp8
= 1;
8095 i
.sib
.scale
= i
.log2_scale_factor
;
8096 if (i
.index_reg
== 0)
8098 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
8099 /* <disp>(%esp) becomes two byte modrm with no index
8100 register. We've already stored the code for esp
8101 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
8102 Any base register besides %esp will not use the
8103 extra modrm byte. */
8104 i
.sib
.index
= NO_INDEX_REGISTER
;
8106 else if (!i
.tm
.opcode_modifier
.vecsib
)
8108 if (i
.index_reg
->reg_num
== RegIZ
)
8109 i
.sib
.index
= NO_INDEX_REGISTER
;
8111 i
.sib
.index
= i
.index_reg
->reg_num
;
8112 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8113 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8118 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
8119 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
8123 if (!fake_zero_displacement
8127 fake_zero_displacement
= 1;
8128 if (i
.disp_encoding
== disp_encoding_8bit
)
8129 i
.types
[op
].bitfield
.disp8
= 1;
8131 i
.types
[op
].bitfield
.disp32
= 1;
8133 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8137 if (fake_zero_displacement
)
8139 /* Fakes a zero displacement assuming that i.types[op]
8140 holds the correct displacement size. */
8143 gas_assert (i
.op
[op
].disps
== 0);
8144 exp
= &disp_expressions
[i
.disp_operands
++];
8145 i
.op
[op
].disps
= exp
;
8146 exp
->X_op
= O_constant
;
8147 exp
->X_add_number
= 0;
8148 exp
->X_add_symbol
= (symbolS
*) 0;
8149 exp
->X_op_symbol
= (symbolS
*) 0;
8157 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
8159 if (operand_type_check (i
.types
[0], imm
))
8160 i
.vex
.register_specifier
= NULL
;
8163 /* VEX.vvvv encodes one of the sources when the first
8164 operand is not an immediate. */
8165 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8166 i
.vex
.register_specifier
= i
.op
[0].regs
;
8168 i
.vex
.register_specifier
= i
.op
[1].regs
;
8171 /* Destination is a XMM register encoded in the ModRM.reg
8173 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
8174 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
8177 /* ModRM.rm and VEX.B encodes the other source. */
8178 if (!i
.mem_operands
)
8182 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8183 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8185 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
8187 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8191 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
8193 i
.vex
.register_specifier
= i
.op
[2].regs
;
8194 if (!i
.mem_operands
)
8197 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8198 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8202 /* Fill in i.rm.reg or i.rm.regmem field with register operand
8203 (if any) based on i.tm.extension_opcode. Again, we must be
8204 careful to make sure that segment/control/debug/test/MMX
8205 registers are coded into the i.rm.reg field. */
8206 else if (i
.reg_operands
)
8209 unsigned int vex_reg
= ~0;
8211 for (op
= 0; op
< i
.operands
; op
++)
8213 if (i
.types
[op
].bitfield
.class == Reg
8214 || i
.types
[op
].bitfield
.class == RegBND
8215 || i
.types
[op
].bitfield
.class == RegMask
8216 || i
.types
[op
].bitfield
.class == SReg
8217 || i
.types
[op
].bitfield
.class == RegCR
8218 || i
.types
[op
].bitfield
.class == RegDR
8219 || i
.types
[op
].bitfield
.class == RegTR
)
8221 if (i
.types
[op
].bitfield
.class == RegSIMD
)
8223 if (i
.types
[op
].bitfield
.zmmword
)
8224 i
.has_regzmm
= TRUE
;
8225 else if (i
.types
[op
].bitfield
.ymmword
)
8226 i
.has_regymm
= TRUE
;
8228 i
.has_regxmm
= TRUE
;
8231 if (i
.types
[op
].bitfield
.class == RegMMX
)
8233 i
.has_regmmx
= TRUE
;
8240 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8242 /* For instructions with VexNDS, the register-only
8243 source operand is encoded in VEX prefix. */
8244 gas_assert (mem
!= (unsigned int) ~0);
8249 gas_assert (op
< i
.operands
);
8253 /* Check register-only source operand when two source
8254 operands are swapped. */
8255 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
8256 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
8260 gas_assert (mem
== (vex_reg
+ 1)
8261 && op
< i
.operands
);
8266 gas_assert (vex_reg
< i
.operands
);
8270 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
8272 /* For instructions with VexNDD, the register destination
8273 is encoded in VEX prefix. */
8274 if (i
.mem_operands
== 0)
8276 /* There is no memory operand. */
8277 gas_assert ((op
+ 2) == i
.operands
);
8282 /* There are only 2 non-immediate operands. */
8283 gas_assert (op
< i
.imm_operands
+ 2
8284 && i
.operands
== i
.imm_operands
+ 2);
8285 vex_reg
= i
.imm_operands
+ 1;
8289 gas_assert (op
< i
.operands
);
8291 if (vex_reg
!= (unsigned int) ~0)
8293 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
8295 if ((type
->bitfield
.class != Reg
8296 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
8297 && type
->bitfield
.class != RegSIMD
8298 && !operand_type_equal (type
, ®mask
))
8301 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
8304 /* Don't set OP operand twice. */
8307 /* If there is an extension opcode to put here, the
8308 register number must be put into the regmem field. */
8309 if (i
.tm
.extension_opcode
!= None
)
8311 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
8312 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
8314 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
8319 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
8320 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
8322 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
8327 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
8328 must set it to 3 to indicate this is a register operand
8329 in the regmem field. */
8330 if (!i
.mem_operands
)
8334 /* Fill in i.rm.reg field with extension opcode (if any). */
8335 if (i
.tm
.extension_opcode
!= None
)
8336 i
.rm
.reg
= i
.tm
.extension_opcode
;
8342 flip_code16 (unsigned int code16
)
8344 gas_assert (i
.tm
.operands
== 1);
8346 return !(i
.prefix
[REX_PREFIX
] & REX_W
)
8347 && (code16
? i
.tm
.operand_types
[0].bitfield
.disp32
8348 || i
.tm
.operand_types
[0].bitfield
.disp32s
8349 : i
.tm
.operand_types
[0].bitfield
.disp16
)
8354 output_branch (void)
8360 relax_substateT subtype
;
8364 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
8365 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
8368 if (i
.prefix
[DATA_PREFIX
] != 0)
8372 code16
^= flip_code16(code16
);
8374 /* Pentium4 branch hints. */
8375 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8376 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8381 if (i
.prefix
[REX_PREFIX
] != 0)
8387 /* BND prefixed jump. */
8388 if (i
.prefix
[BND_PREFIX
] != 0)
8394 if (i
.prefixes
!= 0)
8395 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8397 /* It's always a symbol; End frag & setup for relax.
8398 Make sure there is enough room in this frag for the largest
8399 instruction we may generate in md_convert_frag. This is 2
8400 bytes for the opcode and room for the prefix and largest
8402 frag_grow (prefix
+ 2 + 4);
8403 /* Prefix and 1 opcode byte go in fr_fix. */
8404 p
= frag_more (prefix
+ 1);
8405 if (i
.prefix
[DATA_PREFIX
] != 0)
8406 *p
++ = DATA_PREFIX_OPCODE
;
8407 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
8408 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
8409 *p
++ = i
.prefix
[SEG_PREFIX
];
8410 if (i
.prefix
[BND_PREFIX
] != 0)
8411 *p
++ = BND_PREFIX_OPCODE
;
8412 if (i
.prefix
[REX_PREFIX
] != 0)
8413 *p
++ = i
.prefix
[REX_PREFIX
];
8414 *p
= i
.tm
.base_opcode
;
8416 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
8417 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
8418 else if (cpu_arch_flags
.bitfield
.cpui386
)
8419 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
8421 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
8424 sym
= i
.op
[0].disps
->X_add_symbol
;
8425 off
= i
.op
[0].disps
->X_add_number
;
8427 if (i
.op
[0].disps
->X_op
!= O_constant
8428 && i
.op
[0].disps
->X_op
!= O_symbol
)
8430 /* Handle complex expressions. */
8431 sym
= make_expr_symbol (i
.op
[0].disps
);
8435 /* 1 possible extra opcode + 4 byte displacement go in var part.
8436 Pass reloc in fr_var. */
8437 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
8440 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8441 /* Return TRUE iff PLT32 relocation should be used for branching to
8445 need_plt32_p (symbolS
*s
)
8447 /* PLT32 relocation is ELF only. */
8452 /* Don't emit PLT32 relocation on Solaris: neither native linker nor
8453 krtld support it. */
8457 /* Since there is no need to prepare for PLT branch on x86-64, we
8458 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
8459 be used as a marker for 32-bit PC-relative branches. */
8463 /* Weak or undefined symbol need PLT32 relocation. */
8464 if (S_IS_WEAK (s
) || !S_IS_DEFINED (s
))
8467 /* Non-global symbol doesn't need PLT32 relocation. */
8468 if (! S_IS_EXTERNAL (s
))
8471 /* Other global symbols need PLT32 relocation. NB: Symbol with
8472 non-default visibilities are treated as normal global symbol
8473 so that PLT32 relocation can be used as a marker for 32-bit
8474 PC-relative branches. It is useful for linker relaxation. */
8485 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
8487 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)
8489 /* This is a loop or jecxz type instruction. */
8491 if (i
.prefix
[ADDR_PREFIX
] != 0)
8493 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE
);
8496 /* Pentium4 branch hints. */
8497 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8498 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8500 FRAG_APPEND_1_CHAR (i
.prefix
[SEG_PREFIX
]);
8509 if (flag_code
== CODE_16BIT
)
8512 if (i
.prefix
[DATA_PREFIX
] != 0)
8514 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE
);
8516 code16
^= flip_code16(code16
);
8524 /* BND prefixed jump. */
8525 if (i
.prefix
[BND_PREFIX
] != 0)
8527 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
8531 if (i
.prefix
[REX_PREFIX
] != 0)
8533 FRAG_APPEND_1_CHAR (i
.prefix
[REX_PREFIX
]);
8537 if (i
.prefixes
!= 0)
8538 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8540 p
= frag_more (i
.tm
.opcode_length
+ size
);
8541 switch (i
.tm
.opcode_length
)
8544 *p
++ = i
.tm
.base_opcode
>> 8;
8547 *p
++ = i
.tm
.base_opcode
;
8553 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8555 && jump_reloc
== NO_RELOC
8556 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
8557 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
8560 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
8562 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8563 i
.op
[0].disps
, 1, jump_reloc
);
8565 /* All jumps handled here are signed, but don't use a signed limit
8566 check for 32 and 16 bit jumps as we want to allow wrap around at
8567 4G and 64k respectively. */
8569 fixP
->fx_signed
= 1;
8573 output_interseg_jump (void)
8581 if (flag_code
== CODE_16BIT
)
8585 if (i
.prefix
[DATA_PREFIX
] != 0)
8592 gas_assert (!i
.prefix
[REX_PREFIX
]);
8598 if (i
.prefixes
!= 0)
8599 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8601 /* 1 opcode; 2 segment; offset */
8602 p
= frag_more (prefix
+ 1 + 2 + size
);
8604 if (i
.prefix
[DATA_PREFIX
] != 0)
8605 *p
++ = DATA_PREFIX_OPCODE
;
8607 if (i
.prefix
[REX_PREFIX
] != 0)
8608 *p
++ = i
.prefix
[REX_PREFIX
];
8610 *p
++ = i
.tm
.base_opcode
;
8611 if (i
.op
[1].imms
->X_op
== O_constant
)
8613 offsetT n
= i
.op
[1].imms
->X_add_number
;
8616 && !fits_in_unsigned_word (n
)
8617 && !fits_in_signed_word (n
))
8619 as_bad (_("16-bit jump out of range"));
8622 md_number_to_chars (p
, n
, size
);
8625 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8626 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
8627 if (i
.op
[0].imms
->X_op
!= O_constant
)
8628 as_bad (_("can't handle non absolute segment in `%s'"),
8630 md_number_to_chars (p
+ size
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
8633 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8638 asection
*seg
= now_seg
;
8639 subsegT subseg
= now_subseg
;
8641 unsigned int alignment
, align_size_1
;
8642 unsigned int isa_1_descsz
, feature_2_descsz
, descsz
;
8643 unsigned int isa_1_descsz_raw
, feature_2_descsz_raw
;
8644 unsigned int padding
;
8646 if (!IS_ELF
|| !x86_used_note
)
8649 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X86
;
8651 /* The .note.gnu.property section layout:
8653 Field Length Contents
8656 n_descsz 4 The note descriptor size
8657 n_type 4 NT_GNU_PROPERTY_TYPE_0
8659 n_desc n_descsz The program property array
8663 /* Create the .note.gnu.property section. */
8664 sec
= subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME
, 0);
8665 bfd_set_section_flags (sec
,
8672 if (get_elf_backend_data (stdoutput
)->s
->elfclass
== ELFCLASS64
)
8683 bfd_set_section_alignment (sec
, alignment
);
8684 elf_section_type (sec
) = SHT_NOTE
;
8686 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
8688 isa_1_descsz_raw
= 4 + 4 + 4;
8689 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
8690 isa_1_descsz
= (isa_1_descsz_raw
+ align_size_1
) & ~align_size_1
;
8692 feature_2_descsz_raw
= isa_1_descsz
;
8693 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
8695 feature_2_descsz_raw
+= 4 + 4 + 4;
8696 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
8697 feature_2_descsz
= ((feature_2_descsz_raw
+ align_size_1
)
8700 descsz
= feature_2_descsz
;
8701 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
8702 p
= frag_more (4 + 4 + 4 + 4 + descsz
);
8704 /* Write n_namsz. */
8705 md_number_to_chars (p
, (valueT
) 4, 4);
8707 /* Write n_descsz. */
8708 md_number_to_chars (p
+ 4, (valueT
) descsz
, 4);
8711 md_number_to_chars (p
+ 4 * 2, (valueT
) NT_GNU_PROPERTY_TYPE_0
, 4);
8714 memcpy (p
+ 4 * 3, "GNU", 4);
8716 /* Write 4-byte type. */
8717 md_number_to_chars (p
+ 4 * 4,
8718 (valueT
) GNU_PROPERTY_X86_ISA_1_USED
, 4);
8720 /* Write 4-byte data size. */
8721 md_number_to_chars (p
+ 4 * 5, (valueT
) 4, 4);
8723 /* Write 4-byte data. */
8724 md_number_to_chars (p
+ 4 * 6, (valueT
) x86_isa_1_used
, 4);
8726 /* Zero out paddings. */
8727 padding
= isa_1_descsz
- isa_1_descsz_raw
;
8729 memset (p
+ 4 * 7, 0, padding
);
8731 /* Write 4-byte type. */
8732 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 4,
8733 (valueT
) GNU_PROPERTY_X86_FEATURE_2_USED
, 4);
8735 /* Write 4-byte data size. */
8736 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 5, (valueT
) 4, 4);
8738 /* Write 4-byte data. */
8739 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 6,
8740 (valueT
) x86_feature_2_used
, 4);
8742 /* Zero out paddings. */
8743 padding
= feature_2_descsz
- feature_2_descsz_raw
;
8745 memset (p
+ isa_1_descsz
+ 4 * 7, 0, padding
);
8747 /* We probably can't restore the current segment, for there likely
8750 subseg_set (seg
, subseg
);
8755 encoding_length (const fragS
*start_frag
, offsetT start_off
,
8756 const char *frag_now_ptr
)
8758 unsigned int len
= 0;
8760 if (start_frag
!= frag_now
)
8762 const fragS
*fr
= start_frag
;
8767 } while (fr
&& fr
!= frag_now
);
8770 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
8773 /* Return 1 for test, and, cmp, add, sub, inc and dec which may
8774 be macro-fused with conditional jumps.
8775 NB: If TEST/AND/CMP/ADD/SUB/INC/DEC is of RIP relative address,
8776 or is one of the following format:
8789 maybe_fused_with_jcc_p (enum mf_cmp_kind
* mf_cmp_p
)
8791 /* No RIP address. */
8792 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
8795 /* No VEX/EVEX encoding. */
8796 if (is_any_vex_encoding (&i
.tm
))
8799 /* add, sub without add/sub m, imm. */
8800 if (i
.tm
.base_opcode
<= 5
8801 || (i
.tm
.base_opcode
>= 0x28 && i
.tm
.base_opcode
<= 0x2d)
8802 || ((i
.tm
.base_opcode
| 3) == 0x83
8803 && (i
.tm
.extension_opcode
== 0x5
8804 || i
.tm
.extension_opcode
== 0x0)))
8806 *mf_cmp_p
= mf_cmp_alu_cmp
;
8807 return !(i
.mem_operands
&& i
.imm_operands
);
8810 /* and without and m, imm. */
8811 if ((i
.tm
.base_opcode
>= 0x20 && i
.tm
.base_opcode
<= 0x25)
8812 || ((i
.tm
.base_opcode
| 3) == 0x83
8813 && i
.tm
.extension_opcode
== 0x4))
8815 *mf_cmp_p
= mf_cmp_test_and
;
8816 return !(i
.mem_operands
&& i
.imm_operands
);
8819 /* test without test m imm. */
8820 if ((i
.tm
.base_opcode
| 1) == 0x85
8821 || (i
.tm
.base_opcode
| 1) == 0xa9
8822 || ((i
.tm
.base_opcode
| 1) == 0xf7
8823 && i
.tm
.extension_opcode
== 0))
8825 *mf_cmp_p
= mf_cmp_test_and
;
8826 return !(i
.mem_operands
&& i
.imm_operands
);
8829 /* cmp without cmp m, imm. */
8830 if ((i
.tm
.base_opcode
>= 0x38 && i
.tm
.base_opcode
<= 0x3d)
8831 || ((i
.tm
.base_opcode
| 3) == 0x83
8832 && (i
.tm
.extension_opcode
== 0x7)))
8834 *mf_cmp_p
= mf_cmp_alu_cmp
;
8835 return !(i
.mem_operands
&& i
.imm_operands
);
8838 /* inc, dec without inc/dec m. */
8839 if ((i
.tm
.cpu_flags
.bitfield
.cpuno64
8840 && (i
.tm
.base_opcode
| 0xf) == 0x4f)
8841 || ((i
.tm
.base_opcode
| 1) == 0xff
8842 && i
.tm
.extension_opcode
<= 0x1))
8844 *mf_cmp_p
= mf_cmp_incdec
;
8845 return !i
.mem_operands
;
8851 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
8854 add_fused_jcc_padding_frag_p (enum mf_cmp_kind
* mf_cmp_p
)
8856 /* NB: Don't work with COND_JUMP86 without i386. */
8857 if (!align_branch_power
8858 || now_seg
== absolute_section
8859 || !cpu_arch_flags
.bitfield
.cpui386
8860 || !(align_branch
& align_branch_fused_bit
))
8863 if (maybe_fused_with_jcc_p (mf_cmp_p
))
8865 if (last_insn
.kind
== last_insn_other
8866 || last_insn
.seg
!= now_seg
)
8869 as_warn_where (last_insn
.file
, last_insn
.line
,
8870 _("`%s` skips -malign-branch-boundary on `%s`"),
8871 last_insn
.name
, i
.tm
.name
);
8877 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
8880 add_branch_prefix_frag_p (void)
8882 /* NB: Don't work with COND_JUMP86 without i386. Don't add prefix
8883 to PadLock instructions since they include prefixes in opcode. */
8884 if (!align_branch_power
8885 || !align_branch_prefix_size
8886 || now_seg
== absolute_section
8887 || i
.tm
.cpu_flags
.bitfield
.cpupadlock
8888 || !cpu_arch_flags
.bitfield
.cpui386
)
8891 /* Don't add prefix if it is a prefix or there is no operand in case
8892 that segment prefix is special. */
8893 if (!i
.operands
|| i
.tm
.opcode_modifier
.isprefix
)
8896 if (last_insn
.kind
== last_insn_other
8897 || last_insn
.seg
!= now_seg
)
8901 as_warn_where (last_insn
.file
, last_insn
.line
,
8902 _("`%s` skips -malign-branch-boundary on `%s`"),
8903 last_insn
.name
, i
.tm
.name
);
8908 /* Return 1 if a BRANCH_PADDING frag should be generated. */
8911 add_branch_padding_frag_p (enum align_branch_kind
*branch_p
,
8912 enum mf_jcc_kind
*mf_jcc_p
)
8916 /* NB: Don't work with COND_JUMP86 without i386. */
8917 if (!align_branch_power
8918 || now_seg
== absolute_section
8919 || !cpu_arch_flags
.bitfield
.cpui386
)
8924 /* Check for jcc and direct jmp. */
8925 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
8927 if (i
.tm
.base_opcode
== JUMP_PC_RELATIVE
)
8929 *branch_p
= align_branch_jmp
;
8930 add_padding
= align_branch
& align_branch_jmp_bit
;
8934 /* Because J<cc> and JN<cc> share same group in macro-fusible table,
8935 igore the lowest bit. */
8936 *mf_jcc_p
= (i
.tm
.base_opcode
& 0x0e) >> 1;
8937 *branch_p
= align_branch_jcc
;
8938 if ((align_branch
& align_branch_jcc_bit
))
8942 else if (is_any_vex_encoding (&i
.tm
))
8944 else if ((i
.tm
.base_opcode
| 1) == 0xc3)
8947 *branch_p
= align_branch_ret
;
8948 if ((align_branch
& align_branch_ret_bit
))
8953 /* Check for indirect jmp, direct and indirect calls. */
8954 if (i
.tm
.base_opcode
== 0xe8)
8957 *branch_p
= align_branch_call
;
8958 if ((align_branch
& align_branch_call_bit
))
8961 else if (i
.tm
.base_opcode
== 0xff
8962 && (i
.tm
.extension_opcode
== 2
8963 || i
.tm
.extension_opcode
== 4))
8965 /* Indirect call and jmp. */
8966 *branch_p
= align_branch_indirect
;
8967 if ((align_branch
& align_branch_indirect_bit
))
8974 && (i
.op
[0].disps
->X_op
== O_symbol
8975 || (i
.op
[0].disps
->X_op
== O_subtract
8976 && i
.op
[0].disps
->X_op_symbol
== GOT_symbol
)))
8978 symbolS
*s
= i
.op
[0].disps
->X_add_symbol
;
8979 /* No padding to call to global or undefined tls_get_addr. */
8980 if ((S_IS_EXTERNAL (s
) || !S_IS_DEFINED (s
))
8981 && strcmp (S_GET_NAME (s
), tls_get_addr
) == 0)
8987 && last_insn
.kind
!= last_insn_other
8988 && last_insn
.seg
== now_seg
)
8991 as_warn_where (last_insn
.file
, last_insn
.line
,
8992 _("`%s` skips -malign-branch-boundary on `%s`"),
8993 last_insn
.name
, i
.tm
.name
);
9003 fragS
*insn_start_frag
;
9004 offsetT insn_start_off
;
9005 fragS
*fragP
= NULL
;
9006 enum align_branch_kind branch
= align_branch_none
;
9007 /* The initializer is arbitrary just to avoid uninitialized error.
9008 it's actually either assigned in add_branch_padding_frag_p
9009 or never be used. */
9010 enum mf_jcc_kind mf_jcc
= mf_jcc_jo
;
9012 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9013 if (IS_ELF
&& x86_used_note
)
9015 if (i
.tm
.cpu_flags
.bitfield
.cpucmov
)
9016 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_CMOV
;
9017 if (i
.tm
.cpu_flags
.bitfield
.cpusse
)
9018 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE
;
9019 if (i
.tm
.cpu_flags
.bitfield
.cpusse2
)
9020 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE2
;
9021 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
)
9022 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE3
;
9023 if (i
.tm
.cpu_flags
.bitfield
.cpussse3
)
9024 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSSE3
;
9025 if (i
.tm
.cpu_flags
.bitfield
.cpusse4_1
)
9026 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE4_1
;
9027 if (i
.tm
.cpu_flags
.bitfield
.cpusse4_2
)
9028 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE4_2
;
9029 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
)
9030 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX
;
9031 if (i
.tm
.cpu_flags
.bitfield
.cpuavx2
)
9032 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX2
;
9033 if (i
.tm
.cpu_flags
.bitfield
.cpufma
)
9034 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_FMA
;
9035 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
)
9036 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512F
;
9037 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512cd
)
9038 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512CD
;
9039 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512er
)
9040 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512ER
;
9041 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
)
9042 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512PF
;
9043 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
)
9044 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512VL
;
9045 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
)
9046 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512DQ
;
9047 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
)
9048 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512BW
;
9049 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_4fmaps
)
9050 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_4FMAPS
;
9051 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
)
9052 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_4VNNIW
;
9053 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_bitalg
)
9054 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_BITALG
;
9055 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512ifma
)
9056 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_IFMA
;
9057 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512vbmi
)
9058 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI
;
9059 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_vbmi2
)
9060 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI2
;
9061 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_vnni
)
9062 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VNNI
;
9063 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_bf16
)
9064 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_BF16
;
9066 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
9067 || i
.tm
.cpu_flags
.bitfield
.cpu287
9068 || i
.tm
.cpu_flags
.bitfield
.cpu387
9069 || i
.tm
.cpu_flags
.bitfield
.cpu687
9070 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
9071 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
9073 || i
.tm
.base_opcode
== 0xf77 /* emms */
9074 || i
.tm
.base_opcode
== 0xf0e /* femms */
9075 || i
.tm
.base_opcode
== 0xf2a /* cvtpi2ps */
9076 || i
.tm
.base_opcode
== 0x660f2a /* cvtpi2pd */)
9077 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
9079 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
9081 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
9083 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
9084 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
9085 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
9086 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
9087 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
9088 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
9089 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
9090 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
9091 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
9095 /* Tie dwarf2 debug info to the address at the start of the insn.
9096 We can't do this after the insn has been output as the current
9097 frag may have been closed off. eg. by frag_var. */
9098 dwarf2_emit_insn (0);
9100 insn_start_frag
= frag_now
;
9101 insn_start_off
= frag_now_fix ();
9103 if (add_branch_padding_frag_p (&branch
, &mf_jcc
))
9106 /* Branch can be 8 bytes. Leave some room for prefixes. */
9107 unsigned int max_branch_padding_size
= 14;
9109 /* Align section to boundary. */
9110 record_alignment (now_seg
, align_branch_power
);
9112 /* Make room for padding. */
9113 frag_grow (max_branch_padding_size
);
9115 /* Start of the padding. */
9120 frag_var (rs_machine_dependent
, max_branch_padding_size
, 0,
9121 ENCODE_RELAX_STATE (BRANCH_PADDING
, 0),
9124 fragP
->tc_frag_data
.mf_type
= mf_jcc
;
9125 fragP
->tc_frag_data
.branch_type
= branch
;
9126 fragP
->tc_frag_data
.max_bytes
= max_branch_padding_size
;
9130 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9132 else if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
9133 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
9135 else if (i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
)
9136 output_interseg_jump ();
9139 /* Output normal instructions here. */
9143 unsigned int prefix
;
9144 enum mf_cmp_kind mf_cmp
;
9147 && (i
.tm
.base_opcode
== 0xfaee8
9148 || i
.tm
.base_opcode
== 0xfaef0
9149 || i
.tm
.base_opcode
== 0xfaef8))
9151 /* Encode lfence, mfence, and sfence as
9152 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
9153 offsetT val
= 0x240483f0ULL
;
9155 md_number_to_chars (p
, val
, 5);
9159 /* Some processors fail on LOCK prefix. This options makes
9160 assembler ignore LOCK prefix and serves as a workaround. */
9161 if (omit_lock_prefix
)
9163 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
)
9165 i
.prefix
[LOCK_PREFIX
] = 0;
9169 /* Skip if this is a branch. */
9171 else if (add_fused_jcc_padding_frag_p (&mf_cmp
))
9173 /* Make room for padding. */
9174 frag_grow (MAX_FUSED_JCC_PADDING_SIZE
);
9179 frag_var (rs_machine_dependent
, MAX_FUSED_JCC_PADDING_SIZE
, 0,
9180 ENCODE_RELAX_STATE (FUSED_JCC_PADDING
, 0),
9183 fragP
->tc_frag_data
.mf_type
= mf_cmp
;
9184 fragP
->tc_frag_data
.branch_type
= align_branch_fused
;
9185 fragP
->tc_frag_data
.max_bytes
= MAX_FUSED_JCC_PADDING_SIZE
;
9187 else if (add_branch_prefix_frag_p ())
9189 unsigned int max_prefix_size
= align_branch_prefix_size
;
9191 /* Make room for padding. */
9192 frag_grow (max_prefix_size
);
9197 frag_var (rs_machine_dependent
, max_prefix_size
, 0,
9198 ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0),
9201 fragP
->tc_frag_data
.max_bytes
= max_prefix_size
;
9204 /* Since the VEX/EVEX prefix contains the implicit prefix, we
9205 don't need the explicit prefix. */
9206 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
9208 switch (i
.tm
.opcode_length
)
9211 if (i
.tm
.base_opcode
& 0xff000000)
9213 prefix
= (i
.tm
.base_opcode
>> 24) & 0xff;
9214 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
9215 || prefix
!= REPE_PREFIX_OPCODE
9216 || (i
.prefix
[REP_PREFIX
] != REPE_PREFIX_OPCODE
))
9217 add_prefix (prefix
);
9221 if ((i
.tm
.base_opcode
& 0xff0000) != 0)
9223 prefix
= (i
.tm
.base_opcode
>> 16) & 0xff;
9224 add_prefix (prefix
);
9230 /* Check for pseudo prefixes. */
9231 as_bad_where (insn_start_frag
->fr_file
,
9232 insn_start_frag
->fr_line
,
9233 _("pseudo prefix without instruction"));
9239 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9240 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
9241 R_X86_64_GOTTPOFF relocation so that linker can safely
9242 perform IE->LE optimization. A dummy REX_OPCODE prefix
9243 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
9244 relocation for GDesc -> IE/LE optimization. */
9245 if (x86_elf_abi
== X86_64_X32_ABI
9247 && (i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
9248 || i
.reloc
[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC
)
9249 && i
.prefix
[REX_PREFIX
] == 0)
9250 add_prefix (REX_OPCODE
);
9253 /* The prefix bytes. */
9254 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
9256 FRAG_APPEND_1_CHAR (*q
);
9260 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
9265 /* REX byte is encoded in VEX prefix. */
9269 FRAG_APPEND_1_CHAR (*q
);
9272 /* There should be no other prefixes for instructions
9277 /* For EVEX instructions i.vrex should become 0 after
9278 build_evex_prefix. For VEX instructions upper 16 registers
9279 aren't available, so VREX should be 0. */
9282 /* Now the VEX prefix. */
9283 p
= frag_more (i
.vex
.length
);
9284 for (j
= 0; j
< i
.vex
.length
; j
++)
9285 p
[j
] = i
.vex
.bytes
[j
];
9288 /* Now the opcode; be careful about word order here! */
9289 if (i
.tm
.opcode_length
== 1)
9291 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
9295 switch (i
.tm
.opcode_length
)
9299 *p
++ = (i
.tm
.base_opcode
>> 24) & 0xff;
9300 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
9304 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
9314 /* Put out high byte first: can't use md_number_to_chars! */
9315 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
9316 *p
= i
.tm
.base_opcode
& 0xff;
9319 /* Now the modrm byte and sib byte (if present). */
9320 if (i
.tm
.opcode_modifier
.modrm
)
9322 FRAG_APPEND_1_CHAR ((i
.rm
.regmem
<< 0
9325 /* If i.rm.regmem == ESP (4)
9326 && i.rm.mode != (Register mode)
9328 ==> need second modrm byte. */
9329 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
9331 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
9332 FRAG_APPEND_1_CHAR ((i
.sib
.base
<< 0
9334 | i
.sib
.scale
<< 6));
9337 if (i
.disp_operands
)
9338 output_disp (insn_start_frag
, insn_start_off
);
9341 output_imm (insn_start_frag
, insn_start_off
);
9344 * frag_now_fix () returning plain abs_section_offset when we're in the
9345 * absolute section, and abs_section_offset not getting updated as data
9346 * gets added to the frag breaks the logic below.
9348 if (now_seg
!= absolute_section
)
9350 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
9352 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
9356 /* NB: Don't add prefix with GOTPC relocation since
9357 output_disp() above depends on the fixed encoding
9358 length. Can't add prefix with TLS relocation since
9359 it breaks TLS linker optimization. */
9360 unsigned int max
= i
.has_gotpc_tls_reloc
? 0 : 15 - j
;
9361 /* Prefix count on the current instruction. */
9362 unsigned int count
= i
.vex
.length
;
9364 for (k
= 0; k
< ARRAY_SIZE (i
.prefix
); k
++)
9365 /* REX byte is encoded in VEX/EVEX prefix. */
9366 if (i
.prefix
[k
] && (k
!= REX_PREFIX
|| !i
.vex
.length
))
9369 /* Count prefixes for extended opcode maps. */
9371 switch (i
.tm
.opcode_length
)
9374 if (((i
.tm
.base_opcode
>> 16) & 0xff) == 0xf)
9377 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
9389 if (((i
.tm
.base_opcode
>> 8) & 0xff) == 0xf)
9398 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
9401 /* Set the maximum prefix size in BRANCH_PREFIX
9403 if (fragP
->tc_frag_data
.max_bytes
> max
)
9404 fragP
->tc_frag_data
.max_bytes
= max
;
9405 if (fragP
->tc_frag_data
.max_bytes
> count
)
9406 fragP
->tc_frag_data
.max_bytes
-= count
;
9408 fragP
->tc_frag_data
.max_bytes
= 0;
9412 /* Remember the maximum prefix size in FUSED_JCC_PADDING
9414 unsigned int max_prefix_size
;
9415 if (align_branch_prefix_size
> max
)
9416 max_prefix_size
= max
;
9418 max_prefix_size
= align_branch_prefix_size
;
9419 if (max_prefix_size
> count
)
9420 fragP
->tc_frag_data
.max_prefix_length
9421 = max_prefix_size
- count
;
9424 /* Use existing segment prefix if possible. Use CS
9425 segment prefix in 64-bit mode. In 32-bit mode, use SS
9426 segment prefix with ESP/EBP base register and use DS
9427 segment prefix without ESP/EBP base register. */
9428 if (i
.prefix
[SEG_PREFIX
])
9429 fragP
->tc_frag_data
.default_prefix
= i
.prefix
[SEG_PREFIX
];
9430 else if (flag_code
== CODE_64BIT
)
9431 fragP
->tc_frag_data
.default_prefix
= CS_PREFIX_OPCODE
;
9433 && (i
.base_reg
->reg_num
== 4
9434 || i
.base_reg
->reg_num
== 5))
9435 fragP
->tc_frag_data
.default_prefix
= SS_PREFIX_OPCODE
;
9437 fragP
->tc_frag_data
.default_prefix
= DS_PREFIX_OPCODE
;
9442 /* NB: Don't work with COND_JUMP86 without i386. */
9443 if (align_branch_power
9444 && now_seg
!= absolute_section
9445 && cpu_arch_flags
.bitfield
.cpui386
)
9447 /* Terminate each frag so that we can add prefix and check for
9449 frag_wane (frag_now
);
9456 pi ("" /*line*/, &i
);
9458 #endif /* DEBUG386 */
9461 /* Return the size of the displacement operand N. */
9464 disp_size (unsigned int n
)
9468 if (i
.types
[n
].bitfield
.disp64
)
9470 else if (i
.types
[n
].bitfield
.disp8
)
9472 else if (i
.types
[n
].bitfield
.disp16
)
9477 /* Return the size of the immediate operand N. */
9480 imm_size (unsigned int n
)
9483 if (i
.types
[n
].bitfield
.imm64
)
9485 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
9487 else if (i
.types
[n
].bitfield
.imm16
)
9493 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
9498 for (n
= 0; n
< i
.operands
; n
++)
9500 if (operand_type_check (i
.types
[n
], disp
))
9502 if (i
.op
[n
].disps
->X_op
== O_constant
)
9504 int size
= disp_size (n
);
9505 offsetT val
= i
.op
[n
].disps
->X_add_number
;
9507 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
9509 p
= frag_more (size
);
9510 md_number_to_chars (p
, val
, size
);
9514 enum bfd_reloc_code_real reloc_type
;
9515 int size
= disp_size (n
);
9516 int sign
= i
.types
[n
].bitfield
.disp32s
;
9517 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
9520 /* We can't have 8 bit displacement here. */
9521 gas_assert (!i
.types
[n
].bitfield
.disp8
);
9523 /* The PC relative address is computed relative
9524 to the instruction boundary, so in case immediate
9525 fields follows, we need to adjust the value. */
9526 if (pcrel
&& i
.imm_operands
)
9531 for (n1
= 0; n1
< i
.operands
; n1
++)
9532 if (operand_type_check (i
.types
[n1
], imm
))
9534 /* Only one immediate is allowed for PC
9535 relative address. */
9536 gas_assert (sz
== 0);
9538 i
.op
[n
].disps
->X_add_number
-= sz
;
9540 /* We should find the immediate. */
9541 gas_assert (sz
!= 0);
9544 p
= frag_more (size
);
9545 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
9547 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
9548 && (((reloc_type
== BFD_RELOC_32
9549 || reloc_type
== BFD_RELOC_X86_64_32S
9550 || (reloc_type
== BFD_RELOC_64
9552 && (i
.op
[n
].disps
->X_op
== O_symbol
9553 || (i
.op
[n
].disps
->X_op
== O_add
9554 && ((symbol_get_value_expression
9555 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
9557 || reloc_type
== BFD_RELOC_32_PCREL
))
9561 reloc_type
= BFD_RELOC_386_GOTPC
;
9562 i
.has_gotpc_tls_reloc
= TRUE
;
9563 i
.op
[n
].imms
->X_add_number
+=
9564 encoding_length (insn_start_frag
, insn_start_off
, p
);
9566 else if (reloc_type
== BFD_RELOC_64
)
9567 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
9569 /* Don't do the adjustment for x86-64, as there
9570 the pcrel addressing is relative to the _next_
9571 insn, and that is taken care of in other code. */
9572 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
9574 else if (align_branch_power
)
9578 case BFD_RELOC_386_TLS_GD
:
9579 case BFD_RELOC_386_TLS_LDM
:
9580 case BFD_RELOC_386_TLS_IE
:
9581 case BFD_RELOC_386_TLS_IE_32
:
9582 case BFD_RELOC_386_TLS_GOTIE
:
9583 case BFD_RELOC_386_TLS_GOTDESC
:
9584 case BFD_RELOC_386_TLS_DESC_CALL
:
9585 case BFD_RELOC_X86_64_TLSGD
:
9586 case BFD_RELOC_X86_64_TLSLD
:
9587 case BFD_RELOC_X86_64_GOTTPOFF
:
9588 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
9589 case BFD_RELOC_X86_64_TLSDESC_CALL
:
9590 i
.has_gotpc_tls_reloc
= TRUE
;
9595 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
9596 size
, i
.op
[n
].disps
, pcrel
,
9598 /* Check for "call/jmp *mem", "mov mem, %reg",
9599 "test %reg, mem" and "binop mem, %reg" where binop
9600 is one of adc, add, and, cmp, or, sbb, sub, xor
9601 instructions without data prefix. Always generate
9602 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
9603 if (i
.prefix
[DATA_PREFIX
] == 0
9604 && (generate_relax_relocations
9607 && i
.rm
.regmem
== 5))
9609 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
9610 && !is_any_vex_encoding(&i
.tm
)
9611 && ((i
.operands
== 1
9612 && i
.tm
.base_opcode
== 0xff
9613 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
9615 && (i
.tm
.base_opcode
== 0x8b
9616 || i
.tm
.base_opcode
== 0x85
9617 || (i
.tm
.base_opcode
& ~0x38) == 0x03))))
9621 fixP
->fx_tcbit
= i
.rex
!= 0;
9623 && (i
.base_reg
->reg_num
== RegIP
))
9624 fixP
->fx_tcbit2
= 1;
9627 fixP
->fx_tcbit2
= 1;
9635 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
9640 for (n
= 0; n
< i
.operands
; n
++)
9642 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
9643 if (i
.rounding
&& (int) n
== i
.rounding
->operand
)
9646 if (operand_type_check (i
.types
[n
], imm
))
9648 if (i
.op
[n
].imms
->X_op
== O_constant
)
9650 int size
= imm_size (n
);
9653 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
9655 p
= frag_more (size
);
9656 md_number_to_chars (p
, val
, size
);
9660 /* Not absolute_section.
9661 Need a 32-bit fixup (don't support 8bit
9662 non-absolute imms). Try to support other
9664 enum bfd_reloc_code_real reloc_type
;
9665 int size
= imm_size (n
);
9668 if (i
.types
[n
].bitfield
.imm32s
9669 && (i
.suffix
== QWORD_MNEM_SUFFIX
9670 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
9675 p
= frag_more (size
);
9676 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
9678 /* This is tough to explain. We end up with this one if we
9679 * have operands that look like
9680 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
9681 * obtain the absolute address of the GOT, and it is strongly
9682 * preferable from a performance point of view to avoid using
9683 * a runtime relocation for this. The actual sequence of
9684 * instructions often look something like:
9689 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
9691 * The call and pop essentially return the absolute address
9692 * of the label .L66 and store it in %ebx. The linker itself
9693 * will ultimately change the first operand of the addl so
9694 * that %ebx points to the GOT, but to keep things simple, the
9695 * .o file must have this operand set so that it generates not
9696 * the absolute address of .L66, but the absolute address of
9697 * itself. This allows the linker itself simply treat a GOTPC
9698 * relocation as asking for a pcrel offset to the GOT to be
9699 * added in, and the addend of the relocation is stored in the
9700 * operand field for the instruction itself.
9702 * Our job here is to fix the operand so that it would add
9703 * the correct offset so that %ebx would point to itself. The
9704 * thing that is tricky is that .-.L66 will point to the
9705 * beginning of the instruction, so we need to further modify
9706 * the operand so that it will point to itself. There are
9707 * other cases where you have something like:
9709 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
9711 * and here no correction would be required. Internally in
9712 * the assembler we treat operands of this form as not being
9713 * pcrel since the '.' is explicitly mentioned, and I wonder
9714 * whether it would simplify matters to do it this way. Who
9715 * knows. In earlier versions of the PIC patches, the
9716 * pcrel_adjust field was used to store the correction, but
9717 * since the expression is not pcrel, I felt it would be
9718 * confusing to do it this way. */
9720 if ((reloc_type
== BFD_RELOC_32
9721 || reloc_type
== BFD_RELOC_X86_64_32S
9722 || reloc_type
== BFD_RELOC_64
)
9724 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
9725 && (i
.op
[n
].imms
->X_op
== O_symbol
9726 || (i
.op
[n
].imms
->X_op
== O_add
9727 && ((symbol_get_value_expression
9728 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
9732 reloc_type
= BFD_RELOC_386_GOTPC
;
9734 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
9736 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
9737 i
.has_gotpc_tls_reloc
= TRUE
;
9738 i
.op
[n
].imms
->X_add_number
+=
9739 encoding_length (insn_start_frag
, insn_start_off
, p
);
9741 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
9742 i
.op
[n
].imms
, 0, reloc_type
);
9748 /* x86_cons_fix_new is called via the expression parsing code when a
9749 reloc is needed. We use this hook to get the correct .got reloc. */
9750 static int cons_sign
= -1;
9753 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
9754 expressionS
*exp
, bfd_reloc_code_real_type r
)
9756 r
= reloc (len
, 0, cons_sign
, r
);
9759 if (exp
->X_op
== O_secrel
)
9761 exp
->X_op
= O_symbol
;
9762 r
= BFD_RELOC_32_SECREL
;
9766 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
9769 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
9770 purpose of the `.dc.a' internal pseudo-op. */
9773 x86_address_bytes (void)
9775 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
9777 return stdoutput
->arch_info
->bits_per_address
/ 8;
9780 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
9782 # define lex_got(reloc, adjust, types) NULL
9784 /* Parse operands of the form
9785 <symbol>@GOTOFF+<nnn>
9786 and similar .plt or .got references.
9788 If we find one, set up the correct relocation in RELOC and copy the
9789 input string, minus the `@GOTOFF' into a malloc'd buffer for
9790 parsing by the calling routine. Return this buffer, and if ADJUST
9791 is non-null set it to the length of the string we removed from the
9792 input line. Otherwise return NULL. */
9794 lex_got (enum bfd_reloc_code_real
*rel
,
9796 i386_operand_type
*types
)
9798 /* Some of the relocations depend on the size of what field is to
9799 be relocated. But in our callers i386_immediate and i386_displacement
9800 we don't yet know the operand size (this will be set by insn
9801 matching). Hence we record the word32 relocation here,
9802 and adjust the reloc according to the real size in reloc(). */
9803 static const struct {
9806 const enum bfd_reloc_code_real rel
[2];
9807 const i386_operand_type types64
;
9809 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9810 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
9812 OPERAND_TYPE_IMM32_64
},
9814 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
9815 BFD_RELOC_X86_64_PLTOFF64
},
9816 OPERAND_TYPE_IMM64
},
9817 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
9818 BFD_RELOC_X86_64_PLT32
},
9819 OPERAND_TYPE_IMM32_32S_DISP32
},
9820 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
9821 BFD_RELOC_X86_64_GOTPLT64
},
9822 OPERAND_TYPE_IMM64_DISP64
},
9823 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
9824 BFD_RELOC_X86_64_GOTOFF64
},
9825 OPERAND_TYPE_IMM64_DISP64
},
9826 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
9827 BFD_RELOC_X86_64_GOTPCREL
},
9828 OPERAND_TYPE_IMM32_32S_DISP32
},
9829 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
9830 BFD_RELOC_X86_64_TLSGD
},
9831 OPERAND_TYPE_IMM32_32S_DISP32
},
9832 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
9833 _dummy_first_bfd_reloc_code_real
},
9834 OPERAND_TYPE_NONE
},
9835 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
9836 BFD_RELOC_X86_64_TLSLD
},
9837 OPERAND_TYPE_IMM32_32S_DISP32
},
9838 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
9839 BFD_RELOC_X86_64_GOTTPOFF
},
9840 OPERAND_TYPE_IMM32_32S_DISP32
},
9841 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
9842 BFD_RELOC_X86_64_TPOFF32
},
9843 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
9844 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
9845 _dummy_first_bfd_reloc_code_real
},
9846 OPERAND_TYPE_NONE
},
9847 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
9848 BFD_RELOC_X86_64_DTPOFF32
},
9849 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
9850 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
9851 _dummy_first_bfd_reloc_code_real
},
9852 OPERAND_TYPE_NONE
},
9853 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
9854 _dummy_first_bfd_reloc_code_real
},
9855 OPERAND_TYPE_NONE
},
9856 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
9857 BFD_RELOC_X86_64_GOT32
},
9858 OPERAND_TYPE_IMM32_32S_64_DISP32
},
9859 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
9860 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
9861 OPERAND_TYPE_IMM32_32S_DISP32
},
9862 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
9863 BFD_RELOC_X86_64_TLSDESC_CALL
},
9864 OPERAND_TYPE_IMM32_32S_DISP32
},
9869 #if defined (OBJ_MAYBE_ELF)
9874 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
9875 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
9878 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
9880 int len
= gotrel
[j
].len
;
9881 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
9883 if (gotrel
[j
].rel
[object_64bit
] != 0)
9886 char *tmpbuf
, *past_reloc
;
9888 *rel
= gotrel
[j
].rel
[object_64bit
];
9892 if (flag_code
!= CODE_64BIT
)
9894 types
->bitfield
.imm32
= 1;
9895 types
->bitfield
.disp32
= 1;
9898 *types
= gotrel
[j
].types64
;
9901 if (j
!= 0 && GOT_symbol
== NULL
)
9902 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
9904 /* The length of the first part of our input line. */
9905 first
= cp
- input_line_pointer
;
9907 /* The second part goes from after the reloc token until
9908 (and including) an end_of_line char or comma. */
9909 past_reloc
= cp
+ 1 + len
;
9911 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
9913 second
= cp
+ 1 - past_reloc
;
9915 /* Allocate and copy string. The trailing NUL shouldn't
9916 be necessary, but be safe. */
9917 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
9918 memcpy (tmpbuf
, input_line_pointer
, first
);
9919 if (second
!= 0 && *past_reloc
!= ' ')
9920 /* Replace the relocation token with ' ', so that
9921 errors like foo@GOTOFF1 will be detected. */
9922 tmpbuf
[first
++] = ' ';
9924 /* Increment length by 1 if the relocation token is
9929 memcpy (tmpbuf
+ first
, past_reloc
, second
);
9930 tmpbuf
[first
+ second
] = '\0';
9934 as_bad (_("@%s reloc is not supported with %d-bit output format"),
9935 gotrel
[j
].str
, 1 << (5 + object_64bit
));
9940 /* Might be a symbol version string. Don't as_bad here. */
9949 /* Parse operands of the form
9950 <symbol>@SECREL32+<nnn>
9952 If we find one, set up the correct relocation in RELOC and copy the
9953 input string, minus the `@SECREL32' into a malloc'd buffer for
9954 parsing by the calling routine. Return this buffer, and if ADJUST
9955 is non-null set it to the length of the string we removed from the
9956 input line. Otherwise return NULL.
9958 This function is copied from the ELF version above adjusted for PE targets. */
9961 lex_got (enum bfd_reloc_code_real
*rel ATTRIBUTE_UNUSED
,
9962 int *adjust ATTRIBUTE_UNUSED
,
9963 i386_operand_type
*types
)
9969 const enum bfd_reloc_code_real rel
[2];
9970 const i386_operand_type types64
;
9974 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
9975 BFD_RELOC_32_SECREL
},
9976 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
9982 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
9983 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
9986 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
9988 int len
= gotrel
[j
].len
;
9990 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
9992 if (gotrel
[j
].rel
[object_64bit
] != 0)
9995 char *tmpbuf
, *past_reloc
;
9997 *rel
= gotrel
[j
].rel
[object_64bit
];
10003 if (flag_code
!= CODE_64BIT
)
10005 types
->bitfield
.imm32
= 1;
10006 types
->bitfield
.disp32
= 1;
10009 *types
= gotrel
[j
].types64
;
10012 /* The length of the first part of our input line. */
10013 first
= cp
- input_line_pointer
;
10015 /* The second part goes from after the reloc token until
10016 (and including) an end_of_line char or comma. */
10017 past_reloc
= cp
+ 1 + len
;
10019 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
10021 second
= cp
+ 1 - past_reloc
;
10023 /* Allocate and copy string. The trailing NUL shouldn't
10024 be necessary, but be safe. */
10025 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
10026 memcpy (tmpbuf
, input_line_pointer
, first
);
10027 if (second
!= 0 && *past_reloc
!= ' ')
10028 /* Replace the relocation token with ' ', so that
10029 errors like foo@SECLREL321 will be detected. */
10030 tmpbuf
[first
++] = ' ';
10031 memcpy (tmpbuf
+ first
, past_reloc
, second
);
10032 tmpbuf
[first
+ second
] = '\0';
10036 as_bad (_("@%s reloc is not supported with %d-bit output format"),
10037 gotrel
[j
].str
, 1 << (5 + object_64bit
));
10042 /* Might be a symbol version string. Don't as_bad here. */
10048 bfd_reloc_code_real_type
10049 x86_cons (expressionS
*exp
, int size
)
10051 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
10053 intel_syntax
= -intel_syntax
;
10056 if (size
== 4 || (object_64bit
&& size
== 8))
10058 /* Handle @GOTOFF and the like in an expression. */
10060 char *gotfree_input_line
;
10063 save
= input_line_pointer
;
10064 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
10065 if (gotfree_input_line
)
10066 input_line_pointer
= gotfree_input_line
;
10070 if (gotfree_input_line
)
10072 /* expression () has merrily parsed up to the end of line,
10073 or a comma - in the wrong buffer. Transfer how far
10074 input_line_pointer has moved to the right buffer. */
10075 input_line_pointer
= (save
10076 + (input_line_pointer
- gotfree_input_line
)
10078 free (gotfree_input_line
);
10079 if (exp
->X_op
== O_constant
10080 || exp
->X_op
== O_absent
10081 || exp
->X_op
== O_illegal
10082 || exp
->X_op
== O_register
10083 || exp
->X_op
== O_big
)
10085 char c
= *input_line_pointer
;
10086 *input_line_pointer
= 0;
10087 as_bad (_("missing or invalid expression `%s'"), save
);
10088 *input_line_pointer
= c
;
10090 else if ((got_reloc
== BFD_RELOC_386_PLT32
10091 || got_reloc
== BFD_RELOC_X86_64_PLT32
)
10092 && exp
->X_op
!= O_symbol
)
10094 char c
= *input_line_pointer
;
10095 *input_line_pointer
= 0;
10096 as_bad (_("invalid PLT expression `%s'"), save
);
10097 *input_line_pointer
= c
;
10104 intel_syntax
= -intel_syntax
;
10107 i386_intel_simplify (exp
);
10113 signed_cons (int size
)
10115 if (flag_code
== CODE_64BIT
)
10123 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
10130 if (exp
.X_op
== O_symbol
)
10131 exp
.X_op
= O_secrel
;
10133 emit_expr (&exp
, 4);
10135 while (*input_line_pointer
++ == ',');
10137 input_line_pointer
--;
10138 demand_empty_rest_of_line ();
10142 /* Handle Vector operations. */
10145 check_VecOperations (char *op_string
, char *op_end
)
10147 const reg_entry
*mask
;
10152 && (op_end
== NULL
|| op_string
< op_end
))
10155 if (*op_string
== '{')
10159 /* Check broadcasts. */
10160 if (strncmp (op_string
, "1to", 3) == 0)
10165 goto duplicated_vec_op
;
10168 if (*op_string
== '8')
10170 else if (*op_string
== '4')
10172 else if (*op_string
== '2')
10174 else if (*op_string
== '1'
10175 && *(op_string
+1) == '6')
10182 as_bad (_("Unsupported broadcast: `%s'"), saved
);
10187 broadcast_op
.type
= bcst_type
;
10188 broadcast_op
.operand
= this_operand
;
10189 broadcast_op
.bytes
= 0;
10190 i
.broadcast
= &broadcast_op
;
10192 /* Check masking operation. */
10193 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
10195 if (mask
== &bad_reg
)
10198 /* k0 can't be used for write mask. */
10199 if (mask
->reg_type
.bitfield
.class != RegMask
|| !mask
->reg_num
)
10201 as_bad (_("`%s%s' can't be used for write mask"),
10202 register_prefix
, mask
->reg_name
);
10208 mask_op
.mask
= mask
;
10209 mask_op
.zeroing
= 0;
10210 mask_op
.operand
= this_operand
;
10216 goto duplicated_vec_op
;
10218 i
.mask
->mask
= mask
;
10220 /* Only "{z}" is allowed here. No need to check
10221 zeroing mask explicitly. */
10222 if (i
.mask
->operand
!= this_operand
)
10224 as_bad (_("invalid write mask `%s'"), saved
);
10229 op_string
= end_op
;
10231 /* Check zeroing-flag for masking operation. */
10232 else if (*op_string
== 'z')
10236 mask_op
.mask
= NULL
;
10237 mask_op
.zeroing
= 1;
10238 mask_op
.operand
= this_operand
;
10243 if (i
.mask
->zeroing
)
10246 as_bad (_("duplicated `%s'"), saved
);
10250 i
.mask
->zeroing
= 1;
10252 /* Only "{%k}" is allowed here. No need to check mask
10253 register explicitly. */
10254 if (i
.mask
->operand
!= this_operand
)
10256 as_bad (_("invalid zeroing-masking `%s'"),
10265 goto unknown_vec_op
;
10267 if (*op_string
!= '}')
10269 as_bad (_("missing `}' in `%s'"), saved
);
10274 /* Strip whitespace since the addition of pseudo prefixes
10275 changed how the scrubber treats '{'. */
10276 if (is_space_char (*op_string
))
10282 /* We don't know this one. */
10283 as_bad (_("unknown vector operation: `%s'"), saved
);
10287 if (i
.mask
&& i
.mask
->zeroing
&& !i
.mask
->mask
)
10289 as_bad (_("zeroing-masking only allowed with write mask"));
10297 i386_immediate (char *imm_start
)
10299 char *save_input_line_pointer
;
10300 char *gotfree_input_line
;
10303 i386_operand_type types
;
10305 operand_type_set (&types
, ~0);
10307 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
10309 as_bad (_("at most %d immediate operands are allowed"),
10310 MAX_IMMEDIATE_OPERANDS
);
10314 exp
= &im_expressions
[i
.imm_operands
++];
10315 i
.op
[this_operand
].imms
= exp
;
10317 if (is_space_char (*imm_start
))
10320 save_input_line_pointer
= input_line_pointer
;
10321 input_line_pointer
= imm_start
;
10323 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10324 if (gotfree_input_line
)
10325 input_line_pointer
= gotfree_input_line
;
10327 exp_seg
= expression (exp
);
10329 SKIP_WHITESPACE ();
10331 /* Handle vector operations. */
10332 if (*input_line_pointer
== '{')
10334 input_line_pointer
= check_VecOperations (input_line_pointer
,
10336 if (input_line_pointer
== NULL
)
10340 if (*input_line_pointer
)
10341 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10343 input_line_pointer
= save_input_line_pointer
;
10344 if (gotfree_input_line
)
10346 free (gotfree_input_line
);
10348 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
10349 exp
->X_op
= O_illegal
;
10352 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
10356 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10357 i386_operand_type types
, const char *imm_start
)
10359 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
10362 as_bad (_("missing or invalid immediate expression `%s'"),
10366 else if (exp
->X_op
== O_constant
)
10368 /* Size it properly later. */
10369 i
.types
[this_operand
].bitfield
.imm64
= 1;
10370 /* If not 64bit, sign extend val. */
10371 if (flag_code
!= CODE_64BIT
10372 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
10374 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
10376 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10377 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
10378 && exp_seg
!= absolute_section
10379 && exp_seg
!= text_section
10380 && exp_seg
!= data_section
10381 && exp_seg
!= bss_section
10382 && exp_seg
!= undefined_section
10383 && !bfd_is_com_section (exp_seg
))
10385 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10389 else if (!intel_syntax
&& exp_seg
== reg_section
)
10392 as_bad (_("illegal immediate register operand %s"), imm_start
);
10397 /* This is an address. The size of the address will be
10398 determined later, depending on destination register,
10399 suffix, or the default for the section. */
10400 i
.types
[this_operand
].bitfield
.imm8
= 1;
10401 i
.types
[this_operand
].bitfield
.imm16
= 1;
10402 i
.types
[this_operand
].bitfield
.imm32
= 1;
10403 i
.types
[this_operand
].bitfield
.imm32s
= 1;
10404 i
.types
[this_operand
].bitfield
.imm64
= 1;
10405 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10413 i386_scale (char *scale
)
10416 char *save
= input_line_pointer
;
10418 input_line_pointer
= scale
;
10419 val
= get_absolute_expression ();
10424 i
.log2_scale_factor
= 0;
10427 i
.log2_scale_factor
= 1;
10430 i
.log2_scale_factor
= 2;
10433 i
.log2_scale_factor
= 3;
10437 char sep
= *input_line_pointer
;
10439 *input_line_pointer
= '\0';
10440 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
10442 *input_line_pointer
= sep
;
10443 input_line_pointer
= save
;
10447 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
10449 as_warn (_("scale factor of %d without an index register"),
10450 1 << i
.log2_scale_factor
);
10451 i
.log2_scale_factor
= 0;
10453 scale
= input_line_pointer
;
10454 input_line_pointer
= save
;
10459 i386_displacement (char *disp_start
, char *disp_end
)
10463 char *save_input_line_pointer
;
10464 char *gotfree_input_line
;
10466 i386_operand_type bigdisp
, types
= anydisp
;
10469 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
10471 as_bad (_("at most %d displacement operands are allowed"),
10472 MAX_MEMORY_OPERANDS
);
10476 operand_type_set (&bigdisp
, 0);
10478 || i
.types
[this_operand
].bitfield
.baseindex
10479 || (current_templates
->start
->opcode_modifier
.jump
!= JUMP
10480 && current_templates
->start
->opcode_modifier
.jump
!= JUMP_DWORD
))
10482 i386_addressing_mode ();
10483 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
10484 if (flag_code
== CODE_64BIT
)
10488 bigdisp
.bitfield
.disp32s
= 1;
10489 bigdisp
.bitfield
.disp64
= 1;
10492 bigdisp
.bitfield
.disp32
= 1;
10494 else if ((flag_code
== CODE_16BIT
) ^ override
)
10495 bigdisp
.bitfield
.disp16
= 1;
10497 bigdisp
.bitfield
.disp32
= 1;
10501 /* For PC-relative branches, the width of the displacement may be
10502 dependent upon data size, but is never dependent upon address size.
10503 Also make sure to not unintentionally match against a non-PC-relative
10504 branch template. */
10505 static templates aux_templates
;
10506 const insn_template
*t
= current_templates
->start
;
10507 bfd_boolean has_intel64
= FALSE
;
10509 aux_templates
.start
= t
;
10510 while (++t
< current_templates
->end
)
10512 if (t
->opcode_modifier
.jump
10513 != current_templates
->start
->opcode_modifier
.jump
)
10515 if ((t
->opcode_modifier
.isa64
>= INTEL64
))
10516 has_intel64
= TRUE
;
10518 if (t
< current_templates
->end
)
10520 aux_templates
.end
= t
;
10521 current_templates
= &aux_templates
;
10524 override
= (i
.prefix
[DATA_PREFIX
] != 0);
10525 if (flag_code
== CODE_64BIT
)
10527 if ((override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
10528 && (!intel64
|| !has_intel64
))
10529 bigdisp
.bitfield
.disp16
= 1;
10531 bigdisp
.bitfield
.disp32s
= 1;
10536 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
10538 : LONG_MNEM_SUFFIX
));
10539 bigdisp
.bitfield
.disp32
= 1;
10540 if ((flag_code
== CODE_16BIT
) ^ override
)
10542 bigdisp
.bitfield
.disp32
= 0;
10543 bigdisp
.bitfield
.disp16
= 1;
10547 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
10550 exp
= &disp_expressions
[i
.disp_operands
];
10551 i
.op
[this_operand
].disps
= exp
;
10553 save_input_line_pointer
= input_line_pointer
;
10554 input_line_pointer
= disp_start
;
10555 END_STRING_AND_SAVE (disp_end
);
10557 #ifndef GCC_ASM_O_HACK
10558 #define GCC_ASM_O_HACK 0
10561 END_STRING_AND_SAVE (disp_end
+ 1);
10562 if (i
.types
[this_operand
].bitfield
.baseIndex
10563 && displacement_string_end
[-1] == '+')
10565 /* This hack is to avoid a warning when using the "o"
10566 constraint within gcc asm statements.
10569 #define _set_tssldt_desc(n,addr,limit,type) \
10570 __asm__ __volatile__ ( \
10571 "movw %w2,%0\n\t" \
10572 "movw %w1,2+%0\n\t" \
10573 "rorl $16,%1\n\t" \
10574 "movb %b1,4+%0\n\t" \
10575 "movb %4,5+%0\n\t" \
10576 "movb $0,6+%0\n\t" \
10577 "movb %h1,7+%0\n\t" \
10579 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
10581 This works great except that the output assembler ends
10582 up looking a bit weird if it turns out that there is
10583 no offset. You end up producing code that looks like:
10596 So here we provide the missing zero. */
10598 *displacement_string_end
= '0';
10601 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10602 if (gotfree_input_line
)
10603 input_line_pointer
= gotfree_input_line
;
10605 exp_seg
= expression (exp
);
10607 SKIP_WHITESPACE ();
10608 if (*input_line_pointer
)
10609 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10611 RESTORE_END_STRING (disp_end
+ 1);
10613 input_line_pointer
= save_input_line_pointer
;
10614 if (gotfree_input_line
)
10616 free (gotfree_input_line
);
10618 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
10619 exp
->X_op
= O_illegal
;
10622 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
10624 RESTORE_END_STRING (disp_end
);
10630 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10631 i386_operand_type types
, const char *disp_start
)
10633 i386_operand_type bigdisp
;
10636 /* We do this to make sure that the section symbol is in
10637 the symbol table. We will ultimately change the relocation
10638 to be relative to the beginning of the section. */
10639 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
10640 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
10641 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
10643 if (exp
->X_op
!= O_symbol
)
10646 if (S_IS_LOCAL (exp
->X_add_symbol
)
10647 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
10648 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
10649 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
10650 exp
->X_op
= O_subtract
;
10651 exp
->X_op_symbol
= GOT_symbol
;
10652 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
10653 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
10654 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
10655 i
.reloc
[this_operand
] = BFD_RELOC_64
;
10657 i
.reloc
[this_operand
] = BFD_RELOC_32
;
10660 else if (exp
->X_op
== O_absent
10661 || exp
->X_op
== O_illegal
10662 || exp
->X_op
== O_big
)
10665 as_bad (_("missing or invalid displacement expression `%s'"),
10670 else if (flag_code
== CODE_64BIT
10671 && !i
.prefix
[ADDR_PREFIX
]
10672 && exp
->X_op
== O_constant
)
10674 /* Since displacement is signed extended to 64bit, don't allow
10675 disp32 and turn off disp32s if they are out of range. */
10676 i
.types
[this_operand
].bitfield
.disp32
= 0;
10677 if (!fits_in_signed_long (exp
->X_add_number
))
10679 i
.types
[this_operand
].bitfield
.disp32s
= 0;
10680 if (i
.types
[this_operand
].bitfield
.baseindex
)
10682 as_bad (_("0x%lx out range of signed 32bit displacement"),
10683 (long) exp
->X_add_number
);
10689 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10690 else if (exp
->X_op
!= O_constant
10691 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
10692 && exp_seg
!= absolute_section
10693 && exp_seg
!= text_section
10694 && exp_seg
!= data_section
10695 && exp_seg
!= bss_section
10696 && exp_seg
!= undefined_section
10697 && !bfd_is_com_section (exp_seg
))
10699 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10704 if (current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
10705 /* Constants get taken care of by optimize_disp(). */
10706 && exp
->X_op
!= O_constant
)
10707 i
.types
[this_operand
].bitfield
.disp8
= 1;
10709 /* Check if this is a displacement only operand. */
10710 bigdisp
= i
.types
[this_operand
];
10711 bigdisp
.bitfield
.disp8
= 0;
10712 bigdisp
.bitfield
.disp16
= 0;
10713 bigdisp
.bitfield
.disp32
= 0;
10714 bigdisp
.bitfield
.disp32s
= 0;
10715 bigdisp
.bitfield
.disp64
= 0;
10716 if (operand_type_all_zero (&bigdisp
))
10717 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10723 /* Return the active addressing mode, taking address override and
10724 registers forming the address into consideration. Update the
10725 address override prefix if necessary. */
10727 static enum flag_code
10728 i386_addressing_mode (void)
10730 enum flag_code addr_mode
;
10732 if (i
.prefix
[ADDR_PREFIX
])
10733 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
10734 else if (flag_code
== CODE_16BIT
10735 && current_templates
->start
->cpu_flags
.bitfield
.cpumpx
10736 /* Avoid replacing the "16-bit addressing not allowed" diagnostic
10737 from md_assemble() by "is not a valid base/index expression"
10738 when there is a base and/or index. */
10739 && !i
.types
[this_operand
].bitfield
.baseindex
)
10741 /* MPX insn memory operands with neither base nor index must be forced
10742 to use 32-bit addressing in 16-bit mode. */
10743 addr_mode
= CODE_32BIT
;
10744 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
10746 gas_assert (!i
.types
[this_operand
].bitfield
.disp16
);
10747 gas_assert (!i
.types
[this_operand
].bitfield
.disp32
);
10751 addr_mode
= flag_code
;
10753 #if INFER_ADDR_PREFIX
10754 if (i
.mem_operands
== 0)
10756 /* Infer address prefix from the first memory operand. */
10757 const reg_entry
*addr_reg
= i
.base_reg
;
10759 if (addr_reg
== NULL
)
10760 addr_reg
= i
.index_reg
;
10764 if (addr_reg
->reg_type
.bitfield
.dword
)
10765 addr_mode
= CODE_32BIT
;
10766 else if (flag_code
!= CODE_64BIT
10767 && addr_reg
->reg_type
.bitfield
.word
)
10768 addr_mode
= CODE_16BIT
;
10770 if (addr_mode
!= flag_code
)
10772 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
10774 /* Change the size of any displacement too. At most one
10775 of Disp16 or Disp32 is set.
10776 FIXME. There doesn't seem to be any real need for
10777 separate Disp16 and Disp32 flags. The same goes for
10778 Imm16 and Imm32. Removing them would probably clean
10779 up the code quite a lot. */
10780 if (flag_code
!= CODE_64BIT
10781 && (i
.types
[this_operand
].bitfield
.disp16
10782 || i
.types
[this_operand
].bitfield
.disp32
))
10783 i
.types
[this_operand
]
10784 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
10794 /* Make sure the memory operand we've been dealt is valid.
10795 Return 1 on success, 0 on a failure. */
10798 i386_index_check (const char *operand_string
)
10800 const char *kind
= "base/index";
10801 enum flag_code addr_mode
= i386_addressing_mode ();
10803 if (current_templates
->start
->opcode_modifier
.isstring
10804 && !current_templates
->start
->cpu_flags
.bitfield
.cpupadlock
10805 && (current_templates
->end
[-1].opcode_modifier
.isstring
10806 || i
.mem_operands
))
10808 /* Memory operands of string insns are special in that they only allow
10809 a single register (rDI, rSI, or rBX) as their memory address. */
10810 const reg_entry
*expected_reg
;
10811 static const char *di_si
[][2] =
10817 static const char *bx
[] = { "ebx", "bx", "rbx" };
10819 kind
= "string address";
10821 if (current_templates
->start
->opcode_modifier
.repprefixok
)
10823 int es_op
= current_templates
->end
[-1].opcode_modifier
.isstring
10824 - IS_STRING_ES_OP0
;
10827 if (!current_templates
->end
[-1].operand_types
[0].bitfield
.baseindex
10828 || ((!i
.mem_operands
!= !intel_syntax
)
10829 && current_templates
->end
[-1].operand_types
[1]
10830 .bitfield
.baseindex
))
10832 expected_reg
= hash_find (reg_hash
, di_si
[addr_mode
][op
== es_op
]);
10835 expected_reg
= hash_find (reg_hash
, bx
[addr_mode
]);
10837 if (i
.base_reg
!= expected_reg
10839 || operand_type_check (i
.types
[this_operand
], disp
))
10841 /* The second memory operand must have the same size as
10845 && !((addr_mode
== CODE_64BIT
10846 && i
.base_reg
->reg_type
.bitfield
.qword
)
10847 || (addr_mode
== CODE_32BIT
10848 ? i
.base_reg
->reg_type
.bitfield
.dword
10849 : i
.base_reg
->reg_type
.bitfield
.word
)))
10852 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
10854 intel_syntax
? '[' : '(',
10856 expected_reg
->reg_name
,
10857 intel_syntax
? ']' : ')');
10864 as_bad (_("`%s' is not a valid %s expression"),
10865 operand_string
, kind
);
10870 if (addr_mode
!= CODE_16BIT
)
10872 /* 32-bit/64-bit checks. */
10874 && ((addr_mode
== CODE_64BIT
10875 ? !i
.base_reg
->reg_type
.bitfield
.qword
10876 : !i
.base_reg
->reg_type
.bitfield
.dword
)
10877 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
10878 || i
.base_reg
->reg_num
== RegIZ
))
10880 && !i
.index_reg
->reg_type
.bitfield
.xmmword
10881 && !i
.index_reg
->reg_type
.bitfield
.ymmword
10882 && !i
.index_reg
->reg_type
.bitfield
.zmmword
10883 && ((addr_mode
== CODE_64BIT
10884 ? !i
.index_reg
->reg_type
.bitfield
.qword
10885 : !i
.index_reg
->reg_type
.bitfield
.dword
)
10886 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
10889 /* bndmk, bndldx, and bndstx have special restrictions. */
10890 if (current_templates
->start
->base_opcode
== 0xf30f1b
10891 || (current_templates
->start
->base_opcode
& ~1) == 0x0f1a)
10893 /* They cannot use RIP-relative addressing. */
10894 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
10896 as_bad (_("`%s' cannot be used here"), operand_string
);
10900 /* bndldx and bndstx ignore their scale factor. */
10901 if (current_templates
->start
->base_opcode
!= 0xf30f1b
10902 && i
.log2_scale_factor
)
10903 as_warn (_("register scaling is being ignored here"));
10908 /* 16-bit checks. */
10910 && (!i
.base_reg
->reg_type
.bitfield
.word
10911 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
10913 && (!i
.index_reg
->reg_type
.bitfield
.word
10914 || !i
.index_reg
->reg_type
.bitfield
.baseindex
10916 && i
.base_reg
->reg_num
< 6
10917 && i
.index_reg
->reg_num
>= 6
10918 && i
.log2_scale_factor
== 0))))
10925 /* Handle vector immediates. */
10928 RC_SAE_immediate (const char *imm_start
)
10930 unsigned int match_found
, j
;
10931 const char *pstr
= imm_start
;
10939 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
10941 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
10945 rc_op
.type
= RC_NamesTable
[j
].type
;
10946 rc_op
.operand
= this_operand
;
10947 i
.rounding
= &rc_op
;
10951 as_bad (_("duplicated `%s'"), imm_start
);
10954 pstr
+= RC_NamesTable
[j
].len
;
10962 if (*pstr
++ != '}')
10964 as_bad (_("Missing '}': '%s'"), imm_start
);
10967 /* RC/SAE immediate string should contain nothing more. */;
10970 as_bad (_("Junk after '}': '%s'"), imm_start
);
10974 exp
= &im_expressions
[i
.imm_operands
++];
10975 i
.op
[this_operand
].imms
= exp
;
10977 exp
->X_op
= O_constant
;
10978 exp
->X_add_number
= 0;
10979 exp
->X_add_symbol
= (symbolS
*) 0;
10980 exp
->X_op_symbol
= (symbolS
*) 0;
10982 i
.types
[this_operand
].bitfield
.imm8
= 1;
10986 /* Only string instructions can have a second memory operand, so
10987 reduce current_templates to just those if it contains any. */
10989 maybe_adjust_templates (void)
10991 const insn_template
*t
;
10993 gas_assert (i
.mem_operands
== 1);
10995 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
10996 if (t
->opcode_modifier
.isstring
)
10999 if (t
< current_templates
->end
)
11001 static templates aux_templates
;
11002 bfd_boolean recheck
;
11004 aux_templates
.start
= t
;
11005 for (; t
< current_templates
->end
; ++t
)
11006 if (!t
->opcode_modifier
.isstring
)
11008 aux_templates
.end
= t
;
11010 /* Determine whether to re-check the first memory operand. */
11011 recheck
= (aux_templates
.start
!= current_templates
->start
11012 || t
!= current_templates
->end
);
11014 current_templates
= &aux_templates
;
11018 i
.mem_operands
= 0;
11019 if (i
.memop1_string
!= NULL
11020 && i386_index_check (i
.memop1_string
) == 0)
11022 i
.mem_operands
= 1;
11029 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
11033 i386_att_operand (char *operand_string
)
11035 const reg_entry
*r
;
11037 char *op_string
= operand_string
;
11039 if (is_space_char (*op_string
))
11042 /* We check for an absolute prefix (differentiating,
11043 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
11044 if (*op_string
== ABSOLUTE_PREFIX
)
11047 if (is_space_char (*op_string
))
11049 i
.jumpabsolute
= TRUE
;
11052 /* Check if operand is a register. */
11053 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
11055 i386_operand_type temp
;
11060 /* Check for a segment override by searching for ':' after a
11061 segment register. */
11062 op_string
= end_op
;
11063 if (is_space_char (*op_string
))
11065 if (*op_string
== ':' && r
->reg_type
.bitfield
.class == SReg
)
11067 switch (r
->reg_num
)
11070 i
.seg
[i
.mem_operands
] = &es
;
11073 i
.seg
[i
.mem_operands
] = &cs
;
11076 i
.seg
[i
.mem_operands
] = &ss
;
11079 i
.seg
[i
.mem_operands
] = &ds
;
11082 i
.seg
[i
.mem_operands
] = &fs
;
11085 i
.seg
[i
.mem_operands
] = &gs
;
11089 /* Skip the ':' and whitespace. */
11091 if (is_space_char (*op_string
))
11094 if (!is_digit_char (*op_string
)
11095 && !is_identifier_char (*op_string
)
11096 && *op_string
!= '('
11097 && *op_string
!= ABSOLUTE_PREFIX
)
11099 as_bad (_("bad memory operand `%s'"), op_string
);
11102 /* Handle case of %es:*foo. */
11103 if (*op_string
== ABSOLUTE_PREFIX
)
11106 if (is_space_char (*op_string
))
11108 i
.jumpabsolute
= TRUE
;
11110 goto do_memory_reference
;
11113 /* Handle vector operations. */
11114 if (*op_string
== '{')
11116 op_string
= check_VecOperations (op_string
, NULL
);
11117 if (op_string
== NULL
)
11123 as_bad (_("junk `%s' after register"), op_string
);
11126 temp
= r
->reg_type
;
11127 temp
.bitfield
.baseindex
= 0;
11128 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
11130 i
.types
[this_operand
].bitfield
.unspecified
= 0;
11131 i
.op
[this_operand
].regs
= r
;
11134 else if (*op_string
== REGISTER_PREFIX
)
11136 as_bad (_("bad register name `%s'"), op_string
);
11139 else if (*op_string
== IMMEDIATE_PREFIX
)
11142 if (i
.jumpabsolute
)
11144 as_bad (_("immediate operand illegal with absolute jump"));
11147 if (!i386_immediate (op_string
))
11150 else if (RC_SAE_immediate (operand_string
))
11152 /* If it is a RC or SAE immediate, do nothing. */
11155 else if (is_digit_char (*op_string
)
11156 || is_identifier_char (*op_string
)
11157 || *op_string
== '"'
11158 || *op_string
== '(')
11160 /* This is a memory reference of some sort. */
11163 /* Start and end of displacement string expression (if found). */
11164 char *displacement_string_start
;
11165 char *displacement_string_end
;
11168 do_memory_reference
:
11169 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
11171 if ((i
.mem_operands
== 1
11172 && !current_templates
->start
->opcode_modifier
.isstring
)
11173 || i
.mem_operands
== 2)
11175 as_bad (_("too many memory references for `%s'"),
11176 current_templates
->start
->name
);
11180 /* Check for base index form. We detect the base index form by
11181 looking for an ')' at the end of the operand, searching
11182 for the '(' matching it, and finding a REGISTER_PREFIX or ','
11184 base_string
= op_string
+ strlen (op_string
);
11186 /* Handle vector operations. */
11187 vop_start
= strchr (op_string
, '{');
11188 if (vop_start
&& vop_start
< base_string
)
11190 if (check_VecOperations (vop_start
, base_string
) == NULL
)
11192 base_string
= vop_start
;
11196 if (is_space_char (*base_string
))
11199 /* If we only have a displacement, set-up for it to be parsed later. */
11200 displacement_string_start
= op_string
;
11201 displacement_string_end
= base_string
+ 1;
11203 if (*base_string
== ')')
11206 unsigned int parens_balanced
= 1;
11207 /* We've already checked that the number of left & right ()'s are
11208 equal, so this loop will not be infinite. */
11212 if (*base_string
== ')')
11214 if (*base_string
== '(')
11217 while (parens_balanced
);
11219 temp_string
= base_string
;
11221 /* Skip past '(' and whitespace. */
11223 if (is_space_char (*base_string
))
11226 if (*base_string
== ','
11227 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
11230 displacement_string_end
= temp_string
;
11232 i
.types
[this_operand
].bitfield
.baseindex
= 1;
11236 if (i
.base_reg
== &bad_reg
)
11238 base_string
= end_op
;
11239 if (is_space_char (*base_string
))
11243 /* There may be an index reg or scale factor here. */
11244 if (*base_string
== ',')
11247 if (is_space_char (*base_string
))
11250 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
11253 if (i
.index_reg
== &bad_reg
)
11255 base_string
= end_op
;
11256 if (is_space_char (*base_string
))
11258 if (*base_string
== ',')
11261 if (is_space_char (*base_string
))
11264 else if (*base_string
!= ')')
11266 as_bad (_("expecting `,' or `)' "
11267 "after index register in `%s'"),
11272 else if (*base_string
== REGISTER_PREFIX
)
11274 end_op
= strchr (base_string
, ',');
11277 as_bad (_("bad register name `%s'"), base_string
);
11281 /* Check for scale factor. */
11282 if (*base_string
!= ')')
11284 char *end_scale
= i386_scale (base_string
);
11289 base_string
= end_scale
;
11290 if (is_space_char (*base_string
))
11292 if (*base_string
!= ')')
11294 as_bad (_("expecting `)' "
11295 "after scale factor in `%s'"),
11300 else if (!i
.index_reg
)
11302 as_bad (_("expecting index register or scale factor "
11303 "after `,'; got '%c'"),
11308 else if (*base_string
!= ')')
11310 as_bad (_("expecting `,' or `)' "
11311 "after base register in `%s'"),
11316 else if (*base_string
== REGISTER_PREFIX
)
11318 end_op
= strchr (base_string
, ',');
11321 as_bad (_("bad register name `%s'"), base_string
);
11326 /* If there's an expression beginning the operand, parse it,
11327 assuming displacement_string_start and
11328 displacement_string_end are meaningful. */
11329 if (displacement_string_start
!= displacement_string_end
)
11331 if (!i386_displacement (displacement_string_start
,
11332 displacement_string_end
))
11336 /* Special case for (%dx) while doing input/output op. */
11338 && i
.base_reg
->reg_type
.bitfield
.instance
== RegD
11339 && i
.base_reg
->reg_type
.bitfield
.word
11340 && i
.index_reg
== 0
11341 && i
.log2_scale_factor
== 0
11342 && i
.seg
[i
.mem_operands
] == 0
11343 && !operand_type_check (i
.types
[this_operand
], disp
))
11345 i
.types
[this_operand
] = i
.base_reg
->reg_type
;
11349 if (i386_index_check (operand_string
) == 0)
11351 i
.flags
[this_operand
] |= Operand_Mem
;
11352 if (i
.mem_operands
== 0)
11353 i
.memop1_string
= xstrdup (operand_string
);
11358 /* It's not a memory operand; argh! */
11359 as_bad (_("invalid char %s beginning operand %d `%s'"),
11360 output_invalid (*op_string
),
11365 return 1; /* Normal return. */
11368 /* Calculate the maximum variable size (i.e., excluding fr_fix)
11369 that an rs_machine_dependent frag may reach. */
11372 i386_frag_max_var (fragS
*frag
)
11374 /* The only relaxable frags are for jumps.
11375 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
11376 gas_assert (frag
->fr_type
== rs_machine_dependent
);
11377 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
11380 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11382 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
11384 /* STT_GNU_IFUNC symbol must go through PLT. */
11385 if ((symbol_get_bfdsym (fr_symbol
)->flags
11386 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
11389 if (!S_IS_EXTERNAL (fr_symbol
))
11390 /* Symbol may be weak or local. */
11391 return !S_IS_WEAK (fr_symbol
);
11393 /* Global symbols with non-default visibility can't be preempted. */
11394 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
11397 if (fr_var
!= NO_RELOC
)
11398 switch ((enum bfd_reloc_code_real
) fr_var
)
11400 case BFD_RELOC_386_PLT32
:
11401 case BFD_RELOC_X86_64_PLT32
:
11402 /* Symbol with PLT relocation may be preempted. */
11408 /* Global symbols with default visibility in a shared library may be
11409 preempted by another definition. */
11414 /* Table 3-2. Macro-Fusible Instructions in Haswell Microarchitecture
11415 Note also work for Skylake and Cascadelake.
11416 ---------------------------------------------------------------------
11417 | JCC | ADD/SUB/CMP | INC/DEC | TEST/AND |
11418 | ------ | ----------- | ------- | -------- |
11420 | Jno | N | N | Y |
11421 | Jc/Jb | Y | N | Y |
11422 | Jae/Jnb | Y | N | Y |
11423 | Je/Jz | Y | Y | Y |
11424 | Jne/Jnz | Y | Y | Y |
11425 | Jna/Jbe | Y | N | Y |
11426 | Ja/Jnbe | Y | N | Y |
11428 | Jns | N | N | Y |
11429 | Jp/Jpe | N | N | Y |
11430 | Jnp/Jpo | N | N | Y |
11431 | Jl/Jnge | Y | Y | Y |
11432 | Jge/Jnl | Y | Y | Y |
11433 | Jle/Jng | Y | Y | Y |
11434 | Jg/Jnle | Y | Y | Y |
11435 --------------------------------------------------------------------- */
11437 i386_macro_fusible_p (enum mf_cmp_kind mf_cmp
, enum mf_jcc_kind mf_jcc
)
11439 if (mf_cmp
== mf_cmp_alu_cmp
)
11440 return ((mf_jcc
>= mf_jcc_jc
&& mf_jcc
<= mf_jcc_jna
)
11441 || mf_jcc
== mf_jcc_jl
|| mf_jcc
== mf_jcc_jle
);
11442 if (mf_cmp
== mf_cmp_incdec
)
11443 return (mf_jcc
== mf_jcc_je
|| mf_jcc
== mf_jcc_jl
11444 || mf_jcc
== mf_jcc_jle
);
11445 if (mf_cmp
== mf_cmp_test_and
)
11450 /* Return the next non-empty frag. */
11453 i386_next_non_empty_frag (fragS
*fragP
)
11455 /* There may be a frag with a ".fill 0" when there is no room in
11456 the current frag for frag_grow in output_insn. */
11457 for (fragP
= fragP
->fr_next
;
11459 && fragP
->fr_type
== rs_fill
11460 && fragP
->fr_fix
== 0);
11461 fragP
= fragP
->fr_next
)
11466 /* Return the next jcc frag after BRANCH_PADDING. */
11469 i386_next_fusible_jcc_frag (fragS
*maybe_cmp_fragP
, fragS
*pad_fragP
)
11471 fragS
*branch_fragP
;
11475 if (pad_fragP
->fr_type
== rs_machine_dependent
11476 && (TYPE_FROM_RELAX_STATE (pad_fragP
->fr_subtype
)
11477 == BRANCH_PADDING
))
11479 branch_fragP
= i386_next_non_empty_frag (pad_fragP
);
11480 if (branch_fragP
->fr_type
!= rs_machine_dependent
)
11482 if (TYPE_FROM_RELAX_STATE (branch_fragP
->fr_subtype
) == COND_JUMP
11483 && i386_macro_fusible_p (maybe_cmp_fragP
->tc_frag_data
.mf_type
,
11484 pad_fragP
->tc_frag_data
.mf_type
))
11485 return branch_fragP
;
11491 /* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags. */
11494 i386_classify_machine_dependent_frag (fragS
*fragP
)
11498 fragS
*branch_fragP
;
11500 unsigned int max_prefix_length
;
11502 if (fragP
->tc_frag_data
.classified
)
11505 /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING. Convert
11506 FUSED_JCC_PADDING and merge BRANCH_PADDING. */
11507 for (next_fragP
= fragP
;
11508 next_fragP
!= NULL
;
11509 next_fragP
= next_fragP
->fr_next
)
11511 next_fragP
->tc_frag_data
.classified
= 1;
11512 if (next_fragP
->fr_type
== rs_machine_dependent
)
11513 switch (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
))
11515 case BRANCH_PADDING
:
11516 /* The BRANCH_PADDING frag must be followed by a branch
11518 branch_fragP
= i386_next_non_empty_frag (next_fragP
);
11519 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11521 case FUSED_JCC_PADDING
:
11522 /* Check if this is a fused jcc:
11524 CMP like instruction
11528 cmp_fragP
= i386_next_non_empty_frag (next_fragP
);
11529 pad_fragP
= i386_next_non_empty_frag (cmp_fragP
);
11530 branch_fragP
= i386_next_fusible_jcc_frag (next_fragP
, pad_fragP
);
11533 /* The BRANCH_PADDING frag is merged with the
11534 FUSED_JCC_PADDING frag. */
11535 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11536 /* CMP like instruction size. */
11537 next_fragP
->tc_frag_data
.cmp_size
= cmp_fragP
->fr_fix
;
11538 frag_wane (pad_fragP
);
11539 /* Skip to branch_fragP. */
11540 next_fragP
= branch_fragP
;
11542 else if (next_fragP
->tc_frag_data
.max_prefix_length
)
11544 /* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
11546 next_fragP
->fr_subtype
11547 = ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0);
11548 next_fragP
->tc_frag_data
.max_bytes
11549 = next_fragP
->tc_frag_data
.max_prefix_length
;
11550 /* This will be updated in the BRANCH_PREFIX scan. */
11551 next_fragP
->tc_frag_data
.max_prefix_length
= 0;
11554 frag_wane (next_fragP
);
11559 /* Stop if there is no BRANCH_PREFIX. */
11560 if (!align_branch_prefix_size
)
11563 /* Scan for BRANCH_PREFIX. */
11564 for (; fragP
!= NULL
; fragP
= fragP
->fr_next
)
11566 if (fragP
->fr_type
!= rs_machine_dependent
11567 || (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
11571 /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
11572 COND_JUMP_PREFIX. */
11573 max_prefix_length
= 0;
11574 for (next_fragP
= fragP
;
11575 next_fragP
!= NULL
;
11576 next_fragP
= next_fragP
->fr_next
)
11578 if (next_fragP
->fr_type
== rs_fill
)
11579 /* Skip rs_fill frags. */
11581 else if (next_fragP
->fr_type
!= rs_machine_dependent
)
11582 /* Stop for all other frags. */
11585 /* rs_machine_dependent frags. */
11586 if (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11589 /* Count BRANCH_PREFIX frags. */
11590 if (max_prefix_length
>= MAX_FUSED_JCC_PADDING_SIZE
)
11592 max_prefix_length
= MAX_FUSED_JCC_PADDING_SIZE
;
11593 frag_wane (next_fragP
);
11597 += next_fragP
->tc_frag_data
.max_bytes
;
11599 else if ((TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11601 || (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11602 == FUSED_JCC_PADDING
))
11604 /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING. */
11605 fragP
->tc_frag_data
.u
.padding_fragP
= next_fragP
;
11609 /* Stop for other rs_machine_dependent frags. */
11613 fragP
->tc_frag_data
.max_prefix_length
= max_prefix_length
;
11615 /* Skip to the next frag. */
11616 fragP
= next_fragP
;
11620 /* Compute padding size for
11623 CMP like instruction
11625 COND_JUMP/UNCOND_JUMP
11630 COND_JUMP/UNCOND_JUMP
11634 i386_branch_padding_size (fragS
*fragP
, offsetT address
)
11636 unsigned int offset
, size
, padding_size
;
11637 fragS
*branch_fragP
= fragP
->tc_frag_data
.u
.branch_fragP
;
11639 /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag. */
11641 address
= fragP
->fr_address
;
11642 address
+= fragP
->fr_fix
;
11644 /* CMP like instruction size. */
11645 size
= fragP
->tc_frag_data
.cmp_size
;
11647 /* The base size of the branch frag. */
11648 size
+= branch_fragP
->fr_fix
;
11650 /* Add opcode and displacement bytes for the rs_machine_dependent
11652 if (branch_fragP
->fr_type
== rs_machine_dependent
)
11653 size
+= md_relax_table
[branch_fragP
->fr_subtype
].rlx_length
;
11655 /* Check if branch is within boundary and doesn't end at the last
11657 offset
= address
& ((1U << align_branch_power
) - 1);
11658 if ((offset
+ size
) >= (1U << align_branch_power
))
11659 /* Padding needed to avoid crossing boundary. */
11660 padding_size
= (1U << align_branch_power
) - offset
;
11662 /* No padding needed. */
11665 /* The return value may be saved in tc_frag_data.length which is
11667 if (!fits_in_unsigned_byte (padding_size
))
11670 return padding_size
;
11673 /* i386_generic_table_relax_frag()
11675 Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
11676 grow/shrink padding to align branch frags. Hand others to
11680 i386_generic_table_relax_frag (segT segment
, fragS
*fragP
, long stretch
)
11682 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
11683 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
11685 long padding_size
= i386_branch_padding_size (fragP
, 0);
11686 long grow
= padding_size
- fragP
->tc_frag_data
.length
;
11688 /* When the BRANCH_PREFIX frag is used, the computed address
11689 must match the actual address and there should be no padding. */
11690 if (fragP
->tc_frag_data
.padding_address
11691 && (fragP
->tc_frag_data
.padding_address
!= fragP
->fr_address
11695 /* Update the padding size. */
11697 fragP
->tc_frag_data
.length
= padding_size
;
11701 else if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
11703 fragS
*padding_fragP
, *next_fragP
;
11704 long padding_size
, left_size
, last_size
;
11706 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
11707 if (!padding_fragP
)
11708 /* Use the padding set by the leading BRANCH_PREFIX frag. */
11709 return (fragP
->tc_frag_data
.length
11710 - fragP
->tc_frag_data
.last_length
);
11712 /* Compute the relative address of the padding frag in the very
11713 first time where the BRANCH_PREFIX frag sizes are zero. */
11714 if (!fragP
->tc_frag_data
.padding_address
)
11715 fragP
->tc_frag_data
.padding_address
11716 = padding_fragP
->fr_address
- (fragP
->fr_address
- stretch
);
11718 /* First update the last length from the previous iteration. */
11719 left_size
= fragP
->tc_frag_data
.prefix_length
;
11720 for (next_fragP
= fragP
;
11721 next_fragP
!= padding_fragP
;
11722 next_fragP
= next_fragP
->fr_next
)
11723 if (next_fragP
->fr_type
== rs_machine_dependent
11724 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11729 int max
= next_fragP
->tc_frag_data
.max_bytes
;
11733 if (max
> left_size
)
11738 next_fragP
->tc_frag_data
.last_length
= size
;
11742 next_fragP
->tc_frag_data
.last_length
= 0;
11745 /* Check the padding size for the padding frag. */
11746 padding_size
= i386_branch_padding_size
11747 (padding_fragP
, (fragP
->fr_address
11748 + fragP
->tc_frag_data
.padding_address
));
11750 last_size
= fragP
->tc_frag_data
.prefix_length
;
11751 /* Check if there is change from the last iteration. */
11752 if (padding_size
== last_size
)
11754 /* Update the expected address of the padding frag. */
11755 padding_fragP
->tc_frag_data
.padding_address
11756 = (fragP
->fr_address
+ padding_size
11757 + fragP
->tc_frag_data
.padding_address
);
11761 if (padding_size
> fragP
->tc_frag_data
.max_prefix_length
)
11763 /* No padding if there is no sufficient room. Clear the
11764 expected address of the padding frag. */
11765 padding_fragP
->tc_frag_data
.padding_address
= 0;
11769 /* Store the expected address of the padding frag. */
11770 padding_fragP
->tc_frag_data
.padding_address
11771 = (fragP
->fr_address
+ padding_size
11772 + fragP
->tc_frag_data
.padding_address
);
11774 fragP
->tc_frag_data
.prefix_length
= padding_size
;
11776 /* Update the length for the current iteration. */
11777 left_size
= padding_size
;
11778 for (next_fragP
= fragP
;
11779 next_fragP
!= padding_fragP
;
11780 next_fragP
= next_fragP
->fr_next
)
11781 if (next_fragP
->fr_type
== rs_machine_dependent
11782 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11787 int max
= next_fragP
->tc_frag_data
.max_bytes
;
11791 if (max
> left_size
)
11796 next_fragP
->tc_frag_data
.length
= size
;
11800 next_fragP
->tc_frag_data
.length
= 0;
11803 return (fragP
->tc_frag_data
.length
11804 - fragP
->tc_frag_data
.last_length
);
11806 return relax_frag (segment
, fragP
, stretch
);
11809 /* md_estimate_size_before_relax()
11811 Called just before relax() for rs_machine_dependent frags. The x86
11812 assembler uses these frags to handle variable size jump
11815 Any symbol that is now undefined will not become defined.
11816 Return the correct fr_subtype in the frag.
11817 Return the initial "guess for variable size of frag" to caller.
11818 The guess is actually the growth beyond the fixed part. Whatever
11819 we do to grow the fixed or variable part contributes to our
11823 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
11825 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
11826 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
11827 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
11829 i386_classify_machine_dependent_frag (fragP
);
11830 return fragP
->tc_frag_data
.length
;
11833 /* We've already got fragP->fr_subtype right; all we have to do is
11834 check for un-relaxable symbols. On an ELF system, we can't relax
11835 an externally visible symbol, because it may be overridden by a
11837 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
11838 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11840 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
11843 #if defined (OBJ_COFF) && defined (TE_PE)
11844 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
11845 && S_IS_WEAK (fragP
->fr_symbol
))
11849 /* Symbol is undefined in this segment, or we need to keep a
11850 reloc so that weak symbols can be overridden. */
11851 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
11852 enum bfd_reloc_code_real reloc_type
;
11853 unsigned char *opcode
;
11856 if (fragP
->fr_var
!= NO_RELOC
)
11857 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
11858 else if (size
== 2)
11859 reloc_type
= BFD_RELOC_16_PCREL
;
11860 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11861 else if (need_plt32_p (fragP
->fr_symbol
))
11862 reloc_type
= BFD_RELOC_X86_64_PLT32
;
11865 reloc_type
= BFD_RELOC_32_PCREL
;
11867 old_fr_fix
= fragP
->fr_fix
;
11868 opcode
= (unsigned char *) fragP
->fr_opcode
;
11870 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
11873 /* Make jmp (0xeb) a (d)word displacement jump. */
11875 fragP
->fr_fix
+= size
;
11876 fix_new (fragP
, old_fr_fix
, size
,
11878 fragP
->fr_offset
, 1,
11884 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
11886 /* Negate the condition, and branch past an
11887 unconditional jump. */
11890 /* Insert an unconditional jump. */
11892 /* We added two extra opcode bytes, and have a two byte
11894 fragP
->fr_fix
+= 2 + 2;
11895 fix_new (fragP
, old_fr_fix
+ 2, 2,
11897 fragP
->fr_offset
, 1,
11901 /* Fall through. */
11904 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
11908 fragP
->fr_fix
+= 1;
11909 fixP
= fix_new (fragP
, old_fr_fix
, 1,
11911 fragP
->fr_offset
, 1,
11912 BFD_RELOC_8_PCREL
);
11913 fixP
->fx_signed
= 1;
11917 /* This changes the byte-displacement jump 0x7N
11918 to the (d)word-displacement jump 0x0f,0x8N. */
11919 opcode
[1] = opcode
[0] + 0x10;
11920 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
11921 /* We've added an opcode byte. */
11922 fragP
->fr_fix
+= 1 + size
;
11923 fix_new (fragP
, old_fr_fix
+ 1, size
,
11925 fragP
->fr_offset
, 1,
11930 BAD_CASE (fragP
->fr_subtype
);
11934 return fragP
->fr_fix
- old_fr_fix
;
11937 /* Guess size depending on current relax state. Initially the relax
11938 state will correspond to a short jump and we return 1, because
11939 the variable part of the frag (the branch offset) is one byte
11940 long. However, we can relax a section more than once and in that
11941 case we must either set fr_subtype back to the unrelaxed state,
11942 or return the value for the appropriate branch. */
11943 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
11946 /* Called after relax() is finished.
11948 In: Address of frag.
11949 fr_type == rs_machine_dependent.
11950 fr_subtype is what the address relaxed to.
11952 Out: Any fixSs and constants are set up.
11953 Caller will turn frag into a ".space 0". */
11956 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
11959 unsigned char *opcode
;
11960 unsigned char *where_to_put_displacement
= NULL
;
11961 offsetT target_address
;
11962 offsetT opcode_address
;
11963 unsigned int extension
= 0;
11964 offsetT displacement_from_opcode_start
;
11966 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
11967 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
11968 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
11970 /* Generate nop padding. */
11971 unsigned int size
= fragP
->tc_frag_data
.length
;
11974 if (size
> fragP
->tc_frag_data
.max_bytes
)
11980 const char *branch
= "branch";
11981 const char *prefix
= "";
11982 fragS
*padding_fragP
;
11983 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
11986 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
11987 switch (fragP
->tc_frag_data
.default_prefix
)
11992 case CS_PREFIX_OPCODE
:
11995 case DS_PREFIX_OPCODE
:
11998 case ES_PREFIX_OPCODE
:
12001 case FS_PREFIX_OPCODE
:
12004 case GS_PREFIX_OPCODE
:
12007 case SS_PREFIX_OPCODE
:
12012 msg
= _("%s:%u: add %d%s at 0x%llx to align "
12013 "%s within %d-byte boundary\n");
12015 msg
= _("%s:%u: add additional %d%s at 0x%llx to "
12016 "align %s within %d-byte boundary\n");
12020 padding_fragP
= fragP
;
12021 msg
= _("%s:%u: add %d%s-byte nop at 0x%llx to align "
12022 "%s within %d-byte boundary\n");
12026 switch (padding_fragP
->tc_frag_data
.branch_type
)
12028 case align_branch_jcc
:
12031 case align_branch_fused
:
12032 branch
= "fused jcc";
12034 case align_branch_jmp
:
12037 case align_branch_call
:
12040 case align_branch_indirect
:
12041 branch
= "indiret branch";
12043 case align_branch_ret
:
12050 fprintf (stdout
, msg
,
12051 fragP
->fr_file
, fragP
->fr_line
, size
, prefix
,
12052 (long long) fragP
->fr_address
, branch
,
12053 1 << align_branch_power
);
12055 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12056 memset (fragP
->fr_opcode
,
12057 fragP
->tc_frag_data
.default_prefix
, size
);
12059 i386_generate_nops (fragP
, (char *) fragP
->fr_opcode
,
12061 fragP
->fr_fix
+= size
;
12066 opcode
= (unsigned char *) fragP
->fr_opcode
;
12068 /* Address we want to reach in file space. */
12069 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
12071 /* Address opcode resides at in file space. */
12072 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
12074 /* Displacement from opcode start to fill into instruction. */
12075 displacement_from_opcode_start
= target_address
- opcode_address
;
12077 if ((fragP
->fr_subtype
& BIG
) == 0)
12079 /* Don't have to change opcode. */
12080 extension
= 1; /* 1 opcode + 1 displacement */
12081 where_to_put_displacement
= &opcode
[1];
12085 if (no_cond_jump_promotion
12086 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
12087 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
12088 _("long jump required"));
12090 switch (fragP
->fr_subtype
)
12092 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
12093 extension
= 4; /* 1 opcode + 4 displacement */
12095 where_to_put_displacement
= &opcode
[1];
12098 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
12099 extension
= 2; /* 1 opcode + 2 displacement */
12101 where_to_put_displacement
= &opcode
[1];
12104 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
12105 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
12106 extension
= 5; /* 2 opcode + 4 displacement */
12107 opcode
[1] = opcode
[0] + 0x10;
12108 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12109 where_to_put_displacement
= &opcode
[2];
12112 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
12113 extension
= 3; /* 2 opcode + 2 displacement */
12114 opcode
[1] = opcode
[0] + 0x10;
12115 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12116 where_to_put_displacement
= &opcode
[2];
12119 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
12124 where_to_put_displacement
= &opcode
[3];
12128 BAD_CASE (fragP
->fr_subtype
);
12133 /* If size is less than four we are sure that the operand fits,
12134 but if it's 4, then it could be that the displacement is larger
12136 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
12138 && ((addressT
) (displacement_from_opcode_start
- extension
12139 + ((addressT
) 1 << 31))
12140 > (((addressT
) 2 << 31) - 1)))
12142 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
12143 _("jump target out of range"));
12144 /* Make us emit 0. */
12145 displacement_from_opcode_start
= extension
;
12147 /* Now put displacement after opcode. */
12148 md_number_to_chars ((char *) where_to_put_displacement
,
12149 (valueT
) (displacement_from_opcode_start
- extension
),
12150 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
12151 fragP
->fr_fix
+= extension
;
12154 /* Apply a fixup (fixP) to segment data, once it has been determined
12155 by our caller that we have all the info we need to fix it up.
12157 Parameter valP is the pointer to the value of the bits.
12159 On the 386, immediates, displacements, and data pointers are all in
12160 the same (little-endian) format, so we don't need to care about which
12161 we are handling. */
12164 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
12166 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
12167 valueT value
= *valP
;
12169 #if !defined (TE_Mach)
12170 if (fixP
->fx_pcrel
)
12172 switch (fixP
->fx_r_type
)
12178 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
12181 case BFD_RELOC_X86_64_32S
:
12182 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
12185 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
12188 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
12193 if (fixP
->fx_addsy
!= NULL
12194 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
12195 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
12196 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
12197 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
12198 && !use_rela_relocations
)
12200 /* This is a hack. There should be a better way to handle this.
12201 This covers for the fact that bfd_install_relocation will
12202 subtract the current location (for partial_inplace, PC relative
12203 relocations); see more below. */
12207 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
12210 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12212 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12215 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
12217 if ((sym_seg
== seg
12218 || (symbol_section_p (fixP
->fx_addsy
)
12219 && sym_seg
!= absolute_section
))
12220 && !generic_force_reloc (fixP
))
12222 /* Yes, we add the values in twice. This is because
12223 bfd_install_relocation subtracts them out again. I think
12224 bfd_install_relocation is broken, but I don't dare change
12226 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12230 #if defined (OBJ_COFF) && defined (TE_PE)
12231 /* For some reason, the PE format does not store a
12232 section address offset for a PC relative symbol. */
12233 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
12234 || S_IS_WEAK (fixP
->fx_addsy
))
12235 value
+= md_pcrel_from (fixP
);
12238 #if defined (OBJ_COFF) && defined (TE_PE)
12239 if (fixP
->fx_addsy
!= NULL
12240 && S_IS_WEAK (fixP
->fx_addsy
)
12241 /* PR 16858: Do not modify weak function references. */
12242 && ! fixP
->fx_pcrel
)
12244 #if !defined (TE_PEP)
12245 /* For x86 PE weak function symbols are neither PC-relative
12246 nor do they set S_IS_FUNCTION. So the only reliable way
12247 to detect them is to check the flags of their containing
12249 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
12250 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
12254 value
-= S_GET_VALUE (fixP
->fx_addsy
);
12258 /* Fix a few things - the dynamic linker expects certain values here,
12259 and we must not disappoint it. */
12260 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12261 if (IS_ELF
&& fixP
->fx_addsy
)
12262 switch (fixP
->fx_r_type
)
12264 case BFD_RELOC_386_PLT32
:
12265 case BFD_RELOC_X86_64_PLT32
:
12266 /* Make the jump instruction point to the address of the operand.
12267 At runtime we merely add the offset to the actual PLT entry.
12268 NB: Subtract the offset size only for jump instructions. */
12269 if (fixP
->fx_pcrel
)
12273 case BFD_RELOC_386_TLS_GD
:
12274 case BFD_RELOC_386_TLS_LDM
:
12275 case BFD_RELOC_386_TLS_IE_32
:
12276 case BFD_RELOC_386_TLS_IE
:
12277 case BFD_RELOC_386_TLS_GOTIE
:
12278 case BFD_RELOC_386_TLS_GOTDESC
:
12279 case BFD_RELOC_X86_64_TLSGD
:
12280 case BFD_RELOC_X86_64_TLSLD
:
12281 case BFD_RELOC_X86_64_GOTTPOFF
:
12282 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12283 value
= 0; /* Fully resolved at runtime. No addend. */
12285 case BFD_RELOC_386_TLS_LE
:
12286 case BFD_RELOC_386_TLS_LDO_32
:
12287 case BFD_RELOC_386_TLS_LE_32
:
12288 case BFD_RELOC_X86_64_DTPOFF32
:
12289 case BFD_RELOC_X86_64_DTPOFF64
:
12290 case BFD_RELOC_X86_64_TPOFF32
:
12291 case BFD_RELOC_X86_64_TPOFF64
:
12292 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12295 case BFD_RELOC_386_TLS_DESC_CALL
:
12296 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12297 value
= 0; /* Fully resolved at runtime. No addend. */
12298 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12302 case BFD_RELOC_VTABLE_INHERIT
:
12303 case BFD_RELOC_VTABLE_ENTRY
:
12310 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
12312 #endif /* !defined (TE_Mach) */
12314 /* Are we finished with this relocation now? */
12315 if (fixP
->fx_addsy
== NULL
)
12317 #if defined (OBJ_COFF) && defined (TE_PE)
12318 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
12321 /* Remember value for tc_gen_reloc. */
12322 fixP
->fx_addnumber
= value
;
12323 /* Clear out the frag for now. */
12327 else if (use_rela_relocations
)
12329 fixP
->fx_no_overflow
= 1;
12330 /* Remember value for tc_gen_reloc. */
12331 fixP
->fx_addnumber
= value
;
12335 md_number_to_chars (p
, value
, fixP
->fx_size
);
12339 md_atof (int type
, char *litP
, int *sizeP
)
12341 /* This outputs the LITTLENUMs in REVERSE order;
12342 in accord with the bigendian 386. */
12343 return ieee_md_atof (type
, litP
, sizeP
, FALSE
);
12346 static char output_invalid_buf
[sizeof (unsigned char) * 2 + 6];
12349 output_invalid (int c
)
12352 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
12355 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
12356 "(0x%x)", (unsigned char) c
);
12357 return output_invalid_buf
;
12360 /* Verify that @r can be used in the current context. */
12362 static bfd_boolean
check_register (const reg_entry
*r
)
12364 if (allow_pseudo_reg
)
12367 if (operand_type_all_zero (&r
->reg_type
))
12370 if ((r
->reg_type
.bitfield
.dword
12371 || (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
> 3)
12372 || r
->reg_type
.bitfield
.class == RegCR
12373 || r
->reg_type
.bitfield
.class == RegDR
)
12374 && !cpu_arch_flags
.bitfield
.cpui386
)
12377 if (r
->reg_type
.bitfield
.class == RegTR
12378 && (flag_code
== CODE_64BIT
12379 || !cpu_arch_flags
.bitfield
.cpui386
12380 || cpu_arch_isa_flags
.bitfield
.cpui586
12381 || cpu_arch_isa_flags
.bitfield
.cpui686
))
12384 if (r
->reg_type
.bitfield
.class == RegMMX
&& !cpu_arch_flags
.bitfield
.cpummx
)
12387 if (!cpu_arch_flags
.bitfield
.cpuavx512f
)
12389 if (r
->reg_type
.bitfield
.zmmword
12390 || r
->reg_type
.bitfield
.class == RegMask
)
12393 if (!cpu_arch_flags
.bitfield
.cpuavx
)
12395 if (r
->reg_type
.bitfield
.ymmword
)
12398 if (!cpu_arch_flags
.bitfield
.cpusse
&& r
->reg_type
.bitfield
.xmmword
)
12403 if (r
->reg_type
.bitfield
.class == RegBND
&& !cpu_arch_flags
.bitfield
.cpumpx
)
12406 /* Don't allow fake index register unless allow_index_reg isn't 0. */
12407 if (!allow_index_reg
&& r
->reg_num
== RegIZ
)
12410 /* Upper 16 vector registers are only available with VREX in 64bit
12411 mode, and require EVEX encoding. */
12412 if (r
->reg_flags
& RegVRex
)
12414 if (!cpu_arch_flags
.bitfield
.cpuavx512f
12415 || flag_code
!= CODE_64BIT
)
12418 if (i
.vec_encoding
== vex_encoding_default
)
12419 i
.vec_encoding
= vex_encoding_evex
;
12420 else if (i
.vec_encoding
!= vex_encoding_evex
)
12421 i
.vec_encoding
= vex_encoding_error
;
12424 if (((r
->reg_flags
& (RegRex64
| RegRex
)) || r
->reg_type
.bitfield
.qword
)
12425 && (!cpu_arch_flags
.bitfield
.cpulm
|| r
->reg_type
.bitfield
.class != RegCR
)
12426 && flag_code
!= CODE_64BIT
)
12429 if (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
== RegFlat
12436 /* REG_STRING starts *before* REGISTER_PREFIX. */
12438 static const reg_entry
*
12439 parse_real_register (char *reg_string
, char **end_op
)
12441 char *s
= reg_string
;
12443 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
12444 const reg_entry
*r
;
12446 /* Skip possible REGISTER_PREFIX and possible whitespace. */
12447 if (*s
== REGISTER_PREFIX
)
12450 if (is_space_char (*s
))
12453 p
= reg_name_given
;
12454 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
12456 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
12457 return (const reg_entry
*) NULL
;
12461 /* For naked regs, make sure that we are not dealing with an identifier.
12462 This prevents confusing an identifier like `eax_var' with register
12464 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
12465 return (const reg_entry
*) NULL
;
12469 r
= (const reg_entry
*) hash_find (reg_hash
, reg_name_given
);
12471 /* Handle floating point regs, allowing spaces in the (i) part. */
12472 if (r
== i386_regtab
/* %st is first entry of table */)
12474 if (!cpu_arch_flags
.bitfield
.cpu8087
12475 && !cpu_arch_flags
.bitfield
.cpu287
12476 && !cpu_arch_flags
.bitfield
.cpu387
12477 && !allow_pseudo_reg
)
12478 return (const reg_entry
*) NULL
;
12480 if (is_space_char (*s
))
12485 if (is_space_char (*s
))
12487 if (*s
>= '0' && *s
<= '7')
12489 int fpr
= *s
- '0';
12491 if (is_space_char (*s
))
12496 r
= (const reg_entry
*) hash_find (reg_hash
, "st(0)");
12501 /* We have "%st(" then garbage. */
12502 return (const reg_entry
*) NULL
;
12506 return r
&& check_register (r
) ? r
: NULL
;
12509 /* REG_STRING starts *before* REGISTER_PREFIX. */
12511 static const reg_entry
*
12512 parse_register (char *reg_string
, char **end_op
)
12514 const reg_entry
*r
;
12516 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
12517 r
= parse_real_register (reg_string
, end_op
);
12522 char *save
= input_line_pointer
;
12526 input_line_pointer
= reg_string
;
12527 c
= get_symbol_name (®_string
);
12528 symbolP
= symbol_find (reg_string
);
12529 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
12531 const expressionS
*e
= symbol_get_value_expression (symbolP
);
12533 know (e
->X_op
== O_register
);
12534 know (e
->X_add_number
>= 0
12535 && (valueT
) e
->X_add_number
< i386_regtab_size
);
12536 r
= i386_regtab
+ e
->X_add_number
;
12537 if (!check_register (r
))
12539 as_bad (_("register '%s%s' cannot be used here"),
12540 register_prefix
, r
->reg_name
);
12543 *end_op
= input_line_pointer
;
12545 *input_line_pointer
= c
;
12546 input_line_pointer
= save
;
12552 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
12554 const reg_entry
*r
;
12555 char *end
= input_line_pointer
;
12558 r
= parse_register (name
, &input_line_pointer
);
12559 if (r
&& end
<= input_line_pointer
)
12561 *nextcharP
= *input_line_pointer
;
12562 *input_line_pointer
= 0;
12565 e
->X_op
= O_register
;
12566 e
->X_add_number
= r
- i386_regtab
;
12569 e
->X_op
= O_illegal
;
12572 input_line_pointer
= end
;
12574 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
12578 md_operand (expressionS
*e
)
12581 const reg_entry
*r
;
12583 switch (*input_line_pointer
)
12585 case REGISTER_PREFIX
:
12586 r
= parse_real_register (input_line_pointer
, &end
);
12589 e
->X_op
= O_register
;
12590 e
->X_add_number
= r
- i386_regtab
;
12591 input_line_pointer
= end
;
12596 gas_assert (intel_syntax
);
12597 end
= input_line_pointer
++;
12599 if (*input_line_pointer
== ']')
12601 ++input_line_pointer
;
12602 e
->X_op_symbol
= make_expr_symbol (e
);
12603 e
->X_add_symbol
= NULL
;
12604 e
->X_add_number
= 0;
12609 e
->X_op
= O_absent
;
12610 input_line_pointer
= end
;
12617 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12618 const char *md_shortopts
= "kVQ:sqnO::";
12620 const char *md_shortopts
= "qnO::";
12623 #define OPTION_32 (OPTION_MD_BASE + 0)
12624 #define OPTION_64 (OPTION_MD_BASE + 1)
12625 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
12626 #define OPTION_MARCH (OPTION_MD_BASE + 3)
12627 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
12628 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
12629 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
12630 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
12631 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
12632 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
12633 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
12634 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
12635 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
12636 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
12637 #define OPTION_X32 (OPTION_MD_BASE + 14)
12638 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
12639 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
12640 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
12641 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
12642 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
12643 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
12644 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
12645 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
12646 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
12647 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
12648 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
12649 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
12650 #define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
12651 #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
12652 #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
12653 #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
12654 #define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
12655 #define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
12656 #define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
12658 struct option md_longopts
[] =
12660 {"32", no_argument
, NULL
, OPTION_32
},
12661 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12662 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12663 {"64", no_argument
, NULL
, OPTION_64
},
12665 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12666 {"x32", no_argument
, NULL
, OPTION_X32
},
12667 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
12668 {"mx86-used-note", required_argument
, NULL
, OPTION_X86_USED_NOTE
},
12670 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
12671 {"march", required_argument
, NULL
, OPTION_MARCH
},
12672 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
12673 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
12674 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
12675 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
12676 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
12677 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
12678 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
12679 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
12680 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
12681 {"mvexwig", required_argument
, NULL
, OPTION_MVEXWIG
},
12682 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
12683 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
12684 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
12685 # if defined (TE_PE) || defined (TE_PEP)
12686 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
12688 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
12689 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
12690 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
12691 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
12692 {"malign-branch-boundary", required_argument
, NULL
, OPTION_MALIGN_BRANCH_BOUNDARY
},
12693 {"malign-branch-prefix-size", required_argument
, NULL
, OPTION_MALIGN_BRANCH_PREFIX_SIZE
},
12694 {"malign-branch", required_argument
, NULL
, OPTION_MALIGN_BRANCH
},
12695 {"mbranches-within-32B-boundaries", no_argument
, NULL
, OPTION_MBRANCHES_WITH_32B_BOUNDARIES
},
12696 {"mlfence-after-load", required_argument
, NULL
, OPTION_MLFENCE_AFTER_LOAD
},
12697 {"mlfence-before-indirect-branch", required_argument
, NULL
,
12698 OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
},
12699 {"mlfence-before-ret", required_argument
, NULL
, OPTION_MLFENCE_BEFORE_RET
},
12700 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
12701 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
12702 {NULL
, no_argument
, NULL
, 0}
12704 size_t md_longopts_size
= sizeof (md_longopts
);
12707 md_parse_option (int c
, const char *arg
)
12710 char *arch
, *next
, *saved
, *type
;
12715 optimize_align_code
= 0;
12719 quiet_warnings
= 1;
12722 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12723 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
12724 should be emitted or not. FIXME: Not implemented. */
12726 if ((arg
[0] != 'y' && arg
[0] != 'n') || arg
[1])
12730 /* -V: SVR4 argument to print version ID. */
12732 print_version_id ();
12735 /* -k: Ignore for FreeBSD compatibility. */
12740 /* -s: On i386 Solaris, this tells the native assembler to use
12741 .stab instead of .stab.excl. We always use .stab anyhow. */
12744 case OPTION_MSHARED
:
12748 case OPTION_X86_USED_NOTE
:
12749 if (strcasecmp (arg
, "yes") == 0)
12751 else if (strcasecmp (arg
, "no") == 0)
12754 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg
);
12759 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12760 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12763 const char **list
, **l
;
12765 list
= bfd_target_list ();
12766 for (l
= list
; *l
!= NULL
; l
++)
12767 if (CONST_STRNEQ (*l
, "elf64-x86-64")
12768 || strcmp (*l
, "coff-x86-64") == 0
12769 || strcmp (*l
, "pe-x86-64") == 0
12770 || strcmp (*l
, "pei-x86-64") == 0
12771 || strcmp (*l
, "mach-o-x86-64") == 0)
12773 default_arch
= "x86_64";
12777 as_fatal (_("no compiled in support for x86_64"));
12783 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12787 const char **list
, **l
;
12789 list
= bfd_target_list ();
12790 for (l
= list
; *l
!= NULL
; l
++)
12791 if (CONST_STRNEQ (*l
, "elf32-x86-64"))
12793 default_arch
= "x86_64:32";
12797 as_fatal (_("no compiled in support for 32bit x86_64"));
12801 as_fatal (_("32bit x86_64 is only supported for ELF"));
12806 default_arch
= "i386";
12809 case OPTION_DIVIDE
:
12810 #ifdef SVR4_COMMENT_CHARS
12815 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
12817 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
12821 i386_comment_chars
= n
;
12827 saved
= xstrdup (arg
);
12829 /* Allow -march=+nosse. */
12835 as_fatal (_("invalid -march= option: `%s'"), arg
);
12836 next
= strchr (arch
, '+');
12839 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
12841 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
12844 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
12847 cpu_arch_name
= cpu_arch
[j
].name
;
12848 cpu_sub_arch_name
= NULL
;
12849 cpu_arch_flags
= cpu_arch
[j
].flags
;
12850 cpu_arch_isa
= cpu_arch
[j
].type
;
12851 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
12852 if (!cpu_arch_tune_set
)
12854 cpu_arch_tune
= cpu_arch_isa
;
12855 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
12859 else if (*cpu_arch
[j
].name
== '.'
12860 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
12862 /* ISA extension. */
12863 i386_cpu_flags flags
;
12865 flags
= cpu_flags_or (cpu_arch_flags
,
12866 cpu_arch
[j
].flags
);
12868 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
12870 if (cpu_sub_arch_name
)
12872 char *name
= cpu_sub_arch_name
;
12873 cpu_sub_arch_name
= concat (name
,
12875 (const char *) NULL
);
12879 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
12880 cpu_arch_flags
= flags
;
12881 cpu_arch_isa_flags
= flags
;
12885 = cpu_flags_or (cpu_arch_isa_flags
,
12886 cpu_arch
[j
].flags
);
12891 if (j
>= ARRAY_SIZE (cpu_arch
))
12893 /* Disable an ISA extension. */
12894 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
12895 if (strcmp (arch
, cpu_noarch
[j
].name
) == 0)
12897 i386_cpu_flags flags
;
12899 flags
= cpu_flags_and_not (cpu_arch_flags
,
12900 cpu_noarch
[j
].flags
);
12901 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
12903 if (cpu_sub_arch_name
)
12905 char *name
= cpu_sub_arch_name
;
12906 cpu_sub_arch_name
= concat (arch
,
12907 (const char *) NULL
);
12911 cpu_sub_arch_name
= xstrdup (arch
);
12912 cpu_arch_flags
= flags
;
12913 cpu_arch_isa_flags
= flags
;
12918 if (j
>= ARRAY_SIZE (cpu_noarch
))
12919 j
= ARRAY_SIZE (cpu_arch
);
12922 if (j
>= ARRAY_SIZE (cpu_arch
))
12923 as_fatal (_("invalid -march= option: `%s'"), arg
);
12927 while (next
!= NULL
);
12933 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
12934 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
12936 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
12938 cpu_arch_tune_set
= 1;
12939 cpu_arch_tune
= cpu_arch
[j
].type
;
12940 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
12944 if (j
>= ARRAY_SIZE (cpu_arch
))
12945 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
12948 case OPTION_MMNEMONIC
:
12949 if (strcasecmp (arg
, "att") == 0)
12950 intel_mnemonic
= 0;
12951 else if (strcasecmp (arg
, "intel") == 0)
12952 intel_mnemonic
= 1;
12954 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
12957 case OPTION_MSYNTAX
:
12958 if (strcasecmp (arg
, "att") == 0)
12960 else if (strcasecmp (arg
, "intel") == 0)
12963 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
12966 case OPTION_MINDEX_REG
:
12967 allow_index_reg
= 1;
12970 case OPTION_MNAKED_REG
:
12971 allow_naked_reg
= 1;
12974 case OPTION_MSSE2AVX
:
12978 case OPTION_MSSE_CHECK
:
12979 if (strcasecmp (arg
, "error") == 0)
12980 sse_check
= check_error
;
12981 else if (strcasecmp (arg
, "warning") == 0)
12982 sse_check
= check_warning
;
12983 else if (strcasecmp (arg
, "none") == 0)
12984 sse_check
= check_none
;
12986 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
12989 case OPTION_MOPERAND_CHECK
:
12990 if (strcasecmp (arg
, "error") == 0)
12991 operand_check
= check_error
;
12992 else if (strcasecmp (arg
, "warning") == 0)
12993 operand_check
= check_warning
;
12994 else if (strcasecmp (arg
, "none") == 0)
12995 operand_check
= check_none
;
12997 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
13000 case OPTION_MAVXSCALAR
:
13001 if (strcasecmp (arg
, "128") == 0)
13002 avxscalar
= vex128
;
13003 else if (strcasecmp (arg
, "256") == 0)
13004 avxscalar
= vex256
;
13006 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
13009 case OPTION_MVEXWIG
:
13010 if (strcmp (arg
, "0") == 0)
13012 else if (strcmp (arg
, "1") == 0)
13015 as_fatal (_("invalid -mvexwig= option: `%s'"), arg
);
13018 case OPTION_MADD_BND_PREFIX
:
13019 add_bnd_prefix
= 1;
13022 case OPTION_MEVEXLIG
:
13023 if (strcmp (arg
, "128") == 0)
13024 evexlig
= evexl128
;
13025 else if (strcmp (arg
, "256") == 0)
13026 evexlig
= evexl256
;
13027 else if (strcmp (arg
, "512") == 0)
13028 evexlig
= evexl512
;
13030 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
13033 case OPTION_MEVEXRCIG
:
13034 if (strcmp (arg
, "rne") == 0)
13036 else if (strcmp (arg
, "rd") == 0)
13038 else if (strcmp (arg
, "ru") == 0)
13040 else if (strcmp (arg
, "rz") == 0)
13043 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
13046 case OPTION_MEVEXWIG
:
13047 if (strcmp (arg
, "0") == 0)
13049 else if (strcmp (arg
, "1") == 0)
13052 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
13055 # if defined (TE_PE) || defined (TE_PEP)
13056 case OPTION_MBIG_OBJ
:
13061 case OPTION_MOMIT_LOCK_PREFIX
:
13062 if (strcasecmp (arg
, "yes") == 0)
13063 omit_lock_prefix
= 1;
13064 else if (strcasecmp (arg
, "no") == 0)
13065 omit_lock_prefix
= 0;
13067 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
13070 case OPTION_MFENCE_AS_LOCK_ADD
:
13071 if (strcasecmp (arg
, "yes") == 0)
13073 else if (strcasecmp (arg
, "no") == 0)
13076 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
13079 case OPTION_MLFENCE_AFTER_LOAD
:
13080 if (strcasecmp (arg
, "yes") == 0)
13081 lfence_after_load
= 1;
13082 else if (strcasecmp (arg
, "no") == 0)
13083 lfence_after_load
= 0;
13085 as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg
);
13088 case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
:
13089 if (strcasecmp (arg
, "all") == 0)
13091 lfence_before_indirect_branch
= lfence_branch_all
;
13092 if (lfence_before_ret
== lfence_before_ret_none
)
13093 lfence_before_ret
= lfence_before_ret_shl
;
13095 else if (strcasecmp (arg
, "memory") == 0)
13096 lfence_before_indirect_branch
= lfence_branch_memory
;
13097 else if (strcasecmp (arg
, "register") == 0)
13098 lfence_before_indirect_branch
= lfence_branch_register
;
13099 else if (strcasecmp (arg
, "none") == 0)
13100 lfence_before_indirect_branch
= lfence_branch_none
;
13102 as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"),
13106 case OPTION_MLFENCE_BEFORE_RET
:
13107 if (strcasecmp (arg
, "or") == 0)
13108 lfence_before_ret
= lfence_before_ret_or
;
13109 else if (strcasecmp (arg
, "not") == 0)
13110 lfence_before_ret
= lfence_before_ret_not
;
13111 else if (strcasecmp (arg
, "shl") == 0 || strcasecmp (arg
, "yes") == 0)
13112 lfence_before_ret
= lfence_before_ret_shl
;
13113 else if (strcasecmp (arg
, "none") == 0)
13114 lfence_before_ret
= lfence_before_ret_none
;
13116 as_fatal (_("invalid -mlfence-before-ret= option: `%s'"),
13120 case OPTION_MRELAX_RELOCATIONS
:
13121 if (strcasecmp (arg
, "yes") == 0)
13122 generate_relax_relocations
= 1;
13123 else if (strcasecmp (arg
, "no") == 0)
13124 generate_relax_relocations
= 0;
13126 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
13129 case OPTION_MALIGN_BRANCH_BOUNDARY
:
13132 long int align
= strtoul (arg
, &end
, 0);
13137 align_branch_power
= 0;
13140 else if (align
>= 16)
13143 for (align_power
= 0;
13145 align
>>= 1, align_power
++)
13147 /* Limit alignment power to 31. */
13148 if (align
== 1 && align_power
< 32)
13150 align_branch_power
= align_power
;
13155 as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg
);
13159 case OPTION_MALIGN_BRANCH_PREFIX_SIZE
:
13162 int align
= strtoul (arg
, &end
, 0);
13163 /* Some processors only support 5 prefixes. */
13164 if (*end
== '\0' && align
>= 0 && align
< 6)
13166 align_branch_prefix_size
= align
;
13169 as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),
13174 case OPTION_MALIGN_BRANCH
:
13176 saved
= xstrdup (arg
);
13180 next
= strchr (type
, '+');
13183 if (strcasecmp (type
, "jcc") == 0)
13184 align_branch
|= align_branch_jcc_bit
;
13185 else if (strcasecmp (type
, "fused") == 0)
13186 align_branch
|= align_branch_fused_bit
;
13187 else if (strcasecmp (type
, "jmp") == 0)
13188 align_branch
|= align_branch_jmp_bit
;
13189 else if (strcasecmp (type
, "call") == 0)
13190 align_branch
|= align_branch_call_bit
;
13191 else if (strcasecmp (type
, "ret") == 0)
13192 align_branch
|= align_branch_ret_bit
;
13193 else if (strcasecmp (type
, "indirect") == 0)
13194 align_branch
|= align_branch_indirect_bit
;
13196 as_fatal (_("invalid -malign-branch= option: `%s'"), arg
);
13199 while (next
!= NULL
);
13203 case OPTION_MBRANCHES_WITH_32B_BOUNDARIES
:
13204 align_branch_power
= 5;
13205 align_branch_prefix_size
= 5;
13206 align_branch
= (align_branch_jcc_bit
13207 | align_branch_fused_bit
13208 | align_branch_jmp_bit
);
13211 case OPTION_MAMD64
:
13215 case OPTION_MINTEL64
:
13223 /* Turn off -Os. */
13224 optimize_for_space
= 0;
13226 else if (*arg
== 's')
13228 optimize_for_space
= 1;
13229 /* Turn on all encoding optimizations. */
13230 optimize
= INT_MAX
;
13234 optimize
= atoi (arg
);
13235 /* Turn off -Os. */
13236 optimize_for_space
= 0;
13246 #define MESSAGE_TEMPLATE \
13250 output_message (FILE *stream
, char *p
, char *message
, char *start
,
13251 int *left_p
, const char *name
, int len
)
13253 int size
= sizeof (MESSAGE_TEMPLATE
);
13254 int left
= *left_p
;
13256 /* Reserve 2 spaces for ", " or ",\0" */
13259 /* Check if there is any room. */
13267 p
= mempcpy (p
, name
, len
);
13271 /* Output the current message now and start a new one. */
13274 fprintf (stream
, "%s\n", message
);
13276 left
= size
- (start
- message
) - len
- 2;
13278 gas_assert (left
>= 0);
13280 p
= mempcpy (p
, name
, len
);
13288 show_arch (FILE *stream
, int ext
, int check
)
13290 static char message
[] = MESSAGE_TEMPLATE
;
13291 char *start
= message
+ 27;
13293 int size
= sizeof (MESSAGE_TEMPLATE
);
13300 left
= size
- (start
- message
);
13301 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13303 /* Should it be skipped? */
13304 if (cpu_arch
[j
].skip
)
13307 name
= cpu_arch
[j
].name
;
13308 len
= cpu_arch
[j
].len
;
13311 /* It is an extension. Skip if we aren't asked to show it. */
13322 /* It is an processor. Skip if we show only extension. */
13325 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
13327 /* It is an impossible processor - skip. */
13331 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
13334 /* Display disabled extensions. */
13336 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
13338 name
= cpu_noarch
[j
].name
;
13339 len
= cpu_noarch
[j
].len
;
13340 p
= output_message (stream
, p
, message
, start
, &left
, name
,
13345 fprintf (stream
, "%s\n", message
);
13349 md_show_usage (FILE *stream
)
13351 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13352 fprintf (stream
, _("\
13353 -Qy, -Qn ignored\n\
13354 -V print assembler version number\n\
13357 fprintf (stream
, _("\
13358 -n Do not optimize code alignment\n\
13359 -q quieten some warnings\n"));
13360 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13361 fprintf (stream
, _("\
13364 #if defined BFD64 && (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13365 || defined (TE_PE) || defined (TE_PEP))
13366 fprintf (stream
, _("\
13367 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
13369 #ifdef SVR4_COMMENT_CHARS
13370 fprintf (stream
, _("\
13371 --divide do not treat `/' as a comment character\n"));
13373 fprintf (stream
, _("\
13374 --divide ignored\n"));
13376 fprintf (stream
, _("\
13377 -march=CPU[,+EXTENSION...]\n\
13378 generate code for CPU and EXTENSION, CPU is one of:\n"));
13379 show_arch (stream
, 0, 1);
13380 fprintf (stream
, _("\
13381 EXTENSION is combination of:\n"));
13382 show_arch (stream
, 1, 0);
13383 fprintf (stream
, _("\
13384 -mtune=CPU optimize for CPU, CPU is one of:\n"));
13385 show_arch (stream
, 0, 0);
13386 fprintf (stream
, _("\
13387 -msse2avx encode SSE instructions with VEX prefix\n"));
13388 fprintf (stream
, _("\
13389 -msse-check=[none|error|warning] (default: warning)\n\
13390 check SSE instructions\n"));
13391 fprintf (stream
, _("\
13392 -moperand-check=[none|error|warning] (default: warning)\n\
13393 check operand combinations for validity\n"));
13394 fprintf (stream
, _("\
13395 -mavxscalar=[128|256] (default: 128)\n\
13396 encode scalar AVX instructions with specific vector\n\
13398 fprintf (stream
, _("\
13399 -mvexwig=[0|1] (default: 0)\n\
13400 encode VEX instructions with specific VEX.W value\n\
13401 for VEX.W bit ignored instructions\n"));
13402 fprintf (stream
, _("\
13403 -mevexlig=[128|256|512] (default: 128)\n\
13404 encode scalar EVEX instructions with specific vector\n\
13406 fprintf (stream
, _("\
13407 -mevexwig=[0|1] (default: 0)\n\
13408 encode EVEX instructions with specific EVEX.W value\n\
13409 for EVEX.W bit ignored instructions\n"));
13410 fprintf (stream
, _("\
13411 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
13412 encode EVEX instructions with specific EVEX.RC value\n\
13413 for SAE-only ignored instructions\n"));
13414 fprintf (stream
, _("\
13415 -mmnemonic=[att|intel] "));
13416 if (SYSV386_COMPAT
)
13417 fprintf (stream
, _("(default: att)\n"));
13419 fprintf (stream
, _("(default: intel)\n"));
13420 fprintf (stream
, _("\
13421 use AT&T/Intel mnemonic\n"));
13422 fprintf (stream
, _("\
13423 -msyntax=[att|intel] (default: att)\n\
13424 use AT&T/Intel syntax\n"));
13425 fprintf (stream
, _("\
13426 -mindex-reg support pseudo index registers\n"));
13427 fprintf (stream
, _("\
13428 -mnaked-reg don't require `%%' prefix for registers\n"));
13429 fprintf (stream
, _("\
13430 -madd-bnd-prefix add BND prefix for all valid branches\n"));
13431 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13432 fprintf (stream
, _("\
13433 -mshared disable branch optimization for shared code\n"));
13434 fprintf (stream
, _("\
13435 -mx86-used-note=[no|yes] "));
13436 if (DEFAULT_X86_USED_NOTE
)
13437 fprintf (stream
, _("(default: yes)\n"));
13439 fprintf (stream
, _("(default: no)\n"));
13440 fprintf (stream
, _("\
13441 generate x86 used ISA and feature properties\n"));
13443 #if defined (TE_PE) || defined (TE_PEP)
13444 fprintf (stream
, _("\
13445 -mbig-obj generate big object files\n"));
13447 fprintf (stream
, _("\
13448 -momit-lock-prefix=[no|yes] (default: no)\n\
13449 strip all lock prefixes\n"));
13450 fprintf (stream
, _("\
13451 -mfence-as-lock-add=[no|yes] (default: no)\n\
13452 encode lfence, mfence and sfence as\n\
13453 lock addl $0x0, (%%{re}sp)\n"));
13454 fprintf (stream
, _("\
13455 -mrelax-relocations=[no|yes] "));
13456 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
)
13457 fprintf (stream
, _("(default: yes)\n"));
13459 fprintf (stream
, _("(default: no)\n"));
13460 fprintf (stream
, _("\
13461 generate relax relocations\n"));
13462 fprintf (stream
, _("\
13463 -malign-branch-boundary=NUM (default: 0)\n\
13464 align branches within NUM byte boundary\n"));
13465 fprintf (stream
, _("\
13466 -malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
13467 TYPE is combination of jcc, fused, jmp, call, ret,\n\
13469 specify types of branches to align\n"));
13470 fprintf (stream
, _("\
13471 -malign-branch-prefix-size=NUM (default: 5)\n\
13472 align branches with NUM prefixes per instruction\n"));
13473 fprintf (stream
, _("\
13474 -mbranches-within-32B-boundaries\n\
13475 align branches within 32 byte boundary\n"));
13476 fprintf (stream
, _("\
13477 -mlfence-after-load=[no|yes] (default: no)\n\
13478 generate lfence after load\n"));
13479 fprintf (stream
, _("\
13480 -mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\
13481 generate lfence before indirect near branch\n"));
13482 fprintf (stream
, _("\
13483 -mlfence-before-ret=[none|or|not|shl|yes] (default: none)\n\
13484 generate lfence before ret\n"));
13485 fprintf (stream
, _("\
13486 -mamd64 accept only AMD64 ISA [default]\n"));
13487 fprintf (stream
, _("\
13488 -mintel64 accept only Intel64 ISA\n"));
13491 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
13492 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13493 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13495 /* Pick the target format to use. */
13498 i386_target_format (void)
13500 if (!strncmp (default_arch
, "x86_64", 6))
13502 update_code_flag (CODE_64BIT
, 1);
13503 if (default_arch
[6] == '\0')
13504 x86_elf_abi
= X86_64_ABI
;
13506 x86_elf_abi
= X86_64_X32_ABI
;
13508 else if (!strcmp (default_arch
, "i386"))
13509 update_code_flag (CODE_32BIT
, 1);
13510 else if (!strcmp (default_arch
, "iamcu"))
13512 update_code_flag (CODE_32BIT
, 1);
13513 if (cpu_arch_isa
== PROCESSOR_UNKNOWN
)
13515 static const i386_cpu_flags iamcu_flags
= CPU_IAMCU_FLAGS
;
13516 cpu_arch_name
= "iamcu";
13517 cpu_sub_arch_name
= NULL
;
13518 cpu_arch_flags
= iamcu_flags
;
13519 cpu_arch_isa
= PROCESSOR_IAMCU
;
13520 cpu_arch_isa_flags
= iamcu_flags
;
13521 if (!cpu_arch_tune_set
)
13523 cpu_arch_tune
= cpu_arch_isa
;
13524 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
13527 else if (cpu_arch_isa
!= PROCESSOR_IAMCU
)
13528 as_fatal (_("Intel MCU doesn't support `%s' architecture"),
13532 as_fatal (_("unknown architecture"));
13534 if (cpu_flags_all_zero (&cpu_arch_isa_flags
))
13535 cpu_arch_isa_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
13536 if (cpu_flags_all_zero (&cpu_arch_tune_flags
))
13537 cpu_arch_tune_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
13539 switch (OUTPUT_FLAVOR
)
13541 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
13542 case bfd_target_aout_flavour
:
13543 return AOUT_TARGET_FORMAT
;
13545 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
13546 # if defined (TE_PE) || defined (TE_PEP)
13547 case bfd_target_coff_flavour
:
13548 if (flag_code
== CODE_64BIT
)
13549 return use_big_obj
? "pe-bigobj-x86-64" : "pe-x86-64";
13551 return use_big_obj
? "pe-bigobj-i386" : "pe-i386";
13552 # elif defined (TE_GO32)
13553 case bfd_target_coff_flavour
:
13554 return "coff-go32";
13556 case bfd_target_coff_flavour
:
13557 return "coff-i386";
13560 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
13561 case bfd_target_elf_flavour
:
13563 const char *format
;
13565 switch (x86_elf_abi
)
13568 format
= ELF_TARGET_FORMAT
;
13570 tls_get_addr
= "___tls_get_addr";
13574 use_rela_relocations
= 1;
13577 tls_get_addr
= "__tls_get_addr";
13579 format
= ELF_TARGET_FORMAT64
;
13581 case X86_64_X32_ABI
:
13582 use_rela_relocations
= 1;
13585 tls_get_addr
= "__tls_get_addr";
13587 disallow_64bit_reloc
= 1;
13588 format
= ELF_TARGET_FORMAT32
;
13591 if (cpu_arch_isa
== PROCESSOR_L1OM
)
13593 if (x86_elf_abi
!= X86_64_ABI
)
13594 as_fatal (_("Intel L1OM is 64bit only"));
13595 return ELF_TARGET_L1OM_FORMAT
;
13597 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
13599 if (x86_elf_abi
!= X86_64_ABI
)
13600 as_fatal (_("Intel K1OM is 64bit only"));
13601 return ELF_TARGET_K1OM_FORMAT
;
13603 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
13605 if (x86_elf_abi
!= I386_ABI
)
13606 as_fatal (_("Intel MCU is 32bit only"));
13607 return ELF_TARGET_IAMCU_FORMAT
;
13613 #if defined (OBJ_MACH_O)
13614 case bfd_target_mach_o_flavour
:
13615 if (flag_code
== CODE_64BIT
)
13617 use_rela_relocations
= 1;
13619 return "mach-o-x86-64";
13622 return "mach-o-i386";
13630 #endif /* OBJ_MAYBE_ more than one */
13633 md_undefined_symbol (char *name
)
13635 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
13636 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
13637 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
13638 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
13642 if (symbol_find (name
))
13643 as_bad (_("GOT already in symbol table"));
13644 GOT_symbol
= symbol_new (name
, undefined_section
,
13645 (valueT
) 0, &zero_address_frag
);
13652 /* Round up a section size to the appropriate boundary. */
13655 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
13657 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
13658 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
13660 /* For a.out, force the section size to be aligned. If we don't do
13661 this, BFD will align it for us, but it will not write out the
13662 final bytes of the section. This may be a bug in BFD, but it is
13663 easier to fix it here since that is how the other a.out targets
13667 align
= bfd_section_alignment (segment
);
13668 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
13675 /* On the i386, PC-relative offsets are relative to the start of the
13676 next instruction. That is, the address of the offset, plus its
13677 size, since the offset is always the last part of the insn. */
13680 md_pcrel_from (fixS
*fixP
)
13682 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
13688 s_bss (int ignore ATTRIBUTE_UNUSED
)
13692 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13694 obj_elf_section_change_hook ();
13696 temp
= get_absolute_expression ();
13697 subseg_set (bss_section
, (subsegT
) temp
);
13698 demand_empty_rest_of_line ();
13703 /* Remember constant directive. */
13706 i386_cons_align (int ignore ATTRIBUTE_UNUSED
)
13708 if (last_insn
.kind
!= last_insn_directive
13709 && (bfd_section_flags (now_seg
) & SEC_CODE
))
13711 last_insn
.seg
= now_seg
;
13712 last_insn
.kind
= last_insn_directive
;
13713 last_insn
.name
= "constant directive";
13714 last_insn
.file
= as_where (&last_insn
.line
);
13715 if (lfence_before_ret
!= lfence_before_ret_none
)
13717 if (lfence_before_indirect_branch
!= lfence_branch_none
)
13718 as_warn (_("constant directive skips -mlfence-before-ret "
13719 "and -mlfence-before-indirect-branch"));
13721 as_warn (_("constant directive skips -mlfence-before-ret"));
13723 else if (lfence_before_indirect_branch
!= lfence_branch_none
)
13724 as_warn (_("constant directive skips -mlfence-before-indirect-branch"));
13729 i386_validate_fix (fixS
*fixp
)
13731 if (fixp
->fx_subsy
)
13733 if (fixp
->fx_subsy
== GOT_symbol
)
13735 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
13739 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13740 if (fixp
->fx_tcbit2
)
13741 fixp
->fx_r_type
= (fixp
->fx_tcbit
13742 ? BFD_RELOC_X86_64_REX_GOTPCRELX
13743 : BFD_RELOC_X86_64_GOTPCRELX
);
13746 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
13751 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
13753 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
13755 fixp
->fx_subsy
= 0;
13758 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13759 else if (!object_64bit
)
13761 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
13762 && fixp
->fx_tcbit2
)
13763 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
13769 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
13772 bfd_reloc_code_real_type code
;
13774 switch (fixp
->fx_r_type
)
13776 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13777 case BFD_RELOC_SIZE32
:
13778 case BFD_RELOC_SIZE64
:
13779 if (S_IS_DEFINED (fixp
->fx_addsy
)
13780 && !S_IS_EXTERNAL (fixp
->fx_addsy
))
13782 /* Resolve size relocation against local symbol to size of
13783 the symbol plus addend. */
13784 valueT value
= S_GET_SIZE (fixp
->fx_addsy
) + fixp
->fx_offset
;
13785 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
13786 && !fits_in_unsigned_long (value
))
13787 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13788 _("symbol size computation overflow"));
13789 fixp
->fx_addsy
= NULL
;
13790 fixp
->fx_subsy
= NULL
;
13791 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
13795 /* Fall through. */
13797 case BFD_RELOC_X86_64_PLT32
:
13798 case BFD_RELOC_X86_64_GOT32
:
13799 case BFD_RELOC_X86_64_GOTPCREL
:
13800 case BFD_RELOC_X86_64_GOTPCRELX
:
13801 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
13802 case BFD_RELOC_386_PLT32
:
13803 case BFD_RELOC_386_GOT32
:
13804 case BFD_RELOC_386_GOT32X
:
13805 case BFD_RELOC_386_GOTOFF
:
13806 case BFD_RELOC_386_GOTPC
:
13807 case BFD_RELOC_386_TLS_GD
:
13808 case BFD_RELOC_386_TLS_LDM
:
13809 case BFD_RELOC_386_TLS_LDO_32
:
13810 case BFD_RELOC_386_TLS_IE_32
:
13811 case BFD_RELOC_386_TLS_IE
:
13812 case BFD_RELOC_386_TLS_GOTIE
:
13813 case BFD_RELOC_386_TLS_LE_32
:
13814 case BFD_RELOC_386_TLS_LE
:
13815 case BFD_RELOC_386_TLS_GOTDESC
:
13816 case BFD_RELOC_386_TLS_DESC_CALL
:
13817 case BFD_RELOC_X86_64_TLSGD
:
13818 case BFD_RELOC_X86_64_TLSLD
:
13819 case BFD_RELOC_X86_64_DTPOFF32
:
13820 case BFD_RELOC_X86_64_DTPOFF64
:
13821 case BFD_RELOC_X86_64_GOTTPOFF
:
13822 case BFD_RELOC_X86_64_TPOFF32
:
13823 case BFD_RELOC_X86_64_TPOFF64
:
13824 case BFD_RELOC_X86_64_GOTOFF64
:
13825 case BFD_RELOC_X86_64_GOTPC32
:
13826 case BFD_RELOC_X86_64_GOT64
:
13827 case BFD_RELOC_X86_64_GOTPCREL64
:
13828 case BFD_RELOC_X86_64_GOTPC64
:
13829 case BFD_RELOC_X86_64_GOTPLT64
:
13830 case BFD_RELOC_X86_64_PLTOFF64
:
13831 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
13832 case BFD_RELOC_X86_64_TLSDESC_CALL
:
13833 case BFD_RELOC_RVA
:
13834 case BFD_RELOC_VTABLE_ENTRY
:
13835 case BFD_RELOC_VTABLE_INHERIT
:
13837 case BFD_RELOC_32_SECREL
:
13839 code
= fixp
->fx_r_type
;
13841 case BFD_RELOC_X86_64_32S
:
13842 if (!fixp
->fx_pcrel
)
13844 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
13845 code
= fixp
->fx_r_type
;
13848 /* Fall through. */
13850 if (fixp
->fx_pcrel
)
13852 switch (fixp
->fx_size
)
13855 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13856 _("can not do %d byte pc-relative relocation"),
13858 code
= BFD_RELOC_32_PCREL
;
13860 case 1: code
= BFD_RELOC_8_PCREL
; break;
13861 case 2: code
= BFD_RELOC_16_PCREL
; break;
13862 case 4: code
= BFD_RELOC_32_PCREL
; break;
13864 case 8: code
= BFD_RELOC_64_PCREL
; break;
13870 switch (fixp
->fx_size
)
13873 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13874 _("can not do %d byte relocation"),
13876 code
= BFD_RELOC_32
;
13878 case 1: code
= BFD_RELOC_8
; break;
13879 case 2: code
= BFD_RELOC_16
; break;
13880 case 4: code
= BFD_RELOC_32
; break;
13882 case 8: code
= BFD_RELOC_64
; break;
13889 if ((code
== BFD_RELOC_32
13890 || code
== BFD_RELOC_32_PCREL
13891 || code
== BFD_RELOC_X86_64_32S
)
13893 && fixp
->fx_addsy
== GOT_symbol
)
13896 code
= BFD_RELOC_386_GOTPC
;
13898 code
= BFD_RELOC_X86_64_GOTPC32
;
13900 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
13902 && fixp
->fx_addsy
== GOT_symbol
)
13904 code
= BFD_RELOC_X86_64_GOTPC64
;
13907 rel
= XNEW (arelent
);
13908 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
13909 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
13911 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
13913 if (!use_rela_relocations
)
13915 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
13916 vtable entry to be used in the relocation's section offset. */
13917 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
13918 rel
->address
= fixp
->fx_offset
;
13919 #if defined (OBJ_COFF) && defined (TE_PE)
13920 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
13921 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
13926 /* Use the rela in 64bit mode. */
13929 if (disallow_64bit_reloc
)
13932 case BFD_RELOC_X86_64_DTPOFF64
:
13933 case BFD_RELOC_X86_64_TPOFF64
:
13934 case BFD_RELOC_64_PCREL
:
13935 case BFD_RELOC_X86_64_GOTOFF64
:
13936 case BFD_RELOC_X86_64_GOT64
:
13937 case BFD_RELOC_X86_64_GOTPCREL64
:
13938 case BFD_RELOC_X86_64_GOTPC64
:
13939 case BFD_RELOC_X86_64_GOTPLT64
:
13940 case BFD_RELOC_X86_64_PLTOFF64
:
13941 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13942 _("cannot represent relocation type %s in x32 mode"),
13943 bfd_get_reloc_code_name (code
));
13949 if (!fixp
->fx_pcrel
)
13950 rel
->addend
= fixp
->fx_offset
;
13954 case BFD_RELOC_X86_64_PLT32
:
13955 case BFD_RELOC_X86_64_GOT32
:
13956 case BFD_RELOC_X86_64_GOTPCREL
:
13957 case BFD_RELOC_X86_64_GOTPCRELX
:
13958 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
13959 case BFD_RELOC_X86_64_TLSGD
:
13960 case BFD_RELOC_X86_64_TLSLD
:
13961 case BFD_RELOC_X86_64_GOTTPOFF
:
13962 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
13963 case BFD_RELOC_X86_64_TLSDESC_CALL
:
13964 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
13967 rel
->addend
= (section
->vma
13969 + fixp
->fx_addnumber
13970 + md_pcrel_from (fixp
));
13975 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
13976 if (rel
->howto
== NULL
)
13978 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13979 _("cannot represent relocation type %s"),
13980 bfd_get_reloc_code_name (code
));
13981 /* Set howto to a garbage value so that we can keep going. */
13982 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
13983 gas_assert (rel
->howto
!= NULL
);
13989 #include "tc-i386-intel.c"
13992 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
13994 int saved_naked_reg
;
13995 char saved_register_dot
;
13997 saved_naked_reg
= allow_naked_reg
;
13998 allow_naked_reg
= 1;
13999 saved_register_dot
= register_chars
['.'];
14000 register_chars
['.'] = '.';
14001 allow_pseudo_reg
= 1;
14002 expression_and_evaluate (exp
);
14003 allow_pseudo_reg
= 0;
14004 register_chars
['.'] = saved_register_dot
;
14005 allow_naked_reg
= saved_naked_reg
;
14007 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
14009 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
14011 exp
->X_op
= O_constant
;
14012 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
14013 .dw2_regnum
[flag_code
>> 1];
14016 exp
->X_op
= O_illegal
;
14021 tc_x86_frame_initial_instructions (void)
14023 static unsigned int sp_regno
[2];
14025 if (!sp_regno
[flag_code
>> 1])
14027 char *saved_input
= input_line_pointer
;
14028 char sp
[][4] = {"esp", "rsp"};
14031 input_line_pointer
= sp
[flag_code
>> 1];
14032 tc_x86_parse_to_dw2regnum (&exp
);
14033 gas_assert (exp
.X_op
== O_constant
);
14034 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
14035 input_line_pointer
= saved_input
;
14038 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
14039 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
14043 x86_dwarf2_addr_size (void)
14045 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
14046 if (x86_elf_abi
== X86_64_X32_ABI
)
14049 return bfd_arch_bits_per_address (stdoutput
) / 8;
14053 i386_elf_section_type (const char *str
, size_t len
)
14055 if (flag_code
== CODE_64BIT
14056 && len
== sizeof ("unwind") - 1
14057 && strncmp (str
, "unwind", 6) == 0)
14058 return SHT_X86_64_UNWIND
;
14065 i386_solaris_fix_up_eh_frame (segT sec
)
14067 if (flag_code
== CODE_64BIT
)
14068 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
14074 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
14078 exp
.X_op
= O_secrel
;
14079 exp
.X_add_symbol
= symbol
;
14080 exp
.X_add_number
= 0;
14081 emit_expr (&exp
, size
);
14085 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14086 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
14089 x86_64_section_letter (int letter
, const char **ptr_msg
)
14091 if (flag_code
== CODE_64BIT
)
14094 return SHF_X86_64_LARGE
;
14096 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
14099 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
14104 x86_64_section_word (char *str
, size_t len
)
14106 if (len
== 5 && flag_code
== CODE_64BIT
&& CONST_STRNEQ (str
, "large"))
14107 return SHF_X86_64_LARGE
;
14113 handle_large_common (int small ATTRIBUTE_UNUSED
)
14115 if (flag_code
!= CODE_64BIT
)
14117 s_comm_internal (0, elf_common_parse
);
14118 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
14122 static segT lbss_section
;
14123 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
14124 asection
*saved_bss_section
= bss_section
;
14126 if (lbss_section
== NULL
)
14128 flagword applicable
;
14129 segT seg
= now_seg
;
14130 subsegT subseg
= now_subseg
;
14132 /* The .lbss section is for local .largecomm symbols. */
14133 lbss_section
= subseg_new (".lbss", 0);
14134 applicable
= bfd_applicable_section_flags (stdoutput
);
14135 bfd_set_section_flags (lbss_section
, applicable
& SEC_ALLOC
);
14136 seg_info (lbss_section
)->bss
= 1;
14138 subseg_set (seg
, subseg
);
14141 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
14142 bss_section
= lbss_section
;
14144 s_comm_internal (0, elf_common_parse
);
14146 elf_com_section_ptr
= saved_com_section_ptr
;
14147 bss_section
= saved_bss_section
;
14150 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */