1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2020 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
39 #ifdef HAVE_SYS_PARAM_H
40 #include <sys/param.h>
43 #define INT_MAX (int) (((unsigned) (-1)) >> 1)
47 #ifndef INFER_ADDR_PREFIX
48 #define INFER_ADDR_PREFIX 1
52 #define DEFAULT_ARCH "i386"
57 #define INLINE __inline__
63 /* Prefixes will be emitted in the order defined below.
64 WAIT_PREFIX must be the first prefix since FWAIT really is an
65 instruction, and so must come before any prefixes.
66 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
67 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
73 #define HLE_PREFIX REP_PREFIX
74 #define BND_PREFIX REP_PREFIX
76 #define REX_PREFIX 6 /* must come last. */
77 #define MAX_PREFIXES 7 /* max prefixes per opcode */
79 /* we define the syntax here (modulo base,index,scale syntax) */
80 #define REGISTER_PREFIX '%'
81 #define IMMEDIATE_PREFIX '$'
82 #define ABSOLUTE_PREFIX '*'
84 /* these are the instruction mnemonic suffixes in AT&T syntax or
85 memory operand size in Intel syntax. */
86 #define WORD_MNEM_SUFFIX 'w'
87 #define BYTE_MNEM_SUFFIX 'b'
88 #define SHORT_MNEM_SUFFIX 's'
89 #define LONG_MNEM_SUFFIX 'l'
90 #define QWORD_MNEM_SUFFIX 'q'
91 /* Intel Syntax. Use a non-ascii letter since it never appears
93 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
95 #define END_OF_INSN '\0'
97 /* This matches the C -> StaticRounding alias in the opcode table. */
98 #define commutative staticrounding
101 'templates' is for grouping together 'template' structures for opcodes
102 of the same name. This is only used for storing the insns in the grand
103 ole hash table of insns.
104 The templates themselves start at START and range up to (but not including)
109 const insn_template
*start
;
110 const insn_template
*end
;
114 /* 386 operand encoding bytes: see 386 book for details of this. */
117 unsigned int regmem
; /* codes register or memory operand */
118 unsigned int reg
; /* codes register operand (or extended opcode) */
119 unsigned int mode
; /* how to interpret regmem & reg */
123 /* x86-64 extension prefix. */
124 typedef int rex_byte
;
126 /* 386 opcode byte to code indirect addressing. */
135 /* x86 arch names, types and features */
138 const char *name
; /* arch name */
139 unsigned int len
; /* arch string length */
140 enum processor_type type
; /* arch type */
141 i386_cpu_flags flags
; /* cpu feature flags */
142 unsigned int skip
; /* show_arch should skip this. */
146 /* Used to turn off indicated flags. */
149 const char *name
; /* arch name */
150 unsigned int len
; /* arch string length */
151 i386_cpu_flags flags
; /* cpu feature flags */
155 static void update_code_flag (int, int);
156 static void set_code_flag (int);
157 static void set_16bit_gcc_code_flag (int);
158 static void set_intel_syntax (int);
159 static void set_intel_mnemonic (int);
160 static void set_allow_index_reg (int);
161 static void set_check (int);
162 static void set_cpu_arch (int);
164 static void pe_directive_secrel (int);
166 static void signed_cons (int);
167 static char *output_invalid (int c
);
168 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
170 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
172 static int i386_att_operand (char *);
173 static int i386_intel_operand (char *, int);
174 static int i386_intel_simplify (expressionS
*);
175 static int i386_intel_parse_name (const char *, expressionS
*);
176 static const reg_entry
*parse_register (char *, char **);
177 static char *parse_insn (char *, char *);
178 static char *parse_operands (char *, const char *);
179 static void swap_operands (void);
180 static void swap_2_operands (int, int);
181 static enum flag_code
i386_addressing_mode (void);
182 static void optimize_imm (void);
183 static void optimize_disp (void);
184 static const insn_template
*match_template (char);
185 static int check_string (void);
186 static int process_suffix (void);
187 static int check_byte_reg (void);
188 static int check_long_reg (void);
189 static int check_qword_reg (void);
190 static int check_word_reg (void);
191 static int finalize_imm (void);
192 static int process_operands (void);
193 static const seg_entry
*build_modrm_byte (void);
194 static void output_insn (void);
195 static void output_imm (fragS
*, offsetT
);
196 static void output_disp (fragS
*, offsetT
);
198 static void s_bss (int);
200 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
201 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
203 /* GNU_PROPERTY_X86_ISA_1_USED. */
204 static unsigned int x86_isa_1_used
;
205 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
206 static unsigned int x86_feature_2_used
;
207 /* Generate x86 used ISA and feature properties. */
208 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
211 static const char *default_arch
= DEFAULT_ARCH
;
213 /* This struct describes rounding control and SAE in the instruction. */
227 static struct RC_Operation rc_op
;
229 /* The struct describes masking, applied to OPERAND in the instruction.
230 MASK is a pointer to the corresponding mask register. ZEROING tells
231 whether merging or zeroing mask is used. */
232 struct Mask_Operation
234 const reg_entry
*mask
;
235 unsigned int zeroing
;
236 /* The operand where this operation is associated. */
240 static struct Mask_Operation mask_op
;
242 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
244 struct Broadcast_Operation
246 /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}. */
249 /* Index of broadcasted operand. */
252 /* Number of bytes to broadcast. */
256 static struct Broadcast_Operation broadcast_op
;
261 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
262 unsigned char bytes
[4];
264 /* Destination or source register specifier. */
265 const reg_entry
*register_specifier
;
268 /* 'md_assemble ()' gathers together information and puts it into a
275 const reg_entry
*regs
;
280 operand_size_mismatch
,
281 operand_type_mismatch
,
282 register_type_mismatch
,
283 number_of_operands_mismatch
,
284 invalid_instruction_suffix
,
286 unsupported_with_intel_mnemonic
,
289 invalid_vsib_address
,
290 invalid_vector_register_set
,
291 unsupported_vector_index_register
,
292 unsupported_broadcast
,
295 mask_not_on_destination
,
298 rc_sae_operand_not_last_imm
,
299 invalid_register_operand
,
304 /* TM holds the template for the insn were currently assembling. */
307 /* SUFFIX holds the instruction size suffix for byte, word, dword
308 or qword, if given. */
311 /* OPERANDS gives the number of given operands. */
312 unsigned int operands
;
314 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
315 of given register, displacement, memory operands and immediate
317 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
319 /* TYPES [i] is the type (see above #defines) which tells us how to
320 use OP[i] for the corresponding operand. */
321 i386_operand_type types
[MAX_OPERANDS
];
323 /* Displacement expression, immediate expression, or register for each
325 union i386_op op
[MAX_OPERANDS
];
327 /* Flags for operands. */
328 unsigned int flags
[MAX_OPERANDS
];
329 #define Operand_PCrel 1
330 #define Operand_Mem 2
332 /* Relocation type for operand */
333 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
335 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
336 the base index byte below. */
337 const reg_entry
*base_reg
;
338 const reg_entry
*index_reg
;
339 unsigned int log2_scale_factor
;
341 /* SEG gives the seg_entries of this insn. They are zero unless
342 explicit segment overrides are given. */
343 const seg_entry
*seg
[2];
345 /* Copied first memory operand string, for re-checking. */
348 /* PREFIX holds all the given prefix opcodes (usually null).
349 PREFIXES is the number of prefix opcodes. */
350 unsigned int prefixes
;
351 unsigned char prefix
[MAX_PREFIXES
];
353 /* Register is in low 3 bits of opcode. */
354 bfd_boolean short_form
;
356 /* The operand to a branch insn indicates an absolute branch. */
357 bfd_boolean jumpabsolute
;
359 /* Has MMX register operands. */
360 bfd_boolean has_regmmx
;
362 /* Has XMM register operands. */
363 bfd_boolean has_regxmm
;
365 /* Has YMM register operands. */
366 bfd_boolean has_regymm
;
368 /* Has ZMM register operands. */
369 bfd_boolean has_regzmm
;
371 /* Has GOTPC or TLS relocation. */
372 bfd_boolean has_gotpc_tls_reloc
;
374 /* RM and SIB are the modrm byte and the sib byte where the
375 addressing modes of this insn are encoded. */
382 /* Masking attributes. */
383 struct Mask_Operation
*mask
;
385 /* Rounding control and SAE attributes. */
386 struct RC_Operation
*rounding
;
388 /* Broadcasting attributes. */
389 struct Broadcast_Operation
*broadcast
;
391 /* Compressed disp8*N attribute. */
392 unsigned int memshift
;
394 /* Prefer load or store in encoding. */
397 dir_encoding_default
= 0,
403 /* Prefer 8bit or 32bit displacement in encoding. */
406 disp_encoding_default
= 0,
411 /* Prefer the REX byte in encoding. */
412 bfd_boolean rex_encoding
;
414 /* Disable instruction size optimization. */
415 bfd_boolean no_optimize
;
417 /* How to encode vector instructions. */
420 vex_encoding_default
= 0,
427 const char *rep_prefix
;
430 const char *hle_prefix
;
432 /* Have BND prefix. */
433 const char *bnd_prefix
;
435 /* Have NOTRACK prefix. */
436 const char *notrack_prefix
;
439 enum i386_error error
;
442 typedef struct _i386_insn i386_insn
;
444 /* Link RC type with corresponding string, that'll be looked for in
453 static const struct RC_name RC_NamesTable
[] =
455 { rne
, STRING_COMMA_LEN ("rn-sae") },
456 { rd
, STRING_COMMA_LEN ("rd-sae") },
457 { ru
, STRING_COMMA_LEN ("ru-sae") },
458 { rz
, STRING_COMMA_LEN ("rz-sae") },
459 { saeonly
, STRING_COMMA_LEN ("sae") },
462 /* List of chars besides those in app.c:symbol_chars that can start an
463 operand. Used to prevent the scrubber eating vital white-space. */
464 const char extra_symbol_chars
[] = "*%-([{}"
473 #if (defined (TE_I386AIX) \
474 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
475 && !defined (TE_GNU) \
476 && !defined (TE_LINUX) \
477 && !defined (TE_NACL) \
478 && !defined (TE_FreeBSD) \
479 && !defined (TE_DragonFly) \
480 && !defined (TE_NetBSD)))
481 /* This array holds the chars that always start a comment. If the
482 pre-processor is disabled, these aren't very useful. The option
483 --divide will remove '/' from this list. */
484 const char *i386_comment_chars
= "#/";
485 #define SVR4_COMMENT_CHARS 1
486 #define PREFIX_SEPARATOR '\\'
489 const char *i386_comment_chars
= "#";
490 #define PREFIX_SEPARATOR '/'
493 /* This array holds the chars that only start a comment at the beginning of
494 a line. If the line seems to have the form '# 123 filename'
495 .line and .file directives will appear in the pre-processed output.
496 Note that input_file.c hand checks for '#' at the beginning of the
497 first line of the input file. This is because the compiler outputs
498 #NO_APP at the beginning of its output.
499 Also note that comments started like this one will always work if
500 '/' isn't otherwise defined. */
501 const char line_comment_chars
[] = "#/";
503 const char line_separator_chars
[] = ";";
505 /* Chars that can be used to separate mant from exp in floating point
507 const char EXP_CHARS
[] = "eE";
509 /* Chars that mean this number is a floating point constant
512 const char FLT_CHARS
[] = "fFdDxX";
514 /* Tables for lexical analysis. */
515 static char mnemonic_chars
[256];
516 static char register_chars
[256];
517 static char operand_chars
[256];
518 static char identifier_chars
[256];
519 static char digit_chars
[256];
521 /* Lexical macros. */
522 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
523 #define is_operand_char(x) (operand_chars[(unsigned char) x])
524 #define is_register_char(x) (register_chars[(unsigned char) x])
525 #define is_space_char(x) ((x) == ' ')
526 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
527 #define is_digit_char(x) (digit_chars[(unsigned char) x])
529 /* All non-digit non-letter characters that may occur in an operand. */
530 static char operand_special_chars
[] = "%$-+(,)*._~/<>|&^!:[@]";
532 /* md_assemble() always leaves the strings it's passed unaltered. To
533 effect this we maintain a stack of saved characters that we've smashed
534 with '\0's (indicating end of strings for various sub-fields of the
535 assembler instruction). */
536 static char save_stack
[32];
537 static char *save_stack_p
;
/* Temporarily NUL-terminate the string at S: push the character being
   overwritten onto save_stack, then smash *S to '\0'.  Must be paired
   with RESTORE_END_STRING on the same pointer, in LIFO order.  */
538 #define END_STRING_AND_SAVE(s) \
539 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
/* Undo END_STRING_AND_SAVE: pop the saved character off save_stack and
   write it back through S.  */
540 #define RESTORE_END_STRING(s) \
541 do { *(s) = *--save_stack_p; } while (0)
543 /* The instruction we're assembling. */
546 /* Possible templates for current insn. */
547 static const templates
*current_templates
;
549 /* Per instruction expressionS buffers: max displacements & immediates. */
550 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
551 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
553 /* Current operand we are working on. */
554 static int this_operand
= -1;
556 /* We support four different modes. FLAG_CODE variable is used to distinguish
564 static enum flag_code flag_code
;
565 static unsigned int object_64bit
;
566 static unsigned int disallow_64bit_reloc
;
567 static int use_rela_relocations
= 0;
568 /* __tls_get_addr/___tls_get_addr symbol for TLS. */
569 static const char *tls_get_addr
;
571 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
572 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
573 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
575 /* The ELF ABI to use. */
583 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
586 #if defined (TE_PE) || defined (TE_PEP)
587 /* Use big object file format. */
588 static int use_big_obj
= 0;
591 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
592 /* 1 if generating code for a shared library. */
593 static int shared
= 0;
596 /* 1 for intel syntax,
598 static int intel_syntax
= 0;
600 static enum x86_64_isa
602 amd64
= 1, /* AMD64 ISA. */
603 intel64
/* Intel64 ISA. */
606 /* 1 for intel mnemonic,
607 0 if att mnemonic. */
608 static int intel_mnemonic
= !SYSV386_COMPAT
;
610 /* 1 if pseudo registers are permitted. */
611 static int allow_pseudo_reg
= 0;
613 /* 1 if register prefix % not required. */
614 static int allow_naked_reg
= 0;
616 /* 1 if the assembler should add BND prefix for all control-transferring
617 instructions supporting it, even if this prefix wasn't specified
619 static int add_bnd_prefix
= 0;
621 /* 1 if pseudo index register, eiz/riz, is allowed. */
622 static int allow_index_reg
= 0;
624 /* 1 if the assembler should ignore LOCK prefix, even if it was
625 specified explicitly. */
626 static int omit_lock_prefix
= 0;
628 /* 1 if the assembler should encode lfence, mfence, and sfence as
629 "lock addl $0, (%{re}sp)". */
630 static int avoid_fence
= 0;
632 /* 1 if lfence should be inserted after every load. */
633 static int lfence_after_load
= 0;
635 /* Non-zero if lfence should be inserted before indirect branch. */
636 static enum lfence_before_indirect_branch_kind
638 lfence_branch_none
= 0,
639 lfence_branch_register
,
640 lfence_branch_memory
,
643 lfence_before_indirect_branch
;
645 /* Non-zero if lfence should be inserted before ret. */
646 static enum lfence_before_ret_kind
648 lfence_before_ret_none
= 0,
649 lfence_before_ret_not
,
654 /* Types of previous instruction is .byte or prefix. */
669 /* 1 if the assembler should generate relax relocations. */
671 static int generate_relax_relocations
672 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
674 static enum check_kind
680 sse_check
, operand_check
= check_warning
;
682 /* Non-zero if branches should be aligned within power of 2 boundary. */
683 static int align_branch_power
= 0;
685 /* Types of branches to align. */
686 enum align_branch_kind
688 align_branch_none
= 0,
689 align_branch_jcc
= 1,
690 align_branch_fused
= 2,
691 align_branch_jmp
= 3,
692 align_branch_call
= 4,
693 align_branch_indirect
= 5,
697 /* Type bits of branches to align. */
698 enum align_branch_bit
700 align_branch_jcc_bit
= 1 << align_branch_jcc
,
701 align_branch_fused_bit
= 1 << align_branch_fused
,
702 align_branch_jmp_bit
= 1 << align_branch_jmp
,
703 align_branch_call_bit
= 1 << align_branch_call
,
704 align_branch_indirect_bit
= 1 << align_branch_indirect
,
705 align_branch_ret_bit
= 1 << align_branch_ret
708 static unsigned int align_branch
= (align_branch_jcc_bit
709 | align_branch_fused_bit
710 | align_branch_jmp_bit
);
712 /* Types of condition jump used by macro-fusion. */
715 mf_jcc_jo
= 0, /* base opcode 0x70 */
716 mf_jcc_jc
, /* base opcode 0x72 */
717 mf_jcc_je
, /* base opcode 0x74 */
718 mf_jcc_jna
, /* base opcode 0x76 */
719 mf_jcc_js
, /* base opcode 0x78 */
720 mf_jcc_jp
, /* base opcode 0x7a */
721 mf_jcc_jl
, /* base opcode 0x7c */
722 mf_jcc_jle
, /* base opcode 0x7e */
725 /* Types of compare flag-modifying instructions used by macro-fusion. */
728 mf_cmp_test_and
, /* test/cmp */
729 mf_cmp_alu_cmp
, /* add/sub/cmp */
730 mf_cmp_incdec
/* inc/dec */
733 /* The maximum padding size for fused jcc. CMP like instruction can
734 be 9 bytes and jcc can be 6 bytes. Leave room just in case for
736 #define MAX_FUSED_JCC_PADDING_SIZE 20
738 /* The maximum number of prefixes added for an instruction. */
739 static unsigned int align_branch_prefix_size
= 5;
742 1. Clear the REX_W bit with register operand if possible.
743 2. Above plus use 128bit vector instruction to clear the full vector
746 static int optimize
= 0;
749 1. Clear the REX_W bit with register operand if possible.
750 2. Above plus use 128bit vector instruction to clear the full vector
752 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
755 static int optimize_for_space
= 0;
757 /* Register prefix used for error message. */
758 static const char *register_prefix
= "%";
760 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
761 leave, push, and pop instructions so that gcc has the same stack
762 frame as in 32 bit mode. */
763 static char stackop_size
= '\0';
765 /* Non-zero to optimize code alignment. */
766 int optimize_align_code
= 1;
768 /* Non-zero to quieten some warnings. */
769 static int quiet_warnings
= 0;
772 static const char *cpu_arch_name
= NULL
;
773 static char *cpu_sub_arch_name
= NULL
;
775 /* CPU feature flags. */
776 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
778 /* If we have selected a cpu we are generating instructions for. */
779 static int cpu_arch_tune_set
= 0;
781 /* Cpu we are generating instructions for. */
782 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
784 /* CPU feature flags of cpu we are generating instructions for. */
785 static i386_cpu_flags cpu_arch_tune_flags
;
787 /* CPU instruction set architecture used. */
788 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
790 /* CPU feature flags of instruction set architecture used. */
791 i386_cpu_flags cpu_arch_isa_flags
;
793 /* If set, conditional jumps are not automatically promoted to handle
794 larger than a byte offset. */
795 static unsigned int no_cond_jump_promotion
= 0;
797 /* Encode SSE instructions with VEX prefix. */
798 static unsigned int sse2avx
;
800 /* Encode scalar AVX instructions with specific vector length. */
807 /* Encode VEX WIG instructions with specific vex.w. */
814 /* Encode scalar EVEX LIG instructions with specific vector length. */
822 /* Encode EVEX WIG instructions with specific evex.w. */
829 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
830 static enum rc_type evexrcig
= rne
;
832 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
833 static symbolS
*GOT_symbol
;
835 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
836 unsigned int x86_dwarf2_return_column
;
838 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
839 int x86_cie_data_alignment
;
841 /* Interface to relax_segment.
842 There are 3 major relax states for 386 jump insns because the
843 different types of jumps add different sizes to frags when we're
844 figuring out what sort of jump to choose to reach a given label.
846 BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
847 branches which are handled by md_estimate_size_before_relax() and
848 i386_generic_table_relax_frag(). */
851 #define UNCOND_JUMP 0
853 #define COND_JUMP86 2
854 #define BRANCH_PADDING 3
855 #define BRANCH_PREFIX 4
856 #define FUSED_JCC_PADDING 5
861 #define SMALL16 (SMALL | CODE16)
863 #define BIG16 (BIG | CODE16)
867 #define INLINE __inline__
873 #define ENCODE_RELAX_STATE(type, size) \
874 ((relax_substateT) (((type) << 2) | (size)))
875 #define TYPE_FROM_RELAX_STATE(s) \
877 #define DISP_SIZE_FROM_RELAX_STATE(s) \
878 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
880 /* This table is used by relax_frag to promote short jumps to long
881 ones where necessary. SMALL (short) jumps may be promoted to BIG
882 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
883 don't allow a short jump in a 32 bit code segment to be promoted to
884 a 16 bit offset jump because it's slower (requires data size
885 prefix), and doesn't work, unless the destination is in the bottom
886 64k of the code segment (The top 16 bits of eip are zeroed). */
888 const relax_typeS md_relax_table
[] =
891 1) most positive reach of this state,
892 2) most negative reach of this state,
893 3) how many bytes this mode will have in the variable part of the frag
894 4) which index into the table to try if we can't fit into this one. */
896 /* UNCOND_JUMP states. */
897 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
898 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
899 /* dword jmp adds 4 bytes to frag:
900 0 extra opcode bytes, 4 displacement bytes. */
902 /* word jmp adds 2 bytes to frag:
903 0 extra opcode bytes, 2 displacement bytes. */
906 /* COND_JUMP states. */
907 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
908 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
909 /* dword conditionals adds 5 bytes to frag:
910 1 extra opcode byte, 4 displacement bytes. */
912 /* word conditionals add 3 bytes to frag:
913 1 extra opcode byte, 2 displacement bytes. */
916 /* COND_JUMP86 states. */
917 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
918 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
919 /* dword conditionals adds 5 bytes to frag:
920 1 extra opcode byte, 4 displacement bytes. */
922 /* word conditionals add 4 bytes to frag:
923 1 displacement byte and a 3 byte long branch insn. */
927 static const arch_entry cpu_arch
[] =
929 /* Do not replace the first two entries - i386_target_format()
930 relies on them being there in this order. */
931 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
932 CPU_GENERIC32_FLAGS
, 0 },
933 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
934 CPU_GENERIC64_FLAGS
, 0 },
935 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
937 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
939 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
941 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
943 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
945 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
947 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
949 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
951 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
952 CPU_PENTIUMPRO_FLAGS
, 0 },
953 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
955 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
957 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
959 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
961 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
962 CPU_NOCONA_FLAGS
, 0 },
963 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
965 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
967 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
968 CPU_CORE2_FLAGS
, 1 },
969 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
970 CPU_CORE2_FLAGS
, 0 },
971 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
972 CPU_COREI7_FLAGS
, 0 },
973 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
975 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
977 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
978 CPU_IAMCU_FLAGS
, 0 },
979 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
981 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
983 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
984 CPU_ATHLON_FLAGS
, 0 },
985 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
987 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
989 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
991 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
992 CPU_AMDFAM10_FLAGS
, 0 },
993 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
994 CPU_BDVER1_FLAGS
, 0 },
995 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
996 CPU_BDVER2_FLAGS
, 0 },
997 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
998 CPU_BDVER3_FLAGS
, 0 },
999 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
1000 CPU_BDVER4_FLAGS
, 0 },
1001 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
1002 CPU_ZNVER1_FLAGS
, 0 },
1003 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER
,
1004 CPU_ZNVER2_FLAGS
, 0 },
1005 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
1006 CPU_BTVER1_FLAGS
, 0 },
1007 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
1008 CPU_BTVER2_FLAGS
, 0 },
1009 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
1010 CPU_8087_FLAGS
, 0 },
1011 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
1013 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
1015 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
1017 { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN
,
1018 CPU_CMOV_FLAGS
, 0 },
1019 { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN
,
1020 CPU_FXSR_FLAGS
, 0 },
1021 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
1023 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
1025 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
1026 CPU_SSE2_FLAGS
, 0 },
1027 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
1028 CPU_SSE3_FLAGS
, 0 },
1029 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1030 CPU_SSE4A_FLAGS
, 0 },
1031 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
1032 CPU_SSSE3_FLAGS
, 0 },
1033 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
1034 CPU_SSE4_1_FLAGS
, 0 },
1035 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
1036 CPU_SSE4_2_FLAGS
, 0 },
1037 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
1038 CPU_SSE4_2_FLAGS
, 0 },
1039 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
1041 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
1042 CPU_AVX2_FLAGS
, 0 },
1043 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
1044 CPU_AVX512F_FLAGS
, 0 },
1045 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
1046 CPU_AVX512CD_FLAGS
, 0 },
1047 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
1048 CPU_AVX512ER_FLAGS
, 0 },
1049 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
1050 CPU_AVX512PF_FLAGS
, 0 },
1051 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
1052 CPU_AVX512DQ_FLAGS
, 0 },
1053 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
1054 CPU_AVX512BW_FLAGS
, 0 },
1055 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
1056 CPU_AVX512VL_FLAGS
, 0 },
1057 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
1059 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
1060 CPU_VMFUNC_FLAGS
, 0 },
1061 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
1063 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
1064 CPU_XSAVE_FLAGS
, 0 },
1065 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
1066 CPU_XSAVEOPT_FLAGS
, 0 },
1067 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
1068 CPU_XSAVEC_FLAGS
, 0 },
1069 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
1070 CPU_XSAVES_FLAGS
, 0 },
1071 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
1073 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
1074 CPU_PCLMUL_FLAGS
, 0 },
1075 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
1076 CPU_PCLMUL_FLAGS
, 1 },
1077 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
1078 CPU_FSGSBASE_FLAGS
, 0 },
1079 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
1080 CPU_RDRND_FLAGS
, 0 },
1081 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
1082 CPU_F16C_FLAGS
, 0 },
1083 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
1084 CPU_BMI2_FLAGS
, 0 },
1085 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
1087 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
1088 CPU_FMA4_FLAGS
, 0 },
1089 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
1091 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
1093 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
1094 CPU_MOVBE_FLAGS
, 0 },
1095 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
1096 CPU_CX16_FLAGS
, 0 },
1097 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
1099 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
1100 CPU_LZCNT_FLAGS
, 0 },
1101 { STRING_COMMA_LEN (".popcnt"), PROCESSOR_UNKNOWN
,
1102 CPU_POPCNT_FLAGS
, 0 },
1103 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
1105 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
1107 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
1108 CPU_INVPCID_FLAGS
, 0 },
1109 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
1110 CPU_CLFLUSH_FLAGS
, 0 },
1111 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
1113 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
1114 CPU_SYSCALL_FLAGS
, 0 },
1115 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
1116 CPU_RDTSCP_FLAGS
, 0 },
1117 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
1118 CPU_3DNOW_FLAGS
, 0 },
1119 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
1120 CPU_3DNOWA_FLAGS
, 0 },
1121 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
1122 CPU_PADLOCK_FLAGS
, 0 },
1123 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
1124 CPU_SVME_FLAGS
, 1 },
1125 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
1126 CPU_SVME_FLAGS
, 0 },
1127 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1128 CPU_SSE4A_FLAGS
, 0 },
1129 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
1131 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
1133 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
1135 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
1137 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
1138 CPU_RDSEED_FLAGS
, 0 },
1139 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
1140 CPU_PRFCHW_FLAGS
, 0 },
1141 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
1142 CPU_SMAP_FLAGS
, 0 },
1143 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
1145 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
1147 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
1148 CPU_CLFLUSHOPT_FLAGS
, 0 },
1149 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
1150 CPU_PREFETCHWT1_FLAGS
, 0 },
1151 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
1153 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
1154 CPU_CLWB_FLAGS
, 0 },
1155 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
1156 CPU_AVX512IFMA_FLAGS
, 0 },
1157 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
1158 CPU_AVX512VBMI_FLAGS
, 0 },
1159 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
1160 CPU_AVX512_4FMAPS_FLAGS
, 0 },
1161 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
1162 CPU_AVX512_4VNNIW_FLAGS
, 0 },
1163 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
1164 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
1165 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
1166 CPU_AVX512_VBMI2_FLAGS
, 0 },
1167 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN
,
1168 CPU_AVX512_VNNI_FLAGS
, 0 },
1169 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN
,
1170 CPU_AVX512_BITALG_FLAGS
, 0 },
1171 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
1172 CPU_CLZERO_FLAGS
, 0 },
1173 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
1174 CPU_MWAITX_FLAGS
, 0 },
1175 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
1176 CPU_OSPKE_FLAGS
, 0 },
1177 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
1178 CPU_RDPID_FLAGS
, 0 },
1179 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
1180 CPU_PTWRITE_FLAGS
, 0 },
1181 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN
,
1183 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN
,
1184 CPU_SHSTK_FLAGS
, 0 },
1185 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1186 CPU_GFNI_FLAGS
, 0 },
1187 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN
,
1188 CPU_VAES_FLAGS
, 0 },
1189 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN
,
1190 CPU_VPCLMULQDQ_FLAGS
, 0 },
1191 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN
,
1192 CPU_WBNOINVD_FLAGS
, 0 },
1193 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN
,
1194 CPU_PCONFIG_FLAGS
, 0 },
1195 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN
,
1196 CPU_WAITPKG_FLAGS
, 0 },
1197 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN
,
1198 CPU_CLDEMOTE_FLAGS
, 0 },
1199 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN
,
1200 CPU_MOVDIRI_FLAGS
, 0 },
1201 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN
,
1202 CPU_MOVDIR64B_FLAGS
, 0 },
1203 { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN
,
1204 CPU_AVX512_BF16_FLAGS
, 0 },
1205 { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN
,
1206 CPU_AVX512_VP2INTERSECT_FLAGS
, 0 },
1207 { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN
,
1208 CPU_ENQCMD_FLAGS
, 0 },
1209 { STRING_COMMA_LEN (".serialize"), PROCESSOR_UNKNOWN
,
1210 CPU_SERIALIZE_FLAGS
, 0 },
1211 { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN
,
1212 CPU_RDPRU_FLAGS
, 0 },
1213 { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN
,
1214 CPU_MCOMMIT_FLAGS
, 0 },
1215 { STRING_COMMA_LEN (".sev_es"), PROCESSOR_UNKNOWN
,
1216 CPU_SEV_ES_FLAGS
, 0 },
1217 { STRING_COMMA_LEN (".tsxldtrk"), PROCESSOR_UNKNOWN
,
1218 CPU_TSXLDTRK_FLAGS
, 0 },
1221 static const noarch_entry cpu_noarch
[] =
1223 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1224 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1225 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1226 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1227 { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS
},
1228 { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS
},
1229 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1230 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1231 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1232 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1233 { STRING_COMMA_LEN ("nosse4a"), CPU_ANY_SSE4A_FLAGS
},
1234 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1235 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1236 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1237 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1238 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1239 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1240 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1241 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1242 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1243 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1244 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1245 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1246 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1247 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1248 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1249 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1250 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1251 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1252 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1253 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS
},
1254 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS
},
1255 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS
},
1256 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS
},
1257 { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS
},
1258 { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS
},
1259 { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS
},
1260 { STRING_COMMA_LEN ("noavx512_vp2intersect"), CPU_ANY_SHSTK_FLAGS
},
1261 { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS
},
1262 { STRING_COMMA_LEN ("noserialize"), CPU_ANY_SERIALIZE_FLAGS
},
1263 { STRING_COMMA_LEN ("notsxldtrk"), CPU_ANY_TSXLDTRK_FLAGS
},
1267 /* Like s_lcomm_internal in gas/read.c but the alignment string
1268 is allowed to be optional. */
/* NOTE(review): the extraction here is fragmentary (tokens split across
   lines, some statements elided); code kept byte-identical.  */
1271 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
/* If a comma follows the size, an explicit alignment operand is given.  */
1278 && *input_line_pointer
== ',')
1280 align
= parse_align (needs_align
- 1);
/* parse_align reports failure with (addressT) -1.  */
1282 if (align
== (addressT
) -1)
/* Reserve SIZE bytes for SYMBOLP in .bss with the chosen alignment.  */
1297 bss_alloc (symbolP
, size
, align
);
/* Handler for the PE ".lcomm" directive; needs_align * 2 tells
   s_comm_internal that the alignment argument is optional.  */
1302 pe_lcomm (int needs_align
)
1304 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
/* Target-specific pseudo-op table: maps directive names to a handler
   function and an integer argument passed to that handler.  */
1308 const pseudo_typeS md_pseudo_table
[] =
1310 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
/* ".align" takes a byte count in this configuration ...  */
1311 {"align", s_align_bytes
, 0},
/* ... and a power of two in the other.  */
1313 {"align", s_align_ptwo
, 0},
1315 {"arch", set_cpu_arch
, 0},
1319 {"lcomm", pe_lcomm
, 1},
/* x87 float constant directives: single, double, extended precision.  */
1321 {"ffloat", float_cons
, 'f'},
1322 {"dfloat", float_cons
, 'd'},
1323 {"tfloat", float_cons
, 'x'},
1325 {"slong", signed_cons
, 4},
/* ".noopt"/".optim" are accepted for compatibility and ignored.  */
1326 {"noopt", s_ignore
, 0},
1327 {"optim", s_ignore
, 0},
/* Code-size mode switches.  */
1328 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1329 {"code16", set_code_flag
, CODE_16BIT
},
1330 {"code32", set_code_flag
, CODE_32BIT
},
1332 {"code64", set_code_flag
, CODE_64BIT
},
/* Syntax / mnemonic flavor selection (1 = Intel, 0 = AT&T).  */
1334 {"intel_syntax", set_intel_syntax
, 1},
1335 {"att_syntax", set_intel_syntax
, 0},
1336 {"intel_mnemonic", set_intel_mnemonic
, 1},
1337 {"att_mnemonic", set_intel_mnemonic
, 0},
1338 {"allow_index_reg", set_allow_index_reg
, 1},
1339 {"disallow_index_reg", set_allow_index_reg
, 0},
1340 {"sse_check", set_check
, 0},
1341 {"operand_check", set_check
, 1},
1342 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1343 {"largecomm", handle_large_common
, 0},
/* DWARF debug-info directives.  */
1345 {"file", dwarf2_directive_file
, 0},
1346 {"loc", dwarf2_directive_loc
, 0},
1347 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1350 {"secrel32", pe_directive_secrel
, 0},
1355 /* For interface with expression (). */
1356 extern char *input_line_pointer
;
1358 /* Hash table for instruction mnemonic lookup. */
1359 static struct hash_control
*op_hash
;
1361 /* Hash table for register lookup. */
1362 static struct hash_control
*reg_hash
;
1364 /* Various efficient no-op patterns for aligning code labels.
1365 Note: Don't try to assemble the instructions in the comments.
1366 0L and 0w are not legal. */
/* fN_M is the M-byte NOP for N-bit code; pattern tables below index
   these by size - 1.  */
1367 static const unsigned char f32_1
[] =
1369 static const unsigned char f32_2
[] =
1370 {0x66,0x90}; /* xchg %ax,%ax */
1371 static const unsigned char f32_3
[] =
1372 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1373 static const unsigned char f32_4
[] =
1374 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1375 static const unsigned char f32_6
[] =
1376 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1377 static const unsigned char f32_7
[] =
1378 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1379 static const unsigned char f16_3
[] =
1380 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1381 static const unsigned char f16_4
[] =
1382 {0x8d,0xb4,0x00,0x00}; /* lea 0W(%si),%si */
/* Opcodes used when padding is long enough to jump over the NOPs.  */
1383 static const unsigned char jump_disp8
[] =
1384 {0xeb}; /* jmp disp8 */
1385 static const unsigned char jump32_disp32
[] =
1386 {0xe9}; /* jmp disp32 */
1387 static const unsigned char jump16_disp32
[] =
1388 {0x66,0xe9}; /* jmp disp32 */
1389 /* 32-bit NOPs patterns. */
/* NULL marks a size with no single-instruction NOP available.  */
1390 static const unsigned char *const f32_patt
[] = {
1391 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1393 /* 16-bit NOPs patterns. */
1394 static const unsigned char *const f16_patt
[] = {
1395 f32_1
, f32_2
, f16_3
, f16_4
/* alt_N: multi-byte 0F 1F NOP forms usable on CPUs with CpuNop.  */
1397 /* nopl (%[re]ax) */
1398 static const unsigned char alt_3
[] =
1400 /* nopl 0(%[re]ax) */
1401 static const unsigned char alt_4
[] =
1402 {0x0f,0x1f,0x40,0x00};
1403 /* nopl 0(%[re]ax,%[re]ax,1) */
1404 static const unsigned char alt_5
[] =
1405 {0x0f,0x1f,0x44,0x00,0x00};
1406 /* nopw 0(%[re]ax,%[re]ax,1) */
1407 static const unsigned char alt_6
[] =
1408 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1409 /* nopl 0L(%[re]ax) */
1410 static const unsigned char alt_7
[] =
1411 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1412 /* nopl 0L(%[re]ax,%[re]ax,1) */
1413 static const unsigned char alt_8
[] =
1414 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1415 /* nopw 0L(%[re]ax,%[re]ax,1) */
1416 static const unsigned char alt_9
[] =
1417 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1418 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1419 static const unsigned char alt_10
[] =
1420 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1421 /* data16 nopw %cs:0L(%eax,%eax,1) */
1422 static const unsigned char alt_11
[] =
1423 {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1424 /* 32-bit and 64-bit NOPs patterns. */
1425 static const unsigned char *const alt_patt
[] = {
1426 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1427 alt_9
, alt_10
, alt_11
1430 /* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
1431 size of a single NOP instruction MAX_SINGLE_NOP_SIZE. */
1434 i386_output_nops (char *where
, const unsigned char *const *patt
,
1435 int count
, int max_single_nop_size
)
1438 /* Place the longer NOP first. */
1441 const unsigned char *nops
;
/* A non-positive maximum NOP size is a caller bug; abort assembly.  */
1443 if (max_single_nop_size
< 1)
1445 as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
1446 max_single_nop_size
);
/* PATT is indexed by (size - 1).  */
1450 nops
= patt
[max_single_nop_size
- 1];
1452 /* Use the smaller one if the requested one isn't available. */
1455 max_single_nop_size
--;
1456 nops
= patt
[max_single_nop_size
- 1];
/* LAST is the remainder that doesn't fill a whole max-size NOP.  */
1459 last
= count
% max_single_nop_size
;
/* Emit as many max-size NOPs as fit.  */
1462 for (offset
= 0; offset
< count
; offset
+= max_single_nop_size
)
1463 memcpy (where
+ offset
, nops
, max_single_nop_size
);
1467 nops
= patt
[last
- 1];
1470 /* Use the smaller one plus one-byte NOP if the needed one
1473 nops
= patt
[last
- 1];
1474 memcpy (where
+ offset
, nops
, last
);
/* patt[0] is the single-byte NOP used to top up the tail.  */
1475 where
[offset
+ last
] = *patt
[0];
1478 memcpy (where
+ offset
, nops
, last
);
/* Return nonzero if NUM fits in an unsigned 7-bit immediate.  */
1483 fits_in_imm7 (offsetT num
)
1485 return (num
& 0x7f) == num
;
/* Return nonzero if NUM fits in an unsigned 31-bit immediate.  */
1489 fits_in_imm31 (offsetT num
)
1491 return (num
& 0x7fffffff) == num
;
1494 /* Generate COUNT bytes of NOPs to WHERE with the maximum size of a
1495 single NOP instruction LIMIT. */
/* NOTE(review): heavily elided extraction — many branch bodies and
   braces are missing; code kept byte-identical.  */
1498 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1500 const unsigned char *const *patt
= NULL
;
1501 int max_single_nop_size
;
1502 /* Maximum number of NOPs before switching to jump over NOPs. */
1503 int max_number_of_nops
;
/* Dispatch on the kind of frag being filled.  */
1505 switch (fragP
->fr_type
)
1510 case rs_machine_dependent
:
1511 /* Allow NOP padding for jumps and calls. */
1512 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
1513 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
1520 /* We need to decide which NOP sequence to use for 32bit and
1521 64bit. When -mtune= is used:
1523 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1524 PROCESSOR_GENERIC32, f32_patt will be used.
1525 2. For the rest, alt_patt will be used.
1527 When -mtune= isn't used, alt_patt will be used if
1528 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1531 When -march= or .arch is used, we can't use anything beyond
1532 cpu_arch_isa_flags. */
1534 if (flag_code
== CODE_16BIT
)
1537 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1538 /* Limit number of NOPs to 2 in 16-bit mode. */
1539 max_number_of_nops
= 2;
/* No -march=/.arch: pick a pattern from the tuning setting.  */
1543 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1545 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1546 switch (cpu_arch_tune
)
1548 case PROCESSOR_UNKNOWN
:
1549 /* We use cpu_arch_isa_flags to check if we SHOULD
1550 optimize with nops. */
1551 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
/* Processors known to support the long 0F 1F NOP forms.  */
1556 case PROCESSOR_PENTIUM4
:
1557 case PROCESSOR_NOCONA
:
1558 case PROCESSOR_CORE
:
1559 case PROCESSOR_CORE2
:
1560 case PROCESSOR_COREI7
:
1561 case PROCESSOR_L1OM
:
1562 case PROCESSOR_K1OM
:
1563 case PROCESSOR_GENERIC64
:
1565 case PROCESSOR_ATHLON
:
1567 case PROCESSOR_AMDFAM10
:
1569 case PROCESSOR_ZNVER
:
/* Older / small cores: stick to the classic lea-based NOPs.  */
1573 case PROCESSOR_I386
:
1574 case PROCESSOR_I486
:
1575 case PROCESSOR_PENTIUM
:
1576 case PROCESSOR_PENTIUMPRO
:
1577 case PROCESSOR_IAMCU
:
1578 case PROCESSOR_GENERIC32
:
/* An ISA was selected (-march=/.arch): honour the tuning but only
   use what the selected ISA allows.  */
1585 switch (fragP
->tc_frag_data
.tune
)
1587 case PROCESSOR_UNKNOWN
:
1588 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1589 PROCESSOR_UNKNOWN. */
1593 case PROCESSOR_I386
:
1594 case PROCESSOR_I486
:
1595 case PROCESSOR_PENTIUM
:
1596 case PROCESSOR_IAMCU
:
1598 case PROCESSOR_ATHLON
:
1600 case PROCESSOR_AMDFAM10
:
1602 case PROCESSOR_ZNVER
:
1604 case PROCESSOR_GENERIC32
:
1605 /* We use cpu_arch_isa_flags to check if we CAN optimize
1607 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1612 case PROCESSOR_PENTIUMPRO
:
1613 case PROCESSOR_PENTIUM4
:
1614 case PROCESSOR_NOCONA
:
1615 case PROCESSOR_CORE
:
1616 case PROCESSOR_CORE2
:
1617 case PROCESSOR_COREI7
:
1618 case PROCESSOR_L1OM
:
1619 case PROCESSOR_K1OM
:
1620 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1625 case PROCESSOR_GENERIC64
:
/* Derive the per-NOP limits from the chosen pattern table.  */
1631 if (patt
== f32_patt
)
1633 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1634 /* Limit number of NOPs to 2 for older processors. */
1635 max_number_of_nops
= 2;
1639 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1640 /* Limit number of NOPs to 7 for newer processors. */
1641 max_number_of_nops
= 7;
1646 limit
= max_single_nop_size
;
1648 if (fragP
->fr_type
== rs_fill_nop
)
1650 /* Output NOPs for .nop directive. */
1651 if (limit
> max_single_nop_size
)
1653 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1654 _("invalid single nop size: %d "
1655 "(expect within [0, %d])"),
1656 limit
, max_single_nop_size
);
1660 else if (fragP
->fr_type
!= rs_machine_dependent
)
1661 fragP
->fr_var
= count
;
/* Too many NOPs needed: jump over the padding instead.  */
1663 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1665 /* Generate jump over NOPs. */
1666 offsetT disp
= count
- 2;
1667 if (fits_in_imm7 (disp
))
1669 /* Use "jmp disp8" if possible. */
1671 where
[0] = jump_disp8
[0];
1677 unsigned int size_of_jump
;
1679 if (flag_code
== CODE_16BIT
)
1681 where
[0] = jump16_disp32
[0];
1682 where
[1] = jump16_disp32
[1];
1687 where
[0] = jump32_disp32
[0];
/* Account for the jump opcode plus its 4-byte displacement.  */
1691 count
-= size_of_jump
+ 4;
1692 if (!fits_in_imm31 (count
))
1694 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1695 _("jump over nop padding out of range"));
1699 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1700 where
+= size_of_jump
+ 4;
1704 /* Generate multiple NOPs. */
1705 i386_output_nops (where
, patt
, count
, limit
);
/* Return nonzero if every word of the operand-type union *X is zero.
   The switch falls through the union's array elements.  */
1709 operand_type_all_zero (const union i386_operand_type
*x
)
1711 switch (ARRAY_SIZE(x
->array
))
1722 return !x
->array
[0];
/* Set every word of *X to V, then clear the class/instance fields,
   which are enumerations rather than single-feature bits.  */
1729 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1731 switch (ARRAY_SIZE(x
->array
))
1747 x
->bitfield
.class = ClassNone
;
1748 x
->bitfield
.instance
= InstanceNone
;
/* Return nonzero if operand types *X and *Y are word-for-word equal.  */
1752 operand_type_equal (const union i386_operand_type
*x
,
1753 const union i386_operand_type
*y
)
1755 switch (ARRAY_SIZE(x
->array
))
1758 if (x
->array
[2] != y
->array
[2])
1762 if (x
->array
[1] != y
->array
[1])
1766 return x
->array
[0] == y
->array
[0];
/* Return nonzero if every word of the CPU-flags union *X is zero.  */
1774 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1776 switch (ARRAY_SIZE(x
->array
))
1791 return !x
->array
[0];
/* Return nonzero if CPU flag sets *X and *Y are word-for-word equal.  */
1798 cpu_flags_equal (const union i386_cpu_flags
*x
,
1799 const union i386_cpu_flags
*y
)
1801 switch (ARRAY_SIZE(x
->array
))
1804 if (x
->array
[3] != y
->array
[3])
1808 if (x
->array
[2] != y
->array
[2])
1812 if (x
->array
[1] != y
->array
[1])
1816 return x
->array
[0] == y
->array
[0];
/* Return nonzero if feature set F is usable in the current code mode:
   reject Cpu64-only insns outside 64-bit mode and CpuNo64 insns in it.  */
1824 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1826 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1827 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
/* Word-wise intersection of two CPU flag sets (X &= Y); returns X.
   The switch falls through the union's array elements.  */
1830 static INLINE i386_cpu_flags
1831 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1833 switch (ARRAY_SIZE (x
.array
))
1836 x
.array
[3] &= y
.array
[3];
1839 x
.array
[2] &= y
.array
[2];
1842 x
.array
[1] &= y
.array
[1];
1845 x
.array
[0] &= y
.array
[0];
/* Word-wise union of two CPU flag sets (X |= Y); returns X.  */
1853 static INLINE i386_cpu_flags
1854 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1856 switch (ARRAY_SIZE (x
.array
))
1859 x
.array
[3] |= y
.array
[3];
1862 x
.array
[2] |= y
.array
[2];
1865 x
.array
[1] |= y
.array
[1];
1868 x
.array
[0] |= y
.array
[0];
/* Word-wise set difference of two CPU flag sets (X &= ~Y); returns X.  */
1876 static INLINE i386_cpu_flags
1877 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1879 switch (ARRAY_SIZE (x
.array
))
1882 x
.array
[3] &= ~y
.array
[3];
1885 x
.array
[2] &= ~y
.array
[2];
1888 x
.array
[1] &= ~y
.array
[1];
1891 x
.array
[0] &= ~y
.array
[0];
/* Feature set cleared when AVX512 is disabled.  */
1899 static const i386_cpu_flags avx512
= CPU_ANY_AVX512F_FLAGS
;
/* Match-result bits returned by cpu_flags_match.  */
1901 #define CPU_FLAGS_ARCH_MATCH 0x1
1902 #define CPU_FLAGS_64BIT_MATCH 0x2
1904 #define CPU_FLAGS_PERFECT_MATCH \
1905 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1907 /* Return CPU flags match bits. */
1910 cpu_flags_match (const insn_template
*t
)
1912 i386_cpu_flags x
= t
->cpu_flags
;
1913 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
/* Mode bits were already judged above; drop them from the set.  */
1915 x
.bitfield
.cpu64
= 0;
1916 x
.bitfield
.cpuno64
= 0;
1918 if (cpu_flags_all_zero (&x
))
1920 /* This instruction is available on all archs. */
1921 match
|= CPU_FLAGS_ARCH_MATCH
;
1925 /* This instruction is available only on some archs. */
1926 i386_cpu_flags cpu
= cpu_arch_flags
;
1928 /* AVX512VL is no standalone feature - match it and then strip it. */
1929 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1931 x
.bitfield
.cpuavx512vl
= 0;
/* Intersect required features with what the arch provides.  */
1933 cpu
= cpu_flags_and (x
, cpu
);
1934 if (!cpu_flags_all_zero (&cpu
))
1936 if (x
.bitfield
.cpuavx
)
1938 /* We need to check a few extra flags with AVX. */
1939 if (cpu
.bitfield
.cpuavx
1940 && (!t
->opcode_modifier
.sse2avx
|| sse2avx
)
1941 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1942 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1943 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1944 match
|= CPU_FLAGS_ARCH_MATCH
;
1946 else if (x
.bitfield
.cpuavx512f
)
1948 /* We need to check a few extra flags with AVX512F. */
1949 if (cpu
.bitfield
.cpuavx512f
1950 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1951 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1952 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1953 match
|= CPU_FLAGS_ARCH_MATCH
;
1956 match
|= CPU_FLAGS_ARCH_MATCH
;
/* Word-wise intersection of operand types (X &= Y); the class and
   instance fields are enumerations, so a mismatch clears them rather
   than being ANDed bit-wise.  */
1962 static INLINE i386_operand_type
1963 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1965 if (x
.bitfield
.class != y
.bitfield
.class)
1966 x
.bitfield
.class = ClassNone
;
1967 if (x
.bitfield
.instance
!= y
.bitfield
.instance
)
1968 x
.bitfield
.instance
= InstanceNone
;
1970 switch (ARRAY_SIZE (x
.array
))
1973 x
.array
[2] &= y
.array
[2];
1976 x
.array
[1] &= y
.array
[1];
1979 x
.array
[0] &= y
.array
[0];
/* Word-wise set difference (X &= ~Y); Y must carry no class/instance.  */
1987 static INLINE i386_operand_type
1988 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
1990 gas_assert (y
.bitfield
.class == ClassNone
);
1991 gas_assert (y
.bitfield
.instance
== InstanceNone
);
1993 switch (ARRAY_SIZE (x
.array
))
1996 x
.array
[2] &= ~y
.array
[2];
1999 x
.array
[1] &= ~y
.array
[1];
2002 x
.array
[0] &= ~y
.array
[0];
/* Word-wise union (X |= Y); class/instance of X and Y must agree or
   one of them must be the "none" value.  */
2010 static INLINE i386_operand_type
2011 operand_type_or (i386_operand_type x
, i386_operand_type y
)
2013 gas_assert (x
.bitfield
.class == ClassNone
||
2014 y
.bitfield
.class == ClassNone
||
2015 x
.bitfield
.class == y
.bitfield
.class);
2016 gas_assert (x
.bitfield
.instance
== InstanceNone
||
2017 y
.bitfield
.instance
== InstanceNone
||
2018 x
.bitfield
.instance
== y
.bitfield
.instance
);
2020 switch (ARRAY_SIZE (x
.array
))
2023 x
.array
[2] |= y
.array
[2];
2026 x
.array
[1] |= y
.array
[1];
2029 x
.array
[0] |= y
.array
[0];
/* Word-wise symmetric difference (X ^= Y); Y must carry no
   class/instance bits.  */
2037 static INLINE i386_operand_type
2038 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
2040 gas_assert (y
.bitfield
.class == ClassNone
);
2041 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2043 switch (ARRAY_SIZE (x
.array
))
2046 x
.array
[2] ^= y
.array
[2];
2049 x
.array
[1] ^= y
.array
[1];
2052 x
.array
[0] ^= y
.array
[0];
/* Commonly used operand-type masks, built from the generated
   OPERAND_TYPE_* initializers.  */
2060 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
2061 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
2062 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
2063 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
2064 static const i386_operand_type anydisp
= OPERAND_TYPE_ANYDISP
;
2065 static const i386_operand_type anyimm
= OPERAND_TYPE_ANYIMM
;
2066 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
2067 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
2068 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
2069 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
2070 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
2071 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
2072 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
2073 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
2074 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
2075 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
2076 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
/* Test operand type T against category C (register, immediate,
   displacement, or anything memory-like).  */
2087 operand_type_check (i386_operand_type t
, enum operand_type c
)
2092 return t
.bitfield
.class == Reg
;
/* Any immediate width.  */
2095 return (t
.bitfield
.imm8
2099 || t
.bitfield
.imm32s
2100 || t
.bitfield
.imm64
);
/* Any displacement width.  */
2103 return (t
.bitfield
.disp8
2104 || t
.bitfield
.disp16
2105 || t
.bitfield
.disp32
2106 || t
.bitfield
.disp32s
2107 || t
.bitfield
.disp64
);
/* Anything addressable: a displacement or a base/index form.  */
2110 return (t
.bitfield
.disp8
2111 || t
.bitfield
.disp16
2112 || t
.bitfield
.disp32
2113 || t
.bitfield
.disp32s
2114 || t
.bitfield
.disp64
2115 || t
.bitfield
.baseindex
);
2124 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
2125 between operand GIVEN and operand WANTED for instruction template T. */
2128 match_operand_size (const insn_template
*t
, unsigned int wanted
,
/* A conflict exists when the given operand has a size the template's
   wanted operand does not accept.  */
2131 return !((i
.types
[given
].bitfield
.byte
2132 && !t
->operand_types
[wanted
].bitfield
.byte
)
2133 || (i
.types
[given
].bitfield
.word
2134 && !t
->operand_types
[wanted
].bitfield
.word
)
2135 || (i
.types
[given
].bitfield
.dword
2136 && !t
->operand_types
[wanted
].bitfield
.dword
)
2137 || (i
.types
[given
].bitfield
.qword
2138 && !t
->operand_types
[wanted
].bitfield
.qword
)
2139 || (i
.types
[given
].bitfield
.tbyte
2140 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
2143 /* Return 1 if there is no conflict in SIMD register between operand
2144 GIVEN and operand WANTED for instruction template T. */
2147 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2150 return !((i
.types
[given
].bitfield
.xmmword
2151 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2152 || (i
.types
[given
].bitfield
.ymmword
2153 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2154 || (i
.types
[given
].bitfield
.zmmword
2155 && !t
->operand_types
[wanted
].bitfield
.zmmword
));
2158 /* Return 1 if there is no conflict in any size between operand GIVEN
2159 and operand WANTED for instruction template T. */
2162 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2165 return (match_operand_size (t
, wanted
, given
)
2166 && !((i
.types
[given
].bitfield
.unspecified
2168 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2169 || (i
.types
[given
].bitfield
.fword
2170 && !t
->operand_types
[wanted
].bitfield
.fword
)
2171 /* For scalar opcode templates to allow register and memory
2172 operands at the same time, some special casing is needed
2173 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2174 down-conversion vpmov*. */
2175 || ((t
->operand_types
[wanted
].bitfield
.class == RegSIMD
2176 && t
->operand_types
[wanted
].bitfield
.byte
2177 + t
->operand_types
[wanted
].bitfield
.word
2178 + t
->operand_types
[wanted
].bitfield
.dword
2179 + t
->operand_types
[wanted
].bitfield
.qword
2180 > !!t
->opcode_modifier
.broadcast
)
2181 ? (i
.types
[given
].bitfield
.xmmword
2182 || i
.types
[given
].bitfield
.ymmword
2183 || i
.types
[given
].bitfield
.zmmword
)
2184 : !match_simd_size(t
, wanted
, given
))));
2187 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2188 operands for instruction template T, and it has MATCH_REVERSE set if there
2189 is no size conflict on any operands for the template with operands reversed
2190 (and the template allows for reversing in the first place). */
2192 #define MATCH_STRAIGHT 1
2193 #define MATCH_REVERSE 2
2195 static INLINE
unsigned int
2196 operand_size_match (const insn_template
*t
)
2198 unsigned int j
, match
= MATCH_STRAIGHT
;
2200 /* Don't check non-absolute jump instructions. */
2201 if (t
->opcode_modifier
.jump
2202 && t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
2205 /* Check memory and accumulator operand size. */
2206 for (j
= 0; j
< i
.operands
; j
++)
/* Anysize templates accept any memory operand size.  */
2208 if (i
.types
[j
].bitfield
.class != Reg
2209 && i
.types
[j
].bitfield
.class != RegSIMD
2210 && t
->opcode_modifier
.anysize
)
2213 if (t
->operand_types
[j
].bitfield
.class == Reg
2214 && !match_operand_size (t
, j
, j
))
2220 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2221 && !match_simd_size (t
, j
, j
))
2227 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2228 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2234 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
/* Straight order failed; without the D (direction) modifier there is
   no reversed form to try.  */
2241 if (!t
->opcode_modifier
.d
)
2245 i
.error
= operand_size_mismatch
;
2249 /* Check reverse. */
2250 gas_assert (i
.operands
>= 2 && i
.operands
<= 3);
2252 for (j
= 0; j
< i
.operands
; j
++)
2254 unsigned int given
= i
.operands
- j
- 1;
2256 if (t
->operand_types
[j
].bitfield
.class == Reg
2257 && !match_operand_size (t
, j
, given
))
2260 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2261 && !match_simd_size (t
, j
, given
))
2264 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2265 && (!match_operand_size (t
, j
, given
)
2266 || !match_simd_size (t
, j
, given
)))
2269 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2273 return match
| MATCH_REVERSE
;
/* Return nonzero if the overlap of the given operand type with a
   template operand type is meaningful, i.e. more than just size bits,
   and the base/index property agrees.  Sets i.error otherwise.  */
2277 operand_type_match (i386_operand_type overlap
,
2278 i386_operand_type given
)
2280 i386_operand_type temp
= overlap
;
/* Strip pure size/unspecified bits; what remains must be non-empty.  */
2282 temp
.bitfield
.unspecified
= 0;
2283 temp
.bitfield
.byte
= 0;
2284 temp
.bitfield
.word
= 0;
2285 temp
.bitfield
.dword
= 0;
2286 temp
.bitfield
.fword
= 0;
2287 temp
.bitfield
.qword
= 0;
2288 temp
.bitfield
.tbyte
= 0;
2289 temp
.bitfield
.xmmword
= 0;
2290 temp
.bitfield
.ymmword
= 0;
2291 temp
.bitfield
.zmmword
= 0;
2292 if (operand_type_all_zero (&temp
))
2295 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
)
2299 i
.error
= operand_type_mismatch
;
2303 /* If given types g0 and g1 are registers they must be of the same type
2304 unless the expected operand type register overlap is null.
2305 Some Intel syntax memory operand size checking also happens here. */
2308 operand_type_register_match (i386_operand_type g0
,
2309 i386_operand_type t0
,
2310 i386_operand_type g1
,
2311 i386_operand_type t1
)
/* Skip the check when g0 is neither a (SIMD) register nor a sized
   memory operand matched against a register template.  */
2313 if (g0
.bitfield
.class != Reg
2314 && g0
.bitfield
.class != RegSIMD
2315 && (!operand_type_check (g0
, anymem
)
2316 || g0
.bitfield
.unspecified
2317 || (t0
.bitfield
.class != Reg
2318 && t0
.bitfield
.class != RegSIMD
)))
/* Same test for the second operand.  */
2321 if (g1
.bitfield
.class != Reg
2322 && g1
.bitfield
.class != RegSIMD
2323 && (!operand_type_check (g1
, anymem
)
2324 || g1
.bitfield
.unspecified
2325 || (t1
.bitfield
.class != Reg
2326 && t1
.bitfield
.class != RegSIMD
)))
/* Operands of identical size always match.  */
2329 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2330 && g0
.bitfield
.word
== g1
.bitfield
.word
2331 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2332 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2333 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2334 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2335 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
/* Otherwise the sizes may differ only if the template operands have
   no size in common (the "null register overlap" case).  */
2338 if (!(t0
.bitfield
.byte
& t1
.bitfield
.byte
)
2339 && !(t0
.bitfield
.word
& t1
.bitfield
.word
)
2340 && !(t0
.bitfield
.dword
& t1
.bitfield
.dword
)
2341 && !(t0
.bitfield
.qword
& t1
.bitfield
.qword
)
2342 && !(t0
.bitfield
.xmmword
& t1
.bitfield
.xmmword
)
2343 && !(t0
.bitfield
.ymmword
& t1
.bitfield
.ymmword
)
2344 && !(t0
.bitfield
.zmmword
& t1
.bitfield
.zmmword
))
2347 i
.error
= register_type_mismatch
;
/* Return the architectural register number of R, folding in the REX
   (adds 8) and EVEX high-16 (adds 16) extension bits.  */
2352 static INLINE
unsigned int
2353 register_number (const reg_entry
*r
)
2355 unsigned int nr
= r
->reg_num
;
2357 if (r
->reg_flags
& RegRex
)
2360 if (r
->reg_flags
& RegVRex
)
/* Return the ModRM "mod" field value implied by displacement type T:
   disp8 and the wider forms select different addressing modes.  */
2366 static INLINE
unsigned int
2367 mode_from_disp_size (i386_operand_type t
)
2369 if (t
.bitfield
.disp8
)
2371 else if (t
.bitfield
.disp16
2372 || t
.bitfield
.disp32
2373 || t
.bitfield
.disp32s
)
2380 fits_in_signed_byte (addressT num
)
2382 return num
+ 0x80 <= 0xff;
2386 fits_in_unsigned_byte (addressT num
)
2392 fits_in_unsigned_word (addressT num
)
2394 return num
<= 0xffff;
2398 fits_in_signed_word (addressT num
)
2400 return num
+ 0x8000 <= 0xffff;
2404 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2409 return num
+ 0x80000000 <= 0xffffffff;
2411 } /* fits_in_signed_long() */
2414 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2419 return num
<= 0xffffffff;
2421 } /* fits_in_unsigned_long() */
2424 fits_in_disp8 (offsetT num
)
2426 int shift
= i
.memshift
;
2432 mask
= (1 << shift
) - 1;
2434 /* Return 0 if NUM isn't properly aligned. */
2438 /* Check if NUM will fit in 8bit after shift. */
2439 return fits_in_signed_byte (num
>> shift
);
2443 fits_in_imm4 (offsetT num
)
2445 return (num
& 0xf) == num
;
2448 static i386_operand_type
2449 smallest_imm_type (offsetT num
)
2451 i386_operand_type t
;
2453 operand_type_set (&t
, 0);
2454 t
.bitfield
.imm64
= 1;
2456 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2458 /* This code is disabled on the 486 because all the Imm1 forms
2459 in the opcode table are slower on the i486. They're the
2460 versions with the implicitly specified single-position
2461 displacement, which has another syntax if you really want to
2463 t
.bitfield
.imm1
= 1;
2464 t
.bitfield
.imm8
= 1;
2465 t
.bitfield
.imm8s
= 1;
2466 t
.bitfield
.imm16
= 1;
2467 t
.bitfield
.imm32
= 1;
2468 t
.bitfield
.imm32s
= 1;
2470 else if (fits_in_signed_byte (num
))
2472 t
.bitfield
.imm8
= 1;
2473 t
.bitfield
.imm8s
= 1;
2474 t
.bitfield
.imm16
= 1;
2475 t
.bitfield
.imm32
= 1;
2476 t
.bitfield
.imm32s
= 1;
2478 else if (fits_in_unsigned_byte (num
))
2480 t
.bitfield
.imm8
= 1;
2481 t
.bitfield
.imm16
= 1;
2482 t
.bitfield
.imm32
= 1;
2483 t
.bitfield
.imm32s
= 1;
2485 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2487 t
.bitfield
.imm16
= 1;
2488 t
.bitfield
.imm32
= 1;
2489 t
.bitfield
.imm32s
= 1;
2491 else if (fits_in_signed_long (num
))
2493 t
.bitfield
.imm32
= 1;
2494 t
.bitfield
.imm32s
= 1;
2496 else if (fits_in_unsigned_long (num
))
2497 t
.bitfield
.imm32
= 1;
2503 offset_in_range (offsetT val
, int size
)
2509 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2510 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2511 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2513 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2519 /* If BFD64, sign extend val for 32bit address mode. */
2520 if (flag_code
!= CODE_64BIT
2521 || i
.prefix
[ADDR_PREFIX
])
2522 if ((val
& ~(((addressT
) 2 << 31) - 1)) == 0)
2523 val
= (val
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2526 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2528 char buf1
[40], buf2
[40];
2530 sprint_value (buf1
, val
);
2531 sprint_value (buf2
, val
& mask
);
2532 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2547 a. PREFIX_EXIST if attempting to add a prefix where one from the
2548 same class already exists.
2549 b. PREFIX_LOCK if lock prefix is added.
2550 c. PREFIX_REP if rep/repne prefix is added.
2551 d. PREFIX_DS if ds prefix is added.
2552 e. PREFIX_OTHER if other prefix is added.
2555 static enum PREFIX_GROUP
2556 add_prefix (unsigned int prefix
)
2558 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2561 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2562 && flag_code
== CODE_64BIT
)
2564 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2565 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2566 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2567 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2578 case DS_PREFIX_OPCODE
:
2581 case CS_PREFIX_OPCODE
:
2582 case ES_PREFIX_OPCODE
:
2583 case FS_PREFIX_OPCODE
:
2584 case GS_PREFIX_OPCODE
:
2585 case SS_PREFIX_OPCODE
:
2589 case REPNE_PREFIX_OPCODE
:
2590 case REPE_PREFIX_OPCODE
:
2595 case LOCK_PREFIX_OPCODE
:
2604 case ADDR_PREFIX_OPCODE
:
2608 case DATA_PREFIX_OPCODE
:
2612 if (i
.prefix
[q
] != 0)
2620 i
.prefix
[q
] |= prefix
;
2623 as_bad (_("same type of prefix used twice"));
2629 update_code_flag (int value
, int check
)
2631 PRINTF_LIKE ((*as_error
));
2633 flag_code
= (enum flag_code
) value
;
2634 if (flag_code
== CODE_64BIT
)
2636 cpu_arch_flags
.bitfield
.cpu64
= 1;
2637 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2641 cpu_arch_flags
.bitfield
.cpu64
= 0;
2642 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2644 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2647 as_error
= as_fatal
;
2650 (*as_error
) (_("64bit mode not supported on `%s'."),
2651 cpu_arch_name
? cpu_arch_name
: default_arch
);
2653 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2656 as_error
= as_fatal
;
2659 (*as_error
) (_("32bit mode not supported on `%s'."),
2660 cpu_arch_name
? cpu_arch_name
: default_arch
);
2662 stackop_size
= '\0';
/* Pseudo-op handler: change code mode without the strict (fatal) check.  */

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2672 set_16bit_gcc_code_flag (int new_code_flag
)
2674 flag_code
= (enum flag_code
) new_code_flag
;
2675 if (flag_code
!= CODE_16BIT
)
2677 cpu_arch_flags
.bitfield
.cpu64
= 0;
2678 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2679 stackop_size
= LONG_MNEM_SUFFIX
;
2683 set_intel_syntax (int syntax_flag
)
2685 /* Find out if register prefixing is specified. */
2686 int ask_naked_reg
= 0;
2689 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2692 int e
= get_symbol_name (&string
);
2694 if (strcmp (string
, "prefix") == 0)
2696 else if (strcmp (string
, "noprefix") == 0)
2699 as_bad (_("bad argument to syntax directive."));
2700 (void) restore_line_pointer (e
);
2702 demand_empty_rest_of_line ();
2704 intel_syntax
= syntax_flag
;
2706 if (ask_naked_reg
== 0)
2707 allow_naked_reg
= (intel_syntax
2708 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2710 allow_naked_reg
= (ask_naked_reg
< 0);
2712 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2714 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2715 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2716 register_prefix
= allow_naked_reg
? "" : "%";
2720 set_intel_mnemonic (int mnemonic_flag
)
2722 intel_mnemonic
= mnemonic_flag
;
2726 set_allow_index_reg (int flag
)
2728 allow_index_reg
= flag
;
2732 set_check (int what
)
2734 enum check_kind
*kind
;
2739 kind
= &operand_check
;
2750 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2753 int e
= get_symbol_name (&string
);
2755 if (strcmp (string
, "none") == 0)
2757 else if (strcmp (string
, "warning") == 0)
2758 *kind
= check_warning
;
2759 else if (strcmp (string
, "error") == 0)
2760 *kind
= check_error
;
2762 as_bad (_("bad argument to %s_check directive."), str
);
2763 (void) restore_line_pointer (e
);
2766 as_bad (_("missing argument for %s_check directive"), str
);
2768 demand_empty_rest_of_line ();
2772 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2773 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2775 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2776 static const char *arch
;
2778 /* Intel LIOM is only supported on ELF. */
2784 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2785 use default_arch. */
2786 arch
= cpu_arch_name
;
2788 arch
= default_arch
;
2791 /* If we are targeting Intel MCU, we must enable it. */
2792 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_IAMCU
2793 || new_flag
.bitfield
.cpuiamcu
)
2796 /* If we are targeting Intel L1OM, we must enable it. */
2797 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2798 || new_flag
.bitfield
.cpul1om
)
2801 /* If we are targeting Intel K1OM, we must enable it. */
2802 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2803 || new_flag
.bitfield
.cpuk1om
)
2806 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2811 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2815 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2818 int e
= get_symbol_name (&string
);
2820 i386_cpu_flags flags
;
2822 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2824 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2826 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2830 cpu_arch_name
= cpu_arch
[j
].name
;
2831 cpu_sub_arch_name
= NULL
;
2832 cpu_arch_flags
= cpu_arch
[j
].flags
;
2833 if (flag_code
== CODE_64BIT
)
2835 cpu_arch_flags
.bitfield
.cpu64
= 1;
2836 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2840 cpu_arch_flags
.bitfield
.cpu64
= 0;
2841 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2843 cpu_arch_isa
= cpu_arch
[j
].type
;
2844 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2845 if (!cpu_arch_tune_set
)
2847 cpu_arch_tune
= cpu_arch_isa
;
2848 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2853 flags
= cpu_flags_or (cpu_arch_flags
,
2856 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2858 if (cpu_sub_arch_name
)
2860 char *name
= cpu_sub_arch_name
;
2861 cpu_sub_arch_name
= concat (name
,
2863 (const char *) NULL
);
2867 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2868 cpu_arch_flags
= flags
;
2869 cpu_arch_isa_flags
= flags
;
2873 = cpu_flags_or (cpu_arch_isa_flags
,
2875 (void) restore_line_pointer (e
);
2876 demand_empty_rest_of_line ();
2881 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2883 /* Disable an ISA extension. */
2884 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2885 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2887 flags
= cpu_flags_and_not (cpu_arch_flags
,
2888 cpu_noarch
[j
].flags
);
2889 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2891 if (cpu_sub_arch_name
)
2893 char *name
= cpu_sub_arch_name
;
2894 cpu_sub_arch_name
= concat (name
, string
,
2895 (const char *) NULL
);
2899 cpu_sub_arch_name
= xstrdup (string
);
2900 cpu_arch_flags
= flags
;
2901 cpu_arch_isa_flags
= flags
;
2903 (void) restore_line_pointer (e
);
2904 demand_empty_rest_of_line ();
2908 j
= ARRAY_SIZE (cpu_arch
);
2911 if (j
>= ARRAY_SIZE (cpu_arch
))
2912 as_bad (_("no such architecture: `%s'"), string
);
2914 *input_line_pointer
= e
;
2917 as_bad (_("missing cpu architecture"));
2919 no_cond_jump_promotion
= 0;
2920 if (*input_line_pointer
== ','
2921 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2926 ++input_line_pointer
;
2927 e
= get_symbol_name (&string
);
2929 if (strcmp (string
, "nojumps") == 0)
2930 no_cond_jump_promotion
= 1;
2931 else if (strcmp (string
, "jumps") == 0)
2934 as_bad (_("no such architecture modifier: `%s'"), string
);
2936 (void) restore_line_pointer (e
);
2939 demand_empty_rest_of_line ();
2942 enum bfd_architecture
2945 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2947 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2948 || flag_code
!= CODE_64BIT
)
2949 as_fatal (_("Intel L1OM is 64bit ELF only"));
2950 return bfd_arch_l1om
;
2952 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2954 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2955 || flag_code
!= CODE_64BIT
)
2956 as_fatal (_("Intel K1OM is 64bit ELF only"));
2957 return bfd_arch_k1om
;
2959 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2961 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2962 || flag_code
== CODE_64BIT
)
2963 as_fatal (_("Intel MCU is 32bit ELF only"));
2964 return bfd_arch_iamcu
;
2967 return bfd_arch_i386
;
2973 if (!strncmp (default_arch
, "x86_64", 6))
2975 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2977 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2978 || default_arch
[6] != '\0')
2979 as_fatal (_("Intel L1OM is 64bit ELF only"));
2980 return bfd_mach_l1om
;
2982 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2984 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2985 || default_arch
[6] != '\0')
2986 as_fatal (_("Intel K1OM is 64bit ELF only"));
2987 return bfd_mach_k1om
;
2989 else if (default_arch
[6] == '\0')
2990 return bfd_mach_x86_64
;
2992 return bfd_mach_x64_32
;
2994 else if (!strcmp (default_arch
, "i386")
2995 || !strcmp (default_arch
, "iamcu"))
2997 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2999 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
3000 as_fatal (_("Intel MCU is 32bit ELF only"));
3001 return bfd_mach_i386_iamcu
;
3004 return bfd_mach_i386_i386
;
3007 as_fatal (_("unknown architecture"));
3013 const char *hash_err
;
3015 /* Support pseudo prefixes like {disp32}. */
3016 lex_type
['{'] = LEX_BEGIN_NAME
;
3018 /* Initialize op_hash hash table. */
3019 op_hash
= hash_new ();
3022 const insn_template
*optab
;
3023 templates
*core_optab
;
3025 /* Setup for loop. */
3027 core_optab
= XNEW (templates
);
3028 core_optab
->start
= optab
;
3033 if (optab
->name
== NULL
3034 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
3036 /* different name --> ship out current template list;
3037 add to hash table; & begin anew. */
3038 core_optab
->end
= optab
;
3039 hash_err
= hash_insert (op_hash
,
3041 (void *) core_optab
);
3044 as_fatal (_("can't hash %s: %s"),
3048 if (optab
->name
== NULL
)
3050 core_optab
= XNEW (templates
);
3051 core_optab
->start
= optab
;
3056 /* Initialize reg_hash hash table. */
3057 reg_hash
= hash_new ();
3059 const reg_entry
*regtab
;
3060 unsigned int regtab_size
= i386_regtab_size
;
3062 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
3064 hash_err
= hash_insert (reg_hash
, regtab
->reg_name
, (void *) regtab
);
3066 as_fatal (_("can't hash %s: %s"),
3072 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3077 for (c
= 0; c
< 256; c
++)
3082 mnemonic_chars
[c
] = c
;
3083 register_chars
[c
] = c
;
3084 operand_chars
[c
] = c
;
3086 else if (ISLOWER (c
))
3088 mnemonic_chars
[c
] = c
;
3089 register_chars
[c
] = c
;
3090 operand_chars
[c
] = c
;
3092 else if (ISUPPER (c
))
3094 mnemonic_chars
[c
] = TOLOWER (c
);
3095 register_chars
[c
] = mnemonic_chars
[c
];
3096 operand_chars
[c
] = c
;
3098 else if (c
== '{' || c
== '}')
3100 mnemonic_chars
[c
] = c
;
3101 operand_chars
[c
] = c
;
3104 if (ISALPHA (c
) || ISDIGIT (c
))
3105 identifier_chars
[c
] = c
;
3108 identifier_chars
[c
] = c
;
3109 operand_chars
[c
] = c
;
3114 identifier_chars
['@'] = '@';
3117 identifier_chars
['?'] = '?';
3118 operand_chars
['?'] = '?';
3120 digit_chars
['-'] = '-';
3121 mnemonic_chars
['_'] = '_';
3122 mnemonic_chars
['-'] = '-';
3123 mnemonic_chars
['.'] = '.';
3124 identifier_chars
['_'] = '_';
3125 identifier_chars
['.'] = '.';
3127 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
3128 operand_chars
[(unsigned char) *p
] = *p
;
3131 if (flag_code
== CODE_64BIT
)
3133 #if defined (OBJ_COFF) && defined (TE_PE)
3134 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
3137 x86_dwarf2_return_column
= 16;
3139 x86_cie_data_alignment
= -8;
3143 x86_dwarf2_return_column
= 8;
3144 x86_cie_data_alignment
= -4;
3147 /* NB: FUSED_JCC_PADDING frag must have sufficient room so that it
3148 can be turned into BRANCH_PREFIX frag. */
3149 if (align_branch_prefix_size
> MAX_FUSED_JCC_PADDING_SIZE
)
3154 i386_print_statistics (FILE *file
)
3156 hash_print_statistics (file
, "i386 opcode", op_hash
);
3157 hash_print_statistics (file
, "i386 register", reg_hash
);
3162 /* Debugging routines for md_assemble. */
3163 static void pte (insn_template
*);
3164 static void pt (i386_operand_type
);
3165 static void pe (expressionS
*);
3166 static void ps (symbolS
*);
3169 pi (const char *line
, i386_insn
*x
)
3173 fprintf (stdout
, "%s: template ", line
);
3175 fprintf (stdout
, " address: base %s index %s scale %x\n",
3176 x
->base_reg
? x
->base_reg
->reg_name
: "none",
3177 x
->index_reg
? x
->index_reg
->reg_name
: "none",
3178 x
->log2_scale_factor
);
3179 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
3180 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
3181 fprintf (stdout
, " sib: base %x index %x scale %x\n",
3182 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
3183 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
3184 (x
->rex
& REX_W
) != 0,
3185 (x
->rex
& REX_R
) != 0,
3186 (x
->rex
& REX_X
) != 0,
3187 (x
->rex
& REX_B
) != 0);
3188 for (j
= 0; j
< x
->operands
; j
++)
3190 fprintf (stdout
, " #%d: ", j
+ 1);
3192 fprintf (stdout
, "\n");
3193 if (x
->types
[j
].bitfield
.class == Reg
3194 || x
->types
[j
].bitfield
.class == RegMMX
3195 || x
->types
[j
].bitfield
.class == RegSIMD
3196 || x
->types
[j
].bitfield
.class == SReg
3197 || x
->types
[j
].bitfield
.class == RegCR
3198 || x
->types
[j
].bitfield
.class == RegDR
3199 || x
->types
[j
].bitfield
.class == RegTR
)
3200 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
3201 if (operand_type_check (x
->types
[j
], imm
))
3203 if (operand_type_check (x
->types
[j
], disp
))
3204 pe (x
->op
[j
].disps
);
3209 pte (insn_template
*t
)
3212 fprintf (stdout
, " %d operands ", t
->operands
);
3213 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
3214 if (t
->extension_opcode
!= None
)
3215 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
3216 if (t
->opcode_modifier
.d
)
3217 fprintf (stdout
, "D");
3218 if (t
->opcode_modifier
.w
)
3219 fprintf (stdout
, "W");
3220 fprintf (stdout
, "\n");
3221 for (j
= 0; j
< t
->operands
; j
++)
3223 fprintf (stdout
, " #%d type ", j
+ 1);
3224 pt (t
->operand_types
[j
]);
3225 fprintf (stdout
, "\n");
3232 fprintf (stdout
, " operation %d\n", e
->X_op
);
3233 fprintf (stdout
, " add_number %ld (%lx)\n",
3234 (long) e
->X_add_number
, (long) e
->X_add_number
);
3235 if (e
->X_add_symbol
)
3237 fprintf (stdout
, " add_symbol ");
3238 ps (e
->X_add_symbol
);
3239 fprintf (stdout
, "\n");
3243 fprintf (stdout
, " op_symbol ");
3244 ps (e
->X_op_symbol
);
3245 fprintf (stdout
, "\n");
3252 fprintf (stdout
, "%s type %s%s",
3254 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3255 segment_name (S_GET_SEGMENT (s
)));
3258 static struct type_name
3260 i386_operand_type mask
;
3263 const type_names
[] =
3265 { OPERAND_TYPE_REG8
, "r8" },
3266 { OPERAND_TYPE_REG16
, "r16" },
3267 { OPERAND_TYPE_REG32
, "r32" },
3268 { OPERAND_TYPE_REG64
, "r64" },
3269 { OPERAND_TYPE_ACC8
, "acc8" },
3270 { OPERAND_TYPE_ACC16
, "acc16" },
3271 { OPERAND_TYPE_ACC32
, "acc32" },
3272 { OPERAND_TYPE_ACC64
, "acc64" },
3273 { OPERAND_TYPE_IMM8
, "i8" },
3274 { OPERAND_TYPE_IMM8
, "i8s" },
3275 { OPERAND_TYPE_IMM16
, "i16" },
3276 { OPERAND_TYPE_IMM32
, "i32" },
3277 { OPERAND_TYPE_IMM32S
, "i32s" },
3278 { OPERAND_TYPE_IMM64
, "i64" },
3279 { OPERAND_TYPE_IMM1
, "i1" },
3280 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
3281 { OPERAND_TYPE_DISP8
, "d8" },
3282 { OPERAND_TYPE_DISP16
, "d16" },
3283 { OPERAND_TYPE_DISP32
, "d32" },
3284 { OPERAND_TYPE_DISP32S
, "d32s" },
3285 { OPERAND_TYPE_DISP64
, "d64" },
3286 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
3287 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
3288 { OPERAND_TYPE_CONTROL
, "control reg" },
3289 { OPERAND_TYPE_TEST
, "test reg" },
3290 { OPERAND_TYPE_DEBUG
, "debug reg" },
3291 { OPERAND_TYPE_FLOATREG
, "FReg" },
3292 { OPERAND_TYPE_FLOATACC
, "FAcc" },
3293 { OPERAND_TYPE_SREG
, "SReg" },
3294 { OPERAND_TYPE_REGMMX
, "rMMX" },
3295 { OPERAND_TYPE_REGXMM
, "rXMM" },
3296 { OPERAND_TYPE_REGYMM
, "rYMM" },
3297 { OPERAND_TYPE_REGZMM
, "rZMM" },
3298 { OPERAND_TYPE_REGMASK
, "Mask reg" },
3302 pt (i386_operand_type t
)
3305 i386_operand_type a
;
3307 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3309 a
= operand_type_and (t
, type_names
[j
].mask
);
3310 if (operand_type_equal (&a
, &type_names
[j
].mask
))
3311 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3316 #endif /* DEBUG386 */
3318 static bfd_reloc_code_real_type
3319 reloc (unsigned int size
,
3322 bfd_reloc_code_real_type other
)
3324 if (other
!= NO_RELOC
)
3326 reloc_howto_type
*rel
;
3331 case BFD_RELOC_X86_64_GOT32
:
3332 return BFD_RELOC_X86_64_GOT64
;
3334 case BFD_RELOC_X86_64_GOTPLT64
:
3335 return BFD_RELOC_X86_64_GOTPLT64
;
3337 case BFD_RELOC_X86_64_PLTOFF64
:
3338 return BFD_RELOC_X86_64_PLTOFF64
;
3340 case BFD_RELOC_X86_64_GOTPC32
:
3341 other
= BFD_RELOC_X86_64_GOTPC64
;
3343 case BFD_RELOC_X86_64_GOTPCREL
:
3344 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3346 case BFD_RELOC_X86_64_TPOFF32
:
3347 other
= BFD_RELOC_X86_64_TPOFF64
;
3349 case BFD_RELOC_X86_64_DTPOFF32
:
3350 other
= BFD_RELOC_X86_64_DTPOFF64
;
3356 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3357 if (other
== BFD_RELOC_SIZE32
)
3360 other
= BFD_RELOC_SIZE64
;
3363 as_bad (_("there are no pc-relative size relocations"));
3369 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3370 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3373 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3375 as_bad (_("unknown relocation (%u)"), other
);
3376 else if (size
!= bfd_get_reloc_size (rel
))
3377 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3378 bfd_get_reloc_size (rel
),
3380 else if (pcrel
&& !rel
->pc_relative
)
3381 as_bad (_("non-pc-relative relocation for pc-relative field"));
3382 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3384 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3386 as_bad (_("relocated field and relocation type differ in signedness"));
3395 as_bad (_("there are no unsigned pc-relative relocations"));
3398 case 1: return BFD_RELOC_8_PCREL
;
3399 case 2: return BFD_RELOC_16_PCREL
;
3400 case 4: return BFD_RELOC_32_PCREL
;
3401 case 8: return BFD_RELOC_64_PCREL
;
3403 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3410 case 4: return BFD_RELOC_X86_64_32S
;
3415 case 1: return BFD_RELOC_8
;
3416 case 2: return BFD_RELOC_16
;
3417 case 4: return BFD_RELOC_32
;
3418 case 8: return BFD_RELOC_64
;
3420 as_bad (_("cannot do %s %u byte relocation"),
3421 sign
> 0 ? "signed" : "unsigned", size
);
3427 /* Here we decide which fixups can be adjusted to make them relative to
3428 the beginning of the section instead of the symbol. Basically we need
3429 to make sure that the dynamic relocations are done correctly, so in
3430 some cases we force the original symbol to be used. */
3433 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3435 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3439 /* Don't adjust pc-relative references to merge sections in 64-bit
3441 if (use_rela_relocations
3442 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3446 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3447 and changed later by validate_fix. */
3448 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3449 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3452 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3453 for size relocations. */
3454 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3455 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3456 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3457 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3458 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3459 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3460 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3461 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3462 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3463 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3464 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3465 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3466 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3467 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3468 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3469 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3470 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3471 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3472 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3473 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3474 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3475 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3476 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3477 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3478 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3479 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3480 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3481 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3482 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3483 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3484 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
/* Classify x87 mnemonic MNEMONIC for Intel-syntax memory operand size
   checking: 0 = not a math op, 1 = plain FP op, 2 = integer op,
   3 = control/state op.  */

static int
intel_float_operand (const char *mnemonic)
{
  /* Note that the value returned is meaningful only for opcodes with (memory)
     operands, hence the code here is free to improperly handle opcodes that
     have no operands (for better performance and smaller code). */

  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  switch (mnemonic[1])
    {
    /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
       the fs segment override prefix not currently handled because no
       call path can make opcodes without operands get here */
    case 'i':
      return 2 /* integer op */;
    case 'l':
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3; /* fldcw/fldenv */
      break;
    case 'n':
      if (mnemonic[2] != 'o' /* fnop */)
	return 3; /* non-waiting control op */
      break;
    case 'r':
      if (mnemonic[2] == 's')
	return 3; /* frstor/frstpm */
      break;
    case 's':
      if (mnemonic[2] == 'a')
	return 3; /* fsave */
      if (mnemonic[2] == 't')
	{
	  switch (mnemonic[3])
	    {
	    case 'c': /* fstcw */
	    case 'd': /* fstdw */
	    case 'e': /* fstenv */
	    case 's': /* fsts[gw] */
	      return 3;
	    }
	}
      break;
    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0; /* fxsave/fxrstor are not really math ops */
      break;
    }

  return 1;
}
3543 /* Build the VEX prefix. */
3546 build_vex_prefix (const insn_template
*t
)
3548 unsigned int register_specifier
;
3549 unsigned int implied_prefix
;
3550 unsigned int vector_length
;
3553 /* Check register specifier. */
3554 if (i
.vex
.register_specifier
)
3556 register_specifier
=
3557 ~register_number (i
.vex
.register_specifier
) & 0xf;
3558 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3561 register_specifier
= 0xf;
3563 /* Use 2-byte VEX prefix by swapping destination and source operand
3564 if there are more than 1 register operand. */
3565 if (i
.reg_operands
> 1
3566 && i
.vec_encoding
!= vex_encoding_vex3
3567 && i
.dir_encoding
== dir_encoding_default
3568 && i
.operands
== i
.reg_operands
3569 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3570 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3571 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3574 unsigned int xchg
= i
.operands
- 1;
3575 union i386_op temp_op
;
3576 i386_operand_type temp_type
;
3578 temp_type
= i
.types
[xchg
];
3579 i
.types
[xchg
] = i
.types
[0];
3580 i
.types
[0] = temp_type
;
3581 temp_op
= i
.op
[xchg
];
3582 i
.op
[xchg
] = i
.op
[0];
3585 gas_assert (i
.rm
.mode
== 3);
3589 i
.rm
.regmem
= i
.rm
.reg
;
3592 if (i
.tm
.opcode_modifier
.d
)
3593 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3594 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
3595 else /* Use the next insn. */
3599 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3600 are no memory operands and at least 3 register ones. */
3601 if (i
.reg_operands
>= 3
3602 && i
.vec_encoding
!= vex_encoding_vex3
3603 && i
.reg_operands
== i
.operands
- i
.imm_operands
3604 && i
.tm
.opcode_modifier
.vex
3605 && i
.tm
.opcode_modifier
.commutative
3606 && (i
.tm
.opcode_modifier
.sse2avx
|| optimize
> 1)
3608 && i
.vex
.register_specifier
3609 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3611 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3612 union i386_op temp_op
;
3613 i386_operand_type temp_type
;
3615 gas_assert (i
.tm
.opcode_modifier
.vexopcode
== VEX0F
);
3616 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3617 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3618 &i
.types
[i
.operands
- 3]));
3619 gas_assert (i
.rm
.mode
== 3);
3621 temp_type
= i
.types
[xchg
];
3622 i
.types
[xchg
] = i
.types
[xchg
+ 1];
3623 i
.types
[xchg
+ 1] = temp_type
;
3624 temp_op
= i
.op
[xchg
];
3625 i
.op
[xchg
] = i
.op
[xchg
+ 1];
3626 i
.op
[xchg
+ 1] = temp_op
;
3629 xchg
= i
.rm
.regmem
| 8;
3630 i
.rm
.regmem
= ~register_specifier
& 0xf;
3631 gas_assert (!(i
.rm
.regmem
& 8));
3632 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3633 register_specifier
= ~xchg
& 0xf;
3636 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3637 vector_length
= avxscalar
;
3638 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3644 /* Determine vector length from the last multi-length vector
3647 for (op
= t
->operands
; op
--;)
3648 if (t
->operand_types
[op
].bitfield
.xmmword
3649 && t
->operand_types
[op
].bitfield
.ymmword
3650 && i
.types
[op
].bitfield
.ymmword
)
3657 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3662 case DATA_PREFIX_OPCODE
:
3665 case REPE_PREFIX_OPCODE
:
3668 case REPNE_PREFIX_OPCODE
:
3675 /* Check the REX.W bit and VEXW. */
3676 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3677 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3678 else if (i
.tm
.opcode_modifier
.vexw
)
3679 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3681 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3683 /* Use 2-byte VEX prefix if possible. */
3685 && i
.vec_encoding
!= vex_encoding_vex3
3686 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3687 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3689 /* 2-byte VEX prefix. */
3693 i
.vex
.bytes
[0] = 0xc5;
3695 /* Check the REX.R bit. */
3696 r
= (i
.rex
& REX_R
) ? 0 : 1;
3697 i
.vex
.bytes
[1] = (r
<< 7
3698 | register_specifier
<< 3
3699 | vector_length
<< 2
3704 /* 3-byte VEX prefix. */
3709 switch (i
.tm
.opcode_modifier
.vexopcode
)
3713 i
.vex
.bytes
[0] = 0xc4;
3717 i
.vex
.bytes
[0] = 0xc4;
3721 i
.vex
.bytes
[0] = 0xc4;
3725 i
.vex
.bytes
[0] = 0x8f;
3729 i
.vex
.bytes
[0] = 0x8f;
3733 i
.vex
.bytes
[0] = 0x8f;
3739 /* The high 3 bits of the second VEX byte are 1's compliment
3740 of RXB bits from REX. */
3741 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3743 i
.vex
.bytes
[2] = (w
<< 7
3744 | register_specifier
<< 3
3745 | vector_length
<< 2
3750 static INLINE bfd_boolean
3751 is_evex_encoding (const insn_template
*t
)
3753 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3754 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3755 || t
->opcode_modifier
.sae
;
3758 static INLINE bfd_boolean
3759 is_any_vex_encoding (const insn_template
*t
)
3761 return t
->opcode_modifier
.vex
|| t
->opcode_modifier
.vexopcode
3762 || is_evex_encoding (t
);
3765 /* Build the EVEX prefix. */
3768 build_evex_prefix (void)
3770 unsigned int register_specifier
;
3771 unsigned int implied_prefix
;
3773 rex_byte vrex_used
= 0;
3775 /* Check register specifier. */
3776 if (i
.vex
.register_specifier
)
3778 gas_assert ((i
.vrex
& REX_X
) == 0);
3780 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3781 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3782 register_specifier
+= 8;
3783 /* The upper 16 registers are encoded in the fourth byte of the
3785 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3786 i
.vex
.bytes
[3] = 0x8;
3787 register_specifier
= ~register_specifier
& 0xf;
3791 register_specifier
= 0xf;
3793 /* Encode upper 16 vector index register in the fourth byte of
3795 if (!(i
.vrex
& REX_X
))
3796 i
.vex
.bytes
[3] = 0x8;
3801 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3806 case DATA_PREFIX_OPCODE
:
3809 case REPE_PREFIX_OPCODE
:
3812 case REPNE_PREFIX_OPCODE
:
3819 /* 4 byte EVEX prefix. */
3821 i
.vex
.bytes
[0] = 0x62;
3824 switch (i
.tm
.opcode_modifier
.vexopcode
)
3840 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3842 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3844 /* The fifth bit of the second EVEX byte is 1's compliment of the
3845 REX_R bit in VREX. */
3846 if (!(i
.vrex
& REX_R
))
3847 i
.vex
.bytes
[1] |= 0x10;
3851 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3853 /* When all operands are registers, the REX_X bit in REX is not
3854 used. We reuse it to encode the upper 16 registers, which is
3855 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3856 as 1's compliment. */
3857 if ((i
.vrex
& REX_B
))
3860 i
.vex
.bytes
[1] &= ~0x40;
3864 /* EVEX instructions shouldn't need the REX prefix. */
3865 i
.vrex
&= ~vrex_used
;
3866 gas_assert (i
.vrex
== 0);
3868 /* Check the REX.W bit and VEXW. */
3869 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3870 w
= (evexwig
== evexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3871 else if (i
.tm
.opcode_modifier
.vexw
)
3872 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3874 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: evexwig
== evexw1
) ? 1 : 0;
3876 /* Encode the U bit. */
3877 implied_prefix
|= 0x4;
3879 /* The third byte of the EVEX prefix. */
3880 i
.vex
.bytes
[2] = (w
<< 7 | register_specifier
<< 3 | implied_prefix
);
3882 /* The fourth byte of the EVEX prefix. */
3883 /* The zeroing-masking bit. */
3884 if (i
.mask
&& i
.mask
->zeroing
)
3885 i
.vex
.bytes
[3] |= 0x80;
3887 /* Don't always set the broadcast bit if there is no RC. */
3890 /* Encode the vector length. */
3891 unsigned int vec_length
;
3893 if (!i
.tm
.opcode_modifier
.evex
3894 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
3898 /* Determine vector length from the last multi-length vector
3901 for (op
= i
.operands
; op
--;)
3902 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
3903 + i
.tm
.operand_types
[op
].bitfield
.ymmword
3904 + i
.tm
.operand_types
[op
].bitfield
.zmmword
> 1)
3906 if (i
.types
[op
].bitfield
.zmmword
)
3908 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3911 else if (i
.types
[op
].bitfield
.ymmword
)
3913 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3916 else if (i
.types
[op
].bitfield
.xmmword
)
3918 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3921 else if (i
.broadcast
&& (int) op
== i
.broadcast
->operand
)
3923 switch (i
.broadcast
->bytes
)
3926 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3929 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3932 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3941 if (op
>= MAX_OPERANDS
)
3945 switch (i
.tm
.opcode_modifier
.evex
)
3947 case EVEXLIG
: /* LL' is ignored */
3948 vec_length
= evexlig
<< 5;
3951 vec_length
= 0 << 5;
3954 vec_length
= 1 << 5;
3957 vec_length
= 2 << 5;
3963 i
.vex
.bytes
[3] |= vec_length
;
3964 /* Encode the broadcast bit. */
3966 i
.vex
.bytes
[3] |= 0x10;
3970 if (i
.rounding
->type
!= saeonly
)
3971 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
->type
<< 5);
3973 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3976 if (i
.mask
&& i
.mask
->mask
)
3977 i
.vex
.bytes
[3] |= i
.mask
->mask
->reg_num
;
3981 process_immext (void)
3985 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3986 which is coded in the same place as an 8-bit immediate field
3987 would be. Here we fake an 8-bit immediate operand from the
3988 opcode suffix stored in tm.extension_opcode.
3990 AVX instructions also use this encoding, for some of
3991 3 argument instructions. */
3993 gas_assert (i
.imm_operands
<= 1
3995 || (is_any_vex_encoding (&i
.tm
)
3996 && i
.operands
<= 4)));
3998 exp
= &im_expressions
[i
.imm_operands
++];
3999 i
.op
[i
.operands
].imms
= exp
;
4000 i
.types
[i
.operands
] = imm8
;
4002 exp
->X_op
= O_constant
;
4003 exp
->X_add_number
= i
.tm
.extension_opcode
;
4004 i
.tm
.extension_opcode
= None
;
4011 switch (i
.tm
.opcode_modifier
.hleprefixok
)
4016 as_bad (_("invalid instruction `%s' after `%s'"),
4017 i
.tm
.name
, i
.hle_prefix
);
4020 if (i
.prefix
[LOCK_PREFIX
])
4022 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
4026 case HLEPrefixRelease
:
4027 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
4029 as_bad (_("instruction `%s' after `xacquire' not allowed"),
4033 if (i
.mem_operands
== 0 || !(i
.flags
[i
.operands
- 1] & Operand_Mem
))
4035 as_bad (_("memory destination needed for instruction `%s'"
4036 " after `xrelease'"), i
.tm
.name
);
4043 /* Try the shortest encoding by shortening operand size. */
4046 optimize_encoding (void)
4050 if (optimize_for_space
4051 && !is_any_vex_encoding (&i
.tm
)
4052 && i
.reg_operands
== 1
4053 && i
.imm_operands
== 1
4054 && !i
.types
[1].bitfield
.byte
4055 && i
.op
[0].imms
->X_op
== O_constant
4056 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4057 && (i
.tm
.base_opcode
== 0xa8
4058 || (i
.tm
.base_opcode
== 0xf6
4059 && i
.tm
.extension_opcode
== 0x0)))
4062 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
4064 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
4065 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
4067 i
.types
[1].bitfield
.byte
= 1;
4068 /* Ignore the suffix. */
4070 /* Convert to byte registers. */
4071 if (i
.types
[1].bitfield
.word
)
4073 else if (i
.types
[1].bitfield
.dword
)
4077 if (!(i
.op
[1].regs
->reg_flags
& RegRex
) && base_regnum
< 4)
4082 else if (flag_code
== CODE_64BIT
4083 && !is_any_vex_encoding (&i
.tm
)
4084 && ((i
.types
[1].bitfield
.qword
4085 && i
.reg_operands
== 1
4086 && i
.imm_operands
== 1
4087 && i
.op
[0].imms
->X_op
== O_constant
4088 && ((i
.tm
.base_opcode
== 0xb8
4089 && i
.tm
.extension_opcode
== None
4090 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
4091 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
4092 && ((i
.tm
.base_opcode
== 0x24
4093 || i
.tm
.base_opcode
== 0xa8)
4094 || (i
.tm
.base_opcode
== 0x80
4095 && i
.tm
.extension_opcode
== 0x4)
4096 || ((i
.tm
.base_opcode
== 0xf6
4097 || (i
.tm
.base_opcode
| 1) == 0xc7)
4098 && i
.tm
.extension_opcode
== 0x0)))
4099 || (fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4100 && i
.tm
.base_opcode
== 0x83
4101 && i
.tm
.extension_opcode
== 0x4)))
4102 || (i
.types
[0].bitfield
.qword
4103 && ((i
.reg_operands
== 2
4104 && i
.op
[0].regs
== i
.op
[1].regs
4105 && (i
.tm
.base_opcode
== 0x30
4106 || i
.tm
.base_opcode
== 0x28))
4107 || (i
.reg_operands
== 1
4109 && i
.tm
.base_opcode
== 0x30)))))
4112 andq $imm31, %r64 -> andl $imm31, %r32
4113 andq $imm7, %r64 -> andl $imm7, %r32
4114 testq $imm31, %r64 -> testl $imm31, %r32
4115 xorq %r64, %r64 -> xorl %r32, %r32
4116 subq %r64, %r64 -> subl %r32, %r32
4117 movq $imm31, %r64 -> movl $imm31, %r32
4118 movq $imm32, %r64 -> movl $imm32, %r32
4120 i
.tm
.opcode_modifier
.norex64
= 1;
4121 if (i
.tm
.base_opcode
== 0xb8 || (i
.tm
.base_opcode
| 1) == 0xc7)
4124 movq $imm31, %r64 -> movl $imm31, %r32
4125 movq $imm32, %r64 -> movl $imm32, %r32
4127 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
4128 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
4129 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
4130 i
.types
[0].bitfield
.imm32
= 1;
4131 i
.types
[0].bitfield
.imm32s
= 0;
4132 i
.types
[0].bitfield
.imm64
= 0;
4133 i
.types
[1].bitfield
.dword
= 1;
4134 i
.types
[1].bitfield
.qword
= 0;
4135 if ((i
.tm
.base_opcode
| 1) == 0xc7)
4138 movq $imm31, %r64 -> movl $imm31, %r32
4140 i
.tm
.base_opcode
= 0xb8;
4141 i
.tm
.extension_opcode
= None
;
4142 i
.tm
.opcode_modifier
.w
= 0;
4143 i
.tm
.opcode_modifier
.modrm
= 0;
4147 else if (optimize
> 1
4148 && !optimize_for_space
4149 && !is_any_vex_encoding (&i
.tm
)
4150 && i
.reg_operands
== 2
4151 && i
.op
[0].regs
== i
.op
[1].regs
4152 && ((i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x8
4153 || (i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x20)
4154 && (flag_code
!= CODE_64BIT
|| !i
.types
[0].bitfield
.dword
))
4157 andb %rN, %rN -> testb %rN, %rN
4158 andw %rN, %rN -> testw %rN, %rN
4159 andq %rN, %rN -> testq %rN, %rN
4160 orb %rN, %rN -> testb %rN, %rN
4161 orw %rN, %rN -> testw %rN, %rN
4162 orq %rN, %rN -> testq %rN, %rN
4164 and outside of 64-bit mode
4166 andl %rN, %rN -> testl %rN, %rN
4167 orl %rN, %rN -> testl %rN, %rN
4169 i
.tm
.base_opcode
= 0x84 | (i
.tm
.base_opcode
& 1);
4171 else if (i
.reg_operands
== 3
4172 && i
.op
[0].regs
== i
.op
[1].regs
4173 && !i
.types
[2].bitfield
.xmmword
4174 && (i
.tm
.opcode_modifier
.vex
4175 || ((!i
.mask
|| i
.mask
->zeroing
)
4177 && is_evex_encoding (&i
.tm
)
4178 && (i
.vec_encoding
!= vex_encoding_evex
4179 || cpu_arch_isa_flags
.bitfield
.cpuavx512vl
4180 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
4181 || (i
.tm
.operand_types
[2].bitfield
.zmmword
4182 && i
.types
[2].bitfield
.ymmword
))))
4183 && ((i
.tm
.base_opcode
== 0x55
4184 || i
.tm
.base_opcode
== 0x6655
4185 || i
.tm
.base_opcode
== 0x66df
4186 || i
.tm
.base_opcode
== 0x57
4187 || i
.tm
.base_opcode
== 0x6657
4188 || i
.tm
.base_opcode
== 0x66ef
4189 || i
.tm
.base_opcode
== 0x66f8
4190 || i
.tm
.base_opcode
== 0x66f9
4191 || i
.tm
.base_opcode
== 0x66fa
4192 || i
.tm
.base_opcode
== 0x66fb
4193 || i
.tm
.base_opcode
== 0x42
4194 || i
.tm
.base_opcode
== 0x6642
4195 || i
.tm
.base_opcode
== 0x47
4196 || i
.tm
.base_opcode
== 0x6647)
4197 && i
.tm
.extension_opcode
== None
))
4200 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4202 EVEX VOP %zmmM, %zmmM, %zmmN
4203 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4204 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4205 EVEX VOP %ymmM, %ymmM, %ymmN
4206 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4207 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4208 VEX VOP %ymmM, %ymmM, %ymmN
4209 -> VEX VOP %xmmM, %xmmM, %xmmN
4210 VOP, one of vpandn and vpxor:
4211 VEX VOP %ymmM, %ymmM, %ymmN
4212 -> VEX VOP %xmmM, %xmmM, %xmmN
4213 VOP, one of vpandnd and vpandnq:
4214 EVEX VOP %zmmM, %zmmM, %zmmN
4215 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4216 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4217 EVEX VOP %ymmM, %ymmM, %ymmN
4218 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4219 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4220 VOP, one of vpxord and vpxorq:
4221 EVEX VOP %zmmM, %zmmM, %zmmN
4222 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4223 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4224 EVEX VOP %ymmM, %ymmM, %ymmN
4225 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4226 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4227 VOP, one of kxord and kxorq:
4228 VEX VOP %kM, %kM, %kN
4229 -> VEX kxorw %kM, %kM, %kN
4230 VOP, one of kandnd and kandnq:
4231 VEX VOP %kM, %kM, %kN
4232 -> VEX kandnw %kM, %kM, %kN
4234 if (is_evex_encoding (&i
.tm
))
4236 if (i
.vec_encoding
!= vex_encoding_evex
)
4238 i
.tm
.opcode_modifier
.vex
= VEX128
;
4239 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4240 i
.tm
.opcode_modifier
.evex
= 0;
4242 else if (optimize
> 1)
4243 i
.tm
.opcode_modifier
.evex
= EVEX128
;
4247 else if (i
.tm
.operand_types
[0].bitfield
.class == RegMask
)
4249 i
.tm
.base_opcode
&= 0xff;
4250 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4253 i
.tm
.opcode_modifier
.vex
= VEX128
;
4255 if (i
.tm
.opcode_modifier
.vex
)
4256 for (j
= 0; j
< 3; j
++)
4258 i
.types
[j
].bitfield
.xmmword
= 1;
4259 i
.types
[j
].bitfield
.ymmword
= 0;
4262 else if (i
.vec_encoding
!= vex_encoding_evex
4263 && !i
.types
[0].bitfield
.zmmword
4264 && !i
.types
[1].bitfield
.zmmword
4267 && is_evex_encoding (&i
.tm
)
4268 && ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x666f
4269 || (i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf36f
4270 || (i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf26f
4271 || (i
.tm
.base_opcode
& ~4) == 0x66db
4272 || (i
.tm
.base_opcode
& ~4) == 0x66eb)
4273 && i
.tm
.extension_opcode
== None
)
4276 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4277 vmovdqu32 and vmovdqu64:
4278 EVEX VOP %xmmM, %xmmN
4279 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4280 EVEX VOP %ymmM, %ymmN
4281 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4283 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4285 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4287 -> VEX mvmovdqa|vmovdquem, %xmmN (N < 16)
4289 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4290 VOP, one of vpand, vpandn, vpor, vpxor:
4291 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4292 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4293 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4294 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4295 EVEX VOP{d,q} mem, %xmmM, %xmmN
4296 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4297 EVEX VOP{d,q} mem, %ymmM, %ymmN
4298 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4300 for (j
= 0; j
< i
.operands
; j
++)
4301 if (operand_type_check (i
.types
[j
], disp
)
4302 && i
.op
[j
].disps
->X_op
== O_constant
)
4304 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4305 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4306 bytes, we choose EVEX Disp8 over VEX Disp32. */
4307 int evex_disp8
, vex_disp8
;
4308 unsigned int memshift
= i
.memshift
;
4309 offsetT n
= i
.op
[j
].disps
->X_add_number
;
4311 evex_disp8
= fits_in_disp8 (n
);
4313 vex_disp8
= fits_in_disp8 (n
);
4314 if (evex_disp8
!= vex_disp8
)
4316 i
.memshift
= memshift
;
4320 i
.types
[j
].bitfield
.disp8
= vex_disp8
;
4323 if ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf26f)
4324 i
.tm
.base_opcode
^= 0xf36f ^ 0xf26f;
4325 i
.tm
.opcode_modifier
.vex
4326 = i
.types
[0].bitfield
.ymmword
? VEX256
: VEX128
;
4327 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4328 /* VPAND, VPOR, and VPXOR are commutative. */
4329 if (i
.reg_operands
== 3 && i
.tm
.base_opcode
!= 0x66df)
4330 i
.tm
.opcode_modifier
.commutative
= 1;
4331 i
.tm
.opcode_modifier
.evex
= 0;
4332 i
.tm
.opcode_modifier
.masking
= 0;
4333 i
.tm
.opcode_modifier
.broadcast
= 0;
4334 i
.tm
.opcode_modifier
.disp8memshift
= 0;
4337 i
.types
[j
].bitfield
.disp8
4338 = fits_in_disp8 (i
.op
[j
].disps
->X_add_number
);
4342 /* Return non-zero for load instruction. */
4348 int any_vex_p
= is_any_vex_encoding (&i
.tm
);
4349 unsigned int base_opcode
= i
.tm
.base_opcode
| 1;
4354 if (i
.tm
.base_opcode
== 0x8d)
4358 if ((i
.tm
.base_opcode
& ~7) == 0x58
4359 || (i
.tm
.base_opcode
== 0x8f && i
.tm
.extension_opcode
== 0))
4362 /* movs, cmps, lods, scas. */
4363 if ((i
.tm
.base_opcode
| 0xb) == 0xaf)
4367 if (base_opcode
== 0x6f)
4371 /* No memory operand. */
4372 if (!i
.mem_operands
)
4378 if (i
.tm
.base_opcode
== 0xae
4379 && i
.tm
.opcode_modifier
.vex
4380 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
4381 && i
.tm
.extension_opcode
== 2)
4386 /* test, not, neg, mul, imul, div, idiv. */
4387 if ((i
.tm
.base_opcode
== 0xf6 || i
.tm
.base_opcode
== 0xf7)
4388 && i
.tm
.extension_opcode
!= 1)
4392 if (base_opcode
== 0xff && i
.tm
.extension_opcode
<= 1)
4395 /* add, or, adc, sbb, and, sub, xor, cmp. */
4396 if (i
.tm
.base_opcode
>= 0x80 && i
.tm
.base_opcode
<= 0x83)
4399 /* bt, bts, btr, btc. */
4400 if (i
.tm
.base_opcode
== 0xfba
4401 && (i
.tm
.extension_opcode
>= 4 && i
.tm
.extension_opcode
<= 7))
4404 /* rol, ror, rcl, rcr, shl/sal, shr, sar. */
4405 if ((base_opcode
== 0xc1
4406 || (i
.tm
.base_opcode
>= 0xd0 && i
.tm
.base_opcode
<= 0xd3))
4407 && i
.tm
.extension_opcode
!= 6)
4410 /* cmpxchg8b, cmpxchg16b, xrstors. */
4411 if (i
.tm
.base_opcode
== 0xfc7
4412 && (i
.tm
.extension_opcode
== 1 || i
.tm
.extension_opcode
== 3))
4415 /* fxrstor, ldmxcsr, xrstor. */
4416 if (i
.tm
.base_opcode
== 0xfae
4417 && (i
.tm
.extension_opcode
== 1
4418 || i
.tm
.extension_opcode
== 2
4419 || i
.tm
.extension_opcode
== 5))
4422 /* lgdt, lidt, lmsw. */
4423 if (i
.tm
.base_opcode
== 0xf01
4424 && (i
.tm
.extension_opcode
== 2
4425 || i
.tm
.extension_opcode
== 3
4426 || i
.tm
.extension_opcode
== 6))
4430 if (i
.tm
.base_opcode
== 0xfc7
4431 && i
.tm
.extension_opcode
== 6)
4434 /* Check for x87 instructions. */
4435 if (i
.tm
.base_opcode
>= 0xd8 && i
.tm
.base_opcode
<= 0xdf)
4437 /* Skip fst, fstp, fstenv, fstcw. */
4438 if (i
.tm
.base_opcode
== 0xd9
4439 && (i
.tm
.extension_opcode
== 2
4440 || i
.tm
.extension_opcode
== 3
4441 || i
.tm
.extension_opcode
== 6
4442 || i
.tm
.extension_opcode
== 7))
4445 /* Skip fisttp, fist, fistp, fstp. */
4446 if (i
.tm
.base_opcode
== 0xdb
4447 && (i
.tm
.extension_opcode
== 1
4448 || i
.tm
.extension_opcode
== 2
4449 || i
.tm
.extension_opcode
== 3
4450 || i
.tm
.extension_opcode
== 7))
4453 /* Skip fisttp, fst, fstp, fsave, fstsw. */
4454 if (i
.tm
.base_opcode
== 0xdd
4455 && (i
.tm
.extension_opcode
== 1
4456 || i
.tm
.extension_opcode
== 2
4457 || i
.tm
.extension_opcode
== 3
4458 || i
.tm
.extension_opcode
== 6
4459 || i
.tm
.extension_opcode
== 7))
4462 /* Skip fisttp, fist, fistp, fbstp, fistp. */
4463 if (i
.tm
.base_opcode
== 0xdf
4464 && (i
.tm
.extension_opcode
== 1
4465 || i
.tm
.extension_opcode
== 2
4466 || i
.tm
.extension_opcode
== 3
4467 || i
.tm
.extension_opcode
== 6
4468 || i
.tm
.extension_opcode
== 7))
4475 dest
= i
.operands
- 1;
4477 /* Check fake imm8 operand and 3 source operands. */
4478 if ((i
.tm
.opcode_modifier
.immext
4479 || i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
4480 && i
.types
[dest
].bitfield
.imm8
)
4483 /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg, xadd */
4485 && (base_opcode
== 0x1
4486 || base_opcode
== 0x9
4487 || base_opcode
== 0x11
4488 || base_opcode
== 0x19
4489 || base_opcode
== 0x21
4490 || base_opcode
== 0x29
4491 || base_opcode
== 0x31
4492 || base_opcode
== 0x39
4493 || (i
.tm
.base_opcode
>= 0x84 && i
.tm
.base_opcode
<= 0x87)
4494 || base_opcode
== 0xfc1))
4497 /* Check for load instruction. */
4498 return (i
.types
[dest
].bitfield
.class != ClassNone
4499 || i
.types
[dest
].bitfield
.instance
== Accum
);
4502 /* Output lfence, 0xfaee8, after instruction. */
4505 insert_lfence_after (void)
4507 if (lfence_after_load
&& load_insn_p ())
4509 char *p
= frag_more (3);
4516 /* Output lfence, 0xfaee8, before instruction. */
4519 insert_lfence_before (void)
4523 if (is_any_vex_encoding (&i
.tm
))
4526 if (i
.tm
.base_opcode
== 0xff
4527 && (i
.tm
.extension_opcode
== 2 || i
.tm
.extension_opcode
== 4))
4529 /* Insert lfence before indirect branch if needed. */
4531 if (lfence_before_indirect_branch
== lfence_branch_none
)
4534 if (i
.operands
!= 1)
4537 if (i
.reg_operands
== 1)
4539 /* Indirect branch via register. Don't insert lfence with
4540 -mlfence-after-load=yes. */
4541 if (lfence_after_load
4542 || lfence_before_indirect_branch
== lfence_branch_memory
)
4545 else if (i
.mem_operands
== 1
4546 && lfence_before_indirect_branch
!= lfence_branch_register
)
4548 as_warn (_("indirect `%s` with memory operand should be avoided"),
4555 if (last_insn
.kind
!= last_insn_other
4556 && last_insn
.seg
== now_seg
)
4558 as_warn_where (last_insn
.file
, last_insn
.line
,
4559 _("`%s` skips -mlfence-before-indirect-branch on `%s`"),
4560 last_insn
.name
, i
.tm
.name
);
4571 /* Output or/not and lfence before ret. */
4572 if (lfence_before_ret
!= lfence_before_ret_none
4573 && (i
.tm
.base_opcode
== 0xc2
4574 || i
.tm
.base_opcode
== 0xc3
4575 || i
.tm
.base_opcode
== 0xca
4576 || i
.tm
.base_opcode
== 0xcb))
4578 if (last_insn
.kind
!= last_insn_other
4579 && last_insn
.seg
== now_seg
)
4581 as_warn_where (last_insn
.file
, last_insn
.line
,
4582 _("`%s` skips -mlfence-before-ret on `%s`"),
4583 last_insn
.name
, i
.tm
.name
);
4586 if (lfence_before_ret
== lfence_before_ret_or
)
4588 /* orl: 0x830c2400. */
4589 p
= frag_more ((flag_code
== CODE_64BIT
? 1 : 0) + 4 + 3);
4590 if (flag_code
== CODE_64BIT
)
4599 p
= frag_more ((flag_code
== CODE_64BIT
? 2 : 0) + 6 + 3);
4600 /* notl: 0xf71424. */
4601 if (flag_code
== CODE_64BIT
)
4606 /* notl: 0xf71424. */
4607 if (flag_code
== CODE_64BIT
)
4619 /* This is the guts of the machine-dependent assembler. LINE points to a
4620 machine dependent instruction. This function is supposed to emit
4621 the frags/bytes it assembles to. */
4624 md_assemble (char *line
)
4627 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
4628 const insn_template
*t
;
4630 /* Initialize globals. */
4631 memset (&i
, '\0', sizeof (i
));
4632 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4633 i
.reloc
[j
] = NO_RELOC
;
4634 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
4635 memset (im_expressions
, '\0', sizeof (im_expressions
));
4636 save_stack_p
= save_stack
;
4638 /* First parse an instruction mnemonic & call i386_operand for the operands.
4639 We assume that the scrubber has arranged it so that line[0] is the valid
4640 start of a (possibly prefixed) mnemonic. */
4642 line
= parse_insn (line
, mnemonic
);
4645 mnem_suffix
= i
.suffix
;
4647 line
= parse_operands (line
, mnemonic
);
4649 xfree (i
.memop1_string
);
4650 i
.memop1_string
= NULL
;
4654 /* Now we've parsed the mnemonic into a set of templates, and have the
4655 operands at hand. */
4657 /* All Intel opcodes have reversed operands except for "bound", "enter",
4658 "monitor*", "mwait*", "tpause", and "umwait". We also don't reverse
4659 intersegment "jmp" and "call" instructions with 2 immediate operands so
4660 that the immediate segment precedes the offset, as it does when in AT&T
4664 && (strcmp (mnemonic
, "bound") != 0)
4665 && (strcmp (mnemonic
, "invlpga") != 0)
4666 && (strncmp (mnemonic
, "monitor", 7) != 0)
4667 && (strncmp (mnemonic
, "mwait", 5) != 0)
4668 && (strcmp (mnemonic
, "tpause") != 0)
4669 && (strcmp (mnemonic
, "umwait") != 0)
4670 && !(operand_type_check (i
.types
[0], imm
)
4671 && operand_type_check (i
.types
[1], imm
)))
4674 /* The order of the immediates should be reversed
4675 for 2 immediates extrq and insertq instructions */
4676 if (i
.imm_operands
== 2
4677 && (strcmp (mnemonic
, "extrq") == 0
4678 || strcmp (mnemonic
, "insertq") == 0))
4679 swap_2_operands (0, 1);
4684 /* Don't optimize displacement for movabs since it only takes 64bit
4687 && i
.disp_encoding
!= disp_encoding_32bit
4688 && (flag_code
!= CODE_64BIT
4689 || strcmp (mnemonic
, "movabs") != 0))
4692 /* Next, we find a template that matches the given insn,
4693 making sure the overlap of the given operands types is consistent
4694 with the template operand types. */
4696 if (!(t
= match_template (mnem_suffix
)))
4699 if (sse_check
!= check_none
4700 && !i
.tm
.opcode_modifier
.noavx
4701 && !i
.tm
.cpu_flags
.bitfield
.cpuavx
4702 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
4703 && (i
.tm
.cpu_flags
.bitfield
.cpusse
4704 || i
.tm
.cpu_flags
.bitfield
.cpusse2
4705 || i
.tm
.cpu_flags
.bitfield
.cpusse3
4706 || i
.tm
.cpu_flags
.bitfield
.cpussse3
4707 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
4708 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
4709 || i
.tm
.cpu_flags
.bitfield
.cpusse4a
4710 || i
.tm
.cpu_flags
.bitfield
.cpupclmul
4711 || i
.tm
.cpu_flags
.bitfield
.cpuaes
4712 || i
.tm
.cpu_flags
.bitfield
.cpusha
4713 || i
.tm
.cpu_flags
.bitfield
.cpugfni
))
4715 (sse_check
== check_warning
4717 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
4720 if (i
.tm
.opcode_modifier
.fwait
)
4721 if (!add_prefix (FWAIT_OPCODE
))
4724 /* Check if REP prefix is OK. */
4725 if (i
.rep_prefix
&& !i
.tm
.opcode_modifier
.repprefixok
)
4727 as_bad (_("invalid instruction `%s' after `%s'"),
4728 i
.tm
.name
, i
.rep_prefix
);
4732 /* Check for lock without a lockable instruction. Destination operand
4733 must be memory unless it is xchg (0x86). */
4734 if (i
.prefix
[LOCK_PREFIX
]
4735 && (!i
.tm
.opcode_modifier
.islockable
4736 || i
.mem_operands
== 0
4737 || (i
.tm
.base_opcode
!= 0x86
4738 && !(i
.flags
[i
.operands
- 1] & Operand_Mem
))))
4740 as_bad (_("expecting lockable instruction after `lock'"));
4744 /* Check for data size prefix on VEX/XOP/EVEX encoded insns. */
4745 if (i
.prefix
[DATA_PREFIX
] && is_any_vex_encoding (&i
.tm
))
4747 as_bad (_("data size prefix invalid with `%s'"), i
.tm
.name
);
4751 /* Check if HLE prefix is OK. */
4752 if (i
.hle_prefix
&& !check_hle ())
4755 /* Check BND prefix. */
4756 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
4757 as_bad (_("expecting valid branch instruction after `bnd'"));
4759 /* Check NOTRACK prefix. */
4760 if (i
.notrack_prefix
&& !i
.tm
.opcode_modifier
.notrackprefixok
)
4761 as_bad (_("expecting indirect branch instruction after `notrack'"));
4763 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
4765 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4766 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
4767 else if (flag_code
!= CODE_16BIT
4768 ? i
.prefix
[ADDR_PREFIX
]
4769 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
4770 as_bad (_("16-bit address isn't allowed in MPX instructions"));
4773 /* Insert BND prefix. */
4774 if (add_bnd_prefix
&& i
.tm
.opcode_modifier
.bndprefixok
)
4776 if (!i
.prefix
[BND_PREFIX
])
4777 add_prefix (BND_PREFIX_OPCODE
);
4778 else if (i
.prefix
[BND_PREFIX
] != BND_PREFIX_OPCODE
)
4780 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
4781 i
.prefix
[BND_PREFIX
] = BND_PREFIX_OPCODE
;
4785 /* Check string instruction segment overrides. */
4786 if (i
.tm
.opcode_modifier
.isstring
>= IS_STRING_ES_OP0
)
4788 gas_assert (i
.mem_operands
);
4789 if (!check_string ())
4791 i
.disp_operands
= 0;
4794 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
4795 optimize_encoding ();
4797 if (!process_suffix ())
4800 /* Update operand types. */
4801 for (j
= 0; j
< i
.operands
; j
++)
4802 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
4804 /* Make still unresolved immediate matches conform to size of immediate
4805 given in i.suffix. */
4806 if (!finalize_imm ())
4809 if (i
.types
[0].bitfield
.imm1
)
4810 i
.imm_operands
= 0; /* kludge for shift insns. */
4812 /* We only need to check those implicit registers for instructions
4813 with 3 operands or less. */
4814 if (i
.operands
<= 3)
4815 for (j
= 0; j
< i
.operands
; j
++)
4816 if (i
.types
[j
].bitfield
.instance
!= InstanceNone
4817 && !i
.types
[j
].bitfield
.xmmword
)
4820 /* ImmExt should be processed after SSE2AVX. */
4821 if (!i
.tm
.opcode_modifier
.sse2avx
4822 && i
.tm
.opcode_modifier
.immext
)
4825 /* For insns with operands there are more diddles to do to the opcode. */
4828 if (!process_operands ())
4831 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
4833 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
4834 as_warn (_("translating to `%sp'"), i
.tm
.name
);
4837 if (is_any_vex_encoding (&i
.tm
))
4839 if (!cpu_arch_flags
.bitfield
.cpui286
)
4841 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
4846 if (i
.tm
.opcode_modifier
.vex
)
4847 build_vex_prefix (t
);
4849 build_evex_prefix ();
4852 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
4853 instructions may define INT_OPCODE as well, so avoid this corner
4854 case for those instructions that use MODRM. */
4855 if (i
.tm
.base_opcode
== INT_OPCODE
4856 && !i
.tm
.opcode_modifier
.modrm
4857 && i
.op
[0].imms
->X_add_number
== 3)
4859 i
.tm
.base_opcode
= INT3_OPCODE
;
4863 if ((i
.tm
.opcode_modifier
.jump
== JUMP
4864 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
4865 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
4866 && i
.op
[0].disps
->X_op
== O_constant
)
4868 /* Convert "jmp constant" (and "call constant") to a jump (call) to
4869 the absolute address given by the constant. Since ix86 jumps and
4870 calls are pc relative, we need to generate a reloc. */
4871 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
4872 i
.op
[0].disps
->X_op
= O_symbol
;
4875 /* For 8 bit registers we need an empty rex prefix. Also if the
4876 instruction already has a prefix, we need to convert old
4877 registers to new ones. */
4879 if ((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
4880 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
4881 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
4882 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
4883 || (((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
)
4884 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
))
4889 i
.rex
|= REX_OPCODE
;
4890 for (x
= 0; x
< 2; x
++)
4892 /* Look for 8 bit operand that uses old registers. */
4893 if (i
.types
[x
].bitfield
.class == Reg
&& i
.types
[x
].bitfield
.byte
4894 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
4896 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
4897 /* In case it is "hi" register, give up. */
4898 if (i
.op
[x
].regs
->reg_num
> 3)
4899 as_bad (_("can't encode register '%s%s' in an "
4900 "instruction requiring REX prefix."),
4901 register_prefix
, i
.op
[x
].regs
->reg_name
);
4903 /* Otherwise it is equivalent to the extended register.
4904 Since the encoding doesn't change this is merely
4905 cosmetic cleanup for debug output. */
4907 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
4912 if (i
.rex
== 0 && i
.rex_encoding
)
4914 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
4915 that uses legacy register. If it is "hi" register, don't add
4916 the REX_OPCODE byte. */
4918 for (x
= 0; x
< 2; x
++)
4919 if (i
.types
[x
].bitfield
.class == Reg
4920 && i
.types
[x
].bitfield
.byte
4921 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
4922 && i
.op
[x
].regs
->reg_num
> 3)
4924 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
4925 i
.rex_encoding
= FALSE
;
4934 add_prefix (REX_OPCODE
| i
.rex
);
4936 insert_lfence_before ();
4938 /* We are ready to output the insn. */
4941 insert_lfence_after ();
4943 last_insn
.seg
= now_seg
;
4945 if (i
.tm
.opcode_modifier
.isprefix
)
4947 last_insn
.kind
= last_insn_prefix
;
4948 last_insn
.name
= i
.tm
.name
;
4949 last_insn
.file
= as_where (&last_insn
.line
);
4952 last_insn
.kind
= last_insn_other
;
4956 parse_insn (char *line
, char *mnemonic
)
4959 char *token_start
= l
;
4962 const insn_template
*t
;
4968 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
4973 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
4975 as_bad (_("no such instruction: `%s'"), token_start
);
4980 if (!is_space_char (*l
)
4981 && *l
!= END_OF_INSN
4983 || (*l
!= PREFIX_SEPARATOR
4986 as_bad (_("invalid character %s in mnemonic"),
4987 output_invalid (*l
));
4990 if (token_start
== l
)
4992 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
4993 as_bad (_("expecting prefix; got nothing"));
4995 as_bad (_("expecting mnemonic; got nothing"));
4999 /* Look up instruction (or prefix) via hash table. */
5000 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
5002 if (*l
!= END_OF_INSN
5003 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
5004 && current_templates
5005 && current_templates
->start
->opcode_modifier
.isprefix
)
5007 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
5009 as_bad ((flag_code
!= CODE_64BIT
5010 ? _("`%s' is only supported in 64-bit mode")
5011 : _("`%s' is not supported in 64-bit mode")),
5012 current_templates
->start
->name
);
5015 /* If we are in 16-bit mode, do not allow addr16 or data16.
5016 Similarly, in 32-bit mode, do not allow addr32 or data32. */
5017 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
5018 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5019 && flag_code
!= CODE_64BIT
5020 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5021 ^ (flag_code
== CODE_16BIT
)))
5023 as_bad (_("redundant %s prefix"),
5024 current_templates
->start
->name
);
5027 if (current_templates
->start
->opcode_length
== 0)
5029 /* Handle pseudo prefixes. */
5030 switch (current_templates
->start
->base_opcode
)
5034 i
.disp_encoding
= disp_encoding_8bit
;
5038 i
.disp_encoding
= disp_encoding_32bit
;
5042 i
.dir_encoding
= dir_encoding_load
;
5046 i
.dir_encoding
= dir_encoding_store
;
5050 i
.vec_encoding
= vex_encoding_vex
;
5054 i
.vec_encoding
= vex_encoding_vex3
;
5058 i
.vec_encoding
= vex_encoding_evex
;
5062 i
.rex_encoding
= TRUE
;
5066 i
.no_optimize
= TRUE
;
5074 /* Add prefix, checking for repeated prefixes. */
5075 switch (add_prefix (current_templates
->start
->base_opcode
))
5080 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
5081 i
.notrack_prefix
= current_templates
->start
->name
;
5084 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
5085 i
.hle_prefix
= current_templates
->start
->name
;
5086 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
5087 i
.bnd_prefix
= current_templates
->start
->name
;
5089 i
.rep_prefix
= current_templates
->start
->name
;
5095 /* Skip past PREFIX_SEPARATOR and reset token_start. */
5102 if (!current_templates
)
5104 /* Deprecated functionality (new code should use pseudo-prefixes instead):
5105 Check if we should swap operand or force 32bit displacement in
5107 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
5108 i
.dir_encoding
= dir_encoding_swap
;
5109 else if (mnem_p
- 3 == dot_p
5112 i
.disp_encoding
= disp_encoding_8bit
;
5113 else if (mnem_p
- 4 == dot_p
5117 i
.disp_encoding
= disp_encoding_32bit
;
5122 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
5125 if (!current_templates
)
5128 if (mnem_p
> mnemonic
)
5130 /* See if we can get a match by trimming off a suffix. */
5133 case WORD_MNEM_SUFFIX
:
5134 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
5135 i
.suffix
= SHORT_MNEM_SUFFIX
;
5138 case BYTE_MNEM_SUFFIX
:
5139 case QWORD_MNEM_SUFFIX
:
5140 i
.suffix
= mnem_p
[-1];
5142 current_templates
= (const templates
*) hash_find (op_hash
,
5145 case SHORT_MNEM_SUFFIX
:
5146 case LONG_MNEM_SUFFIX
:
5149 i
.suffix
= mnem_p
[-1];
5151 current_templates
= (const templates
*) hash_find (op_hash
,
5160 if (intel_float_operand (mnemonic
) == 1)
5161 i
.suffix
= SHORT_MNEM_SUFFIX
;
5163 i
.suffix
= LONG_MNEM_SUFFIX
;
5165 current_templates
= (const templates
*) hash_find (op_hash
,
5172 if (!current_templates
)
5174 as_bad (_("no such instruction: `%s'"), token_start
);
5179 if (current_templates
->start
->opcode_modifier
.jump
== JUMP
5180 || current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
5182 /* Check for a branch hint. We allow ",pt" and ",pn" for
5183 predict taken and predict not taken respectively.
5184 I'm not sure that branch hints actually do anything on loop
5185 and jcxz insns (JumpByte) for current Pentium4 chips. They
5186 may work in the future and it doesn't hurt to accept them
5188 if (l
[0] == ',' && l
[1] == 'p')
5192 if (!add_prefix (DS_PREFIX_OPCODE
))
5196 else if (l
[2] == 'n')
5198 if (!add_prefix (CS_PREFIX_OPCODE
))
5204 /* Any other comma loses. */
5207 as_bad (_("invalid character %s in mnemonic"),
5208 output_invalid (*l
));
5212 /* Check if instruction is supported on specified architecture. */
5214 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
5216 supported
|= cpu_flags_match (t
);
5217 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
5219 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
))
5220 as_warn (_("use .code16 to ensure correct addressing mode"));
5226 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
5227 as_bad (flag_code
== CODE_64BIT
5228 ? _("`%s' is not supported in 64-bit mode")
5229 : _("`%s' is only supported in 64-bit mode"),
5230 current_templates
->start
->name
);
5232 as_bad (_("`%s' is not supported on `%s%s'"),
5233 current_templates
->start
->name
,
5234 cpu_arch_name
? cpu_arch_name
: default_arch
,
5235 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
5241 parse_operands (char *l
, const char *mnemonic
)
5245 /* 1 if operand is pending after ','. */
5246 unsigned int expecting_operand
= 0;
5248 /* Non-zero if operand parens not balanced. */
5249 unsigned int paren_not_balanced
;
5251 while (*l
!= END_OF_INSN
)
5253 /* Skip optional white space before operand. */
5254 if (is_space_char (*l
))
5256 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
5258 as_bad (_("invalid character %s before operand %d"),
5259 output_invalid (*l
),
5263 token_start
= l
; /* After white space. */
5264 paren_not_balanced
= 0;
5265 while (paren_not_balanced
|| *l
!= ',')
5267 if (*l
== END_OF_INSN
)
5269 if (paren_not_balanced
)
5272 as_bad (_("unbalanced parenthesis in operand %d."),
5275 as_bad (_("unbalanced brackets in operand %d."),
5280 break; /* we are done */
5282 else if (!is_operand_char (*l
) && !is_space_char (*l
) && *l
!= '"')
5284 as_bad (_("invalid character %s in operand %d"),
5285 output_invalid (*l
),
5292 ++paren_not_balanced
;
5294 --paren_not_balanced
;
5299 ++paren_not_balanced
;
5301 --paren_not_balanced
;
5305 if (l
!= token_start
)
5306 { /* Yes, we've read in another operand. */
5307 unsigned int operand_ok
;
5308 this_operand
= i
.operands
++;
5309 if (i
.operands
> MAX_OPERANDS
)
5311 as_bad (_("spurious operands; (%d operands/instruction max)"),
5315 i
.types
[this_operand
].bitfield
.unspecified
= 1;
5316 /* Now parse operand adding info to 'i' as we go along. */
5317 END_STRING_AND_SAVE (l
);
5319 if (i
.mem_operands
> 1)
5321 as_bad (_("too many memory references for `%s'"),
5328 i386_intel_operand (token_start
,
5329 intel_float_operand (mnemonic
));
5331 operand_ok
= i386_att_operand (token_start
);
5333 RESTORE_END_STRING (l
);
5339 if (expecting_operand
)
5341 expecting_operand_after_comma
:
5342 as_bad (_("expecting operand after ','; got nothing"));
5347 as_bad (_("expecting operand before ','; got nothing"));
5352 /* Now *l must be either ',' or END_OF_INSN. */
5355 if (*++l
== END_OF_INSN
)
5357 /* Just skip it, if it's \n complain. */
5358 goto expecting_operand_after_comma
;
5360 expecting_operand
= 1;
5367 swap_2_operands (int xchg1
, int xchg2
)
5369 union i386_op temp_op
;
5370 i386_operand_type temp_type
;
5371 unsigned int temp_flags
;
5372 enum bfd_reloc_code_real temp_reloc
;
5374 temp_type
= i
.types
[xchg2
];
5375 i
.types
[xchg2
] = i
.types
[xchg1
];
5376 i
.types
[xchg1
] = temp_type
;
5378 temp_flags
= i
.flags
[xchg2
];
5379 i
.flags
[xchg2
] = i
.flags
[xchg1
];
5380 i
.flags
[xchg1
] = temp_flags
;
5382 temp_op
= i
.op
[xchg2
];
5383 i
.op
[xchg2
] = i
.op
[xchg1
];
5384 i
.op
[xchg1
] = temp_op
;
5386 temp_reloc
= i
.reloc
[xchg2
];
5387 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
5388 i
.reloc
[xchg1
] = temp_reloc
;
5392 if (i
.mask
->operand
== xchg1
)
5393 i
.mask
->operand
= xchg2
;
5394 else if (i
.mask
->operand
== xchg2
)
5395 i
.mask
->operand
= xchg1
;
5399 if (i
.broadcast
->operand
== xchg1
)
5400 i
.broadcast
->operand
= xchg2
;
5401 else if (i
.broadcast
->operand
== xchg2
)
5402 i
.broadcast
->operand
= xchg1
;
5406 if (i
.rounding
->operand
== xchg1
)
5407 i
.rounding
->operand
= xchg2
;
5408 else if (i
.rounding
->operand
== xchg2
)
5409 i
.rounding
->operand
= xchg1
;
5414 swap_operands (void)
5420 swap_2_operands (1, i
.operands
- 2);
5424 swap_2_operands (0, i
.operands
- 1);
5430 if (i
.mem_operands
== 2)
5432 const seg_entry
*temp_seg
;
5433 temp_seg
= i
.seg
[0];
5434 i
.seg
[0] = i
.seg
[1];
5435 i
.seg
[1] = temp_seg
;
5439 /* Try to ensure constant immediates are represented in the smallest
5444 char guess_suffix
= 0;
5448 guess_suffix
= i
.suffix
;
5449 else if (i
.reg_operands
)
5451 /* Figure out a suffix from the last register operand specified.
5452 We can't do this properly yet, i.e. excluding special register
5453 instances, but the following works for instructions with
5454 immediates. In any case, we can't set i.suffix yet. */
5455 for (op
= i
.operands
; --op
>= 0;)
5456 if (i
.types
[op
].bitfield
.class != Reg
)
5458 else if (i
.types
[op
].bitfield
.byte
)
5460 guess_suffix
= BYTE_MNEM_SUFFIX
;
5463 else if (i
.types
[op
].bitfield
.word
)
5465 guess_suffix
= WORD_MNEM_SUFFIX
;
5468 else if (i
.types
[op
].bitfield
.dword
)
5470 guess_suffix
= LONG_MNEM_SUFFIX
;
5473 else if (i
.types
[op
].bitfield
.qword
)
5475 guess_suffix
= QWORD_MNEM_SUFFIX
;
5479 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5480 guess_suffix
= WORD_MNEM_SUFFIX
;
5482 for (op
= i
.operands
; --op
>= 0;)
5483 if (operand_type_check (i
.types
[op
], imm
))
5485 switch (i
.op
[op
].imms
->X_op
)
5488 /* If a suffix is given, this operand may be shortened. */
5489 switch (guess_suffix
)
5491 case LONG_MNEM_SUFFIX
:
5492 i
.types
[op
].bitfield
.imm32
= 1;
5493 i
.types
[op
].bitfield
.imm64
= 1;
5495 case WORD_MNEM_SUFFIX
:
5496 i
.types
[op
].bitfield
.imm16
= 1;
5497 i
.types
[op
].bitfield
.imm32
= 1;
5498 i
.types
[op
].bitfield
.imm32s
= 1;
5499 i
.types
[op
].bitfield
.imm64
= 1;
5501 case BYTE_MNEM_SUFFIX
:
5502 i
.types
[op
].bitfield
.imm8
= 1;
5503 i
.types
[op
].bitfield
.imm8s
= 1;
5504 i
.types
[op
].bitfield
.imm16
= 1;
5505 i
.types
[op
].bitfield
.imm32
= 1;
5506 i
.types
[op
].bitfield
.imm32s
= 1;
5507 i
.types
[op
].bitfield
.imm64
= 1;
5511 /* If this operand is at most 16 bits, convert it
5512 to a signed 16 bit number before trying to see
5513 whether it will fit in an even smaller size.
5514 This allows a 16-bit operand such as $0xffe0 to
5515 be recognised as within Imm8S range. */
5516 if ((i
.types
[op
].bitfield
.imm16
)
5517 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
5519 i
.op
[op
].imms
->X_add_number
=
5520 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
5523 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5524 if ((i
.types
[op
].bitfield
.imm32
)
5525 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
5528 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5529 ^ ((offsetT
) 1 << 31))
5530 - ((offsetT
) 1 << 31));
5534 = operand_type_or (i
.types
[op
],
5535 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
5537 /* We must avoid matching of Imm32 templates when 64bit
5538 only immediate is available. */
5539 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
5540 i
.types
[op
].bitfield
.imm32
= 0;
5547 /* Symbols and expressions. */
5549 /* Convert symbolic operand to proper sizes for matching, but don't
5550 prevent matching a set of insns that only supports sizes other
5551 than those matching the insn suffix. */
5553 i386_operand_type mask
, allowed
;
5554 const insn_template
*t
;
5556 operand_type_set (&mask
, 0);
5557 operand_type_set (&allowed
, 0);
5559 for (t
= current_templates
->start
;
5560 t
< current_templates
->end
;
5563 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
5564 allowed
= operand_type_and (allowed
, anyimm
);
5566 switch (guess_suffix
)
5568 case QWORD_MNEM_SUFFIX
:
5569 mask
.bitfield
.imm64
= 1;
5570 mask
.bitfield
.imm32s
= 1;
5572 case LONG_MNEM_SUFFIX
:
5573 mask
.bitfield
.imm32
= 1;
5575 case WORD_MNEM_SUFFIX
:
5576 mask
.bitfield
.imm16
= 1;
5578 case BYTE_MNEM_SUFFIX
:
5579 mask
.bitfield
.imm8
= 1;
5584 allowed
= operand_type_and (mask
, allowed
);
5585 if (!operand_type_all_zero (&allowed
))
5586 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
5593 /* Try to use the smallest displacement type too. */
5595 optimize_disp (void)
5599 for (op
= i
.operands
; --op
>= 0;)
5600 if (operand_type_check (i
.types
[op
], disp
))
5602 if (i
.op
[op
].disps
->X_op
== O_constant
)
5604 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
5606 if (i
.types
[op
].bitfield
.disp16
5607 && (op_disp
& ~(offsetT
) 0xffff) == 0)
5609 /* If this operand is at most 16 bits, convert
5610 to a signed 16 bit number and don't use 64bit
5612 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
5613 i
.types
[op
].bitfield
.disp64
= 0;
5616 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5617 if (i
.types
[op
].bitfield
.disp32
5618 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
5620 /* If this operand is at most 32 bits, convert
5621 to a signed 32 bit number and don't use 64bit
5623 op_disp
&= (((offsetT
) 2 << 31) - 1);
5624 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
5625 i
.types
[op
].bitfield
.disp64
= 0;
5628 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
5630 i
.types
[op
].bitfield
.disp8
= 0;
5631 i
.types
[op
].bitfield
.disp16
= 0;
5632 i
.types
[op
].bitfield
.disp32
= 0;
5633 i
.types
[op
].bitfield
.disp32s
= 0;
5634 i
.types
[op
].bitfield
.disp64
= 0;
5638 else if (flag_code
== CODE_64BIT
)
5640 if (fits_in_signed_long (op_disp
))
5642 i
.types
[op
].bitfield
.disp64
= 0;
5643 i
.types
[op
].bitfield
.disp32s
= 1;
5645 if (i
.prefix
[ADDR_PREFIX
]
5646 && fits_in_unsigned_long (op_disp
))
5647 i
.types
[op
].bitfield
.disp32
= 1;
5649 if ((i
.types
[op
].bitfield
.disp32
5650 || i
.types
[op
].bitfield
.disp32s
5651 || i
.types
[op
].bitfield
.disp16
)
5652 && fits_in_disp8 (op_disp
))
5653 i
.types
[op
].bitfield
.disp8
= 1;
5655 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5656 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
5658 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
5659 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
5660 i
.types
[op
].bitfield
.disp8
= 0;
5661 i
.types
[op
].bitfield
.disp16
= 0;
5662 i
.types
[op
].bitfield
.disp32
= 0;
5663 i
.types
[op
].bitfield
.disp32s
= 0;
5664 i
.types
[op
].bitfield
.disp64
= 0;
5667 /* We only support 64bit displacement on constants. */
5668 i
.types
[op
].bitfield
.disp64
= 0;
5672 /* Return 1 if there is a match in broadcast bytes between operand
5673 GIVEN and instruction template T. */
5676 match_broadcast_size (const insn_template
*t
, unsigned int given
)
5678 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
5679 && i
.types
[given
].bitfield
.byte
)
5680 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
5681 && i
.types
[given
].bitfield
.word
)
5682 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
5683 && i
.types
[given
].bitfield
.dword
)
5684 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
5685 && i
.types
[given
].bitfield
.qword
));
5688 /* Check if operands are valid for the instruction. */
5691 check_VecOperands (const insn_template
*t
)
5696 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5697 any one operand are implicity requiring AVX512VL support if the actual
5698 operand size is YMMword or XMMword. Since this function runs after
5699 template matching, there's no need to check for YMMword/XMMword in
5701 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
5702 if (!cpu_flags_all_zero (&cpu
)
5703 && !t
->cpu_flags
.bitfield
.cpuavx512vl
5704 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
5706 for (op
= 0; op
< t
->operands
; ++op
)
5708 if (t
->operand_types
[op
].bitfield
.zmmword
5709 && (i
.types
[op
].bitfield
.ymmword
5710 || i
.types
[op
].bitfield
.xmmword
))
5712 i
.error
= unsupported
;
5718 /* Without VSIB byte, we can't have a vector register for index. */
5719 if (!t
->opcode_modifier
.vecsib
5721 && (i
.index_reg
->reg_type
.bitfield
.xmmword
5722 || i
.index_reg
->reg_type
.bitfield
.ymmword
5723 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
5725 i
.error
= unsupported_vector_index_register
;
5729 /* Check if default mask is allowed. */
5730 if (t
->opcode_modifier
.nodefmask
5731 && (!i
.mask
|| i
.mask
->mask
->reg_num
== 0))
5733 i
.error
= no_default_mask
;
5737 /* For VSIB byte, we need a vector register for index, and all vector
5738 registers must be distinct. */
5739 if (t
->opcode_modifier
.vecsib
)
5742 || !((t
->opcode_modifier
.vecsib
== VecSIB128
5743 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
5744 || (t
->opcode_modifier
.vecsib
== VecSIB256
5745 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
5746 || (t
->opcode_modifier
.vecsib
== VecSIB512
5747 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
5749 i
.error
= invalid_vsib_address
;
5753 gas_assert (i
.reg_operands
== 2 || i
.mask
);
5754 if (i
.reg_operands
== 2 && !i
.mask
)
5756 gas_assert (i
.types
[0].bitfield
.class == RegSIMD
);
5757 gas_assert (i
.types
[0].bitfield
.xmmword
5758 || i
.types
[0].bitfield
.ymmword
);
5759 gas_assert (i
.types
[2].bitfield
.class == RegSIMD
);
5760 gas_assert (i
.types
[2].bitfield
.xmmword
5761 || i
.types
[2].bitfield
.ymmword
);
5762 if (operand_check
== check_none
)
5764 if (register_number (i
.op
[0].regs
)
5765 != register_number (i
.index_reg
)
5766 && register_number (i
.op
[2].regs
)
5767 != register_number (i
.index_reg
)
5768 && register_number (i
.op
[0].regs
)
5769 != register_number (i
.op
[2].regs
))
5771 if (operand_check
== check_error
)
5773 i
.error
= invalid_vector_register_set
;
5776 as_warn (_("mask, index, and destination registers should be distinct"));
5778 else if (i
.reg_operands
== 1 && i
.mask
)
5780 if (i
.types
[1].bitfield
.class == RegSIMD
5781 && (i
.types
[1].bitfield
.xmmword
5782 || i
.types
[1].bitfield
.ymmword
5783 || i
.types
[1].bitfield
.zmmword
)
5784 && (register_number (i
.op
[1].regs
)
5785 == register_number (i
.index_reg
)))
5787 if (operand_check
== check_error
)
5789 i
.error
= invalid_vector_register_set
;
5792 if (operand_check
!= check_none
)
5793 as_warn (_("index and destination registers should be distinct"));
5798 /* Check if broadcast is supported by the instruction and is applied
5799 to the memory operand. */
5802 i386_operand_type type
, overlap
;
5804 /* Check if specified broadcast is supported in this instruction,
5805 and its broadcast bytes match the memory operand. */
5806 op
= i
.broadcast
->operand
;
5807 if (!t
->opcode_modifier
.broadcast
5808 || !(i
.flags
[op
] & Operand_Mem
)
5809 || (!i
.types
[op
].bitfield
.unspecified
5810 && !match_broadcast_size (t
, op
)))
5813 i
.error
= unsupported_broadcast
;
5817 i
.broadcast
->bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
5818 * i
.broadcast
->type
);
5819 operand_type_set (&type
, 0);
5820 switch (i
.broadcast
->bytes
)
5823 type
.bitfield
.word
= 1;
5826 type
.bitfield
.dword
= 1;
5829 type
.bitfield
.qword
= 1;
5832 type
.bitfield
.xmmword
= 1;
5835 type
.bitfield
.ymmword
= 1;
5838 type
.bitfield
.zmmword
= 1;
5844 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
5845 if (t
->operand_types
[op
].bitfield
.class == RegSIMD
5846 && t
->operand_types
[op
].bitfield
.byte
5847 + t
->operand_types
[op
].bitfield
.word
5848 + t
->operand_types
[op
].bitfield
.dword
5849 + t
->operand_types
[op
].bitfield
.qword
> 1)
5851 overlap
.bitfield
.xmmword
= 0;
5852 overlap
.bitfield
.ymmword
= 0;
5853 overlap
.bitfield
.zmmword
= 0;
5855 if (operand_type_all_zero (&overlap
))
5858 if (t
->opcode_modifier
.checkregsize
)
5862 type
.bitfield
.baseindex
= 1;
5863 for (j
= 0; j
< i
.operands
; ++j
)
5866 && !operand_type_register_match(i
.types
[j
],
5867 t
->operand_types
[j
],
5869 t
->operand_types
[op
]))
5874 /* If broadcast is supported in this instruction, we need to check if
5875 operand of one-element size isn't specified without broadcast. */
5876 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
5878 /* Find memory operand. */
5879 for (op
= 0; op
< i
.operands
; op
++)
5880 if (i
.flags
[op
] & Operand_Mem
)
5882 gas_assert (op
< i
.operands
);
5883 /* Check size of the memory operand. */
5884 if (match_broadcast_size (t
, op
))
5886 i
.error
= broadcast_needed
;
5891 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
5893 /* Check if requested masking is supported. */
5896 switch (t
->opcode_modifier
.masking
)
5900 case MERGING_MASKING
:
5901 if (i
.mask
->zeroing
)
5904 i
.error
= unsupported_masking
;
5908 case DYNAMIC_MASKING
:
5909 /* Memory destinations allow only merging masking. */
5910 if (i
.mask
->zeroing
&& i
.mem_operands
)
5912 /* Find memory operand. */
5913 for (op
= 0; op
< i
.operands
; op
++)
5914 if (i
.flags
[op
] & Operand_Mem
)
5916 gas_assert (op
< i
.operands
);
5917 if (op
== i
.operands
- 1)
5919 i
.error
= unsupported_masking
;
5929 /* Check if masking is applied to dest operand. */
5930 if (i
.mask
&& (i
.mask
->operand
!= (int) (i
.operands
- 1)))
5932 i
.error
= mask_not_on_destination
;
5939 if (!t
->opcode_modifier
.sae
5940 || (i
.rounding
->type
!= saeonly
&& !t
->opcode_modifier
.staticrounding
))
5942 i
.error
= unsupported_rc_sae
;
5945 /* If the instruction has several immediate operands and one of
5946 them is rounding, the rounding operand should be the last
5947 immediate operand. */
5948 if (i
.imm_operands
> 1
5949 && i
.rounding
->operand
!= (int) (i
.imm_operands
- 1))
5951 i
.error
= rc_sae_operand_not_last_imm
;
5956 /* Check vector Disp8 operand. */
5957 if (t
->opcode_modifier
.disp8memshift
5958 && i
.disp_encoding
!= disp_encoding_32bit
)
5961 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
5962 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
5963 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
5966 const i386_operand_type
*type
= NULL
;
5969 for (op
= 0; op
< i
.operands
; op
++)
5970 if (i
.flags
[op
] & Operand_Mem
)
5972 if (t
->opcode_modifier
.evex
== EVEXLIG
)
5973 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
5974 else if (t
->operand_types
[op
].bitfield
.xmmword
5975 + t
->operand_types
[op
].bitfield
.ymmword
5976 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
5977 type
= &t
->operand_types
[op
];
5978 else if (!i
.types
[op
].bitfield
.unspecified
)
5979 type
= &i
.types
[op
];
5981 else if (i
.types
[op
].bitfield
.class == RegSIMD
5982 && t
->opcode_modifier
.evex
!= EVEXLIG
)
5984 if (i
.types
[op
].bitfield
.zmmword
)
5986 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
5988 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
5994 if (type
->bitfield
.zmmword
)
5996 else if (type
->bitfield
.ymmword
)
5998 else if (type
->bitfield
.xmmword
)
6002 /* For the check in fits_in_disp8(). */
6003 if (i
.memshift
== 0)
6007 for (op
= 0; op
< i
.operands
; op
++)
6008 if (operand_type_check (i
.types
[op
], disp
)
6009 && i
.op
[op
].disps
->X_op
== O_constant
)
6011 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
6013 i
.types
[op
].bitfield
.disp8
= 1;
6016 i
.types
[op
].bitfield
.disp8
= 0;
6025 /* Check if operands are valid for the instruction. Update VEX
6029 VEX_check_operands (const insn_template
*t
)
6031 if (i
.vec_encoding
== vex_encoding_evex
)
6033 /* This instruction must be encoded with EVEX prefix. */
6034 if (!is_evex_encoding (t
))
6036 i
.error
= unsupported
;
6042 if (!t
->opcode_modifier
.vex
)
6044 /* This instruction template doesn't have VEX prefix. */
6045 if (i
.vec_encoding
!= vex_encoding_default
)
6047 i
.error
= unsupported
;
6053 /* Check the special Imm4 cases; must be the first operand. */
6054 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
6056 if (i
.op
[0].imms
->X_op
!= O_constant
6057 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
6063 /* Turn off Imm<N> so that update_imm won't complain. */
6064 operand_type_set (&i
.types
[0], 0);
6070 static const insn_template
*
6071 match_template (char mnem_suffix
)
6073 /* Points to template once we've found it. */
6074 const insn_template
*t
;
6075 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
6076 i386_operand_type overlap4
;
6077 unsigned int found_reverse_match
;
6078 i386_opcode_modifier suffix_check
;
6079 i386_operand_type operand_types
[MAX_OPERANDS
];
6080 int addr_prefix_disp
;
6081 unsigned int j
, size_match
, check_register
;
6082 enum i386_error specific_error
= 0;
6084 #if MAX_OPERANDS != 5
6085 # error "MAX_OPERANDS must be 5."
6088 found_reverse_match
= 0;
6089 addr_prefix_disp
= -1;
6091 /* Prepare for mnemonic suffix check. */
6092 memset (&suffix_check
, 0, sizeof (suffix_check
));
6093 switch (mnem_suffix
)
6095 case BYTE_MNEM_SUFFIX
:
6096 suffix_check
.no_bsuf
= 1;
6098 case WORD_MNEM_SUFFIX
:
6099 suffix_check
.no_wsuf
= 1;
6101 case SHORT_MNEM_SUFFIX
:
6102 suffix_check
.no_ssuf
= 1;
6104 case LONG_MNEM_SUFFIX
:
6105 suffix_check
.no_lsuf
= 1;
6107 case QWORD_MNEM_SUFFIX
:
6108 suffix_check
.no_qsuf
= 1;
6111 /* NB: In Intel syntax, normally we can check for memory operand
6112 size when there is no mnemonic suffix. But jmp and call have
6113 2 different encodings with Dword memory operand size, one with
6114 No_ldSuf and the other without. i.suffix is set to
6115 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
6116 if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
6117 suffix_check
.no_ldsuf
= 1;
6120 /* Must have right number of operands. */
6121 i
.error
= number_of_operands_mismatch
;
6123 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
6125 addr_prefix_disp
= -1;
6126 found_reverse_match
= 0;
6128 if (i
.operands
!= t
->operands
)
6131 /* Check processor support. */
6132 i
.error
= unsupported
;
6133 if (cpu_flags_match (t
) != CPU_FLAGS_PERFECT_MATCH
)
6136 /* Check AT&T mnemonic. */
6137 i
.error
= unsupported_with_intel_mnemonic
;
6138 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
6141 /* Check AT&T/Intel syntax. */
6142 i
.error
= unsupported_syntax
;
6143 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
6144 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
6147 /* Check Intel64/AMD64 ISA. */
6151 /* Default: Don't accept Intel64. */
6152 if (t
->opcode_modifier
.isa64
== INTEL64
)
6156 /* -mamd64: Don't accept Intel64 and Intel64 only. */
6157 if (t
->opcode_modifier
.isa64
>= INTEL64
)
6161 /* -mintel64: Don't accept AMD64. */
6162 if (t
->opcode_modifier
.isa64
== AMD64
&& flag_code
== CODE_64BIT
)
6167 /* Check the suffix. */
6168 i
.error
= invalid_instruction_suffix
;
6169 if ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
6170 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
6171 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
6172 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
6173 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
6174 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
))
6177 size_match
= operand_size_match (t
);
6181 /* This is intentionally not
6183 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
6185 as the case of a missing * on the operand is accepted (perhaps with
6186 a warning, issued further down). */
6187 if (i
.jumpabsolute
&& t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
6189 i
.error
= operand_type_mismatch
;
6193 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6194 operand_types
[j
] = t
->operand_types
[j
];
6196 /* In general, don't allow
6197 - 64-bit operands outside of 64-bit mode,
6198 - 32-bit operands on pre-386. */
6199 j
= i
.imm_operands
+ (t
->operands
> i
.imm_operands
+ 1);
6200 if (((i
.suffix
== QWORD_MNEM_SUFFIX
6201 && flag_code
!= CODE_64BIT
6202 && (t
->base_opcode
!= 0x0fc7
6203 || t
->extension_opcode
!= 1 /* cmpxchg8b */))
6204 || (i
.suffix
== LONG_MNEM_SUFFIX
6205 && !cpu_arch_flags
.bitfield
.cpui386
))
6207 ? (t
->opcode_modifier
.mnemonicsize
!= IGNORESIZE
6208 && !intel_float_operand (t
->name
))
6209 : intel_float_operand (t
->name
) != 2)
6210 && (t
->operands
== i
.imm_operands
6211 || (operand_types
[i
.imm_operands
].bitfield
.class != RegMMX
6212 && operand_types
[i
.imm_operands
].bitfield
.class != RegSIMD
6213 && operand_types
[i
.imm_operands
].bitfield
.class != RegMask
)
6214 || (operand_types
[j
].bitfield
.class != RegMMX
6215 && operand_types
[j
].bitfield
.class != RegSIMD
6216 && operand_types
[j
].bitfield
.class != RegMask
))
6217 && !t
->opcode_modifier
.vecsib
)
6220 /* Do not verify operands when there are none. */
6222 /* We've found a match; break out of loop. */
6225 if (!t
->opcode_modifier
.jump
6226 || t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)
6228 /* There should be only one Disp operand. */
6229 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6230 if (operand_type_check (operand_types
[j
], disp
))
6232 if (j
< MAX_OPERANDS
)
6234 bfd_boolean override
= (i
.prefix
[ADDR_PREFIX
] != 0);
6236 addr_prefix_disp
= j
;
6238 /* Address size prefix will turn Disp64/Disp32S/Disp32/Disp16
6239 operand into Disp32/Disp32/Disp16/Disp32 operand. */
6243 override
= !override
;
6246 if (operand_types
[j
].bitfield
.disp32
6247 && operand_types
[j
].bitfield
.disp16
)
6249 operand_types
[j
].bitfield
.disp16
= override
;
6250 operand_types
[j
].bitfield
.disp32
= !override
;
6252 operand_types
[j
].bitfield
.disp32s
= 0;
6253 operand_types
[j
].bitfield
.disp64
= 0;
6257 if (operand_types
[j
].bitfield
.disp32s
6258 || operand_types
[j
].bitfield
.disp64
)
6260 operand_types
[j
].bitfield
.disp64
&= !override
;
6261 operand_types
[j
].bitfield
.disp32s
&= !override
;
6262 operand_types
[j
].bitfield
.disp32
= override
;
6264 operand_types
[j
].bitfield
.disp16
= 0;
6270 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
6271 if (i
.reloc
[0] == BFD_RELOC_386_GOT32
&& t
->base_opcode
== 0xa0)
6274 /* We check register size if needed. */
6275 if (t
->opcode_modifier
.checkregsize
)
6277 check_register
= (1 << t
->operands
) - 1;
6279 check_register
&= ~(1 << i
.broadcast
->operand
);
6284 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
6285 switch (t
->operands
)
6288 if (!operand_type_match (overlap0
, i
.types
[0]))
6292 /* xchg %eax, %eax is a special case. It is an alias for nop
6293 only in 32bit mode and we can use opcode 0x90. In 64bit
6294 mode, we can't use 0x90 for xchg %eax, %eax since it should
6295 zero-extend %eax to %rax. */
6296 if (flag_code
== CODE_64BIT
6297 && t
->base_opcode
== 0x90
6298 && i
.types
[0].bitfield
.instance
== Accum
6299 && i
.types
[0].bitfield
.dword
6300 && i
.types
[1].bitfield
.instance
== Accum
6301 && i
.types
[1].bitfield
.dword
)
6303 /* xrelease mov %eax, <disp> is another special case. It must not
6304 match the accumulator-only encoding of mov. */
6305 if (flag_code
!= CODE_64BIT
6307 && t
->base_opcode
== 0xa0
6308 && i
.types
[0].bitfield
.instance
== Accum
6309 && (i
.flags
[1] & Operand_Mem
))
6314 if (!(size_match
& MATCH_STRAIGHT
))
6316 /* Reverse direction of operands if swapping is possible in the first
6317 place (operands need to be symmetric) and
6318 - the load form is requested, and the template is a store form,
6319 - the store form is requested, and the template is a load form,
6320 - the non-default (swapped) form is requested. */
6321 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
6322 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
6323 && !operand_type_all_zero (&overlap1
))
6324 switch (i
.dir_encoding
)
6326 case dir_encoding_load
:
6327 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6328 || t
->opcode_modifier
.regmem
)
6332 case dir_encoding_store
:
6333 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6334 && !t
->opcode_modifier
.regmem
)
6338 case dir_encoding_swap
:
6341 case dir_encoding_default
:
6344 /* If we want store form, we skip the current load. */
6345 if ((i
.dir_encoding
== dir_encoding_store
6346 || i
.dir_encoding
== dir_encoding_swap
)
6347 && i
.mem_operands
== 0
6348 && t
->opcode_modifier
.load
)
6353 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
6354 if (!operand_type_match (overlap0
, i
.types
[0])
6355 || !operand_type_match (overlap1
, i
.types
[1])
6356 || ((check_register
& 3) == 3
6357 && !operand_type_register_match (i
.types
[0],
6362 /* Check if other direction is valid ... */
6363 if (!t
->opcode_modifier
.d
)
6367 if (!(size_match
& MATCH_REVERSE
))
6369 /* Try reversing direction of operands. */
6370 overlap0
= operand_type_and (i
.types
[0], operand_types
[i
.operands
- 1]);
6371 overlap1
= operand_type_and (i
.types
[i
.operands
- 1], operand_types
[0]);
6372 if (!operand_type_match (overlap0
, i
.types
[0])
6373 || !operand_type_match (overlap1
, i
.types
[i
.operands
- 1])
6375 && !operand_type_register_match (i
.types
[0],
6376 operand_types
[i
.operands
- 1],
6377 i
.types
[i
.operands
- 1],
6380 /* Does not match either direction. */
6383 /* found_reverse_match holds which of D or FloatR
6385 if (!t
->opcode_modifier
.d
)
6386 found_reverse_match
= 0;
6387 else if (operand_types
[0].bitfield
.tbyte
)
6388 found_reverse_match
= Opcode_FloatD
;
6389 else if (operand_types
[0].bitfield
.xmmword
6390 || operand_types
[i
.operands
- 1].bitfield
.xmmword
6391 || operand_types
[0].bitfield
.class == RegMMX
6392 || operand_types
[i
.operands
- 1].bitfield
.class == RegMMX
6393 || is_any_vex_encoding(t
))
6394 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
6395 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
6397 found_reverse_match
= Opcode_D
;
6398 if (t
->opcode_modifier
.floatr
)
6399 found_reverse_match
|= Opcode_FloatR
;
6403 /* Found a forward 2 operand match here. */
6404 switch (t
->operands
)
6407 overlap4
= operand_type_and (i
.types
[4],
6411 overlap3
= operand_type_and (i
.types
[3],
6415 overlap2
= operand_type_and (i
.types
[2],
6420 switch (t
->operands
)
6423 if (!operand_type_match (overlap4
, i
.types
[4])
6424 || !operand_type_register_match (i
.types
[3],
6431 if (!operand_type_match (overlap3
, i
.types
[3])
6432 || ((check_register
& 0xa) == 0xa
6433 && !operand_type_register_match (i
.types
[1],
6437 || ((check_register
& 0xc) == 0xc
6438 && !operand_type_register_match (i
.types
[2],
6445 /* Here we make use of the fact that there are no
6446 reverse match 3 operand instructions. */
6447 if (!operand_type_match (overlap2
, i
.types
[2])
6448 || ((check_register
& 5) == 5
6449 && !operand_type_register_match (i
.types
[0],
6453 || ((check_register
& 6) == 6
6454 && !operand_type_register_match (i
.types
[1],
6462 /* Found either forward/reverse 2, 3 or 4 operand match here:
6463 slip through to break. */
6466 /* Check if vector and VEX operands are valid. */
6467 if (check_VecOperands (t
) || VEX_check_operands (t
))
6469 specific_error
= i
.error
;
6473 /* We've found a match; break out of loop. */
6477 if (t
== current_templates
->end
)
6479 /* We found no match. */
6480 const char *err_msg
;
6481 switch (specific_error
? specific_error
: i
.error
)
6485 case operand_size_mismatch
:
6486 err_msg
= _("operand size mismatch");
6488 case operand_type_mismatch
:
6489 err_msg
= _("operand type mismatch");
6491 case register_type_mismatch
:
6492 err_msg
= _("register type mismatch");
6494 case number_of_operands_mismatch
:
6495 err_msg
= _("number of operands mismatch");
6497 case invalid_instruction_suffix
:
6498 err_msg
= _("invalid instruction suffix");
6501 err_msg
= _("constant doesn't fit in 4 bits");
6503 case unsupported_with_intel_mnemonic
:
6504 err_msg
= _("unsupported with Intel mnemonic");
6506 case unsupported_syntax
:
6507 err_msg
= _("unsupported syntax");
6510 as_bad (_("unsupported instruction `%s'"),
6511 current_templates
->start
->name
);
6513 case invalid_vsib_address
:
6514 err_msg
= _("invalid VSIB address");
6516 case invalid_vector_register_set
:
6517 err_msg
= _("mask, index, and destination registers must be distinct");
6519 case unsupported_vector_index_register
:
6520 err_msg
= _("unsupported vector index register");
6522 case unsupported_broadcast
:
6523 err_msg
= _("unsupported broadcast");
6525 case broadcast_needed
:
6526 err_msg
= _("broadcast is needed for operand of such type");
6528 case unsupported_masking
:
6529 err_msg
= _("unsupported masking");
6531 case mask_not_on_destination
:
6532 err_msg
= _("mask not on destination operand");
6534 case no_default_mask
:
6535 err_msg
= _("default mask isn't allowed");
6537 case unsupported_rc_sae
:
6538 err_msg
= _("unsupported static rounding/sae");
6540 case rc_sae_operand_not_last_imm
:
6542 err_msg
= _("RC/SAE operand must precede immediate operands");
6544 err_msg
= _("RC/SAE operand must follow immediate operands");
6546 case invalid_register_operand
:
6547 err_msg
= _("invalid register operand");
6550 as_bad (_("%s for `%s'"), err_msg
,
6551 current_templates
->start
->name
);
6555 if (!quiet_warnings
)
6558 && (i
.jumpabsolute
!= (t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)))
6559 as_warn (_("indirect %s without `*'"), t
->name
);
6561 if (t
->opcode_modifier
.isprefix
6562 && t
->opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6564 /* Warn them that a data or address size prefix doesn't
6565 affect assembly of the next line of code. */
6566 as_warn (_("stand-alone `%s' prefix"), t
->name
);
6570 /* Copy the template we found. */
6573 if (addr_prefix_disp
!= -1)
6574 i
.tm
.operand_types
[addr_prefix_disp
]
6575 = operand_types
[addr_prefix_disp
];
6577 if (found_reverse_match
)
6579 /* If we found a reverse match we must alter the opcode direction
6580 bit and clear/flip the regmem modifier one. found_reverse_match
6581 holds bits to change (different for int & float insns). */
6583 i
.tm
.base_opcode
^= found_reverse_match
;
6585 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
6586 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
6588 /* Certain SIMD insns have their load forms specified in the opcode
6589 table, and hence we need to _set_ RegMem instead of clearing it.
6590 We need to avoid setting the bit though on insns like KMOVW. */
6591 i
.tm
.opcode_modifier
.regmem
6592 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
6593 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
6594 && !i
.tm
.opcode_modifier
.regmem
;
6603 unsigned int es_op
= i
.tm
.opcode_modifier
.isstring
- IS_STRING_ES_OP0
;
6604 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.baseindex
? es_op
: 0;
6606 if (i
.seg
[op
] != NULL
&& i
.seg
[op
] != &es
)
6608 as_bad (_("`%s' operand %u must use `%ses' segment"),
6610 intel_syntax
? i
.tm
.operands
- es_op
: es_op
+ 1,
6615 /* There's only ever one segment override allowed per instruction.
6616 This instruction possibly has a legal segment override on the
6617 second operand, so copy the segment to where non-string
6618 instructions store it, allowing common code. */
6619 i
.seg
[op
] = i
.seg
[1];
6625 process_suffix (void)
6627 /* If matched instruction specifies an explicit instruction mnemonic
6629 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
6630 i
.suffix
= WORD_MNEM_SUFFIX
;
6631 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
6632 i
.suffix
= LONG_MNEM_SUFFIX
;
6633 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
6634 i
.suffix
= QWORD_MNEM_SUFFIX
;
6635 else if (i
.reg_operands
6636 && (i
.operands
> 1 || i
.types
[0].bitfield
.class == Reg
)
6637 && !i
.tm
.opcode_modifier
.addrprefixopreg
)
6639 unsigned int numop
= i
.operands
;
6641 /* movsx/movzx want only their source operand considered here, for the
6642 ambiguity checking below. The suffix will be replaced afterwards
6643 to represent the destination (register). */
6644 if (((i
.tm
.base_opcode
| 8) == 0xfbe && i
.tm
.opcode_modifier
.w
)
6645 || (i
.tm
.base_opcode
== 0x63 && i
.tm
.cpu_flags
.bitfield
.cpu64
))
6648 /* crc32 needs REX.W set regardless of suffix / source operand size. */
6649 if (i
.tm
.base_opcode
== 0xf20f38f0
6650 && i
.tm
.operand_types
[1].bitfield
.qword
)
6653 /* If there's no instruction mnemonic suffix we try to invent one
6654 based on GPR operands. */
6657 /* We take i.suffix from the last register operand specified,
6658 Destination register type is more significant than source
6659 register type. crc32 in SSE4.2 prefers source register
6661 unsigned int op
= i
.tm
.base_opcode
!= 0xf20f38f0 ? i
.operands
: 1;
6664 if (i
.tm
.operand_types
[op
].bitfield
.instance
== InstanceNone
6665 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6667 if (i
.types
[op
].bitfield
.class != Reg
)
6669 if (i
.types
[op
].bitfield
.byte
)
6670 i
.suffix
= BYTE_MNEM_SUFFIX
;
6671 else if (i
.types
[op
].bitfield
.word
)
6672 i
.suffix
= WORD_MNEM_SUFFIX
;
6673 else if (i
.types
[op
].bitfield
.dword
)
6674 i
.suffix
= LONG_MNEM_SUFFIX
;
6675 else if (i
.types
[op
].bitfield
.qword
)
6676 i
.suffix
= QWORD_MNEM_SUFFIX
;
6682 /* As an exception, movsx/movzx silently default to a byte source
6684 if ((i
.tm
.base_opcode
| 8) == 0xfbe && i
.tm
.opcode_modifier
.w
6685 && !i
.suffix
&& !intel_syntax
)
6686 i
.suffix
= BYTE_MNEM_SUFFIX
;
6688 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
6691 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6692 && i
.tm
.opcode_modifier
.no_bsuf
)
6694 else if (!check_byte_reg ())
6697 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
6700 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6701 && i
.tm
.opcode_modifier
.no_lsuf
6702 && !i
.tm
.opcode_modifier
.todword
6703 && !i
.tm
.opcode_modifier
.toqword
)
6705 else if (!check_long_reg ())
6708 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
6711 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6712 && i
.tm
.opcode_modifier
.no_qsuf
6713 && !i
.tm
.opcode_modifier
.todword
6714 && !i
.tm
.opcode_modifier
.toqword
)
6716 else if (!check_qword_reg ())
6719 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
6722 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6723 && i
.tm
.opcode_modifier
.no_wsuf
)
6725 else if (!check_word_reg ())
6728 else if (intel_syntax
6729 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6730 /* Do nothing if the instruction is going to ignore the prefix. */
6735 /* Undo the movsx/movzx change done above. */
6738 else if (i
.tm
.opcode_modifier
.mnemonicsize
== DEFAULTSIZE
6741 i
.suffix
= stackop_size
;
6742 if (stackop_size
== LONG_MNEM_SUFFIX
)
6744 /* stackop_size is set to LONG_MNEM_SUFFIX for the
6745 .code16gcc directive to support 16-bit mode with
6746 32-bit address. For IRET without a suffix, generate
6747 16-bit IRET (opcode 0xcf) to return from an interrupt
6749 if (i
.tm
.base_opcode
== 0xcf)
6751 i
.suffix
= WORD_MNEM_SUFFIX
;
6752 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
6754 /* Warn about changed behavior for segment register push/pop. */
6755 else if ((i
.tm
.base_opcode
| 1) == 0x07)
6756 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
6761 && (i
.tm
.opcode_modifier
.jump
== JUMP_ABSOLUTE
6762 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
6763 || i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
6764 || (i
.tm
.base_opcode
== 0x0f01 /* [ls][gi]dt */
6765 && i
.tm
.extension_opcode
<= 3)))
6770 if (!i
.tm
.opcode_modifier
.no_qsuf
)
6772 i
.suffix
= QWORD_MNEM_SUFFIX
;
6777 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6778 i
.suffix
= LONG_MNEM_SUFFIX
;
6781 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6782 i
.suffix
= WORD_MNEM_SUFFIX
;
6788 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
6789 /* Also cover lret/retf/iret in 64-bit mode. */
6790 || (flag_code
== CODE_64BIT
6791 && !i
.tm
.opcode_modifier
.no_lsuf
6792 && !i
.tm
.opcode_modifier
.no_qsuf
))
6793 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
6794 /* Accept FLDENV et al without suffix. */
6795 && (i
.tm
.opcode_modifier
.no_ssuf
|| i
.tm
.opcode_modifier
.floatmf
))
6797 unsigned int suffixes
, evex
= 0;
6799 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
6800 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6802 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6804 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
6806 if (!i
.tm
.opcode_modifier
.no_ssuf
)
6808 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
6811 /* For [XYZ]MMWORD operands inspect operand sizes. While generally
6812 also suitable for AT&T syntax mode, it was requested that this be
6813 restricted to just Intel syntax. */
6814 if (intel_syntax
&& is_any_vex_encoding (&i
.tm
) && !i
.broadcast
)
6818 for (op
= 0; op
< i
.tm
.operands
; ++op
)
6820 if (is_evex_encoding (&i
.tm
)
6821 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
6823 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
6824 i
.tm
.operand_types
[op
].bitfield
.xmmword
= 0;
6825 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
6826 i
.tm
.operand_types
[op
].bitfield
.ymmword
= 0;
6827 if (!i
.tm
.opcode_modifier
.evex
6828 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
6829 i
.tm
.opcode_modifier
.evex
= EVEX512
;
6832 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
6833 + i
.tm
.operand_types
[op
].bitfield
.ymmword
6834 + i
.tm
.operand_types
[op
].bitfield
.zmmword
< 2)
6837 /* Any properly sized operand disambiguates the insn. */
6838 if (i
.types
[op
].bitfield
.xmmword
6839 || i
.types
[op
].bitfield
.ymmword
6840 || i
.types
[op
].bitfield
.zmmword
)
6842 suffixes
&= ~(7 << 6);
6847 if ((i
.flags
[op
] & Operand_Mem
)
6848 && i
.tm
.operand_types
[op
].bitfield
.unspecified
)
6850 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
)
6852 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
6854 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
6856 if (is_evex_encoding (&i
.tm
))
6862 /* Are multiple suffixes / operand sizes allowed? */
6863 if (suffixes
& (suffixes
- 1))
6866 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
6867 || operand_check
== check_error
))
6869 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
6872 if (operand_check
== check_error
)
6874 as_bad (_("no instruction mnemonic suffix given and "
6875 "no register operands; can't size `%s'"), i
.tm
.name
);
6878 if (operand_check
== check_warning
)
6879 as_warn (_("%s; using default for `%s'"),
6881 ? _("ambiguous operand size")
6882 : _("no instruction mnemonic suffix given and "
6883 "no register operands"),
6886 if (i
.tm
.opcode_modifier
.floatmf
)
6887 i
.suffix
= SHORT_MNEM_SUFFIX
;
6888 else if ((i
.tm
.base_opcode
| 8) == 0xfbe
6889 || (i
.tm
.base_opcode
== 0x63
6890 && i
.tm
.cpu_flags
.bitfield
.cpu64
))
6891 /* handled below */;
6893 i
.tm
.opcode_modifier
.evex
= evex
;
6894 else if (flag_code
== CODE_16BIT
)
6895 i
.suffix
= WORD_MNEM_SUFFIX
;
6896 else if (!i
.tm
.opcode_modifier
.no_lsuf
)
6897 i
.suffix
= LONG_MNEM_SUFFIX
;
6899 i
.suffix
= QWORD_MNEM_SUFFIX
;
6903 if ((i
.tm
.base_opcode
| 8) == 0xfbe
6904 || (i
.tm
.base_opcode
== 0x63 && i
.tm
.cpu_flags
.bitfield
.cpu64
))
6906 /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
6907 In AT&T syntax, if there is no suffix (warned about above), the default
6908 will be byte extension. */
6909 if (i
.tm
.opcode_modifier
.w
&& i
.suffix
&& i
.suffix
!= BYTE_MNEM_SUFFIX
)
6910 i
.tm
.base_opcode
|= 1;
6912 /* For further processing, the suffix should represent the destination
6913 (register). This is already the case when one was used with
6914 mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
6915 no suffix to begin with. */
6916 if (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63 || !i
.suffix
)
6918 if (i
.types
[1].bitfield
.word
)
6919 i
.suffix
= WORD_MNEM_SUFFIX
;
6920 else if (i
.types
[1].bitfield
.qword
)
6921 i
.suffix
= QWORD_MNEM_SUFFIX
;
6923 i
.suffix
= LONG_MNEM_SUFFIX
;
6925 i
.tm
.opcode_modifier
.w
= 0;
6929 if (!i
.tm
.opcode_modifier
.modrm
&& i
.reg_operands
&& i
.tm
.operands
< 3)
6930 i
.short_form
= (i
.tm
.operand_types
[0].bitfield
.class == Reg
)
6931 != (i
.tm
.operand_types
[1].bitfield
.class == Reg
);
6933 /* Change the opcode based on the operand size given by i.suffix. */
6936 /* Size floating point instruction. */
6937 case LONG_MNEM_SUFFIX
:
6938 if (i
.tm
.opcode_modifier
.floatmf
)
6940 i
.tm
.base_opcode
^= 4;
6944 case WORD_MNEM_SUFFIX
:
6945 case QWORD_MNEM_SUFFIX
:
6946 /* It's not a byte, select word/dword operation. */
6947 if (i
.tm
.opcode_modifier
.w
)
6950 i
.tm
.base_opcode
|= 8;
6952 i
.tm
.base_opcode
|= 1;
6955 case SHORT_MNEM_SUFFIX
:
6956 /* Now select between word & dword operations via the operand
6957 size prefix, except for instructions that will ignore this
6959 if (i
.suffix
!= QWORD_MNEM_SUFFIX
6960 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
6961 && !i
.tm
.opcode_modifier
.floatmf
6962 && !is_any_vex_encoding (&i
.tm
)
6963 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
6964 || (flag_code
== CODE_64BIT
6965 && i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)))
6967 unsigned int prefix
= DATA_PREFIX_OPCODE
;
6969 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
) /* jcxz, loop */
6970 prefix
= ADDR_PREFIX_OPCODE
;
6972 if (!add_prefix (prefix
))
6976 /* Set mode64 for an operand. */
6977 if (i
.suffix
== QWORD_MNEM_SUFFIX
6978 && flag_code
== CODE_64BIT
6979 && !i
.tm
.opcode_modifier
.norex64
6980 && !i
.tm
.opcode_modifier
.vexw
6981 /* Special case for xchg %rax,%rax. It is NOP and doesn't
6983 && ! (i
.operands
== 2
6984 && i
.tm
.base_opcode
== 0x90
6985 && i
.tm
.extension_opcode
== None
6986 && i
.types
[0].bitfield
.instance
== Accum
6987 && i
.types
[0].bitfield
.qword
6988 && i
.types
[1].bitfield
.instance
== Accum
6989 && i
.types
[1].bitfield
.qword
))
6995 if (i
.tm
.opcode_modifier
.addrprefixopreg
)
6997 gas_assert (!i
.suffix
);
6998 gas_assert (i
.reg_operands
);
7000 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7003 /* The address size override prefix changes the size of the
7005 if (flag_code
== CODE_64BIT
7006 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
7008 as_bad (_("16-bit addressing unavailable for `%s'"),
7013 if ((flag_code
== CODE_32BIT
7014 ? i
.op
[0].regs
->reg_type
.bitfield
.word
7015 : i
.op
[0].regs
->reg_type
.bitfield
.dword
)
7016 && !add_prefix (ADDR_PREFIX_OPCODE
))
7021 /* Check invalid register operand when the address size override
7022 prefix changes the size of register operands. */
7024 enum { need_word
, need_dword
, need_qword
} need
;
7026 if (flag_code
== CODE_32BIT
)
7027 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
7028 else if (i
.prefix
[ADDR_PREFIX
])
7031 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
7033 for (op
= 0; op
< i
.operands
; op
++)
7035 if (i
.types
[op
].bitfield
.class != Reg
)
7041 if (i
.op
[op
].regs
->reg_type
.bitfield
.word
)
7045 if (i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
7049 if (i
.op
[op
].regs
->reg_type
.bitfield
.qword
)
7054 as_bad (_("invalid register operand size for `%s'"),
7065 check_byte_reg (void)
7069 for (op
= i
.operands
; --op
>= 0;)
7071 /* Skip non-register operands. */
7072 if (i
.types
[op
].bitfield
.class != Reg
)
7075 /* If this is an eight bit register, it's OK. If it's the 16 or
7076 32 bit version of an eight bit register, we will just use the
7077 low portion, and that's OK too. */
7078 if (i
.types
[op
].bitfield
.byte
)
7081 /* I/O port address operands are OK too. */
7082 if (i
.tm
.operand_types
[op
].bitfield
.instance
== RegD
7083 && i
.tm
.operand_types
[op
].bitfield
.word
)
7086 /* crc32 only wants its source operand checked here. */
7087 if (i
.tm
.base_opcode
== 0xf20f38f0 && op
)
7090 /* Any other register is bad. */
7091 if (i
.types
[op
].bitfield
.class == Reg
7092 || i
.types
[op
].bitfield
.class == RegMMX
7093 || i
.types
[op
].bitfield
.class == RegSIMD
7094 || i
.types
[op
].bitfield
.class == SReg
7095 || i
.types
[op
].bitfield
.class == RegCR
7096 || i
.types
[op
].bitfield
.class == RegDR
7097 || i
.types
[op
].bitfield
.class == RegTR
)
7099 as_bad (_("`%s%s' not allowed with `%s%c'"),
7101 i
.op
[op
].regs
->reg_name
,
7111 check_long_reg (void)
7115 for (op
= i
.operands
; --op
>= 0;)
7116 /* Skip non-register operands. */
7117 if (i
.types
[op
].bitfield
.class != Reg
)
7119 /* Reject eight bit registers, except where the template requires
7120 them. (eg. movzb) */
7121 else if (i
.types
[op
].bitfield
.byte
7122 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7123 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7124 && (i
.tm
.operand_types
[op
].bitfield
.word
7125 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7127 as_bad (_("`%s%s' not allowed with `%s%c'"),
7129 i
.op
[op
].regs
->reg_name
,
7134 /* Error if the e prefix on a general reg is missing. */
7135 else if (i
.types
[op
].bitfield
.word
7136 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7137 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7138 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7140 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7141 register_prefix
, i
.op
[op
].regs
->reg_name
,
7145 /* Warn if the r prefix on a general reg is present. */
7146 else if (i
.types
[op
].bitfield
.qword
7147 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7148 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7149 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7152 && i
.tm
.opcode_modifier
.toqword
7153 && i
.types
[0].bitfield
.class != RegSIMD
)
7155 /* Convert to QWORD. We want REX byte. */
7156 i
.suffix
= QWORD_MNEM_SUFFIX
;
7160 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7161 register_prefix
, i
.op
[op
].regs
->reg_name
,
7170 check_qword_reg (void)
7174 for (op
= i
.operands
; --op
>= 0; )
7175 /* Skip non-register operands. */
7176 if (i
.types
[op
].bitfield
.class != Reg
)
7178 /* Reject eight bit registers, except where the template requires
7179 them. (eg. movzb) */
7180 else if (i
.types
[op
].bitfield
.byte
7181 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7182 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7183 && (i
.tm
.operand_types
[op
].bitfield
.word
7184 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7186 as_bad (_("`%s%s' not allowed with `%s%c'"),
7188 i
.op
[op
].regs
->reg_name
,
7193 /* Warn if the r prefix on a general reg is missing. */
7194 else if ((i
.types
[op
].bitfield
.word
7195 || i
.types
[op
].bitfield
.dword
)
7196 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7197 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7198 && i
.tm
.operand_types
[op
].bitfield
.qword
)
7200 /* Prohibit these changes in the 64bit mode, since the
7201 lowering is more complicated. */
7203 && i
.tm
.opcode_modifier
.todword
7204 && i
.types
[0].bitfield
.class != RegSIMD
)
7206 /* Convert to DWORD. We don't want REX byte. */
7207 i
.suffix
= LONG_MNEM_SUFFIX
;
7211 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7212 register_prefix
, i
.op
[op
].regs
->reg_name
,
7221 check_word_reg (void)
7224 for (op
= i
.operands
; --op
>= 0;)
7225 /* Skip non-register operands. */
7226 if (i
.types
[op
].bitfield
.class != Reg
)
7228 /* Reject eight bit registers, except where the template requires
7229 them. (eg. movzb) */
7230 else if (i
.types
[op
].bitfield
.byte
7231 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7232 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7233 && (i
.tm
.operand_types
[op
].bitfield
.word
7234 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7236 as_bad (_("`%s%s' not allowed with `%s%c'"),
7238 i
.op
[op
].regs
->reg_name
,
7243 /* Error if the e or r prefix on a general reg is present. */
7244 else if ((i
.types
[op
].bitfield
.dword
7245 || i
.types
[op
].bitfield
.qword
)
7246 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7247 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7248 && i
.tm
.operand_types
[op
].bitfield
.word
)
7250 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7251 register_prefix
, i
.op
[op
].regs
->reg_name
,
7259 update_imm (unsigned int j
)
7261 i386_operand_type overlap
= i
.types
[j
];
7262 if ((overlap
.bitfield
.imm8
7263 || overlap
.bitfield
.imm8s
7264 || overlap
.bitfield
.imm16
7265 || overlap
.bitfield
.imm32
7266 || overlap
.bitfield
.imm32s
7267 || overlap
.bitfield
.imm64
)
7268 && !operand_type_equal (&overlap
, &imm8
)
7269 && !operand_type_equal (&overlap
, &imm8s
)
7270 && !operand_type_equal (&overlap
, &imm16
)
7271 && !operand_type_equal (&overlap
, &imm32
)
7272 && !operand_type_equal (&overlap
, &imm32s
)
7273 && !operand_type_equal (&overlap
, &imm64
))
7277 i386_operand_type temp
;
7279 operand_type_set (&temp
, 0);
7280 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7282 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
7283 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
7285 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7286 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
7287 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7289 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
7290 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
7293 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
7296 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
7297 || operand_type_equal (&overlap
, &imm16_32
)
7298 || operand_type_equal (&overlap
, &imm16_32s
))
7300 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
7305 if (!operand_type_equal (&overlap
, &imm8
)
7306 && !operand_type_equal (&overlap
, &imm8s
)
7307 && !operand_type_equal (&overlap
, &imm16
)
7308 && !operand_type_equal (&overlap
, &imm32
)
7309 && !operand_type_equal (&overlap
, &imm32s
)
7310 && !operand_type_equal (&overlap
, &imm64
))
7312 as_bad (_("no instruction mnemonic suffix given; "
7313 "can't determine immediate size"));
7317 i
.types
[j
] = overlap
;
7327 /* Update the first 2 immediate operands. */
7328 n
= i
.operands
> 2 ? 2 : i
.operands
;
7331 for (j
= 0; j
< n
; j
++)
7332 if (update_imm (j
) == 0)
7335 /* The 3rd operand can't be immediate operand. */
7336 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
7343 process_operands (void)
7345 /* Default segment register this instruction will use for memory
7346 accesses. 0 means unknown. This is only for optimizing out
7347 unnecessary segment overrides. */
7348 const seg_entry
*default_seg
= 0;
7350 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
7352 unsigned int dupl
= i
.operands
;
7353 unsigned int dest
= dupl
- 1;
7356 /* The destination must be an xmm register. */
7357 gas_assert (i
.reg_operands
7358 && MAX_OPERANDS
> dupl
7359 && operand_type_equal (&i
.types
[dest
], ®xmm
));
7361 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7362 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7364 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
7366 /* Keep xmm0 for instructions with VEX prefix and 3
7368 i
.tm
.operand_types
[0].bitfield
.instance
= InstanceNone
;
7369 i
.tm
.operand_types
[0].bitfield
.class = RegSIMD
;
7374 /* We remove the first xmm0 and keep the number of
7375 operands unchanged, which in fact duplicates the
7377 for (j
= 1; j
< i
.operands
; j
++)
7379 i
.op
[j
- 1] = i
.op
[j
];
7380 i
.types
[j
- 1] = i
.types
[j
];
7381 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7382 i
.flags
[j
- 1] = i
.flags
[j
];
7386 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
7388 gas_assert ((MAX_OPERANDS
- 1) > dupl
7389 && (i
.tm
.opcode_modifier
.vexsources
7392 /* Add the implicit xmm0 for instructions with VEX prefix
7394 for (j
= i
.operands
; j
> 0; j
--)
7396 i
.op
[j
] = i
.op
[j
- 1];
7397 i
.types
[j
] = i
.types
[j
- 1];
7398 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
7399 i
.flags
[j
] = i
.flags
[j
- 1];
7402 = (const reg_entry
*) hash_find (reg_hash
, "xmm0");
7403 i
.types
[0] = regxmm
;
7404 i
.tm
.operand_types
[0] = regxmm
;
7407 i
.reg_operands
+= 2;
7412 i
.op
[dupl
] = i
.op
[dest
];
7413 i
.types
[dupl
] = i
.types
[dest
];
7414 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7415 i
.flags
[dupl
] = i
.flags
[dest
];
7424 i
.op
[dupl
] = i
.op
[dest
];
7425 i
.types
[dupl
] = i
.types
[dest
];
7426 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7427 i
.flags
[dupl
] = i
.flags
[dest
];
7430 if (i
.tm
.opcode_modifier
.immext
)
7433 else if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7434 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7438 for (j
= 1; j
< i
.operands
; j
++)
7440 i
.op
[j
- 1] = i
.op
[j
];
7441 i
.types
[j
- 1] = i
.types
[j
];
7443 /* We need to adjust fields in i.tm since they are used by
7444 build_modrm_byte. */
7445 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7447 i
.flags
[j
- 1] = i
.flags
[j
];
7454 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
7456 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
7458 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7459 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.class == RegSIMD
);
7460 regnum
= register_number (i
.op
[1].regs
);
7461 first_reg_in_group
= regnum
& ~3;
7462 last_reg_in_group
= first_reg_in_group
+ 3;
7463 if (regnum
!= first_reg_in_group
)
7464 as_warn (_("source register `%s%s' implicitly denotes"
7465 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7466 register_prefix
, i
.op
[1].regs
->reg_name
,
7467 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
7468 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
7471 else if (i
.tm
.opcode_modifier
.regkludge
)
7473 /* The imul $imm, %reg instruction is converted into
7474 imul $imm, %reg, %reg, and the clr %reg instruction
7475 is converted into xor %reg, %reg. */
7477 unsigned int first_reg_op
;
7479 if (operand_type_check (i
.types
[0], reg
))
7483 /* Pretend we saw the extra register operand. */
7484 gas_assert (i
.reg_operands
== 1
7485 && i
.op
[first_reg_op
+ 1].regs
== 0);
7486 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
7487 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
7492 if (i
.tm
.opcode_modifier
.modrm
)
7494 /* The opcode is completed (modulo i.tm.extension_opcode which
7495 must be put into the modrm byte). Now, we make the modrm and
7496 index base bytes based on all the info we've collected. */
7498 default_seg
= build_modrm_byte ();
7500 else if (i
.types
[0].bitfield
.class == SReg
)
7502 if (flag_code
!= CODE_64BIT
7503 ? i
.tm
.base_opcode
== POP_SEG_SHORT
7504 && i
.op
[0].regs
->reg_num
== 1
7505 : (i
.tm
.base_opcode
| 1) == POP_SEG386_SHORT
7506 && i
.op
[0].regs
->reg_num
< 4)
7508 as_bad (_("you can't `%s %s%s'"),
7509 i
.tm
.name
, register_prefix
, i
.op
[0].regs
->reg_name
);
7512 if ( i
.op
[0].regs
->reg_num
> 3 && i
.tm
.opcode_length
== 1 )
7514 i
.tm
.base_opcode
^= POP_SEG_SHORT
^ POP_SEG386_SHORT
;
7515 i
.tm
.opcode_length
= 2;
7517 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
7519 else if ((i
.tm
.base_opcode
& ~0x3) == MOV_AX_DISP32
)
7523 else if (i
.tm
.opcode_modifier
.isstring
)
7525 /* For the string instructions that allow a segment override
7526 on one of their operands, the default segment is ds. */
7529 else if (i
.short_form
)
7531 /* The register or float register operand is in operand
7533 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.class != Reg
;
7535 /* Register goes in low 3 bits of opcode. */
7536 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
7537 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7539 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
7541 /* Warn about some common errors, but press on regardless.
7542 The first case can be generated by gcc (<= 2.8.1). */
7543 if (i
.operands
== 2)
7545 /* Reversed arguments on faddp, fsubp, etc. */
7546 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
7547 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
7548 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
7552 /* Extraneous `l' suffix on fp insn. */
7553 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
7554 register_prefix
, i
.op
[0].regs
->reg_name
);
7559 if ((i
.seg
[0] || i
.prefix
[SEG_PREFIX
])
7560 && i
.tm
.base_opcode
== 0x8d /* lea */
7561 && !is_any_vex_encoding(&i
.tm
))
7563 if (!quiet_warnings
)
7564 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
7568 i
.prefix
[SEG_PREFIX
] = 0;
7572 /* If a segment was explicitly specified, and the specified segment
7573 is neither the default nor the one already recorded from a prefix,
7574 use an opcode prefix to select it. If we never figured out what
7575 the default segment is, then default_seg will be zero at this
7576 point, and the specified segment prefix will always be used. */
7578 && i
.seg
[0] != default_seg
7579 && i
.seg
[0]->seg_prefix
!= i
.prefix
[SEG_PREFIX
])
7581 if (!add_prefix (i
.seg
[0]->seg_prefix
))
7587 static const seg_entry
*
7588 build_modrm_byte (void)
7590 const seg_entry
*default_seg
= 0;
7591 unsigned int source
, dest
;
7594 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
7597 unsigned int nds
, reg_slot
;
7600 dest
= i
.operands
- 1;
7603 /* There are 2 kinds of instructions:
7604 1. 5 operands: 4 register operands or 3 register operands
7605 plus 1 memory operand plus one Imm4 operand, VexXDS, and
7606 VexW0 or VexW1. The destination must be either XMM, YMM or
7608 2. 4 operands: 4 register operands or 3 register operands
7609 plus 1 memory operand, with VexXDS. */
7610 gas_assert ((i
.reg_operands
== 4
7611 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
7612 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7613 && i
.tm
.opcode_modifier
.vexw
7614 && i
.tm
.operand_types
[dest
].bitfield
.class == RegSIMD
);
7616 /* If VexW1 is set, the first non-immediate operand is the source and
7617 the second non-immediate one is encoded in the immediate operand. */
7618 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
7620 source
= i
.imm_operands
;
7621 reg_slot
= i
.imm_operands
+ 1;
7625 source
= i
.imm_operands
+ 1;
7626 reg_slot
= i
.imm_operands
;
7629 if (i
.imm_operands
== 0)
7631 /* When there is no immediate operand, generate an 8bit
7632 immediate operand to encode the first operand. */
7633 exp
= &im_expressions
[i
.imm_operands
++];
7634 i
.op
[i
.operands
].imms
= exp
;
7635 i
.types
[i
.operands
] = imm8
;
7638 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7639 exp
->X_op
= O_constant
;
7640 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
7641 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7645 gas_assert (i
.imm_operands
== 1);
7646 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
7647 gas_assert (!i
.tm
.opcode_modifier
.immext
);
7649 /* Turn on Imm8 again so that output_imm will generate it. */
7650 i
.types
[0].bitfield
.imm8
= 1;
7652 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7653 i
.op
[0].imms
->X_add_number
7654 |= register_number (i
.op
[reg_slot
].regs
) << 4;
7655 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7658 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.class == RegSIMD
);
7659 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
7664 /* i.reg_operands MUST be the number of real register operands;
7665 implicit registers do not count. If there are 3 register
7666 operands, it must be an instruction with VexNDS. For an
7667 instruction with VexNDD, the destination register is encoded
7668 in VEX prefix. If there are 4 register operands, it must be
7669 an instruction with VEX prefix and 3 sources. */
7670 if (i
.mem_operands
== 0
7671 && ((i
.reg_operands
== 2
7672 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
7673 || (i
.reg_operands
== 3
7674 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7675 || (i
.reg_operands
== 4 && vex_3_sources
)))
7683 /* When there are 3 operands, one of them may be immediate,
7684 which may be the first or the last operand. Otherwise,
7685 the first operand must be shift count register (cl) or it
7686 is an instruction with VexNDS. */
7687 gas_assert (i
.imm_operands
== 1
7688 || (i
.imm_operands
== 0
7689 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7690 || (i
.types
[0].bitfield
.instance
== RegC
7691 && i
.types
[0].bitfield
.byte
))));
7692 if (operand_type_check (i
.types
[0], imm
)
7693 || (i
.types
[0].bitfield
.instance
== RegC
7694 && i
.types
[0].bitfield
.byte
))
7700 /* When there are 4 operands, the first two must be 8bit
7701 immediate operands. The source operand will be the 3rd
7704 For instructions with VexNDS, if the first operand
7705 is an imm8, the source operand is the 2nd one. If the last
7706 operand is imm8, the source operand is the first one. */
7707 gas_assert ((i
.imm_operands
== 2
7708 && i
.types
[0].bitfield
.imm8
7709 && i
.types
[1].bitfield
.imm8
)
7710 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7711 && i
.imm_operands
== 1
7712 && (i
.types
[0].bitfield
.imm8
7713 || i
.types
[i
.operands
- 1].bitfield
.imm8
7715 if (i
.imm_operands
== 2)
7719 if (i
.types
[0].bitfield
.imm8
)
7726 if (is_evex_encoding (&i
.tm
))
7728 /* For EVEX instructions, when there are 5 operands, the
7729 first one must be immediate operand. If the second one
7730 is immediate operand, the source operand is the 3rd
7731 one. If the last one is immediate operand, the source
7732 operand is the 2nd one. */
7733 gas_assert (i
.imm_operands
== 2
7734 && i
.tm
.opcode_modifier
.sae
7735 && operand_type_check (i
.types
[0], imm
));
7736 if (operand_type_check (i
.types
[1], imm
))
7738 else if (operand_type_check (i
.types
[4], imm
))
7752 /* RC/SAE operand could be between DEST and SRC. That happens
7753 when one operand is GPR and the other one is XMM/YMM/ZMM
7755 if (i
.rounding
&& i
.rounding
->operand
== (int) dest
)
7758 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7760 /* For instructions with VexNDS, the register-only source
7761 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
7762 register. It is encoded in VEX prefix. */
7764 i386_operand_type op
;
7767 /* Check register-only source operand when two source
7768 operands are swapped. */
7769 if (!i
.tm
.operand_types
[source
].bitfield
.baseindex
7770 && i
.tm
.operand_types
[dest
].bitfield
.baseindex
)
7778 op
= i
.tm
.operand_types
[vvvv
];
7779 if ((dest
+ 1) >= i
.operands
7780 || ((op
.bitfield
.class != Reg
7781 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
7782 && op
.bitfield
.class != RegSIMD
7783 && !operand_type_equal (&op
, ®mask
)))
7785 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
7791 /* One of the register operands will be encoded in the i.rm.reg
7792 field, the other in the combined i.rm.mode and i.rm.regmem
7793 fields. If no form of this instruction supports a memory
7794 destination operand, then we assume the source operand may
7795 sometimes be a memory operand and so we need to store the
7796 destination in the i.rm.reg field. */
7797 if (!i
.tm
.opcode_modifier
.regmem
7798 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
7800 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
7801 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
7802 if (i
.op
[dest
].regs
->reg_type
.bitfield
.class == RegMMX
7803 || i
.op
[source
].regs
->reg_type
.bitfield
.class == RegMMX
)
7804 i
.has_regmmx
= TRUE
;
7805 else if (i
.op
[dest
].regs
->reg_type
.bitfield
.class == RegSIMD
7806 || i
.op
[source
].regs
->reg_type
.bitfield
.class == RegSIMD
)
7808 if (i
.types
[dest
].bitfield
.zmmword
7809 || i
.types
[source
].bitfield
.zmmword
)
7810 i
.has_regzmm
= TRUE
;
7811 else if (i
.types
[dest
].bitfield
.ymmword
7812 || i
.types
[source
].bitfield
.ymmword
)
7813 i
.has_regymm
= TRUE
;
7815 i
.has_regxmm
= TRUE
;
7817 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
7819 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
7821 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
7823 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
7828 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
7829 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
7830 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
7832 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
7834 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
7836 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
7839 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
7841 if (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class != RegCR
)
7844 add_prefix (LOCK_PREFIX_OPCODE
);
7848 { /* If it's not 2 reg operands... */
7853 unsigned int fake_zero_displacement
= 0;
7856 for (op
= 0; op
< i
.operands
; op
++)
7857 if (i
.flags
[op
] & Operand_Mem
)
7859 gas_assert (op
< i
.operands
);
7861 if (i
.tm
.opcode_modifier
.vecsib
)
7863 if (i
.index_reg
->reg_num
== RegIZ
)
7866 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7869 i
.sib
.base
= NO_BASE_REGISTER
;
7870 i
.sib
.scale
= i
.log2_scale_factor
;
7871 i
.types
[op
].bitfield
.disp8
= 0;
7872 i
.types
[op
].bitfield
.disp16
= 0;
7873 i
.types
[op
].bitfield
.disp64
= 0;
7874 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
7876 /* Must be 32 bit */
7877 i
.types
[op
].bitfield
.disp32
= 1;
7878 i
.types
[op
].bitfield
.disp32s
= 0;
7882 i
.types
[op
].bitfield
.disp32
= 0;
7883 i
.types
[op
].bitfield
.disp32s
= 1;
7886 i
.sib
.index
= i
.index_reg
->reg_num
;
7887 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7889 if ((i
.index_reg
->reg_flags
& RegVRex
) != 0)
7895 if (i
.base_reg
== 0)
7898 if (!i
.disp_operands
)
7899 fake_zero_displacement
= 1;
7900 if (i
.index_reg
== 0)
7902 i386_operand_type newdisp
;
7904 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7905 /* Operand is just <disp> */
7906 if (flag_code
== CODE_64BIT
)
7908 /* 64bit mode overwrites the 32bit absolute
7909 addressing by RIP relative addressing and
7910 absolute addressing is encoded by one of the
7911 redundant SIB forms. */
7912 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7913 i
.sib
.base
= NO_BASE_REGISTER
;
7914 i
.sib
.index
= NO_INDEX_REGISTER
;
7915 newdisp
= (!i
.prefix
[ADDR_PREFIX
] ? disp32s
: disp32
);
7917 else if ((flag_code
== CODE_16BIT
)
7918 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
7920 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
7925 i
.rm
.regmem
= NO_BASE_REGISTER
;
7928 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
7929 i
.types
[op
] = operand_type_or (i
.types
[op
], newdisp
);
7931 else if (!i
.tm
.opcode_modifier
.vecsib
)
7933 /* !i.base_reg && i.index_reg */
7934 if (i
.index_reg
->reg_num
== RegIZ
)
7935 i
.sib
.index
= NO_INDEX_REGISTER
;
7937 i
.sib
.index
= i
.index_reg
->reg_num
;
7938 i
.sib
.base
= NO_BASE_REGISTER
;
7939 i
.sib
.scale
= i
.log2_scale_factor
;
7940 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7941 i
.types
[op
].bitfield
.disp8
= 0;
7942 i
.types
[op
].bitfield
.disp16
= 0;
7943 i
.types
[op
].bitfield
.disp64
= 0;
7944 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
7946 /* Must be 32 bit */
7947 i
.types
[op
].bitfield
.disp32
= 1;
7948 i
.types
[op
].bitfield
.disp32s
= 0;
7952 i
.types
[op
].bitfield
.disp32
= 0;
7953 i
.types
[op
].bitfield
.disp32s
= 1;
7955 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7959 /* RIP addressing for 64bit mode. */
7960 else if (i
.base_reg
->reg_num
== RegIP
)
7962 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7963 i
.rm
.regmem
= NO_BASE_REGISTER
;
7964 i
.types
[op
].bitfield
.disp8
= 0;
7965 i
.types
[op
].bitfield
.disp16
= 0;
7966 i
.types
[op
].bitfield
.disp32
= 0;
7967 i
.types
[op
].bitfield
.disp32s
= 1;
7968 i
.types
[op
].bitfield
.disp64
= 0;
7969 i
.flags
[op
] |= Operand_PCrel
;
7970 if (! i
.disp_operands
)
7971 fake_zero_displacement
= 1;
7973 else if (i
.base_reg
->reg_type
.bitfield
.word
)
7975 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7976 switch (i
.base_reg
->reg_num
)
7979 if (i
.index_reg
== 0)
7981 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
7982 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
7986 if (i
.index_reg
== 0)
7989 if (operand_type_check (i
.types
[op
], disp
) == 0)
7991 /* fake (%bp) into 0(%bp) */
7992 i
.types
[op
].bitfield
.disp8
= 1;
7993 fake_zero_displacement
= 1;
7996 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
7997 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
7999 default: /* (%si) -> 4 or (%di) -> 5 */
8000 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
8002 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8004 else /* i.base_reg and 32/64 bit mode */
8006 if (flag_code
== CODE_64BIT
8007 && operand_type_check (i
.types
[op
], disp
))
8009 i
.types
[op
].bitfield
.disp16
= 0;
8010 i
.types
[op
].bitfield
.disp64
= 0;
8011 if (i
.prefix
[ADDR_PREFIX
] == 0)
8013 i
.types
[op
].bitfield
.disp32
= 0;
8014 i
.types
[op
].bitfield
.disp32s
= 1;
8018 i
.types
[op
].bitfield
.disp32
= 1;
8019 i
.types
[op
].bitfield
.disp32s
= 0;
8023 if (!i
.tm
.opcode_modifier
.vecsib
)
8024 i
.rm
.regmem
= i
.base_reg
->reg_num
;
8025 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
8027 i
.sib
.base
= i
.base_reg
->reg_num
;
8028 /* x86-64 ignores REX prefix bit here to avoid decoder
8030 if (!(i
.base_reg
->reg_flags
& RegRex
)
8031 && (i
.base_reg
->reg_num
== EBP_REG_NUM
8032 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
8034 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
8036 fake_zero_displacement
= 1;
8037 i
.types
[op
].bitfield
.disp8
= 1;
8039 i
.sib
.scale
= i
.log2_scale_factor
;
8040 if (i
.index_reg
== 0)
8042 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
8043 /* <disp>(%esp) becomes two byte modrm with no index
8044 register. We've already stored the code for esp
8045 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
8046 Any base register besides %esp will not use the
8047 extra modrm byte. */
8048 i
.sib
.index
= NO_INDEX_REGISTER
;
8050 else if (!i
.tm
.opcode_modifier
.vecsib
)
8052 if (i
.index_reg
->reg_num
== RegIZ
)
8053 i
.sib
.index
= NO_INDEX_REGISTER
;
8055 i
.sib
.index
= i
.index_reg
->reg_num
;
8056 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8057 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8062 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
8063 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
8067 if (!fake_zero_displacement
8071 fake_zero_displacement
= 1;
8072 if (i
.disp_encoding
== disp_encoding_8bit
)
8073 i
.types
[op
].bitfield
.disp8
= 1;
8075 i
.types
[op
].bitfield
.disp32
= 1;
8077 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8081 if (fake_zero_displacement
)
8083 /* Fakes a zero displacement assuming that i.types[op]
8084 holds the correct displacement size. */
8087 gas_assert (i
.op
[op
].disps
== 0);
8088 exp
= &disp_expressions
[i
.disp_operands
++];
8089 i
.op
[op
].disps
= exp
;
8090 exp
->X_op
= O_constant
;
8091 exp
->X_add_number
= 0;
8092 exp
->X_add_symbol
= (symbolS
*) 0;
8093 exp
->X_op_symbol
= (symbolS
*) 0;
8101 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
8103 if (operand_type_check (i
.types
[0], imm
))
8104 i
.vex
.register_specifier
= NULL
;
8107 /* VEX.vvvv encodes one of the sources when the first
8108 operand is not an immediate. */
8109 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8110 i
.vex
.register_specifier
= i
.op
[0].regs
;
8112 i
.vex
.register_specifier
= i
.op
[1].regs
;
8115 /* Destination is a XMM register encoded in the ModRM.reg
8117 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
8118 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
8121 /* ModRM.rm and VEX.B encodes the other source. */
8122 if (!i
.mem_operands
)
8126 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8127 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8129 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
8131 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8135 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
8137 i
.vex
.register_specifier
= i
.op
[2].regs
;
8138 if (!i
.mem_operands
)
8141 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8142 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8146 /* Fill in i.rm.reg or i.rm.regmem field with register operand
8147 (if any) based on i.tm.extension_opcode. Again, we must be
8148 careful to make sure that segment/control/debug/test/MMX
8149 registers are coded into the i.rm.reg field. */
8150 else if (i
.reg_operands
)
8153 unsigned int vex_reg
= ~0;
8155 for (op
= 0; op
< i
.operands
; op
++)
8157 if (i
.types
[op
].bitfield
.class == Reg
8158 || i
.types
[op
].bitfield
.class == RegBND
8159 || i
.types
[op
].bitfield
.class == RegMask
8160 || i
.types
[op
].bitfield
.class == SReg
8161 || i
.types
[op
].bitfield
.class == RegCR
8162 || i
.types
[op
].bitfield
.class == RegDR
8163 || i
.types
[op
].bitfield
.class == RegTR
)
8165 if (i
.types
[op
].bitfield
.class == RegSIMD
)
8167 if (i
.types
[op
].bitfield
.zmmword
)
8168 i
.has_regzmm
= TRUE
;
8169 else if (i
.types
[op
].bitfield
.ymmword
)
8170 i
.has_regymm
= TRUE
;
8172 i
.has_regxmm
= TRUE
;
8175 if (i
.types
[op
].bitfield
.class == RegMMX
)
8177 i
.has_regmmx
= TRUE
;
8184 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8186 /* For instructions with VexNDS, the register-only
8187 source operand is encoded in VEX prefix. */
8188 gas_assert (mem
!= (unsigned int) ~0);
8193 gas_assert (op
< i
.operands
);
8197 /* Check register-only source operand when two source
8198 operands are swapped. */
8199 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
8200 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
8204 gas_assert (mem
== (vex_reg
+ 1)
8205 && op
< i
.operands
);
8210 gas_assert (vex_reg
< i
.operands
);
8214 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
8216 /* For instructions with VexNDD, the register destination
8217 is encoded in VEX prefix. */
8218 if (i
.mem_operands
== 0)
8220 /* There is no memory operand. */
8221 gas_assert ((op
+ 2) == i
.operands
);
8226 /* There are only 2 non-immediate operands. */
8227 gas_assert (op
< i
.imm_operands
+ 2
8228 && i
.operands
== i
.imm_operands
+ 2);
8229 vex_reg
= i
.imm_operands
+ 1;
8233 gas_assert (op
< i
.operands
);
8235 if (vex_reg
!= (unsigned int) ~0)
8237 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
8239 if ((type
->bitfield
.class != Reg
8240 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
8241 && type
->bitfield
.class != RegSIMD
8242 && !operand_type_equal (type
, ®mask
))
8245 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
8248 /* Don't set OP operand twice. */
8251 /* If there is an extension opcode to put here, the
8252 register number must be put into the regmem field. */
8253 if (i
.tm
.extension_opcode
!= None
)
8255 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
8256 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
8258 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
8263 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
8264 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
8266 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
8271 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
8272 must set it to 3 to indicate this is a register operand
8273 in the regmem field. */
8274 if (!i
.mem_operands
)
8278 /* Fill in i.rm.reg field with extension opcode (if any). */
8279 if (i
.tm
.extension_opcode
!= None
)
8280 i
.rm
.reg
= i
.tm
.extension_opcode
;
8286 flip_code16 (unsigned int code16
)
8288 gas_assert (i
.tm
.operands
== 1);
8290 return !(i
.prefix
[REX_PREFIX
] & REX_W
)
8291 && (code16
? i
.tm
.operand_types
[0].bitfield
.disp32
8292 || i
.tm
.operand_types
[0].bitfield
.disp32s
8293 : i
.tm
.operand_types
[0].bitfield
.disp16
)
8298 output_branch (void)
8304 relax_substateT subtype
;
8308 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
8309 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
8312 if (i
.prefix
[DATA_PREFIX
] != 0)
8316 code16
^= flip_code16(code16
);
8318 /* Pentium4 branch hints. */
8319 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8320 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8325 if (i
.prefix
[REX_PREFIX
] != 0)
8331 /* BND prefixed jump. */
8332 if (i
.prefix
[BND_PREFIX
] != 0)
8338 if (i
.prefixes
!= 0)
8339 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8341 /* It's always a symbol; End frag & setup for relax.
8342 Make sure there is enough room in this frag for the largest
8343 instruction we may generate in md_convert_frag. This is 2
8344 bytes for the opcode and room for the prefix and largest
8346 frag_grow (prefix
+ 2 + 4);
8347 /* Prefix and 1 opcode byte go in fr_fix. */
8348 p
= frag_more (prefix
+ 1);
8349 if (i
.prefix
[DATA_PREFIX
] != 0)
8350 *p
++ = DATA_PREFIX_OPCODE
;
8351 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
8352 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
8353 *p
++ = i
.prefix
[SEG_PREFIX
];
8354 if (i
.prefix
[BND_PREFIX
] != 0)
8355 *p
++ = BND_PREFIX_OPCODE
;
8356 if (i
.prefix
[REX_PREFIX
] != 0)
8357 *p
++ = i
.prefix
[REX_PREFIX
];
8358 *p
= i
.tm
.base_opcode
;
8360 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
8361 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
8362 else if (cpu_arch_flags
.bitfield
.cpui386
)
8363 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
8365 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
8368 sym
= i
.op
[0].disps
->X_add_symbol
;
8369 off
= i
.op
[0].disps
->X_add_number
;
8371 if (i
.op
[0].disps
->X_op
!= O_constant
8372 && i
.op
[0].disps
->X_op
!= O_symbol
)
8374 /* Handle complex expressions. */
8375 sym
= make_expr_symbol (i
.op
[0].disps
);
8379 /* 1 possible extra opcode + 4 byte displacement go in var part.
8380 Pass reloc in fr_var. */
8381 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
8384 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8385 /* Return TRUE iff PLT32 relocation should be used for branching to
8389 need_plt32_p (symbolS
*s
)
8391 /* PLT32 relocation is ELF only. */
8396 /* Don't emit PLT32 relocation on Solaris: neither native linker nor
8397 krtld support it. */
8401 /* Since there is no need to prepare for PLT branch on x86-64, we
8402 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
8403 be used as a marker for 32-bit PC-relative branches. */
8407 /* Weak or undefined symbol need PLT32 relocation. */
8408 if (S_IS_WEAK (s
) || !S_IS_DEFINED (s
))
8411 /* Non-global symbol doesn't need PLT32 relocation. */
8412 if (! S_IS_EXTERNAL (s
))
8415 /* Other global symbols need PLT32 relocation. NB: Symbol with
8416 non-default visibilities are treated as normal global symbol
8417 so that PLT32 relocation can be used as a marker for 32-bit
8418 PC-relative branches. It is useful for linker relaxation. */
8429 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
8431 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)
8433 /* This is a loop or jecxz type instruction. */
8435 if (i
.prefix
[ADDR_PREFIX
] != 0)
8437 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE
);
8440 /* Pentium4 branch hints. */
8441 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8442 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8444 FRAG_APPEND_1_CHAR (i
.prefix
[SEG_PREFIX
]);
8453 if (flag_code
== CODE_16BIT
)
8456 if (i
.prefix
[DATA_PREFIX
] != 0)
8458 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE
);
8460 code16
^= flip_code16(code16
);
8468 /* BND prefixed jump. */
8469 if (i
.prefix
[BND_PREFIX
] != 0)
8471 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
8475 if (i
.prefix
[REX_PREFIX
] != 0)
8477 FRAG_APPEND_1_CHAR (i
.prefix
[REX_PREFIX
]);
8481 if (i
.prefixes
!= 0)
8482 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8484 p
= frag_more (i
.tm
.opcode_length
+ size
);
8485 switch (i
.tm
.opcode_length
)
8488 *p
++ = i
.tm
.base_opcode
>> 8;
8491 *p
++ = i
.tm
.base_opcode
;
8497 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8499 && jump_reloc
== NO_RELOC
8500 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
8501 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
8504 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
8506 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8507 i
.op
[0].disps
, 1, jump_reloc
);
8509 /* All jumps handled here are signed, but don't use a signed limit
8510 check for 32 and 16 bit jumps as we want to allow wrap around at
8511 4G and 64k respectively. */
8513 fixP
->fx_signed
= 1;
8517 output_interseg_jump (void)
8525 if (flag_code
== CODE_16BIT
)
8529 if (i
.prefix
[DATA_PREFIX
] != 0)
8536 gas_assert (!i
.prefix
[REX_PREFIX
]);
8542 if (i
.prefixes
!= 0)
8543 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8545 /* 1 opcode; 2 segment; offset */
8546 p
= frag_more (prefix
+ 1 + 2 + size
);
8548 if (i
.prefix
[DATA_PREFIX
] != 0)
8549 *p
++ = DATA_PREFIX_OPCODE
;
8551 if (i
.prefix
[REX_PREFIX
] != 0)
8552 *p
++ = i
.prefix
[REX_PREFIX
];
8554 *p
++ = i
.tm
.base_opcode
;
8555 if (i
.op
[1].imms
->X_op
== O_constant
)
8557 offsetT n
= i
.op
[1].imms
->X_add_number
;
8560 && !fits_in_unsigned_word (n
)
8561 && !fits_in_signed_word (n
))
8563 as_bad (_("16-bit jump out of range"));
8566 md_number_to_chars (p
, n
, size
);
8569 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8570 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
8571 if (i
.op
[0].imms
->X_op
!= O_constant
)
8572 as_bad (_("can't handle non absolute segment in `%s'"),
8574 md_number_to_chars (p
+ size
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
8577 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8582 asection
*seg
= now_seg
;
8583 subsegT subseg
= now_subseg
;
8585 unsigned int alignment
, align_size_1
;
8586 unsigned int isa_1_descsz
, feature_2_descsz
, descsz
;
8587 unsigned int isa_1_descsz_raw
, feature_2_descsz_raw
;
8588 unsigned int padding
;
8590 if (!IS_ELF
|| !x86_used_note
)
8593 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X86
;
8595 /* The .note.gnu.property section layout:
8597 Field Length Contents
8600 n_descsz 4 The note descriptor size
8601 n_type 4 NT_GNU_PROPERTY_TYPE_0
8603 n_desc n_descsz The program property array
8607 /* Create the .note.gnu.property section. */
8608 sec
= subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME
, 0);
8609 bfd_set_section_flags (sec
,
8616 if (get_elf_backend_data (stdoutput
)->s
->elfclass
== ELFCLASS64
)
8627 bfd_set_section_alignment (sec
, alignment
);
8628 elf_section_type (sec
) = SHT_NOTE
;
8630 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
8632 isa_1_descsz_raw
= 4 + 4 + 4;
8633 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
8634 isa_1_descsz
= (isa_1_descsz_raw
+ align_size_1
) & ~align_size_1
;
8636 feature_2_descsz_raw
= isa_1_descsz
;
8637 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
8639 feature_2_descsz_raw
+= 4 + 4 + 4;
8640 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
8641 feature_2_descsz
= ((feature_2_descsz_raw
+ align_size_1
)
8644 descsz
= feature_2_descsz
;
8645 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
8646 p
= frag_more (4 + 4 + 4 + 4 + descsz
);
8648 /* Write n_namsz. */
8649 md_number_to_chars (p
, (valueT
) 4, 4);
8651 /* Write n_descsz. */
8652 md_number_to_chars (p
+ 4, (valueT
) descsz
, 4);
8655 md_number_to_chars (p
+ 4 * 2, (valueT
) NT_GNU_PROPERTY_TYPE_0
, 4);
8658 memcpy (p
+ 4 * 3, "GNU", 4);
8660 /* Write 4-byte type. */
8661 md_number_to_chars (p
+ 4 * 4,
8662 (valueT
) GNU_PROPERTY_X86_ISA_1_USED
, 4);
8664 /* Write 4-byte data size. */
8665 md_number_to_chars (p
+ 4 * 5, (valueT
) 4, 4);
8667 /* Write 4-byte data. */
8668 md_number_to_chars (p
+ 4 * 6, (valueT
) x86_isa_1_used
, 4);
8670 /* Zero out paddings. */
8671 padding
= isa_1_descsz
- isa_1_descsz_raw
;
8673 memset (p
+ 4 * 7, 0, padding
);
8675 /* Write 4-byte type. */
8676 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 4,
8677 (valueT
) GNU_PROPERTY_X86_FEATURE_2_USED
, 4);
8679 /* Write 4-byte data size. */
8680 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 5, (valueT
) 4, 4);
8682 /* Write 4-byte data. */
8683 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 6,
8684 (valueT
) x86_feature_2_used
, 4);
8686 /* Zero out paddings. */
8687 padding
= feature_2_descsz
- feature_2_descsz_raw
;
8689 memset (p
+ isa_1_descsz
+ 4 * 7, 0, padding
);
8691 /* We probably can't restore the current segment, for there likely
8694 subseg_set (seg
, subseg
);
8699 encoding_length (const fragS
*start_frag
, offsetT start_off
,
8700 const char *frag_now_ptr
)
8702 unsigned int len
= 0;
8704 if (start_frag
!= frag_now
)
8706 const fragS
*fr
= start_frag
;
8711 } while (fr
&& fr
!= frag_now
);
8714 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
8717 /* Return 1 for test, and, cmp, add, sub, inc and dec which may
8718 be macro-fused with conditional jumps.
8719 NB: If TEST/AND/CMP/ADD/SUB/INC/DEC is of RIP relative address,
8720 or is one of the following format:
8733 maybe_fused_with_jcc_p (enum mf_cmp_kind
* mf_cmp_p
)
8735 /* No RIP address. */
8736 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
8739 /* No VEX/EVEX encoding. */
8740 if (is_any_vex_encoding (&i
.tm
))
8743 /* add, sub without add/sub m, imm. */
8744 if (i
.tm
.base_opcode
<= 5
8745 || (i
.tm
.base_opcode
>= 0x28 && i
.tm
.base_opcode
<= 0x2d)
8746 || ((i
.tm
.base_opcode
| 3) == 0x83
8747 && (i
.tm
.extension_opcode
== 0x5
8748 || i
.tm
.extension_opcode
== 0x0)))
8750 *mf_cmp_p
= mf_cmp_alu_cmp
;
8751 return !(i
.mem_operands
&& i
.imm_operands
);
8754 /* and without and m, imm. */
8755 if ((i
.tm
.base_opcode
>= 0x20 && i
.tm
.base_opcode
<= 0x25)
8756 || ((i
.tm
.base_opcode
| 3) == 0x83
8757 && i
.tm
.extension_opcode
== 0x4))
8759 *mf_cmp_p
= mf_cmp_test_and
;
8760 return !(i
.mem_operands
&& i
.imm_operands
);
8763 /* test without test m imm. */
8764 if ((i
.tm
.base_opcode
| 1) == 0x85
8765 || (i
.tm
.base_opcode
| 1) == 0xa9
8766 || ((i
.tm
.base_opcode
| 1) == 0xf7
8767 && i
.tm
.extension_opcode
== 0))
8769 *mf_cmp_p
= mf_cmp_test_and
;
8770 return !(i
.mem_operands
&& i
.imm_operands
);
8773 /* cmp without cmp m, imm. */
8774 if ((i
.tm
.base_opcode
>= 0x38 && i
.tm
.base_opcode
<= 0x3d)
8775 || ((i
.tm
.base_opcode
| 3) == 0x83
8776 && (i
.tm
.extension_opcode
== 0x7)))
8778 *mf_cmp_p
= mf_cmp_alu_cmp
;
8779 return !(i
.mem_operands
&& i
.imm_operands
);
8782 /* inc, dec without inc/dec m. */
8783 if ((i
.tm
.cpu_flags
.bitfield
.cpuno64
8784 && (i
.tm
.base_opcode
| 0xf) == 0x4f)
8785 || ((i
.tm
.base_opcode
| 1) == 0xff
8786 && i
.tm
.extension_opcode
<= 0x1))
8788 *mf_cmp_p
= mf_cmp_incdec
;
8789 return !i
.mem_operands
;
8795 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
8798 add_fused_jcc_padding_frag_p (enum mf_cmp_kind
* mf_cmp_p
)
8800 /* NB: Don't work with COND_JUMP86 without i386. */
8801 if (!align_branch_power
8802 || now_seg
== absolute_section
8803 || !cpu_arch_flags
.bitfield
.cpui386
8804 || !(align_branch
& align_branch_fused_bit
))
8807 if (maybe_fused_with_jcc_p (mf_cmp_p
))
8809 if (last_insn
.kind
== last_insn_other
8810 || last_insn
.seg
!= now_seg
)
8813 as_warn_where (last_insn
.file
, last_insn
.line
,
8814 _("`%s` skips -malign-branch-boundary on `%s`"),
8815 last_insn
.name
, i
.tm
.name
);
8821 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
8824 add_branch_prefix_frag_p (void)
8826 /* NB: Don't work with COND_JUMP86 without i386. Don't add prefix
8827 to PadLock instructions since they include prefixes in opcode. */
8828 if (!align_branch_power
8829 || !align_branch_prefix_size
8830 || now_seg
== absolute_section
8831 || i
.tm
.cpu_flags
.bitfield
.cpupadlock
8832 || !cpu_arch_flags
.bitfield
.cpui386
)
8835 /* Don't add prefix if it is a prefix or there is no operand in case
8836 that segment prefix is special. */
8837 if (!i
.operands
|| i
.tm
.opcode_modifier
.isprefix
)
8840 if (last_insn
.kind
== last_insn_other
8841 || last_insn
.seg
!= now_seg
)
8845 as_warn_where (last_insn
.file
, last_insn
.line
,
8846 _("`%s` skips -malign-branch-boundary on `%s`"),
8847 last_insn
.name
, i
.tm
.name
);
8852 /* Return 1 if a BRANCH_PADDING frag should be generated. */
8855 add_branch_padding_frag_p (enum align_branch_kind
*branch_p
,
8856 enum mf_jcc_kind
*mf_jcc_p
)
8860 /* NB: Don't work with COND_JUMP86 without i386. */
8861 if (!align_branch_power
8862 || now_seg
== absolute_section
8863 || !cpu_arch_flags
.bitfield
.cpui386
)
8868 /* Check for jcc and direct jmp. */
8869 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
8871 if (i
.tm
.base_opcode
== JUMP_PC_RELATIVE
)
8873 *branch_p
= align_branch_jmp
;
8874 add_padding
= align_branch
& align_branch_jmp_bit
;
8878 /* Because J<cc> and JN<cc> share same group in macro-fusible table,
8879 igore the lowest bit. */
8880 *mf_jcc_p
= (i
.tm
.base_opcode
& 0x0e) >> 1;
8881 *branch_p
= align_branch_jcc
;
8882 if ((align_branch
& align_branch_jcc_bit
))
8886 else if (is_any_vex_encoding (&i
.tm
))
8888 else if ((i
.tm
.base_opcode
| 1) == 0xc3)
8891 *branch_p
= align_branch_ret
;
8892 if ((align_branch
& align_branch_ret_bit
))
8897 /* Check for indirect jmp, direct and indirect calls. */
8898 if (i
.tm
.base_opcode
== 0xe8)
8901 *branch_p
= align_branch_call
;
8902 if ((align_branch
& align_branch_call_bit
))
8905 else if (i
.tm
.base_opcode
== 0xff
8906 && (i
.tm
.extension_opcode
== 2
8907 || i
.tm
.extension_opcode
== 4))
8909 /* Indirect call and jmp. */
8910 *branch_p
= align_branch_indirect
;
8911 if ((align_branch
& align_branch_indirect_bit
))
8918 && (i
.op
[0].disps
->X_op
== O_symbol
8919 || (i
.op
[0].disps
->X_op
== O_subtract
8920 && i
.op
[0].disps
->X_op_symbol
== GOT_symbol
)))
8922 symbolS
*s
= i
.op
[0].disps
->X_add_symbol
;
8923 /* No padding to call to global or undefined tls_get_addr. */
8924 if ((S_IS_EXTERNAL (s
) || !S_IS_DEFINED (s
))
8925 && strcmp (S_GET_NAME (s
), tls_get_addr
) == 0)
8931 && last_insn
.kind
!= last_insn_other
8932 && last_insn
.seg
== now_seg
)
8935 as_warn_where (last_insn
.file
, last_insn
.line
,
8936 _("`%s` skips -malign-branch-boundary on `%s`"),
8937 last_insn
.name
, i
.tm
.name
);
8947 fragS
*insn_start_frag
;
8948 offsetT insn_start_off
;
8949 fragS
*fragP
= NULL
;
8950 enum align_branch_kind branch
= align_branch_none
;
8951 /* The initializer is arbitrary just to avoid uninitialized error.
8952 it's actually either assigned in add_branch_padding_frag_p
8953 or never be used. */
8954 enum mf_jcc_kind mf_jcc
= mf_jcc_jo
;
8956 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8957 if (IS_ELF
&& x86_used_note
)
8959 if (i
.tm
.cpu_flags
.bitfield
.cpucmov
)
8960 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_CMOV
;
8961 if (i
.tm
.cpu_flags
.bitfield
.cpusse
)
8962 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE
;
8963 if (i
.tm
.cpu_flags
.bitfield
.cpusse2
)
8964 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE2
;
8965 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
)
8966 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE3
;
8967 if (i
.tm
.cpu_flags
.bitfield
.cpussse3
)
8968 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSSE3
;
8969 if (i
.tm
.cpu_flags
.bitfield
.cpusse4_1
)
8970 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE4_1
;
8971 if (i
.tm
.cpu_flags
.bitfield
.cpusse4_2
)
8972 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE4_2
;
8973 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
)
8974 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX
;
8975 if (i
.tm
.cpu_flags
.bitfield
.cpuavx2
)
8976 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX2
;
8977 if (i
.tm
.cpu_flags
.bitfield
.cpufma
)
8978 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_FMA
;
8979 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
)
8980 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512F
;
8981 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512cd
)
8982 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512CD
;
8983 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512er
)
8984 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512ER
;
8985 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
)
8986 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512PF
;
8987 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
)
8988 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512VL
;
8989 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
)
8990 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512DQ
;
8991 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
)
8992 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512BW
;
8993 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_4fmaps
)
8994 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_4FMAPS
;
8995 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
)
8996 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_4VNNIW
;
8997 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_bitalg
)
8998 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_BITALG
;
8999 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512ifma
)
9000 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_IFMA
;
9001 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512vbmi
)
9002 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI
;
9003 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_vbmi2
)
9004 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI2
;
9005 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_vnni
)
9006 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VNNI
;
9007 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_bf16
)
9008 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_BF16
;
9010 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
9011 || i
.tm
.cpu_flags
.bitfield
.cpu287
9012 || i
.tm
.cpu_flags
.bitfield
.cpu387
9013 || i
.tm
.cpu_flags
.bitfield
.cpu687
9014 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
9015 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
9017 || i
.tm
.base_opcode
== 0xf77 /* emms */
9018 || i
.tm
.base_opcode
== 0xf0e /* femms */
9019 || i
.tm
.base_opcode
== 0xf2a /* cvtpi2ps */
9020 || i
.tm
.base_opcode
== 0x660f2a /* cvtpi2pd */)
9021 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
9023 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
9025 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
9027 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
9028 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
9029 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
9030 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
9031 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
9032 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
9033 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
9034 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
9035 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
9039 /* Tie dwarf2 debug info to the address at the start of the insn.
9040 We can't do this after the insn has been output as the current
9041 frag may have been closed off. eg. by frag_var. */
9042 dwarf2_emit_insn (0);
9044 insn_start_frag
= frag_now
;
9045 insn_start_off
= frag_now_fix ();
9047 if (add_branch_padding_frag_p (&branch
, &mf_jcc
))
9050 /* Branch can be 8 bytes. Leave some room for prefixes. */
9051 unsigned int max_branch_padding_size
= 14;
9053 /* Align section to boundary. */
9054 record_alignment (now_seg
, align_branch_power
);
9056 /* Make room for padding. */
9057 frag_grow (max_branch_padding_size
);
9059 /* Start of the padding. */
9064 frag_var (rs_machine_dependent
, max_branch_padding_size
, 0,
9065 ENCODE_RELAX_STATE (BRANCH_PADDING
, 0),
9068 fragP
->tc_frag_data
.mf_type
= mf_jcc
;
9069 fragP
->tc_frag_data
.branch_type
= branch
;
9070 fragP
->tc_frag_data
.max_bytes
= max_branch_padding_size
;
9074 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9076 else if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
9077 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
9079 else if (i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
)
9080 output_interseg_jump ();
9083 /* Output normal instructions here. */
9087 unsigned int prefix
;
9088 enum mf_cmp_kind mf_cmp
;
9091 && (i
.tm
.base_opcode
== 0xfaee8
9092 || i
.tm
.base_opcode
== 0xfaef0
9093 || i
.tm
.base_opcode
== 0xfaef8))
9095 /* Encode lfence, mfence, and sfence as
9096 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
9097 offsetT val
= 0x240483f0ULL
;
9099 md_number_to_chars (p
, val
, 5);
9103 /* Some processors fail on LOCK prefix. This options makes
9104 assembler ignore LOCK prefix and serves as a workaround. */
9105 if (omit_lock_prefix
)
9107 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
)
9109 i
.prefix
[LOCK_PREFIX
] = 0;
9113 /* Skip if this is a branch. */
9115 else if (add_fused_jcc_padding_frag_p (&mf_cmp
))
9117 /* Make room for padding. */
9118 frag_grow (MAX_FUSED_JCC_PADDING_SIZE
);
9123 frag_var (rs_machine_dependent
, MAX_FUSED_JCC_PADDING_SIZE
, 0,
9124 ENCODE_RELAX_STATE (FUSED_JCC_PADDING
, 0),
9127 fragP
->tc_frag_data
.mf_type
= mf_cmp
;
9128 fragP
->tc_frag_data
.branch_type
= align_branch_fused
;
9129 fragP
->tc_frag_data
.max_bytes
= MAX_FUSED_JCC_PADDING_SIZE
;
9131 else if (add_branch_prefix_frag_p ())
9133 unsigned int max_prefix_size
= align_branch_prefix_size
;
9135 /* Make room for padding. */
9136 frag_grow (max_prefix_size
);
9141 frag_var (rs_machine_dependent
, max_prefix_size
, 0,
9142 ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0),
9145 fragP
->tc_frag_data
.max_bytes
= max_prefix_size
;
9148 /* Since the VEX/EVEX prefix contains the implicit prefix, we
9149 don't need the explicit prefix. */
9150 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
9152 switch (i
.tm
.opcode_length
)
9155 if (i
.tm
.base_opcode
& 0xff000000)
9157 prefix
= (i
.tm
.base_opcode
>> 24) & 0xff;
9158 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
9159 || prefix
!= REPE_PREFIX_OPCODE
9160 || (i
.prefix
[REP_PREFIX
] != REPE_PREFIX_OPCODE
))
9161 add_prefix (prefix
);
9165 if ((i
.tm
.base_opcode
& 0xff0000) != 0)
9167 prefix
= (i
.tm
.base_opcode
>> 16) & 0xff;
9168 add_prefix (prefix
);
9174 /* Check for pseudo prefixes. */
9175 as_bad_where (insn_start_frag
->fr_file
,
9176 insn_start_frag
->fr_line
,
9177 _("pseudo prefix without instruction"));
9183 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9184 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
9185 R_X86_64_GOTTPOFF relocation so that linker can safely
9186 perform IE->LE optimization. A dummy REX_OPCODE prefix
9187 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
9188 relocation for GDesc -> IE/LE optimization. */
9189 if (x86_elf_abi
== X86_64_X32_ABI
9191 && (i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
9192 || i
.reloc
[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC
)
9193 && i
.prefix
[REX_PREFIX
] == 0)
9194 add_prefix (REX_OPCODE
);
9197 /* The prefix bytes. */
9198 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
9200 FRAG_APPEND_1_CHAR (*q
);
9204 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
9209 /* REX byte is encoded in VEX prefix. */
9213 FRAG_APPEND_1_CHAR (*q
);
9216 /* There should be no other prefixes for instructions
9221 /* For EVEX instructions i.vrex should become 0 after
9222 build_evex_prefix. For VEX instructions upper 16 registers
9223 aren't available, so VREX should be 0. */
9226 /* Now the VEX prefix. */
9227 p
= frag_more (i
.vex
.length
);
9228 for (j
= 0; j
< i
.vex
.length
; j
++)
9229 p
[j
] = i
.vex
.bytes
[j
];
9232 /* Now the opcode; be careful about word order here! */
9233 if (i
.tm
.opcode_length
== 1)
9235 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
9239 switch (i
.tm
.opcode_length
)
9243 *p
++ = (i
.tm
.base_opcode
>> 24) & 0xff;
9244 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
9248 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
9258 /* Put out high byte first: can't use md_number_to_chars! */
9259 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
9260 *p
= i
.tm
.base_opcode
& 0xff;
9263 /* Now the modrm byte and sib byte (if present). */
9264 if (i
.tm
.opcode_modifier
.modrm
)
9266 FRAG_APPEND_1_CHAR ((i
.rm
.regmem
<< 0
9269 /* If i.rm.regmem == ESP (4)
9270 && i.rm.mode != (Register mode)
9272 ==> need second modrm byte. */
9273 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
9275 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
9276 FRAG_APPEND_1_CHAR ((i
.sib
.base
<< 0
9278 | i
.sib
.scale
<< 6));
9281 if (i
.disp_operands
)
9282 output_disp (insn_start_frag
, insn_start_off
);
9285 output_imm (insn_start_frag
, insn_start_off
);
9288 * frag_now_fix () returning plain abs_section_offset when we're in the
9289 * absolute section, and abs_section_offset not getting updated as data
9290 * gets added to the frag breaks the logic below.
9292 if (now_seg
!= absolute_section
)
9294 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
9296 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
9300 /* NB: Don't add prefix with GOTPC relocation since
9301 output_disp() above depends on the fixed encoding
9302 length. Can't add prefix with TLS relocation since
9303 it breaks TLS linker optimization. */
9304 unsigned int max
= i
.has_gotpc_tls_reloc
? 0 : 15 - j
;
9305 /* Prefix count on the current instruction. */
9306 unsigned int count
= i
.vex
.length
;
9308 for (k
= 0; k
< ARRAY_SIZE (i
.prefix
); k
++)
9309 /* REX byte is encoded in VEX/EVEX prefix. */
9310 if (i
.prefix
[k
] && (k
!= REX_PREFIX
|| !i
.vex
.length
))
9313 /* Count prefixes for extended opcode maps. */
9315 switch (i
.tm
.opcode_length
)
9318 if (((i
.tm
.base_opcode
>> 16) & 0xff) == 0xf)
9321 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
9333 if (((i
.tm
.base_opcode
>> 8) & 0xff) == 0xf)
9342 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
9345 /* Set the maximum prefix size in BRANCH_PREFIX
9347 if (fragP
->tc_frag_data
.max_bytes
> max
)
9348 fragP
->tc_frag_data
.max_bytes
= max
;
9349 if (fragP
->tc_frag_data
.max_bytes
> count
)
9350 fragP
->tc_frag_data
.max_bytes
-= count
;
9352 fragP
->tc_frag_data
.max_bytes
= 0;
9356 /* Remember the maximum prefix size in FUSED_JCC_PADDING
9358 unsigned int max_prefix_size
;
9359 if (align_branch_prefix_size
> max
)
9360 max_prefix_size
= max
;
9362 max_prefix_size
= align_branch_prefix_size
;
9363 if (max_prefix_size
> count
)
9364 fragP
->tc_frag_data
.max_prefix_length
9365 = max_prefix_size
- count
;
9368 /* Use existing segment prefix if possible. Use CS
9369 segment prefix in 64-bit mode. In 32-bit mode, use SS
9370 segment prefix with ESP/EBP base register and use DS
9371 segment prefix without ESP/EBP base register. */
9372 if (i
.prefix
[SEG_PREFIX
])
9373 fragP
->tc_frag_data
.default_prefix
= i
.prefix
[SEG_PREFIX
];
9374 else if (flag_code
== CODE_64BIT
)
9375 fragP
->tc_frag_data
.default_prefix
= CS_PREFIX_OPCODE
;
9377 && (i
.base_reg
->reg_num
== 4
9378 || i
.base_reg
->reg_num
== 5))
9379 fragP
->tc_frag_data
.default_prefix
= SS_PREFIX_OPCODE
;
9381 fragP
->tc_frag_data
.default_prefix
= DS_PREFIX_OPCODE
;
9386 /* NB: Don't work with COND_JUMP86 without i386. */
9387 if (align_branch_power
9388 && now_seg
!= absolute_section
9389 && cpu_arch_flags
.bitfield
.cpui386
)
9391 /* Terminate each frag so that we can add prefix and check for
9393 frag_wane (frag_now
);
9400 pi ("" /*line*/, &i
);
9402 #endif /* DEBUG386 */
9405 /* Return the size of the displacement operand N. */
9408 disp_size (unsigned int n
)
9412 if (i
.types
[n
].bitfield
.disp64
)
9414 else if (i
.types
[n
].bitfield
.disp8
)
9416 else if (i
.types
[n
].bitfield
.disp16
)
9421 /* Return the size of the immediate operand N. */
9424 imm_size (unsigned int n
)
9427 if (i
.types
[n
].bitfield
.imm64
)
9429 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
9431 else if (i
.types
[n
].bitfield
.imm16
)
9437 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
9442 for (n
= 0; n
< i
.operands
; n
++)
9444 if (operand_type_check (i
.types
[n
], disp
))
9446 if (i
.op
[n
].disps
->X_op
== O_constant
)
9448 int size
= disp_size (n
);
9449 offsetT val
= i
.op
[n
].disps
->X_add_number
;
9451 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
9453 p
= frag_more (size
);
9454 md_number_to_chars (p
, val
, size
);
9458 enum bfd_reloc_code_real reloc_type
;
9459 int size
= disp_size (n
);
9460 int sign
= i
.types
[n
].bitfield
.disp32s
;
9461 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
9464 /* We can't have 8 bit displacement here. */
9465 gas_assert (!i
.types
[n
].bitfield
.disp8
);
9467 /* The PC relative address is computed relative
9468 to the instruction boundary, so in case immediate
9469 fields follows, we need to adjust the value. */
9470 if (pcrel
&& i
.imm_operands
)
9475 for (n1
= 0; n1
< i
.operands
; n1
++)
9476 if (operand_type_check (i
.types
[n1
], imm
))
9478 /* Only one immediate is allowed for PC
9479 relative address. */
9480 gas_assert (sz
== 0);
9482 i
.op
[n
].disps
->X_add_number
-= sz
;
9484 /* We should find the immediate. */
9485 gas_assert (sz
!= 0);
9488 p
= frag_more (size
);
9489 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
9491 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
9492 && (((reloc_type
== BFD_RELOC_32
9493 || reloc_type
== BFD_RELOC_X86_64_32S
9494 || (reloc_type
== BFD_RELOC_64
9496 && (i
.op
[n
].disps
->X_op
== O_symbol
9497 || (i
.op
[n
].disps
->X_op
== O_add
9498 && ((symbol_get_value_expression
9499 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
9501 || reloc_type
== BFD_RELOC_32_PCREL
))
9505 reloc_type
= BFD_RELOC_386_GOTPC
;
9506 i
.has_gotpc_tls_reloc
= TRUE
;
9507 i
.op
[n
].imms
->X_add_number
+=
9508 encoding_length (insn_start_frag
, insn_start_off
, p
);
9510 else if (reloc_type
== BFD_RELOC_64
)
9511 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
9513 /* Don't do the adjustment for x86-64, as there
9514 the pcrel addressing is relative to the _next_
9515 insn, and that is taken care of in other code. */
9516 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
9518 else if (align_branch_power
)
9522 case BFD_RELOC_386_TLS_GD
:
9523 case BFD_RELOC_386_TLS_LDM
:
9524 case BFD_RELOC_386_TLS_IE
:
9525 case BFD_RELOC_386_TLS_IE_32
:
9526 case BFD_RELOC_386_TLS_GOTIE
:
9527 case BFD_RELOC_386_TLS_GOTDESC
:
9528 case BFD_RELOC_386_TLS_DESC_CALL
:
9529 case BFD_RELOC_X86_64_TLSGD
:
9530 case BFD_RELOC_X86_64_TLSLD
:
9531 case BFD_RELOC_X86_64_GOTTPOFF
:
9532 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
9533 case BFD_RELOC_X86_64_TLSDESC_CALL
:
9534 i
.has_gotpc_tls_reloc
= TRUE
;
9539 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
9540 size
, i
.op
[n
].disps
, pcrel
,
9542 /* Check for "call/jmp *mem", "mov mem, %reg",
9543 "test %reg, mem" and "binop mem, %reg" where binop
9544 is one of adc, add, and, cmp, or, sbb, sub, xor
9545 instructions without data prefix. Always generate
9546 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
9547 if (i
.prefix
[DATA_PREFIX
] == 0
9548 && (generate_relax_relocations
9551 && i
.rm
.regmem
== 5))
9553 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
9554 && !is_any_vex_encoding(&i
.tm
)
9555 && ((i
.operands
== 1
9556 && i
.tm
.base_opcode
== 0xff
9557 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
9559 && (i
.tm
.base_opcode
== 0x8b
9560 || i
.tm
.base_opcode
== 0x85
9561 || (i
.tm
.base_opcode
& ~0x38) == 0x03))))
9565 fixP
->fx_tcbit
= i
.rex
!= 0;
9567 && (i
.base_reg
->reg_num
== RegIP
))
9568 fixP
->fx_tcbit2
= 1;
9571 fixP
->fx_tcbit2
= 1;
9579 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
9584 for (n
= 0; n
< i
.operands
; n
++)
9586 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
9587 if (i
.rounding
&& (int) n
== i
.rounding
->operand
)
9590 if (operand_type_check (i
.types
[n
], imm
))
9592 if (i
.op
[n
].imms
->X_op
== O_constant
)
9594 int size
= imm_size (n
);
9597 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
9599 p
= frag_more (size
);
9600 md_number_to_chars (p
, val
, size
);
9604 /* Not absolute_section.
9605 Need a 32-bit fixup (don't support 8bit
9606 non-absolute imms). Try to support other
9608 enum bfd_reloc_code_real reloc_type
;
9609 int size
= imm_size (n
);
9612 if (i
.types
[n
].bitfield
.imm32s
9613 && (i
.suffix
== QWORD_MNEM_SUFFIX
9614 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
9619 p
= frag_more (size
);
9620 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
9622 /* This is tough to explain. We end up with this one if we
9623 * have operands that look like
9624 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
9625 * obtain the absolute address of the GOT, and it is strongly
9626 * preferable from a performance point of view to avoid using
9627 * a runtime relocation for this. The actual sequence of
9628 * instructions often look something like:
9633 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
9635 * The call and pop essentially return the absolute address
9636 * of the label .L66 and store it in %ebx. The linker itself
9637 * will ultimately change the first operand of the addl so
9638 * that %ebx points to the GOT, but to keep things simple, the
9639 * .o file must have this operand set so that it generates not
9640 * the absolute address of .L66, but the absolute address of
9641 * itself. This allows the linker itself simply treat a GOTPC
9642 * relocation as asking for a pcrel offset to the GOT to be
9643 * added in, and the addend of the relocation is stored in the
9644 * operand field for the instruction itself.
9646 * Our job here is to fix the operand so that it would add
9647 * the correct offset so that %ebx would point to itself. The
9648 * thing that is tricky is that .-.L66 will point to the
9649 * beginning of the instruction, so we need to further modify
9650 * the operand so that it will point to itself. There are
9651 * other cases where you have something like:
9653 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
9655 * and here no correction would be required. Internally in
9656 * the assembler we treat operands of this form as not being
9657 * pcrel since the '.' is explicitly mentioned, and I wonder
9658 * whether it would simplify matters to do it this way. Who
9659 * knows. In earlier versions of the PIC patches, the
9660 * pcrel_adjust field was used to store the correction, but
9661 * since the expression is not pcrel, I felt it would be
9662 * confusing to do it this way. */
9664 if ((reloc_type
== BFD_RELOC_32
9665 || reloc_type
== BFD_RELOC_X86_64_32S
9666 || reloc_type
== BFD_RELOC_64
)
9668 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
9669 && (i
.op
[n
].imms
->X_op
== O_symbol
9670 || (i
.op
[n
].imms
->X_op
== O_add
9671 && ((symbol_get_value_expression
9672 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
9676 reloc_type
= BFD_RELOC_386_GOTPC
;
9678 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
9680 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
9681 i
.has_gotpc_tls_reloc
= TRUE
;
9682 i
.op
[n
].imms
->X_add_number
+=
9683 encoding_length (insn_start_frag
, insn_start_off
, p
);
9685 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
9686 i
.op
[n
].imms
, 0, reloc_type
);
9692 /* x86_cons_fix_new is called via the expression parsing code when a
9693 reloc is needed. We use this hook to get the correct .got reloc. */
9694 static int cons_sign
= -1;
9697 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
9698 expressionS
*exp
, bfd_reloc_code_real_type r
)
9700 r
= reloc (len
, 0, cons_sign
, r
);
9703 if (exp
->X_op
== O_secrel
)
9705 exp
->X_op
= O_symbol
;
9706 r
= BFD_RELOC_32_SECREL
;
9710 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
9713 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
9714 purpose of the `.dc.a' internal pseudo-op. */
9717 x86_address_bytes (void)
9719 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
9721 return stdoutput
->arch_info
->bits_per_address
/ 8;
9724 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
9726 # define lex_got(reloc, adjust, types) NULL
9728 /* Parse operands of the form
9729 <symbol>@GOTOFF+<nnn>
9730 and similar .plt or .got references.
9732 If we find one, set up the correct relocation in RELOC and copy the
9733 input string, minus the `@GOTOFF' into a malloc'd buffer for
9734 parsing by the calling routine. Return this buffer, and if ADJUST
9735 is non-null set it to the length of the string we removed from the
9736 input line. Otherwise return NULL. */
9738 lex_got (enum bfd_reloc_code_real
*rel
,
9740 i386_operand_type
*types
)
9742 /* Some of the relocations depend on the size of what field is to
9743 be relocated. But in our callers i386_immediate and i386_displacement
9744 we don't yet know the operand size (this will be set by insn
9745 matching). Hence we record the word32 relocation here,
9746 and adjust the reloc according to the real size in reloc(). */
9747 static const struct {
9750 const enum bfd_reloc_code_real rel
[2];
9751 const i386_operand_type types64
;
9753 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9754 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
9756 OPERAND_TYPE_IMM32_64
},
9758 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
9759 BFD_RELOC_X86_64_PLTOFF64
},
9760 OPERAND_TYPE_IMM64
},
9761 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
9762 BFD_RELOC_X86_64_PLT32
},
9763 OPERAND_TYPE_IMM32_32S_DISP32
},
9764 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
9765 BFD_RELOC_X86_64_GOTPLT64
},
9766 OPERAND_TYPE_IMM64_DISP64
},
9767 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
9768 BFD_RELOC_X86_64_GOTOFF64
},
9769 OPERAND_TYPE_IMM64_DISP64
},
9770 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
9771 BFD_RELOC_X86_64_GOTPCREL
},
9772 OPERAND_TYPE_IMM32_32S_DISP32
},
9773 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
9774 BFD_RELOC_X86_64_TLSGD
},
9775 OPERAND_TYPE_IMM32_32S_DISP32
},
9776 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
9777 _dummy_first_bfd_reloc_code_real
},
9778 OPERAND_TYPE_NONE
},
9779 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
9780 BFD_RELOC_X86_64_TLSLD
},
9781 OPERAND_TYPE_IMM32_32S_DISP32
},
9782 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
9783 BFD_RELOC_X86_64_GOTTPOFF
},
9784 OPERAND_TYPE_IMM32_32S_DISP32
},
9785 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
9786 BFD_RELOC_X86_64_TPOFF32
},
9787 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
9788 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
9789 _dummy_first_bfd_reloc_code_real
},
9790 OPERAND_TYPE_NONE
},
9791 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
9792 BFD_RELOC_X86_64_DTPOFF32
},
9793 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
9794 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
9795 _dummy_first_bfd_reloc_code_real
},
9796 OPERAND_TYPE_NONE
},
9797 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
9798 _dummy_first_bfd_reloc_code_real
},
9799 OPERAND_TYPE_NONE
},
9800 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
9801 BFD_RELOC_X86_64_GOT32
},
9802 OPERAND_TYPE_IMM32_32S_64_DISP32
},
9803 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
9804 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
9805 OPERAND_TYPE_IMM32_32S_DISP32
},
9806 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
9807 BFD_RELOC_X86_64_TLSDESC_CALL
},
9808 OPERAND_TYPE_IMM32_32S_DISP32
},
9813 #if defined (OBJ_MAYBE_ELF)
9818 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
9819 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
9822 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
9824 int len
= gotrel
[j
].len
;
9825 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
9827 if (gotrel
[j
].rel
[object_64bit
] != 0)
9830 char *tmpbuf
, *past_reloc
;
9832 *rel
= gotrel
[j
].rel
[object_64bit
];
9836 if (flag_code
!= CODE_64BIT
)
9838 types
->bitfield
.imm32
= 1;
9839 types
->bitfield
.disp32
= 1;
9842 *types
= gotrel
[j
].types64
;
9845 if (j
!= 0 && GOT_symbol
== NULL
)
9846 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
9848 /* The length of the first part of our input line. */
9849 first
= cp
- input_line_pointer
;
9851 /* The second part goes from after the reloc token until
9852 (and including) an end_of_line char or comma. */
9853 past_reloc
= cp
+ 1 + len
;
9855 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
9857 second
= cp
+ 1 - past_reloc
;
9859 /* Allocate and copy string. The trailing NUL shouldn't
9860 be necessary, but be safe. */
9861 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
9862 memcpy (tmpbuf
, input_line_pointer
, first
);
9863 if (second
!= 0 && *past_reloc
!= ' ')
9864 /* Replace the relocation token with ' ', so that
9865 errors like foo@GOTOFF1 will be detected. */
9866 tmpbuf
[first
++] = ' ';
9868 /* Increment length by 1 if the relocation token is
9873 memcpy (tmpbuf
+ first
, past_reloc
, second
);
9874 tmpbuf
[first
+ second
] = '\0';
9878 as_bad (_("@%s reloc is not supported with %d-bit output format"),
9879 gotrel
[j
].str
, 1 << (5 + object_64bit
));
9884 /* Might be a symbol version string. Don't as_bad here. */
9893 /* Parse operands of the form
9894 <symbol>@SECREL32+<nnn>
9896 If we find one, set up the correct relocation in RELOC and copy the
9897 input string, minus the `@SECREL32' into a malloc'd buffer for
9898 parsing by the calling routine. Return this buffer, and if ADJUST
9899 is non-null set it to the length of the string we removed from the
9900 input line. Otherwise return NULL.
9902 This function is copied from the ELF version above adjusted for PE targets. */
9905 lex_got (enum bfd_reloc_code_real
*rel ATTRIBUTE_UNUSED
,
9906 int *adjust ATTRIBUTE_UNUSED
,
9907 i386_operand_type
*types
)
9913 const enum bfd_reloc_code_real rel
[2];
9914 const i386_operand_type types64
;
9918 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
9919 BFD_RELOC_32_SECREL
},
9920 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
9926 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
9927 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
9930 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
9932 int len
= gotrel
[j
].len
;
9934 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
9936 if (gotrel
[j
].rel
[object_64bit
] != 0)
9939 char *tmpbuf
, *past_reloc
;
9941 *rel
= gotrel
[j
].rel
[object_64bit
];
9947 if (flag_code
!= CODE_64BIT
)
9949 types
->bitfield
.imm32
= 1;
9950 types
->bitfield
.disp32
= 1;
9953 *types
= gotrel
[j
].types64
;
9956 /* The length of the first part of our input line. */
9957 first
= cp
- input_line_pointer
;
9959 /* The second part goes from after the reloc token until
9960 (and including) an end_of_line char or comma. */
9961 past_reloc
= cp
+ 1 + len
;
9963 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
9965 second
= cp
+ 1 - past_reloc
;
9967 /* Allocate and copy string. The trailing NUL shouldn't
9968 be necessary, but be safe. */
9969 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
9970 memcpy (tmpbuf
, input_line_pointer
, first
);
9971 if (second
!= 0 && *past_reloc
!= ' ')
9972 /* Replace the relocation token with ' ', so that
9973 errors like foo@SECLREL321 will be detected. */
9974 tmpbuf
[first
++] = ' ';
9975 memcpy (tmpbuf
+ first
, past_reloc
, second
);
9976 tmpbuf
[first
+ second
] = '\0';
9980 as_bad (_("@%s reloc is not supported with %d-bit output format"),
9981 gotrel
[j
].str
, 1 << (5 + object_64bit
));
9986 /* Might be a symbol version string. Don't as_bad here. */
9992 bfd_reloc_code_real_type
9993 x86_cons (expressionS
*exp
, int size
)
9995 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
9997 intel_syntax
= -intel_syntax
;
10000 if (size
== 4 || (object_64bit
&& size
== 8))
10002 /* Handle @GOTOFF and the like in an expression. */
10004 char *gotfree_input_line
;
10007 save
= input_line_pointer
;
10008 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
10009 if (gotfree_input_line
)
10010 input_line_pointer
= gotfree_input_line
;
10014 if (gotfree_input_line
)
10016 /* expression () has merrily parsed up to the end of line,
10017 or a comma - in the wrong buffer. Transfer how far
10018 input_line_pointer has moved to the right buffer. */
10019 input_line_pointer
= (save
10020 + (input_line_pointer
- gotfree_input_line
)
10022 free (gotfree_input_line
);
10023 if (exp
->X_op
== O_constant
10024 || exp
->X_op
== O_absent
10025 || exp
->X_op
== O_illegal
10026 || exp
->X_op
== O_register
10027 || exp
->X_op
== O_big
)
10029 char c
= *input_line_pointer
;
10030 *input_line_pointer
= 0;
10031 as_bad (_("missing or invalid expression `%s'"), save
);
10032 *input_line_pointer
= c
;
10034 else if ((got_reloc
== BFD_RELOC_386_PLT32
10035 || got_reloc
== BFD_RELOC_X86_64_PLT32
)
10036 && exp
->X_op
!= O_symbol
)
10038 char c
= *input_line_pointer
;
10039 *input_line_pointer
= 0;
10040 as_bad (_("invalid PLT expression `%s'"), save
);
10041 *input_line_pointer
= c
;
10048 intel_syntax
= -intel_syntax
;
10051 i386_intel_simplify (exp
);
10057 signed_cons (int size
)
10059 if (flag_code
== CODE_64BIT
)
10067 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
10074 if (exp
.X_op
== O_symbol
)
10075 exp
.X_op
= O_secrel
;
10077 emit_expr (&exp
, 4);
10079 while (*input_line_pointer
++ == ',');
10081 input_line_pointer
--;
10082 demand_empty_rest_of_line ();
10086 /* Handle Vector operations. */
10089 check_VecOperations (char *op_string
, char *op_end
)
10091 const reg_entry
*mask
;
10096 && (op_end
== NULL
|| op_string
< op_end
))
10099 if (*op_string
== '{')
10103 /* Check broadcasts. */
10104 if (strncmp (op_string
, "1to", 3) == 0)
10109 goto duplicated_vec_op
;
10112 if (*op_string
== '8')
10114 else if (*op_string
== '4')
10116 else if (*op_string
== '2')
10118 else if (*op_string
== '1'
10119 && *(op_string
+1) == '6')
10126 as_bad (_("Unsupported broadcast: `%s'"), saved
);
10131 broadcast_op
.type
= bcst_type
;
10132 broadcast_op
.operand
= this_operand
;
10133 broadcast_op
.bytes
= 0;
10134 i
.broadcast
= &broadcast_op
;
10136 /* Check masking operation. */
10137 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
10139 /* k0 can't be used for write mask. */
10140 if (mask
->reg_type
.bitfield
.class != RegMask
|| !mask
->reg_num
)
10142 as_bad (_("`%s%s' can't be used for write mask"),
10143 register_prefix
, mask
->reg_name
);
10149 mask_op
.mask
= mask
;
10150 mask_op
.zeroing
= 0;
10151 mask_op
.operand
= this_operand
;
10157 goto duplicated_vec_op
;
10159 i
.mask
->mask
= mask
;
10161 /* Only "{z}" is allowed here. No need to check
10162 zeroing mask explicitly. */
10163 if (i
.mask
->operand
!= this_operand
)
10165 as_bad (_("invalid write mask `%s'"), saved
);
10170 op_string
= end_op
;
10172 /* Check zeroing-flag for masking operation. */
10173 else if (*op_string
== 'z')
10177 mask_op
.mask
= NULL
;
10178 mask_op
.zeroing
= 1;
10179 mask_op
.operand
= this_operand
;
10184 if (i
.mask
->zeroing
)
10187 as_bad (_("duplicated `%s'"), saved
);
10191 i
.mask
->zeroing
= 1;
10193 /* Only "{%k}" is allowed here. No need to check mask
10194 register explicitly. */
10195 if (i
.mask
->operand
!= this_operand
)
10197 as_bad (_("invalid zeroing-masking `%s'"),
10206 goto unknown_vec_op
;
10208 if (*op_string
!= '}')
10210 as_bad (_("missing `}' in `%s'"), saved
);
10215 /* Strip whitespace since the addition of pseudo prefixes
10216 changed how the scrubber treats '{'. */
10217 if (is_space_char (*op_string
))
10223 /* We don't know this one. */
10224 as_bad (_("unknown vector operation: `%s'"), saved
);
10228 if (i
.mask
&& i
.mask
->zeroing
&& !i
.mask
->mask
)
10230 as_bad (_("zeroing-masking only allowed with write mask"));
10238 i386_immediate (char *imm_start
)
10240 char *save_input_line_pointer
;
10241 char *gotfree_input_line
;
10244 i386_operand_type types
;
10246 operand_type_set (&types
, ~0);
10248 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
10250 as_bad (_("at most %d immediate operands are allowed"),
10251 MAX_IMMEDIATE_OPERANDS
);
10255 exp
= &im_expressions
[i
.imm_operands
++];
10256 i
.op
[this_operand
].imms
= exp
;
10258 if (is_space_char (*imm_start
))
10261 save_input_line_pointer
= input_line_pointer
;
10262 input_line_pointer
= imm_start
;
10264 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10265 if (gotfree_input_line
)
10266 input_line_pointer
= gotfree_input_line
;
10268 exp_seg
= expression (exp
);
10270 SKIP_WHITESPACE ();
10272 /* Handle vector operations. */
10273 if (*input_line_pointer
== '{')
10275 input_line_pointer
= check_VecOperations (input_line_pointer
,
10277 if (input_line_pointer
== NULL
)
10281 if (*input_line_pointer
)
10282 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10284 input_line_pointer
= save_input_line_pointer
;
10285 if (gotfree_input_line
)
10287 free (gotfree_input_line
);
10289 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
10290 exp
->X_op
= O_illegal
;
10293 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
10297 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10298 i386_operand_type types
, const char *imm_start
)
10300 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
10303 as_bad (_("missing or invalid immediate expression `%s'"),
10307 else if (exp
->X_op
== O_constant
)
10309 /* Size it properly later. */
10310 i
.types
[this_operand
].bitfield
.imm64
= 1;
10311 /* If not 64bit, sign extend val. */
10312 if (flag_code
!= CODE_64BIT
10313 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
10315 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
10317 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10318 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
10319 && exp_seg
!= absolute_section
10320 && exp_seg
!= text_section
10321 && exp_seg
!= data_section
10322 && exp_seg
!= bss_section
10323 && exp_seg
!= undefined_section
10324 && !bfd_is_com_section (exp_seg
))
10326 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10330 else if (!intel_syntax
&& exp_seg
== reg_section
)
10333 as_bad (_("illegal immediate register operand %s"), imm_start
);
10338 /* This is an address. The size of the address will be
10339 determined later, depending on destination register,
10340 suffix, or the default for the section. */
10341 i
.types
[this_operand
].bitfield
.imm8
= 1;
10342 i
.types
[this_operand
].bitfield
.imm16
= 1;
10343 i
.types
[this_operand
].bitfield
.imm32
= 1;
10344 i
.types
[this_operand
].bitfield
.imm32s
= 1;
10345 i
.types
[this_operand
].bitfield
.imm64
= 1;
10346 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10354 i386_scale (char *scale
)
10357 char *save
= input_line_pointer
;
10359 input_line_pointer
= scale
;
10360 val
= get_absolute_expression ();
10365 i
.log2_scale_factor
= 0;
10368 i
.log2_scale_factor
= 1;
10371 i
.log2_scale_factor
= 2;
10374 i
.log2_scale_factor
= 3;
10378 char sep
= *input_line_pointer
;
10380 *input_line_pointer
= '\0';
10381 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
10383 *input_line_pointer
= sep
;
10384 input_line_pointer
= save
;
10388 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
10390 as_warn (_("scale factor of %d without an index register"),
10391 1 << i
.log2_scale_factor
);
10392 i
.log2_scale_factor
= 0;
10394 scale
= input_line_pointer
;
10395 input_line_pointer
= save
;
10400 i386_displacement (char *disp_start
, char *disp_end
)
10404 char *save_input_line_pointer
;
10405 char *gotfree_input_line
;
10407 i386_operand_type bigdisp
, types
= anydisp
;
10410 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
10412 as_bad (_("at most %d displacement operands are allowed"),
10413 MAX_MEMORY_OPERANDS
);
10417 operand_type_set (&bigdisp
, 0);
10419 || i
.types
[this_operand
].bitfield
.baseindex
10420 || (current_templates
->start
->opcode_modifier
.jump
!= JUMP
10421 && current_templates
->start
->opcode_modifier
.jump
!= JUMP_DWORD
))
10423 i386_addressing_mode ();
10424 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
10425 if (flag_code
== CODE_64BIT
)
10429 bigdisp
.bitfield
.disp32s
= 1;
10430 bigdisp
.bitfield
.disp64
= 1;
10433 bigdisp
.bitfield
.disp32
= 1;
10435 else if ((flag_code
== CODE_16BIT
) ^ override
)
10436 bigdisp
.bitfield
.disp16
= 1;
10438 bigdisp
.bitfield
.disp32
= 1;
10442 /* For PC-relative branches, the width of the displacement may be
10443 dependent upon data size, but is never dependent upon address size.
10444 Also make sure to not unintentionally match against a non-PC-relative
10445 branch template. */
10446 static templates aux_templates
;
10447 const insn_template
*t
= current_templates
->start
;
10448 bfd_boolean has_intel64
= FALSE
;
10450 aux_templates
.start
= t
;
10451 while (++t
< current_templates
->end
)
10453 if (t
->opcode_modifier
.jump
10454 != current_templates
->start
->opcode_modifier
.jump
)
10456 if ((t
->opcode_modifier
.isa64
>= INTEL64
))
10457 has_intel64
= TRUE
;
10459 if (t
< current_templates
->end
)
10461 aux_templates
.end
= t
;
10462 current_templates
= &aux_templates
;
10465 override
= (i
.prefix
[DATA_PREFIX
] != 0);
10466 if (flag_code
== CODE_64BIT
)
10468 if ((override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
10469 && (!intel64
|| !has_intel64
))
10470 bigdisp
.bitfield
.disp16
= 1;
10472 bigdisp
.bitfield
.disp32s
= 1;
10477 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
10479 : LONG_MNEM_SUFFIX
));
10480 bigdisp
.bitfield
.disp32
= 1;
10481 if ((flag_code
== CODE_16BIT
) ^ override
)
10483 bigdisp
.bitfield
.disp32
= 0;
10484 bigdisp
.bitfield
.disp16
= 1;
10488 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
10491 exp
= &disp_expressions
[i
.disp_operands
];
10492 i
.op
[this_operand
].disps
= exp
;
10494 save_input_line_pointer
= input_line_pointer
;
10495 input_line_pointer
= disp_start
;
10496 END_STRING_AND_SAVE (disp_end
);
10498 #ifndef GCC_ASM_O_HACK
10499 #define GCC_ASM_O_HACK 0
10502 END_STRING_AND_SAVE (disp_end
+ 1);
10503 if (i
.types
[this_operand
].bitfield
.baseIndex
10504 && displacement_string_end
[-1] == '+')
10506 /* This hack is to avoid a warning when using the "o"
10507 constraint within gcc asm statements.
10510 #define _set_tssldt_desc(n,addr,limit,type) \
10511 __asm__ __volatile__ ( \
10512 "movw %w2,%0\n\t" \
10513 "movw %w1,2+%0\n\t" \
10514 "rorl $16,%1\n\t" \
10515 "movb %b1,4+%0\n\t" \
10516 "movb %4,5+%0\n\t" \
10517 "movb $0,6+%0\n\t" \
10518 "movb %h1,7+%0\n\t" \
10520 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
10522 This works great except that the output assembler ends
10523 up looking a bit weird if it turns out that there is
10524 no offset. You end up producing code that looks like:
10537 So here we provide the missing zero. */
10539 *displacement_string_end
= '0';
10542 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10543 if (gotfree_input_line
)
10544 input_line_pointer
= gotfree_input_line
;
10546 exp_seg
= expression (exp
);
10548 SKIP_WHITESPACE ();
10549 if (*input_line_pointer
)
10550 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10552 RESTORE_END_STRING (disp_end
+ 1);
10554 input_line_pointer
= save_input_line_pointer
;
10555 if (gotfree_input_line
)
10557 free (gotfree_input_line
);
10559 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
10560 exp
->X_op
= O_illegal
;
10563 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
10565 RESTORE_END_STRING (disp_end
);
10571 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10572 i386_operand_type types
, const char *disp_start
)
10574 i386_operand_type bigdisp
;
10577 /* We do this to make sure that the section symbol is in
10578 the symbol table. We will ultimately change the relocation
10579 to be relative to the beginning of the section. */
10580 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
10581 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
10582 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
10584 if (exp
->X_op
!= O_symbol
)
10587 if (S_IS_LOCAL (exp
->X_add_symbol
)
10588 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
10589 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
10590 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
10591 exp
->X_op
= O_subtract
;
10592 exp
->X_op_symbol
= GOT_symbol
;
10593 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
10594 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
10595 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
10596 i
.reloc
[this_operand
] = BFD_RELOC_64
;
10598 i
.reloc
[this_operand
] = BFD_RELOC_32
;
10601 else if (exp
->X_op
== O_absent
10602 || exp
->X_op
== O_illegal
10603 || exp
->X_op
== O_big
)
10606 as_bad (_("missing or invalid displacement expression `%s'"),
10611 else if (flag_code
== CODE_64BIT
10612 && !i
.prefix
[ADDR_PREFIX
]
10613 && exp
->X_op
== O_constant
)
10615 /* Since displacement is signed extended to 64bit, don't allow
10616 disp32 and turn off disp32s if they are out of range. */
10617 i
.types
[this_operand
].bitfield
.disp32
= 0;
10618 if (!fits_in_signed_long (exp
->X_add_number
))
10620 i
.types
[this_operand
].bitfield
.disp32s
= 0;
10621 if (i
.types
[this_operand
].bitfield
.baseindex
)
10623 as_bad (_("0x%lx out range of signed 32bit displacement"),
10624 (long) exp
->X_add_number
);
10630 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10631 else if (exp
->X_op
!= O_constant
10632 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
10633 && exp_seg
!= absolute_section
10634 && exp_seg
!= text_section
10635 && exp_seg
!= data_section
10636 && exp_seg
!= bss_section
10637 && exp_seg
!= undefined_section
10638 && !bfd_is_com_section (exp_seg
))
10640 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10645 if (current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
10646 /* Constants get taken care of by optimize_disp(). */
10647 && exp
->X_op
!= O_constant
)
10648 i
.types
[this_operand
].bitfield
.disp8
= 1;
10650 /* Check if this is a displacement only operand. */
10651 bigdisp
= i
.types
[this_operand
];
10652 bigdisp
.bitfield
.disp8
= 0;
10653 bigdisp
.bitfield
.disp16
= 0;
10654 bigdisp
.bitfield
.disp32
= 0;
10655 bigdisp
.bitfield
.disp32s
= 0;
10656 bigdisp
.bitfield
.disp64
= 0;
10657 if (operand_type_all_zero (&bigdisp
))
10658 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10664 /* Return the active addressing mode, taking address override and
10665 registers forming the address into consideration. Update the
10666 address override prefix if necessary. */
10668 static enum flag_code
10669 i386_addressing_mode (void)
10671 enum flag_code addr_mode
;
10673 if (i
.prefix
[ADDR_PREFIX
])
10674 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
10675 else if (flag_code
== CODE_16BIT
10676 && current_templates
->start
->cpu_flags
.bitfield
.cpumpx
10677 /* Avoid replacing the "16-bit addressing not allowed" diagnostic
10678 from md_assemble() by "is not a valid base/index expression"
10679 when there is a base and/or index. */
10680 && !i
.types
[this_operand
].bitfield
.baseindex
)
10682 /* MPX insn memory operands with neither base nor index must be forced
10683 to use 32-bit addressing in 16-bit mode. */
10684 addr_mode
= CODE_32BIT
;
10685 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
10687 gas_assert (!i
.types
[this_operand
].bitfield
.disp16
);
10688 gas_assert (!i
.types
[this_operand
].bitfield
.disp32
);
10692 addr_mode
= flag_code
;
10694 #if INFER_ADDR_PREFIX
10695 if (i
.mem_operands
== 0)
10697 /* Infer address prefix from the first memory operand. */
10698 const reg_entry
*addr_reg
= i
.base_reg
;
10700 if (addr_reg
== NULL
)
10701 addr_reg
= i
.index_reg
;
10705 if (addr_reg
->reg_type
.bitfield
.dword
)
10706 addr_mode
= CODE_32BIT
;
10707 else if (flag_code
!= CODE_64BIT
10708 && addr_reg
->reg_type
.bitfield
.word
)
10709 addr_mode
= CODE_16BIT
;
10711 if (addr_mode
!= flag_code
)
10713 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
10715 /* Change the size of any displacement too. At most one
10716 of Disp16 or Disp32 is set.
10717 FIXME. There doesn't seem to be any real need for
10718 separate Disp16 and Disp32 flags. The same goes for
10719 Imm16 and Imm32. Removing them would probably clean
10720 up the code quite a lot. */
10721 if (flag_code
!= CODE_64BIT
10722 && (i
.types
[this_operand
].bitfield
.disp16
10723 || i
.types
[this_operand
].bitfield
.disp32
))
10724 i
.types
[this_operand
]
10725 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
10735 /* Make sure the memory operand we've been dealt is valid.
10736 Return 1 on success, 0 on a failure. */
10739 i386_index_check (const char *operand_string
)
10741 const char *kind
= "base/index";
10742 enum flag_code addr_mode
= i386_addressing_mode ();
10744 if (current_templates
->start
->opcode_modifier
.isstring
10745 && !current_templates
->start
->cpu_flags
.bitfield
.cpupadlock
10746 && (current_templates
->end
[-1].opcode_modifier
.isstring
10747 || i
.mem_operands
))
10749 /* Memory operands of string insns are special in that they only allow
10750 a single register (rDI, rSI, or rBX) as their memory address. */
10751 const reg_entry
*expected_reg
;
10752 static const char *di_si
[][2] =
10758 static const char *bx
[] = { "ebx", "bx", "rbx" };
10760 kind
= "string address";
10762 if (current_templates
->start
->opcode_modifier
.repprefixok
)
10764 int es_op
= current_templates
->end
[-1].opcode_modifier
.isstring
10765 - IS_STRING_ES_OP0
;
10768 if (!current_templates
->end
[-1].operand_types
[0].bitfield
.baseindex
10769 || ((!i
.mem_operands
!= !intel_syntax
)
10770 && current_templates
->end
[-1].operand_types
[1]
10771 .bitfield
.baseindex
))
10773 expected_reg
= hash_find (reg_hash
, di_si
[addr_mode
][op
== es_op
]);
10776 expected_reg
= hash_find (reg_hash
, bx
[addr_mode
]);
10778 if (i
.base_reg
!= expected_reg
10780 || operand_type_check (i
.types
[this_operand
], disp
))
10782 /* The second memory operand must have the same size as
10786 && !((addr_mode
== CODE_64BIT
10787 && i
.base_reg
->reg_type
.bitfield
.qword
)
10788 || (addr_mode
== CODE_32BIT
10789 ? i
.base_reg
->reg_type
.bitfield
.dword
10790 : i
.base_reg
->reg_type
.bitfield
.word
)))
10793 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
10795 intel_syntax
? '[' : '(',
10797 expected_reg
->reg_name
,
10798 intel_syntax
? ']' : ')');
10805 as_bad (_("`%s' is not a valid %s expression"),
10806 operand_string
, kind
);
10811 if (addr_mode
!= CODE_16BIT
)
10813 /* 32-bit/64-bit checks. */
10815 && ((addr_mode
== CODE_64BIT
10816 ? !i
.base_reg
->reg_type
.bitfield
.qword
10817 : !i
.base_reg
->reg_type
.bitfield
.dword
)
10818 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
10819 || i
.base_reg
->reg_num
== RegIZ
))
10821 && !i
.index_reg
->reg_type
.bitfield
.xmmword
10822 && !i
.index_reg
->reg_type
.bitfield
.ymmword
10823 && !i
.index_reg
->reg_type
.bitfield
.zmmword
10824 && ((addr_mode
== CODE_64BIT
10825 ? !i
.index_reg
->reg_type
.bitfield
.qword
10826 : !i
.index_reg
->reg_type
.bitfield
.dword
)
10827 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
10830 /* bndmk, bndldx, and bndstx have special restrictions. */
10831 if (current_templates
->start
->base_opcode
== 0xf30f1b
10832 || (current_templates
->start
->base_opcode
& ~1) == 0x0f1a)
10834 /* They cannot use RIP-relative addressing. */
10835 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
10837 as_bad (_("`%s' cannot be used here"), operand_string
);
10841 /* bndldx and bndstx ignore their scale factor. */
10842 if (current_templates
->start
->base_opcode
!= 0xf30f1b
10843 && i
.log2_scale_factor
)
10844 as_warn (_("register scaling is being ignored here"));
10849 /* 16-bit checks. */
10851 && (!i
.base_reg
->reg_type
.bitfield
.word
10852 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
10854 && (!i
.index_reg
->reg_type
.bitfield
.word
10855 || !i
.index_reg
->reg_type
.bitfield
.baseindex
10857 && i
.base_reg
->reg_num
< 6
10858 && i
.index_reg
->reg_num
>= 6
10859 && i
.log2_scale_factor
== 0))))
10866 /* Handle vector immediates. */
10869 RC_SAE_immediate (const char *imm_start
)
10871 unsigned int match_found
, j
;
10872 const char *pstr
= imm_start
;
10880 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
10882 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
10886 rc_op
.type
= RC_NamesTable
[j
].type
;
10887 rc_op
.operand
= this_operand
;
10888 i
.rounding
= &rc_op
;
10892 as_bad (_("duplicated `%s'"), imm_start
);
10895 pstr
+= RC_NamesTable
[j
].len
;
10903 if (*pstr
++ != '}')
10905 as_bad (_("Missing '}': '%s'"), imm_start
);
10908 /* RC/SAE immediate string should contain nothing more. */;
10911 as_bad (_("Junk after '}': '%s'"), imm_start
);
10915 exp
= &im_expressions
[i
.imm_operands
++];
10916 i
.op
[this_operand
].imms
= exp
;
10918 exp
->X_op
= O_constant
;
10919 exp
->X_add_number
= 0;
10920 exp
->X_add_symbol
= (symbolS
*) 0;
10921 exp
->X_op_symbol
= (symbolS
*) 0;
10923 i
.types
[this_operand
].bitfield
.imm8
= 1;
10927 /* Only string instructions can have a second memory operand, so
10928 reduce current_templates to just those if it contains any. */
10930 maybe_adjust_templates (void)
10932 const insn_template
*t
;
10934 gas_assert (i
.mem_operands
== 1);
10936 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
10937 if (t
->opcode_modifier
.isstring
)
10940 if (t
< current_templates
->end
)
10942 static templates aux_templates
;
10943 bfd_boolean recheck
;
10945 aux_templates
.start
= t
;
10946 for (; t
< current_templates
->end
; ++t
)
10947 if (!t
->opcode_modifier
.isstring
)
10949 aux_templates
.end
= t
;
10951 /* Determine whether to re-check the first memory operand. */
10952 recheck
= (aux_templates
.start
!= current_templates
->start
10953 || t
!= current_templates
->end
);
10955 current_templates
= &aux_templates
;
10959 i
.mem_operands
= 0;
10960 if (i
.memop1_string
!= NULL
10961 && i386_index_check (i
.memop1_string
) == 0)
10963 i
.mem_operands
= 1;
10970 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
10974 i386_att_operand (char *operand_string
)
10976 const reg_entry
*r
;
10978 char *op_string
= operand_string
;
10980 if (is_space_char (*op_string
))
10983 /* We check for an absolute prefix (differentiating,
10984 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
10985 if (*op_string
== ABSOLUTE_PREFIX
)
10988 if (is_space_char (*op_string
))
10990 i
.jumpabsolute
= TRUE
;
10993 /* Check if operand is a register. */
10994 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
10996 i386_operand_type temp
;
10998 /* Check for a segment override by searching for ':' after a
10999 segment register. */
11000 op_string
= end_op
;
11001 if (is_space_char (*op_string
))
11003 if (*op_string
== ':' && r
->reg_type
.bitfield
.class == SReg
)
11005 switch (r
->reg_num
)
11008 i
.seg
[i
.mem_operands
] = &es
;
11011 i
.seg
[i
.mem_operands
] = &cs
;
11014 i
.seg
[i
.mem_operands
] = &ss
;
11017 i
.seg
[i
.mem_operands
] = &ds
;
11020 i
.seg
[i
.mem_operands
] = &fs
;
11023 i
.seg
[i
.mem_operands
] = &gs
;
11027 /* Skip the ':' and whitespace. */
11029 if (is_space_char (*op_string
))
11032 if (!is_digit_char (*op_string
)
11033 && !is_identifier_char (*op_string
)
11034 && *op_string
!= '('
11035 && *op_string
!= ABSOLUTE_PREFIX
)
11037 as_bad (_("bad memory operand `%s'"), op_string
);
11040 /* Handle case of %es:*foo. */
11041 if (*op_string
== ABSOLUTE_PREFIX
)
11044 if (is_space_char (*op_string
))
11046 i
.jumpabsolute
= TRUE
;
11048 goto do_memory_reference
;
11051 /* Handle vector operations. */
11052 if (*op_string
== '{')
11054 op_string
= check_VecOperations (op_string
, NULL
);
11055 if (op_string
== NULL
)
11061 as_bad (_("junk `%s' after register"), op_string
);
11064 temp
= r
->reg_type
;
11065 temp
.bitfield
.baseindex
= 0;
11066 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
11068 i
.types
[this_operand
].bitfield
.unspecified
= 0;
11069 i
.op
[this_operand
].regs
= r
;
11072 else if (*op_string
== REGISTER_PREFIX
)
11074 as_bad (_("bad register name `%s'"), op_string
);
11077 else if (*op_string
== IMMEDIATE_PREFIX
)
11080 if (i
.jumpabsolute
)
11082 as_bad (_("immediate operand illegal with absolute jump"));
11085 if (!i386_immediate (op_string
))
11088 else if (RC_SAE_immediate (operand_string
))
11090 /* If it is a RC or SAE immediate, do nothing. */
11093 else if (is_digit_char (*op_string
)
11094 || is_identifier_char (*op_string
)
11095 || *op_string
== '"'
11096 || *op_string
== '(')
11098 /* This is a memory reference of some sort. */
11101 /* Start and end of displacement string expression (if found). */
11102 char *displacement_string_start
;
11103 char *displacement_string_end
;
11106 do_memory_reference
:
11107 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
11109 if ((i
.mem_operands
== 1
11110 && !current_templates
->start
->opcode_modifier
.isstring
)
11111 || i
.mem_operands
== 2)
11113 as_bad (_("too many memory references for `%s'"),
11114 current_templates
->start
->name
);
11118 /* Check for base index form. We detect the base index form by
11119 looking for an ')' at the end of the operand, searching
11120 for the '(' matching it, and finding a REGISTER_PREFIX or ','
11122 base_string
= op_string
+ strlen (op_string
);
11124 /* Handle vector operations. */
11125 vop_start
= strchr (op_string
, '{');
11126 if (vop_start
&& vop_start
< base_string
)
11128 if (check_VecOperations (vop_start
, base_string
) == NULL
)
11130 base_string
= vop_start
;
11134 if (is_space_char (*base_string
))
11137 /* If we only have a displacement, set-up for it to be parsed later. */
11138 displacement_string_start
= op_string
;
11139 displacement_string_end
= base_string
+ 1;
11141 if (*base_string
== ')')
11144 unsigned int parens_balanced
= 1;
11145 /* We've already checked that the number of left & right ()'s are
11146 equal, so this loop will not be infinite. */
11150 if (*base_string
== ')')
11152 if (*base_string
== '(')
11155 while (parens_balanced
);
11157 temp_string
= base_string
;
11159 /* Skip past '(' and whitespace. */
11161 if (is_space_char (*base_string
))
11164 if (*base_string
== ','
11165 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
11168 displacement_string_end
= temp_string
;
11170 i
.types
[this_operand
].bitfield
.baseindex
= 1;
11174 base_string
= end_op
;
11175 if (is_space_char (*base_string
))
11179 /* There may be an index reg or scale factor here. */
11180 if (*base_string
== ',')
11183 if (is_space_char (*base_string
))
11186 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
11189 base_string
= end_op
;
11190 if (is_space_char (*base_string
))
11192 if (*base_string
== ',')
11195 if (is_space_char (*base_string
))
11198 else if (*base_string
!= ')')
11200 as_bad (_("expecting `,' or `)' "
11201 "after index register in `%s'"),
11206 else if (*base_string
== REGISTER_PREFIX
)
11208 end_op
= strchr (base_string
, ',');
11211 as_bad (_("bad register name `%s'"), base_string
);
11215 /* Check for scale factor. */
11216 if (*base_string
!= ')')
11218 char *end_scale
= i386_scale (base_string
);
11223 base_string
= end_scale
;
11224 if (is_space_char (*base_string
))
11226 if (*base_string
!= ')')
11228 as_bad (_("expecting `)' "
11229 "after scale factor in `%s'"),
11234 else if (!i
.index_reg
)
11236 as_bad (_("expecting index register or scale factor "
11237 "after `,'; got '%c'"),
11242 else if (*base_string
!= ')')
11244 as_bad (_("expecting `,' or `)' "
11245 "after base register in `%s'"),
11250 else if (*base_string
== REGISTER_PREFIX
)
11252 end_op
= strchr (base_string
, ',');
11255 as_bad (_("bad register name `%s'"), base_string
);
11260 /* If there's an expression beginning the operand, parse it,
11261 assuming displacement_string_start and
11262 displacement_string_end are meaningful. */
11263 if (displacement_string_start
!= displacement_string_end
)
11265 if (!i386_displacement (displacement_string_start
,
11266 displacement_string_end
))
11270 /* Special case for (%dx) while doing input/output op. */
11272 && i
.base_reg
->reg_type
.bitfield
.instance
== RegD
11273 && i
.base_reg
->reg_type
.bitfield
.word
11274 && i
.index_reg
== 0
11275 && i
.log2_scale_factor
== 0
11276 && i
.seg
[i
.mem_operands
] == 0
11277 && !operand_type_check (i
.types
[this_operand
], disp
))
11279 i
.types
[this_operand
] = i
.base_reg
->reg_type
;
11283 if (i386_index_check (operand_string
) == 0)
11285 i
.flags
[this_operand
] |= Operand_Mem
;
11286 if (i
.mem_operands
== 0)
11287 i
.memop1_string
= xstrdup (operand_string
);
11292 /* It's not a memory operand; argh! */
11293 as_bad (_("invalid char %s beginning operand %d `%s'"),
11294 output_invalid (*op_string
),
11299 return 1; /* Normal return. */
11302 /* Calculate the maximum variable size (i.e., excluding fr_fix)
11303 that an rs_machine_dependent frag may reach. */
11306 i386_frag_max_var (fragS
*frag
)
11308 /* The only relaxable frags are for jumps.
11309 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
11310 gas_assert (frag
->fr_type
== rs_machine_dependent
);
11311 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
11314 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11316 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
11318 /* STT_GNU_IFUNC symbol must go through PLT. */
11319 if ((symbol_get_bfdsym (fr_symbol
)->flags
11320 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
11323 if (!S_IS_EXTERNAL (fr_symbol
))
11324 /* Symbol may be weak or local. */
11325 return !S_IS_WEAK (fr_symbol
);
11327 /* Global symbols with non-default visibility can't be preempted. */
11328 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
11331 if (fr_var
!= NO_RELOC
)
11332 switch ((enum bfd_reloc_code_real
) fr_var
)
11334 case BFD_RELOC_386_PLT32
:
11335 case BFD_RELOC_X86_64_PLT32
:
11336 /* Symbol with PLT relocation may be preempted. */
11342 /* Global symbols with default visibility in a shared library may be
11343 preempted by another definition. */
11348 /* Table 3-2. Macro-Fusible Instructions in Haswell Microarchitecture
11349 Note also work for Skylake and Cascadelake.
11350 ---------------------------------------------------------------------
11351 | JCC | ADD/SUB/CMP | INC/DEC | TEST/AND |
11352 | ------ | ----------- | ------- | -------- |
11354 | Jno | N | N | Y |
11355 | Jc/Jb | Y | N | Y |
11356 | Jae/Jnb | Y | N | Y |
11357 | Je/Jz | Y | Y | Y |
11358 | Jne/Jnz | Y | Y | Y |
11359 | Jna/Jbe | Y | N | Y |
11360 | Ja/Jnbe | Y | N | Y |
11362 | Jns | N | N | Y |
11363 | Jp/Jpe | N | N | Y |
11364 | Jnp/Jpo | N | N | Y |
11365 | Jl/Jnge | Y | Y | Y |
11366 | Jge/Jnl | Y | Y | Y |
11367 | Jle/Jng | Y | Y | Y |
11368 | Jg/Jnle | Y | Y | Y |
11369 --------------------------------------------------------------------- */
11371 i386_macro_fusible_p (enum mf_cmp_kind mf_cmp
, enum mf_jcc_kind mf_jcc
)
11373 if (mf_cmp
== mf_cmp_alu_cmp
)
11374 return ((mf_jcc
>= mf_jcc_jc
&& mf_jcc
<= mf_jcc_jna
)
11375 || mf_jcc
== mf_jcc_jl
|| mf_jcc
== mf_jcc_jle
);
11376 if (mf_cmp
== mf_cmp_incdec
)
11377 return (mf_jcc
== mf_jcc_je
|| mf_jcc
== mf_jcc_jl
11378 || mf_jcc
== mf_jcc_jle
);
11379 if (mf_cmp
== mf_cmp_test_and
)
11384 /* Return the next non-empty frag. */
11387 i386_next_non_empty_frag (fragS
*fragP
)
11389 /* There may be a frag with a ".fill 0" when there is no room in
11390 the current frag for frag_grow in output_insn. */
11391 for (fragP
= fragP
->fr_next
;
11393 && fragP
->fr_type
== rs_fill
11394 && fragP
->fr_fix
== 0);
11395 fragP
= fragP
->fr_next
)
11400 /* Return the next jcc frag after BRANCH_PADDING. */
11403 i386_next_fusible_jcc_frag (fragS
*maybe_cmp_fragP
, fragS
*pad_fragP
)
11405 fragS
*branch_fragP
;
11409 if (pad_fragP
->fr_type
== rs_machine_dependent
11410 && (TYPE_FROM_RELAX_STATE (pad_fragP
->fr_subtype
)
11411 == BRANCH_PADDING
))
11413 branch_fragP
= i386_next_non_empty_frag (pad_fragP
);
11414 if (branch_fragP
->fr_type
!= rs_machine_dependent
)
11416 if (TYPE_FROM_RELAX_STATE (branch_fragP
->fr_subtype
) == COND_JUMP
11417 && i386_macro_fusible_p (maybe_cmp_fragP
->tc_frag_data
.mf_type
,
11418 pad_fragP
->tc_frag_data
.mf_type
))
11419 return branch_fragP
;
11425 /* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags. */
11428 i386_classify_machine_dependent_frag (fragS
*fragP
)
11432 fragS
*branch_fragP
;
11434 unsigned int max_prefix_length
;
11436 if (fragP
->tc_frag_data
.classified
)
11439 /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING. Convert
11440 FUSED_JCC_PADDING and merge BRANCH_PADDING. */
11441 for (next_fragP
= fragP
;
11442 next_fragP
!= NULL
;
11443 next_fragP
= next_fragP
->fr_next
)
11445 next_fragP
->tc_frag_data
.classified
= 1;
11446 if (next_fragP
->fr_type
== rs_machine_dependent
)
11447 switch (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
))
11449 case BRANCH_PADDING
:
11450 /* The BRANCH_PADDING frag must be followed by a branch
11452 branch_fragP
= i386_next_non_empty_frag (next_fragP
);
11453 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11455 case FUSED_JCC_PADDING
:
11456 /* Check if this is a fused jcc:
11458 CMP like instruction
11462 cmp_fragP
= i386_next_non_empty_frag (next_fragP
);
11463 pad_fragP
= i386_next_non_empty_frag (cmp_fragP
);
11464 branch_fragP
= i386_next_fusible_jcc_frag (next_fragP
, pad_fragP
);
11467 /* The BRANCH_PADDING frag is merged with the
11468 FUSED_JCC_PADDING frag. */
11469 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11470 /* CMP like instruction size. */
11471 next_fragP
->tc_frag_data
.cmp_size
= cmp_fragP
->fr_fix
;
11472 frag_wane (pad_fragP
);
11473 /* Skip to branch_fragP. */
11474 next_fragP
= branch_fragP
;
11476 else if (next_fragP
->tc_frag_data
.max_prefix_length
)
11478 /* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
11480 next_fragP
->fr_subtype
11481 = ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0);
11482 next_fragP
->tc_frag_data
.max_bytes
11483 = next_fragP
->tc_frag_data
.max_prefix_length
;
11484 /* This will be updated in the BRANCH_PREFIX scan. */
11485 next_fragP
->tc_frag_data
.max_prefix_length
= 0;
11488 frag_wane (next_fragP
);
11493 /* Stop if there is no BRANCH_PREFIX. */
11494 if (!align_branch_prefix_size
)
11497 /* Scan for BRANCH_PREFIX. */
11498 for (; fragP
!= NULL
; fragP
= fragP
->fr_next
)
11500 if (fragP
->fr_type
!= rs_machine_dependent
11501 || (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
11505 /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
11506 COND_JUMP_PREFIX. */
11507 max_prefix_length
= 0;
11508 for (next_fragP
= fragP
;
11509 next_fragP
!= NULL
;
11510 next_fragP
= next_fragP
->fr_next
)
11512 if (next_fragP
->fr_type
== rs_fill
)
11513 /* Skip rs_fill frags. */
11515 else if (next_fragP
->fr_type
!= rs_machine_dependent
)
11516 /* Stop for all other frags. */
11519 /* rs_machine_dependent frags. */
11520 if (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11523 /* Count BRANCH_PREFIX frags. */
11524 if (max_prefix_length
>= MAX_FUSED_JCC_PADDING_SIZE
)
11526 max_prefix_length
= MAX_FUSED_JCC_PADDING_SIZE
;
11527 frag_wane (next_fragP
);
11531 += next_fragP
->tc_frag_data
.max_bytes
;
11533 else if ((TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11535 || (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11536 == FUSED_JCC_PADDING
))
11538 /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING. */
11539 fragP
->tc_frag_data
.u
.padding_fragP
= next_fragP
;
11543 /* Stop for other rs_machine_dependent frags. */
11547 fragP
->tc_frag_data
.max_prefix_length
= max_prefix_length
;
11549 /* Skip to the next frag. */
11550 fragP
= next_fragP
;
11554 /* Compute padding size for
11557 CMP like instruction
11559 COND_JUMP/UNCOND_JUMP
11564 COND_JUMP/UNCOND_JUMP
11568 i386_branch_padding_size (fragS
*fragP
, offsetT address
)
11570 unsigned int offset
, size
, padding_size
;
11571 fragS
*branch_fragP
= fragP
->tc_frag_data
.u
.branch_fragP
;
11573 /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag. */
11575 address
= fragP
->fr_address
;
11576 address
+= fragP
->fr_fix
;
11578 /* CMP like instrunction size. */
11579 size
= fragP
->tc_frag_data
.cmp_size
;
11581 /* The base size of the branch frag. */
11582 size
+= branch_fragP
->fr_fix
;
11584 /* Add opcode and displacement bytes for the rs_machine_dependent
11586 if (branch_fragP
->fr_type
== rs_machine_dependent
)
11587 size
+= md_relax_table
[branch_fragP
->fr_subtype
].rlx_length
;
11589 /* Check if branch is within boundary and doesn't end at the last
11591 offset
= address
& ((1U << align_branch_power
) - 1);
11592 if ((offset
+ size
) >= (1U << align_branch_power
))
11593 /* Padding needed to avoid crossing boundary. */
11594 padding_size
= (1U << align_branch_power
) - offset
;
11596 /* No padding needed. */
11599 /* The return value may be saved in tc_frag_data.length which is
11601 if (!fits_in_unsigned_byte (padding_size
))
11604 return padding_size
;
11607 /* i386_generic_table_relax_frag()
11609 Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
11610 grow/shrink padding to align branch frags. Hand others to
11614 i386_generic_table_relax_frag (segT segment
, fragS
*fragP
, long stretch
)
11616 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
11617 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
11619 long padding_size
= i386_branch_padding_size (fragP
, 0);
11620 long grow
= padding_size
- fragP
->tc_frag_data
.length
;
11622 /* When the BRANCH_PREFIX frag is used, the computed address
11623 must match the actual address and there should be no padding. */
11624 if (fragP
->tc_frag_data
.padding_address
11625 && (fragP
->tc_frag_data
.padding_address
!= fragP
->fr_address
11629 /* Update the padding size. */
11631 fragP
->tc_frag_data
.length
= padding_size
;
11635 else if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
11637 fragS
*padding_fragP
, *next_fragP
;
11638 long padding_size
, left_size
, last_size
;
11640 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
11641 if (!padding_fragP
)
11642 /* Use the padding set by the leading BRANCH_PREFIX frag. */
11643 return (fragP
->tc_frag_data
.length
11644 - fragP
->tc_frag_data
.last_length
);
11646 /* Compute the relative address of the padding frag in the very
11647 first time where the BRANCH_PREFIX frag sizes are zero. */
11648 if (!fragP
->tc_frag_data
.padding_address
)
11649 fragP
->tc_frag_data
.padding_address
11650 = padding_fragP
->fr_address
- (fragP
->fr_address
- stretch
);
11652 /* First update the last length from the previous interation. */
11653 left_size
= fragP
->tc_frag_data
.prefix_length
;
11654 for (next_fragP
= fragP
;
11655 next_fragP
!= padding_fragP
;
11656 next_fragP
= next_fragP
->fr_next
)
11657 if (next_fragP
->fr_type
== rs_machine_dependent
11658 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11663 int max
= next_fragP
->tc_frag_data
.max_bytes
;
11667 if (max
> left_size
)
11672 next_fragP
->tc_frag_data
.last_length
= size
;
11676 next_fragP
->tc_frag_data
.last_length
= 0;
11679 /* Check the padding size for the padding frag. */
11680 padding_size
= i386_branch_padding_size
11681 (padding_fragP
, (fragP
->fr_address
11682 + fragP
->tc_frag_data
.padding_address
));
11684 last_size
= fragP
->tc_frag_data
.prefix_length
;
11685 /* Check if there is change from the last interation. */
11686 if (padding_size
== last_size
)
11688 /* Update the expected address of the padding frag. */
11689 padding_fragP
->tc_frag_data
.padding_address
11690 = (fragP
->fr_address
+ padding_size
11691 + fragP
->tc_frag_data
.padding_address
);
11695 if (padding_size
> fragP
->tc_frag_data
.max_prefix_length
)
11697 /* No padding if there is no sufficient room. Clear the
11698 expected address of the padding frag. */
11699 padding_fragP
->tc_frag_data
.padding_address
= 0;
11703 /* Store the expected address of the padding frag. */
11704 padding_fragP
->tc_frag_data
.padding_address
11705 = (fragP
->fr_address
+ padding_size
11706 + fragP
->tc_frag_data
.padding_address
);
11708 fragP
->tc_frag_data
.prefix_length
= padding_size
;
11710 /* Update the length for the current interation. */
11711 left_size
= padding_size
;
11712 for (next_fragP
= fragP
;
11713 next_fragP
!= padding_fragP
;
11714 next_fragP
= next_fragP
->fr_next
)
11715 if (next_fragP
->fr_type
== rs_machine_dependent
11716 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11721 int max
= next_fragP
->tc_frag_data
.max_bytes
;
11725 if (max
> left_size
)
11730 next_fragP
->tc_frag_data
.length
= size
;
11734 next_fragP
->tc_frag_data
.length
= 0;
11737 return (fragP
->tc_frag_data
.length
11738 - fragP
->tc_frag_data
.last_length
);
11740 return relax_frag (segment
, fragP
, stretch
);
11743 /* md_estimate_size_before_relax()
11745 Called just before relax() for rs_machine_dependent frags. The x86
11746 assembler uses these frags to handle variable size jump
11749 Any symbol that is now undefined will not become defined.
11750 Return the correct fr_subtype in the frag.
11751 Return the initial "guess for variable size of frag" to caller.
11752 The guess is actually the growth beyond the fixed part. Whatever
11753 we do to grow the fixed or variable part contributes to our
11757 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
11759 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
11760 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
11761 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
11763 i386_classify_machine_dependent_frag (fragP
);
11764 return fragP
->tc_frag_data
.length
;
11767 /* We've already got fragP->fr_subtype right; all we have to do is
11768 check for un-relaxable symbols. On an ELF system, we can't relax
11769 an externally visible symbol, because it may be overridden by a
11771 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
11772 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11774 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
11777 #if defined (OBJ_COFF) && defined (TE_PE)
11778 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
11779 && S_IS_WEAK (fragP
->fr_symbol
))
11783 /* Symbol is undefined in this segment, or we need to keep a
11784 reloc so that weak symbols can be overridden. */
11785 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
11786 enum bfd_reloc_code_real reloc_type
;
11787 unsigned char *opcode
;
11790 if (fragP
->fr_var
!= NO_RELOC
)
11791 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
11792 else if (size
== 2)
11793 reloc_type
= BFD_RELOC_16_PCREL
;
11794 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11795 else if (need_plt32_p (fragP
->fr_symbol
))
11796 reloc_type
= BFD_RELOC_X86_64_PLT32
;
11799 reloc_type
= BFD_RELOC_32_PCREL
;
11801 old_fr_fix
= fragP
->fr_fix
;
11802 opcode
= (unsigned char *) fragP
->fr_opcode
;
11804 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
11807 /* Make jmp (0xeb) a (d)word displacement jump. */
11809 fragP
->fr_fix
+= size
;
11810 fix_new (fragP
, old_fr_fix
, size
,
11812 fragP
->fr_offset
, 1,
11818 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
11820 /* Negate the condition, and branch past an
11821 unconditional jump. */
11824 /* Insert an unconditional jump. */
11826 /* We added two extra opcode bytes, and have a two byte
11828 fragP
->fr_fix
+= 2 + 2;
11829 fix_new (fragP
, old_fr_fix
+ 2, 2,
11831 fragP
->fr_offset
, 1,
11835 /* Fall through. */
11838 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
11842 fragP
->fr_fix
+= 1;
11843 fixP
= fix_new (fragP
, old_fr_fix
, 1,
11845 fragP
->fr_offset
, 1,
11846 BFD_RELOC_8_PCREL
);
11847 fixP
->fx_signed
= 1;
11851 /* This changes the byte-displacement jump 0x7N
11852 to the (d)word-displacement jump 0x0f,0x8N. */
11853 opcode
[1] = opcode
[0] + 0x10;
11854 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
11855 /* We've added an opcode byte. */
11856 fragP
->fr_fix
+= 1 + size
;
11857 fix_new (fragP
, old_fr_fix
+ 1, size
,
11859 fragP
->fr_offset
, 1,
11864 BAD_CASE (fragP
->fr_subtype
);
11868 return fragP
->fr_fix
- old_fr_fix
;
11871 /* Guess size depending on current relax state. Initially the relax
11872 state will correspond to a short jump and we return 1, because
11873 the variable part of the frag (the branch offset) is one byte
11874 long. However, we can relax a section more than once and in that
11875 case we must either set fr_subtype back to the unrelaxed state,
11876 or return the value for the appropriate branch. */
11877 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
11880 /* Called after relax() is finished.
11882 In: Address of frag.
11883 fr_type == rs_machine_dependent.
11884 fr_subtype is what the address relaxed to.
11886 Out: Any fixSs and constants are set up.
11887 Caller will turn frag into a ".space 0". */
11890 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
11893 unsigned char *opcode
;
11894 unsigned char *where_to_put_displacement
= NULL
;
11895 offsetT target_address
;
11896 offsetT opcode_address
;
11897 unsigned int extension
= 0;
11898 offsetT displacement_from_opcode_start
;
11900 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
11901 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
11902 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
11904 /* Generate nop padding. */
11905 unsigned int size
= fragP
->tc_frag_data
.length
;
11908 if (size
> fragP
->tc_frag_data
.max_bytes
)
11914 const char *branch
= "branch";
11915 const char *prefix
= "";
11916 fragS
*padding_fragP
;
11917 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
11920 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
11921 switch (fragP
->tc_frag_data
.default_prefix
)
11926 case CS_PREFIX_OPCODE
:
11929 case DS_PREFIX_OPCODE
:
11932 case ES_PREFIX_OPCODE
:
11935 case FS_PREFIX_OPCODE
:
11938 case GS_PREFIX_OPCODE
:
11941 case SS_PREFIX_OPCODE
:
11946 msg
= _("%s:%u: add %d%s at 0x%llx to align "
11947 "%s within %d-byte boundary\n");
11949 msg
= _("%s:%u: add additional %d%s at 0x%llx to "
11950 "align %s within %d-byte boundary\n");
11954 padding_fragP
= fragP
;
11955 msg
= _("%s:%u: add %d%s-byte nop at 0x%llx to align "
11956 "%s within %d-byte boundary\n");
11960 switch (padding_fragP
->tc_frag_data
.branch_type
)
11962 case align_branch_jcc
:
11965 case align_branch_fused
:
11966 branch
= "fused jcc";
11968 case align_branch_jmp
:
11971 case align_branch_call
:
11974 case align_branch_indirect
:
11975 branch
= "indiret branch";
11977 case align_branch_ret
:
11984 fprintf (stdout
, msg
,
11985 fragP
->fr_file
, fragP
->fr_line
, size
, prefix
,
11986 (long long) fragP
->fr_address
, branch
,
11987 1 << align_branch_power
);
11989 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
11990 memset (fragP
->fr_opcode
,
11991 fragP
->tc_frag_data
.default_prefix
, size
);
11993 i386_generate_nops (fragP
, (char *) fragP
->fr_opcode
,
11995 fragP
->fr_fix
+= size
;
12000 opcode
= (unsigned char *) fragP
->fr_opcode
;
12002 /* Address we want to reach in file space. */
12003 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
12005 /* Address opcode resides at in file space. */
12006 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
12008 /* Displacement from opcode start to fill into instruction. */
12009 displacement_from_opcode_start
= target_address
- opcode_address
;
12011 if ((fragP
->fr_subtype
& BIG
) == 0)
12013 /* Don't have to change opcode. */
12014 extension
= 1; /* 1 opcode + 1 displacement */
12015 where_to_put_displacement
= &opcode
[1];
12019 if (no_cond_jump_promotion
12020 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
12021 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
12022 _("long jump required"));
12024 switch (fragP
->fr_subtype
)
12026 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
12027 extension
= 4; /* 1 opcode + 4 displacement */
12029 where_to_put_displacement
= &opcode
[1];
12032 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
12033 extension
= 2; /* 1 opcode + 2 displacement */
12035 where_to_put_displacement
= &opcode
[1];
12038 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
12039 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
12040 extension
= 5; /* 2 opcode + 4 displacement */
12041 opcode
[1] = opcode
[0] + 0x10;
12042 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12043 where_to_put_displacement
= &opcode
[2];
12046 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
12047 extension
= 3; /* 2 opcode + 2 displacement */
12048 opcode
[1] = opcode
[0] + 0x10;
12049 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12050 where_to_put_displacement
= &opcode
[2];
12053 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
12058 where_to_put_displacement
= &opcode
[3];
12062 BAD_CASE (fragP
->fr_subtype
);
12067 /* If size if less then four we are sure that the operand fits,
12068 but if it's 4, then it could be that the displacement is larger
12070 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
12072 && ((addressT
) (displacement_from_opcode_start
- extension
12073 + ((addressT
) 1 << 31))
12074 > (((addressT
) 2 << 31) - 1)))
12076 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
12077 _("jump target out of range"));
12078 /* Make us emit 0. */
12079 displacement_from_opcode_start
= extension
;
12081 /* Now put displacement after opcode. */
12082 md_number_to_chars ((char *) where_to_put_displacement
,
12083 (valueT
) (displacement_from_opcode_start
- extension
),
12084 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
12085 fragP
->fr_fix
+= extension
;
12088 /* Apply a fixup (fixP) to segment data, once it has been determined
12089 by our caller that we have all the info we need to fix it up.
12091 Parameter valP is the pointer to the value of the bits.
12093 On the 386, immediates, displacements, and data pointers are all in
12094 the same (little-endian) format, so we don't need to care about which
12095 we are handling. */
12098 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
12100 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
12101 valueT value
= *valP
;
12103 #if !defined (TE_Mach)
12104 if (fixP
->fx_pcrel
)
12106 switch (fixP
->fx_r_type
)
12112 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
12115 case BFD_RELOC_X86_64_32S
:
12116 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
12119 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
12122 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
12127 if (fixP
->fx_addsy
!= NULL
12128 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
12129 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
12130 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
12131 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
12132 && !use_rela_relocations
)
12134 /* This is a hack. There should be a better way to handle this.
12135 This covers for the fact that bfd_install_relocation will
12136 subtract the current location (for partial_inplace, PC relative
12137 relocations); see more below. */
12141 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
12144 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12146 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12149 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
12151 if ((sym_seg
== seg
12152 || (symbol_section_p (fixP
->fx_addsy
)
12153 && sym_seg
!= absolute_section
))
12154 && !generic_force_reloc (fixP
))
12156 /* Yes, we add the values in twice. This is because
12157 bfd_install_relocation subtracts them out again. I think
12158 bfd_install_relocation is broken, but I don't dare change
12160 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12164 #if defined (OBJ_COFF) && defined (TE_PE)
12165 /* For some reason, the PE format does not store a
12166 section address offset for a PC relative symbol. */
12167 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
12168 || S_IS_WEAK (fixP
->fx_addsy
))
12169 value
+= md_pcrel_from (fixP
);
12172 #if defined (OBJ_COFF) && defined (TE_PE)
12173 if (fixP
->fx_addsy
!= NULL
12174 && S_IS_WEAK (fixP
->fx_addsy
)
12175 /* PR 16858: Do not modify weak function references. */
12176 && ! fixP
->fx_pcrel
)
12178 #if !defined (TE_PEP)
12179 /* For x86 PE weak function symbols are neither PC-relative
12180 nor do they set S_IS_FUNCTION. So the only reliable way
12181 to detect them is to check the flags of their containing
12183 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
12184 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
12188 value
-= S_GET_VALUE (fixP
->fx_addsy
);
12192 /* Fix a few things - the dynamic linker expects certain values here,
12193 and we must not disappoint it. */
12194 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12195 if (IS_ELF
&& fixP
->fx_addsy
)
12196 switch (fixP
->fx_r_type
)
12198 case BFD_RELOC_386_PLT32
:
12199 case BFD_RELOC_X86_64_PLT32
:
12200 /* Make the jump instruction point to the address of the operand.
12201 At runtime we merely add the offset to the actual PLT entry.
12202 NB: Subtract the offset size only for jump instructions. */
12203 if (fixP
->fx_pcrel
)
12207 case BFD_RELOC_386_TLS_GD
:
12208 case BFD_RELOC_386_TLS_LDM
:
12209 case BFD_RELOC_386_TLS_IE_32
:
12210 case BFD_RELOC_386_TLS_IE
:
12211 case BFD_RELOC_386_TLS_GOTIE
:
12212 case BFD_RELOC_386_TLS_GOTDESC
:
12213 case BFD_RELOC_X86_64_TLSGD
:
12214 case BFD_RELOC_X86_64_TLSLD
:
12215 case BFD_RELOC_X86_64_GOTTPOFF
:
12216 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12217 value
= 0; /* Fully resolved at runtime. No addend. */
12219 case BFD_RELOC_386_TLS_LE
:
12220 case BFD_RELOC_386_TLS_LDO_32
:
12221 case BFD_RELOC_386_TLS_LE_32
:
12222 case BFD_RELOC_X86_64_DTPOFF32
:
12223 case BFD_RELOC_X86_64_DTPOFF64
:
12224 case BFD_RELOC_X86_64_TPOFF32
:
12225 case BFD_RELOC_X86_64_TPOFF64
:
12226 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12229 case BFD_RELOC_386_TLS_DESC_CALL
:
12230 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12231 value
= 0; /* Fully resolved at runtime. No addend. */
12232 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12236 case BFD_RELOC_VTABLE_INHERIT
:
12237 case BFD_RELOC_VTABLE_ENTRY
:
12244 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
12246 #endif /* !defined (TE_Mach) */
12248 /* Are we finished with this relocation now? */
12249 if (fixP
->fx_addsy
== NULL
)
12251 #if defined (OBJ_COFF) && defined (TE_PE)
12252 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
12255 /* Remember value for tc_gen_reloc. */
12256 fixP
->fx_addnumber
= value
;
12257 /* Clear out the frag for now. */
12261 else if (use_rela_relocations
)
12263 fixP
->fx_no_overflow
= 1;
12264 /* Remember value for tc_gen_reloc. */
12265 fixP
->fx_addnumber
= value
;
12269 md_number_to_chars (p
, value
, fixP
->fx_size
);
12273 md_atof (int type
, char *litP
, int *sizeP
)
12275 /* This outputs the LITTLENUMs in REVERSE order;
12276 in accord with the bigendian 386. */
12277 return ieee_md_atof (type
, litP
, sizeP
, FALSE
);
/* Scratch buffer for output_invalid: big enough for "'c'" or
   "(0xNN)" plus the terminating NUL.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render the invalid character C for a diagnostic: printable
   characters are quoted, everything else is shown as a hex code.
   Returns a pointer to a static buffer (not reentrant).  */

static char *
output_invalid (int c)
{
  if (ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  return output_invalid_buf;
}
12294 /* REG_STRING starts *before* REGISTER_PREFIX. */
12296 static const reg_entry
*
12297 parse_real_register (char *reg_string
, char **end_op
)
12299 char *s
= reg_string
;
12301 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
12302 const reg_entry
*r
;
12304 /* Skip possible REGISTER_PREFIX and possible whitespace. */
12305 if (*s
== REGISTER_PREFIX
)
12308 if (is_space_char (*s
))
12311 p
= reg_name_given
;
12312 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
12314 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
12315 return (const reg_entry
*) NULL
;
12319 /* For naked regs, make sure that we are not dealing with an identifier.
12320 This prevents confusing an identifier like `eax_var' with register
12322 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
12323 return (const reg_entry
*) NULL
;
12327 r
= (const reg_entry
*) hash_find (reg_hash
, reg_name_given
);
12329 /* Handle floating point regs, allowing spaces in the (i) part. */
12330 if (r
== i386_regtab
/* %st is first entry of table */)
12332 if (!cpu_arch_flags
.bitfield
.cpu8087
12333 && !cpu_arch_flags
.bitfield
.cpu287
12334 && !cpu_arch_flags
.bitfield
.cpu387
)
12335 return (const reg_entry
*) NULL
;
12337 if (is_space_char (*s
))
12342 if (is_space_char (*s
))
12344 if (*s
>= '0' && *s
<= '7')
12346 int fpr
= *s
- '0';
12348 if (is_space_char (*s
))
12353 r
= (const reg_entry
*) hash_find (reg_hash
, "st(0)");
12358 /* We have "%st(" then garbage. */
12359 return (const reg_entry
*) NULL
;
12363 if (r
== NULL
|| allow_pseudo_reg
)
12366 if (operand_type_all_zero (&r
->reg_type
))
12367 return (const reg_entry
*) NULL
;
12369 if ((r
->reg_type
.bitfield
.dword
12370 || (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
> 3)
12371 || r
->reg_type
.bitfield
.class == RegCR
12372 || r
->reg_type
.bitfield
.class == RegDR
12373 || r
->reg_type
.bitfield
.class == RegTR
)
12374 && !cpu_arch_flags
.bitfield
.cpui386
)
12375 return (const reg_entry
*) NULL
;
12377 if (r
->reg_type
.bitfield
.class == RegMMX
&& !cpu_arch_flags
.bitfield
.cpummx
)
12378 return (const reg_entry
*) NULL
;
12380 if (!cpu_arch_flags
.bitfield
.cpuavx512f
)
12382 if (r
->reg_type
.bitfield
.zmmword
12383 || r
->reg_type
.bitfield
.class == RegMask
)
12384 return (const reg_entry
*) NULL
;
12386 if (!cpu_arch_flags
.bitfield
.cpuavx
)
12388 if (r
->reg_type
.bitfield
.ymmword
)
12389 return (const reg_entry
*) NULL
;
12391 if (!cpu_arch_flags
.bitfield
.cpusse
&& r
->reg_type
.bitfield
.xmmword
)
12392 return (const reg_entry
*) NULL
;
12396 if (r
->reg_type
.bitfield
.class == RegBND
&& !cpu_arch_flags
.bitfield
.cpumpx
)
12397 return (const reg_entry
*) NULL
;
12399 /* Don't allow fake index register unless allow_index_reg isn't 0. */
12400 if (!allow_index_reg
&& r
->reg_num
== RegIZ
)
12401 return (const reg_entry
*) NULL
;
12403 /* Upper 16 vector registers are only available with VREX in 64bit
12404 mode, and require EVEX encoding. */
12405 if (r
->reg_flags
& RegVRex
)
12407 if (!cpu_arch_flags
.bitfield
.cpuavx512f
12408 || flag_code
!= CODE_64BIT
)
12409 return (const reg_entry
*) NULL
;
12411 i
.vec_encoding
= vex_encoding_evex
;
12414 if (((r
->reg_flags
& (RegRex64
| RegRex
)) || r
->reg_type
.bitfield
.qword
)
12415 && (!cpu_arch_flags
.bitfield
.cpulm
|| r
->reg_type
.bitfield
.class != RegCR
)
12416 && flag_code
!= CODE_64BIT
)
12417 return (const reg_entry
*) NULL
;
12419 if (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
== RegFlat
12421 return (const reg_entry
*) NULL
;
12426 /* REG_STRING starts *before* REGISTER_PREFIX. */
12428 static const reg_entry
*
12429 parse_register (char *reg_string
, char **end_op
)
12431 const reg_entry
*r
;
12433 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
12434 r
= parse_real_register (reg_string
, end_op
);
12439 char *save
= input_line_pointer
;
12443 input_line_pointer
= reg_string
;
12444 c
= get_symbol_name (®_string
);
12445 symbolP
= symbol_find (reg_string
);
12446 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
12448 const expressionS
*e
= symbol_get_value_expression (symbolP
);
12450 know (e
->X_op
== O_register
);
12451 know (e
->X_add_number
>= 0
12452 && (valueT
) e
->X_add_number
< i386_regtab_size
);
12453 r
= i386_regtab
+ e
->X_add_number
;
12454 if ((r
->reg_flags
& RegVRex
))
12455 i
.vec_encoding
= vex_encoding_evex
;
12456 *end_op
= input_line_pointer
;
12458 *input_line_pointer
= c
;
12459 input_line_pointer
= save
;
12465 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
12467 const reg_entry
*r
;
12468 char *end
= input_line_pointer
;
12471 r
= parse_register (name
, &input_line_pointer
);
12472 if (r
&& end
<= input_line_pointer
)
12474 *nextcharP
= *input_line_pointer
;
12475 *input_line_pointer
= 0;
12476 e
->X_op
= O_register
;
12477 e
->X_add_number
= r
- i386_regtab
;
12480 input_line_pointer
= end
;
12482 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
12486 md_operand (expressionS
*e
)
12489 const reg_entry
*r
;
12491 switch (*input_line_pointer
)
12493 case REGISTER_PREFIX
:
12494 r
= parse_real_register (input_line_pointer
, &end
);
12497 e
->X_op
= O_register
;
12498 e
->X_add_number
= r
- i386_regtab
;
12499 input_line_pointer
= end
;
12504 gas_assert (intel_syntax
);
12505 end
= input_line_pointer
++;
12507 if (*input_line_pointer
== ']')
12509 ++input_line_pointer
;
12510 e
->X_op_symbol
= make_expr_symbol (e
);
12511 e
->X_add_symbol
= NULL
;
12512 e
->X_add_number
= 0;
12517 e
->X_op
= O_absent
;
12518 input_line_pointer
= end
;
/* Short command line options.  -k, -V, -Q and -s are accepted for
   SVR4/Solaris/FreeBSD compatibility on ELF targets only.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
const char *md_shortopts = "kVQ:sqnO::";
#else
const char *md_shortopts = "qnO::";
#endif

/* Values returned by getopt_long for the x86-specific long options.  */
#define OPTION_32 (OPTION_MD_BASE + 0)
#define OPTION_64 (OPTION_MD_BASE + 1)
#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
#define OPTION_MARCH (OPTION_MD_BASE + 3)
#define OPTION_MTUNE (OPTION_MD_BASE + 4)
#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
#define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
#define OPTION_X32 (OPTION_MD_BASE + 14)
#define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
#define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
#define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
#define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
#define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
#define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
#define OPTION_MSHARED (OPTION_MD_BASE + 21)
#define OPTION_MAMD64 (OPTION_MD_BASE + 22)
#define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
#define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
#define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
#define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
#define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
#define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
#define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
#define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
#define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
#define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
#define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
12566 struct option md_longopts
[] =
12568 {"32", no_argument
, NULL
, OPTION_32
},
12569 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12570 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12571 {"64", no_argument
, NULL
, OPTION_64
},
12573 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12574 {"x32", no_argument
, NULL
, OPTION_X32
},
12575 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
12576 {"mx86-used-note", required_argument
, NULL
, OPTION_X86_USED_NOTE
},
12578 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
12579 {"march", required_argument
, NULL
, OPTION_MARCH
},
12580 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
12581 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
12582 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
12583 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
12584 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
12585 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
12586 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
12587 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
12588 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
12589 {"mvexwig", required_argument
, NULL
, OPTION_MVEXWIG
},
12590 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
12591 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
12592 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
12593 # if defined (TE_PE) || defined (TE_PEP)
12594 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
12596 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
12597 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
12598 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
12599 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
12600 {"malign-branch-boundary", required_argument
, NULL
, OPTION_MALIGN_BRANCH_BOUNDARY
},
12601 {"malign-branch-prefix-size", required_argument
, NULL
, OPTION_MALIGN_BRANCH_PREFIX_SIZE
},
12602 {"malign-branch", required_argument
, NULL
, OPTION_MALIGN_BRANCH
},
12603 {"mbranches-within-32B-boundaries", no_argument
, NULL
, OPTION_MBRANCHES_WITH_32B_BOUNDARIES
},
12604 {"mlfence-after-load", required_argument
, NULL
, OPTION_MLFENCE_AFTER_LOAD
},
12605 {"mlfence-before-indirect-branch", required_argument
, NULL
,
12606 OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
},
12607 {"mlfence-before-ret", required_argument
, NULL
, OPTION_MLFENCE_BEFORE_RET
},
12608 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
12609 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
12610 {NULL
, no_argument
, NULL
, 0}
12612 size_t md_longopts_size
= sizeof (md_longopts
);
/* Handle a target-specific command line option C (with optional
   argument ARG); returns non-zero when the option is consumed.
   NOTE(review): much structural scaffolding of this function (return
   type, switch statement, braces, break statements, some else arms)
   is missing from this extraction; only the visible statements are
   reproduced below, token-for-token, with comments added.  */
md_parse_option (int c, const char *arg)
  char *arch, *next, *saved, *type;

  /* -n: do not optimize code alignment.  */
  optimize_align_code = 0;

  /* -q: quieten some warnings.  */
  quiet_warnings = 1;

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
     should be emitted or not.  FIXME: Not implemented.  */
  if ((arg[0] != 'y' && arg[0] != 'n') || arg[1])

  /* -V: SVR4 argument to print version ID.  */
  print_version_id ();

  /* -k: Ignore for FreeBSD compatibility.  */

  /* -s: On i386 Solaris, this tells the native assembler to use
     .stab instead of .stab.excl.  We always use .stab anyhow.  */

  case OPTION_MSHARED:

  /* -mx86-used-note=[yes|no]: control emission of the x86 ISA/feature
     property note (ELF only).  */
  case OPTION_X86_USED_NOTE:
    if (strcasecmp (arg, "yes") == 0)
    else if (strcasecmp (arg, "no") == 0)
      as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg);

  /* --64: select a 64-bit default target, if one was compiled in.  */
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
      const char **list, **l;

      list = bfd_target_list ();
      for (l = list; *l != NULL; l++)
	if (CONST_STRNEQ (*l, "elf64-x86-64")
	    || strcmp (*l, "coff-x86-64") == 0
	    || strcmp (*l, "pe-x86-64") == 0
	    || strcmp (*l, "pei-x86-64") == 0
	    || strcmp (*l, "mach-o-x86-64") == 0)
	    default_arch = "x86_64";
      as_fatal (_("no compiled in support for x86_64"));

  /* --x32: ILP32 x86-64 — ELF only.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      const char **list, **l;

      list = bfd_target_list ();
      for (l = list; *l != NULL; l++)
	if (CONST_STRNEQ (*l, "elf32-x86-64"))
	  default_arch = "x86_64:32";
      as_fatal (_("no compiled in support for 32bit x86_64"));
      as_fatal (_("32bit x86_64 is only supported for ELF"));

  /* --32: plain i386.  */
  default_arch = "i386";

  case OPTION_DIVIDE:
#ifdef SVR4_COMMENT_CHARS
      /* Rebuild the comment-character string, presumably without '/'
	 — TODO confirm; the copy loop body is elided here.  */
      n = XNEWVEC (char, strlen (i386_comment_chars) + 1);
      for (s = i386_comment_chars; *s != '\0'; s++)
      i386_comment_chars = n;

  /* -march=CPU[,+EXTENSION...]: select architecture and ISA
     extensions.  SAVED holds a writable copy of ARG that is chopped
     at each '+' separator.  */
  saved = xstrdup (arg);
  /* Allow -march=+nosse.  */
  as_fatal (_("invalid -march= option: `%s'"), arg);
  next = strchr (arch, '+');

  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
      /* Exact match: a processor name.  */
      if (strcmp (arch, cpu_arch[j].name) == 0)
	  if (! cpu_arch[j].flags.bitfield.cpui386)
	  cpu_arch_name = cpu_arch[j].name;
	  cpu_sub_arch_name = NULL;
	  cpu_arch_flags = cpu_arch[j].flags;
	  cpu_arch_isa = cpu_arch[j].type;
	  cpu_arch_isa_flags = cpu_arch[j].flags;
	  /* -mtune= not given explicitly: tune for the selected arch.  */
	  if (!cpu_arch_tune_set)
	      cpu_arch_tune = cpu_arch_isa;
	      cpu_arch_tune_flags = cpu_arch_isa_flags;
      /* Extension entries are stored with a leading '.' in the table.  */
      else if (*cpu_arch[j].name == '.'
	       && strcmp (arch, cpu_arch[j].name + 1) == 0)
	  /* ISA extension.  */
	  i386_cpu_flags flags;

	  flags = cpu_flags_or (cpu_arch_flags,
				cpu_arch[j].flags);

	  /* Only record the extension if it adds something new.  */
	  if (!cpu_flags_equal (&flags, &cpu_arch_flags))
	      if (cpu_sub_arch_name)
		  char *name = cpu_sub_arch_name;
		  cpu_sub_arch_name = concat (name,
					      (const char *) NULL);
		cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
	      cpu_arch_flags = flags;
	      cpu_arch_isa_flags = flags;
		= cpu_flags_or (cpu_arch_isa_flags,
				cpu_arch[j].flags);

  if (j >= ARRAY_SIZE (cpu_arch))
      /* Disable an ISA extension.  */
      for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
	if (strcmp (arch, cpu_noarch[j].name) == 0)
	    i386_cpu_flags flags;

	    flags = cpu_flags_and_not (cpu_arch_flags,
				       cpu_noarch[j].flags);
	    if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		if (cpu_sub_arch_name)
		    char *name = cpu_sub_arch_name;
		    cpu_sub_arch_name = concat (arch,
						(const char *) NULL);
		  cpu_sub_arch_name = xstrdup (arch);
		cpu_arch_flags = flags;
		cpu_arch_isa_flags = flags;

      /* Fold "no match in cpu_noarch" into the generic failure below.  */
      if (j >= ARRAY_SIZE (cpu_noarch))
	j = ARRAY_SIZE (cpu_arch);

  if (j >= ARRAY_SIZE (cpu_arch))
    as_fatal (_("invalid -march= option: `%s'"), arg);

  while (next != NULL);

  /* -mtune=CPU: optimize for CPU without restricting the ISA.  */
  as_fatal (_("invalid -mtune= option: `%s'"), arg);
  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
      if (strcmp (arg, cpu_arch[j].name) == 0)
	  cpu_arch_tune_set = 1;
	  cpu_arch_tune = cpu_arch[j].type;
	  cpu_arch_tune_flags = cpu_arch[j].flags;
  if (j >= ARRAY_SIZE (cpu_arch))
    as_fatal (_("invalid -mtune= option: `%s'"), arg);

  /* -mmnemonic=[att|intel]: choose which mnemonic set to accept.  */
  case OPTION_MMNEMONIC:
    if (strcasecmp (arg, "att") == 0)
      intel_mnemonic = 0;
    else if (strcasecmp (arg, "intel") == 0)
      intel_mnemonic = 1;
      as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);

  /* -msyntax=[att|intel]: choose operand order/syntax.  */
  case OPTION_MSYNTAX:
    if (strcasecmp (arg, "att") == 0)
    else if (strcasecmp (arg, "intel") == 0)
      as_fatal (_("invalid -msyntax= option: `%s'"), arg);

  case OPTION_MINDEX_REG:
    allow_index_reg = 1;

  case OPTION_MNAKED_REG:
    allow_naked_reg = 1;

  case OPTION_MSSE2AVX:

  /* -msse-check=: diagnostics severity for SSE instructions.  */
  case OPTION_MSSE_CHECK:
    if (strcasecmp (arg, "error") == 0)
      sse_check = check_error;
    else if (strcasecmp (arg, "warning") == 0)
      sse_check = check_warning;
    else if (strcasecmp (arg, "none") == 0)
      sse_check = check_none;
      as_fatal (_("invalid -msse-check= option: `%s'"), arg);

  /* -moperand-check=: diagnostics severity for dubious operand
     combinations.  */
  case OPTION_MOPERAND_CHECK:
    if (strcasecmp (arg, "error") == 0)
      operand_check = check_error;
    else if (strcasecmp (arg, "warning") == 0)
      operand_check = check_warning;
    else if (strcasecmp (arg, "none") == 0)
      operand_check = check_none;
      as_fatal (_("invalid -moperand-check= option: `%s'"), arg);

  /* -mavxscalar=: vector length used to encode scalar AVX insns.  */
  case OPTION_MAVXSCALAR:
    if (strcasecmp (arg, "128") == 0)
      avxscalar = vex128;
    else if (strcasecmp (arg, "256") == 0)
      avxscalar = vex256;
      as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);

  /* -mvexwig=: VEX.W value for W-ignored instructions.  */
  case OPTION_MVEXWIG:
    if (strcmp (arg, "0") == 0)
    else if (strcmp (arg, "1") == 0)
      as_fatal (_("invalid -mvexwig= option: `%s'"), arg);

  case OPTION_MADD_BND_PREFIX:
    add_bnd_prefix = 1;

  /* -mevexlig=: vector length used to encode scalar EVEX insns.  */
  case OPTION_MEVEXLIG:
    if (strcmp (arg, "128") == 0)
      evexlig = evexl128;
    else if (strcmp (arg, "256") == 0)
      evexlig = evexl256;
    else if (strcmp (arg, "512") == 0)
      evexlig = evexl512;
      as_fatal (_("invalid -mevexlig= option: `%s'"), arg);

  /* -mevexrcig=: EVEX.RC rounding value for SAE-only instructions.  */
  case OPTION_MEVEXRCIG:
    if (strcmp (arg, "rne") == 0)
    else if (strcmp (arg, "rd") == 0)
    else if (strcmp (arg, "ru") == 0)
    else if (strcmp (arg, "rz") == 0)
      as_fatal (_("invalid -mevexrcig= option: `%s'"), arg);

  /* -mevexwig=: EVEX.W value for W-ignored instructions.  */
  case OPTION_MEVEXWIG:
    if (strcmp (arg, "0") == 0)
    else if (strcmp (arg, "1") == 0)
      as_fatal (_("invalid -mevexwig= option: `%s'"), arg);

# if defined (TE_PE) || defined (TE_PEP)
  case OPTION_MBIG_OBJ:

  /* -momit-lock-prefix=: strip lock prefixes from the output.  */
  case OPTION_MOMIT_LOCK_PREFIX:
    if (strcasecmp (arg, "yes") == 0)
      omit_lock_prefix = 1;
    else if (strcasecmp (arg, "no") == 0)
      omit_lock_prefix = 0;
      as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg);

  case OPTION_MFENCE_AS_LOCK_ADD:
    if (strcasecmp (arg, "yes") == 0)
    else if (strcasecmp (arg, "no") == 0)
      as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg);

  case OPTION_MLFENCE_AFTER_LOAD:
    if (strcasecmp (arg, "yes") == 0)
      lfence_after_load = 1;
    else if (strcasecmp (arg, "no") == 0)
      lfence_after_load = 0;
      as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg);

  /* -mlfence-before-indirect-branch=: which indirect branches get a
     preceding lfence (Spectre-style mitigation knob).  */
  case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH:
    if (strcasecmp (arg, "all") == 0)
      lfence_before_indirect_branch = lfence_branch_all;
    else if (strcasecmp (arg, "memory") == 0)
      lfence_before_indirect_branch = lfence_branch_memory;
    else if (strcasecmp (arg, "register") == 0)
      lfence_before_indirect_branch = lfence_branch_register;
    else if (strcasecmp (arg, "none") == 0)
      lfence_before_indirect_branch = lfence_branch_none;
      as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"),

  case OPTION_MLFENCE_BEFORE_RET:
    if (strcasecmp (arg, "or") == 0)
      lfence_before_ret = lfence_before_ret_or;
    else if (strcasecmp (arg, "not") == 0)
      lfence_before_ret = lfence_before_ret_not;
    else if (strcasecmp (arg, "none") == 0)
      lfence_before_ret = lfence_before_ret_none;
      as_fatal (_("invalid -mlfence-before-ret= option: `%s'"),

  case OPTION_MRELAX_RELOCATIONS:
    if (strcasecmp (arg, "yes") == 0)
      generate_relax_relocations = 1;
    else if (strcasecmp (arg, "no") == 0)
      generate_relax_relocations = 0;
      as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg);

  /* -malign-branch-boundary=NUM: 0 disables; otherwise NUM must be a
     power of two >= 16 and the stored value is log2(NUM).  */
  case OPTION_MALIGN_BRANCH_BOUNDARY:
      long int align = strtoul (arg, &end, 0);
	align_branch_power = 0;
      else if (align >= 16)
	  for (align_power = 0;
	       align >>= 1, align_power++)
	  /* Limit alignment power to 31.  */
	  if (align == 1 && align_power < 32)
	      align_branch_power = align_power;
	as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg);

  case OPTION_MALIGN_BRANCH_PREFIX_SIZE:
      int align = strtoul (arg, &end, 0);
      /* Some processors only support 5 prefixes.  */
      if (*end == '\0' && align >= 0 && align < 6)
	align_branch_prefix_size = align;
	as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),

  /* -malign-branch=TYPE[+TYPE...]: which branch kinds to align.  */
  case OPTION_MALIGN_BRANCH:
    saved = xstrdup (arg);
	next = strchr (type, '+');
	if (strcasecmp (type, "jcc") == 0)
	  align_branch |= align_branch_jcc_bit;
	else if (strcasecmp (type, "fused") == 0)
	  align_branch |= align_branch_fused_bit;
	else if (strcasecmp (type, "jmp") == 0)
	  align_branch |= align_branch_jmp_bit;
	else if (strcasecmp (type, "call") == 0)
	  align_branch |= align_branch_call_bit;
	else if (strcasecmp (type, "ret") == 0)
	  align_branch |= align_branch_ret_bit;
	else if (strcasecmp (type, "indirect") == 0)
	  align_branch |= align_branch_indirect_bit;
	  as_fatal (_("invalid -malign-branch= option: `%s'"), arg);
    while (next != NULL);

  /* Shorthand for -malign-branch-boundary=32
     -malign-branch-prefix-size=5 -malign-branch=jcc+fused+jmp.  */
  case OPTION_MBRANCHES_WITH_32B_BOUNDARIES:
    align_branch_power = 5;
    align_branch_prefix_size = 5;
    align_branch = (align_branch_jcc_bit
		    | align_branch_fused_bit
		    | align_branch_jmp_bit);

  case OPTION_MAMD64:

  case OPTION_MINTEL64:

  /* -O[0|1|2|s]: encoding optimization level.  */
      /* Turn off -Os.  */
      optimize_for_space = 0;
    else if (*arg == 's')
	optimize_for_space = 1;
	/* Turn on all encoding optimizations.  */
	optimize = INT_MAX;
	optimize = atoi (arg);
	/* Turn off -Os.  */
	optimize_for_space = 0;
13148 #define MESSAGE_TEMPLATE \
/* Append NAME (LEN bytes) to the usage-message line buffer at write
   position P inside MESSAGE.  When the line is full, flush MESSAGE to
   STREAM and restart at START.  *LEFT_P tracks remaining room and is
   updated; the new write position is returned.
   NOTE(review): several structural lines of this function are elided
   in this extraction.  */
output_message (FILE *stream, char *p, char *message, char *start,
		int *left_p, const char *name, int len)
  int size = sizeof (MESSAGE_TEMPLATE);
  int left = *left_p;

  /* Reserve 2 spaces for ", " or ",\0" */

  /* Check if there is any room.  */
      p = mempcpy (p, name, len);

      /* Output the current message now and start a new one.  */
      fprintf (stream, "%s\n", message);
      left = size - (start - message) - len - 2;

      gas_assert (left >= 0);

      p = mempcpy (p, name, len);
/* Print to STREAM the table of architectures (EXT == 0) or ISA
   extensions (EXT != 0) accepted by -march=/-mtune=.  With CHECK
   non-zero, entries that are not usable as a plain processor are
   filtered out.  Disabled ("no*") extensions are listed afterwards.
   NOTE(review): some loop/conditional scaffolding is elided in this
   extraction.  */
show_arch (FILE *stream, int ext, int check)
  static char message[] = MESSAGE_TEMPLATE;
  /* Skip the fixed 27-character indent of the template.  */
  char *start = message + 27;
  int size = sizeof (MESSAGE_TEMPLATE);

  left = size - (start - message);
  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
      /* Should it be skipped?  */
      if (cpu_arch[j].skip)

      name = cpu_arch[j].name;
      len = cpu_arch[j].len;
	  /* It is an extension.  Skip if we aren't asked to show it.  */
	  /* It is an processor.  Skip if we show only extension.  */
      else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
	  /* It is an impossible processor - skip.  */

      p = output_message (stream, p, message, start, &left, name, len);

  /* Display disabled extensions.  */
    for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
	name = cpu_noarch[j].name;
	len = cpu_noarch[j].len;
	p = output_message (stream, p, message, start, &left, name,

  /* Flush whatever remains in the buffer.  */
  fprintf (stream, "%s\n", message);
/* Print the target-specific portion of "as --help" to STREAM.
   NOTE(review): in this extraction the option-description strings have
   had their column-alignment whitespace collapsed and some fprintf
   continuations elided; text is reproduced exactly as visible.  */
md_show_usage (FILE *stream)
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
-Qy, -Qn ignored\n\
-V print assembler version number\n\
  fprintf (stream, _("\
-n Do not optimize code alignment\n\
-q quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
#if defined BFD64 && (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
		      || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
--32/--64/--x32 generate 32bit/64bit/x32 code\n"));
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
--divide do not treat `/' as a comment character\n"));
  fprintf (stream, _("\
--divide ignored\n"));
  fprintf (stream, _("\
-march=CPU[,+EXTENSION...]\n\
generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
-mtune=CPU optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
-msse2avx encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
-msse-check=[none|error|warning] (default: warning)\n\
check SSE instructions\n"));
  fprintf (stream, _("\
-moperand-check=[none|error|warning] (default: warning)\n\
check operand combinations for validity\n"));
  fprintf (stream, _("\
-mavxscalar=[128|256] (default: 128)\n\
encode scalar AVX instructions with specific vector\n\
  fprintf (stream, _("\
-mvexwig=[0|1] (default: 0)\n\
encode VEX instructions with specific VEX.W value\n\
for VEX.W bit ignored instructions\n"));
  fprintf (stream, _("\
-mevexlig=[128|256|512] (default: 128)\n\
encode scalar EVEX instructions with specific vector\n\
  fprintf (stream, _("\
-mevexwig=[0|1] (default: 0)\n\
encode EVEX instructions with specific EVEX.W value\n\
for EVEX.W bit ignored instructions\n"));
  fprintf (stream, _("\
-mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
encode EVEX instructions with specific EVEX.RC value\n\
for SAE-only ignored instructions\n"));
  fprintf (stream, _("\
-mmnemonic=[att|intel] "));
  /* The default mnemonic set depends on the SYSV386_COMPAT build knob.  */
  if (SYSV386_COMPAT)
    fprintf (stream, _("(default: att)\n"));
    fprintf (stream, _("(default: intel)\n"));
  fprintf (stream, _("\
use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
-msyntax=[att|intel] (default: att)\n\
use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
-mindex-reg support pseudo index registers\n"));
  fprintf (stream, _("\
-mnaked-reg don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
-madd-bnd-prefix add BND prefix for all valid branches\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
-mshared disable branch optimization for shared code\n"));
  fprintf (stream, _("\
-mx86-used-note=[no|yes] "));
  if (DEFAULT_X86_USED_NOTE)
    fprintf (stream, _("(default: yes)\n"));
    fprintf (stream, _("(default: no)\n"));
  fprintf (stream, _("\
generate x86 used ISA and feature properties\n"));
#if defined (TE_PE) || defined (TE_PEP)
  fprintf (stream, _("\
-mbig-obj generate big object files\n"));
  fprintf (stream, _("\
-momit-lock-prefix=[no|yes] (default: no)\n\
strip all lock prefixes\n"));
  fprintf (stream, _("\
-mfence-as-lock-add=[no|yes] (default: no)\n\
encode lfence, mfence and sfence as\n\
lock addl $0x0, (%%{re}sp)\n"));
  fprintf (stream, _("\
-mrelax-relocations=[no|yes] "));
  if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS)
    fprintf (stream, _("(default: yes)\n"));
    fprintf (stream, _("(default: no)\n"));
  fprintf (stream, _("\
generate relax relocations\n"));
  fprintf (stream, _("\
-malign-branch-boundary=NUM (default: 0)\n\
align branches within NUM byte boundary\n"));
  fprintf (stream, _("\
-malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
TYPE is combination of jcc, fused, jmp, call, ret,\n\
specify types of branches to align\n"));
  fprintf (stream, _("\
-malign-branch-prefix-size=NUM (default: 5)\n\
align branches with NUM prefixes per instruction\n"));
  fprintf (stream, _("\
-mbranches-within-32B-boundaries\n\
align branches within 32 byte boundary\n"));
  fprintf (stream, _("\
-mlfence-after-load=[no|yes] (default: no)\n\
generate lfence after load\n"));
  fprintf (stream, _("\
-mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\
generate lfence before indirect near branch\n"));
  fprintf (stream, _("\
-mlfence-before-ret=[none|or|not] (default: none)\n\
generate lfence before ret\n"));
  fprintf (stream, _("\
-mamd64 accept only AMD64 ISA [default]\n"));
  fprintf (stream, _("\
-mintel64 accept only Intel64 ISA\n"));
13393 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
13394 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13395 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
/* Pick the target format to use.  Decodes default_arch into a code
   flag and (for ELF) an x86_elf_abi, applies IAMCU defaults, and maps
   the BFD output flavour to a target-format name string.
   NOTE(review): the return type and parts of the switch scaffolding
   are elided in this extraction.  */
i386_target_format (void)
  if (!strncmp (default_arch, "x86_64", 6))
      update_code_flag (CODE_64BIT, 1);
      /* "x86_64" selects LP64; "x86_64:32" (6th char not NUL) selects
	 the x32 ILP32 ABI.  */
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
	x86_elf_abi = X86_64_X32_ABI;
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else if (!strcmp (default_arch, "iamcu"))
      update_code_flag (CODE_32BIT, 1);
      if (cpu_arch_isa == PROCESSOR_UNKNOWN)
	  /* No -march= was given: default the whole arch state to IAMCU.  */
	  static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
	  cpu_arch_name = "iamcu";
	  cpu_sub_arch_name = NULL;
	  cpu_arch_flags = iamcu_flags;
	  cpu_arch_isa = PROCESSOR_IAMCU;
	  cpu_arch_isa_flags = iamcu_flags;
	  if (!cpu_arch_tune_set)
	      cpu_arch_tune = cpu_arch_isa;
	      cpu_arch_tune_flags = cpu_arch_isa_flags;
      else if (cpu_arch_isa != PROCESSOR_IAMCU)
	as_fatal (_("Intel MCU doesn't support `%s' architecture"),
    as_fatal (_("unknown architecture"));

  /* Fall back to the generic 32/64-bit entry of cpu_arch when no ISA
     flags were established above.  */
  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
    case bfd_target_coff_flavour:
      return "coff-i386";
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
	const char *format;

	switch (x86_elf_abi)
	    format = ELF_TARGET_FORMAT;
	    tls_get_addr = "___tls_get_addr";
	    use_rela_relocations = 1;
	    tls_get_addr = "__tls_get_addr";
	    format = ELF_TARGET_FORMAT64;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    tls_get_addr = "__tls_get_addr";
	    /* 64-bit relocations cannot be represented in x32 output.  */
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	/* L1OM/K1OM/IAMCU override the generic ELF format names.  */
	if (cpu_arch_isa == PROCESSOR_L1OM)
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	else if (cpu_arch_isa == PROCESSOR_K1OM)
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	else if (cpu_arch_isa == PROCESSOR_IAMCU)
	    if (x86_elf_abi != I386_ABI)
	      as_fatal (_("Intel MCU is 32bit only"));
	    return ELF_TARGET_IAMCU_FORMAT;
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	  use_rela_relocations = 1;
	  return "mach-o-x86-64";
	return "mach-o-i386";
13532 #endif /* OBJ_MAYBE_ more than one */
/* gas hook: resolve an otherwise-undefined symbol NAME.  Creates the
   GOT symbol on the first reference to _GLOBAL_OFFSET_TABLE_.  The
   first three characters are compared individually as a cheap
   early-out before the full strcmp.  */
md_undefined_symbol (char *name)
  if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
      && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
      && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
      && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
	  if (symbol_find (name))
	    as_bad (_("GOT already in symbol table"));
	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
/* Round up a section size to the appropriate boundary.  */
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
      /* For a.out, force the section size to be aligned.  If we don't do
	 this, BFD will align it for us, but it will not write out the
	 final bytes of the section.  This may be a bug in BFD, but it is
	 easier to fix it here since that is how the other a.out targets
	 work.  */
      align = bfd_section_alignment (segment);
      /* Round SIZE up to a multiple of 2**align.  */
      size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
/* On the i386, PC-relative offsets are relative to the start of the
   next instruction.  That is, the address of the offset, plus its
   size, since the offset is always the last part of the insn.  */
md_pcrel_from (fixS *fixP)
  return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
/* Handle the .bss pseudo-op: switch output to the bss section at the
   subsection number given on the line.  */
s_bss (int ignore ATTRIBUTE_UNUSED)
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    obj_elf_section_change_hook ();
  temp = get_absolute_expression ();
  subseg_set (bss_section, (subsegT) temp);
  demand_empty_rest_of_line ();
/* Remember constant directive.  Records that the last "instruction"
   in a code section was a data directive, which defeats the
   -mlfence-before-ret / -mlfence-before-indirect-branch mitigations;
   warn accordingly.  */
i386_cons_align (int ignore ATTRIBUTE_UNUSED)
  if (last_insn.kind != last_insn_directive
      && (bfd_section_flags (now_seg) & SEC_CODE))
      last_insn.seg = now_seg;
      last_insn.kind = last_insn_directive;
      last_insn.name = "constant directive";
      last_insn.file = as_where (&last_insn.line);
      if (lfence_before_ret != lfence_before_ret_none)
	  if (lfence_before_indirect_branch != lfence_branch_none)
	    as_warn (_("constant directive skips -mlfence-before-ret "
		       "and -mlfence-before-indirect-branch"));
	    as_warn (_("constant directive skips -mlfence-before-ret"));
      else if (lfence_before_indirect_branch != lfence_branch_none)
	as_warn (_("constant directive skips -mlfence-before-indirect-branch"));
/* Adjust FIXP's relocation type before relocations are generated,
   e.g. rewriting fixups whose subtrahend is the GOT symbol into
   GOT-relative relocations, and upgrading GOT32 to the relaxable
   GOT32X form.  NOTE(review): several branches/else arms are elided
   in this extraction.  */
i386_validate_fix (fixS *fixp)
  if (fixp->fx_subsy)
      if (fixp->fx_subsy == GOT_symbol)
	  if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
	      /* fx_tcbit2 marks the fixup as eligible for GOTPCREL
		 relaxation; fx_tcbit selects the REX variant.  */
	      if (fixp->fx_tcbit2)
		fixp->fx_r_type = (fixp->fx_tcbit
				   ? BFD_RELOC_X86_64_REX_GOTPCRELX
				   : BFD_RELOC_X86_64_GOTPCRELX);
		fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
	      fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
	      fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
	  /* The GOT symbol has been folded into the reloc type.  */
	  fixp->fx_subsy = 0;
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  else if (!object_64bit)
      if (fixp->fx_r_type == BFD_RELOC_386_GOT32
	  && fixp->fx_tcbit2)
	fixp->fx_r_type = BFD_RELOC_386_GOT32X;
/* Translate the gas-internal fixup FIXP into a BFD arelent for the
   object writer.  Picks a bfd_reloc_code, computes address and addend
   (Rel vs Rela handling differs), and looks up the howto.
   NOTE(review): switch scaffolding, some braces and a few statements
   are elided in this extraction; only visible statements appear
   below.  */
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
  bfd_reloc_code_real_type code;

  switch (fixp->fx_r_type)
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case BFD_RELOC_SIZE32:
    case BFD_RELOC_SIZE64:
      if (S_IS_DEFINED (fixp->fx_addsy)
	  && !S_IS_EXTERNAL (fixp->fx_addsy))
	  /* Resolve size relocation against local symbol to size of
	     the symbol plus addend.  */
	  valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
	  if (fixp->fx_r_type == BFD_RELOC_SIZE32
	      && !fits_in_unsigned_long (value))
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("symbol size computation overflow"));
	  fixp->fx_addsy = NULL;
	  fixp->fx_subsy = NULL;
	  md_apply_fix (fixp, (valueT *) &value, NULL);
      /* Fall through.  */
    /* These relocation types pass through unchanged.  */
    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_X86_64_GOTPCRELX:
    case BFD_RELOC_X86_64_REX_GOTPCRELX:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOT32X:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_32_SECREL:
      code = fixp->fx_r_type;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
      /* Fall through.  */
      /* Generic case: pick a reloc by size and pcrel-ness.  */
      if (fixp->fx_pcrel)
	  switch (fixp->fx_size)
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
	      code = BFD_RELOC_32_PCREL;
	    case 1: code = BFD_RELOC_8_PCREL; break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4: code = BFD_RELOC_32_PCREL; break;
	    case 8: code = BFD_RELOC_64_PCREL; break;
	  switch (fixp->fx_size)
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
	      code = BFD_RELOC_32;
	    case 1: code = BFD_RELOC_8; break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
	    case 8: code = BFD_RELOC_64; break;

  /* References against the GOT symbol become GOT-PC relocations.  */
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && fixp->fx_addsy == GOT_symbol)
	code = BFD_RELOC_386_GOTPC;
	code = BFD_RELOC_X86_64_GOTPC32;
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && fixp->fx_addsy == GOT_symbol)
      code = BFD_RELOC_X86_64_GOTPC64;

  rel = XNEW (arelent);
  rel->sym_ptr_ptr = XNEW (asymbol *);
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	/* PE weak symbols: compensate for the doubled symbol value.  */
	rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
      /* Use the rela in 64bit mode.  */
      if (disallow_64bit_reloc)
	  /* x32 output cannot represent any 64-bit relocation.  */
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
	  /* PC-relative GOT/PLT/TLS forms: bias the addend by the
	     fixup size.  */
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_GOTPCRELX:
	  case BFD_RELOC_X86_64_REX_GOTPCRELX:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    rel->addend = (section->vma
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
13891 #include "tc-i386-intel.c"
/* Parse a register name from a CFI directive and rewrite EXP into an
   O_constant holding the DWARF2 register number, or O_illegal on
   failure.  Naked and pseudo registers (and '.' within register
   names) are temporarily permitted while parsing, then the previous
   settings are restored.  */
tc_x86_parse_to_dw2regnum (expressionS *exp)
  int saved_naked_reg;
  char saved_register_dot;

  saved_naked_reg = allow_naked_reg;
  allow_naked_reg = 1;
  saved_register_dot = register_chars['.'];
  register_chars['.'] = '.';
  allow_pseudo_reg = 1;
  expression_and_evaluate (exp);
  allow_pseudo_reg = 0;
  register_chars['.'] = saved_register_dot;
  allow_naked_reg = saved_naked_reg;

  if (exp->X_op == O_register && exp->X_add_number >= 0)
      if ((addressT) exp->X_add_number < i386_regtab_size)
	  exp->X_op = O_constant;
	  /* flag_code >> 1 selects the 32-bit vs 64-bit DWARF column.  */
	  exp->X_add_number = i386_regtab[exp->X_add_number]
			      .dw2_regnum[flag_code >> 1];
	exp->X_op = O_illegal;
/* Emit the initial CFI instructions for a frame: CFA at the stack
   pointer with the ABI's data alignment, return address at the CFA.
   The DWARF register number of esp/rsp is parsed once per flag_code
   and cached in sp_regno[].  */
tc_x86_frame_initial_instructions (void)
  static unsigned int sp_regno[2];

  if (!sp_regno[flag_code >> 1])
      char *saved_input = input_line_pointer;
      char sp[][4] = {"esp", "rsp"};

      /* Reuse the register parser by pointing the input at "esp"/"rsp".  */
      input_line_pointer = sp[flag_code >> 1];
      tc_x86_parse_to_dw2regnum (&exp);
      gas_assert (exp.X_op == O_constant);
      sp_regno[flag_code >> 1] = exp.X_add_number;
      input_line_pointer = saved_input;

  cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
  cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
/* Address size in bytes for DWARF2 output; derived from the output
   BFD's bits-per-address (the x32 special case's return is elided in
   this extraction).  */
x86_dwarf2_addr_size (void)
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (x86_elf_abi == X86_64_X32_ABI)
  return bfd_arch_bits_per_address (stdoutput) / 8;
/* Map a .section type name STR (LEN bytes) to an ELF section type;
   only "unwind" in 64-bit mode is special on x86-64.  */
i386_elf_section_type (const char *str, size_t len)
  if (flag_code == CODE_64BIT
      && len == sizeof ("unwind") - 1
      && strncmp (str, "unwind", 6) == 0)
    return SHT_X86_64_UNWIND;
/* On Solaris, mark the .eh_frame section SEC as SHT_X86_64_UNWIND in
   64-bit mode.  */
i386_solaris_fix_up_eh_frame (segT sec)
  if (flag_code == CODE_64BIT)
    elf_section_type (sec) = SHT_X86_64_UNWIND;
/* For PE targets: emit a SIZE-byte section-relative (O_secrel)
   reference to SYMBOL, used by the DWARF2 emitter.  */
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
13987 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13988 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
/* Map a .section flag letter to an SHF value; 'l' means
   SHF_X86_64_LARGE in 64-bit mode.  On failure *PTR_MSG is set to the
   appropriate diagnostic (the 'l' letter is only advertised in the
   64-bit message).  */
x86_64_section_letter (int letter, const char **ptr_msg)
  if (flag_code == CODE_64BIT)
	return SHF_X86_64_LARGE;

      *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
    *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
/* Recognize the .section attribute word "large" (64-bit mode only)
   and map it to SHF_X86_64_LARGE.  */
x86_64_section_word (char *str, size_t len)
  if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
    return SHF_X86_64_LARGE;
/* Implement the .largecomm directive.  In 32-bit mode it degrades to
   an ordinary .comm with a warning.  In 64-bit mode the common symbol
   goes to the large common section — locals to .lbss — by temporarily
   swapping the section globals that s_comm_internal consults, then
   restoring them.  */
handle_large_common (int small ATTRIBUTE_UNUSED)
  if (flag_code != CODE_64BIT)
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      if (lbss_section == NULL)
	  flagword applicable;
	  segT seg = now_seg;
	  subsegT subseg = now_subseg;

	  /* The .lbss section is for local .largecomm symbols.  */
	  lbss_section = subseg_new (".lbss", 0);
	  applicable = bfd_applicable_section_flags (stdoutput);
	  bfd_set_section_flags (lbss_section, applicable & SEC_ALLOC);
	  seg_info (lbss_section)->bss = 1;

	  /* Creating .lbss switched sections; switch back.  */
	  subseg_set (seg, subseg);

      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;