1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2017 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option) any later version.
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
36 #ifndef REGISTER_WARNINGS
37 #define REGISTER_WARNINGS 1
40 #ifndef INFER_ADDR_PREFIX
41 #define INFER_ADDR_PREFIX 1
45 #define DEFAULT_ARCH "i386"
50 #define INLINE __inline__
56 /* Prefixes will be emitted in the order defined below.
57 WAIT_PREFIX must be the first prefix since FWAIT really is an
58 instruction, and so must come before any prefixes.
59 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
60 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
66 #define HLE_PREFIX REP_PREFIX
67 #define BND_PREFIX REP_PREFIX
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 #define ZMMWORD_MNEM_SUFFIX 'z'
87 /* Intel Syntax.  Use a non-ascii letter since it never appears
89 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
91 #define END_OF_INSN '\0'
94 'templates' is for grouping together 'template' structures for opcodes
95 of the same name. This is only used for storing the insns in the grand
96 ole hash table of insns.
97 The templates themselves start at START and range up to (but not including)
102 const insn_template
*start
;
103 const insn_template
*end
;
107 /* 386 operand encoding bytes: see 386 book for details of this. */
110 unsigned int regmem
; /* codes register or memory operand */
111 unsigned int reg
; /* codes register operand (or extended opcode) */
112 unsigned int mode
; /* how to interpret regmem & reg */
116 /* x86-64 extension prefix. */
117 typedef int rex_byte
;
119 /* 386 opcode byte to code indirect addressing. */
128 /* x86 arch names, types and features */
131 const char *name
; /* arch name */
132 unsigned int len
; /* arch string length */
133 enum processor_type type
; /* arch type */
134 i386_cpu_flags flags
; /* cpu feature flags */
135 unsigned int skip
; /* show_arch should skip this. */
139 /* Used to turn off indicated flags. */
142 const char *name
; /* arch name */
143 unsigned int len
; /* arch string length */
144 i386_cpu_flags flags
; /* cpu feature flags */
148 static void update_code_flag (int, int);
149 static void set_code_flag (int);
150 static void set_16bit_gcc_code_flag (int);
151 static void set_intel_syntax (int);
152 static void set_intel_mnemonic (int);
153 static void set_allow_index_reg (int);
154 static void set_check (int);
155 static void set_cpu_arch (int);
157 static void pe_directive_secrel (int);
159 static void signed_cons (int);
160 static char *output_invalid (int c
);
161 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
163 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
165 static int i386_att_operand (char *);
166 static int i386_intel_operand (char *, int);
167 static int i386_intel_simplify (expressionS
*);
168 static int i386_intel_parse_name (const char *, expressionS
*);
169 static const reg_entry
*parse_register (char *, char **);
170 static char *parse_insn (char *, char *);
171 static char *parse_operands (char *, const char *);
172 static void swap_operands (void);
173 static void swap_2_operands (int, int);
174 static void optimize_imm (void);
175 static void optimize_disp (void);
176 static const insn_template
*match_template (char);
177 static int check_string (void);
178 static int process_suffix (void);
179 static int check_byte_reg (void);
180 static int check_long_reg (void);
181 static int check_qword_reg (void);
182 static int check_word_reg (void);
183 static int finalize_imm (void);
184 static int process_operands (void);
185 static const seg_entry
*build_modrm_byte (void);
186 static void output_insn (void);
187 static void output_imm (fragS
*, offsetT
);
188 static void output_disp (fragS
*, offsetT
);
190 static void s_bss (int);
192 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
193 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
196 static const char *default_arch
= DEFAULT_ARCH
;
198 /* This struct describes rounding control and SAE in the instruction. */
212 static struct RC_Operation rc_op
;
214 /* The struct describes masking, applied to OPERAND in the instruction.
215 MASK is a pointer to the corresponding mask register. ZEROING tells
216 whether merging or zeroing mask is used. */
217 struct Mask_Operation
219 const reg_entry
*mask
;
220 unsigned int zeroing
;
221 /* The operand where this operation is associated. */
225 static struct Mask_Operation mask_op
;
227 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
229 struct Broadcast_Operation
231 /* Type of broadcast: no broadcast, {1to8}, or {1to16}. */
234 /* Index of broadcasted operand. */
238 static struct Broadcast_Operation broadcast_op
;
243 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
244 unsigned char bytes
[4];
246 /* Destination or source register specifier. */
247 const reg_entry
*register_specifier
;
250 /* 'md_assemble ()' gathers together information and puts it into a
257 const reg_entry
*regs
;
262 operand_size_mismatch
,
263 operand_type_mismatch
,
264 register_type_mismatch
,
265 number_of_operands_mismatch
,
266 invalid_instruction_suffix
,
269 unsupported_with_intel_mnemonic
,
272 invalid_vsib_address
,
273 invalid_vector_register_set
,
274 unsupported_vector_index_register
,
275 unsupported_broadcast
,
276 broadcast_not_on_src_operand
,
279 mask_not_on_destination
,
282 rc_sae_operand_not_last_imm
,
283 invalid_register_operand
,
289 /* TM holds the template for the insn were currently assembling. */
292 /* SUFFIX holds the instruction size suffix for byte, word, dword
293 or qword, if given. */
296 /* OPERANDS gives the number of given operands. */
297 unsigned int operands
;
299 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
300 of given register, displacement, memory operands and immediate
302 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
304 /* TYPES [i] is the type (see above #defines) which tells us how to
305 use OP[i] for the corresponding operand. */
306 i386_operand_type types
[MAX_OPERANDS
];
308 /* Displacement expression, immediate expression, or register for each
310 union i386_op op
[MAX_OPERANDS
];
312 /* Flags for operands. */
313 unsigned int flags
[MAX_OPERANDS
];
314 #define Operand_PCrel 1
316 /* Relocation type for operand */
317 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
319 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
320 the base index byte below. */
321 const reg_entry
*base_reg
;
322 const reg_entry
*index_reg
;
323 unsigned int log2_scale_factor
;
325 /* SEG gives the seg_entries of this insn. They are zero unless
326 explicit segment overrides are given. */
327 const seg_entry
*seg
[2];
329 /* Copied first memory operand string, for re-checking. */
332 /* PREFIX holds all the given prefix opcodes (usually null).
333 PREFIXES is the number of prefix opcodes. */
334 unsigned int prefixes
;
335 unsigned char prefix
[MAX_PREFIXES
];
337 /* RM and SIB are the modrm byte and the sib byte where the
338 addressing modes of this insn are encoded. */
345 /* Masking attributes. */
346 struct Mask_Operation
*mask
;
348 /* Rounding control and SAE attributes. */
349 struct RC_Operation
*rounding
;
351 /* Broadcasting attributes. */
352 struct Broadcast_Operation
*broadcast
;
354 /* Compressed disp8*N attribute. */
355 unsigned int memshift
;
357 /* Prefer load or store in encoding. */
360 dir_encoding_default
= 0,
365 /* Prefer 8bit or 32bit displacement in encoding. */
368 disp_encoding_default
= 0,
373 /* How to encode vector instructions. */
376 vex_encoding_default
= 0,
383 const char *rep_prefix
;
386 const char *hle_prefix
;
388 /* Have BND prefix. */
389 const char *bnd_prefix
;
391 /* Have NOTRACK prefix. */
392 const char *notrack_prefix
;
395 enum i386_error error
;
398 typedef struct _i386_insn i386_insn
;
400 /* Link RC type with corresponding string, that'll be looked for in
409 static const struct RC_name RC_NamesTable
[] =
411 { rne
, STRING_COMMA_LEN ("rn-sae") },
412 { rd
, STRING_COMMA_LEN ("rd-sae") },
413 { ru
, STRING_COMMA_LEN ("ru-sae") },
414 { rz
, STRING_COMMA_LEN ("rz-sae") },
415 { saeonly
, STRING_COMMA_LEN ("sae") },
418 /* List of chars besides those in app.c:symbol_chars that can start an
419 operand. Used to prevent the scrubber eating vital white-space. */
420 const char extra_symbol_chars
[] = "*%-([{}"
429 #if (defined (TE_I386AIX) \
430 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
431 && !defined (TE_GNU) \
432 && !defined (TE_LINUX) \
433 && !defined (TE_NACL) \
434 && !defined (TE_NETWARE) \
435 && !defined (TE_FreeBSD) \
436 && !defined (TE_DragonFly) \
437 && !defined (TE_NetBSD)))
438 /* This array holds the chars that always start a comment. If the
439 pre-processor is disabled, these aren't very useful. The option
440 --divide will remove '/' from this list. */
441 const char *i386_comment_chars
= "#/";
442 #define SVR4_COMMENT_CHARS 1
443 #define PREFIX_SEPARATOR '\\'
446 const char *i386_comment_chars
= "#";
447 #define PREFIX_SEPARATOR '/'
450 /* This array holds the chars that only start a comment at the beginning of
451 a line. If the line seems to have the form '# 123 filename'
452 .line and .file directives will appear in the pre-processed output.
453 Note that input_file.c hand checks for '#' at the beginning of the
454 first line of the input file. This is because the compiler outputs
455 #NO_APP at the beginning of its output.
456 Also note that comments started like this one will always work if
457 '/' isn't otherwise defined. */
458 const char line_comment_chars
[] = "#/";
460 const char line_separator_chars
[] = ";";
462 /* Chars that can be used to separate mant from exp in floating point
464 const char EXP_CHARS
[] = "eE";
466 /* Chars that mean this number is a floating point constant
469 const char FLT_CHARS
[] = "fFdDxX";
/* Tables for lexical analysis.  Each table is a 256-entry map indexed
   by unsigned character value; a non-zero entry means the character
   belongs to that class.  The tables are zero here and presumably
   filled in during assembler start-up -- TODO confirm (initialization
   code is not in this chunk).  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];
static char digit_chars[256];

/* Lexical macros.  The argument is parenthesized BEFORE the
   (unsigned char) cast so that an expression argument such as
   `c | flag' is converted as a whole; the previous form cast only the
   first operand of the expression, which could produce an index
   outside the 256-entry table.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) (x)])
#define is_operand_char(x) (operand_chars[(unsigned char) (x)])
#define is_register_char(x) (register_chars[(unsigned char) (x)])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) (x)])
#define is_digit_char(x) (digit_chars[(unsigned char) (x)])
/* Characters other than letters and digits that can appear inside an
   operand.  */
static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";

/* md_assemble() must hand back the strings it receives unmodified.
   While parsing we temporarily overwrite delimiter characters with
   '\0' to terminate sub-fields of the instruction; each overwritten
   character is pushed onto this small stack so that it can be put
   back afterwards.  */
static char save_stack[32];
static char *save_stack_p;

/* Terminate the string at S, remembering the character that was
   there.  */
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)

/* Undo the most recent END_STRING_AND_SAVE applied at S.  */
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
500 /* The instruction we're assembling. */
503 /* Possible templates for current insn. */
504 static const templates
*current_templates
;
506 /* Per instruction expressionS buffers: max displacements & immediates. */
507 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
508 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
510 /* Current operand we are working on. */
511 static int this_operand
= -1;
513 /* We support four different modes. FLAG_CODE variable is used to distinguish
521 static enum flag_code flag_code
;
522 static unsigned int object_64bit
;
523 static unsigned int disallow_64bit_reloc
;
524 static int use_rela_relocations
= 0;
526 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
527 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
528 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
530 /* The ELF ABI to use. */
538 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
541 #if defined (TE_PE) || defined (TE_PEP)
542 /* Use big object file format. */
543 static int use_big_obj
= 0;
546 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
547 /* 1 if generating code for a shared library. */
548 static int shared
= 0;
551 /* 1 for intel syntax,
553 static int intel_syntax
= 0;
555 /* 1 for Intel64 ISA,
559 /* 1 for intel mnemonic,
560 0 if att mnemonic. */
561 static int intel_mnemonic
= !SYSV386_COMPAT
;
563 /* 1 if support old (<= 2.8.1) versions of gcc. */
564 static int old_gcc
= OLDGCC_COMPAT
;
566 /* 1 if pseudo registers are permitted. */
567 static int allow_pseudo_reg
= 0;
569 /* 1 if register prefix % not required. */
570 static int allow_naked_reg
= 0;
572 /* 1 if the assembler should add BND prefix for all control-transferring
573 instructions supporting it, even if this prefix wasn't specified
575 static int add_bnd_prefix
= 0;
577 /* 1 if pseudo index register, eiz/riz, is allowed . */
578 static int allow_index_reg
= 0;
580 /* 1 if the assembler should ignore LOCK prefix, even if it was
581 specified explicitly. */
582 static int omit_lock_prefix
= 0;
584 /* 1 if the assembler should encode lfence, mfence, and sfence as
585 "lock addl $0, (%{re}sp)". */
586 static int avoid_fence
= 0;
588 /* 1 if the assembler should generate relax relocations. */
590 static int generate_relax_relocations
591 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
593 static enum check_kind
599 sse_check
, operand_check
= check_warning
;
601 /* Register prefix used for error message. */
602 static const char *register_prefix
= "%";
604 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
605 leave, push, and pop instructions so that gcc has the same stack
606 frame as in 32 bit mode. */
607 static char stackop_size
= '\0';
609 /* Non-zero to optimize code alignment. */
610 int optimize_align_code
= 1;
612 /* Non-zero to quieten some warnings. */
613 static int quiet_warnings
= 0;
616 static const char *cpu_arch_name
= NULL
;
617 static char *cpu_sub_arch_name
= NULL
;
619 /* CPU feature flags. */
620 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
622 /* If we have selected a cpu we are generating instructions for. */
623 static int cpu_arch_tune_set
= 0;
625 /* Cpu we are generating instructions for. */
626 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
628 /* CPU feature flags of cpu we are generating instructions for. */
629 static i386_cpu_flags cpu_arch_tune_flags
;
631 /* CPU instruction set architecture used. */
632 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
634 /* CPU feature flags of instruction set architecture used. */
635 i386_cpu_flags cpu_arch_isa_flags
;
637 /* If set, conditional jumps are not automatically promoted to handle
638 larger than a byte offset. */
639 static unsigned int no_cond_jump_promotion
= 0;
641 /* Encode SSE instructions with VEX prefix. */
642 static unsigned int sse2avx
;
644 /* Encode scalar AVX instructions with specific vector length. */
651 /* Encode scalar EVEX LIG instructions with specific vector length. */
659 /* Encode EVEX WIG instructions with specific evex.w. */
666 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
667 static enum rc_type evexrcig
= rne
;
669 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
670 static symbolS
*GOT_symbol
;
672 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
673 unsigned int x86_dwarf2_return_column
;
675 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
676 int x86_cie_data_alignment
;
678 /* Interface to relax_segment.
679 There are 3 major relax states for 386 jump insns because the
680 different types of jumps add different sizes to frags when we're
681 figuring out what sort of jump to choose to reach a given label. */
684 #define UNCOND_JUMP 0
686 #define COND_JUMP86 2
691 #define SMALL16 (SMALL | CODE16)
693 #define BIG16 (BIG | CODE16)
697 #define INLINE __inline__
703 #define ENCODE_RELAX_STATE(type, size) \
704 ((relax_substateT) (((type) << 2) | (size)))
705 #define TYPE_FROM_RELAX_STATE(s) \
707 #define DISP_SIZE_FROM_RELAX_STATE(s) \
708 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
710 /* This table is used by relax_frag to promote short jumps to long
711 ones where necessary. SMALL (short) jumps may be promoted to BIG
712 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
713 don't allow a short jump in a 32 bit code segment to be promoted to
714 a 16 bit offset jump because it's slower (requires data size
715 prefix), and doesn't work, unless the destination is in the bottom
716 64k of the code segment (The top 16 bits of eip are zeroed). */
718 const relax_typeS md_relax_table
[] =
721 1) most positive reach of this state,
722 2) most negative reach of this state,
723 3) how many bytes this mode will have in the variable part of the frag
724 4) which index into the table to try if we can't fit into this one. */
726 /* UNCOND_JUMP states. */
727 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
728 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
729 /* dword jmp adds 4 bytes to frag:
730 0 extra opcode bytes, 4 displacement bytes. */
732 /* word jmp adds 2 bytes to frag:
733 0 extra opcode bytes, 2 displacement bytes. */
736 /* COND_JUMP states. */
737 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
738 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
739 /* dword conditionals adds 5 bytes to frag:
740 1 extra opcode byte, 4 displacement bytes. */
742 /* word conditionals add 3 bytes to frag:
743 1 extra opcode byte, 2 displacement bytes. */
746 /* COND_JUMP86 states. */
747 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
748 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
749 /* dword conditionals adds 5 bytes to frag:
750 1 extra opcode byte, 4 displacement bytes. */
752 /* word conditionals add 4 bytes to frag:
753 1 displacement byte and a 3 byte long branch insn. */
757 static const arch_entry cpu_arch
[] =
759 /* Do not replace the first two entries - i386_target_format()
760 relies on them being there in this order. */
761 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
762 CPU_GENERIC32_FLAGS
, 0 },
763 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
764 CPU_GENERIC64_FLAGS
, 0 },
765 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
767 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
769 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
771 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
773 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
775 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
777 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
779 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
781 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
782 CPU_PENTIUMPRO_FLAGS
, 0 },
783 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
785 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
787 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
789 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
791 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
792 CPU_NOCONA_FLAGS
, 0 },
793 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
795 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
797 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
798 CPU_CORE2_FLAGS
, 1 },
799 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
800 CPU_CORE2_FLAGS
, 0 },
801 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
802 CPU_COREI7_FLAGS
, 0 },
803 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
805 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
807 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
808 CPU_IAMCU_FLAGS
, 0 },
809 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
811 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
813 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
814 CPU_ATHLON_FLAGS
, 0 },
815 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
817 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
819 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
821 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
822 CPU_AMDFAM10_FLAGS
, 0 },
823 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
824 CPU_BDVER1_FLAGS
, 0 },
825 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
826 CPU_BDVER2_FLAGS
, 0 },
827 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
828 CPU_BDVER3_FLAGS
, 0 },
829 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
830 CPU_BDVER4_FLAGS
, 0 },
831 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
832 CPU_ZNVER1_FLAGS
, 0 },
833 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
834 CPU_BTVER1_FLAGS
, 0 },
835 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
836 CPU_BTVER2_FLAGS
, 0 },
837 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
839 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
841 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
843 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
845 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
847 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
849 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
851 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
853 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
854 CPU_SSSE3_FLAGS
, 0 },
855 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
856 CPU_SSE4_1_FLAGS
, 0 },
857 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
858 CPU_SSE4_2_FLAGS
, 0 },
859 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
860 CPU_SSE4_2_FLAGS
, 0 },
861 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
863 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
865 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
866 CPU_AVX512F_FLAGS
, 0 },
867 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
868 CPU_AVX512CD_FLAGS
, 0 },
869 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
870 CPU_AVX512ER_FLAGS
, 0 },
871 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
872 CPU_AVX512PF_FLAGS
, 0 },
873 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
874 CPU_AVX512DQ_FLAGS
, 0 },
875 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
876 CPU_AVX512BW_FLAGS
, 0 },
877 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
878 CPU_AVX512VL_FLAGS
, 0 },
879 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
881 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
882 CPU_VMFUNC_FLAGS
, 0 },
883 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
885 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
886 CPU_XSAVE_FLAGS
, 0 },
887 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
888 CPU_XSAVEOPT_FLAGS
, 0 },
889 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
890 CPU_XSAVEC_FLAGS
, 0 },
891 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
892 CPU_XSAVES_FLAGS
, 0 },
893 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
895 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
896 CPU_PCLMUL_FLAGS
, 0 },
897 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
898 CPU_PCLMUL_FLAGS
, 1 },
899 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
900 CPU_FSGSBASE_FLAGS
, 0 },
901 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
902 CPU_RDRND_FLAGS
, 0 },
903 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
905 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
907 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
909 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
911 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
913 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
915 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
916 CPU_MOVBE_FLAGS
, 0 },
917 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
919 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
921 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
922 CPU_LZCNT_FLAGS
, 0 },
923 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
925 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
927 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
928 CPU_INVPCID_FLAGS
, 0 },
929 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
930 CPU_CLFLUSH_FLAGS
, 0 },
931 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
933 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
934 CPU_SYSCALL_FLAGS
, 0 },
935 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
936 CPU_RDTSCP_FLAGS
, 0 },
937 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
938 CPU_3DNOW_FLAGS
, 0 },
939 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
940 CPU_3DNOWA_FLAGS
, 0 },
941 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
942 CPU_PADLOCK_FLAGS
, 0 },
943 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
945 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
947 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
948 CPU_SSE4A_FLAGS
, 0 },
949 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
951 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
953 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
955 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
957 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
958 CPU_RDSEED_FLAGS
, 0 },
959 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
960 CPU_PRFCHW_FLAGS
, 0 },
961 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
963 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
965 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
967 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
968 CPU_CLFLUSHOPT_FLAGS
, 0 },
969 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
970 CPU_PREFETCHWT1_FLAGS
, 0 },
971 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
973 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
975 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
976 CPU_AVX512IFMA_FLAGS
, 0 },
977 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
978 CPU_AVX512VBMI_FLAGS
, 0 },
979 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
980 CPU_AVX512_4FMAPS_FLAGS
, 0 },
981 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
982 CPU_AVX512_4VNNIW_FLAGS
, 0 },
983 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
984 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
985 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
986 CPU_AVX512_VBMI2_FLAGS
, 0 },
987 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
988 CPU_CLZERO_FLAGS
, 0 },
989 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
990 CPU_MWAITX_FLAGS
, 0 },
991 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
992 CPU_OSPKE_FLAGS
, 0 },
993 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
994 CPU_RDPID_FLAGS
, 0 },
995 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
996 CPU_PTWRITE_FLAGS
, 0 },
997 { STRING_COMMA_LEN (".cet"), PROCESSOR_UNKNOWN
,
999 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1000 CPU_GFNI_FLAGS
, 0 },
1003 static const noarch_entry cpu_noarch
[] =
1005 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1006 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1007 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1008 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1009 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1010 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1011 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1012 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1013 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1014 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1015 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1016 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1017 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1018 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1019 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1020 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1021 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1022 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1023 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1024 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1025 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1026 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1027 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1028 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1029 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1030 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1031 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1035 /* Like s_lcomm_internal in gas/read.c but the alignment string
1036 is allowed to be optional. */
1039 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1046 && *input_line_pointer
== ',')
1048 align
= parse_align (needs_align
- 1);
1050 if (align
== (addressT
) -1)
1065 bss_alloc (symbolP
, size
, align
);
1070 pe_lcomm (int needs_align
)
1072 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1076 const pseudo_typeS md_pseudo_table
[] =
1078 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1079 {"align", s_align_bytes
, 0},
1081 {"align", s_align_ptwo
, 0},
1083 {"arch", set_cpu_arch
, 0},
1087 {"lcomm", pe_lcomm
, 1},
1089 {"ffloat", float_cons
, 'f'},
1090 {"dfloat", float_cons
, 'd'},
1091 {"tfloat", float_cons
, 'x'},
1093 {"slong", signed_cons
, 4},
1094 {"noopt", s_ignore
, 0},
1095 {"optim", s_ignore
, 0},
1096 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1097 {"code16", set_code_flag
, CODE_16BIT
},
1098 {"code32", set_code_flag
, CODE_32BIT
},
1099 {"code64", set_code_flag
, CODE_64BIT
},
1100 {"intel_syntax", set_intel_syntax
, 1},
1101 {"att_syntax", set_intel_syntax
, 0},
1102 {"intel_mnemonic", set_intel_mnemonic
, 1},
1103 {"att_mnemonic", set_intel_mnemonic
, 0},
1104 {"allow_index_reg", set_allow_index_reg
, 1},
1105 {"disallow_index_reg", set_allow_index_reg
, 0},
1106 {"sse_check", set_check
, 0},
1107 {"operand_check", set_check
, 1},
1108 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1109 {"largecomm", handle_large_common
, 0},
1111 {"file", (void (*) (int)) dwarf2_directive_file
, 0},
1112 {"loc", dwarf2_directive_loc
, 0},
1113 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1116 {"secrel32", pe_directive_secrel
, 0},
1121 /* For interface with expression (). */
1122 extern char *input_line_pointer
;
1124 /* Hash table for instruction mnemonic lookup. */
1125 static struct hash_control
*op_hash
;
1127 /* Hash table for register lookup. */
1128 static struct hash_control
*reg_hash
;
1131 i386_align_code (fragS
*fragP
, int count
)
1133 /* Various efficient no-op patterns for aligning code labels.
1134 Note: Don't try to assemble the instructions in the comments.
1135 0L and 0w are not legal. */
1136 static const unsigned char f32_1
[] =
1138 static const unsigned char f32_2
[] =
1139 {0x66,0x90}; /* xchg %ax,%ax */
1140 static const unsigned char f32_3
[] =
1141 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1142 static const unsigned char f32_4
[] =
1143 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1144 static const unsigned char f32_5
[] =
1146 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1147 static const unsigned char f32_6
[] =
1148 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1149 static const unsigned char f32_7
[] =
1150 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1151 static const unsigned char f32_8
[] =
1153 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1154 static const unsigned char f32_9
[] =
1155 {0x89,0xf6, /* movl %esi,%esi */
1156 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1157 static const unsigned char f32_10
[] =
1158 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
1159 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1160 static const unsigned char f32_11
[] =
1161 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
1162 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1163 static const unsigned char f32_12
[] =
1164 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
1165 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
1166 static const unsigned char f32_13
[] =
1167 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
1168 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1169 static const unsigned char f32_14
[] =
1170 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
1171 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1172 static const unsigned char f16_3
[] =
1173 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
1174 static const unsigned char f16_4
[] =
1175 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
1176 static const unsigned char f16_5
[] =
1178 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
1179 static const unsigned char f16_6
[] =
1180 {0x89,0xf6, /* mov %si,%si */
1181 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1182 static const unsigned char f16_7
[] =
1183 {0x8d,0x74,0x00, /* lea 0(%si),%si */
1184 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1185 static const unsigned char f16_8
[] =
1186 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
1187 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1188 static const unsigned char jump_31
[] =
1189 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
1190 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
1191 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
1192 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
1193 static const unsigned char *const f32_patt
[] = {
1194 f32_1
, f32_2
, f32_3
, f32_4
, f32_5
, f32_6
, f32_7
, f32_8
,
1195 f32_9
, f32_10
, f32_11
, f32_12
, f32_13
, f32_14
1197 static const unsigned char *const f16_patt
[] = {
1198 f32_1
, f32_2
, f16_3
, f16_4
, f16_5
, f16_6
, f16_7
, f16_8
1200 /* nopl (%[re]ax) */
1201 static const unsigned char alt_3
[] =
1203 /* nopl 0(%[re]ax) */
1204 static const unsigned char alt_4
[] =
1205 {0x0f,0x1f,0x40,0x00};
1206 /* nopl 0(%[re]ax,%[re]ax,1) */
1207 static const unsigned char alt_5
[] =
1208 {0x0f,0x1f,0x44,0x00,0x00};
1209 /* nopw 0(%[re]ax,%[re]ax,1) */
1210 static const unsigned char alt_6
[] =
1211 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1212 /* nopl 0L(%[re]ax) */
1213 static const unsigned char alt_7
[] =
1214 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1215 /* nopl 0L(%[re]ax,%[re]ax,1) */
1216 static const unsigned char alt_8
[] =
1217 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1218 /* nopw 0L(%[re]ax,%[re]ax,1) */
1219 static const unsigned char alt_9
[] =
1220 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1221 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1222 static const unsigned char alt_10
[] =
1223 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1224 static const unsigned char *const alt_patt
[] = {
1225 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1229 /* Only align for at least a positive non-zero boundary. */
1230 if (count
<= 0 || count
> MAX_MEM_FOR_RS_ALIGN_CODE
)
1233 /* We need to decide which NOP sequence to use for 32bit and
1234 64bit. When -mtune= is used:
1236 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1237 PROCESSOR_GENERIC32, f32_patt will be used.
1238 2. For the rest, alt_patt will be used.
1240 When -mtune= isn't used, alt_patt will be used if
1241 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1244 When -march= or .arch is used, we can't use anything beyond
1245 cpu_arch_isa_flags. */
1247 if (flag_code
== CODE_16BIT
)
1251 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1253 /* Adjust jump offset. */
1254 fragP
->fr_literal
[fragP
->fr_fix
+ 1] = count
- 2;
1257 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1258 f16_patt
[count
- 1], count
);
1262 const unsigned char *const *patt
= NULL
;
1264 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1266 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1267 switch (cpu_arch_tune
)
1269 case PROCESSOR_UNKNOWN
:
1270 /* We use cpu_arch_isa_flags to check if we SHOULD
1271 optimize with nops. */
1272 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1277 case PROCESSOR_PENTIUM4
:
1278 case PROCESSOR_NOCONA
:
1279 case PROCESSOR_CORE
:
1280 case PROCESSOR_CORE2
:
1281 case PROCESSOR_COREI7
:
1282 case PROCESSOR_L1OM
:
1283 case PROCESSOR_K1OM
:
1284 case PROCESSOR_GENERIC64
:
1286 case PROCESSOR_ATHLON
:
1288 case PROCESSOR_AMDFAM10
:
1290 case PROCESSOR_ZNVER
:
1294 case PROCESSOR_I386
:
1295 case PROCESSOR_I486
:
1296 case PROCESSOR_PENTIUM
:
1297 case PROCESSOR_PENTIUMPRO
:
1298 case PROCESSOR_IAMCU
:
1299 case PROCESSOR_GENERIC32
:
1306 switch (fragP
->tc_frag_data
.tune
)
1308 case PROCESSOR_UNKNOWN
:
1309 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1310 PROCESSOR_UNKNOWN. */
1314 case PROCESSOR_I386
:
1315 case PROCESSOR_I486
:
1316 case PROCESSOR_PENTIUM
:
1317 case PROCESSOR_IAMCU
:
1319 case PROCESSOR_ATHLON
:
1321 case PROCESSOR_AMDFAM10
:
1323 case PROCESSOR_ZNVER
:
1325 case PROCESSOR_GENERIC32
:
1326 /* We use cpu_arch_isa_flags to check if we CAN optimize
1328 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1333 case PROCESSOR_PENTIUMPRO
:
1334 case PROCESSOR_PENTIUM4
:
1335 case PROCESSOR_NOCONA
:
1336 case PROCESSOR_CORE
:
1337 case PROCESSOR_CORE2
:
1338 case PROCESSOR_COREI7
:
1339 case PROCESSOR_L1OM
:
1340 case PROCESSOR_K1OM
:
1341 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1346 case PROCESSOR_GENERIC64
:
1352 if (patt
== f32_patt
)
1354 /* If the padding is less than 15 bytes, we use the normal
1355 ones. Otherwise, we use a jump instruction and adjust
1359 /* For 64bit, the limit is 3 bytes. */
1360 if (flag_code
== CODE_64BIT
1361 && fragP
->tc_frag_data
.isa_flags
.bitfield
.cpulm
)
1366 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1367 patt
[count
- 1], count
);
1370 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1372 /* Adjust jump offset. */
1373 fragP
->fr_literal
[fragP
->fr_fix
+ 1] = count
- 2;
1378 /* Maximum length of an instruction is 10 byte. If the
1379 padding is greater than 10 bytes and we don't use jump,
1380 we have to break it into smaller pieces. */
1381 int padding
= count
;
1382 while (padding
> 10)
1385 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
+ padding
,
1390 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1391 patt
[padding
- 1], padding
);
1394 fragP
->fr_var
= count
;
1398 operand_type_all_zero (const union i386_operand_type
*x
)
1400 switch (ARRAY_SIZE(x
->array
))
1411 return !x
->array
[0];
1418 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1420 switch (ARRAY_SIZE(x
->array
))
1438 operand_type_equal (const union i386_operand_type
*x
,
1439 const union i386_operand_type
*y
)
1441 switch (ARRAY_SIZE(x
->array
))
1444 if (x
->array
[2] != y
->array
[2])
1448 if (x
->array
[1] != y
->array
[1])
1452 return x
->array
[0] == y
->array
[0];
1460 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1462 switch (ARRAY_SIZE(x
->array
))
1477 return !x
->array
[0];
1484 cpu_flags_equal (const union i386_cpu_flags
*x
,
1485 const union i386_cpu_flags
*y
)
1487 switch (ARRAY_SIZE(x
->array
))
1490 if (x
->array
[3] != y
->array
[3])
1494 if (x
->array
[2] != y
->array
[2])
1498 if (x
->array
[1] != y
->array
[1])
1502 return x
->array
[0] == y
->array
[0];
1510 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1512 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1513 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1516 static INLINE i386_cpu_flags
1517 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1519 switch (ARRAY_SIZE (x
.array
))
1522 x
.array
[3] &= y
.array
[3];
1525 x
.array
[2] &= y
.array
[2];
1528 x
.array
[1] &= y
.array
[1];
1531 x
.array
[0] &= y
.array
[0];
1539 static INLINE i386_cpu_flags
1540 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1542 switch (ARRAY_SIZE (x
.array
))
1545 x
.array
[3] |= y
.array
[3];
1548 x
.array
[2] |= y
.array
[2];
1551 x
.array
[1] |= y
.array
[1];
1554 x
.array
[0] |= y
.array
[0];
1562 static INLINE i386_cpu_flags
1563 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1565 switch (ARRAY_SIZE (x
.array
))
1568 x
.array
[3] &= ~y
.array
[3];
1571 x
.array
[2] &= ~y
.array
[2];
1574 x
.array
[1] &= ~y
.array
[1];
1577 x
.array
[0] &= ~y
.array
[0];
1585 #define CPU_FLAGS_ARCH_MATCH 0x1
1586 #define CPU_FLAGS_64BIT_MATCH 0x2
1587 #define CPU_FLAGS_AES_MATCH 0x4
1588 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1589 #define CPU_FLAGS_AVX_MATCH 0x10
1591 #define CPU_FLAGS_32BIT_MATCH \
1592 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1593 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1594 #define CPU_FLAGS_PERFECT_MATCH \
1595 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1597 /* Return CPU flags match bits. */
1600 cpu_flags_match (const insn_template
*t
)
1602 i386_cpu_flags x
= t
->cpu_flags
;
1603 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1605 x
.bitfield
.cpu64
= 0;
1606 x
.bitfield
.cpuno64
= 0;
1608 if (cpu_flags_all_zero (&x
))
1610 /* This instruction is available on all archs. */
1611 match
|= CPU_FLAGS_32BIT_MATCH
;
1615 /* This instruction is available only on some archs. */
1616 i386_cpu_flags cpu
= cpu_arch_flags
;
1618 cpu
= cpu_flags_and (x
, cpu
);
1619 if (!cpu_flags_all_zero (&cpu
))
1621 if (x
.bitfield
.cpuavx
)
1623 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1624 if (cpu
.bitfield
.cpuavx
)
1626 /* Check SSE2AVX. */
1627 if (!t
->opcode_modifier
.sse2avx
|| sse2avx
)
1629 match
|= (CPU_FLAGS_ARCH_MATCH
1630 | CPU_FLAGS_AVX_MATCH
);
1632 if (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1633 match
|= CPU_FLAGS_AES_MATCH
;
1635 if (!x
.bitfield
.cpupclmul
1636 || cpu
.bitfield
.cpupclmul
)
1637 match
|= CPU_FLAGS_PCLMUL_MATCH
;
1641 match
|= CPU_FLAGS_ARCH_MATCH
;
1643 else if (x
.bitfield
.cpuavx512vl
)
1645 /* Match AVX512VL. */
1646 if (cpu
.bitfield
.cpuavx512vl
)
1648 /* Need another match. */
1649 cpu
.bitfield
.cpuavx512vl
= 0;
1650 if (!cpu_flags_all_zero (&cpu
))
1651 match
|= CPU_FLAGS_32BIT_MATCH
;
1653 match
|= CPU_FLAGS_ARCH_MATCH
;
1656 match
|= CPU_FLAGS_ARCH_MATCH
;
1659 match
|= CPU_FLAGS_32BIT_MATCH
;
1665 static INLINE i386_operand_type
1666 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1668 switch (ARRAY_SIZE (x
.array
))
1671 x
.array
[2] &= y
.array
[2];
1674 x
.array
[1] &= y
.array
[1];
1677 x
.array
[0] &= y
.array
[0];
1685 static INLINE i386_operand_type
1686 operand_type_or (i386_operand_type x
, i386_operand_type y
)
1688 switch (ARRAY_SIZE (x
.array
))
1691 x
.array
[2] |= y
.array
[2];
1694 x
.array
[1] |= y
.array
[1];
1697 x
.array
[0] |= y
.array
[0];
1705 static INLINE i386_operand_type
1706 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
1708 switch (ARRAY_SIZE (x
.array
))
1711 x
.array
[2] ^= y
.array
[2];
1714 x
.array
[1] ^= y
.array
[1];
1717 x
.array
[0] ^= y
.array
[0];
1725 static const i386_operand_type acc32
= OPERAND_TYPE_ACC32
;
1726 static const i386_operand_type acc64
= OPERAND_TYPE_ACC64
;
1727 static const i386_operand_type control
= OPERAND_TYPE_CONTROL
;
1728 static const i386_operand_type inoutportreg
1729 = OPERAND_TYPE_INOUTPORTREG
;
1730 static const i386_operand_type reg16_inoutportreg
1731 = OPERAND_TYPE_REG16_INOUTPORTREG
;
1732 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
1733 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
1734 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
1735 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
1736 static const i386_operand_type anydisp
1737 = OPERAND_TYPE_ANYDISP
;
1738 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
1739 static const i386_operand_type regymm
= OPERAND_TYPE_REGYMM
;
1740 static const i386_operand_type regzmm
= OPERAND_TYPE_REGZMM
;
1741 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
1742 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
1743 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
1744 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
1745 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
1746 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
1747 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
1748 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
1749 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
1750 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
1751 static const i386_operand_type vec_imm4
= OPERAND_TYPE_VEC_IMM4
;
1762 operand_type_check (i386_operand_type t
, enum operand_type c
)
1767 return (t
.bitfield
.reg8
1770 || t
.bitfield
.reg64
);
1773 return (t
.bitfield
.imm8
1777 || t
.bitfield
.imm32s
1778 || t
.bitfield
.imm64
);
1781 return (t
.bitfield
.disp8
1782 || t
.bitfield
.disp16
1783 || t
.bitfield
.disp32
1784 || t
.bitfield
.disp32s
1785 || t
.bitfield
.disp64
);
1788 return (t
.bitfield
.disp8
1789 || t
.bitfield
.disp16
1790 || t
.bitfield
.disp32
1791 || t
.bitfield
.disp32s
1792 || t
.bitfield
.disp64
1793 || t
.bitfield
.baseindex
);
1802 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1803 operand J for instruction template T. */
1806 match_reg_size (const insn_template
*t
, unsigned int j
)
1808 return !((i
.types
[j
].bitfield
.byte
1809 && !t
->operand_types
[j
].bitfield
.byte
)
1810 || (i
.types
[j
].bitfield
.word
1811 && !t
->operand_types
[j
].bitfield
.word
)
1812 || (i
.types
[j
].bitfield
.dword
1813 && !t
->operand_types
[j
].bitfield
.dword
)
1814 || (i
.types
[j
].bitfield
.qword
1815 && !t
->operand_types
[j
].bitfield
.qword
));
1818 /* Return 1 if there is no conflict in any size on operand J for
1819 instruction template T. */
1822 match_mem_size (const insn_template
*t
, unsigned int j
)
1824 return (match_reg_size (t
, j
)
1825 && !((i
.types
[j
].bitfield
.unspecified
1827 && !t
->operand_types
[j
].bitfield
.unspecified
)
1828 || (i
.types
[j
].bitfield
.fword
1829 && !t
->operand_types
[j
].bitfield
.fword
)
1830 || (i
.types
[j
].bitfield
.tbyte
1831 && !t
->operand_types
[j
].bitfield
.tbyte
)
1832 || (i
.types
[j
].bitfield
.xmmword
1833 && !t
->operand_types
[j
].bitfield
.xmmword
)
1834 || (i
.types
[j
].bitfield
.ymmword
1835 && !t
->operand_types
[j
].bitfield
.ymmword
)
1836 || (i
.types
[j
].bitfield
.zmmword
1837 && !t
->operand_types
[j
].bitfield
.zmmword
)));
1840 /* Return 1 if there is no size conflict on any operands for
1841 instruction template T. */
1844 operand_size_match (const insn_template
*t
)
1849 /* Don't check jump instructions. */
1850 if (t
->opcode_modifier
.jump
1851 || t
->opcode_modifier
.jumpbyte
1852 || t
->opcode_modifier
.jumpdword
1853 || t
->opcode_modifier
.jumpintersegment
)
1856 /* Check memory and accumulator operand size. */
1857 for (j
= 0; j
< i
.operands
; j
++)
1859 if (t
->operand_types
[j
].bitfield
.anysize
)
1862 if (t
->operand_types
[j
].bitfield
.acc
&& !match_reg_size (t
, j
))
1868 if (i
.types
[j
].bitfield
.mem
&& !match_mem_size (t
, j
))
1877 else if (!t
->opcode_modifier
.d
&& !t
->opcode_modifier
.floatd
)
1880 i
.error
= operand_size_mismatch
;
1884 /* Check reverse. */
1885 gas_assert (i
.operands
== 2);
1888 for (j
= 0; j
< 2; j
++)
1890 if (t
->operand_types
[j
].bitfield
.acc
1891 && !match_reg_size (t
, j
? 0 : 1))
1894 if (i
.types
[j
].bitfield
.mem
1895 && !match_mem_size (t
, j
? 0 : 1))
1903 operand_type_match (i386_operand_type overlap
,
1904 i386_operand_type given
)
1906 i386_operand_type temp
= overlap
;
1908 temp
.bitfield
.jumpabsolute
= 0;
1909 temp
.bitfield
.unspecified
= 0;
1910 temp
.bitfield
.byte
= 0;
1911 temp
.bitfield
.word
= 0;
1912 temp
.bitfield
.dword
= 0;
1913 temp
.bitfield
.fword
= 0;
1914 temp
.bitfield
.qword
= 0;
1915 temp
.bitfield
.tbyte
= 0;
1916 temp
.bitfield
.xmmword
= 0;
1917 temp
.bitfield
.ymmword
= 0;
1918 temp
.bitfield
.zmmword
= 0;
1919 if (operand_type_all_zero (&temp
))
1922 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
1923 && given
.bitfield
.jumpabsolute
== overlap
.bitfield
.jumpabsolute
)
1927 i
.error
= operand_type_mismatch
;
1931 /* If given types g0 and g1 are registers they must be of the same type
1932 unless the expected operand type register overlap is null.
1933 Note that Acc in a template matches every size of reg. */
1936 operand_type_register_match (i386_operand_type m0
,
1937 i386_operand_type g0
,
1938 i386_operand_type t0
,
1939 i386_operand_type m1
,
1940 i386_operand_type g1
,
1941 i386_operand_type t1
)
1943 if (!operand_type_check (g0
, reg
))
1946 if (!operand_type_check (g1
, reg
))
1949 if (g0
.bitfield
.reg8
== g1
.bitfield
.reg8
1950 && g0
.bitfield
.reg16
== g1
.bitfield
.reg16
1951 && g0
.bitfield
.reg32
== g1
.bitfield
.reg32
1952 && g0
.bitfield
.reg64
== g1
.bitfield
.reg64
)
1955 if (m0
.bitfield
.acc
)
1957 t0
.bitfield
.reg8
= 1;
1958 t0
.bitfield
.reg16
= 1;
1959 t0
.bitfield
.reg32
= 1;
1960 t0
.bitfield
.reg64
= 1;
1963 if (m1
.bitfield
.acc
)
1965 t1
.bitfield
.reg8
= 1;
1966 t1
.bitfield
.reg16
= 1;
1967 t1
.bitfield
.reg32
= 1;
1968 t1
.bitfield
.reg64
= 1;
1971 if (!(t0
.bitfield
.reg8
& t1
.bitfield
.reg8
)
1972 && !(t0
.bitfield
.reg16
& t1
.bitfield
.reg16
)
1973 && !(t0
.bitfield
.reg32
& t1
.bitfield
.reg32
)
1974 && !(t0
.bitfield
.reg64
& t1
.bitfield
.reg64
))
1977 i
.error
= register_type_mismatch
;
1982 static INLINE
unsigned int
1983 register_number (const reg_entry
*r
)
1985 unsigned int nr
= r
->reg_num
;
1987 if (r
->reg_flags
& RegRex
)
1990 if (r
->reg_flags
& RegVRex
)
1996 static INLINE
unsigned int
1997 mode_from_disp_size (i386_operand_type t
)
1999 if (t
.bitfield
.disp8
|| t
.bitfield
.vec_disp8
)
2001 else if (t
.bitfield
.disp16
2002 || t
.bitfield
.disp32
2003 || t
.bitfield
.disp32s
)
2010 fits_in_signed_byte (addressT num
)
2012 return num
+ 0x80 <= 0xff;
2016 fits_in_unsigned_byte (addressT num
)
2022 fits_in_unsigned_word (addressT num
)
2024 return num
<= 0xffff;
2028 fits_in_signed_word (addressT num
)
2030 return num
+ 0x8000 <= 0xffff;
2034 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2039 return num
+ 0x80000000 <= 0xffffffff;
2041 } /* fits_in_signed_long() */
2044 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2049 return num
<= 0xffffffff;
2051 } /* fits_in_unsigned_long() */
2054 fits_in_vec_disp8 (offsetT num
)
2056 int shift
= i
.memshift
;
2062 mask
= (1 << shift
) - 1;
2064 /* Return 0 if NUM isn't properly aligned. */
2068 /* Check if NUM will fit in 8bit after shift. */
2069 return fits_in_signed_byte (num
>> shift
);
2073 fits_in_imm4 (offsetT num
)
2075 return (num
& 0xf) == num
;
2078 static i386_operand_type
2079 smallest_imm_type (offsetT num
)
2081 i386_operand_type t
;
2083 operand_type_set (&t
, 0);
2084 t
.bitfield
.imm64
= 1;
2086 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2088 /* This code is disabled on the 486 because all the Imm1 forms
2089 in the opcode table are slower on the i486. They're the
2090 versions with the implicitly specified single-position
2091 displacement, which has another syntax if you really want to
2093 t
.bitfield
.imm1
= 1;
2094 t
.bitfield
.imm8
= 1;
2095 t
.bitfield
.imm8s
= 1;
2096 t
.bitfield
.imm16
= 1;
2097 t
.bitfield
.imm32
= 1;
2098 t
.bitfield
.imm32s
= 1;
2100 else if (fits_in_signed_byte (num
))
2102 t
.bitfield
.imm8
= 1;
2103 t
.bitfield
.imm8s
= 1;
2104 t
.bitfield
.imm16
= 1;
2105 t
.bitfield
.imm32
= 1;
2106 t
.bitfield
.imm32s
= 1;
2108 else if (fits_in_unsigned_byte (num
))
2110 t
.bitfield
.imm8
= 1;
2111 t
.bitfield
.imm16
= 1;
2112 t
.bitfield
.imm32
= 1;
2113 t
.bitfield
.imm32s
= 1;
2115 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2117 t
.bitfield
.imm16
= 1;
2118 t
.bitfield
.imm32
= 1;
2119 t
.bitfield
.imm32s
= 1;
2121 else if (fits_in_signed_long (num
))
2123 t
.bitfield
.imm32
= 1;
2124 t
.bitfield
.imm32s
= 1;
2126 else if (fits_in_unsigned_long (num
))
2127 t
.bitfield
.imm32
= 1;
2133 offset_in_range (offsetT val
, int size
)
2139 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2140 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2141 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2143 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2149 /* If BFD64, sign extend val for 32bit address mode. */
2150 if (flag_code
!= CODE_64BIT
2151 || i
.prefix
[ADDR_PREFIX
])
2152 if ((val
& ~(((addressT
) 2 << 31) - 1)) == 0)
2153 val
= (val
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2156 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2158 char buf1
[40], buf2
[40];
2160 sprint_value (buf1
, val
);
2161 sprint_value (buf2
, val
& mask
);
2162 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2177 a. PREFIX_EXIST if attempting to add a prefix where one from the
2178 same class already exists.
2179 b. PREFIX_LOCK if lock prefix is added.
2180 c. PREFIX_REP if rep/repne prefix is added.
2181 d. PREFIX_DS if ds prefix is added.
2182 e. PREFIX_OTHER if other prefix is added.
2185 static enum PREFIX_GROUP
2186 add_prefix (unsigned int prefix
)
2188 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2191 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2192 && flag_code
== CODE_64BIT
)
2194 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2195 || ((i
.prefix
[REX_PREFIX
] & (REX_R
| REX_X
| REX_B
))
2196 && (prefix
& (REX_R
| REX_X
| REX_B
))))
2207 case DS_PREFIX_OPCODE
:
2210 case CS_PREFIX_OPCODE
:
2211 case ES_PREFIX_OPCODE
:
2212 case FS_PREFIX_OPCODE
:
2213 case GS_PREFIX_OPCODE
:
2214 case SS_PREFIX_OPCODE
:
2218 case REPNE_PREFIX_OPCODE
:
2219 case REPE_PREFIX_OPCODE
:
2224 case LOCK_PREFIX_OPCODE
:
2233 case ADDR_PREFIX_OPCODE
:
2237 case DATA_PREFIX_OPCODE
:
2241 if (i
.prefix
[q
] != 0)
2249 i
.prefix
[q
] |= prefix
;
2252 as_bad (_("same type of prefix used twice"));
2258 update_code_flag (int value
, int check
)
2260 PRINTF_LIKE ((*as_error
));
2262 flag_code
= (enum flag_code
) value
;
2263 if (flag_code
== CODE_64BIT
)
2265 cpu_arch_flags
.bitfield
.cpu64
= 1;
2266 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2270 cpu_arch_flags
.bitfield
.cpu64
= 0;
2271 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2273 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2276 as_error
= as_fatal
;
2279 (*as_error
) (_("64bit mode not supported on `%s'."),
2280 cpu_arch_name
? cpu_arch_name
: default_arch
);
2282 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2285 as_error
= as_fatal
;
2288 (*as_error
) (_("32bit mode not supported on `%s'."),
2289 cpu_arch_name
? cpu_arch_name
: default_arch
);
2291 stackop_size
= '\0';
/* Handler for .code16/.code32/.code64: set code size without the
   strict 16-bit check.  */

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2301 set_16bit_gcc_code_flag (int new_code_flag
)
2303 flag_code
= (enum flag_code
) new_code_flag
;
2304 if (flag_code
!= CODE_16BIT
)
2306 cpu_arch_flags
.bitfield
.cpu64
= 0;
2307 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2308 stackop_size
= LONG_MNEM_SUFFIX
;
2312 set_intel_syntax (int syntax_flag
)
2314 /* Find out if register prefixing is specified. */
2315 int ask_naked_reg
= 0;
2318 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2321 int e
= get_symbol_name (&string
);
2323 if (strcmp (string
, "prefix") == 0)
2325 else if (strcmp (string
, "noprefix") == 0)
2328 as_bad (_("bad argument to syntax directive."));
2329 (void) restore_line_pointer (e
);
2331 demand_empty_rest_of_line ();
2333 intel_syntax
= syntax_flag
;
2335 if (ask_naked_reg
== 0)
2336 allow_naked_reg
= (intel_syntax
2337 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2339 allow_naked_reg
= (ask_naked_reg
< 0);
2341 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2343 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2344 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2345 register_prefix
= allow_naked_reg
? "" : "%";
2349 set_intel_mnemonic (int mnemonic_flag
)
2351 intel_mnemonic
= mnemonic_flag
;
2355 set_allow_index_reg (int flag
)
2357 allow_index_reg
= flag
;
2361 set_check (int what
)
2363 enum check_kind
*kind
;
2368 kind
= &operand_check
;
2379 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2382 int e
= get_symbol_name (&string
);
2384 if (strcmp (string
, "none") == 0)
2386 else if (strcmp (string
, "warning") == 0)
2387 *kind
= check_warning
;
2388 else if (strcmp (string
, "error") == 0)
2389 *kind
= check_error
;
2391 as_bad (_("bad argument to %s_check directive."), str
);
2392 (void) restore_line_pointer (e
);
2395 as_bad (_("missing argument for %s_check directive"), str
);
2397 demand_empty_rest_of_line ();
2401 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2402 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2404 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2405 static const char *arch
;
2407 /* Intel LIOM is only supported on ELF. */
2413 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2414 use default_arch. */
2415 arch
= cpu_arch_name
;
2417 arch
= default_arch
;
2420 /* If we are targeting Intel MCU, we must enable it. */
2421 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_IAMCU
2422 || new_flag
.bitfield
.cpuiamcu
)
2425 /* If we are targeting Intel L1OM, we must enable it. */
2426 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2427 || new_flag
.bitfield
.cpul1om
)
2430 /* If we are targeting Intel K1OM, we must enable it. */
2431 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2432 || new_flag
.bitfield
.cpuk1om
)
2435 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2440 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2444 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2447 int e
= get_symbol_name (&string
);
2449 i386_cpu_flags flags
;
2451 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2453 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2455 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2459 cpu_arch_name
= cpu_arch
[j
].name
;
2460 cpu_sub_arch_name
= NULL
;
2461 cpu_arch_flags
= cpu_arch
[j
].flags
;
2462 if (flag_code
== CODE_64BIT
)
2464 cpu_arch_flags
.bitfield
.cpu64
= 1;
2465 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2469 cpu_arch_flags
.bitfield
.cpu64
= 0;
2470 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2472 cpu_arch_isa
= cpu_arch
[j
].type
;
2473 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2474 if (!cpu_arch_tune_set
)
2476 cpu_arch_tune
= cpu_arch_isa
;
2477 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2482 flags
= cpu_flags_or (cpu_arch_flags
,
2485 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2487 if (cpu_sub_arch_name
)
2489 char *name
= cpu_sub_arch_name
;
2490 cpu_sub_arch_name
= concat (name
,
2492 (const char *) NULL
);
2496 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2497 cpu_arch_flags
= flags
;
2498 cpu_arch_isa_flags
= flags
;
2500 (void) restore_line_pointer (e
);
2501 demand_empty_rest_of_line ();
2506 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2508 /* Disable an ISA extension. */
2509 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2510 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2512 flags
= cpu_flags_and_not (cpu_arch_flags
,
2513 cpu_noarch
[j
].flags
);
2514 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2516 if (cpu_sub_arch_name
)
2518 char *name
= cpu_sub_arch_name
;
2519 cpu_sub_arch_name
= concat (name
, string
,
2520 (const char *) NULL
);
2524 cpu_sub_arch_name
= xstrdup (string
);
2525 cpu_arch_flags
= flags
;
2526 cpu_arch_isa_flags
= flags
;
2528 (void) restore_line_pointer (e
);
2529 demand_empty_rest_of_line ();
2533 j
= ARRAY_SIZE (cpu_arch
);
2536 if (j
>= ARRAY_SIZE (cpu_arch
))
2537 as_bad (_("no such architecture: `%s'"), string
);
2539 *input_line_pointer
= e
;
2542 as_bad (_("missing cpu architecture"));
2544 no_cond_jump_promotion
= 0;
2545 if (*input_line_pointer
== ','
2546 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2551 ++input_line_pointer
;
2552 e
= get_symbol_name (&string
);
2554 if (strcmp (string
, "nojumps") == 0)
2555 no_cond_jump_promotion
= 1;
2556 else if (strcmp (string
, "jumps") == 0)
2559 as_bad (_("no such architecture modifier: `%s'"), string
);
2561 (void) restore_line_pointer (e
);
2564 demand_empty_rest_of_line ();
2567 enum bfd_architecture
2570 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2572 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2573 || flag_code
!= CODE_64BIT
)
2574 as_fatal (_("Intel L1OM is 64bit ELF only"));
2575 return bfd_arch_l1om
;
2577 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2579 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2580 || flag_code
!= CODE_64BIT
)
2581 as_fatal (_("Intel K1OM is 64bit ELF only"));
2582 return bfd_arch_k1om
;
2584 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2586 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2587 || flag_code
== CODE_64BIT
)
2588 as_fatal (_("Intel MCU is 32bit ELF only"));
2589 return bfd_arch_iamcu
;
2592 return bfd_arch_i386
;
2598 if (!strncmp (default_arch
, "x86_64", 6))
2600 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2602 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2603 || default_arch
[6] != '\0')
2604 as_fatal (_("Intel L1OM is 64bit ELF only"));
2605 return bfd_mach_l1om
;
2607 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2609 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2610 || default_arch
[6] != '\0')
2611 as_fatal (_("Intel K1OM is 64bit ELF only"));
2612 return bfd_mach_k1om
;
2614 else if (default_arch
[6] == '\0')
2615 return bfd_mach_x86_64
;
2617 return bfd_mach_x64_32
;
2619 else if (!strcmp (default_arch
, "i386")
2620 || !strcmp (default_arch
, "iamcu"))
2622 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2624 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
2625 as_fatal (_("Intel MCU is 32bit ELF only"));
2626 return bfd_mach_i386_iamcu
;
2629 return bfd_mach_i386_i386
;
2632 as_fatal (_("unknown architecture"));
2638 const char *hash_err
;
2640 /* Support pseudo prefixes like {disp32}. */
2641 lex_type
['{'] = LEX_BEGIN_NAME
;
2643 /* Initialize op_hash hash table. */
2644 op_hash
= hash_new ();
2647 const insn_template
*optab
;
2648 templates
*core_optab
;
2650 /* Setup for loop. */
2652 core_optab
= XNEW (templates
);
2653 core_optab
->start
= optab
;
2658 if (optab
->name
== NULL
2659 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
2661 /* different name --> ship out current template list;
2662 add to hash table; & begin anew. */
2663 core_optab
->end
= optab
;
2664 hash_err
= hash_insert (op_hash
,
2666 (void *) core_optab
);
2669 as_fatal (_("can't hash %s: %s"),
2673 if (optab
->name
== NULL
)
2675 core_optab
= XNEW (templates
);
2676 core_optab
->start
= optab
;
2681 /* Initialize reg_hash hash table. */
2682 reg_hash
= hash_new ();
2684 const reg_entry
*regtab
;
2685 unsigned int regtab_size
= i386_regtab_size
;
2687 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
2689 hash_err
= hash_insert (reg_hash
, regtab
->reg_name
, (void *) regtab
);
2691 as_fatal (_("can't hash %s: %s"),
2697 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2702 for (c
= 0; c
< 256; c
++)
2707 mnemonic_chars
[c
] = c
;
2708 register_chars
[c
] = c
;
2709 operand_chars
[c
] = c
;
2711 else if (ISLOWER (c
))
2713 mnemonic_chars
[c
] = c
;
2714 register_chars
[c
] = c
;
2715 operand_chars
[c
] = c
;
2717 else if (ISUPPER (c
))
2719 mnemonic_chars
[c
] = TOLOWER (c
);
2720 register_chars
[c
] = mnemonic_chars
[c
];
2721 operand_chars
[c
] = c
;
2723 else if (c
== '{' || c
== '}')
2725 mnemonic_chars
[c
] = c
;
2726 operand_chars
[c
] = c
;
2729 if (ISALPHA (c
) || ISDIGIT (c
))
2730 identifier_chars
[c
] = c
;
2733 identifier_chars
[c
] = c
;
2734 operand_chars
[c
] = c
;
2739 identifier_chars
['@'] = '@';
2742 identifier_chars
['?'] = '?';
2743 operand_chars
['?'] = '?';
2745 digit_chars
['-'] = '-';
2746 mnemonic_chars
['_'] = '_';
2747 mnemonic_chars
['-'] = '-';
2748 mnemonic_chars
['.'] = '.';
2749 identifier_chars
['_'] = '_';
2750 identifier_chars
['.'] = '.';
2752 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
2753 operand_chars
[(unsigned char) *p
] = *p
;
2756 if (flag_code
== CODE_64BIT
)
2758 #if defined (OBJ_COFF) && defined (TE_PE)
2759 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
2762 x86_dwarf2_return_column
= 16;
2764 x86_cie_data_alignment
= -8;
2768 x86_dwarf2_return_column
= 8;
2769 x86_cie_data_alignment
= -4;
2774 i386_print_statistics (FILE *file
)
2776 hash_print_statistics (file
, "i386 opcode", op_hash
);
2777 hash_print_statistics (file
, "i386 register", reg_hash
);
2782 /* Debugging routines for md_assemble. */
2783 static void pte (insn_template
*);
2784 static void pt (i386_operand_type
);
2785 static void pe (expressionS
*);
2786 static void ps (symbolS
*);
2789 pi (char *line
, i386_insn
*x
)
2793 fprintf (stdout
, "%s: template ", line
);
2795 fprintf (stdout
, " address: base %s index %s scale %x\n",
2796 x
->base_reg
? x
->base_reg
->reg_name
: "none",
2797 x
->index_reg
? x
->index_reg
->reg_name
: "none",
2798 x
->log2_scale_factor
);
2799 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
2800 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
2801 fprintf (stdout
, " sib: base %x index %x scale %x\n",
2802 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
2803 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
2804 (x
->rex
& REX_W
) != 0,
2805 (x
->rex
& REX_R
) != 0,
2806 (x
->rex
& REX_X
) != 0,
2807 (x
->rex
& REX_B
) != 0);
2808 for (j
= 0; j
< x
->operands
; j
++)
2810 fprintf (stdout
, " #%d: ", j
+ 1);
2812 fprintf (stdout
, "\n");
2813 if (x
->types
[j
].bitfield
.reg8
2814 || x
->types
[j
].bitfield
.reg16
2815 || x
->types
[j
].bitfield
.reg32
2816 || x
->types
[j
].bitfield
.reg64
2817 || x
->types
[j
].bitfield
.regmmx
2818 || x
->types
[j
].bitfield
.regxmm
2819 || x
->types
[j
].bitfield
.regymm
2820 || x
->types
[j
].bitfield
.regzmm
2821 || x
->types
[j
].bitfield
.sreg2
2822 || x
->types
[j
].bitfield
.sreg3
2823 || x
->types
[j
].bitfield
.control
2824 || x
->types
[j
].bitfield
.debug
2825 || x
->types
[j
].bitfield
.test
)
2826 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
2827 if (operand_type_check (x
->types
[j
], imm
))
2829 if (operand_type_check (x
->types
[j
], disp
))
2830 pe (x
->op
[j
].disps
);
2835 pte (insn_template
*t
)
2838 fprintf (stdout
, " %d operands ", t
->operands
);
2839 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
2840 if (t
->extension_opcode
!= None
)
2841 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
2842 if (t
->opcode_modifier
.d
)
2843 fprintf (stdout
, "D");
2844 if (t
->opcode_modifier
.w
)
2845 fprintf (stdout
, "W");
2846 fprintf (stdout
, "\n");
2847 for (j
= 0; j
< t
->operands
; j
++)
2849 fprintf (stdout
, " #%d type ", j
+ 1);
2850 pt (t
->operand_types
[j
]);
2851 fprintf (stdout
, "\n");
2858 fprintf (stdout
, " operation %d\n", e
->X_op
);
2859 fprintf (stdout
, " add_number %ld (%lx)\n",
2860 (long) e
->X_add_number
, (long) e
->X_add_number
);
2861 if (e
->X_add_symbol
)
2863 fprintf (stdout
, " add_symbol ");
2864 ps (e
->X_add_symbol
);
2865 fprintf (stdout
, "\n");
2869 fprintf (stdout
, " op_symbol ");
2870 ps (e
->X_op_symbol
);
2871 fprintf (stdout
, "\n");
2878 fprintf (stdout
, "%s type %s%s",
2880 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
2881 segment_name (S_GET_SEGMENT (s
)));
2884 static struct type_name
2886 i386_operand_type mask
;
2889 const type_names
[] =
2891 { OPERAND_TYPE_REG8
, "r8" },
2892 { OPERAND_TYPE_REG16
, "r16" },
2893 { OPERAND_TYPE_REG32
, "r32" },
2894 { OPERAND_TYPE_REG64
, "r64" },
2895 { OPERAND_TYPE_IMM8
, "i8" },
2896 { OPERAND_TYPE_IMM8
, "i8s" },
2897 { OPERAND_TYPE_IMM16
, "i16" },
2898 { OPERAND_TYPE_IMM32
, "i32" },
2899 { OPERAND_TYPE_IMM32S
, "i32s" },
2900 { OPERAND_TYPE_IMM64
, "i64" },
2901 { OPERAND_TYPE_IMM1
, "i1" },
2902 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
2903 { OPERAND_TYPE_DISP8
, "d8" },
2904 { OPERAND_TYPE_DISP16
, "d16" },
2905 { OPERAND_TYPE_DISP32
, "d32" },
2906 { OPERAND_TYPE_DISP32S
, "d32s" },
2907 { OPERAND_TYPE_DISP64
, "d64" },
2908 { OPERAND_TYPE_VEC_DISP8
, "Vector d8" },
2909 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
2910 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
2911 { OPERAND_TYPE_CONTROL
, "control reg" },
2912 { OPERAND_TYPE_TEST
, "test reg" },
2913 { OPERAND_TYPE_DEBUG
, "debug reg" },
2914 { OPERAND_TYPE_FLOATREG
, "FReg" },
2915 { OPERAND_TYPE_FLOATACC
, "FAcc" },
2916 { OPERAND_TYPE_SREG2
, "SReg2" },
2917 { OPERAND_TYPE_SREG3
, "SReg3" },
2918 { OPERAND_TYPE_ACC
, "Acc" },
2919 { OPERAND_TYPE_JUMPABSOLUTE
, "Jump Absolute" },
2920 { OPERAND_TYPE_REGMMX
, "rMMX" },
2921 { OPERAND_TYPE_REGXMM
, "rXMM" },
2922 { OPERAND_TYPE_REGYMM
, "rYMM" },
2923 { OPERAND_TYPE_REGZMM
, "rZMM" },
2924 { OPERAND_TYPE_REGMASK
, "Mask reg" },
2925 { OPERAND_TYPE_ESSEG
, "es" },
2929 pt (i386_operand_type t
)
2932 i386_operand_type a
;
2934 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
2936 a
= operand_type_and (t
, type_names
[j
].mask
);
2937 if (!operand_type_all_zero (&a
))
2938 fprintf (stdout
, "%s, ", type_names
[j
].name
);
2943 #endif /* DEBUG386 */
2945 static bfd_reloc_code_real_type
2946 reloc (unsigned int size
,
2949 bfd_reloc_code_real_type other
)
2951 if (other
!= NO_RELOC
)
2953 reloc_howto_type
*rel
;
2958 case BFD_RELOC_X86_64_GOT32
:
2959 return BFD_RELOC_X86_64_GOT64
;
2961 case BFD_RELOC_X86_64_GOTPLT64
:
2962 return BFD_RELOC_X86_64_GOTPLT64
;
2964 case BFD_RELOC_X86_64_PLTOFF64
:
2965 return BFD_RELOC_X86_64_PLTOFF64
;
2967 case BFD_RELOC_X86_64_GOTPC32
:
2968 other
= BFD_RELOC_X86_64_GOTPC64
;
2970 case BFD_RELOC_X86_64_GOTPCREL
:
2971 other
= BFD_RELOC_X86_64_GOTPCREL64
;
2973 case BFD_RELOC_X86_64_TPOFF32
:
2974 other
= BFD_RELOC_X86_64_TPOFF64
;
2976 case BFD_RELOC_X86_64_DTPOFF32
:
2977 other
= BFD_RELOC_X86_64_DTPOFF64
;
2983 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2984 if (other
== BFD_RELOC_SIZE32
)
2987 other
= BFD_RELOC_SIZE64
;
2990 as_bad (_("there are no pc-relative size relocations"));
2996 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2997 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3000 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3002 as_bad (_("unknown relocation (%u)"), other
);
3003 else if (size
!= bfd_get_reloc_size (rel
))
3004 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3005 bfd_get_reloc_size (rel
),
3007 else if (pcrel
&& !rel
->pc_relative
)
3008 as_bad (_("non-pc-relative relocation for pc-relative field"));
3009 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3011 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3013 as_bad (_("relocated field and relocation type differ in signedness"));
3022 as_bad (_("there are no unsigned pc-relative relocations"));
3025 case 1: return BFD_RELOC_8_PCREL
;
3026 case 2: return BFD_RELOC_16_PCREL
;
3027 case 4: return BFD_RELOC_32_PCREL
;
3028 case 8: return BFD_RELOC_64_PCREL
;
3030 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3037 case 4: return BFD_RELOC_X86_64_32S
;
3042 case 1: return BFD_RELOC_8
;
3043 case 2: return BFD_RELOC_16
;
3044 case 4: return BFD_RELOC_32
;
3045 case 8: return BFD_RELOC_64
;
3047 as_bad (_("cannot do %s %u byte relocation"),
3048 sign
> 0 ? "signed" : "unsigned", size
);
3054 /* Here we decide which fixups can be adjusted to make them relative to
3055 the beginning of the section instead of the symbol. Basically we need
3056 to make sure that the dynamic relocations are done correctly, so in
3057 some cases we force the original symbol to be used. */
3060 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3062 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3066 /* Don't adjust pc-relative references to merge sections in 64-bit
3068 if (use_rela_relocations
3069 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3073 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3074 and changed later by validate_fix. */
3075 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3076 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3079 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3080 for size relocations. */
3081 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3082 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3083 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3084 || fixP
->fx_r_type
== BFD_RELOC_386_PLT32
3085 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3086 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3087 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3088 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3089 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3090 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3091 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3092 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3093 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3094 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3095 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3096 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3097 || fixP
->fx_r_type
== BFD_RELOC_X86_64_PLT32
3098 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3099 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3100 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3101 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3102 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3103 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3104 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3105 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3106 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3107 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3108 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3109 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3110 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3111 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3112 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3113 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
3120 intel_float_operand (const char *mnemonic
)
3122 /* Note that the value returned is meaningful only for opcodes with (memory)
3123 operands, hence the code here is free to improperly handle opcodes that
3124 have no operands (for better performance and smaller code). */
3126 if (mnemonic
[0] != 'f')
3127 return 0; /* non-math */
3129 switch (mnemonic
[1])
3131 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
3132 the fs segment override prefix not currently handled because no
3133 call path can make opcodes without operands get here */
3135 return 2 /* integer op */;
3137 if (mnemonic
[2] == 'd' && (mnemonic
[3] == 'c' || mnemonic
[3] == 'e'))
3138 return 3; /* fldcw/fldenv */
3141 if (mnemonic
[2] != 'o' /* fnop */)
3142 return 3; /* non-waiting control op */
3145 if (mnemonic
[2] == 's')
3146 return 3; /* frstor/frstpm */
3149 if (mnemonic
[2] == 'a')
3150 return 3; /* fsave */
3151 if (mnemonic
[2] == 't')
3153 switch (mnemonic
[3])
3155 case 'c': /* fstcw */
3156 case 'd': /* fstdw */
3157 case 'e': /* fstenv */
3158 case 's': /* fsts[gw] */
3164 if (mnemonic
[2] == 'r' || mnemonic
[2] == 's')
3165 return 0; /* fxsave/fxrstor are not really math ops */
3172 /* Build the VEX prefix. */
3175 build_vex_prefix (const insn_template
*t
)
3177 unsigned int register_specifier
;
3178 unsigned int implied_prefix
;
3179 unsigned int vector_length
;
3181 /* Check register specifier. */
3182 if (i
.vex
.register_specifier
)
3184 register_specifier
=
3185 ~register_number (i
.vex
.register_specifier
) & 0xf;
3186 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3189 register_specifier
= 0xf;
3191 /* Use 2-byte VEX prefix by swapping destination and source
3193 if (i
.vec_encoding
!= vex_encoding_vex3
3194 && i
.dir_encoding
== dir_encoding_default
3195 && i
.operands
== i
.reg_operands
3196 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3197 && i
.tm
.opcode_modifier
.load
3200 unsigned int xchg
= i
.operands
- 1;
3201 union i386_op temp_op
;
3202 i386_operand_type temp_type
;
3204 temp_type
= i
.types
[xchg
];
3205 i
.types
[xchg
] = i
.types
[0];
3206 i
.types
[0] = temp_type
;
3207 temp_op
= i
.op
[xchg
];
3208 i
.op
[xchg
] = i
.op
[0];
3211 gas_assert (i
.rm
.mode
== 3);
3215 i
.rm
.regmem
= i
.rm
.reg
;
3218 /* Use the next insn. */
3222 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3223 vector_length
= avxscalar
;
3225 vector_length
= i
.tm
.opcode_modifier
.vex
== VEX256
? 1 : 0;
3227 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3232 case DATA_PREFIX_OPCODE
:
3235 case REPE_PREFIX_OPCODE
:
3238 case REPNE_PREFIX_OPCODE
:
3245 /* Use 2-byte VEX prefix if possible. */
3246 if (i
.vec_encoding
!= vex_encoding_vex3
3247 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3248 && i
.tm
.opcode_modifier
.vexw
!= VEXW1
3249 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3251 /* 2-byte VEX prefix. */
3255 i
.vex
.bytes
[0] = 0xc5;
3257 /* Check the REX.R bit. */
3258 r
= (i
.rex
& REX_R
) ? 0 : 1;
3259 i
.vex
.bytes
[1] = (r
<< 7
3260 | register_specifier
<< 3
3261 | vector_length
<< 2
3266 /* 3-byte VEX prefix. */
3271 switch (i
.tm
.opcode_modifier
.vexopcode
)
3275 i
.vex
.bytes
[0] = 0xc4;
3279 i
.vex
.bytes
[0] = 0xc4;
3283 i
.vex
.bytes
[0] = 0xc4;
3287 i
.vex
.bytes
[0] = 0x8f;
3291 i
.vex
.bytes
[0] = 0x8f;
3295 i
.vex
.bytes
[0] = 0x8f;
3301 /* The high 3 bits of the second VEX byte are 1's compliment
3302 of RXB bits from REX. */
3303 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3305 /* Check the REX.W bit. */
3306 w
= (i
.rex
& REX_W
) ? 1 : 0;
3307 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
3310 i
.vex
.bytes
[2] = (w
<< 7
3311 | register_specifier
<< 3
3312 | vector_length
<< 2
3317 /* Build the EVEX prefix. */
3320 build_evex_prefix (void)
3322 unsigned int register_specifier
;
3323 unsigned int implied_prefix
;
3325 rex_byte vrex_used
= 0;
3327 /* Check register specifier. */
3328 if (i
.vex
.register_specifier
)
3330 gas_assert ((i
.vrex
& REX_X
) == 0);
3332 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3333 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3334 register_specifier
+= 8;
3335 /* The upper 16 registers are encoded in the fourth byte of the
3337 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3338 i
.vex
.bytes
[3] = 0x8;
3339 register_specifier
= ~register_specifier
& 0xf;
3343 register_specifier
= 0xf;
3345 /* Encode upper 16 vector index register in the fourth byte of
3347 if (!(i
.vrex
& REX_X
))
3348 i
.vex
.bytes
[3] = 0x8;
3353 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3358 case DATA_PREFIX_OPCODE
:
3361 case REPE_PREFIX_OPCODE
:
3364 case REPNE_PREFIX_OPCODE
:
3371 /* 4 byte EVEX prefix. */
3373 i
.vex
.bytes
[0] = 0x62;
3376 switch (i
.tm
.opcode_modifier
.vexopcode
)
3392 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3394 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3396 /* The fifth bit of the second EVEX byte is 1's compliment of the
3397 REX_R bit in VREX. */
3398 if (!(i
.vrex
& REX_R
))
3399 i
.vex
.bytes
[1] |= 0x10;
3403 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3405 /* When all operands are registers, the REX_X bit in REX is not
3406 used. We reuse it to encode the upper 16 registers, which is
3407 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3408 as 1's compliment. */
3409 if ((i
.vrex
& REX_B
))
3412 i
.vex
.bytes
[1] &= ~0x40;
3416 /* EVEX instructions shouldn't need the REX prefix. */
3417 i
.vrex
&= ~vrex_used
;
3418 gas_assert (i
.vrex
== 0);
3420 /* Check the REX.W bit. */
3421 w
= (i
.rex
& REX_W
) ? 1 : 0;
3422 if (i
.tm
.opcode_modifier
.vexw
)
3424 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
3427 /* If w is not set it means we are dealing with WIG instruction. */
3430 if (evexwig
== evexw1
)
3434 /* Encode the U bit. */
3435 implied_prefix
|= 0x4;
3437 /* The third byte of the EVEX prefix. */
3438 i
.vex
.bytes
[2] = (w
<< 7 | register_specifier
<< 3 | implied_prefix
);
3440 /* The fourth byte of the EVEX prefix. */
3441 /* The zeroing-masking bit. */
3442 if (i
.mask
&& i
.mask
->zeroing
)
3443 i
.vex
.bytes
[3] |= 0x80;
3445 /* Don't always set the broadcast bit if there is no RC. */
3448 /* Encode the vector length. */
3449 unsigned int vec_length
;
3451 switch (i
.tm
.opcode_modifier
.evex
)
3453 case EVEXLIG
: /* LL' is ignored */
3454 vec_length
= evexlig
<< 5;
3457 vec_length
= 0 << 5;
3460 vec_length
= 1 << 5;
3463 vec_length
= 2 << 5;
3469 i
.vex
.bytes
[3] |= vec_length
;
3470 /* Encode the broadcast bit. */
3472 i
.vex
.bytes
[3] |= 0x10;
3476 if (i
.rounding
->type
!= saeonly
)
3477 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
->type
<< 5);
3479 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3482 if (i
.mask
&& i
.mask
->mask
)
3483 i
.vex
.bytes
[3] |= i
.mask
->mask
->reg_num
;
3487 process_immext (void)
3491 if ((i
.tm
.cpu_flags
.bitfield
.cpusse3
|| i
.tm
.cpu_flags
.bitfield
.cpusvme
)
3494 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3495 with an opcode suffix which is coded in the same place as an
3496 8-bit immediate field would be.
3497 Here we check those operands and remove them afterwards. */
3500 for (x
= 0; x
< i
.operands
; x
++)
3501 if (register_number (i
.op
[x
].regs
) != x
)
3502 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3503 register_prefix
, i
.op
[x
].regs
->reg_name
, x
+ 1,
3509 if (i
.tm
.cpu_flags
.bitfield
.cpumwaitx
&& i
.operands
> 0)
3511 /* MONITORX/MWAITX instructions have fixed operands with an opcode
3512 suffix which is coded in the same place as an 8-bit immediate
3514 Here we check those operands and remove them afterwards. */
3517 if (i
.operands
!= 3)
3520 for (x
= 0; x
< 2; x
++)
3521 if (register_number (i
.op
[x
].regs
) != x
)
3522 goto bad_register_operand
;
3524 /* Check for third operand for mwaitx/monitorx insn. */
3525 if (register_number (i
.op
[x
].regs
)
3526 != (x
+ (i
.tm
.extension_opcode
== 0xfb)))
3528 bad_register_operand
:
3529 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3530 register_prefix
, i
.op
[x
].regs
->reg_name
, x
+1,
3537 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3538 which is coded in the same place as an 8-bit immediate field
3539 would be. Here we fake an 8-bit immediate operand from the
3540 opcode suffix stored in tm.extension_opcode.
3542 AVX instructions also use this encoding, for some of
3543 3 argument instructions. */
3545 gas_assert (i
.imm_operands
<= 1
3547 || ((i
.tm
.opcode_modifier
.vex
3548 || i
.tm
.opcode_modifier
.evex
)
3549 && i
.operands
<= 4)));
3551 exp
= &im_expressions
[i
.imm_operands
++];
3552 i
.op
[i
.operands
].imms
= exp
;
3553 i
.types
[i
.operands
] = imm8
;
3555 exp
->X_op
= O_constant
;
3556 exp
->X_add_number
= i
.tm
.extension_opcode
;
3557 i
.tm
.extension_opcode
= None
;
3564 switch (i
.tm
.opcode_modifier
.hleprefixok
)
3569 as_bad (_("invalid instruction `%s' after `%s'"),
3570 i
.tm
.name
, i
.hle_prefix
);
3573 if (i
.prefix
[LOCK_PREFIX
])
3575 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
3579 case HLEPrefixRelease
:
3580 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
3582 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3586 if (i
.mem_operands
== 0
3587 || !operand_type_check (i
.types
[i
.operands
- 1], anymem
))
3589 as_bad (_("memory destination needed for instruction `%s'"
3590 " after `xrelease'"), i
.tm
.name
);
3597 /* This is the guts of the machine-dependent assembler. LINE points to a
3598 machine dependent instruction. This function is supposed to emit
3599 the frags/bytes it assembles to. */
3602 md_assemble (char *line
)
3605 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
3606 const insn_template
*t
;
3608 /* Initialize globals. */
3609 memset (&i
, '\0', sizeof (i
));
3610 for (j
= 0; j
< MAX_OPERANDS
; j
++)
3611 i
.reloc
[j
] = NO_RELOC
;
3612 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
3613 memset (im_expressions
, '\0', sizeof (im_expressions
));
3614 save_stack_p
= save_stack
;
3616 /* First parse an instruction mnemonic & call i386_operand for the operands.
3617 We assume that the scrubber has arranged it so that line[0] is the valid
3618 start of a (possibly prefixed) mnemonic. */
3620 line
= parse_insn (line
, mnemonic
);
3623 mnem_suffix
= i
.suffix
;
3625 line
= parse_operands (line
, mnemonic
);
3627 xfree (i
.memop1_string
);
3628 i
.memop1_string
= NULL
;
3632 /* Now we've parsed the mnemonic into a set of templates, and have the
3633 operands at hand. */
3635 /* All intel opcodes have reversed operands except for "bound" and
3636 "enter". We also don't reverse intersegment "jmp" and "call"
3637 instructions with 2 immediate operands so that the immediate segment
3638 precedes the offset, as it does when in AT&T mode. */
3641 && (strcmp (mnemonic
, "bound") != 0)
3642 && (strcmp (mnemonic
, "invlpga") != 0)
3643 && !(operand_type_check (i
.types
[0], imm
)
3644 && operand_type_check (i
.types
[1], imm
)))
3647 /* The order of the immediates should be reversed
3648 for 2 immediates extrq and insertq instructions */
3649 if (i
.imm_operands
== 2
3650 && (strcmp (mnemonic
, "extrq") == 0
3651 || strcmp (mnemonic
, "insertq") == 0))
3652 swap_2_operands (0, 1);
3657 /* Don't optimize displacement for movabs since it only takes 64bit
3660 && i
.disp_encoding
!= disp_encoding_32bit
3661 && (flag_code
!= CODE_64BIT
3662 || strcmp (mnemonic
, "movabs") != 0))
3665 /* Next, we find a template that matches the given insn,
3666 making sure the overlap of the given operands types is consistent
3667 with the template operand types. */
3669 if (!(t
= match_template (mnem_suffix
)))
3672 if (sse_check
!= check_none
3673 && !i
.tm
.opcode_modifier
.noavx
3674 && (i
.tm
.cpu_flags
.bitfield
.cpusse
3675 || i
.tm
.cpu_flags
.bitfield
.cpusse2
3676 || i
.tm
.cpu_flags
.bitfield
.cpusse3
3677 || i
.tm
.cpu_flags
.bitfield
.cpussse3
3678 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
3679 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
))
3681 (sse_check
== check_warning
3683 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
3686 /* Zap movzx and movsx suffix. The suffix has been set from
3687 "word ptr" or "byte ptr" on the source operand in Intel syntax
3688 or extracted from mnemonic in AT&T syntax. But we'll use
3689 the destination register to choose the suffix for encoding. */
3690 if ((i
.tm
.base_opcode
& ~9) == 0x0fb6)
3692 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3693 there is no suffix, the default will be byte extension. */
3694 if (i
.reg_operands
!= 2
3697 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
3702 if (i
.tm
.opcode_modifier
.fwait
)
3703 if (!add_prefix (FWAIT_OPCODE
))
3706 /* Check if REP prefix is OK. */
3707 if (i
.rep_prefix
&& !i
.tm
.opcode_modifier
.repprefixok
)
3709 as_bad (_("invalid instruction `%s' after `%s'"),
3710 i
.tm
.name
, i
.rep_prefix
);
3714 /* Check for lock without a lockable instruction. Destination operand
3715 must be memory unless it is xchg (0x86). */
3716 if (i
.prefix
[LOCK_PREFIX
]
3717 && (!i
.tm
.opcode_modifier
.islockable
3718 || i
.mem_operands
== 0
3719 || (i
.tm
.base_opcode
!= 0x86
3720 && !operand_type_check (i
.types
[i
.operands
- 1], anymem
))))
3722 as_bad (_("expecting lockable instruction after `lock'"));
3726 /* Check if HLE prefix is OK. */
3727 if (i
.hle_prefix
&& !check_hle ())
3730 /* Check BND prefix. */
3731 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
3732 as_bad (_("expecting valid branch instruction after `bnd'"));
3734 /* Check NOTRACK prefix. */
3735 if (i
.notrack_prefix
&& !i
.tm
.opcode_modifier
.notrackprefixok
)
3736 as_bad (_("expecting indirect branch instruction after `notrack'"));
3738 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
3740 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
3741 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
3742 else if (flag_code
!= CODE_16BIT
3743 ? i
.prefix
[ADDR_PREFIX
]
3744 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
3745 as_bad (_("16-bit address isn't allowed in MPX instructions"));
3748 /* Insert BND prefix. */
3750 && i
.tm
.opcode_modifier
.bndprefixok
3751 && !i
.prefix
[BND_PREFIX
])
3752 add_prefix (BND_PREFIX_OPCODE
);
3754 /* Check string instruction segment overrides. */
3755 if (i
.tm
.opcode_modifier
.isstring
&& i
.mem_operands
!= 0)
3757 if (!check_string ())
3759 i
.disp_operands
= 0;
3762 if (!process_suffix ())
3765 /* Update operand types. */
3766 for (j
= 0; j
< i
.operands
; j
++)
3767 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
3769 /* Make still unresolved immediate matches conform to size of immediate
3770 given in i.suffix. */
3771 if (!finalize_imm ())
3774 if (i
.types
[0].bitfield
.imm1
)
3775 i
.imm_operands
= 0; /* kludge for shift insns. */
3777 /* We only need to check those implicit registers for instructions
3778 with 3 operands or less. */
3779 if (i
.operands
<= 3)
3780 for (j
= 0; j
< i
.operands
; j
++)
3781 if (i
.types
[j
].bitfield
.inoutportreg
3782 || i
.types
[j
].bitfield
.shiftcount
3783 || i
.types
[j
].bitfield
.acc
3784 || i
.types
[j
].bitfield
.floatacc
)
3787 /* ImmExt should be processed after SSE2AVX. */
3788 if (!i
.tm
.opcode_modifier
.sse2avx
3789 && i
.tm
.opcode_modifier
.immext
)
3792 /* For insns with operands there are more diddles to do to the opcode. */
3795 if (!process_operands ())
3798 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
3800 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3801 as_warn (_("translating to `%sp'"), i
.tm
.name
);
3804 if (i
.tm
.opcode_modifier
.vex
|| i
.tm
.opcode_modifier
.evex
)
3806 if (flag_code
== CODE_16BIT
)
3808 as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
3813 if (i
.tm
.opcode_modifier
.vex
)
3814 build_vex_prefix (t
);
3816 build_evex_prefix ();
3819 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3820 instructions may define INT_OPCODE as well, so avoid this corner
3821 case for those instructions that use MODRM. */
3822 if (i
.tm
.base_opcode
== INT_OPCODE
3823 && !i
.tm
.opcode_modifier
.modrm
3824 && i
.op
[0].imms
->X_add_number
== 3)
3826 i
.tm
.base_opcode
= INT3_OPCODE
;
3830 if ((i
.tm
.opcode_modifier
.jump
3831 || i
.tm
.opcode_modifier
.jumpbyte
3832 || i
.tm
.opcode_modifier
.jumpdword
)
3833 && i
.op
[0].disps
->X_op
== O_constant
)
3835 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3836 the absolute address given by the constant. Since ix86 jumps and
3837 calls are pc relative, we need to generate a reloc. */
3838 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
3839 i
.op
[0].disps
->X_op
= O_symbol
;
3842 if (i
.tm
.opcode_modifier
.rex64
)
3845 /* For 8 bit registers we need an empty rex prefix. Also if the
3846 instruction already has a prefix, we need to convert old
3847 registers to new ones. */
3849 if ((i
.types
[0].bitfield
.reg8
3850 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
3851 || (i
.types
[1].bitfield
.reg8
3852 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
3853 || ((i
.types
[0].bitfield
.reg8
3854 || i
.types
[1].bitfield
.reg8
)
3859 i
.rex
|= REX_OPCODE
;
3860 for (x
= 0; x
< 2; x
++)
3862 /* Look for 8 bit operand that uses old registers. */
3863 if (i
.types
[x
].bitfield
.reg8
3864 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
3866 /* In case it is "hi" register, give up. */
3867 if (i
.op
[x
].regs
->reg_num
> 3)
3868 as_bad (_("can't encode register '%s%s' in an "
3869 "instruction requiring REX prefix."),
3870 register_prefix
, i
.op
[x
].regs
->reg_name
);
3872 /* Otherwise it is equivalent to the extended register.
3873 Since the encoding doesn't change this is merely
3874 cosmetic cleanup for debug output. */
3876 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
3882 add_prefix (REX_OPCODE
| i
.rex
);
3884 /* We are ready to output the insn. */
3889 parse_insn (char *line
, char *mnemonic
)
3892 char *token_start
= l
;
3895 const insn_template
*t
;
3901 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
3906 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
3908 as_bad (_("no such instruction: `%s'"), token_start
);
3913 if (!is_space_char (*l
)
3914 && *l
!= END_OF_INSN
3916 || (*l
!= PREFIX_SEPARATOR
3919 as_bad (_("invalid character %s in mnemonic"),
3920 output_invalid (*l
));
3923 if (token_start
== l
)
3925 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
3926 as_bad (_("expecting prefix; got nothing"));
3928 as_bad (_("expecting mnemonic; got nothing"));
3932 /* Look up instruction (or prefix) via hash table. */
3933 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
3935 if (*l
!= END_OF_INSN
3936 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
3937 && current_templates
3938 && current_templates
->start
->opcode_modifier
.isprefix
)
3940 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
3942 as_bad ((flag_code
!= CODE_64BIT
3943 ? _("`%s' is only supported in 64-bit mode")
3944 : _("`%s' is not supported in 64-bit mode")),
3945 current_templates
->start
->name
);
3948 /* If we are in 16-bit mode, do not allow addr16 or data16.
3949 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3950 if ((current_templates
->start
->opcode_modifier
.size16
3951 || current_templates
->start
->opcode_modifier
.size32
)
3952 && flag_code
!= CODE_64BIT
3953 && (current_templates
->start
->opcode_modifier
.size32
3954 ^ (flag_code
== CODE_16BIT
)))
3956 as_bad (_("redundant %s prefix"),
3957 current_templates
->start
->name
);
3960 if (current_templates
->start
->opcode_length
== 0)
3962 /* Handle pseudo prefixes. */
3963 switch (current_templates
->start
->base_opcode
)
3967 i
.disp_encoding
= disp_encoding_8bit
;
3971 i
.disp_encoding
= disp_encoding_32bit
;
3975 i
.dir_encoding
= dir_encoding_load
;
3979 i
.dir_encoding
= dir_encoding_store
;
3983 i
.vec_encoding
= vex_encoding_vex2
;
3987 i
.vec_encoding
= vex_encoding_vex3
;
3991 i
.vec_encoding
= vex_encoding_evex
;
3999 /* Add prefix, checking for repeated prefixes. */
4000 switch (add_prefix (current_templates
->start
->base_opcode
))
4005 if (current_templates
->start
->cpu_flags
.bitfield
.cpucet
)
4006 i
.notrack_prefix
= current_templates
->start
->name
;
4009 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
4010 i
.hle_prefix
= current_templates
->start
->name
;
4011 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
4012 i
.bnd_prefix
= current_templates
->start
->name
;
4014 i
.rep_prefix
= current_templates
->start
->name
;
4020 /* Skip past PREFIX_SEPARATOR and reset token_start. */
4027 if (!current_templates
)
4029 /* Check if we should swap operand or force 32bit displacement in
4031 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
4032 i
.dir_encoding
= dir_encoding_store
;
4033 else if (mnem_p
- 3 == dot_p
4036 i
.disp_encoding
= disp_encoding_8bit
;
4037 else if (mnem_p
- 4 == dot_p
4041 i
.disp_encoding
= disp_encoding_32bit
;
4046 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
4049 if (!current_templates
)
4052 /* See if we can get a match by trimming off a suffix. */
4055 case WORD_MNEM_SUFFIX
:
4056 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
4057 i
.suffix
= SHORT_MNEM_SUFFIX
;
4060 case BYTE_MNEM_SUFFIX
:
4061 case QWORD_MNEM_SUFFIX
:
4062 i
.suffix
= mnem_p
[-1];
4064 current_templates
= (const templates
*) hash_find (op_hash
,
4067 case SHORT_MNEM_SUFFIX
:
4068 case LONG_MNEM_SUFFIX
:
4071 i
.suffix
= mnem_p
[-1];
4073 current_templates
= (const templates
*) hash_find (op_hash
,
4082 if (intel_float_operand (mnemonic
) == 1)
4083 i
.suffix
= SHORT_MNEM_SUFFIX
;
4085 i
.suffix
= LONG_MNEM_SUFFIX
;
4087 current_templates
= (const templates
*) hash_find (op_hash
,
4092 if (!current_templates
)
4094 as_bad (_("no such instruction: `%s'"), token_start
);
4099 if (current_templates
->start
->opcode_modifier
.jump
4100 || current_templates
->start
->opcode_modifier
.jumpbyte
)
4102 /* Check for a branch hint. We allow ",pt" and ",pn" for
4103 predict taken and predict not taken respectively.
4104 I'm not sure that branch hints actually do anything on loop
4105 and jcxz insns (JumpByte) for current Pentium4 chips. They
4106 may work in the future and it doesn't hurt to accept them
4108 if (l
[0] == ',' && l
[1] == 'p')
4112 if (!add_prefix (DS_PREFIX_OPCODE
))
4116 else if (l
[2] == 'n')
4118 if (!add_prefix (CS_PREFIX_OPCODE
))
4124 /* Any other comma loses. */
4127 as_bad (_("invalid character %s in mnemonic"),
4128 output_invalid (*l
));
4132 /* Check if instruction is supported on specified architecture. */
4134 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
4136 supported
|= cpu_flags_match (t
);
4137 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
4141 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
4143 as_bad (flag_code
== CODE_64BIT
4144 ? _("`%s' is not supported in 64-bit mode")
4145 : _("`%s' is only supported in 64-bit mode"),
4146 current_templates
->start
->name
);
4149 if (supported
!= CPU_FLAGS_PERFECT_MATCH
)
4151 as_bad (_("`%s' is not supported on `%s%s'"),
4152 current_templates
->start
->name
,
4153 cpu_arch_name
? cpu_arch_name
: default_arch
,
4154 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
4159 if (!cpu_arch_flags
.bitfield
.cpui386
4160 && (flag_code
!= CODE_16BIT
))
4162 as_warn (_("use .code16 to ensure correct addressing mode"));
4169 parse_operands (char *l
, const char *mnemonic
)
4173 /* 1 if operand is pending after ','. */
4174 unsigned int expecting_operand
= 0;
4176 /* Non-zero if operand parens not balanced. */
4177 unsigned int paren_not_balanced
;
4179 while (*l
!= END_OF_INSN
)
4181 /* Skip optional white space before operand. */
4182 if (is_space_char (*l
))
4184 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
4186 as_bad (_("invalid character %s before operand %d"),
4187 output_invalid (*l
),
4191 token_start
= l
; /* After white space. */
4192 paren_not_balanced
= 0;
4193 while (paren_not_balanced
|| *l
!= ',')
4195 if (*l
== END_OF_INSN
)
4197 if (paren_not_balanced
)
4200 as_bad (_("unbalanced parenthesis in operand %d."),
4203 as_bad (_("unbalanced brackets in operand %d."),
4208 break; /* we are done */
4210 else if (!is_operand_char (*l
) && !is_space_char (*l
) && *l
!= '"')
4212 as_bad (_("invalid character %s in operand %d"),
4213 output_invalid (*l
),
4220 ++paren_not_balanced
;
4222 --paren_not_balanced
;
4227 ++paren_not_balanced
;
4229 --paren_not_balanced
;
4233 if (l
!= token_start
)
4234 { /* Yes, we've read in another operand. */
4235 unsigned int operand_ok
;
4236 this_operand
= i
.operands
++;
4237 if (i
.operands
> MAX_OPERANDS
)
4239 as_bad (_("spurious operands; (%d operands/instruction max)"),
4243 i
.types
[this_operand
].bitfield
.unspecified
= 1;
4244 /* Now parse operand adding info to 'i' as we go along. */
4245 END_STRING_AND_SAVE (l
);
4249 i386_intel_operand (token_start
,
4250 intel_float_operand (mnemonic
));
4252 operand_ok
= i386_att_operand (token_start
);
4254 RESTORE_END_STRING (l
);
4260 if (expecting_operand
)
4262 expecting_operand_after_comma
:
4263 as_bad (_("expecting operand after ','; got nothing"));
4268 as_bad (_("expecting operand before ','; got nothing"));
4273 /* Now *l must be either ',' or END_OF_INSN. */
4276 if (*++l
== END_OF_INSN
)
4278 /* Just skip it, if it's \n complain. */
4279 goto expecting_operand_after_comma
;
4281 expecting_operand
= 1;
4288 swap_2_operands (int xchg1
, int xchg2
)
4290 union i386_op temp_op
;
4291 i386_operand_type temp_type
;
4292 enum bfd_reloc_code_real temp_reloc
;
4294 temp_type
= i
.types
[xchg2
];
4295 i
.types
[xchg2
] = i
.types
[xchg1
];
4296 i
.types
[xchg1
] = temp_type
;
4297 temp_op
= i
.op
[xchg2
];
4298 i
.op
[xchg2
] = i
.op
[xchg1
];
4299 i
.op
[xchg1
] = temp_op
;
4300 temp_reloc
= i
.reloc
[xchg2
];
4301 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
4302 i
.reloc
[xchg1
] = temp_reloc
;
4306 if (i
.mask
->operand
== xchg1
)
4307 i
.mask
->operand
= xchg2
;
4308 else if (i
.mask
->operand
== xchg2
)
4309 i
.mask
->operand
= xchg1
;
4313 if (i
.broadcast
->operand
== xchg1
)
4314 i
.broadcast
->operand
= xchg2
;
4315 else if (i
.broadcast
->operand
== xchg2
)
4316 i
.broadcast
->operand
= xchg1
;
4320 if (i
.rounding
->operand
== xchg1
)
4321 i
.rounding
->operand
= xchg2
;
4322 else if (i
.rounding
->operand
== xchg2
)
4323 i
.rounding
->operand
= xchg1
;
4328 swap_operands (void)
4334 swap_2_operands (1, i
.operands
- 2);
4338 swap_2_operands (0, i
.operands
- 1);
4344 if (i
.mem_operands
== 2)
4346 const seg_entry
*temp_seg
;
4347 temp_seg
= i
.seg
[0];
4348 i
.seg
[0] = i
.seg
[1];
4349 i
.seg
[1] = temp_seg
;
4353 /* Try to ensure constant immediates are represented in the smallest
4358 char guess_suffix
= 0;
4362 guess_suffix
= i
.suffix
;
4363 else if (i
.reg_operands
)
4365 /* Figure out a suffix from the last register operand specified.
4366 We can't do this properly yet, ie. excluding InOutPortReg,
4367 but the following works for instructions with immediates.
4368 In any case, we can't set i.suffix yet. */
4369 for (op
= i
.operands
; --op
>= 0;)
4370 if (i
.types
[op
].bitfield
.reg8
)
4372 guess_suffix
= BYTE_MNEM_SUFFIX
;
4375 else if (i
.types
[op
].bitfield
.reg16
)
4377 guess_suffix
= WORD_MNEM_SUFFIX
;
4380 else if (i
.types
[op
].bitfield
.reg32
)
4382 guess_suffix
= LONG_MNEM_SUFFIX
;
4385 else if (i
.types
[op
].bitfield
.reg64
)
4387 guess_suffix
= QWORD_MNEM_SUFFIX
;
4391 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
4392 guess_suffix
= WORD_MNEM_SUFFIX
;
4394 for (op
= i
.operands
; --op
>= 0;)
4395 if (operand_type_check (i
.types
[op
], imm
))
4397 switch (i
.op
[op
].imms
->X_op
)
4400 /* If a suffix is given, this operand may be shortened. */
4401 switch (guess_suffix
)
4403 case LONG_MNEM_SUFFIX
:
4404 i
.types
[op
].bitfield
.imm32
= 1;
4405 i
.types
[op
].bitfield
.imm64
= 1;
4407 case WORD_MNEM_SUFFIX
:
4408 i
.types
[op
].bitfield
.imm16
= 1;
4409 i
.types
[op
].bitfield
.imm32
= 1;
4410 i
.types
[op
].bitfield
.imm32s
= 1;
4411 i
.types
[op
].bitfield
.imm64
= 1;
4413 case BYTE_MNEM_SUFFIX
:
4414 i
.types
[op
].bitfield
.imm8
= 1;
4415 i
.types
[op
].bitfield
.imm8s
= 1;
4416 i
.types
[op
].bitfield
.imm16
= 1;
4417 i
.types
[op
].bitfield
.imm32
= 1;
4418 i
.types
[op
].bitfield
.imm32s
= 1;
4419 i
.types
[op
].bitfield
.imm64
= 1;
4423 /* If this operand is at most 16 bits, convert it
4424 to a signed 16 bit number before trying to see
4425 whether it will fit in an even smaller size.
4426 This allows a 16-bit operand such as $0xffe0 to
4427 be recognised as within Imm8S range. */
4428 if ((i
.types
[op
].bitfield
.imm16
)
4429 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
4431 i
.op
[op
].imms
->X_add_number
=
4432 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
4435 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
4436 if ((i
.types
[op
].bitfield
.imm32
)
4437 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
4440 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
4441 ^ ((offsetT
) 1 << 31))
4442 - ((offsetT
) 1 << 31));
4446 = operand_type_or (i
.types
[op
],
4447 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
4449 /* We must avoid matching of Imm32 templates when 64bit
4450 only immediate is available. */
4451 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
4452 i
.types
[op
].bitfield
.imm32
= 0;
4459 /* Symbols and expressions. */
4461 /* Convert symbolic operand to proper sizes for matching, but don't
4462 prevent matching a set of insns that only supports sizes other
4463 than those matching the insn suffix. */
4465 i386_operand_type mask
, allowed
;
4466 const insn_template
*t
;
4468 operand_type_set (&mask
, 0);
4469 operand_type_set (&allowed
, 0);
4471 for (t
= current_templates
->start
;
4472 t
< current_templates
->end
;
4474 allowed
= operand_type_or (allowed
,
4475 t
->operand_types
[op
]);
4476 switch (guess_suffix
)
4478 case QWORD_MNEM_SUFFIX
:
4479 mask
.bitfield
.imm64
= 1;
4480 mask
.bitfield
.imm32s
= 1;
4482 case LONG_MNEM_SUFFIX
:
4483 mask
.bitfield
.imm32
= 1;
4485 case WORD_MNEM_SUFFIX
:
4486 mask
.bitfield
.imm16
= 1;
4488 case BYTE_MNEM_SUFFIX
:
4489 mask
.bitfield
.imm8
= 1;
4494 allowed
= operand_type_and (mask
, allowed
);
4495 if (!operand_type_all_zero (&allowed
))
4496 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
4503 /* Try to use the smallest displacement type too. */
4505 optimize_disp (void)
4509 for (op
= i
.operands
; --op
>= 0;)
4510 if (operand_type_check (i
.types
[op
], disp
))
4512 if (i
.op
[op
].disps
->X_op
== O_constant
)
4514 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
4516 if (i
.types
[op
].bitfield
.disp16
4517 && (op_disp
& ~(offsetT
) 0xffff) == 0)
4519 /* If this operand is at most 16 bits, convert
4520 to a signed 16 bit number and don't use 64bit
4522 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
4523 i
.types
[op
].bitfield
.disp64
= 0;
4526 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
4527 if (i
.types
[op
].bitfield
.disp32
4528 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
4530 /* If this operand is at most 32 bits, convert
4531 to a signed 32 bit number and don't use 64bit
4533 op_disp
&= (((offsetT
) 2 << 31) - 1);
4534 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
4535 i
.types
[op
].bitfield
.disp64
= 0;
4538 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
4540 i
.types
[op
].bitfield
.disp8
= 0;
4541 i
.types
[op
].bitfield
.disp16
= 0;
4542 i
.types
[op
].bitfield
.disp32
= 0;
4543 i
.types
[op
].bitfield
.disp32s
= 0;
4544 i
.types
[op
].bitfield
.disp64
= 0;
4548 else if (flag_code
== CODE_64BIT
)
4550 if (fits_in_signed_long (op_disp
))
4552 i
.types
[op
].bitfield
.disp64
= 0;
4553 i
.types
[op
].bitfield
.disp32s
= 1;
4555 if (i
.prefix
[ADDR_PREFIX
]
4556 && fits_in_unsigned_long (op_disp
))
4557 i
.types
[op
].bitfield
.disp32
= 1;
4559 if ((i
.types
[op
].bitfield
.disp32
4560 || i
.types
[op
].bitfield
.disp32s
4561 || i
.types
[op
].bitfield
.disp16
)
4562 && fits_in_signed_byte (op_disp
))
4563 i
.types
[op
].bitfield
.disp8
= 1;
4565 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
4566 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
4568 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
4569 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
4570 i
.types
[op
].bitfield
.disp8
= 0;
4571 i
.types
[op
].bitfield
.disp16
= 0;
4572 i
.types
[op
].bitfield
.disp32
= 0;
4573 i
.types
[op
].bitfield
.disp32s
= 0;
4574 i
.types
[op
].bitfield
.disp64
= 0;
4577 /* We only support 64bit displacement on constants. */
4578 i
.types
[op
].bitfield
.disp64
= 0;
4582 /* Check if operands are valid for the instruction. */
4585 check_VecOperands (const insn_template
*t
)
4589 /* Without VSIB byte, we can't have a vector register for index. */
4590 if (!t
->opcode_modifier
.vecsib
4592 && (i
.index_reg
->reg_type
.bitfield
.regxmm
4593 || i
.index_reg
->reg_type
.bitfield
.regymm
4594 || i
.index_reg
->reg_type
.bitfield
.regzmm
))
4596 i
.error
= unsupported_vector_index_register
;
4600 /* Check if default mask is allowed. */
4601 if (t
->opcode_modifier
.nodefmask
4602 && (!i
.mask
|| i
.mask
->mask
->reg_num
== 0))
4604 i
.error
= no_default_mask
;
4608 /* For VSIB byte, we need a vector register for index, and all vector
4609 registers must be distinct. */
4610 if (t
->opcode_modifier
.vecsib
)
4613 || !((t
->opcode_modifier
.vecsib
== VecSIB128
4614 && i
.index_reg
->reg_type
.bitfield
.regxmm
)
4615 || (t
->opcode_modifier
.vecsib
== VecSIB256
4616 && i
.index_reg
->reg_type
.bitfield
.regymm
)
4617 || (t
->opcode_modifier
.vecsib
== VecSIB512
4618 && i
.index_reg
->reg_type
.bitfield
.regzmm
)))
4620 i
.error
= invalid_vsib_address
;
4624 gas_assert (i
.reg_operands
== 2 || i
.mask
);
4625 if (i
.reg_operands
== 2 && !i
.mask
)
4627 gas_assert (i
.types
[0].bitfield
.regxmm
4628 || i
.types
[0].bitfield
.regymm
);
4629 gas_assert (i
.types
[2].bitfield
.regxmm
4630 || i
.types
[2].bitfield
.regymm
);
4631 if (operand_check
== check_none
)
4633 if (register_number (i
.op
[0].regs
)
4634 != register_number (i
.index_reg
)
4635 && register_number (i
.op
[2].regs
)
4636 != register_number (i
.index_reg
)
4637 && register_number (i
.op
[0].regs
)
4638 != register_number (i
.op
[2].regs
))
4640 if (operand_check
== check_error
)
4642 i
.error
= invalid_vector_register_set
;
4645 as_warn (_("mask, index, and destination registers should be distinct"));
4647 else if (i
.reg_operands
== 1 && i
.mask
)
4649 if ((i
.types
[1].bitfield
.regymm
4650 || i
.types
[1].bitfield
.regzmm
)
4651 && (register_number (i
.op
[1].regs
)
4652 == register_number (i
.index_reg
)))
4654 if (operand_check
== check_error
)
4656 i
.error
= invalid_vector_register_set
;
4659 if (operand_check
!= check_none
)
4660 as_warn (_("index and destination registers should be distinct"));
4665 /* Check if broadcast is supported by the instruction and is applied
4666 to the memory operand. */
4669 int broadcasted_opnd_size
;
4671 /* Check if specified broadcast is supported in this instruction,
4672 and it's applied to memory operand of DWORD or QWORD type,
4673 depending on VecESize. */
4674 if (i
.broadcast
->type
!= t
->opcode_modifier
.broadcast
4675 || !i
.types
[i
.broadcast
->operand
].bitfield
.mem
4676 || (t
->opcode_modifier
.vecesize
== 0
4677 && !i
.types
[i
.broadcast
->operand
].bitfield
.dword
4678 && !i
.types
[i
.broadcast
->operand
].bitfield
.unspecified
)
4679 || (t
->opcode_modifier
.vecesize
== 1
4680 && !i
.types
[i
.broadcast
->operand
].bitfield
.qword
4681 && !i
.types
[i
.broadcast
->operand
].bitfield
.unspecified
))
4684 broadcasted_opnd_size
= t
->opcode_modifier
.vecesize
? 64 : 32;
4685 if (i
.broadcast
->type
== BROADCAST_1TO16
)
4686 broadcasted_opnd_size
<<= 4; /* Broadcast 1to16. */
4687 else if (i
.broadcast
->type
== BROADCAST_1TO8
)
4688 broadcasted_opnd_size
<<= 3; /* Broadcast 1to8. */
4689 else if (i
.broadcast
->type
== BROADCAST_1TO4
)
4690 broadcasted_opnd_size
<<= 2; /* Broadcast 1to4. */
4691 else if (i
.broadcast
->type
== BROADCAST_1TO2
)
4692 broadcasted_opnd_size
<<= 1; /* Broadcast 1to2. */
4696 if ((broadcasted_opnd_size
== 256
4697 && !t
->operand_types
[i
.broadcast
->operand
].bitfield
.ymmword
)
4698 || (broadcasted_opnd_size
== 512
4699 && !t
->operand_types
[i
.broadcast
->operand
].bitfield
.zmmword
))
4702 i
.error
= unsupported_broadcast
;
4706 /* If broadcast is supported in this instruction, we need to check if
4707 operand of one-element size isn't specified without broadcast. */
4708 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
4710 /* Find memory operand. */
4711 for (op
= 0; op
< i
.operands
; op
++)
4712 if (operand_type_check (i
.types
[op
], anymem
))
4714 gas_assert (op
< i
.operands
);
4715 /* Check size of the memory operand. */
4716 if ((t
->opcode_modifier
.vecesize
== 0
4717 && i
.types
[op
].bitfield
.dword
)
4718 || (t
->opcode_modifier
.vecesize
== 1
4719 && i
.types
[op
].bitfield
.qword
))
4721 i
.error
= broadcast_needed
;
4726 /* Check if requested masking is supported. */
4728 && (!t
->opcode_modifier
.masking
4730 && t
->opcode_modifier
.masking
== MERGING_MASKING
)))
4732 i
.error
= unsupported_masking
;
4736 /* Check if masking is applied to dest operand. */
4737 if (i
.mask
&& (i
.mask
->operand
!= (int) (i
.operands
- 1)))
4739 i
.error
= mask_not_on_destination
;
4746 if ((i
.rounding
->type
!= saeonly
4747 && !t
->opcode_modifier
.staticrounding
)
4748 || (i
.rounding
->type
== saeonly
4749 && (t
->opcode_modifier
.staticrounding
4750 || !t
->opcode_modifier
.sae
)))
4752 i
.error
= unsupported_rc_sae
;
4755 /* If the instruction has several immediate operands and one of
4756 them is rounding, the rounding operand should be the last
4757 immediate operand. */
4758 if (i
.imm_operands
> 1
4759 && i
.rounding
->operand
!= (int) (i
.imm_operands
- 1))
4761 i
.error
= rc_sae_operand_not_last_imm
;
4766 /* Check vector Disp8 operand. */
4767 if (t
->opcode_modifier
.disp8memshift
)
4770 i
.memshift
= t
->opcode_modifier
.vecesize
? 3 : 2;
4772 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
4774 for (op
= 0; op
< i
.operands
; op
++)
4775 if (operand_type_check (i
.types
[op
], disp
)
4776 && i
.op
[op
].disps
->X_op
== O_constant
)
4778 offsetT value
= i
.op
[op
].disps
->X_add_number
;
4780 = (i
.disp_encoding
!= disp_encoding_32bit
4781 && fits_in_vec_disp8 (value
));
4782 if (t
->operand_types
[op
].bitfield
.vec_disp8
)
4785 i
.types
[op
].bitfield
.vec_disp8
= 1;
4788 /* Vector insn can only have Vec_Disp8/Disp32 in
4789 32/64bit modes, and Vec_Disp8/Disp16 in 16bit
4791 i
.types
[op
].bitfield
.disp8
= 0;
4792 if (flag_code
!= CODE_16BIT
)
4793 i
.types
[op
].bitfield
.disp16
= 0;
4796 else if (flag_code
!= CODE_16BIT
)
4798 /* One form of this instruction supports vector Disp8.
4799 Try vector Disp8 if we need to use Disp32. */
4800 if (vec_disp8_ok
&& !fits_in_signed_byte (value
))
4802 i
.error
= try_vector_disp8
;
4814 /* Check if operands are valid for the instruction. Update VEX
4818 VEX_check_operands (const insn_template
*t
)
4820 if (i
.vec_encoding
== vex_encoding_evex
)
4822 /* This instruction must be encoded with EVEX prefix. */
4823 if (!t
->opcode_modifier
.evex
)
4825 i
.error
= unsupported
;
4831 if (!t
->opcode_modifier
.vex
)
4833 /* This instruction template doesn't have VEX prefix. */
4834 if (i
.vec_encoding
!= vex_encoding_default
)
4836 i
.error
= unsupported
;
4842 /* Only check VEX_Imm4, which must be the first operand. */
4843 if (t
->operand_types
[0].bitfield
.vec_imm4
)
4845 if (i
.op
[0].imms
->X_op
!= O_constant
4846 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
4852 /* Turn off Imm8 so that update_imm won't complain. */
4853 i
.types
[0] = vec_imm4
;
4859 static const insn_template
*
4860 match_template (char mnem_suffix
)
4862 /* Points to template once we've found it. */
4863 const insn_template
*t
;
4864 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
4865 i386_operand_type overlap4
;
4866 unsigned int found_reverse_match
;
4867 i386_opcode_modifier suffix_check
, mnemsuf_check
;
4868 i386_operand_type operand_types
[MAX_OPERANDS
];
4869 int addr_prefix_disp
;
4871 unsigned int found_cpu_match
;
4872 unsigned int check_register
;
4873 enum i386_error specific_error
= 0;
4875 #if MAX_OPERANDS != 5
4876 # error "MAX_OPERANDS must be 5."
4879 found_reverse_match
= 0;
4880 addr_prefix_disp
= -1;
4882 memset (&suffix_check
, 0, sizeof (suffix_check
));
4883 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
4884 suffix_check
.no_bsuf
= 1;
4885 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
4886 suffix_check
.no_wsuf
= 1;
4887 else if (i
.suffix
== SHORT_MNEM_SUFFIX
)
4888 suffix_check
.no_ssuf
= 1;
4889 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
4890 suffix_check
.no_lsuf
= 1;
4891 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
4892 suffix_check
.no_qsuf
= 1;
4893 else if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
4894 suffix_check
.no_ldsuf
= 1;
4896 memset (&mnemsuf_check
, 0, sizeof (mnemsuf_check
));
4899 switch (mnem_suffix
)
4901 case BYTE_MNEM_SUFFIX
: mnemsuf_check
.no_bsuf
= 1; break;
4902 case WORD_MNEM_SUFFIX
: mnemsuf_check
.no_wsuf
= 1; break;
4903 case SHORT_MNEM_SUFFIX
: mnemsuf_check
.no_ssuf
= 1; break;
4904 case LONG_MNEM_SUFFIX
: mnemsuf_check
.no_lsuf
= 1; break;
4905 case QWORD_MNEM_SUFFIX
: mnemsuf_check
.no_qsuf
= 1; break;
4909 /* Must have right number of operands. */
4910 i
.error
= number_of_operands_mismatch
;
4912 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
4914 addr_prefix_disp
= -1;
4916 if (i
.operands
!= t
->operands
)
4919 /* Check processor support. */
4920 i
.error
= unsupported
;
4921 found_cpu_match
= (cpu_flags_match (t
)
4922 == CPU_FLAGS_PERFECT_MATCH
);
4923 if (!found_cpu_match
)
4926 /* Check old gcc support. */
4927 i
.error
= old_gcc_only
;
4928 if (!old_gcc
&& t
->opcode_modifier
.oldgcc
)
4931 /* Check AT&T mnemonic. */
4932 i
.error
= unsupported_with_intel_mnemonic
;
4933 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
4936 /* Check AT&T/Intel syntax and Intel64/AMD64 ISA. */
4937 i
.error
= unsupported_syntax
;
4938 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
4939 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
)
4940 || (intel64
&& t
->opcode_modifier
.amd64
)
4941 || (!intel64
&& t
->opcode_modifier
.intel64
))
4944 /* Check the suffix, except for some instructions in intel mode. */
4945 i
.error
= invalid_instruction_suffix
;
4946 if ((!intel_syntax
|| !t
->opcode_modifier
.ignoresize
)
4947 && ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
4948 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
4949 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
4950 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
4951 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
4952 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
)))
4954 /* In Intel mode all mnemonic suffixes must be explicitly allowed. */
4955 if ((t
->opcode_modifier
.no_bsuf
&& mnemsuf_check
.no_bsuf
)
4956 || (t
->opcode_modifier
.no_wsuf
&& mnemsuf_check
.no_wsuf
)
4957 || (t
->opcode_modifier
.no_lsuf
&& mnemsuf_check
.no_lsuf
)
4958 || (t
->opcode_modifier
.no_ssuf
&& mnemsuf_check
.no_ssuf
)
4959 || (t
->opcode_modifier
.no_qsuf
&& mnemsuf_check
.no_qsuf
)
4960 || (t
->opcode_modifier
.no_ldsuf
&& mnemsuf_check
.no_ldsuf
))
4963 if (!operand_size_match (t
))
4966 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4967 operand_types
[j
] = t
->operand_types
[j
];
4969 /* In general, don't allow 64-bit operands in 32-bit mode. */
4970 if (i
.suffix
== QWORD_MNEM_SUFFIX
4971 && flag_code
!= CODE_64BIT
4973 ? (!t
->opcode_modifier
.ignoresize
4974 && !intel_float_operand (t
->name
))
4975 : intel_float_operand (t
->name
) != 2)
4976 && ((!operand_types
[0].bitfield
.regmmx
4977 && !operand_types
[0].bitfield
.regxmm
4978 && !operand_types
[0].bitfield
.regymm
4979 && !operand_types
[0].bitfield
.regzmm
)
4980 || (!operand_types
[t
->operands
> 1].bitfield
.regmmx
4981 && operand_types
[t
->operands
> 1].bitfield
.regxmm
4982 && operand_types
[t
->operands
> 1].bitfield
.regymm
4983 && operand_types
[t
->operands
> 1].bitfield
.regzmm
))
4984 && (t
->base_opcode
!= 0x0fc7
4985 || t
->extension_opcode
!= 1 /* cmpxchg8b */))
4988 /* In general, don't allow 32-bit operands on pre-386. */
4989 else if (i
.suffix
== LONG_MNEM_SUFFIX
4990 && !cpu_arch_flags
.bitfield
.cpui386
4992 ? (!t
->opcode_modifier
.ignoresize
4993 && !intel_float_operand (t
->name
))
4994 : intel_float_operand (t
->name
) != 2)
4995 && ((!operand_types
[0].bitfield
.regmmx
4996 && !operand_types
[0].bitfield
.regxmm
)
4997 || (!operand_types
[t
->operands
> 1].bitfield
.regmmx
4998 && operand_types
[t
->operands
> 1].bitfield
.regxmm
)))
5001 /* Do not verify operands when there are none. */
5005 /* We've found a match; break out of loop. */
5009 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
5010 into Disp32/Disp16/Disp32 operand. */
5011 if (i
.prefix
[ADDR_PREFIX
] != 0)
5013 /* There should be only one Disp operand. */
5017 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5019 if (operand_types
[j
].bitfield
.disp16
)
5021 addr_prefix_disp
= j
;
5022 operand_types
[j
].bitfield
.disp32
= 1;
5023 operand_types
[j
].bitfield
.disp16
= 0;
5029 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5031 if (operand_types
[j
].bitfield
.disp32
)
5033 addr_prefix_disp
= j
;
5034 operand_types
[j
].bitfield
.disp32
= 0;
5035 operand_types
[j
].bitfield
.disp16
= 1;
5041 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5043 if (operand_types
[j
].bitfield
.disp64
)
5045 addr_prefix_disp
= j
;
5046 operand_types
[j
].bitfield
.disp64
= 0;
5047 operand_types
[j
].bitfield
.disp32
= 1;
5055 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
5056 if (i
.reloc
[0] == BFD_RELOC_386_GOT32
&& t
->base_opcode
== 0xa0)
5059 /* We check register size if needed. */
5060 check_register
= t
->opcode_modifier
.checkregsize
;
5061 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
5062 switch (t
->operands
)
5065 if (!operand_type_match (overlap0
, i
.types
[0]))
5069 /* xchg %eax, %eax is a special case. It is an alias for nop
5070 only in 32bit mode and we can use opcode 0x90. In 64bit
5071 mode, we can't use 0x90 for xchg %eax, %eax since it should
5072 zero-extend %eax to %rax. */
5073 if (flag_code
== CODE_64BIT
5074 && t
->base_opcode
== 0x90
5075 && operand_type_equal (&i
.types
[0], &acc32
)
5076 && operand_type_equal (&i
.types
[1], &acc32
))
5078 /* If we want store form, we reverse direction of operands. */
5079 if (i
.dir_encoding
== dir_encoding_store
5080 && t
->opcode_modifier
.d
)
5085 /* If we want store form, we skip the current load. */
5086 if (i
.dir_encoding
== dir_encoding_store
5087 && i
.mem_operands
== 0
5088 && t
->opcode_modifier
.load
)
5093 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
5094 if (!operand_type_match (overlap0
, i
.types
[0])
5095 || !operand_type_match (overlap1
, i
.types
[1])
5097 && !operand_type_register_match (overlap0
, i
.types
[0],
5099 overlap1
, i
.types
[1],
5102 /* Check if other direction is valid ... */
5103 if (!t
->opcode_modifier
.d
&& !t
->opcode_modifier
.floatd
)
5107 /* Try reversing direction of operands. */
5108 overlap0
= operand_type_and (i
.types
[0], operand_types
[1]);
5109 overlap1
= operand_type_and (i
.types
[1], operand_types
[0]);
5110 if (!operand_type_match (overlap0
, i
.types
[0])
5111 || !operand_type_match (overlap1
, i
.types
[1])
5113 && !operand_type_register_match (overlap0
,
5120 /* Does not match either direction. */
5123 /* found_reverse_match holds which of D or FloatDR
5125 if (t
->opcode_modifier
.d
)
5126 found_reverse_match
= Opcode_D
;
5127 else if (t
->opcode_modifier
.floatd
)
5128 found_reverse_match
= Opcode_FloatD
;
5130 found_reverse_match
= 0;
5131 if (t
->opcode_modifier
.floatr
)
5132 found_reverse_match
|= Opcode_FloatR
;
5136 /* Found a forward 2 operand match here. */
5137 switch (t
->operands
)
5140 overlap4
= operand_type_and (i
.types
[4],
5144 overlap3
= operand_type_and (i
.types
[3],
5148 overlap2
= operand_type_and (i
.types
[2],
5153 switch (t
->operands
)
5156 if (!operand_type_match (overlap4
, i
.types
[4])
5157 || !operand_type_register_match (overlap3
,
5166 if (!operand_type_match (overlap3
, i
.types
[3])
5168 && !operand_type_register_match (overlap2
,
5177 /* Here we make use of the fact that there are no
5178 reverse match 3 operand instructions, and all 3
5179 operand instructions only need to be checked for
5180 register consistency between operands 2 and 3. */
5181 if (!operand_type_match (overlap2
, i
.types
[2])
5183 && !operand_type_register_match (overlap1
,
5193 /* Found either forward/reverse 2, 3 or 4 operand match here:
5194 slip through to break. */
5196 if (!found_cpu_match
)
5198 found_reverse_match
= 0;
5202 /* Check if vector and VEX operands are valid. */
5203 if (check_VecOperands (t
) || VEX_check_operands (t
))
5205 specific_error
= i
.error
;
5209 /* We've found a match; break out of loop. */
5213 if (t
== current_templates
->end
)
5215 /* We found no match. */
5216 const char *err_msg
;
5217 switch (specific_error
? specific_error
: i
.error
)
5221 case operand_size_mismatch
:
5222 err_msg
= _("operand size mismatch");
5224 case operand_type_mismatch
:
5225 err_msg
= _("operand type mismatch");
5227 case register_type_mismatch
:
5228 err_msg
= _("register type mismatch");
5230 case number_of_operands_mismatch
:
5231 err_msg
= _("number of operands mismatch");
5233 case invalid_instruction_suffix
:
5234 err_msg
= _("invalid instruction suffix");
5237 err_msg
= _("constant doesn't fit in 4 bits");
5240 err_msg
= _("only supported with old gcc");
5242 case unsupported_with_intel_mnemonic
:
5243 err_msg
= _("unsupported with Intel mnemonic");
5245 case unsupported_syntax
:
5246 err_msg
= _("unsupported syntax");
5249 as_bad (_("unsupported instruction `%s'"),
5250 current_templates
->start
->name
);
5252 case invalid_vsib_address
:
5253 err_msg
= _("invalid VSIB address");
5255 case invalid_vector_register_set
:
5256 err_msg
= _("mask, index, and destination registers must be distinct");
5258 case unsupported_vector_index_register
:
5259 err_msg
= _("unsupported vector index register");
5261 case unsupported_broadcast
:
5262 err_msg
= _("unsupported broadcast");
5264 case broadcast_not_on_src_operand
:
5265 err_msg
= _("broadcast not on source memory operand");
5267 case broadcast_needed
:
5268 err_msg
= _("broadcast is needed for operand of such type");
5270 case unsupported_masking
:
5271 err_msg
= _("unsupported masking");
5273 case mask_not_on_destination
:
5274 err_msg
= _("mask not on destination operand");
5276 case no_default_mask
:
5277 err_msg
= _("default mask isn't allowed");
5279 case unsupported_rc_sae
:
5280 err_msg
= _("unsupported static rounding/sae");
5282 case rc_sae_operand_not_last_imm
:
5284 err_msg
= _("RC/SAE operand must precede immediate operands");
5286 err_msg
= _("RC/SAE operand must follow immediate operands");
5288 case invalid_register_operand
:
5289 err_msg
= _("invalid register operand");
5292 as_bad (_("%s for `%s'"), err_msg
,
5293 current_templates
->start
->name
);
5297 if (!quiet_warnings
)
5300 && (i
.types
[0].bitfield
.jumpabsolute
5301 != operand_types
[0].bitfield
.jumpabsolute
))
5303 as_warn (_("indirect %s without `*'"), t
->name
);
5306 if (t
->opcode_modifier
.isprefix
5307 && t
->opcode_modifier
.ignoresize
)
5309 /* Warn them that a data or address size prefix doesn't
5310 affect assembly of the next line of code. */
5311 as_warn (_("stand-alone `%s' prefix"), t
->name
);
5315 /* Copy the template we found. */
5318 if (addr_prefix_disp
!= -1)
5319 i
.tm
.operand_types
[addr_prefix_disp
]
5320 = operand_types
[addr_prefix_disp
];
5322 if (found_reverse_match
)
5324 /* If we found a reverse match we must alter the opcode
5325 direction bit. found_reverse_match holds bits to change
5326 (different for int & float insns). */
5328 i
.tm
.base_opcode
^= found_reverse_match
;
5330 i
.tm
.operand_types
[0] = operand_types
[1];
5331 i
.tm
.operand_types
[1] = operand_types
[0];
5340 int mem_op
= operand_type_check (i
.types
[0], anymem
) ? 0 : 1;
5341 if (i
.tm
.operand_types
[mem_op
].bitfield
.esseg
)
5343 if (i
.seg
[0] != NULL
&& i
.seg
[0] != &es
)
5345 as_bad (_("`%s' operand %d must use `%ses' segment"),
5351 /* There's only ever one segment override allowed per instruction.
5352 This instruction possibly has a legal segment override on the
5353 second operand, so copy the segment to where non-string
5354 instructions store it, allowing common code. */
5355 i
.seg
[0] = i
.seg
[1];
5357 else if (i
.tm
.operand_types
[mem_op
+ 1].bitfield
.esseg
)
5359 if (i
.seg
[1] != NULL
&& i
.seg
[1] != &es
)
5361 as_bad (_("`%s' operand %d must use `%ses' segment"),
5372 process_suffix (void)
5374 /* If matched instruction specifies an explicit instruction mnemonic
5376 if (i
.tm
.opcode_modifier
.size16
)
5377 i
.suffix
= WORD_MNEM_SUFFIX
;
5378 else if (i
.tm
.opcode_modifier
.size32
)
5379 i
.suffix
= LONG_MNEM_SUFFIX
;
5380 else if (i
.tm
.opcode_modifier
.size64
)
5381 i
.suffix
= QWORD_MNEM_SUFFIX
;
5382 else if (i
.reg_operands
)
5384 /* If there's no instruction mnemonic suffix we try to invent one
5385 based on register operands. */
5388 /* We take i.suffix from the last register operand specified,
5389 Destination register type is more significant than source
5390 register type. crc32 in SSE4.2 prefers source register
5392 if (i
.tm
.base_opcode
== 0xf20f38f1)
5394 if (i
.types
[0].bitfield
.reg16
)
5395 i
.suffix
= WORD_MNEM_SUFFIX
;
5396 else if (i
.types
[0].bitfield
.reg32
)
5397 i
.suffix
= LONG_MNEM_SUFFIX
;
5398 else if (i
.types
[0].bitfield
.reg64
)
5399 i
.suffix
= QWORD_MNEM_SUFFIX
;
5401 else if (i
.tm
.base_opcode
== 0xf20f38f0)
5403 if (i
.types
[0].bitfield
.reg8
)
5404 i
.suffix
= BYTE_MNEM_SUFFIX
;
5411 if (i
.tm
.base_opcode
== 0xf20f38f1
5412 || i
.tm
.base_opcode
== 0xf20f38f0)
5414 /* We have to know the operand size for crc32. */
5415 as_bad (_("ambiguous memory operand size for `%s`"),
5420 for (op
= i
.operands
; --op
>= 0;)
5421 if (!i
.tm
.operand_types
[op
].bitfield
.inoutportreg
)
5423 if (i
.types
[op
].bitfield
.reg8
)
5425 i
.suffix
= BYTE_MNEM_SUFFIX
;
5428 else if (i
.types
[op
].bitfield
.reg16
)
5430 i
.suffix
= WORD_MNEM_SUFFIX
;
5433 else if (i
.types
[op
].bitfield
.reg32
)
5435 i
.suffix
= LONG_MNEM_SUFFIX
;
5438 else if (i
.types
[op
].bitfield
.reg64
)
5440 i
.suffix
= QWORD_MNEM_SUFFIX
;
5446 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
5449 && i
.tm
.opcode_modifier
.ignoresize
5450 && i
.tm
.opcode_modifier
.no_bsuf
)
5452 else if (!check_byte_reg ())
5455 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
5458 && i
.tm
.opcode_modifier
.ignoresize
5459 && i
.tm
.opcode_modifier
.no_lsuf
)
5461 else if (!check_long_reg ())
5464 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
5467 && i
.tm
.opcode_modifier
.ignoresize
5468 && i
.tm
.opcode_modifier
.no_qsuf
)
5470 else if (!check_qword_reg ())
5473 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
5476 && i
.tm
.opcode_modifier
.ignoresize
5477 && i
.tm
.opcode_modifier
.no_wsuf
)
5479 else if (!check_word_reg ())
5482 else if (i
.suffix
== XMMWORD_MNEM_SUFFIX
5483 || i
.suffix
== YMMWORD_MNEM_SUFFIX
5484 || i
.suffix
== ZMMWORD_MNEM_SUFFIX
)
5486 /* Skip if the instruction has x/y/z suffix. match_template
5487 should check if it is a valid suffix. */
5489 else if (intel_syntax
&& i
.tm
.opcode_modifier
.ignoresize
)
5490 /* Do nothing if the instruction is going to ignore the prefix. */
5495 else if (i
.tm
.opcode_modifier
.defaultsize
5497 /* exclude fldenv/frstor/fsave/fstenv */
5498 && i
.tm
.opcode_modifier
.no_ssuf
)
5500 i
.suffix
= stackop_size
;
5502 else if (intel_syntax
5504 && (i
.tm
.operand_types
[0].bitfield
.jumpabsolute
5505 || i
.tm
.opcode_modifier
.jumpbyte
5506 || i
.tm
.opcode_modifier
.jumpintersegment
5507 || (i
.tm
.base_opcode
== 0x0f01 /* [ls][gi]dt */
5508 && i
.tm
.extension_opcode
<= 3)))
5513 if (!i
.tm
.opcode_modifier
.no_qsuf
)
5515 i
.suffix
= QWORD_MNEM_SUFFIX
;
5520 if (!i
.tm
.opcode_modifier
.no_lsuf
)
5521 i
.suffix
= LONG_MNEM_SUFFIX
;
5524 if (!i
.tm
.opcode_modifier
.no_wsuf
)
5525 i
.suffix
= WORD_MNEM_SUFFIX
;
5534 if (i
.tm
.opcode_modifier
.w
)
5536 as_bad (_("no instruction mnemonic suffix given and "
5537 "no register operands; can't size instruction"));
5543 unsigned int suffixes
;
5545 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
5546 if (!i
.tm
.opcode_modifier
.no_wsuf
)
5548 if (!i
.tm
.opcode_modifier
.no_lsuf
)
5550 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
5552 if (!i
.tm
.opcode_modifier
.no_ssuf
)
5554 if (!i
.tm
.opcode_modifier
.no_qsuf
)
5557 /* There are more than suffix matches. */
5558 if (i
.tm
.opcode_modifier
.w
5559 || ((suffixes
& (suffixes
- 1))
5560 && !i
.tm
.opcode_modifier
.defaultsize
5561 && !i
.tm
.opcode_modifier
.ignoresize
))
5563 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
5569 /* Change the opcode based on the operand size given by i.suffix;
5570 We don't need to change things for byte insns. */
5573 && i
.suffix
!= BYTE_MNEM_SUFFIX
5574 && i
.suffix
!= XMMWORD_MNEM_SUFFIX
5575 && i
.suffix
!= YMMWORD_MNEM_SUFFIX
5576 && i
.suffix
!= ZMMWORD_MNEM_SUFFIX
)
5578 /* It's not a byte, select word/dword operation. */
5579 if (i
.tm
.opcode_modifier
.w
)
5581 if (i
.tm
.opcode_modifier
.shortform
)
5582 i
.tm
.base_opcode
|= 8;
5584 i
.tm
.base_opcode
|= 1;
5587 /* Now select between word & dword operations via the operand
5588 size prefix, except for instructions that will ignore this
5590 if (i
.tm
.opcode_modifier
.addrprefixop0
)
5592 /* The address size override prefix changes the size of the
5594 if ((flag_code
== CODE_32BIT
5595 && i
.op
->regs
[0].reg_type
.bitfield
.reg16
)
5596 || (flag_code
!= CODE_32BIT
5597 && i
.op
->regs
[0].reg_type
.bitfield
.reg32
))
5598 if (!add_prefix (ADDR_PREFIX_OPCODE
))
5601 else if (i
.suffix
!= QWORD_MNEM_SUFFIX
5602 && i
.suffix
!= LONG_DOUBLE_MNEM_SUFFIX
5603 && !i
.tm
.opcode_modifier
.ignoresize
5604 && !i
.tm
.opcode_modifier
.floatmf
5605 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
5606 || (flag_code
== CODE_64BIT
5607 && i
.tm
.opcode_modifier
.jumpbyte
)))
5609 unsigned int prefix
= DATA_PREFIX_OPCODE
;
5611 if (i
.tm
.opcode_modifier
.jumpbyte
) /* jcxz, loop */
5612 prefix
= ADDR_PREFIX_OPCODE
;
5614 if (!add_prefix (prefix
))
5618 /* Set mode64 for an operand. */
5619 if (i
.suffix
== QWORD_MNEM_SUFFIX
5620 && flag_code
== CODE_64BIT
5621 && !i
.tm
.opcode_modifier
.norex64
)
5623 /* Special case for xchg %rax,%rax. It is NOP and doesn't
5624 need rex64. cmpxchg8b is also a special case. */
5625 if (! (i
.operands
== 2
5626 && i
.tm
.base_opcode
== 0x90
5627 && i
.tm
.extension_opcode
== None
5628 && operand_type_equal (&i
.types
[0], &acc64
)
5629 && operand_type_equal (&i
.types
[1], &acc64
))
5630 && ! (i
.operands
== 1
5631 && i
.tm
.base_opcode
== 0xfc7
5632 && i
.tm
.extension_opcode
== 1
5633 && !operand_type_check (i
.types
[0], reg
)
5634 && operand_type_check (i
.types
[0], anymem
)))
5638 /* Size floating point instruction. */
5639 if (i
.suffix
== LONG_MNEM_SUFFIX
)
5640 if (i
.tm
.opcode_modifier
.floatmf
)
5641 i
.tm
.base_opcode
^= 4;
5648 check_byte_reg (void)
5652 for (op
= i
.operands
; --op
>= 0;)
5654 /* If this is an eight bit register, it's OK. If it's the 16 or
5655 32 bit version of an eight bit register, we will just use the
5656 low portion, and that's OK too. */
5657 if (i
.types
[op
].bitfield
.reg8
)
5660 /* I/O port address operands are OK too. */
5661 if (i
.tm
.operand_types
[op
].bitfield
.inoutportreg
)
5664 /* crc32 doesn't generate this warning. */
5665 if (i
.tm
.base_opcode
== 0xf20f38f0)
5668 if ((i
.types
[op
].bitfield
.reg16
5669 || i
.types
[op
].bitfield
.reg32
5670 || i
.types
[op
].bitfield
.reg64
)
5671 && i
.op
[op
].regs
->reg_num
< 4
5672 /* Prohibit these changes in 64bit mode, since the lowering
5673 would be more complicated. */
5674 && flag_code
!= CODE_64BIT
)
5676 #if REGISTER_WARNINGS
5677 if (!quiet_warnings
)
5678 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5680 (i
.op
[op
].regs
+ (i
.types
[op
].bitfield
.reg16
5681 ? REGNAM_AL
- REGNAM_AX
5682 : REGNAM_AL
- REGNAM_EAX
))->reg_name
,
5684 i
.op
[op
].regs
->reg_name
,
5689 /* Any other register is bad. */
5690 if (i
.types
[op
].bitfield
.reg16
5691 || i
.types
[op
].bitfield
.reg32
5692 || i
.types
[op
].bitfield
.reg64
5693 || i
.types
[op
].bitfield
.regmmx
5694 || i
.types
[op
].bitfield
.regxmm
5695 || i
.types
[op
].bitfield
.regymm
5696 || i
.types
[op
].bitfield
.regzmm
5697 || i
.types
[op
].bitfield
.sreg2
5698 || i
.types
[op
].bitfield
.sreg3
5699 || i
.types
[op
].bitfield
.control
5700 || i
.types
[op
].bitfield
.debug
5701 || i
.types
[op
].bitfield
.test
5702 || i
.types
[op
].bitfield
.floatreg
5703 || i
.types
[op
].bitfield
.floatacc
)
5705 as_bad (_("`%s%s' not allowed with `%s%c'"),
5707 i
.op
[op
].regs
->reg_name
,
5717 check_long_reg (void)
5721 for (op
= i
.operands
; --op
>= 0;)
5722 /* Reject eight bit registers, except where the template requires
5723 them. (eg. movzb) */
5724 if (i
.types
[op
].bitfield
.reg8
5725 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5726 || i
.tm
.operand_types
[op
].bitfield
.reg32
5727 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5729 as_bad (_("`%s%s' not allowed with `%s%c'"),
5731 i
.op
[op
].regs
->reg_name
,
5736 /* Warn if the e prefix on a general reg is missing. */
5737 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
5738 && i
.types
[op
].bitfield
.reg16
5739 && (i
.tm
.operand_types
[op
].bitfield
.reg32
5740 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5742 /* Prohibit these changes in the 64bit mode, since the
5743 lowering is more complicated. */
5744 if (flag_code
== CODE_64BIT
)
5746 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5747 register_prefix
, i
.op
[op
].regs
->reg_name
,
5751 #if REGISTER_WARNINGS
5752 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5754 (i
.op
[op
].regs
+ REGNAM_EAX
- REGNAM_AX
)->reg_name
,
5755 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
5758 /* Warn if the r prefix on a general reg is present. */
5759 else if (i
.types
[op
].bitfield
.reg64
5760 && (i
.tm
.operand_types
[op
].bitfield
.reg32
5761 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5764 && i
.tm
.opcode_modifier
.toqword
5765 && !i
.types
[0].bitfield
.regxmm
)
5767 /* Convert to QWORD. We want REX byte. */
5768 i
.suffix
= QWORD_MNEM_SUFFIX
;
5772 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5773 register_prefix
, i
.op
[op
].regs
->reg_name
,
5782 check_qword_reg (void)
5786 for (op
= i
.operands
; --op
>= 0; )
5787 /* Reject eight bit registers, except where the template requires
5788 them. (eg. movzb) */
5789 if (i
.types
[op
].bitfield
.reg8
5790 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5791 || i
.tm
.operand_types
[op
].bitfield
.reg32
5792 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5794 as_bad (_("`%s%s' not allowed with `%s%c'"),
5796 i
.op
[op
].regs
->reg_name
,
5801 /* Warn if the r prefix on a general reg is missing. */
5802 else if ((i
.types
[op
].bitfield
.reg16
5803 || i
.types
[op
].bitfield
.reg32
)
5804 && (i
.tm
.operand_types
[op
].bitfield
.reg64
5805 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5807 /* Prohibit these changes in the 64bit mode, since the
5808 lowering is more complicated. */
5810 && i
.tm
.opcode_modifier
.todword
5811 && !i
.types
[0].bitfield
.regxmm
)
5813 /* Convert to DWORD. We don't want REX byte. */
5814 i
.suffix
= LONG_MNEM_SUFFIX
;
5818 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5819 register_prefix
, i
.op
[op
].regs
->reg_name
,
5828 check_word_reg (void)
5831 for (op
= i
.operands
; --op
>= 0;)
5832 /* Reject eight bit registers, except where the template requires
5833 them. (eg. movzb) */
5834 if (i
.types
[op
].bitfield
.reg8
5835 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5836 || i
.tm
.operand_types
[op
].bitfield
.reg32
5837 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5839 as_bad (_("`%s%s' not allowed with `%s%c'"),
5841 i
.op
[op
].regs
->reg_name
,
5846 /* Warn if the e or r prefix on a general reg is present. */
5847 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
5848 && (i
.types
[op
].bitfield
.reg32
5849 || i
.types
[op
].bitfield
.reg64
)
5850 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5851 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5853 /* Prohibit these changes in the 64bit mode, since the
5854 lowering is more complicated. */
5855 if (flag_code
== CODE_64BIT
)
5857 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5858 register_prefix
, i
.op
[op
].regs
->reg_name
,
5862 #if REGISTER_WARNINGS
5863 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5865 (i
.op
[op
].regs
+ REGNAM_AX
- REGNAM_EAX
)->reg_name
,
5866 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
5873 update_imm (unsigned int j
)
5875 i386_operand_type overlap
= i
.types
[j
];
5876 if ((overlap
.bitfield
.imm8
5877 || overlap
.bitfield
.imm8s
5878 || overlap
.bitfield
.imm16
5879 || overlap
.bitfield
.imm32
5880 || overlap
.bitfield
.imm32s
5881 || overlap
.bitfield
.imm64
)
5882 && !operand_type_equal (&overlap
, &imm8
)
5883 && !operand_type_equal (&overlap
, &imm8s
)
5884 && !operand_type_equal (&overlap
, &imm16
)
5885 && !operand_type_equal (&overlap
, &imm32
)
5886 && !operand_type_equal (&overlap
, &imm32s
)
5887 && !operand_type_equal (&overlap
, &imm64
))
5891 i386_operand_type temp
;
5893 operand_type_set (&temp
, 0);
5894 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
5896 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
5897 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
5899 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
5900 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
5901 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
5903 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
5904 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
5907 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
5910 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
5911 || operand_type_equal (&overlap
, &imm16_32
)
5912 || operand_type_equal (&overlap
, &imm16_32s
))
5914 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5919 if (!operand_type_equal (&overlap
, &imm8
)
5920 && !operand_type_equal (&overlap
, &imm8s
)
5921 && !operand_type_equal (&overlap
, &imm16
)
5922 && !operand_type_equal (&overlap
, &imm32
)
5923 && !operand_type_equal (&overlap
, &imm32s
)
5924 && !operand_type_equal (&overlap
, &imm64
))
5926 as_bad (_("no instruction mnemonic suffix given; "
5927 "can't determine immediate size"));
5931 i
.types
[j
] = overlap
;
5941 /* Update the first 2 immediate operands. */
5942 n
= i
.operands
> 2 ? 2 : i
.operands
;
5945 for (j
= 0; j
< n
; j
++)
5946 if (update_imm (j
) == 0)
5949 /* The 3rd operand can't be immediate operand. */
5950 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
5957 bad_implicit_operand (int xmm
)
5959 const char *ireg
= xmm
? "xmm0" : "ymm0";
5962 as_bad (_("the last operand of `%s' must be `%s%s'"),
5963 i
.tm
.name
, register_prefix
, ireg
);
5965 as_bad (_("the first operand of `%s' must be `%s%s'"),
5966 i
.tm
.name
, register_prefix
, ireg
);
5971 process_operands (void)
5973 /* Default segment register this instruction will use for memory
5974 accesses. 0 means unknown. This is only for optimizing out
5975 unnecessary segment overrides. */
5976 const seg_entry
*default_seg
= 0;
5978 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
5980 unsigned int dupl
= i
.operands
;
5981 unsigned int dest
= dupl
- 1;
5984 /* The destination must be an xmm register. */
5985 gas_assert (i
.reg_operands
5986 && MAX_OPERANDS
> dupl
5987 && operand_type_equal (&i
.types
[dest
], ®xmm
));
5989 if (i
.tm
.opcode_modifier
.firstxmm0
)
5991 /* The first operand is implicit and must be xmm0. */
5992 gas_assert (operand_type_equal (&i
.types
[0], ®xmm
));
5993 if (register_number (i
.op
[0].regs
) != 0)
5994 return bad_implicit_operand (1);
5996 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
5998 /* Keep xmm0 for instructions with VEX prefix and 3
6004 /* We remove the first xmm0 and keep the number of
6005 operands unchanged, which in fact duplicates the
6007 for (j
= 1; j
< i
.operands
; j
++)
6009 i
.op
[j
- 1] = i
.op
[j
];
6010 i
.types
[j
- 1] = i
.types
[j
];
6011 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
6015 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
6017 gas_assert ((MAX_OPERANDS
- 1) > dupl
6018 && (i
.tm
.opcode_modifier
.vexsources
6021 /* Add the implicit xmm0 for instructions with VEX prefix
6023 for (j
= i
.operands
; j
> 0; j
--)
6025 i
.op
[j
] = i
.op
[j
- 1];
6026 i
.types
[j
] = i
.types
[j
- 1];
6027 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
6030 = (const reg_entry
*) hash_find (reg_hash
, "xmm0");
6031 i
.types
[0] = regxmm
;
6032 i
.tm
.operand_types
[0] = regxmm
;
6035 i
.reg_operands
+= 2;
6040 i
.op
[dupl
] = i
.op
[dest
];
6041 i
.types
[dupl
] = i
.types
[dest
];
6042 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
6051 i
.op
[dupl
] = i
.op
[dest
];
6052 i
.types
[dupl
] = i
.types
[dest
];
6053 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
6056 if (i
.tm
.opcode_modifier
.immext
)
6059 else if (i
.tm
.opcode_modifier
.firstxmm0
)
6063 /* The first operand is implicit and must be xmm0/ymm0/zmm0. */
6064 gas_assert (i
.reg_operands
6065 && (operand_type_equal (&i
.types
[0], ®xmm
)
6066 || operand_type_equal (&i
.types
[0], ®ymm
)
6067 || operand_type_equal (&i
.types
[0], ®zmm
)));
6068 if (register_number (i
.op
[0].regs
) != 0)
6069 return bad_implicit_operand (i
.types
[0].bitfield
.regxmm
);
6071 for (j
= 1; j
< i
.operands
; j
++)
6073 i
.op
[j
- 1] = i
.op
[j
];
6074 i
.types
[j
- 1] = i
.types
[j
];
6076 /* We need to adjust fields in i.tm since they are used by
6077 build_modrm_byte. */
6078 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
6085 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
6087 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
6088 gas_assert (i
.operands
>= 2
6089 && (operand_type_equal (&i
.types
[1], ®xmm
)
6090 || operand_type_equal (&i
.types
[1], ®ymm
)
6091 || operand_type_equal (&i
.types
[1], ®zmm
)));
6092 unsigned int regnum
= register_number (i
.op
[1].regs
);
6093 unsigned int first_reg_in_group
= regnum
& ~3;
6094 unsigned int last_reg_in_group
= first_reg_in_group
+ 3;
6095 if (regnum
!= first_reg_in_group
) {
6096 as_warn (_("the second source register `%s%s' implicitly denotes"
6097 " `%s%.3s%d' to `%s%.3s%d' source group in `%s'"),
6098 register_prefix
, i
.op
[1].regs
->reg_name
,
6099 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
6100 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
6104 else if (i
.tm
.opcode_modifier
.regkludge
)
6106 /* The imul $imm, %reg instruction is converted into
6107 imul $imm, %reg, %reg, and the clr %reg instruction
6108 is converted into xor %reg, %reg. */
6110 unsigned int first_reg_op
;
6112 if (operand_type_check (i
.types
[0], reg
))
6116 /* Pretend we saw the extra register operand. */
6117 gas_assert (i
.reg_operands
== 1
6118 && i
.op
[first_reg_op
+ 1].regs
== 0);
6119 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
6120 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
6125 if (i
.tm
.opcode_modifier
.shortform
)
6127 if (i
.types
[0].bitfield
.sreg2
6128 || i
.types
[0].bitfield
.sreg3
)
6130 if (i
.tm
.base_opcode
== POP_SEG_SHORT
6131 && i
.op
[0].regs
->reg_num
== 1)
6133 as_bad (_("you can't `pop %scs'"), register_prefix
);
6136 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
6137 if ((i
.op
[0].regs
->reg_flags
& RegRex
) != 0)
6142 /* The register or float register operand is in operand
6146 if (i
.types
[0].bitfield
.floatreg
6147 || operand_type_check (i
.types
[0], reg
))
6151 /* Register goes in low 3 bits of opcode. */
6152 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
6153 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
6155 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
6157 /* Warn about some common errors, but press on regardless.
6158 The first case can be generated by gcc (<= 2.8.1). */
6159 if (i
.operands
== 2)
6161 /* Reversed arguments on faddp, fsubp, etc. */
6162 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
6163 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
6164 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
6168 /* Extraneous `l' suffix on fp insn. */
6169 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
6170 register_prefix
, i
.op
[0].regs
->reg_name
);
6175 else if (i
.tm
.opcode_modifier
.modrm
)
6177 /* The opcode is completed (modulo i.tm.extension_opcode which
6178 must be put into the modrm byte). Now, we make the modrm and
6179 index base bytes based on all the info we've collected. */
6181 default_seg
= build_modrm_byte ();
6183 else if ((i
.tm
.base_opcode
& ~0x3) == MOV_AX_DISP32
)
6187 else if (i
.tm
.opcode_modifier
.isstring
)
6189 /* For the string instructions that allow a segment override
6190 on one of their operands, the default segment is ds. */
6194 if (i
.tm
.base_opcode
== 0x8d /* lea */
6197 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
6199 /* If a segment was explicitly specified, and the specified segment
6200 is not the default, use an opcode prefix to select it. If we
6201 never figured out what the default segment is, then default_seg
6202 will be zero at this point, and the specified segment prefix will
6204 if ((i
.seg
[0]) && (i
.seg
[0] != default_seg
))
6206 if (!add_prefix (i
.seg
[0]->seg_prefix
))
6212 static const seg_entry
*
6213 build_modrm_byte (void)
6215 const seg_entry
*default_seg
= 0;
6216 unsigned int source
, dest
;
6219 /* The first operand of instructions with VEX prefix and 3 sources
6220 must be VEX_Imm4. */
6221 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
6224 unsigned int nds
, reg_slot
;
6227 if (i
.tm
.opcode_modifier
.veximmext
6228 && i
.tm
.opcode_modifier
.immext
)
6230 dest
= i
.operands
- 2;
6231 gas_assert (dest
== 3);
6234 dest
= i
.operands
- 1;
6237 /* There are 2 kinds of instructions:
6238 1. 5 operands: 4 register operands or 3 register operands
6239 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
6240 VexW0 or VexW1. The destination must be either XMM, YMM or
6242 2. 4 operands: 4 register operands or 3 register operands
6243 plus 1 memory operand, VexXDS, and VexImmExt */
6244 gas_assert ((i
.reg_operands
== 4
6245 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
6246 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6247 && (i
.tm
.opcode_modifier
.veximmext
6248 || (i
.imm_operands
== 1
6249 && i
.types
[0].bitfield
.vec_imm4
6250 && (i
.tm
.opcode_modifier
.vexw
== VEXW0
6251 || i
.tm
.opcode_modifier
.vexw
== VEXW1
)
6252 && (operand_type_equal (&i
.tm
.operand_types
[dest
], ®xmm
)
6253 || operand_type_equal (&i
.tm
.operand_types
[dest
], ®ymm
)
6254 || operand_type_equal (&i
.tm
.operand_types
[dest
], ®zmm
)))));
6256 if (i
.imm_operands
== 0)
6258 /* When there is no immediate operand, generate an 8bit
6259 immediate operand to encode the first operand. */
6260 exp
= &im_expressions
[i
.imm_operands
++];
6261 i
.op
[i
.operands
].imms
= exp
;
6262 i
.types
[i
.operands
] = imm8
;
6264 /* If VexW1 is set, the first operand is the source and
6265 the second operand is encoded in the immediate operand. */
6266 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
6277 /* FMA swaps REG and NDS. */
6278 if (i
.tm
.cpu_flags
.bitfield
.cpufma
)
6286 gas_assert (operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6288 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6290 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6292 exp
->X_op
= O_constant
;
6293 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
6294 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
6298 unsigned int imm_slot
;
6300 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
6302 /* If VexW0 is set, the third operand is the source and
6303 the second operand is encoded in the immediate
6310 /* VexW1 is set, the second operand is the source and
6311 the third operand is encoded in the immediate
6317 if (i
.tm
.opcode_modifier
.immext
)
6319 /* When ImmExt is set, the immediate byte is the last
6321 imm_slot
= i
.operands
- 1;
6329 /* Turn on Imm8 so that output_imm will generate it. */
6330 i
.types
[imm_slot
].bitfield
.imm8
= 1;
6333 gas_assert (operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6335 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6337 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6339 i
.op
[imm_slot
].imms
->X_add_number
6340 |= register_number (i
.op
[reg_slot
].regs
) << 4;
6341 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
6344 gas_assert (operand_type_equal (&i
.tm
.operand_types
[nds
], ®xmm
)
6345 || operand_type_equal (&i
.tm
.operand_types
[nds
],
6347 || operand_type_equal (&i
.tm
.operand_types
[nds
],
6349 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
6354 /* i.reg_operands MUST be the number of real register operands;
6355 implicit registers do not count. If there are 3 register
6356 operands, it must be a instruction with VexNDS. For a
6357 instruction with VexNDD, the destination register is encoded
6358 in VEX prefix. If there are 4 register operands, it must be
6359 a instruction with VEX prefix and 3 sources. */
6360 if (i
.mem_operands
== 0
6361 && ((i
.reg_operands
== 2
6362 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
6363 || (i
.reg_operands
== 3
6364 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6365 || (i
.reg_operands
== 4 && vex_3_sources
)))
6373 /* When there are 3 operands, one of them may be immediate,
6374 which may be the first or the last operand. Otherwise,
6375 the first operand must be shift count register (cl) or it
6376 is an instruction with VexNDS. */
6377 gas_assert (i
.imm_operands
== 1
6378 || (i
.imm_operands
== 0
6379 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6380 || i
.types
[0].bitfield
.shiftcount
)));
6381 if (operand_type_check (i
.types
[0], imm
)
6382 || i
.types
[0].bitfield
.shiftcount
)
6388 /* When there are 4 operands, the first two must be 8bit
6389 immediate operands. The source operand will be the 3rd
6392 For instructions with VexNDS, if the first operand
6393 an imm8, the source operand is the 2nd one. If the last
6394 operand is imm8, the source operand is the first one. */
6395 gas_assert ((i
.imm_operands
== 2
6396 && i
.types
[0].bitfield
.imm8
6397 && i
.types
[1].bitfield
.imm8
)
6398 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6399 && i
.imm_operands
== 1
6400 && (i
.types
[0].bitfield
.imm8
6401 || i
.types
[i
.operands
- 1].bitfield
.imm8
6403 if (i
.imm_operands
== 2)
6407 if (i
.types
[0].bitfield
.imm8
)
6414 if (i
.tm
.opcode_modifier
.evex
)
6416 /* For EVEX instructions, when there are 5 operands, the
6417 first one must be immediate operand. If the second one
6418 is immediate operand, the source operand is the 3th
6419 one. If the last one is immediate operand, the source
6420 operand is the 2nd one. */
6421 gas_assert (i
.imm_operands
== 2
6422 && i
.tm
.opcode_modifier
.sae
6423 && operand_type_check (i
.types
[0], imm
));
6424 if (operand_type_check (i
.types
[1], imm
))
6426 else if (operand_type_check (i
.types
[4], imm
))
6440 /* RC/SAE operand could be between DEST and SRC. That happens
6441 when one operand is GPR and the other one is XMM/YMM/ZMM
6443 if (i
.rounding
&& i
.rounding
->operand
== (int) dest
)
6446 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6448 /* For instructions with VexNDS, the register-only source
6449 operand must be 32/64bit integer, XMM, YMM or ZMM
6450 register. It is encoded in VEX prefix. We need to
6451 clear RegMem bit before calling operand_type_equal. */
6453 i386_operand_type op
;
6456 /* Check register-only source operand when two source
6457 operands are swapped. */
6458 if (!i
.tm
.operand_types
[source
].bitfield
.baseindex
6459 && i
.tm
.operand_types
[dest
].bitfield
.baseindex
)
6467 op
= i
.tm
.operand_types
[vvvv
];
6468 op
.bitfield
.regmem
= 0;
6469 if ((dest
+ 1) >= i
.operands
6470 || (!op
.bitfield
.reg32
6471 && op
.bitfield
.reg64
6472 && !operand_type_equal (&op
, ®xmm
)
6473 && !operand_type_equal (&op
, ®ymm
)
6474 && !operand_type_equal (&op
, ®zmm
)
6475 && !operand_type_equal (&op
, ®mask
)))
6477 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
6483 /* One of the register operands will be encoded in the i.tm.reg
6484 field, the other in the combined i.tm.mode and i.tm.regmem
6485 fields. If no form of this instruction supports a memory
6486 destination operand, then we assume the source operand may
6487 sometimes be a memory operand and so we need to store the
6488 destination in the i.rm.reg field. */
6489 if (!i
.tm
.operand_types
[dest
].bitfield
.regmem
6490 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
6492 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
6493 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
6494 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
6496 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
6498 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
6500 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
6505 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
6506 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
6507 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
6509 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
6511 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
6513 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
6516 if (flag_code
!= CODE_64BIT
&& (i
.rex
& (REX_R
| REX_B
)))
6518 if (!i
.types
[0].bitfield
.control
6519 && !i
.types
[1].bitfield
.control
)
6521 i
.rex
&= ~(REX_R
| REX_B
);
6522 add_prefix (LOCK_PREFIX_OPCODE
);
6526 { /* If it's not 2 reg operands... */
6531 unsigned int fake_zero_displacement
= 0;
6534 for (op
= 0; op
< i
.operands
; op
++)
6535 if (operand_type_check (i
.types
[op
], anymem
))
6537 gas_assert (op
< i
.operands
);
6539 if (i
.tm
.opcode_modifier
.vecsib
)
6541 if (i
.index_reg
->reg_num
== RegEiz
6542 || i
.index_reg
->reg_num
== RegRiz
)
6545 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6548 i
.sib
.base
= NO_BASE_REGISTER
;
6549 i
.sib
.scale
= i
.log2_scale_factor
;
6550 /* No Vec_Disp8 if there is no base. */
6551 i
.types
[op
].bitfield
.vec_disp8
= 0;
6552 i
.types
[op
].bitfield
.disp8
= 0;
6553 i
.types
[op
].bitfield
.disp16
= 0;
6554 i
.types
[op
].bitfield
.disp64
= 0;
6555 if (flag_code
!= CODE_64BIT
)
6557 /* Must be 32 bit */
6558 i
.types
[op
].bitfield
.disp32
= 1;
6559 i
.types
[op
].bitfield
.disp32s
= 0;
6563 i
.types
[op
].bitfield
.disp32
= 0;
6564 i
.types
[op
].bitfield
.disp32s
= 1;
6567 i
.sib
.index
= i
.index_reg
->reg_num
;
6568 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6570 if ((i
.index_reg
->reg_flags
& RegVRex
) != 0)
6576 if (i
.base_reg
== 0)
6579 if (!i
.disp_operands
)
6581 fake_zero_displacement
= 1;
6582 /* Instructions with VSIB byte need 32bit displacement
6583 if there is no base register. */
6584 if (i
.tm
.opcode_modifier
.vecsib
)
6585 i
.types
[op
].bitfield
.disp32
= 1;
6587 if (i
.index_reg
== 0)
6589 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6590 /* Operand is just <disp> */
6591 if (flag_code
== CODE_64BIT
)
6593 /* 64bit mode overwrites the 32bit absolute
6594 addressing by RIP relative addressing and
6595 absolute addressing is encoded by one of the
6596 redundant SIB forms. */
6597 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6598 i
.sib
.base
= NO_BASE_REGISTER
;
6599 i
.sib
.index
= NO_INDEX_REGISTER
;
6600 i
.types
[op
] = ((i
.prefix
[ADDR_PREFIX
] == 0)
6601 ? disp32s
: disp32
);
6603 else if ((flag_code
== CODE_16BIT
)
6604 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
6606 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
6607 i
.types
[op
] = disp16
;
6611 i
.rm
.regmem
= NO_BASE_REGISTER
;
6612 i
.types
[op
] = disp32
;
6615 else if (!i
.tm
.opcode_modifier
.vecsib
)
6617 /* !i.base_reg && i.index_reg */
6618 if (i
.index_reg
->reg_num
== RegEiz
6619 || i
.index_reg
->reg_num
== RegRiz
)
6620 i
.sib
.index
= NO_INDEX_REGISTER
;
6622 i
.sib
.index
= i
.index_reg
->reg_num
;
6623 i
.sib
.base
= NO_BASE_REGISTER
;
6624 i
.sib
.scale
= i
.log2_scale_factor
;
6625 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6626 /* No Vec_Disp8 if there is no base. */
6627 i
.types
[op
].bitfield
.vec_disp8
= 0;
6628 i
.types
[op
].bitfield
.disp8
= 0;
6629 i
.types
[op
].bitfield
.disp16
= 0;
6630 i
.types
[op
].bitfield
.disp64
= 0;
6631 if (flag_code
!= CODE_64BIT
)
6633 /* Must be 32 bit */
6634 i
.types
[op
].bitfield
.disp32
= 1;
6635 i
.types
[op
].bitfield
.disp32s
= 0;
6639 i
.types
[op
].bitfield
.disp32
= 0;
6640 i
.types
[op
].bitfield
.disp32s
= 1;
6642 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6646 /* RIP addressing for 64bit mode. */
6647 else if (i
.base_reg
->reg_num
== RegRip
||
6648 i
.base_reg
->reg_num
== RegEip
)
6650 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6651 i
.rm
.regmem
= NO_BASE_REGISTER
;
6652 i
.types
[op
].bitfield
.disp8
= 0;
6653 i
.types
[op
].bitfield
.disp16
= 0;
6654 i
.types
[op
].bitfield
.disp32
= 0;
6655 i
.types
[op
].bitfield
.disp32s
= 1;
6656 i
.types
[op
].bitfield
.disp64
= 0;
6657 i
.types
[op
].bitfield
.vec_disp8
= 0;
6658 i
.flags
[op
] |= Operand_PCrel
;
6659 if (! i
.disp_operands
)
6660 fake_zero_displacement
= 1;
6662 else if (i
.base_reg
->reg_type
.bitfield
.reg16
)
6664 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6665 switch (i
.base_reg
->reg_num
)
6668 if (i
.index_reg
== 0)
6670 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
6671 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
6675 if (i
.index_reg
== 0)
6678 if (operand_type_check (i
.types
[op
], disp
) == 0)
6680 /* fake (%bp) into 0(%bp) */
6681 if (i
.tm
.operand_types
[op
].bitfield
.vec_disp8
)
6682 i
.types
[op
].bitfield
.vec_disp8
= 1;
6684 i
.types
[op
].bitfield
.disp8
= 1;
6685 fake_zero_displacement
= 1;
6688 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
6689 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
6691 default: /* (%si) -> 4 or (%di) -> 5 */
6692 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
6694 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
6696 else /* i.base_reg and 32/64 bit mode */
6698 if (flag_code
== CODE_64BIT
6699 && operand_type_check (i
.types
[op
], disp
))
6701 i386_operand_type temp
;
6702 operand_type_set (&temp
, 0);
6703 temp
.bitfield
.disp8
= i
.types
[op
].bitfield
.disp8
;
6704 temp
.bitfield
.vec_disp8
6705 = i
.types
[op
].bitfield
.vec_disp8
;
6707 if (i
.prefix
[ADDR_PREFIX
] == 0)
6708 i
.types
[op
].bitfield
.disp32s
= 1;
6710 i
.types
[op
].bitfield
.disp32
= 1;
6713 if (!i
.tm
.opcode_modifier
.vecsib
)
6714 i
.rm
.regmem
= i
.base_reg
->reg_num
;
6715 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
6717 i
.sib
.base
= i
.base_reg
->reg_num
;
6718 /* x86-64 ignores REX prefix bit here to avoid decoder
6720 if (!(i
.base_reg
->reg_flags
& RegRex
)
6721 && (i
.base_reg
->reg_num
== EBP_REG_NUM
6722 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
6724 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
6726 fake_zero_displacement
= 1;
6727 if (i
.tm
.operand_types
[op
].bitfield
.vec_disp8
)
6728 i
.types
[op
].bitfield
.vec_disp8
= 1;
6730 i
.types
[op
].bitfield
.disp8
= 1;
6732 i
.sib
.scale
= i
.log2_scale_factor
;
6733 if (i
.index_reg
== 0)
6735 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6736 /* <disp>(%esp) becomes two byte modrm with no index
6737 register. We've already stored the code for esp
6738 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
6739 Any base register besides %esp will not use the
6740 extra modrm byte. */
6741 i
.sib
.index
= NO_INDEX_REGISTER
;
6743 else if (!i
.tm
.opcode_modifier
.vecsib
)
6745 if (i
.index_reg
->reg_num
== RegEiz
6746 || i
.index_reg
->reg_num
== RegRiz
)
6747 i
.sib
.index
= NO_INDEX_REGISTER
;
6749 i
.sib
.index
= i
.index_reg
->reg_num
;
6750 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6751 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6756 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
6757 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
6761 if (!fake_zero_displacement
6765 fake_zero_displacement
= 1;
6766 if (i
.disp_encoding
== disp_encoding_8bit
)
6767 i
.types
[op
].bitfield
.disp8
= 1;
6769 i
.types
[op
].bitfield
.disp32
= 1;
6771 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
6775 if (fake_zero_displacement
)
6777 /* Fakes a zero displacement assuming that i.types[op]
6778 holds the correct displacement size. */
6781 gas_assert (i
.op
[op
].disps
== 0);
6782 exp
= &disp_expressions
[i
.disp_operands
++];
6783 i
.op
[op
].disps
= exp
;
6784 exp
->X_op
= O_constant
;
6785 exp
->X_add_number
= 0;
6786 exp
->X_add_symbol
= (symbolS
*) 0;
6787 exp
->X_op_symbol
= (symbolS
*) 0;
6795 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
6797 if (operand_type_check (i
.types
[0], imm
))
6798 i
.vex
.register_specifier
= NULL
;
6801 /* VEX.vvvv encodes one of the sources when the first
6802 operand is not an immediate. */
6803 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
6804 i
.vex
.register_specifier
= i
.op
[0].regs
;
6806 i
.vex
.register_specifier
= i
.op
[1].regs
;
6809 /* Destination is a XMM register encoded in the ModRM.reg
6811 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
6812 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
6815 /* ModRM.rm and VEX.B encodes the other source. */
6816 if (!i
.mem_operands
)
6820 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
6821 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
6823 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
6825 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
6829 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
6831 i
.vex
.register_specifier
= i
.op
[2].regs
;
6832 if (!i
.mem_operands
)
6835 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
6836 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
6840 /* Fill in i.rm.reg or i.rm.regmem field with register operand
6841 (if any) based on i.tm.extension_opcode. Again, we must be
6842 careful to make sure that segment/control/debug/test/MMX
6843 registers are coded into the i.rm.reg field. */
6844 else if (i
.reg_operands
)
6847 unsigned int vex_reg
= ~0;
6849 for (op
= 0; op
< i
.operands
; op
++)
6850 if (i
.types
[op
].bitfield
.reg8
6851 || i
.types
[op
].bitfield
.reg16
6852 || i
.types
[op
].bitfield
.reg32
6853 || i
.types
[op
].bitfield
.reg64
6854 || i
.types
[op
].bitfield
.regmmx
6855 || i
.types
[op
].bitfield
.regxmm
6856 || i
.types
[op
].bitfield
.regymm
6857 || i
.types
[op
].bitfield
.regbnd
6858 || i
.types
[op
].bitfield
.regzmm
6859 || i
.types
[op
].bitfield
.regmask
6860 || i
.types
[op
].bitfield
.sreg2
6861 || i
.types
[op
].bitfield
.sreg3
6862 || i
.types
[op
].bitfield
.control
6863 || i
.types
[op
].bitfield
.debug
6864 || i
.types
[op
].bitfield
.test
)
6869 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6871 /* For instructions with VexNDS, the register-only
6872 source operand is encoded in VEX prefix. */
6873 gas_assert (mem
!= (unsigned int) ~0);
6878 gas_assert (op
< i
.operands
);
6882 /* Check register-only source operand when two source
6883 operands are swapped. */
6884 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
6885 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
6889 gas_assert (mem
== (vex_reg
+ 1)
6890 && op
< i
.operands
);
6895 gas_assert (vex_reg
< i
.operands
);
6899 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
6901 /* For instructions with VexNDD, the register destination
6902 is encoded in VEX prefix. */
6903 if (i
.mem_operands
== 0)
6905 /* There is no memory operand. */
6906 gas_assert ((op
+ 2) == i
.operands
);
6911 /* There are only 2 operands. */
6912 gas_assert (op
< 2 && i
.operands
== 2);
6917 gas_assert (op
< i
.operands
);
6919 if (vex_reg
!= (unsigned int) ~0)
6921 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
6923 if (type
->bitfield
.reg32
!= 1
6924 && type
->bitfield
.reg64
!= 1
6925 && !operand_type_equal (type
, ®xmm
)
6926 && !operand_type_equal (type
, ®ymm
)
6927 && !operand_type_equal (type
, ®zmm
)
6928 && !operand_type_equal (type
, ®mask
))
6931 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
6934 /* Don't set OP operand twice. */
6937 /* If there is an extension opcode to put here, the
6938 register number must be put into the regmem field. */
6939 if (i
.tm
.extension_opcode
!= None
)
6941 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
6942 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
6944 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
6949 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
6950 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
6952 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
6957 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6958 must set it to 3 to indicate this is a register operand
6959 in the regmem field. */
6960 if (!i
.mem_operands
)
6964 /* Fill in i.rm.reg field with extension opcode (if any). */
6965 if (i
.tm
.extension_opcode
!= None
)
6966 i
.rm
.reg
= i
.tm
.extension_opcode
;
6972 output_branch (void)
6978 relax_substateT subtype
;
6982 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
6983 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
6986 if (i
.prefix
[DATA_PREFIX
] != 0)
6992 /* Pentium4 branch hints. */
6993 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
6994 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
6999 if (i
.prefix
[REX_PREFIX
] != 0)
7005 /* BND prefixed jump. */
7006 if (i
.prefix
[BND_PREFIX
] != 0)
7008 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
7012 if (i
.prefixes
!= 0 && !intel_syntax
)
7013 as_warn (_("skipping prefixes on this instruction"));
7015 /* It's always a symbol; End frag & setup for relax.
7016 Make sure there is enough room in this frag for the largest
7017 instruction we may generate in md_convert_frag. This is 2
7018 bytes for the opcode and room for the prefix and largest
7020 frag_grow (prefix
+ 2 + 4);
7021 /* Prefix and 1 opcode byte go in fr_fix. */
7022 p
= frag_more (prefix
+ 1);
7023 if (i
.prefix
[DATA_PREFIX
] != 0)
7024 *p
++ = DATA_PREFIX_OPCODE
;
7025 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
7026 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
7027 *p
++ = i
.prefix
[SEG_PREFIX
];
7028 if (i
.prefix
[REX_PREFIX
] != 0)
7029 *p
++ = i
.prefix
[REX_PREFIX
];
7030 *p
= i
.tm
.base_opcode
;
7032 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
7033 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
7034 else if (cpu_arch_flags
.bitfield
.cpui386
)
7035 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
7037 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
7040 sym
= i
.op
[0].disps
->X_add_symbol
;
7041 off
= i
.op
[0].disps
->X_add_number
;
7043 if (i
.op
[0].disps
->X_op
!= O_constant
7044 && i
.op
[0].disps
->X_op
!= O_symbol
)
7046 /* Handle complex expressions. */
7047 sym
= make_expr_symbol (i
.op
[0].disps
);
7051 /* 1 possible extra opcode + 4 byte displacement go in var part.
7052 Pass reloc in fr_var. */
7053 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
7063 if (i
.tm
.opcode_modifier
.jumpbyte
)
7065 /* This is a loop or jecxz type instruction. */
7067 if (i
.prefix
[ADDR_PREFIX
] != 0)
7069 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE
);
7072 /* Pentium4 branch hints. */
7073 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
7074 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
7076 FRAG_APPEND_1_CHAR (i
.prefix
[SEG_PREFIX
]);
7085 if (flag_code
== CODE_16BIT
)
7088 if (i
.prefix
[DATA_PREFIX
] != 0)
7090 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE
);
7100 if (i
.prefix
[REX_PREFIX
] != 0)
7102 FRAG_APPEND_1_CHAR (i
.prefix
[REX_PREFIX
]);
7106 /* BND prefixed jump. */
7107 if (i
.prefix
[BND_PREFIX
] != 0)
7109 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
7113 if (i
.prefixes
!= 0 && !intel_syntax
)
7114 as_warn (_("skipping prefixes on this instruction"));
7116 p
= frag_more (i
.tm
.opcode_length
+ size
);
7117 switch (i
.tm
.opcode_length
)
7120 *p
++ = i
.tm
.base_opcode
>> 8;
7123 *p
++ = i
.tm
.base_opcode
;
7129 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
7130 i
.op
[0].disps
, 1, reloc (size
, 1, 1, i
.reloc
[0]));
7132 /* All jumps handled here are signed, but don't use a signed limit
7133 check for 32 and 16 bit jumps as we want to allow wrap around at
7134 4G and 64k respectively. */
7136 fixP
->fx_signed
= 1;
7140 output_interseg_jump (void)
7148 if (flag_code
== CODE_16BIT
)
7152 if (i
.prefix
[DATA_PREFIX
] != 0)
7158 if (i
.prefix
[REX_PREFIX
] != 0)
7168 if (i
.prefixes
!= 0 && !intel_syntax
)
7169 as_warn (_("skipping prefixes on this instruction"));
7171 /* 1 opcode; 2 segment; offset */
7172 p
= frag_more (prefix
+ 1 + 2 + size
);
7174 if (i
.prefix
[DATA_PREFIX
] != 0)
7175 *p
++ = DATA_PREFIX_OPCODE
;
7177 if (i
.prefix
[REX_PREFIX
] != 0)
7178 *p
++ = i
.prefix
[REX_PREFIX
];
7180 *p
++ = i
.tm
.base_opcode
;
7181 if (i
.op
[1].imms
->X_op
== O_constant
)
7183 offsetT n
= i
.op
[1].imms
->X_add_number
;
7186 && !fits_in_unsigned_word (n
)
7187 && !fits_in_signed_word (n
))
7189 as_bad (_("16-bit jump out of range"));
7192 md_number_to_chars (p
, n
, size
);
7195 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
7196 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
7197 if (i
.op
[0].imms
->X_op
!= O_constant
)
7198 as_bad (_("can't handle non absolute segment in `%s'"),
7200 md_number_to_chars (p
+ size
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
7206 fragS
*insn_start_frag
;
7207 offsetT insn_start_off
;
7209 /* Tie dwarf2 debug info to the address at the start of the insn.
7210 We can't do this after the insn has been output as the current
7211 frag may have been closed off. eg. by frag_var. */
7212 dwarf2_emit_insn (0);
7214 insn_start_frag
= frag_now
;
7215 insn_start_off
= frag_now_fix ();
7218 if (i
.tm
.opcode_modifier
.jump
)
7220 else if (i
.tm
.opcode_modifier
.jumpbyte
7221 || i
.tm
.opcode_modifier
.jumpdword
)
7223 else if (i
.tm
.opcode_modifier
.jumpintersegment
)
7224 output_interseg_jump ();
7227 /* Output normal instructions here. */
7231 unsigned int prefix
;
7234 && i
.tm
.base_opcode
== 0xfae
7236 && i
.imm_operands
== 1
7237 && (i
.op
[0].imms
->X_add_number
== 0xe8
7238 || i
.op
[0].imms
->X_add_number
== 0xf0
7239 || i
.op
[0].imms
->X_add_number
== 0xf8))
7241 /* Encode lfence, mfence, and sfence as
7242 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
7243 offsetT val
= 0x240483f0ULL
;
7245 md_number_to_chars (p
, val
, 5);
7249 /* Some processors fail on LOCK prefix. This options makes
7250 assembler ignore LOCK prefix and serves as a workaround. */
7251 if (omit_lock_prefix
)
7253 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
)
7255 i
.prefix
[LOCK_PREFIX
] = 0;
7258 /* Since the VEX/EVEX prefix contains the implicit prefix, we
7259 don't need the explicit prefix. */
7260 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
7262 switch (i
.tm
.opcode_length
)
7265 if (i
.tm
.base_opcode
& 0xff000000)
7267 prefix
= (i
.tm
.base_opcode
>> 24) & 0xff;
7272 if ((i
.tm
.base_opcode
& 0xff0000) != 0)
7274 prefix
= (i
.tm
.base_opcode
>> 16) & 0xff;
7275 if (i
.tm
.cpu_flags
.bitfield
.cpupadlock
)
7278 if (prefix
!= REPE_PREFIX_OPCODE
7279 || (i
.prefix
[REP_PREFIX
]
7280 != REPE_PREFIX_OPCODE
))
7281 add_prefix (prefix
);
7284 add_prefix (prefix
);
7293 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7294 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
7295 R_X86_64_GOTTPOFF relocation so that linker can safely
7296 perform IE->LE optimization. */
7297 if (x86_elf_abi
== X86_64_X32_ABI
7299 && i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
7300 && i
.prefix
[REX_PREFIX
] == 0)
7301 add_prefix (REX_OPCODE
);
7304 /* The prefix bytes. */
7305 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
7307 FRAG_APPEND_1_CHAR (*q
);
7311 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
7316 /* REX byte is encoded in VEX prefix. */
7320 FRAG_APPEND_1_CHAR (*q
);
7323 /* There should be no other prefixes for instructions
7328 /* For EVEX instructions i.vrex should become 0 after
7329 build_evex_prefix. For VEX instructions upper 16 registers
7330 aren't available, so VREX should be 0. */
7333 /* Now the VEX prefix. */
7334 p
= frag_more (i
.vex
.length
);
7335 for (j
= 0; j
< i
.vex
.length
; j
++)
7336 p
[j
] = i
.vex
.bytes
[j
];
7339 /* Now the opcode; be careful about word order here! */
7340 if (i
.tm
.opcode_length
== 1)
7342 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
7346 switch (i
.tm
.opcode_length
)
7350 *p
++ = (i
.tm
.base_opcode
>> 24) & 0xff;
7351 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
7355 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
7365 /* Put out high byte first: can't use md_number_to_chars! */
7366 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
7367 *p
= i
.tm
.base_opcode
& 0xff;
7370 /* Now the modrm byte and sib byte (if present). */
7371 if (i
.tm
.opcode_modifier
.modrm
)
7373 FRAG_APPEND_1_CHAR ((i
.rm
.regmem
<< 0
7376 /* If i.rm.regmem == ESP (4)
7377 && i.rm.mode != (Register mode)
7379 ==> need second modrm byte. */
7380 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
7382 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.reg16
))
7383 FRAG_APPEND_1_CHAR ((i
.sib
.base
<< 0
7385 | i
.sib
.scale
<< 6));
7388 if (i
.disp_operands
)
7389 output_disp (insn_start_frag
, insn_start_off
);
7392 output_imm (insn_start_frag
, insn_start_off
);
7398 pi ("" /*line*/, &i
);
7400 #endif /* DEBUG386 */
7403 /* Return the size of the displacement operand N. */
7406 disp_size (unsigned int n
)
7410 /* Vec_Disp8 has to be 8bit. */
7411 if (i
.types
[n
].bitfield
.vec_disp8
)
7413 else if (i
.types
[n
].bitfield
.disp64
)
7415 else if (i
.types
[n
].bitfield
.disp8
)
7417 else if (i
.types
[n
].bitfield
.disp16
)
7422 /* Return the size of the immediate operand N. */
7425 imm_size (unsigned int n
)
7428 if (i
.types
[n
].bitfield
.imm64
)
7430 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
7432 else if (i
.types
[n
].bitfield
.imm16
)
7438 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
7443 for (n
= 0; n
< i
.operands
; n
++)
7445 if (i
.types
[n
].bitfield
.vec_disp8
7446 || operand_type_check (i
.types
[n
], disp
))
7448 if (i
.op
[n
].disps
->X_op
== O_constant
)
7450 int size
= disp_size (n
);
7451 offsetT val
= i
.op
[n
].disps
->X_add_number
;
7453 if (i
.types
[n
].bitfield
.vec_disp8
)
7455 val
= offset_in_range (val
, size
);
7456 p
= frag_more (size
);
7457 md_number_to_chars (p
, val
, size
);
7461 enum bfd_reloc_code_real reloc_type
;
7462 int size
= disp_size (n
);
7463 int sign
= i
.types
[n
].bitfield
.disp32s
;
7464 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
7467 /* We can't have 8 bit displacement here. */
7468 gas_assert (!i
.types
[n
].bitfield
.disp8
);
7470 /* The PC relative address is computed relative
7471 to the instruction boundary, so in case immediate
7472 fields follows, we need to adjust the value. */
7473 if (pcrel
&& i
.imm_operands
)
7478 for (n1
= 0; n1
< i
.operands
; n1
++)
7479 if (operand_type_check (i
.types
[n1
], imm
))
7481 /* Only one immediate is allowed for PC
7482 relative address. */
7483 gas_assert (sz
== 0);
7485 i
.op
[n
].disps
->X_add_number
-= sz
;
7487 /* We should find the immediate. */
7488 gas_assert (sz
!= 0);
7491 p
= frag_more (size
);
7492 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
7494 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
7495 && (((reloc_type
== BFD_RELOC_32
7496 || reloc_type
== BFD_RELOC_X86_64_32S
7497 || (reloc_type
== BFD_RELOC_64
7499 && (i
.op
[n
].disps
->X_op
== O_symbol
7500 || (i
.op
[n
].disps
->X_op
== O_add
7501 && ((symbol_get_value_expression
7502 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
7504 || reloc_type
== BFD_RELOC_32_PCREL
))
7508 if (insn_start_frag
== frag_now
)
7509 add
= (p
- frag_now
->fr_literal
) - insn_start_off
;
7514 add
= insn_start_frag
->fr_fix
- insn_start_off
;
7515 for (fr
= insn_start_frag
->fr_next
;
7516 fr
&& fr
!= frag_now
; fr
= fr
->fr_next
)
7518 add
+= p
- frag_now
->fr_literal
;
7523 reloc_type
= BFD_RELOC_386_GOTPC
;
7524 i
.op
[n
].imms
->X_add_number
+= add
;
7526 else if (reloc_type
== BFD_RELOC_64
)
7527 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
7529 /* Don't do the adjustment for x86-64, as there
7530 the pcrel addressing is relative to the _next_
7531 insn, and that is taken care of in other code. */
7532 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
7534 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
7535 size
, i
.op
[n
].disps
, pcrel
,
7537 /* Check for "call/jmp *mem", "mov mem, %reg",
7538 "test %reg, mem" and "binop mem, %reg" where binop
7539 is one of adc, add, and, cmp, or, sbb, sub, xor
7540 instructions. Always generate R_386_GOT32X for
7541 "sym*GOT" operand in 32-bit mode. */
7542 if ((generate_relax_relocations
7545 && i
.rm
.regmem
== 5))
7547 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
7548 && ((i
.operands
== 1
7549 && i
.tm
.base_opcode
== 0xff
7550 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
7552 && (i
.tm
.base_opcode
== 0x8b
7553 || i
.tm
.base_opcode
== 0x85
7554 || (i
.tm
.base_opcode
& 0xc7) == 0x03))))
7558 fixP
->fx_tcbit
= i
.rex
!= 0;
7560 && (i
.base_reg
->reg_num
== RegRip
7561 || i
.base_reg
->reg_num
== RegEip
))
7562 fixP
->fx_tcbit2
= 1;
7565 fixP
->fx_tcbit2
= 1;
7573 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
7578 for (n
= 0; n
< i
.operands
; n
++)
7580 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
7581 if (i
.rounding
&& (int) n
== i
.rounding
->operand
)
7584 if (operand_type_check (i
.types
[n
], imm
))
7586 if (i
.op
[n
].imms
->X_op
== O_constant
)
7588 int size
= imm_size (n
);
7591 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
7593 p
= frag_more (size
);
7594 md_number_to_chars (p
, val
, size
);
7598 /* Not absolute_section.
7599 Need a 32-bit fixup (don't support 8bit
7600 non-absolute imms). Try to support other
7602 enum bfd_reloc_code_real reloc_type
;
7603 int size
= imm_size (n
);
7606 if (i
.types
[n
].bitfield
.imm32s
7607 && (i
.suffix
== QWORD_MNEM_SUFFIX
7608 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
7613 p
= frag_more (size
);
7614 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
7616 /* This is tough to explain. We end up with this one if we
7617 * have operands that look like
7618 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
7619 * obtain the absolute address of the GOT, and it is strongly
7620 * preferable from a performance point of view to avoid using
7621 * a runtime relocation for this. The actual sequence of
7622 * instructions often look something like:
7627 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
7629 * The call and pop essentially return the absolute address
7630 * of the label .L66 and store it in %ebx. The linker itself
7631 * will ultimately change the first operand of the addl so
7632 * that %ebx points to the GOT, but to keep things simple, the
7633 * .o file must have this operand set so that it generates not
7634 * the absolute address of .L66, but the absolute address of
7635 * itself. This allows the linker itself simply treat a GOTPC
7636 * relocation as asking for a pcrel offset to the GOT to be
7637 * added in, and the addend of the relocation is stored in the
7638 * operand field for the instruction itself.
7640 * Our job here is to fix the operand so that it would add
7641 * the correct offset so that %ebx would point to itself. The
7642 * thing that is tricky is that .-.L66 will point to the
7643 * beginning of the instruction, so we need to further modify
7644 * the operand so that it will point to itself. There are
7645 * other cases where you have something like:
7647 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
7649 * and here no correction would be required. Internally in
7650 * the assembler we treat operands of this form as not being
7651 * pcrel since the '.' is explicitly mentioned, and I wonder
7652 * whether it would simplify matters to do it this way. Who
7653 * knows. In earlier versions of the PIC patches, the
7654 * pcrel_adjust field was used to store the correction, but
7655 * since the expression is not pcrel, I felt it would be
7656 * confusing to do it this way. */
7658 if ((reloc_type
== BFD_RELOC_32
7659 || reloc_type
== BFD_RELOC_X86_64_32S
7660 || reloc_type
== BFD_RELOC_64
)
7662 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
7663 && (i
.op
[n
].imms
->X_op
== O_symbol
7664 || (i
.op
[n
].imms
->X_op
== O_add
7665 && ((symbol_get_value_expression
7666 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
7671 if (insn_start_frag
== frag_now
)
7672 add
= (p
- frag_now
->fr_literal
) - insn_start_off
;
7677 add
= insn_start_frag
->fr_fix
- insn_start_off
;
7678 for (fr
= insn_start_frag
->fr_next
;
7679 fr
&& fr
!= frag_now
; fr
= fr
->fr_next
)
7681 add
+= p
- frag_now
->fr_literal
;
7685 reloc_type
= BFD_RELOC_386_GOTPC
;
7687 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
7689 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
7690 i
.op
[n
].imms
->X_add_number
+= add
;
7692 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
7693 i
.op
[n
].imms
, 0, reloc_type
);
7699 /* x86_cons_fix_new is called via the expression parsing code when a
7700 reloc is needed. We use this hook to get the correct .got reloc. */
7701 static int cons_sign
= -1;
7704 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
7705 expressionS
*exp
, bfd_reloc_code_real_type r
)
7707 r
= reloc (len
, 0, cons_sign
, r
);
7710 if (exp
->X_op
== O_secrel
)
7712 exp
->X_op
= O_symbol
;
7713 r
= BFD_RELOC_32_SECREL
;
7717 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
7720 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
7721 purpose of the `.dc.a' internal pseudo-op. */
7724 x86_address_bytes (void)
7726 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
7728 return stdoutput
->arch_info
->bits_per_address
/ 8;
7731 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
7733 # define lex_got(reloc, adjust, types) NULL
7735 /* Parse operands of the form
7736 <symbol>@GOTOFF+<nnn>
7737 and similar .plt or .got references.
7739 If we find one, set up the correct relocation in RELOC and copy the
7740 input string, minus the `@GOTOFF' into a malloc'd buffer for
7741 parsing by the calling routine. Return this buffer, and if ADJUST
7742 is non-null set it to the length of the string we removed from the
7743 input line. Otherwise return NULL. */
7745 lex_got (enum bfd_reloc_code_real
*rel
,
7747 i386_operand_type
*types
)
7749 /* Some of the relocations depend on the size of what field is to
7750 be relocated. But in our callers i386_immediate and i386_displacement
7751 we don't yet know the operand size (this will be set by insn
7752 matching). Hence we record the word32 relocation here,
7753 and adjust the reloc according to the real size in reloc(). */
7754 static const struct {
7757 const enum bfd_reloc_code_real rel
[2];
7758 const i386_operand_type types64
;
7760 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7761 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
7763 OPERAND_TYPE_IMM32_64
},
7765 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
7766 BFD_RELOC_X86_64_PLTOFF64
},
7767 OPERAND_TYPE_IMM64
},
7768 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
7769 BFD_RELOC_X86_64_PLT32
},
7770 OPERAND_TYPE_IMM32_32S_DISP32
},
7771 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
7772 BFD_RELOC_X86_64_GOTPLT64
},
7773 OPERAND_TYPE_IMM64_DISP64
},
7774 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
7775 BFD_RELOC_X86_64_GOTOFF64
},
7776 OPERAND_TYPE_IMM64_DISP64
},
7777 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
7778 BFD_RELOC_X86_64_GOTPCREL
},
7779 OPERAND_TYPE_IMM32_32S_DISP32
},
7780 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
7781 BFD_RELOC_X86_64_TLSGD
},
7782 OPERAND_TYPE_IMM32_32S_DISP32
},
7783 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
7784 _dummy_first_bfd_reloc_code_real
},
7785 OPERAND_TYPE_NONE
},
7786 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
7787 BFD_RELOC_X86_64_TLSLD
},
7788 OPERAND_TYPE_IMM32_32S_DISP32
},
7789 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
7790 BFD_RELOC_X86_64_GOTTPOFF
},
7791 OPERAND_TYPE_IMM32_32S_DISP32
},
7792 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
7793 BFD_RELOC_X86_64_TPOFF32
},
7794 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7795 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
7796 _dummy_first_bfd_reloc_code_real
},
7797 OPERAND_TYPE_NONE
},
7798 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
7799 BFD_RELOC_X86_64_DTPOFF32
},
7800 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7801 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
7802 _dummy_first_bfd_reloc_code_real
},
7803 OPERAND_TYPE_NONE
},
7804 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
7805 _dummy_first_bfd_reloc_code_real
},
7806 OPERAND_TYPE_NONE
},
7807 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
7808 BFD_RELOC_X86_64_GOT32
},
7809 OPERAND_TYPE_IMM32_32S_64_DISP32
},
7810 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
7811 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
7812 OPERAND_TYPE_IMM32_32S_DISP32
},
7813 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
7814 BFD_RELOC_X86_64_TLSDESC_CALL
},
7815 OPERAND_TYPE_IMM32_32S_DISP32
},
7820 #if defined (OBJ_MAYBE_ELF)
7825 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
7826 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
7829 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
7831 int len
= gotrel
[j
].len
;
7832 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
7834 if (gotrel
[j
].rel
[object_64bit
] != 0)
7837 char *tmpbuf
, *past_reloc
;
7839 *rel
= gotrel
[j
].rel
[object_64bit
];
7843 if (flag_code
!= CODE_64BIT
)
7845 types
->bitfield
.imm32
= 1;
7846 types
->bitfield
.disp32
= 1;
7849 *types
= gotrel
[j
].types64
;
7852 if (j
!= 0 && GOT_symbol
== NULL
)
7853 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
7855 /* The length of the first part of our input line. */
7856 first
= cp
- input_line_pointer
;
7858 /* The second part goes from after the reloc token until
7859 (and including) an end_of_line char or comma. */
7860 past_reloc
= cp
+ 1 + len
;
7862 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
7864 second
= cp
+ 1 - past_reloc
;
7866 /* Allocate and copy string. The trailing NUL shouldn't
7867 be necessary, but be safe. */
7868 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
7869 memcpy (tmpbuf
, input_line_pointer
, first
);
7870 if (second
!= 0 && *past_reloc
!= ' ')
7871 /* Replace the relocation token with ' ', so that
7872 errors like foo@GOTOFF1 will be detected. */
7873 tmpbuf
[first
++] = ' ';
7875 /* Increment length by 1 if the relocation token is
7880 memcpy (tmpbuf
+ first
, past_reloc
, second
);
7881 tmpbuf
[first
+ second
] = '\0';
7885 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7886 gotrel
[j
].str
, 1 << (5 + object_64bit
));
7891 /* Might be a symbol version string. Don't as_bad here. */
7900 /* Parse operands of the form
7901 <symbol>@SECREL32+<nnn>
7903 If we find one, set up the correct relocation in RELOC and copy the
7904 input string, minus the `@SECREL32' into a malloc'd buffer for
7905 parsing by the calling routine. Return this buffer, and if ADJUST
7906 is non-null set it to the length of the string we removed from the
7907 input line. Otherwise return NULL.
7909 This function is copied from the ELF version above adjusted for PE targets. */
7912 lex_got (enum bfd_reloc_code_real
*rel ATTRIBUTE_UNUSED
,
7913 int *adjust ATTRIBUTE_UNUSED
,
7914 i386_operand_type
*types
)
7920 const enum bfd_reloc_code_real rel
[2];
7921 const i386_operand_type types64
;
7925 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
7926 BFD_RELOC_32_SECREL
},
7927 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7933 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
7934 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
7937 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
7939 int len
= gotrel
[j
].len
;
7941 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
7943 if (gotrel
[j
].rel
[object_64bit
] != 0)
7946 char *tmpbuf
, *past_reloc
;
7948 *rel
= gotrel
[j
].rel
[object_64bit
];
7954 if (flag_code
!= CODE_64BIT
)
7956 types
->bitfield
.imm32
= 1;
7957 types
->bitfield
.disp32
= 1;
7960 *types
= gotrel
[j
].types64
;
7963 /* The length of the first part of our input line. */
7964 first
= cp
- input_line_pointer
;
7966 /* The second part goes from after the reloc token until
7967 (and including) an end_of_line char or comma. */
7968 past_reloc
= cp
+ 1 + len
;
7970 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
7972 second
= cp
+ 1 - past_reloc
;
7974 /* Allocate and copy string. The trailing NUL shouldn't
7975 be necessary, but be safe. */
7976 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
7977 memcpy (tmpbuf
, input_line_pointer
, first
);
7978 if (second
!= 0 && *past_reloc
!= ' ')
7979 /* Replace the relocation token with ' ', so that
7980 errors like foo@SECLREL321 will be detected. */
7981 tmpbuf
[first
++] = ' ';
7982 memcpy (tmpbuf
+ first
, past_reloc
, second
);
7983 tmpbuf
[first
+ second
] = '\0';
7987 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7988 gotrel
[j
].str
, 1 << (5 + object_64bit
));
7993 /* Might be a symbol version string. Don't as_bad here. */
7999 bfd_reloc_code_real_type
8000 x86_cons (expressionS
*exp
, int size
)
8002 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
8004 intel_syntax
= -intel_syntax
;
8007 if (size
== 4 || (object_64bit
&& size
== 8))
8009 /* Handle @GOTOFF and the like in an expression. */
8011 char *gotfree_input_line
;
8014 save
= input_line_pointer
;
8015 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
8016 if (gotfree_input_line
)
8017 input_line_pointer
= gotfree_input_line
;
8021 if (gotfree_input_line
)
8023 /* expression () has merrily parsed up to the end of line,
8024 or a comma - in the wrong buffer. Transfer how far
8025 input_line_pointer has moved to the right buffer. */
8026 input_line_pointer
= (save
8027 + (input_line_pointer
- gotfree_input_line
)
8029 free (gotfree_input_line
);
8030 if (exp
->X_op
== O_constant
8031 || exp
->X_op
== O_absent
8032 || exp
->X_op
== O_illegal
8033 || exp
->X_op
== O_register
8034 || exp
->X_op
== O_big
)
8036 char c
= *input_line_pointer
;
8037 *input_line_pointer
= 0;
8038 as_bad (_("missing or invalid expression `%s'"), save
);
8039 *input_line_pointer
= c
;
8046 intel_syntax
= -intel_syntax
;
8049 i386_intel_simplify (exp
);
8055 signed_cons (int size
)
8057 if (flag_code
== CODE_64BIT
)
8065 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
8072 if (exp
.X_op
== O_symbol
)
8073 exp
.X_op
= O_secrel
;
8075 emit_expr (&exp
, 4);
8077 while (*input_line_pointer
++ == ',');
8079 input_line_pointer
--;
8080 demand_empty_rest_of_line ();
8084 /* Handle Vector operations. */
8087 check_VecOperations (char *op_string
, char *op_end
)
8089 const reg_entry
*mask
;
8094 && (op_end
== NULL
|| op_string
< op_end
))
8097 if (*op_string
== '{')
8101 /* Check broadcasts. */
8102 if (strncmp (op_string
, "1to", 3) == 0)
8107 goto duplicated_vec_op
;
8110 if (*op_string
== '8')
8111 bcst_type
= BROADCAST_1TO8
;
8112 else if (*op_string
== '4')
8113 bcst_type
= BROADCAST_1TO4
;
8114 else if (*op_string
== '2')
8115 bcst_type
= BROADCAST_1TO2
;
8116 else if (*op_string
== '1'
8117 && *(op_string
+1) == '6')
8119 bcst_type
= BROADCAST_1TO16
;
8124 as_bad (_("Unsupported broadcast: `%s'"), saved
);
8129 broadcast_op
.type
= bcst_type
;
8130 broadcast_op
.operand
= this_operand
;
8131 i
.broadcast
= &broadcast_op
;
8133 /* Check masking operation. */
8134 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
8136 /* k0 can't be used for write mask. */
8137 if (mask
->reg_num
== 0)
8139 as_bad (_("`%s' can't be used for write mask"),
8146 mask_op
.mask
= mask
;
8147 mask_op
.zeroing
= 0;
8148 mask_op
.operand
= this_operand
;
8154 goto duplicated_vec_op
;
8156 i
.mask
->mask
= mask
;
8158 /* Only "{z}" is allowed here. No need to check
8159 zeroing mask explicitly. */
8160 if (i
.mask
->operand
!= this_operand
)
8162 as_bad (_("invalid write mask `%s'"), saved
);
8169 /* Check zeroing-flag for masking operation. */
8170 else if (*op_string
== 'z')
8174 mask_op
.mask
= NULL
;
8175 mask_op
.zeroing
= 1;
8176 mask_op
.operand
= this_operand
;
8181 if (i
.mask
->zeroing
)
8184 as_bad (_("duplicated `%s'"), saved
);
8188 i
.mask
->zeroing
= 1;
8190 /* Only "{%k}" is allowed here. No need to check mask
8191 register explicitly. */
8192 if (i
.mask
->operand
!= this_operand
)
8194 as_bad (_("invalid zeroing-masking `%s'"),
8203 goto unknown_vec_op
;
8205 if (*op_string
!= '}')
8207 as_bad (_("missing `}' in `%s'"), saved
);
8214 /* We don't know this one. */
8215 as_bad (_("unknown vector operation: `%s'"), saved
);
8223 i386_immediate (char *imm_start
)
8225 char *save_input_line_pointer
;
8226 char *gotfree_input_line
;
8229 i386_operand_type types
;
8231 operand_type_set (&types
, ~0);
8233 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
8235 as_bad (_("at most %d immediate operands are allowed"),
8236 MAX_IMMEDIATE_OPERANDS
);
8240 exp
= &im_expressions
[i
.imm_operands
++];
8241 i
.op
[this_operand
].imms
= exp
;
8243 if (is_space_char (*imm_start
))
8246 save_input_line_pointer
= input_line_pointer
;
8247 input_line_pointer
= imm_start
;
8249 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
8250 if (gotfree_input_line
)
8251 input_line_pointer
= gotfree_input_line
;
8253 exp_seg
= expression (exp
);
8257 /* Handle vector operations. */
8258 if (*input_line_pointer
== '{')
8260 input_line_pointer
= check_VecOperations (input_line_pointer
,
8262 if (input_line_pointer
== NULL
)
8266 if (*input_line_pointer
)
8267 as_bad (_("junk `%s' after expression"), input_line_pointer
);
8269 input_line_pointer
= save_input_line_pointer
;
8270 if (gotfree_input_line
)
8272 free (gotfree_input_line
);
8274 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
8275 exp
->X_op
= O_illegal
;
8278 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
8282 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
8283 i386_operand_type types
, const char *imm_start
)
8285 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
8288 as_bad (_("missing or invalid immediate expression `%s'"),
8292 else if (exp
->X_op
== O_constant
)
8294 /* Size it properly later. */
8295 i
.types
[this_operand
].bitfield
.imm64
= 1;
8296 /* If not 64bit, sign extend val. */
8297 if (flag_code
!= CODE_64BIT
8298 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
8300 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
8302 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8303 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
8304 && exp_seg
!= absolute_section
8305 && exp_seg
!= text_section
8306 && exp_seg
!= data_section
8307 && exp_seg
!= bss_section
8308 && exp_seg
!= undefined_section
8309 && !bfd_is_com_section (exp_seg
))
8311 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
8315 else if (!intel_syntax
&& exp_seg
== reg_section
)
8318 as_bad (_("illegal immediate register operand %s"), imm_start
);
8323 /* This is an address. The size of the address will be
8324 determined later, depending on destination register,
8325 suffix, or the default for the section. */
8326 i
.types
[this_operand
].bitfield
.imm8
= 1;
8327 i
.types
[this_operand
].bitfield
.imm16
= 1;
8328 i
.types
[this_operand
].bitfield
.imm32
= 1;
8329 i
.types
[this_operand
].bitfield
.imm32s
= 1;
8330 i
.types
[this_operand
].bitfield
.imm64
= 1;
8331 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
8339 i386_scale (char *scale
)
8342 char *save
= input_line_pointer
;
8344 input_line_pointer
= scale
;
8345 val
= get_absolute_expression ();
8350 i
.log2_scale_factor
= 0;
8353 i
.log2_scale_factor
= 1;
8356 i
.log2_scale_factor
= 2;
8359 i
.log2_scale_factor
= 3;
8363 char sep
= *input_line_pointer
;
8365 *input_line_pointer
= '\0';
8366 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
8368 *input_line_pointer
= sep
;
8369 input_line_pointer
= save
;
8373 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
8375 as_warn (_("scale factor of %d without an index register"),
8376 1 << i
.log2_scale_factor
);
8377 i
.log2_scale_factor
= 0;
8379 scale
= input_line_pointer
;
8380 input_line_pointer
= save
;
8385 i386_displacement (char *disp_start
, char *disp_end
)
8389 char *save_input_line_pointer
;
8390 char *gotfree_input_line
;
8392 i386_operand_type bigdisp
, types
= anydisp
;
8395 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
8397 as_bad (_("at most %d displacement operands are allowed"),
8398 MAX_MEMORY_OPERANDS
);
8402 operand_type_set (&bigdisp
, 0);
8403 if ((i
.types
[this_operand
].bitfield
.jumpabsolute
)
8404 || (!current_templates
->start
->opcode_modifier
.jump
8405 && !current_templates
->start
->opcode_modifier
.jumpdword
))
8407 bigdisp
.bitfield
.disp32
= 1;
8408 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
8409 if (flag_code
== CODE_64BIT
)
8413 bigdisp
.bitfield
.disp32s
= 1;
8414 bigdisp
.bitfield
.disp64
= 1;
8417 else if ((flag_code
== CODE_16BIT
) ^ override
)
8419 bigdisp
.bitfield
.disp32
= 0;
8420 bigdisp
.bitfield
.disp16
= 1;
8425 /* For PC-relative branches, the width of the displacement
8426 is dependent upon data size, not address size. */
8427 override
= (i
.prefix
[DATA_PREFIX
] != 0);
8428 if (flag_code
== CODE_64BIT
)
8430 if (override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
8431 bigdisp
.bitfield
.disp16
= 1;
8434 bigdisp
.bitfield
.disp32
= 1;
8435 bigdisp
.bitfield
.disp32s
= 1;
8441 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
8443 : LONG_MNEM_SUFFIX
));
8444 bigdisp
.bitfield
.disp32
= 1;
8445 if ((flag_code
== CODE_16BIT
) ^ override
)
8447 bigdisp
.bitfield
.disp32
= 0;
8448 bigdisp
.bitfield
.disp16
= 1;
8452 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
8455 exp
= &disp_expressions
[i
.disp_operands
];
8456 i
.op
[this_operand
].disps
= exp
;
8458 save_input_line_pointer
= input_line_pointer
;
8459 input_line_pointer
= disp_start
;
8460 END_STRING_AND_SAVE (disp_end
);
8462 #ifndef GCC_ASM_O_HACK
8463 #define GCC_ASM_O_HACK 0
8466 END_STRING_AND_SAVE (disp_end
+ 1);
8467 if (i
.types
[this_operand
].bitfield
.baseIndex
8468 && displacement_string_end
[-1] == '+')
8470 /* This hack is to avoid a warning when using the "o"
8471 constraint within gcc asm statements.
8474 #define _set_tssldt_desc(n,addr,limit,type) \
8475 __asm__ __volatile__ ( \
8477 "movw %w1,2+%0\n\t" \
8479 "movb %b1,4+%0\n\t" \
8480 "movb %4,5+%0\n\t" \
8481 "movb $0,6+%0\n\t" \
8482 "movb %h1,7+%0\n\t" \
8484 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
8486 This works great except that the output assembler ends
8487 up looking a bit weird if it turns out that there is
8488 no offset. You end up producing code that looks like:
8501 So here we provide the missing zero. */
8503 *displacement_string_end
= '0';
8506 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
8507 if (gotfree_input_line
)
8508 input_line_pointer
= gotfree_input_line
;
8510 exp_seg
= expression (exp
);
8513 if (*input_line_pointer
)
8514 as_bad (_("junk `%s' after expression"), input_line_pointer
);
8516 RESTORE_END_STRING (disp_end
+ 1);
8518 input_line_pointer
= save_input_line_pointer
;
8519 if (gotfree_input_line
)
8521 free (gotfree_input_line
);
8523 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
8524 exp
->X_op
= O_illegal
;
8527 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
8529 RESTORE_END_STRING (disp_end
);
8535 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
8536 i386_operand_type types
, const char *disp_start
)
8538 i386_operand_type bigdisp
;
8541 /* We do this to make sure that the section symbol is in
8542 the symbol table. We will ultimately change the relocation
8543 to be relative to the beginning of the section. */
8544 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
8545 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
8546 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
8548 if (exp
->X_op
!= O_symbol
)
8551 if (S_IS_LOCAL (exp
->X_add_symbol
)
8552 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
8553 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
8554 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
8555 exp
->X_op
= O_subtract
;
8556 exp
->X_op_symbol
= GOT_symbol
;
8557 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
8558 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
8559 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
8560 i
.reloc
[this_operand
] = BFD_RELOC_64
;
8562 i
.reloc
[this_operand
] = BFD_RELOC_32
;
8565 else if (exp
->X_op
== O_absent
8566 || exp
->X_op
== O_illegal
8567 || exp
->X_op
== O_big
)
8570 as_bad (_("missing or invalid displacement expression `%s'"),
8575 else if (flag_code
== CODE_64BIT
8576 && !i
.prefix
[ADDR_PREFIX
]
8577 && exp
->X_op
== O_constant
)
8579 /* Since displacement is signed extended to 64bit, don't allow
8580 disp32 and turn off disp32s if they are out of range. */
8581 i
.types
[this_operand
].bitfield
.disp32
= 0;
8582 if (!fits_in_signed_long (exp
->X_add_number
))
8584 i
.types
[this_operand
].bitfield
.disp32s
= 0;
8585 if (i
.types
[this_operand
].bitfield
.baseindex
)
8587 as_bad (_("0x%lx out range of signed 32bit displacement"),
8588 (long) exp
->X_add_number
);
8594 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8595 else if (exp
->X_op
!= O_constant
8596 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
8597 && exp_seg
!= absolute_section
8598 && exp_seg
!= text_section
8599 && exp_seg
!= data_section
8600 && exp_seg
!= bss_section
8601 && exp_seg
!= undefined_section
8602 && !bfd_is_com_section (exp_seg
))
8604 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
8609 /* Check if this is a displacement only operand. */
8610 bigdisp
= i
.types
[this_operand
];
8611 bigdisp
.bitfield
.disp8
= 0;
8612 bigdisp
.bitfield
.disp16
= 0;
8613 bigdisp
.bitfield
.disp32
= 0;
8614 bigdisp
.bitfield
.disp32s
= 0;
8615 bigdisp
.bitfield
.disp64
= 0;
8616 if (operand_type_all_zero (&bigdisp
))
8617 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
8623 /* Make sure the memory operand we've been dealt is valid.
8624 Return 1 on success, 0 on a failure. */
8627 i386_index_check (const char *operand_string
)
8629 const char *kind
= "base/index";
8630 enum flag_code addr_mode
;
8632 if (i
.prefix
[ADDR_PREFIX
])
8633 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
8636 addr_mode
= flag_code
;
8638 #if INFER_ADDR_PREFIX
8639 if (i
.mem_operands
== 0)
8641 /* Infer address prefix from the first memory operand. */
8642 const reg_entry
*addr_reg
= i
.base_reg
;
8644 if (addr_reg
== NULL
)
8645 addr_reg
= i
.index_reg
;
8649 if (addr_reg
->reg_num
== RegEip
8650 || addr_reg
->reg_num
== RegEiz
8651 || addr_reg
->reg_type
.bitfield
.reg32
)
8652 addr_mode
= CODE_32BIT
;
8653 else if (flag_code
!= CODE_64BIT
8654 && addr_reg
->reg_type
.bitfield
.reg16
)
8655 addr_mode
= CODE_16BIT
;
8657 if (addr_mode
!= flag_code
)
8659 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
8661 /* Change the size of any displacement too. At most one
8662 of Disp16 or Disp32 is set.
8663 FIXME. There doesn't seem to be any real need for
8664 separate Disp16 and Disp32 flags. The same goes for
8665 Imm16 and Imm32. Removing them would probably clean
8666 up the code quite a lot. */
8667 if (flag_code
!= CODE_64BIT
8668 && (i
.types
[this_operand
].bitfield
.disp16
8669 || i
.types
[this_operand
].bitfield
.disp32
))
8670 i
.types
[this_operand
]
8671 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
8678 if (current_templates
->start
->opcode_modifier
.isstring
8679 && !current_templates
->start
->opcode_modifier
.immext
8680 && (current_templates
->end
[-1].opcode_modifier
.isstring
8683 /* Memory operands of string insns are special in that they only allow
8684 a single register (rDI, rSI, or rBX) as their memory address. */
8685 const reg_entry
*expected_reg
;
8686 static const char *di_si
[][2] =
8692 static const char *bx
[] = { "ebx", "bx", "rbx" };
8694 kind
= "string address";
8696 if (current_templates
->start
->opcode_modifier
.repprefixok
)
8698 i386_operand_type type
= current_templates
->end
[-1].operand_types
[0];
8700 if (!type
.bitfield
.baseindex
8701 || ((!i
.mem_operands
!= !intel_syntax
)
8702 && current_templates
->end
[-1].operand_types
[1]
8703 .bitfield
.baseindex
))
8704 type
= current_templates
->end
[-1].operand_types
[1];
8705 expected_reg
= hash_find (reg_hash
,
8706 di_si
[addr_mode
][type
.bitfield
.esseg
]);
8710 expected_reg
= hash_find (reg_hash
, bx
[addr_mode
]);
8712 if (i
.base_reg
!= expected_reg
8714 || operand_type_check (i
.types
[this_operand
], disp
))
8716 /* The second memory operand must have the same size as
8720 && !((addr_mode
== CODE_64BIT
8721 && i
.base_reg
->reg_type
.bitfield
.reg64
)
8722 || (addr_mode
== CODE_32BIT
8723 ? i
.base_reg
->reg_type
.bitfield
.reg32
8724 : i
.base_reg
->reg_type
.bitfield
.reg16
)))
8727 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
8729 intel_syntax
? '[' : '(',
8731 expected_reg
->reg_name
,
8732 intel_syntax
? ']' : ')');
8739 as_bad (_("`%s' is not a valid %s expression"),
8740 operand_string
, kind
);
8745 if (addr_mode
!= CODE_16BIT
)
8747 /* 32-bit/64-bit checks. */
8749 && (addr_mode
== CODE_64BIT
8750 ? !i
.base_reg
->reg_type
.bitfield
.reg64
8751 : !i
.base_reg
->reg_type
.bitfield
.reg32
)
8753 || (i
.base_reg
->reg_num
8754 != (addr_mode
== CODE_64BIT
? RegRip
: RegEip
))))
8756 && !i
.index_reg
->reg_type
.bitfield
.regxmm
8757 && !i
.index_reg
->reg_type
.bitfield
.regymm
8758 && !i
.index_reg
->reg_type
.bitfield
.regzmm
8759 && ((addr_mode
== CODE_64BIT
8760 ? !(i
.index_reg
->reg_type
.bitfield
.reg64
8761 || i
.index_reg
->reg_num
== RegRiz
)
8762 : !(i
.index_reg
->reg_type
.bitfield
.reg32
8763 || i
.index_reg
->reg_num
== RegEiz
))
8764 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
8767 /* bndmk, bndldx, and bndstx have special restrictions. */
8768 if (current_templates
->start
->base_opcode
== 0xf30f1b
8769 || (current_templates
->start
->base_opcode
& ~1) == 0x0f1a)
8771 /* They cannot use RIP-relative addressing. */
8772 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegRip
)
8774 as_bad (_("`%s' cannot be used here"), operand_string
);
8778 /* bndldx and bndstx ignore their scale factor. */
8779 if (current_templates
->start
->base_opcode
!= 0xf30f1b
8780 && i
.log2_scale_factor
)
8781 as_warn (_("register scaling is being ignored here"));
8786 /* 16-bit checks. */
8788 && (!i
.base_reg
->reg_type
.bitfield
.reg16
8789 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
8791 && (!i
.index_reg
->reg_type
.bitfield
.reg16
8792 || !i
.index_reg
->reg_type
.bitfield
.baseindex
8794 && i
.base_reg
->reg_num
< 6
8795 && i
.index_reg
->reg_num
>= 6
8796 && i
.log2_scale_factor
== 0))))
8803 /* Handle vector immediates. */
8806 RC_SAE_immediate (const char *imm_start
)
8808 unsigned int match_found
, j
;
8809 const char *pstr
= imm_start
;
8817 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
8819 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
8823 rc_op
.type
= RC_NamesTable
[j
].type
;
8824 rc_op
.operand
= this_operand
;
8825 i
.rounding
= &rc_op
;
8829 as_bad (_("duplicated `%s'"), imm_start
);
8832 pstr
+= RC_NamesTable
[j
].len
;
8842 as_bad (_("Missing '}': '%s'"), imm_start
);
8845 /* RC/SAE immediate string should contain nothing more. */;
8848 as_bad (_("Junk after '}': '%s'"), imm_start
);
8852 exp
= &im_expressions
[i
.imm_operands
++];
8853 i
.op
[this_operand
].imms
= exp
;
8855 exp
->X_op
= O_constant
;
8856 exp
->X_add_number
= 0;
8857 exp
->X_add_symbol
= (symbolS
*) 0;
8858 exp
->X_op_symbol
= (symbolS
*) 0;
8860 i
.types
[this_operand
].bitfield
.imm8
= 1;
8864 /* Only string instructions can have a second memory operand, so
8865 reduce current_templates to just those if it contains any. */
8867 maybe_adjust_templates (void)
8869 const insn_template
*t
;
8871 gas_assert (i
.mem_operands
== 1);
8873 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
8874 if (t
->opcode_modifier
.isstring
)
8877 if (t
< current_templates
->end
)
8879 static templates aux_templates
;
8880 bfd_boolean recheck
;
8882 aux_templates
.start
= t
;
8883 for (; t
< current_templates
->end
; ++t
)
8884 if (!t
->opcode_modifier
.isstring
)
8886 aux_templates
.end
= t
;
8888 /* Determine whether to re-check the first memory operand. */
8889 recheck
= (aux_templates
.start
!= current_templates
->start
8890 || t
!= current_templates
->end
);
8892 current_templates
= &aux_templates
;
8897 if (i
.memop1_string
!= NULL
8898 && i386_index_check (i
.memop1_string
) == 0)
8907 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
8911 i386_att_operand (char *operand_string
)
8915 char *op_string
= operand_string
;
8917 if (is_space_char (*op_string
))
8920 /* We check for an absolute prefix (differentiating,
8921 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
8922 if (*op_string
== ABSOLUTE_PREFIX
)
8925 if (is_space_char (*op_string
))
8927 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
8930 /* Check if operand is a register. */
8931 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
8933 i386_operand_type temp
;
8935 /* Check for a segment override by searching for ':' after a
8936 segment register. */
8938 if (is_space_char (*op_string
))
8940 if (*op_string
== ':'
8941 && (r
->reg_type
.bitfield
.sreg2
8942 || r
->reg_type
.bitfield
.sreg3
))
8947 i
.seg
[i
.mem_operands
] = &es
;
8950 i
.seg
[i
.mem_operands
] = &cs
;
8953 i
.seg
[i
.mem_operands
] = &ss
;
8956 i
.seg
[i
.mem_operands
] = &ds
;
8959 i
.seg
[i
.mem_operands
] = &fs
;
8962 i
.seg
[i
.mem_operands
] = &gs
;
8966 /* Skip the ':' and whitespace. */
8968 if (is_space_char (*op_string
))
8971 if (!is_digit_char (*op_string
)
8972 && !is_identifier_char (*op_string
)
8973 && *op_string
!= '('
8974 && *op_string
!= ABSOLUTE_PREFIX
)
8976 as_bad (_("bad memory operand `%s'"), op_string
);
8979 /* Handle case of %es:*foo. */
8980 if (*op_string
== ABSOLUTE_PREFIX
)
8983 if (is_space_char (*op_string
))
8985 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
8987 goto do_memory_reference
;
8990 /* Handle vector operations. */
8991 if (*op_string
== '{')
8993 op_string
= check_VecOperations (op_string
, NULL
);
8994 if (op_string
== NULL
)
9000 as_bad (_("junk `%s' after register"), op_string
);
9004 temp
.bitfield
.baseindex
= 0;
9005 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
9007 i
.types
[this_operand
].bitfield
.unspecified
= 0;
9008 i
.op
[this_operand
].regs
= r
;
9011 else if (*op_string
== REGISTER_PREFIX
)
9013 as_bad (_("bad register name `%s'"), op_string
);
9016 else if (*op_string
== IMMEDIATE_PREFIX
)
9019 if (i
.types
[this_operand
].bitfield
.jumpabsolute
)
9021 as_bad (_("immediate operand illegal with absolute jump"));
9024 if (!i386_immediate (op_string
))
9027 else if (RC_SAE_immediate (operand_string
))
9029 /* If it is a RC or SAE immediate, do nothing. */
9032 else if (is_digit_char (*op_string
)
9033 || is_identifier_char (*op_string
)
9034 || *op_string
== '"'
9035 || *op_string
== '(')
9037 /* This is a memory reference of some sort. */
9040 /* Start and end of displacement string expression (if found). */
9041 char *displacement_string_start
;
9042 char *displacement_string_end
;
9045 do_memory_reference
:
9046 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
9048 if ((i
.mem_operands
== 1
9049 && !current_templates
->start
->opcode_modifier
.isstring
)
9050 || i
.mem_operands
== 2)
9052 as_bad (_("too many memory references for `%s'"),
9053 current_templates
->start
->name
);
9057 /* Check for base index form. We detect the base index form by
9058 looking for an ')' at the end of the operand, searching
9059 for the '(' matching it, and finding a REGISTER_PREFIX or ','
9061 base_string
= op_string
+ strlen (op_string
);
9063 /* Handle vector operations. */
9064 vop_start
= strchr (op_string
, '{');
9065 if (vop_start
&& vop_start
< base_string
)
9067 if (check_VecOperations (vop_start
, base_string
) == NULL
)
9069 base_string
= vop_start
;
9073 if (is_space_char (*base_string
))
9076 /* If we only have a displacement, set-up for it to be parsed later. */
9077 displacement_string_start
= op_string
;
9078 displacement_string_end
= base_string
+ 1;
9080 if (*base_string
== ')')
9083 unsigned int parens_balanced
= 1;
9084 /* We've already checked that the number of left & right ()'s are
9085 equal, so this loop will not be infinite. */
9089 if (*base_string
== ')')
9091 if (*base_string
== '(')
9094 while (parens_balanced
);
9096 temp_string
= base_string
;
9098 /* Skip past '(' and whitespace. */
9100 if (is_space_char (*base_string
))
9103 if (*base_string
== ','
9104 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
9107 displacement_string_end
= temp_string
;
9109 i
.types
[this_operand
].bitfield
.baseindex
= 1;
9113 base_string
= end_op
;
9114 if (is_space_char (*base_string
))
9118 /* There may be an index reg or scale factor here. */
9119 if (*base_string
== ',')
9122 if (is_space_char (*base_string
))
9125 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
9128 base_string
= end_op
;
9129 if (is_space_char (*base_string
))
9131 if (*base_string
== ',')
9134 if (is_space_char (*base_string
))
9137 else if (*base_string
!= ')')
9139 as_bad (_("expecting `,' or `)' "
9140 "after index register in `%s'"),
9145 else if (*base_string
== REGISTER_PREFIX
)
9147 end_op
= strchr (base_string
, ',');
9150 as_bad (_("bad register name `%s'"), base_string
);
9154 /* Check for scale factor. */
9155 if (*base_string
!= ')')
9157 char *end_scale
= i386_scale (base_string
);
9162 base_string
= end_scale
;
9163 if (is_space_char (*base_string
))
9165 if (*base_string
!= ')')
9167 as_bad (_("expecting `)' "
9168 "after scale factor in `%s'"),
9173 else if (!i
.index_reg
)
9175 as_bad (_("expecting index register or scale factor "
9176 "after `,'; got '%c'"),
9181 else if (*base_string
!= ')')
9183 as_bad (_("expecting `,' or `)' "
9184 "after base register in `%s'"),
9189 else if (*base_string
== REGISTER_PREFIX
)
9191 end_op
= strchr (base_string
, ',');
9194 as_bad (_("bad register name `%s'"), base_string
);
9199 /* If there's an expression beginning the operand, parse it,
9200 assuming displacement_string_start and
9201 displacement_string_end are meaningful. */
9202 if (displacement_string_start
!= displacement_string_end
)
9204 if (!i386_displacement (displacement_string_start
,
9205 displacement_string_end
))
9209 /* Special case for (%dx) while doing input/output op. */
9211 && operand_type_equal (&i
.base_reg
->reg_type
,
9212 ®16_inoutportreg
)
9214 && i
.log2_scale_factor
== 0
9215 && i
.seg
[i
.mem_operands
] == 0
9216 && !operand_type_check (i
.types
[this_operand
], disp
))
9218 i
.types
[this_operand
] = inoutportreg
;
9222 if (i386_index_check (operand_string
) == 0)
9224 i
.types
[this_operand
].bitfield
.mem
= 1;
9225 if (i
.mem_operands
== 0)
9226 i
.memop1_string
= xstrdup (operand_string
);
9231 /* It's not a memory operand; argh! */
9232 as_bad (_("invalid char %s beginning operand %d `%s'"),
9233 output_invalid (*op_string
),
9238 return 1; /* Normal return. */
9241 /* Calculate the maximum variable size (i.e., excluding fr_fix)
9242 that an rs_machine_dependent frag may reach. */
9245 i386_frag_max_var (fragS
*frag
)
9247 /* The only relaxable frags are for jumps.
9248 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
9249 gas_assert (frag
->fr_type
== rs_machine_dependent
);
9250 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
9253 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9255 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
9257 /* STT_GNU_IFUNC symbol must go through PLT. */
9258 if ((symbol_get_bfdsym (fr_symbol
)->flags
9259 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
9262 if (!S_IS_EXTERNAL (fr_symbol
))
9263 /* Symbol may be weak or local. */
9264 return !S_IS_WEAK (fr_symbol
);
9266 /* Global symbols with non-default visibility can't be preempted. */
9267 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
9270 if (fr_var
!= NO_RELOC
)
9271 switch ((enum bfd_reloc_code_real
) fr_var
)
9273 case BFD_RELOC_386_PLT32
:
9274 case BFD_RELOC_X86_64_PLT32
:
9275 /* Symbol with PLT relocation may be preempted. */
9281 /* Global symbols with default visibility in a shared library may be
9282 preempted by another definition. */
9287 /* md_estimate_size_before_relax()
9289 Called just before relax() for rs_machine_dependent frags. The x86
9290 assembler uses these frags to handle variable size jump
9293 Any symbol that is now undefined will not become defined.
9294 Return the correct fr_subtype in the frag.
9295 Return the initial "guess for variable size of frag" to caller.
9296 The guess is actually the growth beyond the fixed part. Whatever
9297 we do to grow the fixed or variable part contributes to our
9301 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
9303 /* We've already got fragP->fr_subtype right; all we have to do is
9304 check for un-relaxable symbols. On an ELF system, we can't relax
9305 an externally visible symbol, because it may be overridden by a
9307 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
9308 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9310 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
9313 #if defined (OBJ_COFF) && defined (TE_PE)
9314 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
9315 && S_IS_WEAK (fragP
->fr_symbol
))
9319 /* Symbol is undefined in this segment, or we need to keep a
9320 reloc so that weak symbols can be overridden. */
9321 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
9322 enum bfd_reloc_code_real reloc_type
;
9323 unsigned char *opcode
;
9326 if (fragP
->fr_var
!= NO_RELOC
)
9327 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
9329 reloc_type
= BFD_RELOC_16_PCREL
;
9331 reloc_type
= BFD_RELOC_32_PCREL
;
9333 old_fr_fix
= fragP
->fr_fix
;
9334 opcode
= (unsigned char *) fragP
->fr_opcode
;
9336 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
9339 /* Make jmp (0xeb) a (d)word displacement jump. */
9341 fragP
->fr_fix
+= size
;
9342 fix_new (fragP
, old_fr_fix
, size
,
9344 fragP
->fr_offset
, 1,
9350 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
9352 /* Negate the condition, and branch past an
9353 unconditional jump. */
9356 /* Insert an unconditional jump. */
9358 /* We added two extra opcode bytes, and have a two byte
9360 fragP
->fr_fix
+= 2 + 2;
9361 fix_new (fragP
, old_fr_fix
+ 2, 2,
9363 fragP
->fr_offset
, 1,
9370 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
9375 fixP
= fix_new (fragP
, old_fr_fix
, 1,
9377 fragP
->fr_offset
, 1,
9379 fixP
->fx_signed
= 1;
9383 /* This changes the byte-displacement jump 0x7N
9384 to the (d)word-displacement jump 0x0f,0x8N. */
9385 opcode
[1] = opcode
[0] + 0x10;
9386 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
9387 /* We've added an opcode byte. */
9388 fragP
->fr_fix
+= 1 + size
;
9389 fix_new (fragP
, old_fr_fix
+ 1, size
,
9391 fragP
->fr_offset
, 1,
9396 BAD_CASE (fragP
->fr_subtype
);
9400 return fragP
->fr_fix
- old_fr_fix
;
9403 /* Guess size depending on current relax state. Initially the relax
9404 state will correspond to a short jump and we return 1, because
9405 the variable part of the frag (the branch offset) is one byte
9406 long. However, we can relax a section more than once and in that
9407 case we must either set fr_subtype back to the unrelaxed state,
9408 or return the value for the appropriate branch. */
9409 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
9412 /* Called after relax() is finished.
9414 In: Address of frag.
9415 fr_type == rs_machine_dependent.
9416 fr_subtype is what the address relaxed to.
9418 Out: Any fixSs and constants are set up.
9419 Caller will turn frag into a ".space 0". */
9422 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
9425 unsigned char *opcode
;
9426 unsigned char *where_to_put_displacement
= NULL
;
9427 offsetT target_address
;
9428 offsetT opcode_address
;
9429 unsigned int extension
= 0;
9430 offsetT displacement_from_opcode_start
;
9432 opcode
= (unsigned char *) fragP
->fr_opcode
;
9434 /* Address we want to reach in file space. */
9435 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
9437 /* Address opcode resides at in file space. */
9438 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
9440 /* Displacement from opcode start to fill into instruction. */
9441 displacement_from_opcode_start
= target_address
- opcode_address
;
9443 if ((fragP
->fr_subtype
& BIG
) == 0)
9445 /* Don't have to change opcode. */
9446 extension
= 1; /* 1 opcode + 1 displacement */
9447 where_to_put_displacement
= &opcode
[1];
9451 if (no_cond_jump_promotion
9452 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
9453 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
9454 _("long jump required"));
9456 switch (fragP
->fr_subtype
)
9458 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
9459 extension
= 4; /* 1 opcode + 4 displacement */
9461 where_to_put_displacement
= &opcode
[1];
9464 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
9465 extension
= 2; /* 1 opcode + 2 displacement */
9467 where_to_put_displacement
= &opcode
[1];
9470 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
9471 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
9472 extension
= 5; /* 2 opcode + 4 displacement */
9473 opcode
[1] = opcode
[0] + 0x10;
9474 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
9475 where_to_put_displacement
= &opcode
[2];
9478 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
9479 extension
= 3; /* 2 opcode + 2 displacement */
9480 opcode
[1] = opcode
[0] + 0x10;
9481 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
9482 where_to_put_displacement
= &opcode
[2];
9485 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
9490 where_to_put_displacement
= &opcode
[3];
9494 BAD_CASE (fragP
->fr_subtype
);
9499 /* If size if less then four we are sure that the operand fits,
9500 but if it's 4, then it could be that the displacement is larger
9502 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
9504 && ((addressT
) (displacement_from_opcode_start
- extension
9505 + ((addressT
) 1 << 31))
9506 > (((addressT
) 2 << 31) - 1)))
9508 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
9509 _("jump target out of range"));
9510 /* Make us emit 0. */
9511 displacement_from_opcode_start
= extension
;
9513 /* Now put displacement after opcode. */
9514 md_number_to_chars ((char *) where_to_put_displacement
,
9515 (valueT
) (displacement_from_opcode_start
- extension
),
9516 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
9517 fragP
->fr_fix
+= extension
;
9520 /* Apply a fixup (fixP) to segment data, once it has been determined
9521 by our caller that we have all the info we need to fix it up.
9523 Parameter valP is the pointer to the value of the bits.
9525 On the 386, immediates, displacements, and data pointers are all in
9526 the same (little-endian) format, so we don't need to care about which
9530 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
9532 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
9533 valueT value
= *valP
;
9535 #if !defined (TE_Mach)
9538 switch (fixP
->fx_r_type
)
9544 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
9547 case BFD_RELOC_X86_64_32S
:
9548 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
9551 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
9554 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
9559 if (fixP
->fx_addsy
!= NULL
9560 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
9561 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
9562 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
9563 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
9564 && !use_rela_relocations
)
9566 /* This is a hack. There should be a better way to handle this.
9567 This covers for the fact that bfd_install_relocation will
9568 subtract the current location (for partial_inplace, PC relative
9569 relocations); see more below. */
9573 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
9576 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
9578 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9581 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
9584 || (symbol_section_p (fixP
->fx_addsy
)
9585 && sym_seg
!= absolute_section
))
9586 && !generic_force_reloc (fixP
))
9588 /* Yes, we add the values in twice. This is because
9589 bfd_install_relocation subtracts them out again. I think
9590 bfd_install_relocation is broken, but I don't dare change
9592 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
9596 #if defined (OBJ_COFF) && defined (TE_PE)
9597 /* For some reason, the PE format does not store a
9598 section address offset for a PC relative symbol. */
9599 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
9600 || S_IS_WEAK (fixP
->fx_addsy
))
9601 value
+= md_pcrel_from (fixP
);
9604 #if defined (OBJ_COFF) && defined (TE_PE)
9605 if (fixP
->fx_addsy
!= NULL
9606 && S_IS_WEAK (fixP
->fx_addsy
)
9607 /* PR 16858: Do not modify weak function references. */
9608 && ! fixP
->fx_pcrel
)
9610 #if !defined (TE_PEP)
9611 /* For x86 PE weak function symbols are neither PC-relative
9612 nor do they set S_IS_FUNCTION. So the only reliable way
9613 to detect them is to check the flags of their containing
9615 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
9616 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
9620 value
-= S_GET_VALUE (fixP
->fx_addsy
);
9624 /* Fix a few things - the dynamic linker expects certain values here,
9625 and we must not disappoint it. */
9626 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9627 if (IS_ELF
&& fixP
->fx_addsy
)
9628 switch (fixP
->fx_r_type
)
9630 case BFD_RELOC_386_PLT32
:
9631 case BFD_RELOC_X86_64_PLT32
:
9632 /* Make the jump instruction point to the address of the operand. At
9633 runtime we merely add the offset to the actual PLT entry. */
9637 case BFD_RELOC_386_TLS_GD
:
9638 case BFD_RELOC_386_TLS_LDM
:
9639 case BFD_RELOC_386_TLS_IE_32
:
9640 case BFD_RELOC_386_TLS_IE
:
9641 case BFD_RELOC_386_TLS_GOTIE
:
9642 case BFD_RELOC_386_TLS_GOTDESC
:
9643 case BFD_RELOC_X86_64_TLSGD
:
9644 case BFD_RELOC_X86_64_TLSLD
:
9645 case BFD_RELOC_X86_64_GOTTPOFF
:
9646 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
9647 value
= 0; /* Fully resolved at runtime. No addend. */
9649 case BFD_RELOC_386_TLS_LE
:
9650 case BFD_RELOC_386_TLS_LDO_32
:
9651 case BFD_RELOC_386_TLS_LE_32
:
9652 case BFD_RELOC_X86_64_DTPOFF32
:
9653 case BFD_RELOC_X86_64_DTPOFF64
:
9654 case BFD_RELOC_X86_64_TPOFF32
:
9655 case BFD_RELOC_X86_64_TPOFF64
:
9656 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
9659 case BFD_RELOC_386_TLS_DESC_CALL
:
9660 case BFD_RELOC_X86_64_TLSDESC_CALL
:
9661 value
= 0; /* Fully resolved at runtime. No addend. */
9662 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
9666 case BFD_RELOC_VTABLE_INHERIT
:
9667 case BFD_RELOC_VTABLE_ENTRY
:
9674 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
9676 #endif /* !defined (TE_Mach) */
9678 /* Are we finished with this relocation now? */
9679 if (fixP
->fx_addsy
== NULL
)
9681 #if defined (OBJ_COFF) && defined (TE_PE)
9682 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
9685 /* Remember value for tc_gen_reloc. */
9686 fixP
->fx_addnumber
= value
;
9687 /* Clear out the frag for now. */
9691 else if (use_rela_relocations
)
9693 fixP
->fx_no_overflow
= 1;
9694 /* Remember value for tc_gen_reloc. */
9695 fixP
->fx_addnumber
= value
;
9699 md_number_to_chars (p
, value
, fixP
->fx_size
);
9703 md_atof (int type
, char *litP
, int *sizeP
)
9705 /* This outputs the LITTLENUMs in REVERSE order;
9706 in accord with the bigendian 386. */
9707 return ieee_md_atof (type
, litP
, sizeP
, FALSE
);
9710 static char output_invalid_buf
[sizeof (unsigned char) * 2 + 6];
9713 output_invalid (int c
)
9716 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
9719 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
9720 "(0x%x)", (unsigned char) c
);
9721 return output_invalid_buf
;
9724 /* REG_STRING starts *before* REGISTER_PREFIX. */
9726 static const reg_entry
*
9727 parse_real_register (char *reg_string
, char **end_op
)
9729 char *s
= reg_string
;
9731 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
9734 /* Skip possible REGISTER_PREFIX and possible whitespace. */
9735 if (*s
== REGISTER_PREFIX
)
9738 if (is_space_char (*s
))
9742 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
9744 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
9745 return (const reg_entry
*) NULL
;
9749 /* For naked regs, make sure that we are not dealing with an identifier.
9750 This prevents confusing an identifier like `eax_var' with register
9752 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
9753 return (const reg_entry
*) NULL
;
9757 r
= (const reg_entry
*) hash_find (reg_hash
, reg_name_given
);
9759 /* Handle floating point regs, allowing spaces in the (i) part. */
9760 if (r
== i386_regtab
/* %st is first entry of table */)
9762 if (is_space_char (*s
))
9767 if (is_space_char (*s
))
9769 if (*s
>= '0' && *s
<= '7')
9773 if (is_space_char (*s
))
9778 r
= (const reg_entry
*) hash_find (reg_hash
, "st(0)");
9783 /* We have "%st(" then garbage. */
9784 return (const reg_entry
*) NULL
;
9788 if (r
== NULL
|| allow_pseudo_reg
)
9791 if (operand_type_all_zero (&r
->reg_type
))
9792 return (const reg_entry
*) NULL
;
9794 if ((r
->reg_type
.bitfield
.reg32
9795 || r
->reg_type
.bitfield
.sreg3
9796 || r
->reg_type
.bitfield
.control
9797 || r
->reg_type
.bitfield
.debug
9798 || r
->reg_type
.bitfield
.test
)
9799 && !cpu_arch_flags
.bitfield
.cpui386
)
9800 return (const reg_entry
*) NULL
;
9802 if (r
->reg_type
.bitfield
.floatreg
9803 && !cpu_arch_flags
.bitfield
.cpu8087
9804 && !cpu_arch_flags
.bitfield
.cpu287
9805 && !cpu_arch_flags
.bitfield
.cpu387
)
9806 return (const reg_entry
*) NULL
;
9808 if (r
->reg_type
.bitfield
.regmmx
&& !cpu_arch_flags
.bitfield
.cpuregmmx
)
9809 return (const reg_entry
*) NULL
;
9811 if (r
->reg_type
.bitfield
.regxmm
&& !cpu_arch_flags
.bitfield
.cpuregxmm
)
9812 return (const reg_entry
*) NULL
;
9814 if (r
->reg_type
.bitfield
.regymm
&& !cpu_arch_flags
.bitfield
.cpuregymm
)
9815 return (const reg_entry
*) NULL
;
9817 if (r
->reg_type
.bitfield
.regzmm
&& !cpu_arch_flags
.bitfield
.cpuregzmm
)
9818 return (const reg_entry
*) NULL
;
9820 if (r
->reg_type
.bitfield
.regmask
9821 && !cpu_arch_flags
.bitfield
.cpuregmask
)
9822 return (const reg_entry
*) NULL
;
9824 /* Don't allow fake index register unless allow_index_reg isn't 0. */
9825 if (!allow_index_reg
9826 && (r
->reg_num
== RegEiz
|| r
->reg_num
== RegRiz
))
9827 return (const reg_entry
*) NULL
;
9829 /* Upper 16 vector register is only available with VREX in 64bit
9831 if ((r
->reg_flags
& RegVRex
))
9833 if (i
.vec_encoding
== vex_encoding_default
)
9834 i
.vec_encoding
= vex_encoding_evex
;
9836 if (!cpu_arch_flags
.bitfield
.cpuvrex
9837 || i
.vec_encoding
!= vex_encoding_evex
9838 || flag_code
!= CODE_64BIT
)
9839 return (const reg_entry
*) NULL
;
9842 if (((r
->reg_flags
& (RegRex64
| RegRex
))
9843 || r
->reg_type
.bitfield
.reg64
)
9844 && (!cpu_arch_flags
.bitfield
.cpulm
9845 || !operand_type_equal (&r
->reg_type
, &control
))
9846 && flag_code
!= CODE_64BIT
)
9847 return (const reg_entry
*) NULL
;
9849 if (r
->reg_type
.bitfield
.sreg3
&& r
->reg_num
== RegFlat
&& !intel_syntax
)
9850 return (const reg_entry
*) NULL
;
9855 /* REG_STRING starts *before* REGISTER_PREFIX. */
9857 static const reg_entry
*
9858 parse_register (char *reg_string
, char **end_op
)
9862 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
9863 r
= parse_real_register (reg_string
, end_op
);
9868 char *save
= input_line_pointer
;
9872 input_line_pointer
= reg_string
;
9873 c
= get_symbol_name (®_string
);
9874 symbolP
= symbol_find (reg_string
);
9875 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
9877 const expressionS
*e
= symbol_get_value_expression (symbolP
);
9879 know (e
->X_op
== O_register
);
9880 know (e
->X_add_number
>= 0
9881 && (valueT
) e
->X_add_number
< i386_regtab_size
);
9882 r
= i386_regtab
+ e
->X_add_number
;
9883 if ((r
->reg_flags
& RegVRex
))
9884 i
.vec_encoding
= vex_encoding_evex
;
9885 *end_op
= input_line_pointer
;
9887 *input_line_pointer
= c
;
9888 input_line_pointer
= save
;
9894 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
9897 char *end
= input_line_pointer
;
9900 r
= parse_register (name
, &input_line_pointer
);
9901 if (r
&& end
<= input_line_pointer
)
9903 *nextcharP
= *input_line_pointer
;
9904 *input_line_pointer
= 0;
9905 e
->X_op
= O_register
;
9906 e
->X_add_number
= r
- i386_regtab
;
9909 input_line_pointer
= end
;
9911 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
9915 md_operand (expressionS
*e
)
9920 switch (*input_line_pointer
)
9922 case REGISTER_PREFIX
:
9923 r
= parse_real_register (input_line_pointer
, &end
);
9926 e
->X_op
= O_register
;
9927 e
->X_add_number
= r
- i386_regtab
;
9928 input_line_pointer
= end
;
9933 gas_assert (intel_syntax
);
9934 end
= input_line_pointer
++;
9936 if (*input_line_pointer
== ']')
9938 ++input_line_pointer
;
9939 e
->X_op_symbol
= make_expr_symbol (e
);
9940 e
->X_add_symbol
= NULL
;
9941 e
->X_add_number
= 0;
9947 input_line_pointer
= end
;
9954 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9955 const char *md_shortopts
= "kVQ:sqn";
9957 const char *md_shortopts
= "qn";
9960 #define OPTION_32 (OPTION_MD_BASE + 0)
9961 #define OPTION_64 (OPTION_MD_BASE + 1)
9962 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
9963 #define OPTION_MARCH (OPTION_MD_BASE + 3)
9964 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
9965 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
9966 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
9967 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
9968 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
9969 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
9970 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
9971 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
9972 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
9973 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
9974 #define OPTION_X32 (OPTION_MD_BASE + 14)
9975 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
9976 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
9977 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
9978 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
9979 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
9980 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
9981 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
9982 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
9983 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
9984 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
9985 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 25)
9987 struct option md_longopts
[] =
9989 {"32", no_argument
, NULL
, OPTION_32
},
9990 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9991 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9992 {"64", no_argument
, NULL
, OPTION_64
},
9994 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9995 {"x32", no_argument
, NULL
, OPTION_X32
},
9996 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
9998 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
9999 {"march", required_argument
, NULL
, OPTION_MARCH
},
10000 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
10001 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
10002 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
10003 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
10004 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
10005 {"mold-gcc", no_argument
, NULL
, OPTION_MOLD_GCC
},
10006 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
10007 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
10008 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
10009 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
10010 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
10011 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
10012 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
10013 # if defined (TE_PE) || defined (TE_PEP)
10014 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
10016 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
10017 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
10018 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
10019 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
10020 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
10021 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
10022 {NULL
, no_argument
, NULL
, 0}
10024 size_t md_longopts_size
= sizeof (md_longopts
);
10027 md_parse_option (int c
, const char *arg
)
10030 char *arch
, *next
, *saved
;
10035 optimize_align_code
= 0;
10039 quiet_warnings
= 1;
10042 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10043 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
10044 should be emitted or not. FIXME: Not implemented. */
10048 /* -V: SVR4 argument to print version ID. */
10050 print_version_id ();
10053 /* -k: Ignore for FreeBSD compatibility. */
10058 /* -s: On i386 Solaris, this tells the native assembler to use
10059 .stab instead of .stab.excl. We always use .stab anyhow. */
10062 case OPTION_MSHARED
:
10066 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
10067 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
10070 const char **list
, **l
;
10072 list
= bfd_target_list ();
10073 for (l
= list
; *l
!= NULL
; l
++)
10074 if (CONST_STRNEQ (*l
, "elf64-x86-64")
10075 || strcmp (*l
, "coff-x86-64") == 0
10076 || strcmp (*l
, "pe-x86-64") == 0
10077 || strcmp (*l
, "pei-x86-64") == 0
10078 || strcmp (*l
, "mach-o-x86-64") == 0)
10080 default_arch
= "x86_64";
10084 as_fatal (_("no compiled in support for x86_64"));
10090 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10094 const char **list
, **l
;
10096 list
= bfd_target_list ();
10097 for (l
= list
; *l
!= NULL
; l
++)
10098 if (CONST_STRNEQ (*l
, "elf32-x86-64"))
10100 default_arch
= "x86_64:32";
10104 as_fatal (_("no compiled in support for 32bit x86_64"));
10108 as_fatal (_("32bit x86_64 is only supported for ELF"));
10113 default_arch
= "i386";
10116 case OPTION_DIVIDE
:
10117 #ifdef SVR4_COMMENT_CHARS
10122 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
10124 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
10128 i386_comment_chars
= n
;
10134 saved
= xstrdup (arg
);
10136 /* Allow -march=+nosse. */
10142 as_fatal (_("invalid -march= option: `%s'"), arg
);
10143 next
= strchr (arch
, '+');
10146 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
10148 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
10151 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
10154 cpu_arch_name
= cpu_arch
[j
].name
;
10155 cpu_sub_arch_name
= NULL
;
10156 cpu_arch_flags
= cpu_arch
[j
].flags
;
10157 cpu_arch_isa
= cpu_arch
[j
].type
;
10158 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
10159 if (!cpu_arch_tune_set
)
10161 cpu_arch_tune
= cpu_arch_isa
;
10162 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
10166 else if (*cpu_arch
[j
].name
== '.'
10167 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
10169 /* ISA extension. */
10170 i386_cpu_flags flags
;
10172 flags
= cpu_flags_or (cpu_arch_flags
,
10173 cpu_arch
[j
].flags
);
10175 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
10177 if (cpu_sub_arch_name
)
10179 char *name
= cpu_sub_arch_name
;
10180 cpu_sub_arch_name
= concat (name
,
10182 (const char *) NULL
);
10186 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
10187 cpu_arch_flags
= flags
;
10188 cpu_arch_isa_flags
= flags
;
10194 if (j
>= ARRAY_SIZE (cpu_arch
))
10196 /* Disable an ISA extension. */
10197 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
10198 if (strcmp (arch
, cpu_noarch
[j
].name
) == 0)
10200 i386_cpu_flags flags
;
10202 flags
= cpu_flags_and_not (cpu_arch_flags
,
10203 cpu_noarch
[j
].flags
);
10204 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
10206 if (cpu_sub_arch_name
)
10208 char *name
= cpu_sub_arch_name
;
10209 cpu_sub_arch_name
= concat (arch
,
10210 (const char *) NULL
);
10214 cpu_sub_arch_name
= xstrdup (arch
);
10215 cpu_arch_flags
= flags
;
10216 cpu_arch_isa_flags
= flags
;
10221 if (j
>= ARRAY_SIZE (cpu_noarch
))
10222 j
= ARRAY_SIZE (cpu_arch
);
10225 if (j
>= ARRAY_SIZE (cpu_arch
))
10226 as_fatal (_("invalid -march= option: `%s'"), arg
);
10230 while (next
!= NULL
);
10236 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
10237 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
10239 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
10241 cpu_arch_tune_set
= 1;
10242 cpu_arch_tune
= cpu_arch
[j
].type
;
10243 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
10247 if (j
>= ARRAY_SIZE (cpu_arch
))
10248 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
10251 case OPTION_MMNEMONIC
:
10252 if (strcasecmp (arg
, "att") == 0)
10253 intel_mnemonic
= 0;
10254 else if (strcasecmp (arg
, "intel") == 0)
10255 intel_mnemonic
= 1;
10257 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
10260 case OPTION_MSYNTAX
:
10261 if (strcasecmp (arg
, "att") == 0)
10263 else if (strcasecmp (arg
, "intel") == 0)
10266 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
10269 case OPTION_MINDEX_REG
:
10270 allow_index_reg
= 1;
10273 case OPTION_MNAKED_REG
:
10274 allow_naked_reg
= 1;
10277 case OPTION_MOLD_GCC
:
10281 case OPTION_MSSE2AVX
:
10285 case OPTION_MSSE_CHECK
:
10286 if (strcasecmp (arg
, "error") == 0)
10287 sse_check
= check_error
;
10288 else if (strcasecmp (arg
, "warning") == 0)
10289 sse_check
= check_warning
;
10290 else if (strcasecmp (arg
, "none") == 0)
10291 sse_check
= check_none
;
10293 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
10296 case OPTION_MOPERAND_CHECK
:
10297 if (strcasecmp (arg
, "error") == 0)
10298 operand_check
= check_error
;
10299 else if (strcasecmp (arg
, "warning") == 0)
10300 operand_check
= check_warning
;
10301 else if (strcasecmp (arg
, "none") == 0)
10302 operand_check
= check_none
;
10304 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
10307 case OPTION_MAVXSCALAR
:
10308 if (strcasecmp (arg
, "128") == 0)
10309 avxscalar
= vex128
;
10310 else if (strcasecmp (arg
, "256") == 0)
10311 avxscalar
= vex256
;
10313 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
10316 case OPTION_MADD_BND_PREFIX
:
10317 add_bnd_prefix
= 1;
10320 case OPTION_MEVEXLIG
:
10321 if (strcmp (arg
, "128") == 0)
10322 evexlig
= evexl128
;
10323 else if (strcmp (arg
, "256") == 0)
10324 evexlig
= evexl256
;
10325 else if (strcmp (arg
, "512") == 0)
10326 evexlig
= evexl512
;
10328 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
10331 case OPTION_MEVEXRCIG
:
10332 if (strcmp (arg
, "rne") == 0)
10334 else if (strcmp (arg
, "rd") == 0)
10336 else if (strcmp (arg
, "ru") == 0)
10338 else if (strcmp (arg
, "rz") == 0)
10341 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
10344 case OPTION_MEVEXWIG
:
10345 if (strcmp (arg
, "0") == 0)
10347 else if (strcmp (arg
, "1") == 0)
10350 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
10353 # if defined (TE_PE) || defined (TE_PEP)
10354 case OPTION_MBIG_OBJ
:
10359 case OPTION_MOMIT_LOCK_PREFIX
:
10360 if (strcasecmp (arg
, "yes") == 0)
10361 omit_lock_prefix
= 1;
10362 else if (strcasecmp (arg
, "no") == 0)
10363 omit_lock_prefix
= 0;
10365 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
10368 case OPTION_MFENCE_AS_LOCK_ADD
:
10369 if (strcasecmp (arg
, "yes") == 0)
10371 else if (strcasecmp (arg
, "no") == 0)
10374 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
10377 case OPTION_MRELAX_RELOCATIONS
:
10378 if (strcasecmp (arg
, "yes") == 0)
10379 generate_relax_relocations
= 1;
10380 else if (strcasecmp (arg
, "no") == 0)
10381 generate_relax_relocations
= 0;
10383 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
10386 case OPTION_MAMD64
:
10390 case OPTION_MINTEL64
:
10400 #define MESSAGE_TEMPLATE \
10404 output_message (FILE *stream
, char *p
, char *message
, char *start
,
10405 int *left_p
, const char *name
, int len
)
10407 int size
= sizeof (MESSAGE_TEMPLATE
);
10408 int left
= *left_p
;
10410 /* Reserve 2 spaces for ", " or ",\0" */
10413 /* Check if there is any room. */
10421 p
= mempcpy (p
, name
, len
);
10425 /* Output the current message now and start a new one. */
10428 fprintf (stream
, "%s\n", message
);
10430 left
= size
- (start
- message
) - len
- 2;
10432 gas_assert (left
>= 0);
10434 p
= mempcpy (p
, name
, len
);
10442 show_arch (FILE *stream
, int ext
, int check
)
10444 static char message
[] = MESSAGE_TEMPLATE
;
10445 char *start
= message
+ 27;
10447 int size
= sizeof (MESSAGE_TEMPLATE
);
10454 left
= size
- (start
- message
);
10455 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
10457 /* Should it be skipped? */
10458 if (cpu_arch
[j
].skip
)
10461 name
= cpu_arch
[j
].name
;
10462 len
= cpu_arch
[j
].len
;
10465 /* It is an extension. Skip if we aren't asked to show it. */
10476 /* It is an processor. Skip if we show only extension. */
10479 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
10481 /* It is an impossible processor - skip. */
10485 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
10488 /* Display disabled extensions. */
10490 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
10492 name
= cpu_noarch
[j
].name
;
10493 len
= cpu_noarch
[j
].len
;
10494 p
= output_message (stream
, p
, message
, start
, &left
, name
,
10499 fprintf (stream
, "%s\n", message
);
10503 md_show_usage (FILE *stream
)
10505 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10506 fprintf (stream
, _("\
10508 -V print assembler version number\n\
10511 fprintf (stream
, _("\
10512 -n Do not optimize code alignment\n\
10513 -q quieten some warnings\n"));
10514 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10515 fprintf (stream
, _("\
10518 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
10519 || defined (TE_PE) || defined (TE_PEP))
10520 fprintf (stream
, _("\
10521 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
10523 #ifdef SVR4_COMMENT_CHARS
10524 fprintf (stream
, _("\
10525 --divide do not treat `/' as a comment character\n"));
10527 fprintf (stream
, _("\
10528 --divide ignored\n"));
10530 fprintf (stream
, _("\
10531 -march=CPU[,+EXTENSION...]\n\
10532 generate code for CPU and EXTENSION, CPU is one of:\n"));
10533 show_arch (stream
, 0, 1);
10534 fprintf (stream
, _("\
10535 EXTENSION is combination of:\n"));
10536 show_arch (stream
, 1, 0);
10537 fprintf (stream
, _("\
10538 -mtune=CPU optimize for CPU, CPU is one of:\n"));
10539 show_arch (stream
, 0, 0);
10540 fprintf (stream
, _("\
10541 -msse2avx encode SSE instructions with VEX prefix\n"));
10542 fprintf (stream
, _("\
10543 -msse-check=[none|error|warning]\n\
10544 check SSE instructions\n"));
10545 fprintf (stream
, _("\
10546 -moperand-check=[none|error|warning]\n\
10547 check operand combinations for validity\n"));
10548 fprintf (stream
, _("\
10549 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
10551 fprintf (stream
, _("\
10552 -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
10554 fprintf (stream
, _("\
10555 -mevexwig=[0|1] encode EVEX instructions with specific EVEX.W value\n\
10556 for EVEX.W bit ignored instructions\n"));
10557 fprintf (stream
, _("\
10558 -mevexrcig=[rne|rd|ru|rz]\n\
10559 encode EVEX instructions with specific EVEX.RC value\n\
10560 for SAE-only ignored instructions\n"));
10561 fprintf (stream
, _("\
10562 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
10563 fprintf (stream
, _("\
10564 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
10565 fprintf (stream
, _("\
10566 -mindex-reg support pseudo index registers\n"));
10567 fprintf (stream
, _("\
10568 -mnaked-reg don't require `%%' prefix for registers\n"));
10569 fprintf (stream
, _("\
10570 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
10571 fprintf (stream
, _("\
10572 -madd-bnd-prefix add BND prefix for all valid branches\n"));
10573 fprintf (stream
, _("\
10574 -mshared disable branch optimization for shared code\n"));
10575 # if defined (TE_PE) || defined (TE_PEP)
10576 fprintf (stream
, _("\
10577 -mbig-obj generate big object files\n"));
10579 fprintf (stream
, _("\
10580 -momit-lock-prefix=[no|yes]\n\
10581 strip all lock prefixes\n"));
10582 fprintf (stream
, _("\
10583 -mfence-as-lock-add=[no|yes]\n\
10584 encode lfence, mfence and sfence as\n\
10585 lock addl $0x0, (%%{re}sp)\n"));
10586 fprintf (stream
, _("\
10587 -mrelax-relocations=[no|yes]\n\
10588 generate relax relocations\n"));
10589 fprintf (stream
, _("\
10590 -mamd64 accept only AMD64 ISA\n"));
10591 fprintf (stream
, _("\
10592 -mintel64 accept only Intel64 ISA\n"));
10595 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
10596 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
10597 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
10599 /* Pick the target format to use. */
10602 i386_target_format (void)
10604 if (!strncmp (default_arch
, "x86_64", 6))
10606 update_code_flag (CODE_64BIT
, 1);
10607 if (default_arch
[6] == '\0')
10608 x86_elf_abi
= X86_64_ABI
;
10610 x86_elf_abi
= X86_64_X32_ABI
;
10612 else if (!strcmp (default_arch
, "i386"))
10613 update_code_flag (CODE_32BIT
, 1);
10614 else if (!strcmp (default_arch
, "iamcu"))
10616 update_code_flag (CODE_32BIT
, 1);
10617 if (cpu_arch_isa
== PROCESSOR_UNKNOWN
)
10619 static const i386_cpu_flags iamcu_flags
= CPU_IAMCU_FLAGS
;
10620 cpu_arch_name
= "iamcu";
10621 cpu_sub_arch_name
= NULL
;
10622 cpu_arch_flags
= iamcu_flags
;
10623 cpu_arch_isa
= PROCESSOR_IAMCU
;
10624 cpu_arch_isa_flags
= iamcu_flags
;
10625 if (!cpu_arch_tune_set
)
10627 cpu_arch_tune
= cpu_arch_isa
;
10628 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
10631 else if (cpu_arch_isa
!= PROCESSOR_IAMCU
)
10632 as_fatal (_("Intel MCU doesn't support `%s' architecture"),
10636 as_fatal (_("unknown architecture"));
10638 if (cpu_flags_all_zero (&cpu_arch_isa_flags
))
10639 cpu_arch_isa_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
10640 if (cpu_flags_all_zero (&cpu_arch_tune_flags
))
10641 cpu_arch_tune_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
10643 switch (OUTPUT_FLAVOR
)
10645 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
10646 case bfd_target_aout_flavour
:
10647 return AOUT_TARGET_FORMAT
;
10649 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
10650 # if defined (TE_PE) || defined (TE_PEP)
10651 case bfd_target_coff_flavour
:
10652 if (flag_code
== CODE_64BIT
)
10653 return use_big_obj
? "pe-bigobj-x86-64" : "pe-x86-64";
10656 # elif defined (TE_GO32)
10657 case bfd_target_coff_flavour
:
10658 return "coff-go32";
10660 case bfd_target_coff_flavour
:
10661 return "coff-i386";
10664 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
10665 case bfd_target_elf_flavour
:
10667 const char *format
;
10669 switch (x86_elf_abi
)
10672 format
= ELF_TARGET_FORMAT
;
10675 use_rela_relocations
= 1;
10677 format
= ELF_TARGET_FORMAT64
;
10679 case X86_64_X32_ABI
:
10680 use_rela_relocations
= 1;
10682 disallow_64bit_reloc
= 1;
10683 format
= ELF_TARGET_FORMAT32
;
10686 if (cpu_arch_isa
== PROCESSOR_L1OM
)
10688 if (x86_elf_abi
!= X86_64_ABI
)
10689 as_fatal (_("Intel L1OM is 64bit only"));
10690 return ELF_TARGET_L1OM_FORMAT
;
10692 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
10694 if (x86_elf_abi
!= X86_64_ABI
)
10695 as_fatal (_("Intel K1OM is 64bit only"));
10696 return ELF_TARGET_K1OM_FORMAT
;
10698 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
10700 if (x86_elf_abi
!= I386_ABI
)
10701 as_fatal (_("Intel MCU is 32bit only"));
10702 return ELF_TARGET_IAMCU_FORMAT
;
10708 #if defined (OBJ_MACH_O)
10709 case bfd_target_mach_o_flavour
:
10710 if (flag_code
== CODE_64BIT
)
10712 use_rela_relocations
= 1;
10714 return "mach-o-x86-64";
10717 return "mach-o-i386";
10725 #endif /* OBJ_MAYBE_ more than one */
10728 md_undefined_symbol (char *name
)
10730 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
10731 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
10732 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
10733 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
10737 if (symbol_find (name
))
10738 as_bad (_("GOT already in symbol table"));
10739 GOT_symbol
= symbol_new (name
, undefined_section
,
10740 (valueT
) 0, &zero_address_frag
);
10747 /* Round up a section size to the appropriate boundary. */
10750 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
10752 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10753 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
10755 /* For a.out, force the section size to be aligned. If we don't do
10756 this, BFD will align it for us, but it will not write out the
10757 final bytes of the section. This may be a bug in BFD, but it is
10758 easier to fix it here since that is how the other a.out targets
10762 align
= bfd_get_section_alignment (stdoutput
, segment
);
10763 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
10770 /* On the i386, PC-relative offsets are relative to the start of the
10771 next instruction. That is, the address of the offset, plus its
10772 size, since the offset is always the last part of the insn. */
10775 md_pcrel_from (fixS
*fixP
)
10777 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
10783 s_bss (int ignore ATTRIBUTE_UNUSED
)
10787 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10789 obj_elf_section_change_hook ();
10791 temp
= get_absolute_expression ();
10792 subseg_set (bss_section
, (subsegT
) temp
);
10793 demand_empty_rest_of_line ();
10799 i386_validate_fix (fixS
*fixp
)
10801 if (fixp
->fx_subsy
)
10803 if (fixp
->fx_subsy
== GOT_symbol
)
10805 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
10809 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10810 if (fixp
->fx_tcbit2
)
10811 fixp
->fx_r_type
= (fixp
->fx_tcbit
10812 ? BFD_RELOC_X86_64_REX_GOTPCRELX
10813 : BFD_RELOC_X86_64_GOTPCRELX
);
10816 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
10821 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
10823 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
10825 fixp
->fx_subsy
= 0;
10828 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10829 else if (!object_64bit
)
10831 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
10832 && fixp
->fx_tcbit2
)
10833 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
10839 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
10842 bfd_reloc_code_real_type code
;
10844 switch (fixp
->fx_r_type
)
10846 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10847 case BFD_RELOC_SIZE32
:
10848 case BFD_RELOC_SIZE64
:
10849 if (S_IS_DEFINED (fixp
->fx_addsy
)
10850 && !S_IS_EXTERNAL (fixp
->fx_addsy
))
10852 /* Resolve size relocation against local symbol to size of
10853 the symbol plus addend. */
10854 valueT value
= S_GET_SIZE (fixp
->fx_addsy
) + fixp
->fx_offset
;
10855 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
10856 && !fits_in_unsigned_long (value
))
10857 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10858 _("symbol size computation overflow"));
10859 fixp
->fx_addsy
= NULL
;
10860 fixp
->fx_subsy
= NULL
;
10861 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
10865 /* Fall through. */
10867 case BFD_RELOC_X86_64_PLT32
:
10868 case BFD_RELOC_X86_64_GOT32
:
10869 case BFD_RELOC_X86_64_GOTPCREL
:
10870 case BFD_RELOC_X86_64_GOTPCRELX
:
10871 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
10872 case BFD_RELOC_386_PLT32
:
10873 case BFD_RELOC_386_GOT32
:
10874 case BFD_RELOC_386_GOT32X
:
10875 case BFD_RELOC_386_GOTOFF
:
10876 case BFD_RELOC_386_GOTPC
:
10877 case BFD_RELOC_386_TLS_GD
:
10878 case BFD_RELOC_386_TLS_LDM
:
10879 case BFD_RELOC_386_TLS_LDO_32
:
10880 case BFD_RELOC_386_TLS_IE_32
:
10881 case BFD_RELOC_386_TLS_IE
:
10882 case BFD_RELOC_386_TLS_GOTIE
:
10883 case BFD_RELOC_386_TLS_LE_32
:
10884 case BFD_RELOC_386_TLS_LE
:
10885 case BFD_RELOC_386_TLS_GOTDESC
:
10886 case BFD_RELOC_386_TLS_DESC_CALL
:
10887 case BFD_RELOC_X86_64_TLSGD
:
10888 case BFD_RELOC_X86_64_TLSLD
:
10889 case BFD_RELOC_X86_64_DTPOFF32
:
10890 case BFD_RELOC_X86_64_DTPOFF64
:
10891 case BFD_RELOC_X86_64_GOTTPOFF
:
10892 case BFD_RELOC_X86_64_TPOFF32
:
10893 case BFD_RELOC_X86_64_TPOFF64
:
10894 case BFD_RELOC_X86_64_GOTOFF64
:
10895 case BFD_RELOC_X86_64_GOTPC32
:
10896 case BFD_RELOC_X86_64_GOT64
:
10897 case BFD_RELOC_X86_64_GOTPCREL64
:
10898 case BFD_RELOC_X86_64_GOTPC64
:
10899 case BFD_RELOC_X86_64_GOTPLT64
:
10900 case BFD_RELOC_X86_64_PLTOFF64
:
10901 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10902 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10903 case BFD_RELOC_RVA
:
10904 case BFD_RELOC_VTABLE_ENTRY
:
10905 case BFD_RELOC_VTABLE_INHERIT
:
10907 case BFD_RELOC_32_SECREL
:
10909 code
= fixp
->fx_r_type
;
10911 case BFD_RELOC_X86_64_32S
:
10912 if (!fixp
->fx_pcrel
)
10914 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
10915 code
= fixp
->fx_r_type
;
10918 /* Fall through. */
10920 if (fixp
->fx_pcrel
)
10922 switch (fixp
->fx_size
)
10925 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10926 _("can not do %d byte pc-relative relocation"),
10928 code
= BFD_RELOC_32_PCREL
;
10930 case 1: code
= BFD_RELOC_8_PCREL
; break;
10931 case 2: code
= BFD_RELOC_16_PCREL
; break;
10932 case 4: code
= BFD_RELOC_32_PCREL
; break;
10934 case 8: code
= BFD_RELOC_64_PCREL
; break;
10940 switch (fixp
->fx_size
)
10943 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10944 _("can not do %d byte relocation"),
10946 code
= BFD_RELOC_32
;
10948 case 1: code
= BFD_RELOC_8
; break;
10949 case 2: code
= BFD_RELOC_16
; break;
10950 case 4: code
= BFD_RELOC_32
; break;
10952 case 8: code
= BFD_RELOC_64
; break;
10959 if ((code
== BFD_RELOC_32
10960 || code
== BFD_RELOC_32_PCREL
10961 || code
== BFD_RELOC_X86_64_32S
)
10963 && fixp
->fx_addsy
== GOT_symbol
)
10966 code
= BFD_RELOC_386_GOTPC
;
10968 code
= BFD_RELOC_X86_64_GOTPC32
;
10970 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
10972 && fixp
->fx_addsy
== GOT_symbol
)
10974 code
= BFD_RELOC_X86_64_GOTPC64
;
10977 rel
= XNEW (arelent
);
10978 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
10979 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
10981 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
10983 if (!use_rela_relocations
)
10985 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
10986 vtable entry to be used in the relocation's section offset. */
10987 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
10988 rel
->address
= fixp
->fx_offset
;
10989 #if defined (OBJ_COFF) && defined (TE_PE)
10990 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
10991 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
10996 /* Use the rela in 64bit mode. */
10999 if (disallow_64bit_reloc
)
11002 case BFD_RELOC_X86_64_DTPOFF64
:
11003 case BFD_RELOC_X86_64_TPOFF64
:
11004 case BFD_RELOC_64_PCREL
:
11005 case BFD_RELOC_X86_64_GOTOFF64
:
11006 case BFD_RELOC_X86_64_GOT64
:
11007 case BFD_RELOC_X86_64_GOTPCREL64
:
11008 case BFD_RELOC_X86_64_GOTPC64
:
11009 case BFD_RELOC_X86_64_GOTPLT64
:
11010 case BFD_RELOC_X86_64_PLTOFF64
:
11011 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
11012 _("cannot represent relocation type %s in x32 mode"),
11013 bfd_get_reloc_code_name (code
));
11019 if (!fixp
->fx_pcrel
)
11020 rel
->addend
= fixp
->fx_offset
;
11024 case BFD_RELOC_X86_64_PLT32
:
11025 case BFD_RELOC_X86_64_GOT32
:
11026 case BFD_RELOC_X86_64_GOTPCREL
:
11027 case BFD_RELOC_X86_64_GOTPCRELX
:
11028 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
11029 case BFD_RELOC_X86_64_TLSGD
:
11030 case BFD_RELOC_X86_64_TLSLD
:
11031 case BFD_RELOC_X86_64_GOTTPOFF
:
11032 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
11033 case BFD_RELOC_X86_64_TLSDESC_CALL
:
11034 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
11037 rel
->addend
= (section
->vma
11039 + fixp
->fx_addnumber
11040 + md_pcrel_from (fixp
));
11045 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
11046 if (rel
->howto
== NULL
)
11048 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
11049 _("cannot represent relocation type %s"),
11050 bfd_get_reloc_code_name (code
));
11051 /* Set howto to a garbage value so that we can keep going. */
11052 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
11053 gas_assert (rel
->howto
!= NULL
);
11059 #include "tc-i386-intel.c"
11062 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
11064 int saved_naked_reg
;
11065 char saved_register_dot
;
11067 saved_naked_reg
= allow_naked_reg
;
11068 allow_naked_reg
= 1;
11069 saved_register_dot
= register_chars
['.'];
11070 register_chars
['.'] = '.';
11071 allow_pseudo_reg
= 1;
11072 expression_and_evaluate (exp
);
11073 allow_pseudo_reg
= 0;
11074 register_chars
['.'] = saved_register_dot
;
11075 allow_naked_reg
= saved_naked_reg
;
11077 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
11079 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
11081 exp
->X_op
= O_constant
;
11082 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
11083 .dw2_regnum
[flag_code
>> 1];
11086 exp
->X_op
= O_illegal
;
11091 tc_x86_frame_initial_instructions (void)
11093 static unsigned int sp_regno
[2];
11095 if (!sp_regno
[flag_code
>> 1])
11097 char *saved_input
= input_line_pointer
;
11098 char sp
[][4] = {"esp", "rsp"};
11101 input_line_pointer
= sp
[flag_code
>> 1];
11102 tc_x86_parse_to_dw2regnum (&exp
);
11103 gas_assert (exp
.X_op
== O_constant
);
11104 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
11105 input_line_pointer
= saved_input
;
11108 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
11109 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
11113 x86_dwarf2_addr_size (void)
11115 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
11116 if (x86_elf_abi
== X86_64_X32_ABI
)
11119 return bfd_arch_bits_per_address (stdoutput
) / 8;
11123 i386_elf_section_type (const char *str
, size_t len
)
11125 if (flag_code
== CODE_64BIT
11126 && len
== sizeof ("unwind") - 1
11127 && strncmp (str
, "unwind", 6) == 0)
11128 return SHT_X86_64_UNWIND
;
11135 i386_solaris_fix_up_eh_frame (segT sec
)
11137 if (flag_code
== CODE_64BIT
)
11138 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
11144 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
11148 exp
.X_op
= O_secrel
;
11149 exp
.X_add_symbol
= symbol
;
11150 exp
.X_add_number
= 0;
11151 emit_expr (&exp
, size
);
11155 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11156 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
11159 x86_64_section_letter (int letter
, const char **ptr_msg
)
11161 if (flag_code
== CODE_64BIT
)
11164 return SHF_X86_64_LARGE
;
11166 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
11169 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
11174 x86_64_section_word (char *str
, size_t len
)
11176 if (len
== 5 && flag_code
== CODE_64BIT
&& CONST_STRNEQ (str
, "large"))
11177 return SHF_X86_64_LARGE
;
11183 handle_large_common (int small ATTRIBUTE_UNUSED
)
11185 if (flag_code
!= CODE_64BIT
)
11187 s_comm_internal (0, elf_common_parse
);
11188 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
11192 static segT lbss_section
;
11193 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
11194 asection
*saved_bss_section
= bss_section
;
11196 if (lbss_section
== NULL
)
11198 flagword applicable
;
11199 segT seg
= now_seg
;
11200 subsegT subseg
= now_subseg
;
11202 /* The .lbss section is for local .largecomm symbols. */
11203 lbss_section
= subseg_new (".lbss", 0);
11204 applicable
= bfd_applicable_section_flags (stdoutput
);
11205 bfd_set_section_flags (stdoutput
, lbss_section
,
11206 applicable
& SEC_ALLOC
);
11207 seg_info (lbss_section
)->bss
= 1;
11209 subseg_set (seg
, subseg
);
11212 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
11213 bss_section
= lbss_section
;
11215 s_comm_internal (0, elf_common_parse
);
11217 elf_com_section_ptr
= saved_com_section_ptr
;
11218 bss_section
= saved_bss_section
;
11221 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */