1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2016 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
36 #ifndef REGISTER_WARNINGS
37 #define REGISTER_WARNINGS 1
40 #ifndef INFER_ADDR_PREFIX
41 #define INFER_ADDR_PREFIX 1
45 #define DEFAULT_ARCH "i386"
50 #define INLINE __inline__
56 /* Prefixes will be emitted in the order defined below.
57 WAIT_PREFIX must be the first prefix since FWAIT is really is an
58 instruction, and so must come before any prefixes.
59 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
60 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
66 #define HLE_PREFIX REP_PREFIX
67 #define BND_PREFIX REP_PREFIX
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 #define ZMMWORD_MNEM_SUFFIX 'z'
87 /* Intel Syntax. Use a non-ascii letter since since it never appears
89 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
91 #define END_OF_INSN '\0'
94 'templates' is for grouping together 'template' structures for opcodes
95 of the same name. This is only used for storing the insns in the grand
96 ole hash table of insns.
97 The templates themselves start at START and range up to (but not including)
102 const insn_template
*start
;
103 const insn_template
*end
;
107 /* 386 operand encoding bytes: see 386 book for details of this. */
110 unsigned int regmem
; /* codes register or memory operand */
111 unsigned int reg
; /* codes register operand (or extended opcode) */
112 unsigned int mode
; /* how to interpret regmem & reg */
116 /* x86-64 extension prefix. */
117 typedef int rex_byte
;
119 /* 386 opcode byte to code indirect addressing. */
128 /* x86 arch names, types and features */
131 const char *name
; /* arch name */
132 unsigned int len
; /* arch string length */
133 enum processor_type type
; /* arch type */
134 i386_cpu_flags flags
; /* cpu feature flags */
135 unsigned int skip
; /* show_arch should skip this. */
139 /* Used to turn off indicated flags. */
142 const char *name
; /* arch name */
143 unsigned int len
; /* arch string length */
144 i386_cpu_flags flags
; /* cpu feature flags */
148 static void update_code_flag (int, int);
149 static void set_code_flag (int);
150 static void set_16bit_gcc_code_flag (int);
151 static void set_intel_syntax (int);
152 static void set_intel_mnemonic (int);
153 static void set_allow_index_reg (int);
154 static void set_check (int);
155 static void set_cpu_arch (int);
157 static void pe_directive_secrel (int);
159 static void signed_cons (int);
160 static char *output_invalid (int c
);
161 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
163 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
165 static int i386_att_operand (char *);
166 static int i386_intel_operand (char *, int);
167 static int i386_intel_simplify (expressionS
*);
168 static int i386_intel_parse_name (const char *, expressionS
*);
169 static const reg_entry
*parse_register (char *, char **);
170 static char *parse_insn (char *, char *);
171 static char *parse_operands (char *, const char *);
172 static void swap_operands (void);
173 static void swap_2_operands (int, int);
174 static void optimize_imm (void);
175 static void optimize_disp (void);
176 static const insn_template
*match_template (void);
177 static int check_string (void);
178 static int process_suffix (void);
179 static int check_byte_reg (void);
180 static int check_long_reg (void);
181 static int check_qword_reg (void);
182 static int check_word_reg (void);
183 static int finalize_imm (void);
184 static int process_operands (void);
185 static const seg_entry
*build_modrm_byte (void);
186 static void output_insn (void);
187 static void output_imm (fragS
*, offsetT
);
188 static void output_disp (fragS
*, offsetT
);
190 static void s_bss (int);
192 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
193 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
196 static const char *default_arch
= DEFAULT_ARCH
;
198 /* This struct describes rounding control and SAE in the instruction. */
212 static struct RC_Operation rc_op
;
214 /* The struct describes masking, applied to OPERAND in the instruction.
215 MASK is a pointer to the corresponding mask register. ZEROING tells
216 whether merging or zeroing mask is used. */
217 struct Mask_Operation
219 const reg_entry
*mask
;
220 unsigned int zeroing
;
221 /* The operand where this operation is associated. */
225 static struct Mask_Operation mask_op
;
227 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
229 struct Broadcast_Operation
231 /* Type of broadcast: no broadcast, {1to8}, or {1to16}. */
234 /* Index of broadcasted operand. */
238 static struct Broadcast_Operation broadcast_op
;
243 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
244 unsigned char bytes
[4];
246 /* Destination or source register specifier. */
247 const reg_entry
*register_specifier
;
250 /* 'md_assemble ()' gathers together information and puts it into a
257 const reg_entry
*regs
;
262 operand_size_mismatch
,
263 operand_type_mismatch
,
264 register_type_mismatch
,
265 number_of_operands_mismatch
,
266 invalid_instruction_suffix
,
269 unsupported_with_intel_mnemonic
,
272 invalid_vsib_address
,
273 invalid_vector_register_set
,
274 unsupported_vector_index_register
,
275 unsupported_broadcast
,
276 broadcast_not_on_src_operand
,
279 mask_not_on_destination
,
282 rc_sae_operand_not_last_imm
,
283 invalid_register_operand
,
289 /* TM holds the template for the insn were currently assembling. */
292 /* SUFFIX holds the instruction size suffix for byte, word, dword
293 or qword, if given. */
296 /* OPERANDS gives the number of given operands. */
297 unsigned int operands
;
299 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
300 of given register, displacement, memory operands and immediate
302 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
304 /* TYPES [i] is the type (see above #defines) which tells us how to
305 use OP[i] for the corresponding operand. */
306 i386_operand_type types
[MAX_OPERANDS
];
308 /* Displacement expression, immediate expression, or register for each
310 union i386_op op
[MAX_OPERANDS
];
312 /* Flags for operands. */
313 unsigned int flags
[MAX_OPERANDS
];
314 #define Operand_PCrel 1
316 /* Relocation type for operand */
317 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
319 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
320 the base index byte below. */
321 const reg_entry
*base_reg
;
322 const reg_entry
*index_reg
;
323 unsigned int log2_scale_factor
;
325 /* SEG gives the seg_entries of this insn. They are zero unless
326 explicit segment overrides are given. */
327 const seg_entry
*seg
[2];
329 /* PREFIX holds all the given prefix opcodes (usually null).
330 PREFIXES is the number of prefix opcodes. */
331 unsigned int prefixes
;
332 unsigned char prefix
[MAX_PREFIXES
];
334 /* RM and SIB are the modrm byte and the sib byte where the
335 addressing modes of this insn are encoded. */
342 /* Masking attributes. */
343 struct Mask_Operation
*mask
;
345 /* Rounding control and SAE attributes. */
346 struct RC_Operation
*rounding
;
348 /* Broadcasting attributes. */
349 struct Broadcast_Operation
*broadcast
;
351 /* Compressed disp8*N attribute. */
352 unsigned int memshift
;
354 /* Swap operand in encoding. */
355 unsigned int swap_operand
;
357 /* Prefer 8bit or 32bit displacement in encoding. */
360 disp_encoding_default
= 0,
366 const char *rep_prefix
;
369 const char *hle_prefix
;
371 /* Have BND prefix. */
372 const char *bnd_prefix
;
374 /* Need VREX to support upper 16 registers. */
378 enum i386_error error
;
381 typedef struct _i386_insn i386_insn
;
383 /* Link RC type with corresponding string, that'll be looked for in
392 static const struct RC_name RC_NamesTable
[] =
394 { rne
, STRING_COMMA_LEN ("rn-sae") },
395 { rd
, STRING_COMMA_LEN ("rd-sae") },
396 { ru
, STRING_COMMA_LEN ("ru-sae") },
397 { rz
, STRING_COMMA_LEN ("rz-sae") },
398 { saeonly
, STRING_COMMA_LEN ("sae") },
401 /* List of chars besides those in app.c:symbol_chars that can start an
402 operand. Used to prevent the scrubber eating vital white-space. */
403 const char extra_symbol_chars
[] = "*%-([{"
412 #if (defined (TE_I386AIX) \
413 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
414 && !defined (TE_GNU) \
415 && !defined (TE_LINUX) \
416 && !defined (TE_NACL) \
417 && !defined (TE_NETWARE) \
418 && !defined (TE_FreeBSD) \
419 && !defined (TE_DragonFly) \
420 && !defined (TE_NetBSD)))
421 /* This array holds the chars that always start a comment. If the
422 pre-processor is disabled, these aren't very useful. The option
423 --divide will remove '/' from this list. */
424 const char *i386_comment_chars
= "#/";
425 #define SVR4_COMMENT_CHARS 1
426 #define PREFIX_SEPARATOR '\\'
429 const char *i386_comment_chars
= "#";
430 #define PREFIX_SEPARATOR '/'
433 /* This array holds the chars that only start a comment at the beginning of
434 a line. If the line seems to have the form '# 123 filename'
435 .line and .file directives will appear in the pre-processed output.
436 Note that input_file.c hand checks for '#' at the beginning of the
437 first line of the input file. This is because the compiler outputs
438 #NO_APP at the beginning of its output.
439 Also note that comments started like this one will always work if
440 '/' isn't otherwise defined. */
441 const char line_comment_chars
[] = "#/";
443 const char line_separator_chars
[] = ";";
445 /* Chars that can be used to separate mant from exp in floating point
447 const char EXP_CHARS
[] = "eE";
449 /* Chars that mean this number is a floating point constant
452 const char FLT_CHARS
[] = "fFdDxX";
454 /* Tables for lexical analysis. */
455 static char mnemonic_chars
[256];
456 static char register_chars
[256];
457 static char operand_chars
[256];
458 static char identifier_chars
[256];
459 static char digit_chars
[256];
461 /* Lexical macros. */
462 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
463 #define is_operand_char(x) (operand_chars[(unsigned char) x])
464 #define is_register_char(x) (register_chars[(unsigned char) x])
465 #define is_space_char(x) ((x) == ' ')
466 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
467 #define is_digit_char(x) (digit_chars[(unsigned char) x])
469 /* All non-digit non-letter characters that may occur in an operand. */
470 static char operand_special_chars
[] = "%$-+(,)*._~/<>|&^!:[@]";
472 /* md_assemble() always leaves the strings it's passed unaltered. To
473 effect this we maintain a stack of saved characters that we've smashed
474 with '\0's (indicating end of strings for various sub-fields of the
475 assembler instruction). */
476 static char save_stack
[32];
477 static char *save_stack_p
;
478 #define END_STRING_AND_SAVE(s) \
479 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
480 #define RESTORE_END_STRING(s) \
481 do { *(s) = *--save_stack_p; } while (0)
483 /* The instruction we're assembling. */
486 /* Possible templates for current insn. */
487 static const templates
*current_templates
;
489 /* Per instruction expressionS buffers: max displacements & immediates. */
490 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
491 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
493 /* Current operand we are working on. */
494 static int this_operand
= -1;
496 /* We support four different modes. FLAG_CODE variable is used to distinguish
504 static enum flag_code flag_code
;
505 static unsigned int object_64bit
;
506 static unsigned int disallow_64bit_reloc
;
507 static int use_rela_relocations
= 0;
509 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
510 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
511 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
513 /* The ELF ABI to use. */
521 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
524 #if defined (TE_PE) || defined (TE_PEP)
525 /* Use big object file format. */
526 static int use_big_obj
= 0;
529 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
530 /* 1 if generating code for a shared library. */
531 static int shared
= 0;
534 /* 1 for intel syntax,
536 static int intel_syntax
= 0;
538 /* 1 for Intel64 ISA,
542 /* 1 for intel mnemonic,
543 0 if att mnemonic. */
544 static int intel_mnemonic
= !SYSV386_COMPAT
;
546 /* 1 if support old (<= 2.8.1) versions of gcc. */
547 static int old_gcc
= OLDGCC_COMPAT
;
549 /* 1 if pseudo registers are permitted. */
550 static int allow_pseudo_reg
= 0;
552 /* 1 if register prefix % not required. */
553 static int allow_naked_reg
= 0;
555 /* 1 if the assembler should add BND prefix for all control-tranferring
556 instructions supporting it, even if this prefix wasn't specified
558 static int add_bnd_prefix
= 0;
560 /* 1 if pseudo index register, eiz/riz, is allowed . */
561 static int allow_index_reg
= 0;
563 /* 1 if the assembler should ignore LOCK prefix, even if it was
564 specified explicitly. */
565 static int omit_lock_prefix
= 0;
567 /* 1 if the assembler should encode lfence, mfence, and sfence as
568 "lock addl $0, (%{re}sp)". */
569 static int avoid_fence
= 0;
571 /* 1 if the assembler should generate relax relocations. */
573 static int generate_relax_relocations
574 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
576 static enum check_kind
582 sse_check
, operand_check
= check_warning
;
584 /* Register prefix used for error message. */
585 static const char *register_prefix
= "%";
587 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
588 leave, push, and pop instructions so that gcc has the same stack
589 frame as in 32 bit mode. */
590 static char stackop_size
= '\0';
592 /* Non-zero to optimize code alignment. */
593 int optimize_align_code
= 1;
595 /* Non-zero to quieten some warnings. */
596 static int quiet_warnings
= 0;
599 static const char *cpu_arch_name
= NULL
;
600 static char *cpu_sub_arch_name
= NULL
;
602 /* CPU feature flags. */
603 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
605 /* If we have selected a cpu we are generating instructions for. */
606 static int cpu_arch_tune_set
= 0;
608 /* Cpu we are generating instructions for. */
609 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
611 /* CPU feature flags of cpu we are generating instructions for. */
612 static i386_cpu_flags cpu_arch_tune_flags
;
614 /* CPU instruction set architecture used. */
615 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
617 /* CPU feature flags of instruction set architecture used. */
618 i386_cpu_flags cpu_arch_isa_flags
;
620 /* If set, conditional jumps are not automatically promoted to handle
621 larger than a byte offset. */
622 static unsigned int no_cond_jump_promotion
= 0;
624 /* Encode SSE instructions with VEX prefix. */
625 static unsigned int sse2avx
;
627 /* Encode scalar AVX instructions with specific vector length. */
634 /* Encode scalar EVEX LIG instructions with specific vector length. */
642 /* Encode EVEX WIG instructions with specific evex.w. */
649 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
650 static enum rc_type evexrcig
= rne
;
652 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
653 static symbolS
*GOT_symbol
;
655 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
656 unsigned int x86_dwarf2_return_column
;
658 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
659 int x86_cie_data_alignment
;
661 /* Interface to relax_segment.
662 There are 3 major relax states for 386 jump insns because the
663 different types of jumps add different sizes to frags when we're
664 figuring out what sort of jump to choose to reach a given label. */
667 #define UNCOND_JUMP 0
669 #define COND_JUMP86 2
674 #define SMALL16 (SMALL | CODE16)
676 #define BIG16 (BIG | CODE16)
680 #define INLINE __inline__
686 #define ENCODE_RELAX_STATE(type, size) \
687 ((relax_substateT) (((type) << 2) | (size)))
688 #define TYPE_FROM_RELAX_STATE(s) \
690 #define DISP_SIZE_FROM_RELAX_STATE(s) \
691 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
693 /* This table is used by relax_frag to promote short jumps to long
694 ones where necessary. SMALL (short) jumps may be promoted to BIG
695 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
696 don't allow a short jump in a 32 bit code segment to be promoted to
697 a 16 bit offset jump because it's slower (requires data size
698 prefix), and doesn't work, unless the destination is in the bottom
699 64k of the code segment (The top 16 bits of eip are zeroed). */
701 const relax_typeS md_relax_table
[] =
704 1) most positive reach of this state,
705 2) most negative reach of this state,
706 3) how many bytes this mode will have in the variable part of the frag
707 4) which index into the table to try if we can't fit into this one. */
709 /* UNCOND_JUMP states. */
710 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
711 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
712 /* dword jmp adds 4 bytes to frag:
713 0 extra opcode bytes, 4 displacement bytes. */
715 /* word jmp adds 2 byte2 to frag:
716 0 extra opcode bytes, 2 displacement bytes. */
719 /* COND_JUMP states. */
720 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
721 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
722 /* dword conditionals adds 5 bytes to frag:
723 1 extra opcode byte, 4 displacement bytes. */
725 /* word conditionals add 3 bytes to frag:
726 1 extra opcode byte, 2 displacement bytes. */
729 /* COND_JUMP86 states. */
730 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
731 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
732 /* dword conditionals adds 5 bytes to frag:
733 1 extra opcode byte, 4 displacement bytes. */
735 /* word conditionals add 4 bytes to frag:
736 1 displacement byte and a 3 byte long branch insn. */
740 static const arch_entry cpu_arch
[] =
742 /* Do not replace the first two entries - i386_target_format()
743 relies on them being there in this order. */
744 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
745 CPU_GENERIC32_FLAGS
, 0 },
746 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
747 CPU_GENERIC64_FLAGS
, 0 },
748 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
750 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
752 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
754 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
756 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
758 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
760 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
762 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
764 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
765 CPU_PENTIUMPRO_FLAGS
, 0 },
766 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
768 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
770 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
772 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
774 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
775 CPU_NOCONA_FLAGS
, 0 },
776 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
778 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
780 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
781 CPU_CORE2_FLAGS
, 1 },
782 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
783 CPU_CORE2_FLAGS
, 0 },
784 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
785 CPU_COREI7_FLAGS
, 0 },
786 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
788 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
790 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
791 CPU_IAMCU_FLAGS
, 0 },
792 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
794 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
796 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
797 CPU_ATHLON_FLAGS
, 0 },
798 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
800 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
802 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
804 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
805 CPU_AMDFAM10_FLAGS
, 0 },
806 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
807 CPU_BDVER1_FLAGS
, 0 },
808 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
809 CPU_BDVER2_FLAGS
, 0 },
810 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
811 CPU_BDVER3_FLAGS
, 0 },
812 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
813 CPU_BDVER4_FLAGS
, 0 },
814 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
815 CPU_ZNVER1_FLAGS
, 0 },
816 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
817 CPU_BTVER1_FLAGS
, 0 },
818 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
819 CPU_BTVER2_FLAGS
, 0 },
820 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
822 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
824 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
826 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
828 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
830 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
832 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
834 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
835 CPU_SSSE3_FLAGS
, 0 },
836 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
837 CPU_SSE4_1_FLAGS
, 0 },
838 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
839 CPU_SSE4_2_FLAGS
, 0 },
840 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
841 CPU_SSE4_2_FLAGS
, 0 },
842 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
844 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
846 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
847 CPU_AVX512F_FLAGS
, 0 },
848 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
849 CPU_AVX512CD_FLAGS
, 0 },
850 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
851 CPU_AVX512ER_FLAGS
, 0 },
852 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
853 CPU_AVX512PF_FLAGS
, 0 },
854 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
855 CPU_AVX512DQ_FLAGS
, 0 },
856 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
857 CPU_AVX512BW_FLAGS
, 0 },
858 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
859 CPU_AVX512VL_FLAGS
, 0 },
860 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
862 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
863 CPU_VMFUNC_FLAGS
, 0 },
864 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
866 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
867 CPU_XSAVE_FLAGS
, 0 },
868 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
869 CPU_XSAVEOPT_FLAGS
, 0 },
870 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
871 CPU_XSAVEC_FLAGS
, 0 },
872 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
873 CPU_XSAVES_FLAGS
, 0 },
874 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
876 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
877 CPU_PCLMUL_FLAGS
, 0 },
878 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
879 CPU_PCLMUL_FLAGS
, 1 },
880 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
881 CPU_FSGSBASE_FLAGS
, 0 },
882 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
883 CPU_RDRND_FLAGS
, 0 },
884 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
886 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
888 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
890 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
892 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
894 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
896 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
897 CPU_MOVBE_FLAGS
, 0 },
898 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
900 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
902 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
903 CPU_LZCNT_FLAGS
, 0 },
904 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
906 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
908 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
909 CPU_INVPCID_FLAGS
, 0 },
910 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
911 CPU_CLFLUSH_FLAGS
, 0 },
912 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
914 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
915 CPU_SYSCALL_FLAGS
, 0 },
916 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
917 CPU_RDTSCP_FLAGS
, 0 },
918 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
919 CPU_3DNOW_FLAGS
, 0 },
920 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
921 CPU_3DNOWA_FLAGS
, 0 },
922 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
923 CPU_PADLOCK_FLAGS
, 0 },
924 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
926 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
928 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
929 CPU_SSE4A_FLAGS
, 0 },
930 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
932 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
934 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
936 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
938 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
939 CPU_RDSEED_FLAGS
, 0 },
940 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
941 CPU_PRFCHW_FLAGS
, 0 },
942 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
944 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
946 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
948 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
949 CPU_CLFLUSHOPT_FLAGS
, 0 },
950 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
951 CPU_PREFETCHWT1_FLAGS
, 0 },
952 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
954 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
956 { STRING_COMMA_LEN (".pcommit"), PROCESSOR_UNKNOWN
,
957 CPU_PCOMMIT_FLAGS
, 0 },
958 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
959 CPU_AVX512IFMA_FLAGS
, 0 },
960 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
961 CPU_AVX512VBMI_FLAGS
, 0 },
962 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
963 CPU_CLZERO_FLAGS
, 0 },
964 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
965 CPU_MWAITX_FLAGS
, 0 },
966 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
967 CPU_OSPKE_FLAGS
, 0 },
968 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
969 CPU_RDPID_FLAGS
, 0 },
972 static const noarch_entry cpu_noarch
[] =
974 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
975 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
976 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
977 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
981 /* Like s_lcomm_internal in gas/read.c but the alignment string
982 is allowed to be optional. */
985 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
992 && *input_line_pointer
== ',')
994 align
= parse_align (needs_align
- 1);
996 if (align
== (addressT
) -1)
1011 bss_alloc (symbolP
, size
, align
);
1016 pe_lcomm (int needs_align
)
1018 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1022 const pseudo_typeS md_pseudo_table
[] =
1024 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1025 {"align", s_align_bytes
, 0},
1027 {"align", s_align_ptwo
, 0},
1029 {"arch", set_cpu_arch
, 0},
1033 {"lcomm", pe_lcomm
, 1},
1035 {"ffloat", float_cons
, 'f'},
1036 {"dfloat", float_cons
, 'd'},
1037 {"tfloat", float_cons
, 'x'},
1039 {"slong", signed_cons
, 4},
1040 {"noopt", s_ignore
, 0},
1041 {"optim", s_ignore
, 0},
1042 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1043 {"code16", set_code_flag
, CODE_16BIT
},
1044 {"code32", set_code_flag
, CODE_32BIT
},
1045 {"code64", set_code_flag
, CODE_64BIT
},
1046 {"intel_syntax", set_intel_syntax
, 1},
1047 {"att_syntax", set_intel_syntax
, 0},
1048 {"intel_mnemonic", set_intel_mnemonic
, 1},
1049 {"att_mnemonic", set_intel_mnemonic
, 0},
1050 {"allow_index_reg", set_allow_index_reg
, 1},
1051 {"disallow_index_reg", set_allow_index_reg
, 0},
1052 {"sse_check", set_check
, 0},
1053 {"operand_check", set_check
, 1},
1054 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1055 {"largecomm", handle_large_common
, 0},
1057 {"file", (void (*) (int)) dwarf2_directive_file
, 0},
1058 {"loc", dwarf2_directive_loc
, 0},
1059 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1062 {"secrel32", pe_directive_secrel
, 0},
1067 /* For interface with expression (). */
1068 extern char *input_line_pointer
;
1070 /* Hash table for instruction mnemonic lookup. */
1071 static struct hash_control
*op_hash
;
1073 /* Hash table for register lookup. */
1074 static struct hash_control
*reg_hash
;
1077 i386_align_code (fragS
*fragP
, int count
)
1079 /* Various efficient no-op patterns for aligning code labels.
1080 Note: Don't try to assemble the instructions in the comments.
1081 0L and 0w are not legal. */
/* NOP-fill patterns used when padding code alignment.  Each array named
   with suffix N (f32_N, f16_N, alt_N) is exactly N bytes long, and the
   *_patt tables are indexed by (count - 1) to select a filler of the
   requested size — see the memcpy (..., patt[count - 1], count) calls
   in i386_align_code below.  f32_* are 32-bit-mode fillers, f16_* are
   16-bit-mode fillers, alt_* use the long-NOP opcode 0F 1F for CPUs
   that support it.  */
static const unsigned char f32_1[] =
  {0x90};                                  /* nop                  */
static const unsigned char f32_2[] =
  {0x66,0x90};                             /* xchg %ax,%ax         */
static const unsigned char f32_3[] =
  {0x8d,0x76,0x00};                        /* leal 0(%esi),%esi    */
static const unsigned char f32_4[] =
  {0x8d,0x74,0x26,0x00};                   /* leal 0(%esi,1),%esi  */
static const unsigned char f32_5[] =
  {0x90,                                   /* nop                  */
   0x8d,0x74,0x26,0x00};                   /* leal 0(%esi,1),%esi  */
static const unsigned char f32_6[] =
  {0x8d,0xb6,0x00,0x00,0x00,0x00};         /* leal 0L(%esi),%esi   */
static const unsigned char f32_7[] =
  {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};    /* leal 0L(%esi,1),%esi */
static const unsigned char f32_8[] =
  {0x90,                                   /* nop                  */
   0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};    /* leal 0L(%esi,1),%esi */
static const unsigned char f32_9[] =
  {0x89,0xf6,                              /* movl %esi,%esi       */
   0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};    /* leal 0L(%edi,1),%edi */
static const unsigned char f32_10[] =
  {0x8d,0x76,0x00,                         /* leal 0(%esi),%esi    */
   0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};    /* leal 0L(%edi,1),%edi */
static const unsigned char f32_11[] =
  {0x8d,0x74,0x26,0x00,                    /* leal 0(%esi,1),%esi  */
   0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};    /* leal 0L(%edi,1),%edi */
static const unsigned char f32_12[] =
  {0x8d,0xb6,0x00,0x00,0x00,0x00,          /* leal 0L(%esi),%esi   */
   0x8d,0xbf,0x00,0x00,0x00,0x00};         /* leal 0L(%edi),%edi   */
static const unsigned char f32_13[] =
  {0x8d,0xb6,0x00,0x00,0x00,0x00,          /* leal 0L(%esi),%esi   */
   0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};    /* leal 0L(%edi,1),%edi */
static const unsigned char f32_14[] =
  {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00,     /* leal 0L(%esi,1),%esi */
   0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};    /* leal 0L(%edi,1),%edi */
static const unsigned char f16_3[] =
  {0x8d,0x74,0x00};                        /* lea 0(%esi),%esi     */
static const unsigned char f16_4[] =
  {0x8d,0xb4,0x00,0x00};                   /* lea 0w(%si),%si      */
static const unsigned char f16_5[] =
  {0x90,                                   /* nop                  */
   0x8d,0xb4,0x00,0x00};                   /* lea 0w(%si),%si      */
static const unsigned char f16_6[] =
  {0x89,0xf6,                              /* mov %si,%si          */
   0x8d,0xbd,0x00,0x00};                   /* lea 0w(%di),%di      */
static const unsigned char f16_7[] =
  {0x8d,0x74,0x00,                         /* lea 0(%si),%si       */
   0x8d,0xbd,0x00,0x00};                   /* lea 0w(%di),%di      */
static const unsigned char f16_8[] =
  {0x8d,0xb4,0x00,0x00,                    /* lea 0w(%si),%si      */
   0x8d,0xbd,0x00,0x00};                   /* lea 0w(%di),%di      */
/* 31-byte filler: a 2-byte short jump over 29 single-byte nops, used
   when the padding is too large for the single-instruction patterns.  */
static const unsigned char jump_31[] =
  {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90,     /* jmp .+31; lotsa nops */
   0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
   0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
   0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
static const unsigned char *const f32_patt[] = {
  f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
  f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
};
static const unsigned char *const f16_patt[] = {
  f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
};
/* nopl (%[re]ax) */
static const unsigned char alt_3[] =
  {0x0f,0x1f,0x00};
/* nopl 0(%[re]ax) */
static const unsigned char alt_4[] =
  {0x0f,0x1f,0x40,0x00};
/* nopl 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_5[] =
  {0x0f,0x1f,0x44,0x00,0x00};
/* nopw 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_6[] =
  {0x66,0x0f,0x1f,0x44,0x00,0x00};
/* nopl 0L(%[re]ax) */
static const unsigned char alt_7[] =
  {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
/* nopl 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_8[] =
  {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* nopw 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_9[] =
  {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* nopw %cs:0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_10[] =
  {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
static const unsigned char *const alt_patt[] = {
  f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
  alt_9, alt_10
};
1175 /* Only align for at least a positive non-zero boundary. */
1176 if (count
<= 0 || count
> MAX_MEM_FOR_RS_ALIGN_CODE
)
1179 /* We need to decide which NOP sequence to use for 32bit and
1180 64bit. When -mtune= is used:
1182 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1183 PROCESSOR_GENERIC32, f32_patt will be used.
1184 2. For the rest, alt_patt will be used.
1186 When -mtune= isn't used, alt_patt will be used if
1187 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1190 When -march= or .arch is used, we can't use anything beyond
1191 cpu_arch_isa_flags. */
1193 if (flag_code
== CODE_16BIT
)
1197 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1199 /* Adjust jump offset. */
1200 fragP
->fr_literal
[fragP
->fr_fix
+ 1] = count
- 2;
1203 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1204 f16_patt
[count
- 1], count
);
1208 const unsigned char *const *patt
= NULL
;
1210 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1212 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1213 switch (cpu_arch_tune
)
1215 case PROCESSOR_UNKNOWN
:
1216 /* We use cpu_arch_isa_flags to check if we SHOULD
1217 optimize with nops. */
1218 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1223 case PROCESSOR_PENTIUM4
:
1224 case PROCESSOR_NOCONA
:
1225 case PROCESSOR_CORE
:
1226 case PROCESSOR_CORE2
:
1227 case PROCESSOR_COREI7
:
1228 case PROCESSOR_L1OM
:
1229 case PROCESSOR_K1OM
:
1230 case PROCESSOR_GENERIC64
:
1232 case PROCESSOR_ATHLON
:
1234 case PROCESSOR_AMDFAM10
:
1236 case PROCESSOR_ZNVER
:
1240 case PROCESSOR_I386
:
1241 case PROCESSOR_I486
:
1242 case PROCESSOR_PENTIUM
:
1243 case PROCESSOR_PENTIUMPRO
:
1244 case PROCESSOR_IAMCU
:
1245 case PROCESSOR_GENERIC32
:
1252 switch (fragP
->tc_frag_data
.tune
)
1254 case PROCESSOR_UNKNOWN
:
1255 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1256 PROCESSOR_UNKNOWN. */
1260 case PROCESSOR_I386
:
1261 case PROCESSOR_I486
:
1262 case PROCESSOR_PENTIUM
:
1263 case PROCESSOR_IAMCU
:
1265 case PROCESSOR_ATHLON
:
1267 case PROCESSOR_AMDFAM10
:
1269 case PROCESSOR_ZNVER
:
1271 case PROCESSOR_GENERIC32
:
1272 /* We use cpu_arch_isa_flags to check if we CAN optimize
1274 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1279 case PROCESSOR_PENTIUMPRO
:
1280 case PROCESSOR_PENTIUM4
:
1281 case PROCESSOR_NOCONA
:
1282 case PROCESSOR_CORE
:
1283 case PROCESSOR_CORE2
:
1284 case PROCESSOR_COREI7
:
1285 case PROCESSOR_L1OM
:
1286 case PROCESSOR_K1OM
:
1287 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1292 case PROCESSOR_GENERIC64
:
1298 if (patt
== f32_patt
)
1300 /* If the padding is less than 15 bytes, we use the normal
1301 ones. Otherwise, we use a jump instruction and adjust
1305 /* For 64bit, the limit is 3 bytes. */
1306 if (flag_code
== CODE_64BIT
1307 && fragP
->tc_frag_data
.isa_flags
.bitfield
.cpulm
)
1312 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1313 patt
[count
- 1], count
);
1316 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1318 /* Adjust jump offset. */
1319 fragP
->fr_literal
[fragP
->fr_fix
+ 1] = count
- 2;
1324 /* Maximum length of an instruction is 10 byte. If the
1325 padding is greater than 10 bytes and we don't use jump,
1326 we have to break it into smaller pieces. */
1327 int padding
= count
;
1328 while (padding
> 10)
1331 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
+ padding
,
1336 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1337 patt
[padding
- 1], padding
);
1340 fragP
->fr_var
= count
;
1344 operand_type_all_zero (const union i386_operand_type
*x
)
1346 switch (ARRAY_SIZE(x
->array
))
1355 return !x
->array
[0];
1362 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1364 switch (ARRAY_SIZE(x
->array
))
1379 operand_type_equal (const union i386_operand_type
*x
,
1380 const union i386_operand_type
*y
)
1382 switch (ARRAY_SIZE(x
->array
))
1385 if (x
->array
[2] != y
->array
[2])
1388 if (x
->array
[1] != y
->array
[1])
1391 return x
->array
[0] == y
->array
[0];
1399 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1401 switch (ARRAY_SIZE(x
->array
))
1410 return !x
->array
[0];
1417 cpu_flags_equal (const union i386_cpu_flags
*x
,
1418 const union i386_cpu_flags
*y
)
1420 switch (ARRAY_SIZE(x
->array
))
1423 if (x
->array
[2] != y
->array
[2])
1426 if (x
->array
[1] != y
->array
[1])
1429 return x
->array
[0] == y
->array
[0];
1437 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1439 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1440 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1443 static INLINE i386_cpu_flags
1444 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1446 switch (ARRAY_SIZE (x
.array
))
1449 x
.array
[2] &= y
.array
[2];
1451 x
.array
[1] &= y
.array
[1];
1453 x
.array
[0] &= y
.array
[0];
1461 static INLINE i386_cpu_flags
1462 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1464 switch (ARRAY_SIZE (x
.array
))
1467 x
.array
[2] |= y
.array
[2];
1469 x
.array
[1] |= y
.array
[1];
1471 x
.array
[0] |= y
.array
[0];
1479 static INLINE i386_cpu_flags
1480 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1482 switch (ARRAY_SIZE (x
.array
))
1485 x
.array
[2] &= ~y
.array
[2];
1487 x
.array
[1] &= ~y
.array
[1];
1489 x
.array
[0] &= ~y
.array
[0];
1498 valid_iamcu_cpu_flags (const i386_cpu_flags
*flags
)
1500 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
1502 static const i386_cpu_flags iamcu_flags
= CPU_IAMCU_COMPAT_FLAGS
;
1503 i386_cpu_flags compat_flags
;
1504 compat_flags
= cpu_flags_and_not (*flags
, iamcu_flags
);
1505 return cpu_flags_all_zero (&compat_flags
);
/* Bit flags returned by cpu_flags_match below; individual match aspects
   are OR'ed together, and the composite masks name the combinations a
   caller tests against.  */
#define CPU_FLAGS_ARCH_MATCH		0x1
#define CPU_FLAGS_64BIT_MATCH		0x2
#define CPU_FLAGS_AES_MATCH		0x4
#define CPU_FLAGS_PCLMUL_MATCH		0x8
#define CPU_FLAGS_AVX_MATCH		0x10

#define CPU_FLAGS_32BIT_MATCH \
  (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
   | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
#define CPU_FLAGS_PERFECT_MATCH \
  (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1523 /* Return CPU flags match bits. */
1526 cpu_flags_match (const insn_template
*t
)
1528 i386_cpu_flags x
= t
->cpu_flags
;
1529 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1531 x
.bitfield
.cpu64
= 0;
1532 x
.bitfield
.cpuno64
= 0;
1534 if (cpu_flags_all_zero (&x
))
1536 /* This instruction is available on all archs. */
1537 match
|= CPU_FLAGS_32BIT_MATCH
;
1541 /* This instruction is available only on some archs. */
1542 i386_cpu_flags cpu
= cpu_arch_flags
;
1544 cpu
= cpu_flags_and (x
, cpu
);
1545 if (!cpu_flags_all_zero (&cpu
))
1547 if (x
.bitfield
.cpuavx
)
1549 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1550 if (cpu
.bitfield
.cpuavx
)
1552 /* Check SSE2AVX. */
1553 if (!t
->opcode_modifier
.sse2avx
|| sse2avx
)
1555 match
|= (CPU_FLAGS_ARCH_MATCH
1556 | CPU_FLAGS_AVX_MATCH
);
1558 if (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1559 match
|= CPU_FLAGS_AES_MATCH
;
1561 if (!x
.bitfield
.cpupclmul
1562 || cpu
.bitfield
.cpupclmul
)
1563 match
|= CPU_FLAGS_PCLMUL_MATCH
;
1567 match
|= CPU_FLAGS_ARCH_MATCH
;
1569 else if (x
.bitfield
.cpuavx512vl
)
1571 /* Match AVX512VL. */
1572 if (cpu
.bitfield
.cpuavx512vl
)
1574 /* Need another match. */
1575 cpu
.bitfield
.cpuavx512vl
= 0;
1576 if (!cpu_flags_all_zero (&cpu
))
1577 match
|= CPU_FLAGS_32BIT_MATCH
;
1579 match
|= CPU_FLAGS_ARCH_MATCH
;
1582 match
|= CPU_FLAGS_ARCH_MATCH
;
1585 match
|= CPU_FLAGS_32BIT_MATCH
;
1591 static INLINE i386_operand_type
1592 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1594 switch (ARRAY_SIZE (x
.array
))
1597 x
.array
[2] &= y
.array
[2];
1599 x
.array
[1] &= y
.array
[1];
1601 x
.array
[0] &= y
.array
[0];
1609 static INLINE i386_operand_type
1610 operand_type_or (i386_operand_type x
, i386_operand_type y
)
1612 switch (ARRAY_SIZE (x
.array
))
1615 x
.array
[2] |= y
.array
[2];
1617 x
.array
[1] |= y
.array
[1];
1619 x
.array
[0] |= y
.array
[0];
1627 static INLINE i386_operand_type
1628 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
1630 switch (ARRAY_SIZE (x
.array
))
1633 x
.array
[2] ^= y
.array
[2];
1635 x
.array
[1] ^= y
.array
[1];
1637 x
.array
[0] ^= y
.array
[0];
1645 static const i386_operand_type acc32
= OPERAND_TYPE_ACC32
;
1646 static const i386_operand_type acc64
= OPERAND_TYPE_ACC64
;
1647 static const i386_operand_type control
= OPERAND_TYPE_CONTROL
;
1648 static const i386_operand_type inoutportreg
1649 = OPERAND_TYPE_INOUTPORTREG
;
1650 static const i386_operand_type reg16_inoutportreg
1651 = OPERAND_TYPE_REG16_INOUTPORTREG
;
1652 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
1653 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
1654 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
1655 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
1656 static const i386_operand_type anydisp
1657 = OPERAND_TYPE_ANYDISP
;
1658 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
1659 static const i386_operand_type regymm
= OPERAND_TYPE_REGYMM
;
1660 static const i386_operand_type regzmm
= OPERAND_TYPE_REGZMM
;
1661 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
1662 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
1663 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
1664 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
1665 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
1666 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
1667 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
1668 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
1669 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
1670 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
1671 static const i386_operand_type vec_imm4
= OPERAND_TYPE_VEC_IMM4
;
1682 operand_type_check (i386_operand_type t
, enum operand_type c
)
1687 return (t
.bitfield
.reg8
1690 || t
.bitfield
.reg64
);
1693 return (t
.bitfield
.imm8
1697 || t
.bitfield
.imm32s
1698 || t
.bitfield
.imm64
);
1701 return (t
.bitfield
.disp8
1702 || t
.bitfield
.disp16
1703 || t
.bitfield
.disp32
1704 || t
.bitfield
.disp32s
1705 || t
.bitfield
.disp64
);
1708 return (t
.bitfield
.disp8
1709 || t
.bitfield
.disp16
1710 || t
.bitfield
.disp32
1711 || t
.bitfield
.disp32s
1712 || t
.bitfield
.disp64
1713 || t
.bitfield
.baseindex
);
1722 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1723 operand J for instruction template T. */
1726 match_reg_size (const insn_template
*t
, unsigned int j
)
1728 return !((i
.types
[j
].bitfield
.byte
1729 && !t
->operand_types
[j
].bitfield
.byte
)
1730 || (i
.types
[j
].bitfield
.word
1731 && !t
->operand_types
[j
].bitfield
.word
)
1732 || (i
.types
[j
].bitfield
.dword
1733 && !t
->operand_types
[j
].bitfield
.dword
)
1734 || (i
.types
[j
].bitfield
.qword
1735 && !t
->operand_types
[j
].bitfield
.qword
));
1738 /* Return 1 if there is no conflict in any size on operand J for
1739 instruction template T. */
1742 match_mem_size (const insn_template
*t
, unsigned int j
)
1744 return (match_reg_size (t
, j
)
1745 && !((i
.types
[j
].bitfield
.unspecified
1747 && !t
->operand_types
[j
].bitfield
.unspecified
)
1748 || (i
.types
[j
].bitfield
.fword
1749 && !t
->operand_types
[j
].bitfield
.fword
)
1750 || (i
.types
[j
].bitfield
.tbyte
1751 && !t
->operand_types
[j
].bitfield
.tbyte
)
1752 || (i
.types
[j
].bitfield
.xmmword
1753 && !t
->operand_types
[j
].bitfield
.xmmword
)
1754 || (i
.types
[j
].bitfield
.ymmword
1755 && !t
->operand_types
[j
].bitfield
.ymmword
)
1756 || (i
.types
[j
].bitfield
.zmmword
1757 && !t
->operand_types
[j
].bitfield
.zmmword
)));
1760 /* Return 1 if there is no size conflict on any operands for
1761 instruction template T. */
1764 operand_size_match (const insn_template
*t
)
1769 /* Don't check jump instructions. */
1770 if (t
->opcode_modifier
.jump
1771 || t
->opcode_modifier
.jumpbyte
1772 || t
->opcode_modifier
.jumpdword
1773 || t
->opcode_modifier
.jumpintersegment
)
1776 /* Check memory and accumulator operand size. */
1777 for (j
= 0; j
< i
.operands
; j
++)
1779 if (t
->operand_types
[j
].bitfield
.anysize
)
1782 if (t
->operand_types
[j
].bitfield
.acc
&& !match_reg_size (t
, j
))
1788 if (i
.types
[j
].bitfield
.mem
&& !match_mem_size (t
, j
))
1797 else if (!t
->opcode_modifier
.d
&& !t
->opcode_modifier
.floatd
)
1800 i
.error
= operand_size_mismatch
;
1804 /* Check reverse. */
1805 gas_assert (i
.operands
== 2);
1808 for (j
= 0; j
< 2; j
++)
1810 if (t
->operand_types
[j
].bitfield
.acc
1811 && !match_reg_size (t
, j
? 0 : 1))
1814 if (i
.types
[j
].bitfield
.mem
1815 && !match_mem_size (t
, j
? 0 : 1))
1823 operand_type_match (i386_operand_type overlap
,
1824 i386_operand_type given
)
1826 i386_operand_type temp
= overlap
;
1828 temp
.bitfield
.jumpabsolute
= 0;
1829 temp
.bitfield
.unspecified
= 0;
1830 temp
.bitfield
.byte
= 0;
1831 temp
.bitfield
.word
= 0;
1832 temp
.bitfield
.dword
= 0;
1833 temp
.bitfield
.fword
= 0;
1834 temp
.bitfield
.qword
= 0;
1835 temp
.bitfield
.tbyte
= 0;
1836 temp
.bitfield
.xmmword
= 0;
1837 temp
.bitfield
.ymmword
= 0;
1838 temp
.bitfield
.zmmword
= 0;
1839 if (operand_type_all_zero (&temp
))
1842 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
1843 && given
.bitfield
.jumpabsolute
== overlap
.bitfield
.jumpabsolute
)
1847 i
.error
= operand_type_mismatch
;
1851 /* If given types g0 and g1 are registers they must be of the same type
1852 unless the expected operand type register overlap is null.
1853 Note that Acc in a template matches every size of reg. */
1856 operand_type_register_match (i386_operand_type m0
,
1857 i386_operand_type g0
,
1858 i386_operand_type t0
,
1859 i386_operand_type m1
,
1860 i386_operand_type g1
,
1861 i386_operand_type t1
)
1863 if (!operand_type_check (g0
, reg
))
1866 if (!operand_type_check (g1
, reg
))
1869 if (g0
.bitfield
.reg8
== g1
.bitfield
.reg8
1870 && g0
.bitfield
.reg16
== g1
.bitfield
.reg16
1871 && g0
.bitfield
.reg32
== g1
.bitfield
.reg32
1872 && g0
.bitfield
.reg64
== g1
.bitfield
.reg64
)
1875 if (m0
.bitfield
.acc
)
1877 t0
.bitfield
.reg8
= 1;
1878 t0
.bitfield
.reg16
= 1;
1879 t0
.bitfield
.reg32
= 1;
1880 t0
.bitfield
.reg64
= 1;
1883 if (m1
.bitfield
.acc
)
1885 t1
.bitfield
.reg8
= 1;
1886 t1
.bitfield
.reg16
= 1;
1887 t1
.bitfield
.reg32
= 1;
1888 t1
.bitfield
.reg64
= 1;
1891 if (!(t0
.bitfield
.reg8
& t1
.bitfield
.reg8
)
1892 && !(t0
.bitfield
.reg16
& t1
.bitfield
.reg16
)
1893 && !(t0
.bitfield
.reg32
& t1
.bitfield
.reg32
)
1894 && !(t0
.bitfield
.reg64
& t1
.bitfield
.reg64
))
1897 i
.error
= register_type_mismatch
;
1902 static INLINE
unsigned int
1903 register_number (const reg_entry
*r
)
1905 unsigned int nr
= r
->reg_num
;
1907 if (r
->reg_flags
& RegRex
)
1910 if (r
->reg_flags
& RegVRex
)
1916 static INLINE
unsigned int
1917 mode_from_disp_size (i386_operand_type t
)
1919 if (t
.bitfield
.disp8
|| t
.bitfield
.vec_disp8
)
1921 else if (t
.bitfield
.disp16
1922 || t
.bitfield
.disp32
1923 || t
.bitfield
.disp32s
)
1930 fits_in_signed_byte (addressT num
)
1932 return num
+ 0x80 <= 0xff;
1936 fits_in_unsigned_byte (addressT num
)
1942 fits_in_unsigned_word (addressT num
)
1944 return num
<= 0xffff;
1948 fits_in_signed_word (addressT num
)
1950 return num
+ 0x8000 <= 0xffff;
1954 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
1959 return num
+ 0x80000000 <= 0xffffffff;
1961 } /* fits_in_signed_long() */
1964 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
1969 return num
<= 0xffffffff;
1971 } /* fits_in_unsigned_long() */
1974 fits_in_vec_disp8 (offsetT num
)
1976 int shift
= i
.memshift
;
1982 mask
= (1 << shift
) - 1;
1984 /* Return 0 if NUM isn't properly aligned. */
1988 /* Check if NUM will fit in 8bit after shift. */
1989 return fits_in_signed_byte (num
>> shift
);
1993 fits_in_imm4 (offsetT num
)
1995 return (num
& 0xf) == num
;
1998 static i386_operand_type
1999 smallest_imm_type (offsetT num
)
2001 i386_operand_type t
;
2003 operand_type_set (&t
, 0);
2004 t
.bitfield
.imm64
= 1;
2006 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2008 /* This code is disabled on the 486 because all the Imm1 forms
2009 in the opcode table are slower on the i486. They're the
2010 versions with the implicitly specified single-position
2011 displacement, which has another syntax if you really want to
2013 t
.bitfield
.imm1
= 1;
2014 t
.bitfield
.imm8
= 1;
2015 t
.bitfield
.imm8s
= 1;
2016 t
.bitfield
.imm16
= 1;
2017 t
.bitfield
.imm32
= 1;
2018 t
.bitfield
.imm32s
= 1;
2020 else if (fits_in_signed_byte (num
))
2022 t
.bitfield
.imm8
= 1;
2023 t
.bitfield
.imm8s
= 1;
2024 t
.bitfield
.imm16
= 1;
2025 t
.bitfield
.imm32
= 1;
2026 t
.bitfield
.imm32s
= 1;
2028 else if (fits_in_unsigned_byte (num
))
2030 t
.bitfield
.imm8
= 1;
2031 t
.bitfield
.imm16
= 1;
2032 t
.bitfield
.imm32
= 1;
2033 t
.bitfield
.imm32s
= 1;
2035 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2037 t
.bitfield
.imm16
= 1;
2038 t
.bitfield
.imm32
= 1;
2039 t
.bitfield
.imm32s
= 1;
2041 else if (fits_in_signed_long (num
))
2043 t
.bitfield
.imm32
= 1;
2044 t
.bitfield
.imm32s
= 1;
2046 else if (fits_in_unsigned_long (num
))
2047 t
.bitfield
.imm32
= 1;
2053 offset_in_range (offsetT val
, int size
)
2059 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2060 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2061 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2063 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2069 /* If BFD64, sign extend val for 32bit address mode. */
2070 if (flag_code
!= CODE_64BIT
2071 || i
.prefix
[ADDR_PREFIX
])
2072 if ((val
& ~(((addressT
) 2 << 31) - 1)) == 0)
2073 val
= (val
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2076 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2078 char buf1
[40], buf2
[40];
2080 sprint_value (buf1
, val
);
2081 sprint_value (buf2
, val
& mask
);
2082 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2096 a. PREFIX_EXIST if attempting to add a prefix where one from the
2097 same class already exists.
2098 b. PREFIX_LOCK if lock prefix is added.
2099 c. PREFIX_REP if rep/repne prefix is added.
2100 d. PREFIX_OTHER if other prefix is added.
2103 static enum PREFIX_GROUP
2104 add_prefix (unsigned int prefix
)
2106 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2109 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2110 && flag_code
== CODE_64BIT
)
2112 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2113 || ((i
.prefix
[REX_PREFIX
] & (REX_R
| REX_X
| REX_B
))
2114 && (prefix
& (REX_R
| REX_X
| REX_B
))))
2125 case CS_PREFIX_OPCODE
:
2126 case DS_PREFIX_OPCODE
:
2127 case ES_PREFIX_OPCODE
:
2128 case FS_PREFIX_OPCODE
:
2129 case GS_PREFIX_OPCODE
:
2130 case SS_PREFIX_OPCODE
:
2134 case REPNE_PREFIX_OPCODE
:
2135 case REPE_PREFIX_OPCODE
:
2140 case LOCK_PREFIX_OPCODE
:
2149 case ADDR_PREFIX_OPCODE
:
2153 case DATA_PREFIX_OPCODE
:
2157 if (i
.prefix
[q
] != 0)
2165 i
.prefix
[q
] |= prefix
;
2168 as_bad (_("same type of prefix used twice"));
2174 update_code_flag (int value
, int check
)
2176 PRINTF_LIKE ((*as_error
));
2178 flag_code
= (enum flag_code
) value
;
2179 if (flag_code
== CODE_64BIT
)
2181 cpu_arch_flags
.bitfield
.cpu64
= 1;
2182 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2186 cpu_arch_flags
.bitfield
.cpu64
= 0;
2187 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2189 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2192 as_error
= as_fatal
;
2195 (*as_error
) (_("64bit mode not supported on `%s'."),
2196 cpu_arch_name
? cpu_arch_name
: default_arch
);
2198 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2201 as_error
= as_fatal
;
2204 (*as_error
) (_("32bit mode not supported on `%s'."),
2205 cpu_arch_name
? cpu_arch_name
: default_arch
);
2207 stackop_size
= '\0';
2211 set_code_flag (int value
)
2213 update_code_flag (value
, 0);
2217 set_16bit_gcc_code_flag (int new_code_flag
)
2219 flag_code
= (enum flag_code
) new_code_flag
;
2220 if (flag_code
!= CODE_16BIT
)
2222 cpu_arch_flags
.bitfield
.cpu64
= 0;
2223 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2224 stackop_size
= LONG_MNEM_SUFFIX
;
2228 set_intel_syntax (int syntax_flag
)
2230 /* Find out if register prefixing is specified. */
2231 int ask_naked_reg
= 0;
2234 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2237 int e
= get_symbol_name (&string
);
2239 if (strcmp (string
, "prefix") == 0)
2241 else if (strcmp (string
, "noprefix") == 0)
2244 as_bad (_("bad argument to syntax directive."));
2245 (void) restore_line_pointer (e
);
2247 demand_empty_rest_of_line ();
2249 intel_syntax
= syntax_flag
;
2251 if (ask_naked_reg
== 0)
2252 allow_naked_reg
= (intel_syntax
2253 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2255 allow_naked_reg
= (ask_naked_reg
< 0);
2257 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2259 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2260 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2261 register_prefix
= allow_naked_reg
? "" : "%";
2265 set_intel_mnemonic (int mnemonic_flag
)
2267 intel_mnemonic
= mnemonic_flag
;
2271 set_allow_index_reg (int flag
)
2273 allow_index_reg
= flag
;
2277 set_check (int what
)
2279 enum check_kind
*kind
;
2284 kind
= &operand_check
;
2295 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2298 int e
= get_symbol_name (&string
);
2300 if (strcmp (string
, "none") == 0)
2302 else if (strcmp (string
, "warning") == 0)
2303 *kind
= check_warning
;
2304 else if (strcmp (string
, "error") == 0)
2305 *kind
= check_error
;
2307 as_bad (_("bad argument to %s_check directive."), str
);
2308 (void) restore_line_pointer (e
);
2311 as_bad (_("missing argument for %s_check directive"), str
);
2313 demand_empty_rest_of_line ();
2317 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2318 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2320 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2321 static const char *arch
;
2323 /* Intel LIOM is only supported on ELF. */
2329 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2330 use default_arch. */
2331 arch
= cpu_arch_name
;
2333 arch
= default_arch
;
2336 /* If we are targeting Intel MCU, we must enable it. */
2337 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_IAMCU
2338 || new_flag
.bitfield
.cpuiamcu
)
2341 /* If we are targeting Intel L1OM, we must enable it. */
2342 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2343 || new_flag
.bitfield
.cpul1om
)
2346 /* If we are targeting Intel K1OM, we must enable it. */
2347 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2348 || new_flag
.bitfield
.cpuk1om
)
2351 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2356 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2360 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2363 int e
= get_symbol_name (&string
);
2365 i386_cpu_flags flags
;
2367 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2369 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2371 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2375 cpu_arch_name
= cpu_arch
[j
].name
;
2376 cpu_sub_arch_name
= NULL
;
2377 cpu_arch_flags
= cpu_arch
[j
].flags
;
2378 if (flag_code
== CODE_64BIT
)
2380 cpu_arch_flags
.bitfield
.cpu64
= 1;
2381 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2385 cpu_arch_flags
.bitfield
.cpu64
= 0;
2386 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2388 cpu_arch_isa
= cpu_arch
[j
].type
;
2389 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2390 if (!cpu_arch_tune_set
)
2392 cpu_arch_tune
= cpu_arch_isa
;
2393 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2398 flags
= cpu_flags_or (cpu_arch_flags
,
2401 if (!valid_iamcu_cpu_flags (&flags
))
2402 as_fatal (_("`%s' isn't valid for Intel MCU"),
2404 else if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2406 if (cpu_sub_arch_name
)
2408 char *name
= cpu_sub_arch_name
;
2409 cpu_sub_arch_name
= concat (name
,
2411 (const char *) NULL
);
2415 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2416 cpu_arch_flags
= flags
;
2417 cpu_arch_isa_flags
= flags
;
2419 (void) restore_line_pointer (e
);
2420 demand_empty_rest_of_line ();
2425 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2427 /* Disable an ISA entension. */
2428 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2429 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2431 flags
= cpu_flags_and_not (cpu_arch_flags
,
2432 cpu_noarch
[j
].flags
);
2433 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2435 if (cpu_sub_arch_name
)
2437 char *name
= cpu_sub_arch_name
;
2438 cpu_sub_arch_name
= concat (name
, string
,
2439 (const char *) NULL
);
2443 cpu_sub_arch_name
= xstrdup (string
);
2444 cpu_arch_flags
= flags
;
2445 cpu_arch_isa_flags
= flags
;
2447 (void) restore_line_pointer (e
);
2448 demand_empty_rest_of_line ();
2452 j
= ARRAY_SIZE (cpu_arch
);
2455 if (j
>= ARRAY_SIZE (cpu_arch
))
2456 as_bad (_("no such architecture: `%s'"), string
);
2458 *input_line_pointer
= e
;
2461 as_bad (_("missing cpu architecture"));
2463 no_cond_jump_promotion
= 0;
2464 if (*input_line_pointer
== ','
2465 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2470 ++input_line_pointer
;
2471 e
= get_symbol_name (&string
);
2473 if (strcmp (string
, "nojumps") == 0)
2474 no_cond_jump_promotion
= 1;
2475 else if (strcmp (string
, "jumps") == 0)
2478 as_bad (_("no such architecture modifier: `%s'"), string
);
2480 (void) restore_line_pointer (e
);
2483 demand_empty_rest_of_line ();
2486 enum bfd_architecture
2489 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2491 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2492 || flag_code
!= CODE_64BIT
)
2493 as_fatal (_("Intel L1OM is 64bit ELF only"));
2494 return bfd_arch_l1om
;
2496 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2498 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2499 || flag_code
!= CODE_64BIT
)
2500 as_fatal (_("Intel K1OM is 64bit ELF only"));
2501 return bfd_arch_k1om
;
2503 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2505 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2506 || flag_code
== CODE_64BIT
)
2507 as_fatal (_("Intel MCU is 32bit ELF only"));
2508 return bfd_arch_iamcu
;
2511 return bfd_arch_i386
;
2517 if (!strncmp (default_arch
, "x86_64", 6))
2519 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2521 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2522 || default_arch
[6] != '\0')
2523 as_fatal (_("Intel L1OM is 64bit ELF only"));
2524 return bfd_mach_l1om
;
2526 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2528 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2529 || default_arch
[6] != '\0')
2530 as_fatal (_("Intel K1OM is 64bit ELF only"));
2531 return bfd_mach_k1om
;
2533 else if (default_arch
[6] == '\0')
2534 return bfd_mach_x86_64
;
2536 return bfd_mach_x64_32
;
2538 else if (!strcmp (default_arch
, "i386")
2539 || !strcmp (default_arch
, "iamcu"))
2541 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2543 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
2544 as_fatal (_("Intel MCU is 32bit ELF only"));
2545 return bfd_mach_i386_iamcu
;
2548 return bfd_mach_i386_i386
;
2551 as_fatal (_("unknown architecture"));
2557 const char *hash_err
;
2559 /* Initialize op_hash hash table. */
2560 op_hash
= hash_new ();
2563 const insn_template
*optab
;
2564 templates
*core_optab
;
2566 /* Setup for loop. */
2568 core_optab
= XNEW (templates
);
2569 core_optab
->start
= optab
;
2574 if (optab
->name
== NULL
2575 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
2577 /* different name --> ship out current template list;
2578 add to hash table; & begin anew. */
2579 core_optab
->end
= optab
;
2580 hash_err
= hash_insert (op_hash
,
2582 (void *) core_optab
);
2585 as_fatal (_("can't hash %s: %s"),
2589 if (optab
->name
== NULL
)
2591 core_optab
= XNEW (templates
);
2592 core_optab
->start
= optab
;
2597 /* Initialize reg_hash hash table. */
2598 reg_hash
= hash_new ();
2600 const reg_entry
*regtab
;
2601 unsigned int regtab_size
= i386_regtab_size
;
2603 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
2605 hash_err
= hash_insert (reg_hash
, regtab
->reg_name
, (void *) regtab
);
2607 as_fatal (_("can't hash %s: %s"),
2613 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2618 for (c
= 0; c
< 256; c
++)
2623 mnemonic_chars
[c
] = c
;
2624 register_chars
[c
] = c
;
2625 operand_chars
[c
] = c
;
2627 else if (ISLOWER (c
))
2629 mnemonic_chars
[c
] = c
;
2630 register_chars
[c
] = c
;
2631 operand_chars
[c
] = c
;
2633 else if (ISUPPER (c
))
2635 mnemonic_chars
[c
] = TOLOWER (c
);
2636 register_chars
[c
] = mnemonic_chars
[c
];
2637 operand_chars
[c
] = c
;
2639 else if (c
== '{' || c
== '}')
2640 operand_chars
[c
] = c
;
2642 if (ISALPHA (c
) || ISDIGIT (c
))
2643 identifier_chars
[c
] = c
;
2646 identifier_chars
[c
] = c
;
2647 operand_chars
[c
] = c
;
2652 identifier_chars
['@'] = '@';
2655 identifier_chars
['?'] = '?';
2656 operand_chars
['?'] = '?';
2658 digit_chars
['-'] = '-';
2659 mnemonic_chars
['_'] = '_';
2660 mnemonic_chars
['-'] = '-';
2661 mnemonic_chars
['.'] = '.';
2662 identifier_chars
['_'] = '_';
2663 identifier_chars
['.'] = '.';
2665 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
2666 operand_chars
[(unsigned char) *p
] = *p
;
2669 if (flag_code
== CODE_64BIT
)
2671 #if defined (OBJ_COFF) && defined (TE_PE)
2672 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
2675 x86_dwarf2_return_column
= 16;
2677 x86_cie_data_alignment
= -8;
2681 x86_dwarf2_return_column
= 8;
2682 x86_cie_data_alignment
= -4;
2687 i386_print_statistics (FILE *file
)
2689 hash_print_statistics (file
, "i386 opcode", op_hash
);
2690 hash_print_statistics (file
, "i386 register", reg_hash
);
2695 /* Debugging routines for md_assemble. */
2696 static void pte (insn_template
*);
2697 static void pt (i386_operand_type
);
2698 static void pe (expressionS
*);
2699 static void ps (symbolS
*);
2702 pi (char *line
, i386_insn
*x
)
2706 fprintf (stdout
, "%s: template ", line
);
2708 fprintf (stdout
, " address: base %s index %s scale %x\n",
2709 x
->base_reg
? x
->base_reg
->reg_name
: "none",
2710 x
->index_reg
? x
->index_reg
->reg_name
: "none",
2711 x
->log2_scale_factor
);
2712 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
2713 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
2714 fprintf (stdout
, " sib: base %x index %x scale %x\n",
2715 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
2716 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
2717 (x
->rex
& REX_W
) != 0,
2718 (x
->rex
& REX_R
) != 0,
2719 (x
->rex
& REX_X
) != 0,
2720 (x
->rex
& REX_B
) != 0);
2721 for (j
= 0; j
< x
->operands
; j
++)
2723 fprintf (stdout
, " #%d: ", j
+ 1);
2725 fprintf (stdout
, "\n");
2726 if (x
->types
[j
].bitfield
.reg8
2727 || x
->types
[j
].bitfield
.reg16
2728 || x
->types
[j
].bitfield
.reg32
2729 || x
->types
[j
].bitfield
.reg64
2730 || x
->types
[j
].bitfield
.regmmx
2731 || x
->types
[j
].bitfield
.regxmm
2732 || x
->types
[j
].bitfield
.regymm
2733 || x
->types
[j
].bitfield
.regzmm
2734 || x
->types
[j
].bitfield
.sreg2
2735 || x
->types
[j
].bitfield
.sreg3
2736 || x
->types
[j
].bitfield
.control
2737 || x
->types
[j
].bitfield
.debug
2738 || x
->types
[j
].bitfield
.test
)
2739 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
2740 if (operand_type_check (x
->types
[j
], imm
))
2742 if (operand_type_check (x
->types
[j
], disp
))
2743 pe (x
->op
[j
].disps
);
2748 pte (insn_template
*t
)
2751 fprintf (stdout
, " %d operands ", t
->operands
);
2752 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
2753 if (t
->extension_opcode
!= None
)
2754 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
2755 if (t
->opcode_modifier
.d
)
2756 fprintf (stdout
, "D");
2757 if (t
->opcode_modifier
.w
)
2758 fprintf (stdout
, "W");
2759 fprintf (stdout
, "\n");
2760 for (j
= 0; j
< t
->operands
; j
++)
2762 fprintf (stdout
, " #%d type ", j
+ 1);
2763 pt (t
->operand_types
[j
]);
2764 fprintf (stdout
, "\n");
2771 fprintf (stdout
, " operation %d\n", e
->X_op
);
2772 fprintf (stdout
, " add_number %ld (%lx)\n",
2773 (long) e
->X_add_number
, (long) e
->X_add_number
);
2774 if (e
->X_add_symbol
)
2776 fprintf (stdout
, " add_symbol ");
2777 ps (e
->X_add_symbol
);
2778 fprintf (stdout
, "\n");
2782 fprintf (stdout
, " op_symbol ");
2783 ps (e
->X_op_symbol
);
2784 fprintf (stdout
, "\n");
2791 fprintf (stdout
, "%s type %s%s",
2793 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
2794 segment_name (S_GET_SEGMENT (s
)));
2797 static struct type_name
2799 i386_operand_type mask
;
2802 const type_names
[] =
2804 { OPERAND_TYPE_REG8
, "r8" },
2805 { OPERAND_TYPE_REG16
, "r16" },
2806 { OPERAND_TYPE_REG32
, "r32" },
2807 { OPERAND_TYPE_REG64
, "r64" },
2808 { OPERAND_TYPE_IMM8
, "i8" },
2809 { OPERAND_TYPE_IMM8
, "i8s" },
2810 { OPERAND_TYPE_IMM16
, "i16" },
2811 { OPERAND_TYPE_IMM32
, "i32" },
2812 { OPERAND_TYPE_IMM32S
, "i32s" },
2813 { OPERAND_TYPE_IMM64
, "i64" },
2814 { OPERAND_TYPE_IMM1
, "i1" },
2815 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
2816 { OPERAND_TYPE_DISP8
, "d8" },
2817 { OPERAND_TYPE_DISP16
, "d16" },
2818 { OPERAND_TYPE_DISP32
, "d32" },
2819 { OPERAND_TYPE_DISP32S
, "d32s" },
2820 { OPERAND_TYPE_DISP64
, "d64" },
2821 { OPERAND_TYPE_VEC_DISP8
, "Vector d8" },
2822 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
2823 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
2824 { OPERAND_TYPE_CONTROL
, "control reg" },
2825 { OPERAND_TYPE_TEST
, "test reg" },
2826 { OPERAND_TYPE_DEBUG
, "debug reg" },
2827 { OPERAND_TYPE_FLOATREG
, "FReg" },
2828 { OPERAND_TYPE_FLOATACC
, "FAcc" },
2829 { OPERAND_TYPE_SREG2
, "SReg2" },
2830 { OPERAND_TYPE_SREG3
, "SReg3" },
2831 { OPERAND_TYPE_ACC
, "Acc" },
2832 { OPERAND_TYPE_JUMPABSOLUTE
, "Jump Absolute" },
2833 { OPERAND_TYPE_REGMMX
, "rMMX" },
2834 { OPERAND_TYPE_REGXMM
, "rXMM" },
2835 { OPERAND_TYPE_REGYMM
, "rYMM" },
2836 { OPERAND_TYPE_REGZMM
, "rZMM" },
2837 { OPERAND_TYPE_REGMASK
, "Mask reg" },
2838 { OPERAND_TYPE_ESSEG
, "es" },
2842 pt (i386_operand_type t
)
2845 i386_operand_type a
;
2847 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
2849 a
= operand_type_and (t
, type_names
[j
].mask
);
2850 if (!operand_type_all_zero (&a
))
2851 fprintf (stdout
, "%s, ", type_names
[j
].name
);
2856 #endif /* DEBUG386 */
2858 static bfd_reloc_code_real_type
2859 reloc (unsigned int size
,
2862 bfd_reloc_code_real_type other
)
2864 if (other
!= NO_RELOC
)
2866 reloc_howto_type
*rel
;
2871 case BFD_RELOC_X86_64_GOT32
:
2872 return BFD_RELOC_X86_64_GOT64
;
2874 case BFD_RELOC_X86_64_GOTPLT64
:
2875 return BFD_RELOC_X86_64_GOTPLT64
;
2877 case BFD_RELOC_X86_64_PLTOFF64
:
2878 return BFD_RELOC_X86_64_PLTOFF64
;
2880 case BFD_RELOC_X86_64_GOTPC32
:
2881 other
= BFD_RELOC_X86_64_GOTPC64
;
2883 case BFD_RELOC_X86_64_GOTPCREL
:
2884 other
= BFD_RELOC_X86_64_GOTPCREL64
;
2886 case BFD_RELOC_X86_64_TPOFF32
:
2887 other
= BFD_RELOC_X86_64_TPOFF64
;
2889 case BFD_RELOC_X86_64_DTPOFF32
:
2890 other
= BFD_RELOC_X86_64_DTPOFF64
;
2896 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2897 if (other
== BFD_RELOC_SIZE32
)
2900 other
= BFD_RELOC_SIZE64
;
2903 as_bad (_("there are no pc-relative size relocations"));
2909 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2910 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
2913 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
2915 as_bad (_("unknown relocation (%u)"), other
);
2916 else if (size
!= bfd_get_reloc_size (rel
))
2917 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2918 bfd_get_reloc_size (rel
),
2920 else if (pcrel
&& !rel
->pc_relative
)
2921 as_bad (_("non-pc-relative relocation for pc-relative field"));
2922 else if ((rel
->complain_on_overflow
== complain_overflow_signed
2924 || (rel
->complain_on_overflow
== complain_overflow_unsigned
2926 as_bad (_("relocated field and relocation type differ in signedness"));
2935 as_bad (_("there are no unsigned pc-relative relocations"));
2938 case 1: return BFD_RELOC_8_PCREL
;
2939 case 2: return BFD_RELOC_16_PCREL
;
2940 case 4: return BFD_RELOC_32_PCREL
;
2941 case 8: return BFD_RELOC_64_PCREL
;
2943 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
2950 case 4: return BFD_RELOC_X86_64_32S
;
2955 case 1: return BFD_RELOC_8
;
2956 case 2: return BFD_RELOC_16
;
2957 case 4: return BFD_RELOC_32
;
2958 case 8: return BFD_RELOC_64
;
2960 as_bad (_("cannot do %s %u byte relocation"),
2961 sign
> 0 ? "signed" : "unsigned", size
);
2967 /* Here we decide which fixups can be adjusted to make them relative to
2968 the beginning of the section instead of the symbol. Basically we need
2969 to make sure that the dynamic relocations are done correctly, so in
2970 some cases we force the original symbol to be used. */
2973 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
2975 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2979 /* Don't adjust pc-relative references to merge sections in 64-bit
2981 if (use_rela_relocations
2982 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
2986 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2987 and changed later by validate_fix. */
2988 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
2989 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
2992 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
2993 for size relocations. */
2994 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
2995 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
2996 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
2997 || fixP
->fx_r_type
== BFD_RELOC_386_PLT32
2998 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
2999 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3000 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3001 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3002 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3003 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3004 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3005 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3006 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3007 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3008 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3009 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3010 || fixP
->fx_r_type
== BFD_RELOC_X86_64_PLT32
3011 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3012 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3013 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3014 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3015 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3016 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3017 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3018 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3019 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3020 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3021 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3022 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3023 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3024 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3025 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3026 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
3033 intel_float_operand (const char *mnemonic
)
3035 /* Note that the value returned is meaningful only for opcodes with (memory)
3036 operands, hence the code here is free to improperly handle opcodes that
3037 have no operands (for better performance and smaller code). */
3039 if (mnemonic
[0] != 'f')
3040 return 0; /* non-math */
3042 switch (mnemonic
[1])
3044 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
3045 the fs segment override prefix not currently handled because no
3046 call path can make opcodes without operands get here */
3048 return 2 /* integer op */;
3050 if (mnemonic
[2] == 'd' && (mnemonic
[3] == 'c' || mnemonic
[3] == 'e'))
3051 return 3; /* fldcw/fldenv */
3054 if (mnemonic
[2] != 'o' /* fnop */)
3055 return 3; /* non-waiting control op */
3058 if (mnemonic
[2] == 's')
3059 return 3; /* frstor/frstpm */
3062 if (mnemonic
[2] == 'a')
3063 return 3; /* fsave */
3064 if (mnemonic
[2] == 't')
3066 switch (mnemonic
[3])
3068 case 'c': /* fstcw */
3069 case 'd': /* fstdw */
3070 case 'e': /* fstenv */
3071 case 's': /* fsts[gw] */
3077 if (mnemonic
[2] == 'r' || mnemonic
[2] == 's')
3078 return 0; /* fxsave/fxrstor are not really math ops */
3085 /* Build the VEX prefix. */
3088 build_vex_prefix (const insn_template
*t
)
3090 unsigned int register_specifier
;
3091 unsigned int implied_prefix
;
3092 unsigned int vector_length
;
3094 /* Check register specifier. */
3095 if (i
.vex
.register_specifier
)
3097 register_specifier
=
3098 ~register_number (i
.vex
.register_specifier
) & 0xf;
3099 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3102 register_specifier
= 0xf;
3104 /* Use 2-byte VEX prefix by swappping destination and source
3107 && i
.operands
== i
.reg_operands
3108 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3109 && i
.tm
.opcode_modifier
.s
3112 unsigned int xchg
= i
.operands
- 1;
3113 union i386_op temp_op
;
3114 i386_operand_type temp_type
;
3116 temp_type
= i
.types
[xchg
];
3117 i
.types
[xchg
] = i
.types
[0];
3118 i
.types
[0] = temp_type
;
3119 temp_op
= i
.op
[xchg
];
3120 i
.op
[xchg
] = i
.op
[0];
3123 gas_assert (i
.rm
.mode
== 3);
3127 i
.rm
.regmem
= i
.rm
.reg
;
3130 /* Use the next insn. */
3134 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3135 vector_length
= avxscalar
;
3137 vector_length
= i
.tm
.opcode_modifier
.vex
== VEX256
? 1 : 0;
3139 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3144 case DATA_PREFIX_OPCODE
:
3147 case REPE_PREFIX_OPCODE
:
3150 case REPNE_PREFIX_OPCODE
:
3157 /* Use 2-byte VEX prefix if possible. */
3158 if (i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3159 && i
.tm
.opcode_modifier
.vexw
!= VEXW1
3160 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3162 /* 2-byte VEX prefix. */
3166 i
.vex
.bytes
[0] = 0xc5;
3168 /* Check the REX.R bit. */
3169 r
= (i
.rex
& REX_R
) ? 0 : 1;
3170 i
.vex
.bytes
[1] = (r
<< 7
3171 | register_specifier
<< 3
3172 | vector_length
<< 2
3177 /* 3-byte VEX prefix. */
3182 switch (i
.tm
.opcode_modifier
.vexopcode
)
3186 i
.vex
.bytes
[0] = 0xc4;
3190 i
.vex
.bytes
[0] = 0xc4;
3194 i
.vex
.bytes
[0] = 0xc4;
3198 i
.vex
.bytes
[0] = 0x8f;
3202 i
.vex
.bytes
[0] = 0x8f;
3206 i
.vex
.bytes
[0] = 0x8f;
3212 /* The high 3 bits of the second VEX byte are 1's compliment
3213 of RXB bits from REX. */
3214 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3216 /* Check the REX.W bit. */
3217 w
= (i
.rex
& REX_W
) ? 1 : 0;
3218 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
3221 i
.vex
.bytes
[2] = (w
<< 7
3222 | register_specifier
<< 3
3223 | vector_length
<< 2
3228 /* Build the EVEX prefix. */
3231 build_evex_prefix (void)
3233 unsigned int register_specifier
;
3234 unsigned int implied_prefix
;
3236 rex_byte vrex_used
= 0;
3238 /* Check register specifier. */
3239 if (i
.vex
.register_specifier
)
3241 gas_assert ((i
.vrex
& REX_X
) == 0);
3243 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3244 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3245 register_specifier
+= 8;
3246 /* The upper 16 registers are encoded in the fourth byte of the
3248 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3249 i
.vex
.bytes
[3] = 0x8;
3250 register_specifier
= ~register_specifier
& 0xf;
3254 register_specifier
= 0xf;
3256 /* Encode upper 16 vector index register in the fourth byte of
3258 if (!(i
.vrex
& REX_X
))
3259 i
.vex
.bytes
[3] = 0x8;
3264 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3269 case DATA_PREFIX_OPCODE
:
3272 case REPE_PREFIX_OPCODE
:
3275 case REPNE_PREFIX_OPCODE
:
3282 /* 4 byte EVEX prefix. */
3284 i
.vex
.bytes
[0] = 0x62;
3287 switch (i
.tm
.opcode_modifier
.vexopcode
)
3303 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3305 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3307 /* The fifth bit of the second EVEX byte is 1's compliment of the
3308 REX_R bit in VREX. */
3309 if (!(i
.vrex
& REX_R
))
3310 i
.vex
.bytes
[1] |= 0x10;
3314 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3316 /* When all operands are registers, the REX_X bit in REX is not
3317 used. We reuse it to encode the upper 16 registers, which is
3318 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3319 as 1's compliment. */
3320 if ((i
.vrex
& REX_B
))
3323 i
.vex
.bytes
[1] &= ~0x40;
3327 /* EVEX instructions shouldn't need the REX prefix. */
3328 i
.vrex
&= ~vrex_used
;
3329 gas_assert (i
.vrex
== 0);
3331 /* Check the REX.W bit. */
3332 w
= (i
.rex
& REX_W
) ? 1 : 0;
3333 if (i
.tm
.opcode_modifier
.vexw
)
3335 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
3338 /* If w is not set it means we are dealing with WIG instruction. */
3341 if (evexwig
== evexw1
)
3345 /* Encode the U bit. */
3346 implied_prefix
|= 0x4;
3348 /* The third byte of the EVEX prefix. */
3349 i
.vex
.bytes
[2] = (w
<< 7 | register_specifier
<< 3 | implied_prefix
);
3351 /* The fourth byte of the EVEX prefix. */
3352 /* The zeroing-masking bit. */
3353 if (i
.mask
&& i
.mask
->zeroing
)
3354 i
.vex
.bytes
[3] |= 0x80;
3356 /* Don't always set the broadcast bit if there is no RC. */
3359 /* Encode the vector length. */
3360 unsigned int vec_length
;
3362 switch (i
.tm
.opcode_modifier
.evex
)
3364 case EVEXLIG
: /* LL' is ignored */
3365 vec_length
= evexlig
<< 5;
3368 vec_length
= 0 << 5;
3371 vec_length
= 1 << 5;
3374 vec_length
= 2 << 5;
3380 i
.vex
.bytes
[3] |= vec_length
;
3381 /* Encode the broadcast bit. */
3383 i
.vex
.bytes
[3] |= 0x10;
3387 if (i
.rounding
->type
!= saeonly
)
3388 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
->type
<< 5);
3390 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3393 if (i
.mask
&& i
.mask
->mask
)
3394 i
.vex
.bytes
[3] |= i
.mask
->mask
->reg_num
;
3398 process_immext (void)
3402 if ((i
.tm
.cpu_flags
.bitfield
.cpusse3
|| i
.tm
.cpu_flags
.bitfield
.cpusvme
)
3405 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3406 with an opcode suffix which is coded in the same place as an
3407 8-bit immediate field would be.
3408 Here we check those operands and remove them afterwards. */
3411 for (x
= 0; x
< i
.operands
; x
++)
3412 if (register_number (i
.op
[x
].regs
) != x
)
3413 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3414 register_prefix
, i
.op
[x
].regs
->reg_name
, x
+ 1,
3420 if (i
.tm
.cpu_flags
.bitfield
.cpumwaitx
&& i
.operands
> 0)
3422 /* MONITORX/MWAITX instructions have fixed operands with an opcode
3423 suffix which is coded in the same place as an 8-bit immediate
3425 Here we check those operands and remove them afterwards. */
3428 if (i
.operands
!= 3)
3431 for (x
= 0; x
< 2; x
++)
3432 if (register_number (i
.op
[x
].regs
) != x
)
3433 goto bad_register_operand
;
3435 /* Check for third operand for mwaitx/monitorx insn. */
3436 if (register_number (i
.op
[x
].regs
)
3437 != (x
+ (i
.tm
.extension_opcode
== 0xfb)))
3439 bad_register_operand
:
3440 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3441 register_prefix
, i
.op
[x
].regs
->reg_name
, x
+1,
3448 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3449 which is coded in the same place as an 8-bit immediate field
3450 would be. Here we fake an 8-bit immediate operand from the
3451 opcode suffix stored in tm.extension_opcode.
3453 AVX instructions also use this encoding, for some of
3454 3 argument instructions. */
3456 gas_assert (i
.imm_operands
<= 1
3458 || ((i
.tm
.opcode_modifier
.vex
3459 || i
.tm
.opcode_modifier
.evex
)
3460 && i
.operands
<= 4)));
3462 exp
= &im_expressions
[i
.imm_operands
++];
3463 i
.op
[i
.operands
].imms
= exp
;
3464 i
.types
[i
.operands
] = imm8
;
3466 exp
->X_op
= O_constant
;
3467 exp
->X_add_number
= i
.tm
.extension_opcode
;
3468 i
.tm
.extension_opcode
= None
;
3475 switch (i
.tm
.opcode_modifier
.hleprefixok
)
3480 as_bad (_("invalid instruction `%s' after `%s'"),
3481 i
.tm
.name
, i
.hle_prefix
);
3484 if (i
.prefix
[LOCK_PREFIX
])
3486 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
3490 case HLEPrefixRelease
:
3491 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
3493 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3497 if (i
.mem_operands
== 0
3498 || !operand_type_check (i
.types
[i
.operands
- 1], anymem
))
3500 as_bad (_("memory destination needed for instruction `%s'"
3501 " after `xrelease'"), i
.tm
.name
);
3508 /* This is the guts of the machine-dependent assembler. LINE points to a
3509 machine dependent instruction. This function is supposed to emit
3510 the frags/bytes it assembles to. */
3513 md_assemble (char *line
)
3516 char mnemonic
[MAX_MNEM_SIZE
];
3517 const insn_template
*t
;
3519 /* Initialize globals. */
3520 memset (&i
, '\0', sizeof (i
));
3521 for (j
= 0; j
< MAX_OPERANDS
; j
++)
3522 i
.reloc
[j
] = NO_RELOC
;
3523 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
3524 memset (im_expressions
, '\0', sizeof (im_expressions
));
3525 save_stack_p
= save_stack
;
3527 /* First parse an instruction mnemonic & call i386_operand for the operands.
3528 We assume that the scrubber has arranged it so that line[0] is the valid
3529 start of a (possibly prefixed) mnemonic. */
3531 line
= parse_insn (line
, mnemonic
);
3535 line
= parse_operands (line
, mnemonic
);
3540 /* Now we've parsed the mnemonic into a set of templates, and have the
3541 operands at hand. */
3543 /* All intel opcodes have reversed operands except for "bound" and
3544 "enter". We also don't reverse intersegment "jmp" and "call"
3545 instructions with 2 immediate operands so that the immediate segment
3546 precedes the offset, as it does when in AT&T mode. */
3549 && (strcmp (mnemonic
, "bound") != 0)
3550 && (strcmp (mnemonic
, "invlpga") != 0)
3551 && !(operand_type_check (i
.types
[0], imm
)
3552 && operand_type_check (i
.types
[1], imm
)))
3555 /* The order of the immediates should be reversed
3556 for 2 immediates extrq and insertq instructions */
3557 if (i
.imm_operands
== 2
3558 && (strcmp (mnemonic
, "extrq") == 0
3559 || strcmp (mnemonic
, "insertq") == 0))
3560 swap_2_operands (0, 1);
3565 /* Don't optimize displacement for movabs since it only takes 64bit
3568 && i
.disp_encoding
!= disp_encoding_32bit
3569 && (flag_code
!= CODE_64BIT
3570 || strcmp (mnemonic
, "movabs") != 0))
3573 /* Next, we find a template that matches the given insn,
3574 making sure the overlap of the given operands types is consistent
3575 with the template operand types. */
3577 if (!(t
= match_template ()))
3580 if (sse_check
!= check_none
3581 && !i
.tm
.opcode_modifier
.noavx
3582 && (i
.tm
.cpu_flags
.bitfield
.cpusse
3583 || i
.tm
.cpu_flags
.bitfield
.cpusse2
3584 || i
.tm
.cpu_flags
.bitfield
.cpusse3
3585 || i
.tm
.cpu_flags
.bitfield
.cpussse3
3586 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
3587 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
))
3589 (sse_check
== check_warning
3591 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
3594 /* Zap movzx and movsx suffix. The suffix has been set from
3595 "word ptr" or "byte ptr" on the source operand in Intel syntax
3596 or extracted from mnemonic in AT&T syntax. But we'll use
3597 the destination register to choose the suffix for encoding. */
3598 if ((i
.tm
.base_opcode
& ~9) == 0x0fb6)
3600 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3601 there is no suffix, the default will be byte extension. */
3602 if (i
.reg_operands
!= 2
3605 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
3610 if (i
.tm
.opcode_modifier
.fwait
)
3611 if (!add_prefix (FWAIT_OPCODE
))
3614 /* Check if REP prefix is OK. */
3615 if (i
.rep_prefix
&& !i
.tm
.opcode_modifier
.repprefixok
)
3617 as_bad (_("invalid instruction `%s' after `%s'"),
3618 i
.tm
.name
, i
.rep_prefix
);
3622 /* Check for lock without a lockable instruction. Destination operand
3623 must be memory unless it is xchg (0x86). */
3624 if (i
.prefix
[LOCK_PREFIX
]
3625 && (!i
.tm
.opcode_modifier
.islockable
3626 || i
.mem_operands
== 0
3627 || (i
.tm
.base_opcode
!= 0x86
3628 && !operand_type_check (i
.types
[i
.operands
- 1], anymem
))))
3630 as_bad (_("expecting lockable instruction after `lock'"));
3634 /* Check if HLE prefix is OK. */
3635 if (i
.hle_prefix
&& !check_hle ())
3638 /* Check BND prefix. */
3639 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
3640 as_bad (_("expecting valid branch instruction after `bnd'"));
3642 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
3643 && flag_code
== CODE_64BIT
3644 && i
.prefix
[ADDR_PREFIX
])
3645 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
3647 /* Insert BND prefix. */
3649 && i
.tm
.opcode_modifier
.bndprefixok
3650 && !i
.prefix
[BND_PREFIX
])
3651 add_prefix (BND_PREFIX_OPCODE
);
3653 /* Check string instruction segment overrides. */
3654 if (i
.tm
.opcode_modifier
.isstring
&& i
.mem_operands
!= 0)
3656 if (!check_string ())
3658 i
.disp_operands
= 0;
3661 if (!process_suffix ())
3664 /* Update operand types. */
3665 for (j
= 0; j
< i
.operands
; j
++)
3666 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
3668 /* Make still unresolved immediate matches conform to size of immediate
3669 given in i.suffix. */
3670 if (!finalize_imm ())
3673 if (i
.types
[0].bitfield
.imm1
)
3674 i
.imm_operands
= 0; /* kludge for shift insns. */
3676 /* We only need to check those implicit registers for instructions
3677 with 3 operands or less. */
3678 if (i
.operands
<= 3)
3679 for (j
= 0; j
< i
.operands
; j
++)
3680 if (i
.types
[j
].bitfield
.inoutportreg
3681 || i
.types
[j
].bitfield
.shiftcount
3682 || i
.types
[j
].bitfield
.acc
3683 || i
.types
[j
].bitfield
.floatacc
)
3686 /* ImmExt should be processed after SSE2AVX. */
3687 if (!i
.tm
.opcode_modifier
.sse2avx
3688 && i
.tm
.opcode_modifier
.immext
)
3691 /* For insns with operands there are more diddles to do to the opcode. */
3694 if (!process_operands ())
3697 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
3699 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3700 as_warn (_("translating to `%sp'"), i
.tm
.name
);
3703 if (i
.tm
.opcode_modifier
.vex
|| i
.tm
.opcode_modifier
.evex
)
3705 if (flag_code
== CODE_16BIT
)
3707 as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
3712 if (i
.tm
.opcode_modifier
.vex
)
3713 build_vex_prefix (t
);
3715 build_evex_prefix ();
3718 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3719 instructions may define INT_OPCODE as well, so avoid this corner
3720 case for those instructions that use MODRM. */
3721 if (i
.tm
.base_opcode
== INT_OPCODE
3722 && !i
.tm
.opcode_modifier
.modrm
3723 && i
.op
[0].imms
->X_add_number
== 3)
3725 i
.tm
.base_opcode
= INT3_OPCODE
;
3729 if ((i
.tm
.opcode_modifier
.jump
3730 || i
.tm
.opcode_modifier
.jumpbyte
3731 || i
.tm
.opcode_modifier
.jumpdword
)
3732 && i
.op
[0].disps
->X_op
== O_constant
)
3734 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3735 the absolute address given by the constant. Since ix86 jumps and
3736 calls are pc relative, we need to generate a reloc. */
3737 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
3738 i
.op
[0].disps
->X_op
= O_symbol
;
3741 if (i
.tm
.opcode_modifier
.rex64
)
3744 /* For 8 bit registers we need an empty rex prefix. Also if the
3745 instruction already has a prefix, we need to convert old
3746 registers to new ones. */
3748 if ((i
.types
[0].bitfield
.reg8
3749 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
3750 || (i
.types
[1].bitfield
.reg8
3751 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
3752 || ((i
.types
[0].bitfield
.reg8
3753 || i
.types
[1].bitfield
.reg8
)
3758 i
.rex
|= REX_OPCODE
;
3759 for (x
= 0; x
< 2; x
++)
3761 /* Look for 8 bit operand that uses old registers. */
3762 if (i
.types
[x
].bitfield
.reg8
3763 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
3765 /* In case it is "hi" register, give up. */
3766 if (i
.op
[x
].regs
->reg_num
> 3)
3767 as_bad (_("can't encode register '%s%s' in an "
3768 "instruction requiring REX prefix."),
3769 register_prefix
, i
.op
[x
].regs
->reg_name
);
3771 /* Otherwise it is equivalent to the extended register.
3772 Since the encoding doesn't change this is merely
3773 cosmetic cleanup for debug output. */
3775 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
3781 add_prefix (REX_OPCODE
| i
.rex
);
3783 /* We are ready to output the insn. */
3788 parse_insn (char *line
, char *mnemonic
)
3791 char *token_start
= l
;
3794 const insn_template
*t
;
3800 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
3805 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
3807 as_bad (_("no such instruction: `%s'"), token_start
);
3812 if (!is_space_char (*l
)
3813 && *l
!= END_OF_INSN
3815 || (*l
!= PREFIX_SEPARATOR
3818 as_bad (_("invalid character %s in mnemonic"),
3819 output_invalid (*l
));
3822 if (token_start
== l
)
3824 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
3825 as_bad (_("expecting prefix; got nothing"));
3827 as_bad (_("expecting mnemonic; got nothing"));
3831 /* Look up instruction (or prefix) via hash table. */
3832 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
3834 if (*l
!= END_OF_INSN
3835 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
3836 && current_templates
3837 && current_templates
->start
->opcode_modifier
.isprefix
)
3839 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
3841 as_bad ((flag_code
!= CODE_64BIT
3842 ? _("`%s' is only supported in 64-bit mode")
3843 : _("`%s' is not supported in 64-bit mode")),
3844 current_templates
->start
->name
);
3847 /* If we are in 16-bit mode, do not allow addr16 or data16.
3848 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3849 if ((current_templates
->start
->opcode_modifier
.size16
3850 || current_templates
->start
->opcode_modifier
.size32
)
3851 && flag_code
!= CODE_64BIT
3852 && (current_templates
->start
->opcode_modifier
.size32
3853 ^ (flag_code
== CODE_16BIT
)))
3855 as_bad (_("redundant %s prefix"),
3856 current_templates
->start
->name
);
3859 /* Add prefix, checking for repeated prefixes. */
3860 switch (add_prefix (current_templates
->start
->base_opcode
))
3865 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
3866 i
.hle_prefix
= current_templates
->start
->name
;
3867 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
3868 i
.bnd_prefix
= current_templates
->start
->name
;
3870 i
.rep_prefix
= current_templates
->start
->name
;
3875 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3882 if (!current_templates
)
3884 /* Check if we should swap operand or force 32bit displacement in
3886 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
3888 else if (mnem_p
- 3 == dot_p
3891 i
.disp_encoding
= disp_encoding_8bit
;
3892 else if (mnem_p
- 4 == dot_p
3896 i
.disp_encoding
= disp_encoding_32bit
;
3901 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
3904 if (!current_templates
)
3907 /* See if we can get a match by trimming off a suffix. */
3910 case WORD_MNEM_SUFFIX
:
3911 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
3912 i
.suffix
= SHORT_MNEM_SUFFIX
;
3914 case BYTE_MNEM_SUFFIX
:
3915 case QWORD_MNEM_SUFFIX
:
3916 i
.suffix
= mnem_p
[-1];
3918 current_templates
= (const templates
*) hash_find (op_hash
,
3921 case SHORT_MNEM_SUFFIX
:
3922 case LONG_MNEM_SUFFIX
:
3925 i
.suffix
= mnem_p
[-1];
3927 current_templates
= (const templates
*) hash_find (op_hash
,
3936 if (intel_float_operand (mnemonic
) == 1)
3937 i
.suffix
= SHORT_MNEM_SUFFIX
;
3939 i
.suffix
= LONG_MNEM_SUFFIX
;
3941 current_templates
= (const templates
*) hash_find (op_hash
,
3946 if (!current_templates
)
3948 as_bad (_("no such instruction: `%s'"), token_start
);
3953 if (current_templates
->start
->opcode_modifier
.jump
3954 || current_templates
->start
->opcode_modifier
.jumpbyte
)
3956 /* Check for a branch hint. We allow ",pt" and ",pn" for
3957 predict taken and predict not taken respectively.
3958 I'm not sure that branch hints actually do anything on loop
3959 and jcxz insns (JumpByte) for current Pentium4 chips. They
3960 may work in the future and it doesn't hurt to accept them
3962 if (l
[0] == ',' && l
[1] == 'p')
3966 if (!add_prefix (DS_PREFIX_OPCODE
))
3970 else if (l
[2] == 'n')
3972 if (!add_prefix (CS_PREFIX_OPCODE
))
3978 /* Any other comma loses. */
3981 as_bad (_("invalid character %s in mnemonic"),
3982 output_invalid (*l
));
3986 /* Check if instruction is supported on specified architecture. */
3988 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
3990 supported
|= cpu_flags_match (t
);
3991 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
3995 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
3997 as_bad (flag_code
== CODE_64BIT
3998 ? _("`%s' is not supported in 64-bit mode")
3999 : _("`%s' is only supported in 64-bit mode"),
4000 current_templates
->start
->name
);
4003 if (supported
!= CPU_FLAGS_PERFECT_MATCH
)
4005 as_bad (_("`%s' is not supported on `%s%s'"),
4006 current_templates
->start
->name
,
4007 cpu_arch_name
? cpu_arch_name
: default_arch
,
4008 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
4013 if (!cpu_arch_flags
.bitfield
.cpui386
4014 && (flag_code
!= CODE_16BIT
))
4016 as_warn (_("use .code16 to ensure correct addressing mode"));
4023 parse_operands (char *l
, const char *mnemonic
)
4027 /* 1 if operand is pending after ','. */
4028 unsigned int expecting_operand
= 0;
4030 /* Non-zero if operand parens not balanced. */
4031 unsigned int paren_not_balanced
;
4033 while (*l
!= END_OF_INSN
)
4035 /* Skip optional white space before operand. */
4036 if (is_space_char (*l
))
4038 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
4040 as_bad (_("invalid character %s before operand %d"),
4041 output_invalid (*l
),
4045 token_start
= l
; /* After white space. */
4046 paren_not_balanced
= 0;
4047 while (paren_not_balanced
|| *l
!= ',')
4049 if (*l
== END_OF_INSN
)
4051 if (paren_not_balanced
)
4054 as_bad (_("unbalanced parenthesis in operand %d."),
4057 as_bad (_("unbalanced brackets in operand %d."),
4062 break; /* we are done */
4064 else if (!is_operand_char (*l
) && !is_space_char (*l
) && *l
!= '"')
4066 as_bad (_("invalid character %s in operand %d"),
4067 output_invalid (*l
),
4074 ++paren_not_balanced
;
4076 --paren_not_balanced
;
4081 ++paren_not_balanced
;
4083 --paren_not_balanced
;
4087 if (l
!= token_start
)
4088 { /* Yes, we've read in another operand. */
4089 unsigned int operand_ok
;
4090 this_operand
= i
.operands
++;
4091 i
.types
[this_operand
].bitfield
.unspecified
= 1;
4092 if (i
.operands
> MAX_OPERANDS
)
4094 as_bad (_("spurious operands; (%d operands/instruction max)"),
4098 /* Now parse operand adding info to 'i' as we go along. */
4099 END_STRING_AND_SAVE (l
);
4103 i386_intel_operand (token_start
,
4104 intel_float_operand (mnemonic
));
4106 operand_ok
= i386_att_operand (token_start
);
4108 RESTORE_END_STRING (l
);
4114 if (expecting_operand
)
4116 expecting_operand_after_comma
:
4117 as_bad (_("expecting operand after ','; got nothing"));
4122 as_bad (_("expecting operand before ','; got nothing"));
4127 /* Now *l must be either ',' or END_OF_INSN. */
4130 if (*++l
== END_OF_INSN
)
4132 /* Just skip it, if it's \n complain. */
4133 goto expecting_operand_after_comma
;
4135 expecting_operand
= 1;
4142 swap_2_operands (int xchg1
, int xchg2
)
4144 union i386_op temp_op
;
4145 i386_operand_type temp_type
;
4146 enum bfd_reloc_code_real temp_reloc
;
4148 temp_type
= i
.types
[xchg2
];
4149 i
.types
[xchg2
] = i
.types
[xchg1
];
4150 i
.types
[xchg1
] = temp_type
;
4151 temp_op
= i
.op
[xchg2
];
4152 i
.op
[xchg2
] = i
.op
[xchg1
];
4153 i
.op
[xchg1
] = temp_op
;
4154 temp_reloc
= i
.reloc
[xchg2
];
4155 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
4156 i
.reloc
[xchg1
] = temp_reloc
;
4160 if (i
.mask
->operand
== xchg1
)
4161 i
.mask
->operand
= xchg2
;
4162 else if (i
.mask
->operand
== xchg2
)
4163 i
.mask
->operand
= xchg1
;
4167 if (i
.broadcast
->operand
== xchg1
)
4168 i
.broadcast
->operand
= xchg2
;
4169 else if (i
.broadcast
->operand
== xchg2
)
4170 i
.broadcast
->operand
= xchg1
;
4174 if (i
.rounding
->operand
== xchg1
)
4175 i
.rounding
->operand
= xchg2
;
4176 else if (i
.rounding
->operand
== xchg2
)
4177 i
.rounding
->operand
= xchg1
;
4182 swap_operands (void)
4188 swap_2_operands (1, i
.operands
- 2);
4191 swap_2_operands (0, i
.operands
- 1);
4197 if (i
.mem_operands
== 2)
4199 const seg_entry
*temp_seg
;
4200 temp_seg
= i
.seg
[0];
4201 i
.seg
[0] = i
.seg
[1];
4202 i
.seg
[1] = temp_seg
;
4206 /* Try to ensure constant immediates are represented in the smallest
4211 char guess_suffix
= 0;
4215 guess_suffix
= i
.suffix
;
4216 else if (i
.reg_operands
)
4218 /* Figure out a suffix from the last register operand specified.
4219 We can't do this properly yet, ie. excluding InOutPortReg,
4220 but the following works for instructions with immediates.
4221 In any case, we can't set i.suffix yet. */
4222 for (op
= i
.operands
; --op
>= 0;)
4223 if (i
.types
[op
].bitfield
.reg8
)
4225 guess_suffix
= BYTE_MNEM_SUFFIX
;
4228 else if (i
.types
[op
].bitfield
.reg16
)
4230 guess_suffix
= WORD_MNEM_SUFFIX
;
4233 else if (i
.types
[op
].bitfield
.reg32
)
4235 guess_suffix
= LONG_MNEM_SUFFIX
;
4238 else if (i
.types
[op
].bitfield
.reg64
)
4240 guess_suffix
= QWORD_MNEM_SUFFIX
;
4244 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
4245 guess_suffix
= WORD_MNEM_SUFFIX
;
4247 for (op
= i
.operands
; --op
>= 0;)
4248 if (operand_type_check (i
.types
[op
], imm
))
4250 switch (i
.op
[op
].imms
->X_op
)
4253 /* If a suffix is given, this operand may be shortened. */
4254 switch (guess_suffix
)
4256 case LONG_MNEM_SUFFIX
:
4257 i
.types
[op
].bitfield
.imm32
= 1;
4258 i
.types
[op
].bitfield
.imm64
= 1;
4260 case WORD_MNEM_SUFFIX
:
4261 i
.types
[op
].bitfield
.imm16
= 1;
4262 i
.types
[op
].bitfield
.imm32
= 1;
4263 i
.types
[op
].bitfield
.imm32s
= 1;
4264 i
.types
[op
].bitfield
.imm64
= 1;
4266 case BYTE_MNEM_SUFFIX
:
4267 i
.types
[op
].bitfield
.imm8
= 1;
4268 i
.types
[op
].bitfield
.imm8s
= 1;
4269 i
.types
[op
].bitfield
.imm16
= 1;
4270 i
.types
[op
].bitfield
.imm32
= 1;
4271 i
.types
[op
].bitfield
.imm32s
= 1;
4272 i
.types
[op
].bitfield
.imm64
= 1;
4276 /* If this operand is at most 16 bits, convert it
4277 to a signed 16 bit number before trying to see
4278 whether it will fit in an even smaller size.
4279 This allows a 16-bit operand such as $0xffe0 to
4280 be recognised as within Imm8S range. */
4281 if ((i
.types
[op
].bitfield
.imm16
)
4282 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
4284 i
.op
[op
].imms
->X_add_number
=
4285 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
4288 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
4289 if ((i
.types
[op
].bitfield
.imm32
)
4290 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
4293 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
4294 ^ ((offsetT
) 1 << 31))
4295 - ((offsetT
) 1 << 31));
4299 = operand_type_or (i
.types
[op
],
4300 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
4302 /* We must avoid matching of Imm32 templates when 64bit
4303 only immediate is available. */
4304 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
4305 i
.types
[op
].bitfield
.imm32
= 0;
4312 /* Symbols and expressions. */
4314 /* Convert symbolic operand to proper sizes for matching, but don't
4315 prevent matching a set of insns that only supports sizes other
4316 than those matching the insn suffix. */
4318 i386_operand_type mask
, allowed
;
4319 const insn_template
*t
;
4321 operand_type_set (&mask
, 0);
4322 operand_type_set (&allowed
, 0);
4324 for (t
= current_templates
->start
;
4325 t
< current_templates
->end
;
4327 allowed
= operand_type_or (allowed
,
4328 t
->operand_types
[op
]);
4329 switch (guess_suffix
)
4331 case QWORD_MNEM_SUFFIX
:
4332 mask
.bitfield
.imm64
= 1;
4333 mask
.bitfield
.imm32s
= 1;
4335 case LONG_MNEM_SUFFIX
:
4336 mask
.bitfield
.imm32
= 1;
4338 case WORD_MNEM_SUFFIX
:
4339 mask
.bitfield
.imm16
= 1;
4341 case BYTE_MNEM_SUFFIX
:
4342 mask
.bitfield
.imm8
= 1;
4347 allowed
= operand_type_and (mask
, allowed
);
4348 if (!operand_type_all_zero (&allowed
))
4349 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
4356 /* Try to use the smallest displacement type too. */
4358 optimize_disp (void)
4362 for (op
= i
.operands
; --op
>= 0;)
4363 if (operand_type_check (i
.types
[op
], disp
))
4365 if (i
.op
[op
].disps
->X_op
== O_constant
)
4367 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
4369 if (i
.types
[op
].bitfield
.disp16
4370 && (op_disp
& ~(offsetT
) 0xffff) == 0)
4372 /* If this operand is at most 16 bits, convert
4373 to a signed 16 bit number and don't use 64bit
4375 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
4376 i
.types
[op
].bitfield
.disp64
= 0;
4379 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
4380 if (i
.types
[op
].bitfield
.disp32
4381 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
4383 /* If this operand is at most 32 bits, convert
4384 to a signed 32 bit number and don't use 64bit
4386 op_disp
&= (((offsetT
) 2 << 31) - 1);
4387 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
4388 i
.types
[op
].bitfield
.disp64
= 0;
4391 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
4393 i
.types
[op
].bitfield
.disp8
= 0;
4394 i
.types
[op
].bitfield
.disp16
= 0;
4395 i
.types
[op
].bitfield
.disp32
= 0;
4396 i
.types
[op
].bitfield
.disp32s
= 0;
4397 i
.types
[op
].bitfield
.disp64
= 0;
4401 else if (flag_code
== CODE_64BIT
)
4403 if (fits_in_signed_long (op_disp
))
4405 i
.types
[op
].bitfield
.disp64
= 0;
4406 i
.types
[op
].bitfield
.disp32s
= 1;
4408 if (i
.prefix
[ADDR_PREFIX
]
4409 && fits_in_unsigned_long (op_disp
))
4410 i
.types
[op
].bitfield
.disp32
= 1;
4412 if ((i
.types
[op
].bitfield
.disp32
4413 || i
.types
[op
].bitfield
.disp32s
4414 || i
.types
[op
].bitfield
.disp16
)
4415 && fits_in_signed_byte (op_disp
))
4416 i
.types
[op
].bitfield
.disp8
= 1;
4418 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
4419 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
4421 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
4422 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
4423 i
.types
[op
].bitfield
.disp8
= 0;
4424 i
.types
[op
].bitfield
.disp16
= 0;
4425 i
.types
[op
].bitfield
.disp32
= 0;
4426 i
.types
[op
].bitfield
.disp32s
= 0;
4427 i
.types
[op
].bitfield
.disp64
= 0;
4430 /* We only support 64bit displacement on constants. */
4431 i
.types
[op
].bitfield
.disp64
= 0;
4435 /* Check if operands are valid for the instruction. */
4438 check_VecOperands (const insn_template
*t
)
4442 /* Without VSIB byte, we can't have a vector register for index. */
4443 if (!t
->opcode_modifier
.vecsib
4445 && (i
.index_reg
->reg_type
.bitfield
.regxmm
4446 || i
.index_reg
->reg_type
.bitfield
.regymm
4447 || i
.index_reg
->reg_type
.bitfield
.regzmm
))
4449 i
.error
= unsupported_vector_index_register
;
4453 /* Check if default mask is allowed. */
4454 if (t
->opcode_modifier
.nodefmask
4455 && (!i
.mask
|| i
.mask
->mask
->reg_num
== 0))
4457 i
.error
= no_default_mask
;
4461 /* For VSIB byte, we need a vector register for index, and all vector
4462 registers must be distinct. */
4463 if (t
->opcode_modifier
.vecsib
)
4466 || !((t
->opcode_modifier
.vecsib
== VecSIB128
4467 && i
.index_reg
->reg_type
.bitfield
.regxmm
)
4468 || (t
->opcode_modifier
.vecsib
== VecSIB256
4469 && i
.index_reg
->reg_type
.bitfield
.regymm
)
4470 || (t
->opcode_modifier
.vecsib
== VecSIB512
4471 && i
.index_reg
->reg_type
.bitfield
.regzmm
)))
4473 i
.error
= invalid_vsib_address
;
4477 gas_assert (i
.reg_operands
== 2 || i
.mask
);
4478 if (i
.reg_operands
== 2 && !i
.mask
)
4480 gas_assert (i
.types
[0].bitfield
.regxmm
4481 || i
.types
[0].bitfield
.regymm
);
4482 gas_assert (i
.types
[2].bitfield
.regxmm
4483 || i
.types
[2].bitfield
.regymm
);
4484 if (operand_check
== check_none
)
4486 if (register_number (i
.op
[0].regs
)
4487 != register_number (i
.index_reg
)
4488 && register_number (i
.op
[2].regs
)
4489 != register_number (i
.index_reg
)
4490 && register_number (i
.op
[0].regs
)
4491 != register_number (i
.op
[2].regs
))
4493 if (operand_check
== check_error
)
4495 i
.error
= invalid_vector_register_set
;
4498 as_warn (_("mask, index, and destination registers should be distinct"));
4500 else if (i
.reg_operands
== 1 && i
.mask
)
4502 if ((i
.types
[1].bitfield
.regymm
4503 || i
.types
[1].bitfield
.regzmm
)
4504 && (register_number (i
.op
[1].regs
)
4505 == register_number (i
.index_reg
)))
4507 if (operand_check
== check_error
)
4509 i
.error
= invalid_vector_register_set
;
4512 if (operand_check
!= check_none
)
4513 as_warn (_("index and destination registers should be distinct"));
4518 /* Check if broadcast is supported by the instruction and is applied
4519 to the memory operand. */
4522 int broadcasted_opnd_size
;
4524 /* Check if specified broadcast is supported in this instruction,
4525 and it's applied to memory operand of DWORD or QWORD type,
4526 depending on VecESize. */
4527 if (i
.broadcast
->type
!= t
->opcode_modifier
.broadcast
4528 || !i
.types
[i
.broadcast
->operand
].bitfield
.mem
4529 || (t
->opcode_modifier
.vecesize
== 0
4530 && !i
.types
[i
.broadcast
->operand
].bitfield
.dword
4531 && !i
.types
[i
.broadcast
->operand
].bitfield
.unspecified
)
4532 || (t
->opcode_modifier
.vecesize
== 1
4533 && !i
.types
[i
.broadcast
->operand
].bitfield
.qword
4534 && !i
.types
[i
.broadcast
->operand
].bitfield
.unspecified
))
4537 broadcasted_opnd_size
= t
->opcode_modifier
.vecesize
? 64 : 32;
4538 if (i
.broadcast
->type
== BROADCAST_1TO16
)
4539 broadcasted_opnd_size
<<= 4; /* Broadcast 1to16. */
4540 else if (i
.broadcast
->type
== BROADCAST_1TO8
)
4541 broadcasted_opnd_size
<<= 3; /* Broadcast 1to8. */
4542 else if (i
.broadcast
->type
== BROADCAST_1TO4
)
4543 broadcasted_opnd_size
<<= 2; /* Broadcast 1to4. */
4544 else if (i
.broadcast
->type
== BROADCAST_1TO2
)
4545 broadcasted_opnd_size
<<= 1; /* Broadcast 1to2. */
4549 if ((broadcasted_opnd_size
== 256
4550 && !t
->operand_types
[i
.broadcast
->operand
].bitfield
.ymmword
)
4551 || (broadcasted_opnd_size
== 512
4552 && !t
->operand_types
[i
.broadcast
->operand
].bitfield
.zmmword
))
4555 i
.error
= unsupported_broadcast
;
4559 /* If broadcast is supported in this instruction, we need to check if
4560 operand of one-element size isn't specified without broadcast. */
4561 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
4563 /* Find memory operand. */
4564 for (op
= 0; op
< i
.operands
; op
++)
4565 if (operand_type_check (i
.types
[op
], anymem
))
4567 gas_assert (op
< i
.operands
);
4568 /* Check size of the memory operand. */
4569 if ((t
->opcode_modifier
.vecesize
== 0
4570 && i
.types
[op
].bitfield
.dword
)
4571 || (t
->opcode_modifier
.vecesize
== 1
4572 && i
.types
[op
].bitfield
.qword
))
4574 i
.error
= broadcast_needed
;
4579 /* Check if requested masking is supported. */
4581 && (!t
->opcode_modifier
.masking
4583 && t
->opcode_modifier
.masking
== MERGING_MASKING
)))
4585 i
.error
= unsupported_masking
;
4589 /* Check if masking is applied to dest operand. */
4590 if (i
.mask
&& (i
.mask
->operand
!= (int) (i
.operands
- 1)))
4592 i
.error
= mask_not_on_destination
;
4599 if ((i
.rounding
->type
!= saeonly
4600 && !t
->opcode_modifier
.staticrounding
)
4601 || (i
.rounding
->type
== saeonly
4602 && (t
->opcode_modifier
.staticrounding
4603 || !t
->opcode_modifier
.sae
)))
4605 i
.error
= unsupported_rc_sae
;
4608 /* If the instruction has several immediate operands and one of
4609 them is rounding, the rounding operand should be the last
4610 immediate operand. */
4611 if (i
.imm_operands
> 1
4612 && i
.rounding
->operand
!= (int) (i
.imm_operands
- 1))
4614 i
.error
= rc_sae_operand_not_last_imm
;
4619 /* Check vector Disp8 operand. */
4620 if (t
->opcode_modifier
.disp8memshift
)
4623 i
.memshift
= t
->opcode_modifier
.vecesize
? 3 : 2;
4625 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
4627 for (op
= 0; op
< i
.operands
; op
++)
4628 if (operand_type_check (i
.types
[op
], disp
)
4629 && i
.op
[op
].disps
->X_op
== O_constant
)
4631 offsetT value
= i
.op
[op
].disps
->X_add_number
;
4633 = (i
.disp_encoding
!= disp_encoding_32bit
4634 && fits_in_vec_disp8 (value
));
4635 if (t
->operand_types
[op
].bitfield
.vec_disp8
)
4638 i
.types
[op
].bitfield
.vec_disp8
= 1;
4641 /* Vector insn can only have Vec_Disp8/Disp32 in
4642 32/64bit modes, and Vec_Disp8/Disp16 in 16bit
4644 i
.types
[op
].bitfield
.disp8
= 0;
4645 if (flag_code
!= CODE_16BIT
)
4646 i
.types
[op
].bitfield
.disp16
= 0;
4649 else if (flag_code
!= CODE_16BIT
)
4651 /* One form of this instruction supports vector Disp8.
4652 Try vector Disp8 if we need to use Disp32. */
4653 if (vec_disp8_ok
&& !fits_in_signed_byte (value
))
4655 i
.error
= try_vector_disp8
;
4667 /* Check if operands are valid for the instruction. Update VEX
4671 VEX_check_operands (const insn_template
*t
)
4673 /* VREX is only valid with EVEX prefix. */
4674 if (i
.need_vrex
&& !t
->opcode_modifier
.evex
)
4676 i
.error
= invalid_register_operand
;
4680 if (!t
->opcode_modifier
.vex
)
4683 /* Only check VEX_Imm4, which must be the first operand. */
4684 if (t
->operand_types
[0].bitfield
.vec_imm4
)
4686 if (i
.op
[0].imms
->X_op
!= O_constant
4687 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
4693 /* Turn off Imm8 so that update_imm won't complain. */
4694 i
.types
[0] = vec_imm4
;
4700 static const insn_template
*
4701 match_template (void)
4703 /* Points to template once we've found it. */
4704 const insn_template
*t
;
4705 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
4706 i386_operand_type overlap4
;
4707 unsigned int found_reverse_match
;
4708 i386_opcode_modifier suffix_check
;
4709 i386_operand_type operand_types
[MAX_OPERANDS
];
4710 int addr_prefix_disp
;
4712 unsigned int found_cpu_match
;
4713 unsigned int check_register
;
4714 enum i386_error specific_error
= 0;
4716 #if MAX_OPERANDS != 5
4717 # error "MAX_OPERANDS must be 5."
4720 found_reverse_match
= 0;
4721 addr_prefix_disp
= -1;
4723 memset (&suffix_check
, 0, sizeof (suffix_check
));
4724 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
4725 suffix_check
.no_bsuf
= 1;
4726 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
4727 suffix_check
.no_wsuf
= 1;
4728 else if (i
.suffix
== SHORT_MNEM_SUFFIX
)
4729 suffix_check
.no_ssuf
= 1;
4730 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
4731 suffix_check
.no_lsuf
= 1;
4732 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
4733 suffix_check
.no_qsuf
= 1;
4734 else if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
4735 suffix_check
.no_ldsuf
= 1;
4737 /* Must have right number of operands. */
4738 i
.error
= number_of_operands_mismatch
;
4740 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
4742 addr_prefix_disp
= -1;
4744 if (i
.operands
!= t
->operands
)
4747 /* Check processor support. */
4748 i
.error
= unsupported
;
4749 found_cpu_match
= (cpu_flags_match (t
)
4750 == CPU_FLAGS_PERFECT_MATCH
);
4751 if (!found_cpu_match
)
4754 /* Check old gcc support. */
4755 i
.error
= old_gcc_only
;
4756 if (!old_gcc
&& t
->opcode_modifier
.oldgcc
)
4759 /* Check AT&T mnemonic. */
4760 i
.error
= unsupported_with_intel_mnemonic
;
4761 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
4764 /* Check AT&T/Intel syntax and Intel64/AMD64 ISA. */
4765 i
.error
= unsupported_syntax
;
4766 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
4767 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
)
4768 || (intel64
&& t
->opcode_modifier
.amd64
)
4769 || (!intel64
&& t
->opcode_modifier
.intel64
))
4772 /* Check the suffix, except for some instructions in intel mode. */
4773 i
.error
= invalid_instruction_suffix
;
4774 if ((!intel_syntax
|| !t
->opcode_modifier
.ignoresize
)
4775 && ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
4776 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
4777 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
4778 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
4779 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
4780 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
)))
4783 if (!operand_size_match (t
))
4786 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4787 operand_types
[j
] = t
->operand_types
[j
];
4789 /* In general, don't allow 64-bit operands in 32-bit mode. */
4790 if (i
.suffix
== QWORD_MNEM_SUFFIX
4791 && flag_code
!= CODE_64BIT
4793 ? (!t
->opcode_modifier
.ignoresize
4794 && !intel_float_operand (t
->name
))
4795 : intel_float_operand (t
->name
) != 2)
4796 && ((!operand_types
[0].bitfield
.regmmx
4797 && !operand_types
[0].bitfield
.regxmm
4798 && !operand_types
[0].bitfield
.regymm
4799 && !operand_types
[0].bitfield
.regzmm
)
4800 || (!operand_types
[t
->operands
> 1].bitfield
.regmmx
4801 && operand_types
[t
->operands
> 1].bitfield
.regxmm
4802 && operand_types
[t
->operands
> 1].bitfield
.regymm
4803 && operand_types
[t
->operands
> 1].bitfield
.regzmm
))
4804 && (t
->base_opcode
!= 0x0fc7
4805 || t
->extension_opcode
!= 1 /* cmpxchg8b */))
4808 /* In general, don't allow 32-bit operands on pre-386. */
4809 else if (i
.suffix
== LONG_MNEM_SUFFIX
4810 && !cpu_arch_flags
.bitfield
.cpui386
4812 ? (!t
->opcode_modifier
.ignoresize
4813 && !intel_float_operand (t
->name
))
4814 : intel_float_operand (t
->name
) != 2)
4815 && ((!operand_types
[0].bitfield
.regmmx
4816 && !operand_types
[0].bitfield
.regxmm
)
4817 || (!operand_types
[t
->operands
> 1].bitfield
.regmmx
4818 && operand_types
[t
->operands
> 1].bitfield
.regxmm
)))
4821 /* Do not verify operands when there are none. */
4825 /* We've found a match; break out of loop. */
4829 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4830 into Disp32/Disp16/Disp32 operand. */
4831 if (i
.prefix
[ADDR_PREFIX
] != 0)
4833 /* There should be only one Disp operand. */
4837 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4839 if (operand_types
[j
].bitfield
.disp16
)
4841 addr_prefix_disp
= j
;
4842 operand_types
[j
].bitfield
.disp32
= 1;
4843 operand_types
[j
].bitfield
.disp16
= 0;
4849 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4851 if (operand_types
[j
].bitfield
.disp32
)
4853 addr_prefix_disp
= j
;
4854 operand_types
[j
].bitfield
.disp32
= 0;
4855 operand_types
[j
].bitfield
.disp16
= 1;
4861 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4863 if (operand_types
[j
].bitfield
.disp64
)
4865 addr_prefix_disp
= j
;
4866 operand_types
[j
].bitfield
.disp64
= 0;
4867 operand_types
[j
].bitfield
.disp32
= 1;
4875 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
4876 if (i
.reloc
[0] == BFD_RELOC_386_GOT32
&& t
->base_opcode
== 0xa0)
4879 /* We check register size if needed. */
4880 check_register
= t
->opcode_modifier
.checkregsize
;
4881 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
4882 switch (t
->operands
)
4885 if (!operand_type_match (overlap0
, i
.types
[0]))
4889 /* xchg %eax, %eax is a special case. It is an aliase for nop
4890 only in 32bit mode and we can use opcode 0x90. In 64bit
4891 mode, we can't use 0x90 for xchg %eax, %eax since it should
4892 zero-extend %eax to %rax. */
4893 if (flag_code
== CODE_64BIT
4894 && t
->base_opcode
== 0x90
4895 && operand_type_equal (&i
.types
[0], &acc32
)
4896 && operand_type_equal (&i
.types
[1], &acc32
))
4900 /* If we swap operand in encoding, we either match
4901 the next one or reverse direction of operands. */
4902 if (t
->opcode_modifier
.s
)
4904 else if (t
->opcode_modifier
.d
)
4909 /* If we swap operand in encoding, we match the next one. */
4910 if (i
.swap_operand
&& t
->opcode_modifier
.s
)
4914 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
4915 if (!operand_type_match (overlap0
, i
.types
[0])
4916 || !operand_type_match (overlap1
, i
.types
[1])
4918 && !operand_type_register_match (overlap0
, i
.types
[0],
4920 overlap1
, i
.types
[1],
4923 /* Check if other direction is valid ... */
4924 if (!t
->opcode_modifier
.d
&& !t
->opcode_modifier
.floatd
)
4928 /* Try reversing direction of operands. */
4929 overlap0
= operand_type_and (i
.types
[0], operand_types
[1]);
4930 overlap1
= operand_type_and (i
.types
[1], operand_types
[0]);
4931 if (!operand_type_match (overlap0
, i
.types
[0])
4932 || !operand_type_match (overlap1
, i
.types
[1])
4934 && !operand_type_register_match (overlap0
,
4941 /* Does not match either direction. */
4944 /* found_reverse_match holds which of D or FloatDR
4946 if (t
->opcode_modifier
.d
)
4947 found_reverse_match
= Opcode_D
;
4948 else if (t
->opcode_modifier
.floatd
)
4949 found_reverse_match
= Opcode_FloatD
;
4951 found_reverse_match
= 0;
4952 if (t
->opcode_modifier
.floatr
)
4953 found_reverse_match
|= Opcode_FloatR
;
4957 /* Found a forward 2 operand match here. */
4958 switch (t
->operands
)
4961 overlap4
= operand_type_and (i
.types
[4],
4964 overlap3
= operand_type_and (i
.types
[3],
4967 overlap2
= operand_type_and (i
.types
[2],
4972 switch (t
->operands
)
4975 if (!operand_type_match (overlap4
, i
.types
[4])
4976 || !operand_type_register_match (overlap3
,
4984 if (!operand_type_match (overlap3
, i
.types
[3])
4986 && !operand_type_register_match (overlap2
,
4994 /* Here we make use of the fact that there are no
4995 reverse match 3 operand instructions, and all 3
4996 operand instructions only need to be checked for
4997 register consistency between operands 2 and 3. */
4998 if (!operand_type_match (overlap2
, i
.types
[2])
5000 && !operand_type_register_match (overlap1
,
5010 /* Found either forward/reverse 2, 3 or 4 operand match here:
5011 slip through to break. */
5013 if (!found_cpu_match
)
5015 found_reverse_match
= 0;
5019 /* Check if vector and VEX operands are valid. */
5020 if (check_VecOperands (t
) || VEX_check_operands (t
))
5022 specific_error
= i
.error
;
5026 /* We've found a match; break out of loop. */
5030 if (t
== current_templates
->end
)
5032 /* We found no match. */
5033 const char *err_msg
;
5034 switch (specific_error
? specific_error
: i
.error
)
5038 case operand_size_mismatch
:
5039 err_msg
= _("operand size mismatch");
5041 case operand_type_mismatch
:
5042 err_msg
= _("operand type mismatch");
5044 case register_type_mismatch
:
5045 err_msg
= _("register type mismatch");
5047 case number_of_operands_mismatch
:
5048 err_msg
= _("number of operands mismatch");
5050 case invalid_instruction_suffix
:
5051 err_msg
= _("invalid instruction suffix");
5054 err_msg
= _("constant doesn't fit in 4 bits");
5057 err_msg
= _("only supported with old gcc");
5059 case unsupported_with_intel_mnemonic
:
5060 err_msg
= _("unsupported with Intel mnemonic");
5062 case unsupported_syntax
:
5063 err_msg
= _("unsupported syntax");
5066 as_bad (_("unsupported instruction `%s'"),
5067 current_templates
->start
->name
);
5069 case invalid_vsib_address
:
5070 err_msg
= _("invalid VSIB address");
5072 case invalid_vector_register_set
:
5073 err_msg
= _("mask, index, and destination registers must be distinct");
5075 case unsupported_vector_index_register
:
5076 err_msg
= _("unsupported vector index register");
5078 case unsupported_broadcast
:
5079 err_msg
= _("unsupported broadcast");
5081 case broadcast_not_on_src_operand
:
5082 err_msg
= _("broadcast not on source memory operand");
5084 case broadcast_needed
:
5085 err_msg
= _("broadcast is needed for operand of such type");
5087 case unsupported_masking
:
5088 err_msg
= _("unsupported masking");
5090 case mask_not_on_destination
:
5091 err_msg
= _("mask not on destination operand");
5093 case no_default_mask
:
5094 err_msg
= _("default mask isn't allowed");
5096 case unsupported_rc_sae
:
5097 err_msg
= _("unsupported static rounding/sae");
5099 case rc_sae_operand_not_last_imm
:
5101 err_msg
= _("RC/SAE operand must precede immediate operands");
5103 err_msg
= _("RC/SAE operand must follow immediate operands");
5105 case invalid_register_operand
:
5106 err_msg
= _("invalid register operand");
5109 as_bad (_("%s for `%s'"), err_msg
,
5110 current_templates
->start
->name
);
5114 if (!quiet_warnings
)
5117 && (i
.types
[0].bitfield
.jumpabsolute
5118 != operand_types
[0].bitfield
.jumpabsolute
))
5120 as_warn (_("indirect %s without `*'"), t
->name
);
5123 if (t
->opcode_modifier
.isprefix
5124 && t
->opcode_modifier
.ignoresize
)
5126 /* Warn them that a data or address size prefix doesn't
5127 affect assembly of the next line of code. */
5128 as_warn (_("stand-alone `%s' prefix"), t
->name
);
5132 /* Copy the template we found. */
5135 if (addr_prefix_disp
!= -1)
5136 i
.tm
.operand_types
[addr_prefix_disp
]
5137 = operand_types
[addr_prefix_disp
];
5139 if (found_reverse_match
)
5141 /* If we found a reverse match we must alter the opcode
5142 direction bit. found_reverse_match holds bits to change
5143 (different for int & float insns). */
5145 i
.tm
.base_opcode
^= found_reverse_match
;
5147 i
.tm
.operand_types
[0] = operand_types
[1];
5148 i
.tm
.operand_types
[1] = operand_types
[0];
5157 int mem_op
= operand_type_check (i
.types
[0], anymem
) ? 0 : 1;
5158 if (i
.tm
.operand_types
[mem_op
].bitfield
.esseg
)
5160 if (i
.seg
[0] != NULL
&& i
.seg
[0] != &es
)
5162 as_bad (_("`%s' operand %d must use `%ses' segment"),
5168 /* There's only ever one segment override allowed per instruction.
5169 This instruction possibly has a legal segment override on the
5170 second operand, so copy the segment to where non-string
5171 instructions store it, allowing common code. */
5172 i
.seg
[0] = i
.seg
[1];
5174 else if (i
.tm
.operand_types
[mem_op
+ 1].bitfield
.esseg
)
5176 if (i
.seg
[1] != NULL
&& i
.seg
[1] != &es
)
5178 as_bad (_("`%s' operand %d must use `%ses' segment"),
5189 process_suffix (void)
5191 /* If matched instruction specifies an explicit instruction mnemonic
5193 if (i
.tm
.opcode_modifier
.size16
)
5194 i
.suffix
= WORD_MNEM_SUFFIX
;
5195 else if (i
.tm
.opcode_modifier
.size32
)
5196 i
.suffix
= LONG_MNEM_SUFFIX
;
5197 else if (i
.tm
.opcode_modifier
.size64
)
5198 i
.suffix
= QWORD_MNEM_SUFFIX
;
5199 else if (i
.reg_operands
)
5201 /* If there's no instruction mnemonic suffix we try to invent one
5202 based on register operands. */
5205 /* We take i.suffix from the last register operand specified,
5206 Destination register type is more significant than source
5207 register type. crc32 in SSE4.2 prefers source register
5209 if (i
.tm
.base_opcode
== 0xf20f38f1)
5211 if (i
.types
[0].bitfield
.reg16
)
5212 i
.suffix
= WORD_MNEM_SUFFIX
;
5213 else if (i
.types
[0].bitfield
.reg32
)
5214 i
.suffix
= LONG_MNEM_SUFFIX
;
5215 else if (i
.types
[0].bitfield
.reg64
)
5216 i
.suffix
= QWORD_MNEM_SUFFIX
;
5218 else if (i
.tm
.base_opcode
== 0xf20f38f0)
5220 if (i
.types
[0].bitfield
.reg8
)
5221 i
.suffix
= BYTE_MNEM_SUFFIX
;
5228 if (i
.tm
.base_opcode
== 0xf20f38f1
5229 || i
.tm
.base_opcode
== 0xf20f38f0)
5231 /* We have to know the operand size for crc32. */
5232 as_bad (_("ambiguous memory operand size for `%s`"),
5237 for (op
= i
.operands
; --op
>= 0;)
5238 if (!i
.tm
.operand_types
[op
].bitfield
.inoutportreg
)
5240 if (i
.types
[op
].bitfield
.reg8
)
5242 i
.suffix
= BYTE_MNEM_SUFFIX
;
5245 else if (i
.types
[op
].bitfield
.reg16
)
5247 i
.suffix
= WORD_MNEM_SUFFIX
;
5250 else if (i
.types
[op
].bitfield
.reg32
)
5252 i
.suffix
= LONG_MNEM_SUFFIX
;
5255 else if (i
.types
[op
].bitfield
.reg64
)
5257 i
.suffix
= QWORD_MNEM_SUFFIX
;
5263 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
5266 && i
.tm
.opcode_modifier
.ignoresize
5267 && i
.tm
.opcode_modifier
.no_bsuf
)
5269 else if (!check_byte_reg ())
5272 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
5275 && i
.tm
.opcode_modifier
.ignoresize
5276 && i
.tm
.opcode_modifier
.no_lsuf
)
5278 else if (!check_long_reg ())
5281 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
5284 && i
.tm
.opcode_modifier
.ignoresize
5285 && i
.tm
.opcode_modifier
.no_qsuf
)
5287 else if (!check_qword_reg ())
5290 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
5293 && i
.tm
.opcode_modifier
.ignoresize
5294 && i
.tm
.opcode_modifier
.no_wsuf
)
5296 else if (!check_word_reg ())
5299 else if (i
.suffix
== XMMWORD_MNEM_SUFFIX
5300 || i
.suffix
== YMMWORD_MNEM_SUFFIX
5301 || i
.suffix
== ZMMWORD_MNEM_SUFFIX
)
5303 /* Skip if the instruction has x/y/z suffix. match_template
5304 should check if it is a valid suffix. */
5306 else if (intel_syntax
&& i
.tm
.opcode_modifier
.ignoresize
)
5307 /* Do nothing if the instruction is going to ignore the prefix. */
5312 else if (i
.tm
.opcode_modifier
.defaultsize
5314 /* exclude fldenv/frstor/fsave/fstenv */
5315 && i
.tm
.opcode_modifier
.no_ssuf
)
5317 i
.suffix
= stackop_size
;
5319 else if (intel_syntax
5321 && (i
.tm
.operand_types
[0].bitfield
.jumpabsolute
5322 || i
.tm
.opcode_modifier
.jumpbyte
5323 || i
.tm
.opcode_modifier
.jumpintersegment
5324 || (i
.tm
.base_opcode
== 0x0f01 /* [ls][gi]dt */
5325 && i
.tm
.extension_opcode
<= 3)))
5330 if (!i
.tm
.opcode_modifier
.no_qsuf
)
5332 i
.suffix
= QWORD_MNEM_SUFFIX
;
5336 if (!i
.tm
.opcode_modifier
.no_lsuf
)
5337 i
.suffix
= LONG_MNEM_SUFFIX
;
5340 if (!i
.tm
.opcode_modifier
.no_wsuf
)
5341 i
.suffix
= WORD_MNEM_SUFFIX
;
5350 if (i
.tm
.opcode_modifier
.w
)
5352 as_bad (_("no instruction mnemonic suffix given and "
5353 "no register operands; can't size instruction"));
5359 unsigned int suffixes
;
5361 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
5362 if (!i
.tm
.opcode_modifier
.no_wsuf
)
5364 if (!i
.tm
.opcode_modifier
.no_lsuf
)
5366 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
5368 if (!i
.tm
.opcode_modifier
.no_ssuf
)
5370 if (!i
.tm
.opcode_modifier
.no_qsuf
)
5373 /* There are more than suffix matches. */
5374 if (i
.tm
.opcode_modifier
.w
5375 || ((suffixes
& (suffixes
- 1))
5376 && !i
.tm
.opcode_modifier
.defaultsize
5377 && !i
.tm
.opcode_modifier
.ignoresize
))
5379 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
5385 /* Change the opcode based on the operand size given by i.suffix;
5386 We don't need to change things for byte insns. */
5389 && i
.suffix
!= BYTE_MNEM_SUFFIX
5390 && i
.suffix
!= XMMWORD_MNEM_SUFFIX
5391 && i
.suffix
!= YMMWORD_MNEM_SUFFIX
5392 && i
.suffix
!= ZMMWORD_MNEM_SUFFIX
)
5394 /* It's not a byte, select word/dword operation. */
5395 if (i
.tm
.opcode_modifier
.w
)
5397 if (i
.tm
.opcode_modifier
.shortform
)
5398 i
.tm
.base_opcode
|= 8;
5400 i
.tm
.base_opcode
|= 1;
5403 /* Now select between word & dword operations via the operand
5404 size prefix, except for instructions that will ignore this
5406 if (i
.tm
.opcode_modifier
.addrprefixop0
)
5408 /* The address size override prefix changes the size of the
5410 if ((flag_code
== CODE_32BIT
5411 && i
.op
->regs
[0].reg_type
.bitfield
.reg16
)
5412 || (flag_code
!= CODE_32BIT
5413 && i
.op
->regs
[0].reg_type
.bitfield
.reg32
))
5414 if (!add_prefix (ADDR_PREFIX_OPCODE
))
5417 else if (i
.suffix
!= QWORD_MNEM_SUFFIX
5418 && i
.suffix
!= LONG_DOUBLE_MNEM_SUFFIX
5419 && !i
.tm
.opcode_modifier
.ignoresize
5420 && !i
.tm
.opcode_modifier
.floatmf
5421 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
5422 || (flag_code
== CODE_64BIT
5423 && i
.tm
.opcode_modifier
.jumpbyte
)))
5425 unsigned int prefix
= DATA_PREFIX_OPCODE
;
5427 if (i
.tm
.opcode_modifier
.jumpbyte
) /* jcxz, loop */
5428 prefix
= ADDR_PREFIX_OPCODE
;
5430 if (!add_prefix (prefix
))
5434 /* Set mode64 for an operand. */
5435 if (i
.suffix
== QWORD_MNEM_SUFFIX
5436 && flag_code
== CODE_64BIT
5437 && !i
.tm
.opcode_modifier
.norex64
)
5439 /* Special case for xchg %rax,%rax. It is NOP and doesn't
5440 need rex64. cmpxchg8b is also a special case. */
5441 if (! (i
.operands
== 2
5442 && i
.tm
.base_opcode
== 0x90
5443 && i
.tm
.extension_opcode
== None
5444 && operand_type_equal (&i
.types
[0], &acc64
)
5445 && operand_type_equal (&i
.types
[1], &acc64
))
5446 && ! (i
.operands
== 1
5447 && i
.tm
.base_opcode
== 0xfc7
5448 && i
.tm
.extension_opcode
== 1
5449 && !operand_type_check (i
.types
[0], reg
)
5450 && operand_type_check (i
.types
[0], anymem
)))
5454 /* Size floating point instruction. */
5455 if (i
.suffix
== LONG_MNEM_SUFFIX
)
5456 if (i
.tm
.opcode_modifier
.floatmf
)
5457 i
.tm
.base_opcode
^= 4;
5464 check_byte_reg (void)
5468 for (op
= i
.operands
; --op
>= 0;)
5470 /* If this is an eight bit register, it's OK. If it's the 16 or
5471 32 bit version of an eight bit register, we will just use the
5472 low portion, and that's OK too. */
5473 if (i
.types
[op
].bitfield
.reg8
)
5476 /* I/O port address operands are OK too. */
5477 if (i
.tm
.operand_types
[op
].bitfield
.inoutportreg
)
5480 /* crc32 doesn't generate this warning. */
5481 if (i
.tm
.base_opcode
== 0xf20f38f0)
5484 if ((i
.types
[op
].bitfield
.reg16
5485 || i
.types
[op
].bitfield
.reg32
5486 || i
.types
[op
].bitfield
.reg64
)
5487 && i
.op
[op
].regs
->reg_num
< 4
5488 /* Prohibit these changes in 64bit mode, since the lowering
5489 would be more complicated. */
5490 && flag_code
!= CODE_64BIT
)
5492 #if REGISTER_WARNINGS
5493 if (!quiet_warnings
)
5494 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5496 (i
.op
[op
].regs
+ (i
.types
[op
].bitfield
.reg16
5497 ? REGNAM_AL
- REGNAM_AX
5498 : REGNAM_AL
- REGNAM_EAX
))->reg_name
,
5500 i
.op
[op
].regs
->reg_name
,
5505 /* Any other register is bad. */
5506 if (i
.types
[op
].bitfield
.reg16
5507 || i
.types
[op
].bitfield
.reg32
5508 || i
.types
[op
].bitfield
.reg64
5509 || i
.types
[op
].bitfield
.regmmx
5510 || i
.types
[op
].bitfield
.regxmm
5511 || i
.types
[op
].bitfield
.regymm
5512 || i
.types
[op
].bitfield
.regzmm
5513 || i
.types
[op
].bitfield
.sreg2
5514 || i
.types
[op
].bitfield
.sreg3
5515 || i
.types
[op
].bitfield
.control
5516 || i
.types
[op
].bitfield
.debug
5517 || i
.types
[op
].bitfield
.test
5518 || i
.types
[op
].bitfield
.floatreg
5519 || i
.types
[op
].bitfield
.floatacc
)
5521 as_bad (_("`%s%s' not allowed with `%s%c'"),
5523 i
.op
[op
].regs
->reg_name
,
5533 check_long_reg (void)
5537 for (op
= i
.operands
; --op
>= 0;)
5538 /* Reject eight bit registers, except where the template requires
5539 them. (eg. movzb) */
5540 if (i
.types
[op
].bitfield
.reg8
5541 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5542 || i
.tm
.operand_types
[op
].bitfield
.reg32
5543 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5545 as_bad (_("`%s%s' not allowed with `%s%c'"),
5547 i
.op
[op
].regs
->reg_name
,
5552 /* Warn if the e prefix on a general reg is missing. */
5553 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
5554 && i
.types
[op
].bitfield
.reg16
5555 && (i
.tm
.operand_types
[op
].bitfield
.reg32
5556 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5558 /* Prohibit these changes in the 64bit mode, since the
5559 lowering is more complicated. */
5560 if (flag_code
== CODE_64BIT
)
5562 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5563 register_prefix
, i
.op
[op
].regs
->reg_name
,
5567 #if REGISTER_WARNINGS
5568 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5570 (i
.op
[op
].regs
+ REGNAM_EAX
- REGNAM_AX
)->reg_name
,
5571 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
5574 /* Warn if the r prefix on a general reg is present. */
5575 else if (i
.types
[op
].bitfield
.reg64
5576 && (i
.tm
.operand_types
[op
].bitfield
.reg32
5577 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5580 && i
.tm
.opcode_modifier
.toqword
5581 && !i
.types
[0].bitfield
.regxmm
)
5583 /* Convert to QWORD. We want REX byte. */
5584 i
.suffix
= QWORD_MNEM_SUFFIX
;
5588 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5589 register_prefix
, i
.op
[op
].regs
->reg_name
,
5598 check_qword_reg (void)
5602 for (op
= i
.operands
; --op
>= 0; )
5603 /* Reject eight bit registers, except where the template requires
5604 them. (eg. movzb) */
5605 if (i
.types
[op
].bitfield
.reg8
5606 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5607 || i
.tm
.operand_types
[op
].bitfield
.reg32
5608 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5610 as_bad (_("`%s%s' not allowed with `%s%c'"),
5612 i
.op
[op
].regs
->reg_name
,
5617 /* Warn if the r prefix on a general reg is missing. */
5618 else if ((i
.types
[op
].bitfield
.reg16
5619 || i
.types
[op
].bitfield
.reg32
)
5620 && (i
.tm
.operand_types
[op
].bitfield
.reg32
5621 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5623 /* Prohibit these changes in the 64bit mode, since the
5624 lowering is more complicated. */
5626 && i
.tm
.opcode_modifier
.todword
5627 && !i
.types
[0].bitfield
.regxmm
)
5629 /* Convert to DWORD. We don't want REX byte. */
5630 i
.suffix
= LONG_MNEM_SUFFIX
;
5634 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5635 register_prefix
, i
.op
[op
].regs
->reg_name
,
5644 check_word_reg (void)
5647 for (op
= i
.operands
; --op
>= 0;)
5648 /* Reject eight bit registers, except where the template requires
5649 them. (eg. movzb) */
5650 if (i
.types
[op
].bitfield
.reg8
5651 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5652 || i
.tm
.operand_types
[op
].bitfield
.reg32
5653 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5655 as_bad (_("`%s%s' not allowed with `%s%c'"),
5657 i
.op
[op
].regs
->reg_name
,
5662 /* Warn if the e or r prefix on a general reg is present. */
5663 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
5664 && (i
.types
[op
].bitfield
.reg32
5665 || i
.types
[op
].bitfield
.reg64
)
5666 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5667 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5669 /* Prohibit these changes in the 64bit mode, since the
5670 lowering is more complicated. */
5671 if (flag_code
== CODE_64BIT
)
5673 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5674 register_prefix
, i
.op
[op
].regs
->reg_name
,
5678 #if REGISTER_WARNINGS
5679 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5681 (i
.op
[op
].regs
+ REGNAM_AX
- REGNAM_EAX
)->reg_name
,
5682 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
5689 update_imm (unsigned int j
)
5691 i386_operand_type overlap
= i
.types
[j
];
5692 if ((overlap
.bitfield
.imm8
5693 || overlap
.bitfield
.imm8s
5694 || overlap
.bitfield
.imm16
5695 || overlap
.bitfield
.imm32
5696 || overlap
.bitfield
.imm32s
5697 || overlap
.bitfield
.imm64
)
5698 && !operand_type_equal (&overlap
, &imm8
)
5699 && !operand_type_equal (&overlap
, &imm8s
)
5700 && !operand_type_equal (&overlap
, &imm16
)
5701 && !operand_type_equal (&overlap
, &imm32
)
5702 && !operand_type_equal (&overlap
, &imm32s
)
5703 && !operand_type_equal (&overlap
, &imm64
))
5707 i386_operand_type temp
;
5709 operand_type_set (&temp
, 0);
5710 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
5712 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
5713 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
5715 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
5716 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
5717 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
5719 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
5720 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
5723 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
5726 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
5727 || operand_type_equal (&overlap
, &imm16_32
)
5728 || operand_type_equal (&overlap
, &imm16_32s
))
5730 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5735 if (!operand_type_equal (&overlap
, &imm8
)
5736 && !operand_type_equal (&overlap
, &imm8s
)
5737 && !operand_type_equal (&overlap
, &imm16
)
5738 && !operand_type_equal (&overlap
, &imm32
)
5739 && !operand_type_equal (&overlap
, &imm32s
)
5740 && !operand_type_equal (&overlap
, &imm64
))
5742 as_bad (_("no instruction mnemonic suffix given; "
5743 "can't determine immediate size"));
5747 i
.types
[j
] = overlap
;
5757 /* Update the first 2 immediate operands. */
5758 n
= i
.operands
> 2 ? 2 : i
.operands
;
5761 for (j
= 0; j
< n
; j
++)
5762 if (update_imm (j
) == 0)
5765 /* The 3rd operand can't be immediate operand. */
5766 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
5773 bad_implicit_operand (int xmm
)
5775 const char *ireg
= xmm
? "xmm0" : "ymm0";
5778 as_bad (_("the last operand of `%s' must be `%s%s'"),
5779 i
.tm
.name
, register_prefix
, ireg
);
5781 as_bad (_("the first operand of `%s' must be `%s%s'"),
5782 i
.tm
.name
, register_prefix
, ireg
);
5787 process_operands (void)
5789 /* Default segment register this instruction will use for memory
5790 accesses. 0 means unknown. This is only for optimizing out
5791 unnecessary segment overrides. */
5792 const seg_entry
*default_seg
= 0;
5794 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
5796 unsigned int dupl
= i
.operands
;
5797 unsigned int dest
= dupl
- 1;
5800 /* The destination must be an xmm register. */
5801 gas_assert (i
.reg_operands
5802 && MAX_OPERANDS
> dupl
5803 && operand_type_equal (&i
.types
[dest
], ®xmm
));
5805 if (i
.tm
.opcode_modifier
.firstxmm0
)
5807 /* The first operand is implicit and must be xmm0. */
5808 gas_assert (operand_type_equal (&i
.types
[0], ®xmm
));
5809 if (register_number (i
.op
[0].regs
) != 0)
5810 return bad_implicit_operand (1);
5812 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
5814 /* Keep xmm0 for instructions with VEX prefix and 3
5820 /* We remove the first xmm0 and keep the number of
5821 operands unchanged, which in fact duplicates the
5823 for (j
= 1; j
< i
.operands
; j
++)
5825 i
.op
[j
- 1] = i
.op
[j
];
5826 i
.types
[j
- 1] = i
.types
[j
];
5827 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
5831 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
5833 gas_assert ((MAX_OPERANDS
- 1) > dupl
5834 && (i
.tm
.opcode_modifier
.vexsources
5837 /* Add the implicit xmm0 for instructions with VEX prefix
5839 for (j
= i
.operands
; j
> 0; j
--)
5841 i
.op
[j
] = i
.op
[j
- 1];
5842 i
.types
[j
] = i
.types
[j
- 1];
5843 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
5846 = (const reg_entry
*) hash_find (reg_hash
, "xmm0");
5847 i
.types
[0] = regxmm
;
5848 i
.tm
.operand_types
[0] = regxmm
;
5851 i
.reg_operands
+= 2;
5856 i
.op
[dupl
] = i
.op
[dest
];
5857 i
.types
[dupl
] = i
.types
[dest
];
5858 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
5867 i
.op
[dupl
] = i
.op
[dest
];
5868 i
.types
[dupl
] = i
.types
[dest
];
5869 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
5872 if (i
.tm
.opcode_modifier
.immext
)
5875 else if (i
.tm
.opcode_modifier
.firstxmm0
)
5879 /* The first operand is implicit and must be xmm0/ymm0/zmm0. */
5880 gas_assert (i
.reg_operands
5881 && (operand_type_equal (&i
.types
[0], ®xmm
)
5882 || operand_type_equal (&i
.types
[0], ®ymm
)
5883 || operand_type_equal (&i
.types
[0], ®zmm
)));
5884 if (register_number (i
.op
[0].regs
) != 0)
5885 return bad_implicit_operand (i
.types
[0].bitfield
.regxmm
);
5887 for (j
= 1; j
< i
.operands
; j
++)
5889 i
.op
[j
- 1] = i
.op
[j
];
5890 i
.types
[j
- 1] = i
.types
[j
];
5892 /* We need to adjust fields in i.tm since they are used by
5893 build_modrm_byte. */
5894 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
5901 else if (i
.tm
.opcode_modifier
.regkludge
)
5903 /* The imul $imm, %reg instruction is converted into
5904 imul $imm, %reg, %reg, and the clr %reg instruction
5905 is converted into xor %reg, %reg. */
5907 unsigned int first_reg_op
;
5909 if (operand_type_check (i
.types
[0], reg
))
5913 /* Pretend we saw the extra register operand. */
5914 gas_assert (i
.reg_operands
== 1
5915 && i
.op
[first_reg_op
+ 1].regs
== 0);
5916 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
5917 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
5922 if (i
.tm
.opcode_modifier
.shortform
)
5924 if (i
.types
[0].bitfield
.sreg2
5925 || i
.types
[0].bitfield
.sreg3
)
5927 if (i
.tm
.base_opcode
== POP_SEG_SHORT
5928 && i
.op
[0].regs
->reg_num
== 1)
5930 as_bad (_("you can't `pop %scs'"), register_prefix
);
5933 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
5934 if ((i
.op
[0].regs
->reg_flags
& RegRex
) != 0)
5939 /* The register or float register operand is in operand
5943 if (i
.types
[0].bitfield
.floatreg
5944 || operand_type_check (i
.types
[0], reg
))
5948 /* Register goes in low 3 bits of opcode. */
5949 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
5950 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
5952 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
5954 /* Warn about some common errors, but press on regardless.
5955 The first case can be generated by gcc (<= 2.8.1). */
5956 if (i
.operands
== 2)
5958 /* Reversed arguments on faddp, fsubp, etc. */
5959 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
5960 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
5961 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
5965 /* Extraneous `l' suffix on fp insn. */
5966 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
5967 register_prefix
, i
.op
[0].regs
->reg_name
);
5972 else if (i
.tm
.opcode_modifier
.modrm
)
5974 /* The opcode is completed (modulo i.tm.extension_opcode which
5975 must be put into the modrm byte). Now, we make the modrm and
5976 index base bytes based on all the info we've collected. */
5978 default_seg
= build_modrm_byte ();
5980 else if ((i
.tm
.base_opcode
& ~0x3) == MOV_AX_DISP32
)
5984 else if (i
.tm
.opcode_modifier
.isstring
)
5986 /* For the string instructions that allow a segment override
5987 on one of their operands, the default segment is ds. */
5991 if (i
.tm
.base_opcode
== 0x8d /* lea */
5994 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
5996 /* If a segment was explicitly specified, and the specified segment
5997 is not the default, use an opcode prefix to select it. If we
5998 never figured out what the default segment is, then default_seg
5999 will be zero at this point, and the specified segment prefix will
6001 if ((i
.seg
[0]) && (i
.seg
[0] != default_seg
))
6003 if (!add_prefix (i
.seg
[0]->seg_prefix
))
6009 static const seg_entry
*
6010 build_modrm_byte (void)
6012 const seg_entry
*default_seg
= 0;
6013 unsigned int source
, dest
;
6016 /* The first operand of instructions with VEX prefix and 3 sources
6017 must be VEX_Imm4. */
6018 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
6021 unsigned int nds
, reg_slot
;
6024 if (i
.tm
.opcode_modifier
.veximmext
6025 && i
.tm
.opcode_modifier
.immext
)
6027 dest
= i
.operands
- 2;
6028 gas_assert (dest
== 3);
6031 dest
= i
.operands
- 1;
6034 /* There are 2 kinds of instructions:
6035 1. 5 operands: 4 register operands or 3 register operands
6036 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
6037 VexW0 or VexW1. The destination must be either XMM, YMM or
6039 2. 4 operands: 4 register operands or 3 register operands
6040 plus 1 memory operand, VexXDS, and VexImmExt */
6041 gas_assert ((i
.reg_operands
== 4
6042 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
6043 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6044 && (i
.tm
.opcode_modifier
.veximmext
6045 || (i
.imm_operands
== 1
6046 && i
.types
[0].bitfield
.vec_imm4
6047 && (i
.tm
.opcode_modifier
.vexw
== VEXW0
6048 || i
.tm
.opcode_modifier
.vexw
== VEXW1
)
6049 && (operand_type_equal (&i
.tm
.operand_types
[dest
], ®xmm
)
6050 || operand_type_equal (&i
.tm
.operand_types
[dest
], ®ymm
)
6051 || operand_type_equal (&i
.tm
.operand_types
[dest
], ®zmm
)))));
6053 if (i
.imm_operands
== 0)
6055 /* When there is no immediate operand, generate an 8bit
6056 immediate operand to encode the first operand. */
6057 exp
= &im_expressions
[i
.imm_operands
++];
6058 i
.op
[i
.operands
].imms
= exp
;
6059 i
.types
[i
.operands
] = imm8
;
6061 /* If VexW1 is set, the first operand is the source and
6062 the second operand is encoded in the immediate operand. */
6063 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
6074 /* FMA swaps REG and NDS. */
6075 if (i
.tm
.cpu_flags
.bitfield
.cpufma
)
6083 gas_assert (operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6085 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6087 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6089 exp
->X_op
= O_constant
;
6090 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
6091 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
6095 unsigned int imm_slot
;
6097 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
6099 /* If VexW0 is set, the third operand is the source and
6100 the second operand is encoded in the immediate
6107 /* VexW1 is set, the second operand is the source and
6108 the third operand is encoded in the immediate
6114 if (i
.tm
.opcode_modifier
.immext
)
6116 /* When ImmExt is set, the immdiate byte is the last
6118 imm_slot
= i
.operands
- 1;
6126 /* Turn on Imm8 so that output_imm will generate it. */
6127 i
.types
[imm_slot
].bitfield
.imm8
= 1;
6130 gas_assert (operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6132 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6134 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6136 i
.op
[imm_slot
].imms
->X_add_number
6137 |= register_number (i
.op
[reg_slot
].regs
) << 4;
6138 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
6141 gas_assert (operand_type_equal (&i
.tm
.operand_types
[nds
], ®xmm
)
6142 || operand_type_equal (&i
.tm
.operand_types
[nds
],
6144 || operand_type_equal (&i
.tm
.operand_types
[nds
],
6146 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
6151 /* i.reg_operands MUST be the number of real register operands;
6152 implicit registers do not count. If there are 3 register
6153 operands, it must be a instruction with VexNDS. For a
6154 instruction with VexNDD, the destination register is encoded
6155 in VEX prefix. If there are 4 register operands, it must be
6156 a instruction with VEX prefix and 3 sources. */
6157 if (i
.mem_operands
== 0
6158 && ((i
.reg_operands
== 2
6159 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
6160 || (i
.reg_operands
== 3
6161 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6162 || (i
.reg_operands
== 4 && vex_3_sources
)))
6170 /* When there are 3 operands, one of them may be immediate,
6171 which may be the first or the last operand. Otherwise,
6172 the first operand must be shift count register (cl) or it
6173 is an instruction with VexNDS. */
6174 gas_assert (i
.imm_operands
== 1
6175 || (i
.imm_operands
== 0
6176 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6177 || i
.types
[0].bitfield
.shiftcount
)));
6178 if (operand_type_check (i
.types
[0], imm
)
6179 || i
.types
[0].bitfield
.shiftcount
)
6185 /* When there are 4 operands, the first two must be 8bit
6186 immediate operands. The source operand will be the 3rd
6189 For instructions with VexNDS, if the first operand
6190 an imm8, the source operand is the 2nd one. If the last
6191 operand is imm8, the source operand is the first one. */
6192 gas_assert ((i
.imm_operands
== 2
6193 && i
.types
[0].bitfield
.imm8
6194 && i
.types
[1].bitfield
.imm8
)
6195 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6196 && i
.imm_operands
== 1
6197 && (i
.types
[0].bitfield
.imm8
6198 || i
.types
[i
.operands
- 1].bitfield
.imm8
6200 if (i
.imm_operands
== 2)
6204 if (i
.types
[0].bitfield
.imm8
)
6211 if (i
.tm
.opcode_modifier
.evex
)
6213 /* For EVEX instructions, when there are 5 operands, the
6214 first one must be immediate operand. If the second one
6215 is immediate operand, the source operand is the 3th
6216 one. If the last one is immediate operand, the source
6217 operand is the 2nd one. */
6218 gas_assert (i
.imm_operands
== 2
6219 && i
.tm
.opcode_modifier
.sae
6220 && operand_type_check (i
.types
[0], imm
));
6221 if (operand_type_check (i
.types
[1], imm
))
6223 else if (operand_type_check (i
.types
[4], imm
))
6237 /* RC/SAE operand could be between DEST and SRC. That happens
6238 when one operand is GPR and the other one is XMM/YMM/ZMM
6240 if (i
.rounding
&& i
.rounding
->operand
== (int) dest
)
6243 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6245 /* For instructions with VexNDS, the register-only source
6246 operand must be 32/64bit integer, XMM, YMM or ZMM
6247 register. It is encoded in VEX prefix. We need to
6248 clear RegMem bit before calling operand_type_equal. */
6250 i386_operand_type op
;
6253 /* Check register-only source operand when two source
6254 operands are swapped. */
6255 if (!i
.tm
.operand_types
[source
].bitfield
.baseindex
6256 && i
.tm
.operand_types
[dest
].bitfield
.baseindex
)
6264 op
= i
.tm
.operand_types
[vvvv
];
6265 op
.bitfield
.regmem
= 0;
6266 if ((dest
+ 1) >= i
.operands
6267 || (!op
.bitfield
.reg32
6268 && op
.bitfield
.reg64
6269 && !operand_type_equal (&op
, ®xmm
)
6270 && !operand_type_equal (&op
, ®ymm
)
6271 && !operand_type_equal (&op
, ®zmm
)
6272 && !operand_type_equal (&op
, ®mask
)))
6274 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
6280 /* One of the register operands will be encoded in the i.tm.reg
6281 field, the other in the combined i.tm.mode and i.tm.regmem
6282 fields. If no form of this instruction supports a memory
6283 destination operand, then we assume the source operand may
6284 sometimes be a memory operand and so we need to store the
6285 destination in the i.rm.reg field. */
6286 if (!i
.tm
.operand_types
[dest
].bitfield
.regmem
6287 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
6289 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
6290 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
6291 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
6293 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
6295 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
6297 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
6302 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
6303 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
6304 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
6306 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
6308 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
6310 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
6313 if (flag_code
!= CODE_64BIT
&& (i
.rex
& (REX_R
| REX_B
)))
6315 if (!i
.types
[0].bitfield
.control
6316 && !i
.types
[1].bitfield
.control
)
6318 i
.rex
&= ~(REX_R
| REX_B
);
6319 add_prefix (LOCK_PREFIX_OPCODE
);
6323 { /* If it's not 2 reg operands... */
6328 unsigned int fake_zero_displacement
= 0;
6331 for (op
= 0; op
< i
.operands
; op
++)
6332 if (operand_type_check (i
.types
[op
], anymem
))
6334 gas_assert (op
< i
.operands
);
6336 if (i
.tm
.opcode_modifier
.vecsib
)
6338 if (i
.index_reg
->reg_num
== RegEiz
6339 || i
.index_reg
->reg_num
== RegRiz
)
6342 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6345 i
.sib
.base
= NO_BASE_REGISTER
;
6346 i
.sib
.scale
= i
.log2_scale_factor
;
6347 /* No Vec_Disp8 if there is no base. */
6348 i
.types
[op
].bitfield
.vec_disp8
= 0;
6349 i
.types
[op
].bitfield
.disp8
= 0;
6350 i
.types
[op
].bitfield
.disp16
= 0;
6351 i
.types
[op
].bitfield
.disp64
= 0;
6352 if (flag_code
!= CODE_64BIT
)
6354 /* Must be 32 bit */
6355 i
.types
[op
].bitfield
.disp32
= 1;
6356 i
.types
[op
].bitfield
.disp32s
= 0;
6360 i
.types
[op
].bitfield
.disp32
= 0;
6361 i
.types
[op
].bitfield
.disp32s
= 1;
6364 i
.sib
.index
= i
.index_reg
->reg_num
;
6365 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6367 if ((i
.index_reg
->reg_flags
& RegVRex
) != 0)
6373 if (i
.base_reg
== 0)
6376 if (!i
.disp_operands
)
6378 fake_zero_displacement
= 1;
6379 /* Instructions with VSIB byte need 32bit displacement
6380 if there is no base register. */
6381 if (i
.tm
.opcode_modifier
.vecsib
)
6382 i
.types
[op
].bitfield
.disp32
= 1;
6384 if (i
.index_reg
== 0)
6386 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6387 /* Operand is just <disp> */
6388 if (flag_code
== CODE_64BIT
)
6390 /* 64bit mode overwrites the 32bit absolute
6391 addressing by RIP relative addressing and
6392 absolute addressing is encoded by one of the
6393 redundant SIB forms. */
6394 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6395 i
.sib
.base
= NO_BASE_REGISTER
;
6396 i
.sib
.index
= NO_INDEX_REGISTER
;
6397 i
.types
[op
] = ((i
.prefix
[ADDR_PREFIX
] == 0)
6398 ? disp32s
: disp32
);
6400 else if ((flag_code
== CODE_16BIT
)
6401 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
6403 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
6404 i
.types
[op
] = disp16
;
6408 i
.rm
.regmem
= NO_BASE_REGISTER
;
6409 i
.types
[op
] = disp32
;
6412 else if (!i
.tm
.opcode_modifier
.vecsib
)
6414 /* !i.base_reg && i.index_reg */
6415 if (i
.index_reg
->reg_num
== RegEiz
6416 || i
.index_reg
->reg_num
== RegRiz
)
6417 i
.sib
.index
= NO_INDEX_REGISTER
;
6419 i
.sib
.index
= i
.index_reg
->reg_num
;
6420 i
.sib
.base
= NO_BASE_REGISTER
;
6421 i
.sib
.scale
= i
.log2_scale_factor
;
6422 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6423 /* No Vec_Disp8 if there is no base. */
6424 i
.types
[op
].bitfield
.vec_disp8
= 0;
6425 i
.types
[op
].bitfield
.disp8
= 0;
6426 i
.types
[op
].bitfield
.disp16
= 0;
6427 i
.types
[op
].bitfield
.disp64
= 0;
6428 if (flag_code
!= CODE_64BIT
)
6430 /* Must be 32 bit */
6431 i
.types
[op
].bitfield
.disp32
= 1;
6432 i
.types
[op
].bitfield
.disp32s
= 0;
6436 i
.types
[op
].bitfield
.disp32
= 0;
6437 i
.types
[op
].bitfield
.disp32s
= 1;
6439 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6443 /* RIP addressing for 64bit mode. */
6444 else if (i
.base_reg
->reg_num
== RegRip
||
6445 i
.base_reg
->reg_num
== RegEip
)
6447 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6448 i
.rm
.regmem
= NO_BASE_REGISTER
;
6449 i
.types
[op
].bitfield
.disp8
= 0;
6450 i
.types
[op
].bitfield
.disp16
= 0;
6451 i
.types
[op
].bitfield
.disp32
= 0;
6452 i
.types
[op
].bitfield
.disp32s
= 1;
6453 i
.types
[op
].bitfield
.disp64
= 0;
6454 i
.types
[op
].bitfield
.vec_disp8
= 0;
6455 i
.flags
[op
] |= Operand_PCrel
;
6456 if (! i
.disp_operands
)
6457 fake_zero_displacement
= 1;
6459 else if (i
.base_reg
->reg_type
.bitfield
.reg16
)
6461 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6462 switch (i
.base_reg
->reg_num
)
6465 if (i
.index_reg
== 0)
6467 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
6468 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
6472 if (i
.index_reg
== 0)
6475 if (operand_type_check (i
.types
[op
], disp
) == 0)
6477 /* fake (%bp) into 0(%bp) */
6478 if (i
.tm
.operand_types
[op
].bitfield
.vec_disp8
)
6479 i
.types
[op
].bitfield
.vec_disp8
= 1;
6481 i
.types
[op
].bitfield
.disp8
= 1;
6482 fake_zero_displacement
= 1;
6485 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
6486 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
6488 default: /* (%si) -> 4 or (%di) -> 5 */
6489 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
6491 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
6493 else /* i.base_reg and 32/64 bit mode */
6495 if (flag_code
== CODE_64BIT
6496 && operand_type_check (i
.types
[op
], disp
))
6498 i386_operand_type temp
;
6499 operand_type_set (&temp
, 0);
6500 temp
.bitfield
.disp8
= i
.types
[op
].bitfield
.disp8
;
6501 temp
.bitfield
.vec_disp8
6502 = i
.types
[op
].bitfield
.vec_disp8
;
6504 if (i
.prefix
[ADDR_PREFIX
] == 0)
6505 i
.types
[op
].bitfield
.disp32s
= 1;
6507 i
.types
[op
].bitfield
.disp32
= 1;
6510 if (!i
.tm
.opcode_modifier
.vecsib
)
6511 i
.rm
.regmem
= i
.base_reg
->reg_num
;
6512 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
6514 i
.sib
.base
= i
.base_reg
->reg_num
;
6515 /* x86-64 ignores REX prefix bit here to avoid decoder
6517 if (!(i
.base_reg
->reg_flags
& RegRex
)
6518 && (i
.base_reg
->reg_num
== EBP_REG_NUM
6519 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
6521 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
6523 fake_zero_displacement
= 1;
6524 if (i
.tm
.operand_types
[op
].bitfield
.vec_disp8
)
6525 i
.types
[op
].bitfield
.vec_disp8
= 1;
6527 i
.types
[op
].bitfield
.disp8
= 1;
6529 i
.sib
.scale
= i
.log2_scale_factor
;
6530 if (i
.index_reg
== 0)
6532 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6533 /* <disp>(%esp) becomes two byte modrm with no index
6534 register. We've already stored the code for esp
6535 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
6536 Any base register besides %esp will not use the
6537 extra modrm byte. */
6538 i
.sib
.index
= NO_INDEX_REGISTER
;
6540 else if (!i
.tm
.opcode_modifier
.vecsib
)
6542 if (i
.index_reg
->reg_num
== RegEiz
6543 || i
.index_reg
->reg_num
== RegRiz
)
6544 i
.sib
.index
= NO_INDEX_REGISTER
;
6546 i
.sib
.index
= i
.index_reg
->reg_num
;
6547 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6548 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6553 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
6554 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
6558 if (!fake_zero_displacement
6562 fake_zero_displacement
= 1;
6563 if (i
.disp_encoding
== disp_encoding_8bit
)
6564 i
.types
[op
].bitfield
.disp8
= 1;
6566 i
.types
[op
].bitfield
.disp32
= 1;
6568 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
6572 if (fake_zero_displacement
)
6574 /* Fakes a zero displacement assuming that i.types[op]
6575 holds the correct displacement size. */
6578 gas_assert (i
.op
[op
].disps
== 0);
6579 exp
= &disp_expressions
[i
.disp_operands
++];
6580 i
.op
[op
].disps
= exp
;
6581 exp
->X_op
= O_constant
;
6582 exp
->X_add_number
= 0;
6583 exp
->X_add_symbol
= (symbolS
*) 0;
6584 exp
->X_op_symbol
= (symbolS
*) 0;
6592 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
6594 if (operand_type_check (i
.types
[0], imm
))
6595 i
.vex
.register_specifier
= NULL
;
6598 /* VEX.vvvv encodes one of the sources when the first
6599 operand is not an immediate. */
6600 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
6601 i
.vex
.register_specifier
= i
.op
[0].regs
;
6603 i
.vex
.register_specifier
= i
.op
[1].regs
;
6606 /* Destination is a XMM register encoded in the ModRM.reg
6608 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
6609 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
6612 /* ModRM.rm and VEX.B encodes the other source. */
6613 if (!i
.mem_operands
)
6617 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
6618 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
6620 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
6622 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
6626 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
6628 i
.vex
.register_specifier
= i
.op
[2].regs
;
6629 if (!i
.mem_operands
)
6632 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
6633 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
6637 /* Fill in i.rm.reg or i.rm.regmem field with register operand
6638 (if any) based on i.tm.extension_opcode. Again, we must be
6639 careful to make sure that segment/control/debug/test/MMX
6640 registers are coded into the i.rm.reg field. */
6641 else if (i
.reg_operands
)
6644 unsigned int vex_reg
= ~0;
6646 for (op
= 0; op
< i
.operands
; op
++)
6647 if (i
.types
[op
].bitfield
.reg8
6648 || i
.types
[op
].bitfield
.reg16
6649 || i
.types
[op
].bitfield
.reg32
6650 || i
.types
[op
].bitfield
.reg64
6651 || i
.types
[op
].bitfield
.regmmx
6652 || i
.types
[op
].bitfield
.regxmm
6653 || i
.types
[op
].bitfield
.regymm
6654 || i
.types
[op
].bitfield
.regbnd
6655 || i
.types
[op
].bitfield
.regzmm
6656 || i
.types
[op
].bitfield
.regmask
6657 || i
.types
[op
].bitfield
.sreg2
6658 || i
.types
[op
].bitfield
.sreg3
6659 || i
.types
[op
].bitfield
.control
6660 || i
.types
[op
].bitfield
.debug
6661 || i
.types
[op
].bitfield
.test
)
6666 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6668 /* For instructions with VexNDS, the register-only
6669 source operand is encoded in VEX prefix. */
6670 gas_assert (mem
!= (unsigned int) ~0);
6675 gas_assert (op
< i
.operands
);
6679 /* Check register-only source operand when two source
6680 operands are swapped. */
6681 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
6682 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
6686 gas_assert (mem
== (vex_reg
+ 1)
6687 && op
< i
.operands
);
6692 gas_assert (vex_reg
< i
.operands
);
6696 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
6698 /* For instructions with VexNDD, the register destination
6699 is encoded in VEX prefix. */
6700 if (i
.mem_operands
== 0)
6702 /* There is no memory operand. */
6703 gas_assert ((op
+ 2) == i
.operands
);
6708 /* There are only 2 operands. */
6709 gas_assert (op
< 2 && i
.operands
== 2);
6714 gas_assert (op
< i
.operands
);
6716 if (vex_reg
!= (unsigned int) ~0)
6718 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
6720 if (type
->bitfield
.reg32
!= 1
6721 && type
->bitfield
.reg64
!= 1
6722 && !operand_type_equal (type
, ®xmm
)
6723 && !operand_type_equal (type
, ®ymm
)
6724 && !operand_type_equal (type
, ®zmm
)
6725 && !operand_type_equal (type
, ®mask
))
6728 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
6731 /* Don't set OP operand twice. */
6734 /* If there is an extension opcode to put here, the
6735 register number must be put into the regmem field. */
6736 if (i
.tm
.extension_opcode
!= None
)
6738 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
6739 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
6741 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
6746 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
6747 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
6749 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
6754 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6755 must set it to 3 to indicate this is a register operand
6756 in the regmem field. */
6757 if (!i
.mem_operands
)
6761 /* Fill in i.rm.reg field with extension opcode (if any). */
6762 if (i
.tm
.extension_opcode
!= None
)
6763 i
.rm
.reg
= i
.tm
.extension_opcode
;
6769 output_branch (void)
6775 relax_substateT subtype
;
6779 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
6780 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
6783 if (i
.prefix
[DATA_PREFIX
] != 0)
6789 /* Pentium4 branch hints. */
6790 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
6791 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
6796 if (i
.prefix
[REX_PREFIX
] != 0)
6802 /* BND prefixed jump. */
6803 if (i
.prefix
[BND_PREFIX
] != 0)
6805 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
6809 if (i
.prefixes
!= 0 && !intel_syntax
)
6810 as_warn (_("skipping prefixes on this instruction"));
6812 /* It's always a symbol; End frag & setup for relax.
6813 Make sure there is enough room in this frag for the largest
6814 instruction we may generate in md_convert_frag. This is 2
6815 bytes for the opcode and room for the prefix and largest
6817 frag_grow (prefix
+ 2 + 4);
6818 /* Prefix and 1 opcode byte go in fr_fix. */
6819 p
= frag_more (prefix
+ 1);
6820 if (i
.prefix
[DATA_PREFIX
] != 0)
6821 *p
++ = DATA_PREFIX_OPCODE
;
6822 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
6823 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
6824 *p
++ = i
.prefix
[SEG_PREFIX
];
6825 if (i
.prefix
[REX_PREFIX
] != 0)
6826 *p
++ = i
.prefix
[REX_PREFIX
];
6827 *p
= i
.tm
.base_opcode
;
6829 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
6830 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
6831 else if (cpu_arch_flags
.bitfield
.cpui386
)
6832 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
6834 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
6837 sym
= i
.op
[0].disps
->X_add_symbol
;
6838 off
= i
.op
[0].disps
->X_add_number
;
6840 if (i
.op
[0].disps
->X_op
!= O_constant
6841 && i
.op
[0].disps
->X_op
!= O_symbol
)
6843 /* Handle complex expressions. */
6844 sym
= make_expr_symbol (i
.op
[0].disps
);
6848 /* 1 possible extra opcode + 4 byte displacement go in var part.
6849 Pass reloc in fr_var. */
6850 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
6860 if (i
.tm
.opcode_modifier
.jumpbyte
)
6862 /* This is a loop or jecxz type instruction. */
6864 if (i
.prefix
[ADDR_PREFIX
] != 0)
6866 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE
);
6869 /* Pentium4 branch hints. */
6870 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
6871 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
6873 FRAG_APPEND_1_CHAR (i
.prefix
[SEG_PREFIX
]);
6882 if (flag_code
== CODE_16BIT
)
6885 if (i
.prefix
[DATA_PREFIX
] != 0)
6887 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE
);
6897 if (i
.prefix
[REX_PREFIX
] != 0)
6899 FRAG_APPEND_1_CHAR (i
.prefix
[REX_PREFIX
]);
6903 /* BND prefixed jump. */
6904 if (i
.prefix
[BND_PREFIX
] != 0)
6906 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
6910 if (i
.prefixes
!= 0 && !intel_syntax
)
6911 as_warn (_("skipping prefixes on this instruction"));
6913 p
= frag_more (i
.tm
.opcode_length
+ size
);
6914 switch (i
.tm
.opcode_length
)
6917 *p
++ = i
.tm
.base_opcode
>> 8;
6919 *p
++ = i
.tm
.base_opcode
;
6925 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
6926 i
.op
[0].disps
, 1, reloc (size
, 1, 1, i
.reloc
[0]));
6928 /* All jumps handled here are signed, but don't use a signed limit
6929 check for 32 and 16 bit jumps as we want to allow wrap around at
6930 4G and 64k respectively. */
6932 fixP
->fx_signed
= 1;
6936 output_interseg_jump (void)
6944 if (flag_code
== CODE_16BIT
)
6948 if (i
.prefix
[DATA_PREFIX
] != 0)
6954 if (i
.prefix
[REX_PREFIX
] != 0)
6964 if (i
.prefixes
!= 0 && !intel_syntax
)
6965 as_warn (_("skipping prefixes on this instruction"));
6967 /* 1 opcode; 2 segment; offset */
6968 p
= frag_more (prefix
+ 1 + 2 + size
);
6970 if (i
.prefix
[DATA_PREFIX
] != 0)
6971 *p
++ = DATA_PREFIX_OPCODE
;
6973 if (i
.prefix
[REX_PREFIX
] != 0)
6974 *p
++ = i
.prefix
[REX_PREFIX
];
6976 *p
++ = i
.tm
.base_opcode
;
6977 if (i
.op
[1].imms
->X_op
== O_constant
)
6979 offsetT n
= i
.op
[1].imms
->X_add_number
;
6982 && !fits_in_unsigned_word (n
)
6983 && !fits_in_signed_word (n
))
6985 as_bad (_("16-bit jump out of range"));
6988 md_number_to_chars (p
, n
, size
);
6991 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
6992 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
6993 if (i
.op
[0].imms
->X_op
!= O_constant
)
6994 as_bad (_("can't handle non absolute segment in `%s'"),
6996 md_number_to_chars (p
+ size
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
7002 fragS
*insn_start_frag
;
7003 offsetT insn_start_off
;
7005 /* Tie dwarf2 debug info to the address at the start of the insn.
7006 We can't do this after the insn has been output as the current
7007 frag may have been closed off. eg. by frag_var. */
7008 dwarf2_emit_insn (0);
7010 insn_start_frag
= frag_now
;
7011 insn_start_off
= frag_now_fix ();
7014 if (i
.tm
.opcode_modifier
.jump
)
7016 else if (i
.tm
.opcode_modifier
.jumpbyte
7017 || i
.tm
.opcode_modifier
.jumpdword
)
7019 else if (i
.tm
.opcode_modifier
.jumpintersegment
)
7020 output_interseg_jump ();
7023 /* Output normal instructions here. */
7027 unsigned int prefix
;
7030 && i
.tm
.base_opcode
== 0xfae
7032 && i
.imm_operands
== 1
7033 && (i
.op
[0].imms
->X_add_number
== 0xe8
7034 || i
.op
[0].imms
->X_add_number
== 0xf0
7035 || i
.op
[0].imms
->X_add_number
== 0xf8))
7037 /* Encode lfence, mfence, and sfence as
7038 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
7039 offsetT val
= 0x240483f0ULL
;
7041 md_number_to_chars (p
, val
, 5);
7045 /* Some processors fail on LOCK prefix. This options makes
7046 assembler ignore LOCK prefix and serves as a workaround. */
7047 if (omit_lock_prefix
)
7049 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
)
7051 i
.prefix
[LOCK_PREFIX
] = 0;
7054 /* Since the VEX/EVEX prefix contains the implicit prefix, we
7055 don't need the explicit prefix. */
7056 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
7058 switch (i
.tm
.opcode_length
)
7061 if (i
.tm
.base_opcode
& 0xff000000)
7063 prefix
= (i
.tm
.base_opcode
>> 24) & 0xff;
7068 if ((i
.tm
.base_opcode
& 0xff0000) != 0)
7070 prefix
= (i
.tm
.base_opcode
>> 16) & 0xff;
7071 if (i
.tm
.cpu_flags
.bitfield
.cpupadlock
)
7074 if (prefix
!= REPE_PREFIX_OPCODE
7075 || (i
.prefix
[REP_PREFIX
]
7076 != REPE_PREFIX_OPCODE
))
7077 add_prefix (prefix
);
7080 add_prefix (prefix
);
7089 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7090 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
7091 R_X86_64_GOTTPOFF relocation so that linker can safely
7092 perform IE->LE optimization. */
7093 if (x86_elf_abi
== X86_64_X32_ABI
7095 && i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
7096 && i
.prefix
[REX_PREFIX
] == 0)
7097 add_prefix (REX_OPCODE
);
7100 /* The prefix bytes. */
7101 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
7103 FRAG_APPEND_1_CHAR (*q
);
7107 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
7112 /* REX byte is encoded in VEX prefix. */
7116 FRAG_APPEND_1_CHAR (*q
);
7119 /* There should be no other prefixes for instructions
7124 /* For EVEX instructions i.vrex should become 0 after
7125 build_evex_prefix. For VEX instructions upper 16 registers
7126 aren't available, so VREX should be 0. */
7129 /* Now the VEX prefix. */
7130 p
= frag_more (i
.vex
.length
);
7131 for (j
= 0; j
< i
.vex
.length
; j
++)
7132 p
[j
] = i
.vex
.bytes
[j
];
7135 /* Now the opcode; be careful about word order here! */
7136 if (i
.tm
.opcode_length
== 1)
7138 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
7142 switch (i
.tm
.opcode_length
)
7146 *p
++ = (i
.tm
.base_opcode
>> 24) & 0xff;
7147 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
7151 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
7161 /* Put out high byte first: can't use md_number_to_chars! */
7162 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
7163 *p
= i
.tm
.base_opcode
& 0xff;
7166 /* Now the modrm byte and sib byte (if present). */
7167 if (i
.tm
.opcode_modifier
.modrm
)
7169 FRAG_APPEND_1_CHAR ((i
.rm
.regmem
<< 0
7172 /* If i.rm.regmem == ESP (4)
7173 && i.rm.mode != (Register mode)
7175 ==> need second modrm byte. */
7176 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
7178 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.reg16
))
7179 FRAG_APPEND_1_CHAR ((i
.sib
.base
<< 0
7181 | i
.sib
.scale
<< 6));
7184 if (i
.disp_operands
)
7185 output_disp (insn_start_frag
, insn_start_off
);
7188 output_imm (insn_start_frag
, insn_start_off
);
7194 pi ("" /*line*/, &i
);
7196 #endif /* DEBUG386 */
7199 /* Return the size of the displacement operand N. */
7202 disp_size (unsigned int n
)
7206 /* Vec_Disp8 has to be 8bit. */
7207 if (i
.types
[n
].bitfield
.vec_disp8
)
7209 else if (i
.types
[n
].bitfield
.disp64
)
7211 else if (i
.types
[n
].bitfield
.disp8
)
7213 else if (i
.types
[n
].bitfield
.disp16
)
7218 /* Return the size of the immediate operand N. */
7221 imm_size (unsigned int n
)
7224 if (i
.types
[n
].bitfield
.imm64
)
7226 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
7228 else if (i
.types
[n
].bitfield
.imm16
)
7234 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
7239 for (n
= 0; n
< i
.operands
; n
++)
7241 if (i
.types
[n
].bitfield
.vec_disp8
7242 || operand_type_check (i
.types
[n
], disp
))
7244 if (i
.op
[n
].disps
->X_op
== O_constant
)
7246 int size
= disp_size (n
);
7247 offsetT val
= i
.op
[n
].disps
->X_add_number
;
7249 if (i
.types
[n
].bitfield
.vec_disp8
)
7251 val
= offset_in_range (val
, size
);
7252 p
= frag_more (size
);
7253 md_number_to_chars (p
, val
, size
);
7257 enum bfd_reloc_code_real reloc_type
;
7258 int size
= disp_size (n
);
7259 int sign
= i
.types
[n
].bitfield
.disp32s
;
7260 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
7263 /* We can't have 8 bit displacement here. */
7264 gas_assert (!i
.types
[n
].bitfield
.disp8
);
7266 /* The PC relative address is computed relative
7267 to the instruction boundary, so in case immediate
7268 fields follows, we need to adjust the value. */
7269 if (pcrel
&& i
.imm_operands
)
7274 for (n1
= 0; n1
< i
.operands
; n1
++)
7275 if (operand_type_check (i
.types
[n1
], imm
))
7277 /* Only one immediate is allowed for PC
7278 relative address. */
7279 gas_assert (sz
== 0);
7281 i
.op
[n
].disps
->X_add_number
-= sz
;
7283 /* We should find the immediate. */
7284 gas_assert (sz
!= 0);
7287 p
= frag_more (size
);
7288 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
7290 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
7291 && (((reloc_type
== BFD_RELOC_32
7292 || reloc_type
== BFD_RELOC_X86_64_32S
7293 || (reloc_type
== BFD_RELOC_64
7295 && (i
.op
[n
].disps
->X_op
== O_symbol
7296 || (i
.op
[n
].disps
->X_op
== O_add
7297 && ((symbol_get_value_expression
7298 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
7300 || reloc_type
== BFD_RELOC_32_PCREL
))
7304 if (insn_start_frag
== frag_now
)
7305 add
= (p
- frag_now
->fr_literal
) - insn_start_off
;
7310 add
= insn_start_frag
->fr_fix
- insn_start_off
;
7311 for (fr
= insn_start_frag
->fr_next
;
7312 fr
&& fr
!= frag_now
; fr
= fr
->fr_next
)
7314 add
+= p
- frag_now
->fr_literal
;
7319 reloc_type
= BFD_RELOC_386_GOTPC
;
7320 i
.op
[n
].imms
->X_add_number
+= add
;
7322 else if (reloc_type
== BFD_RELOC_64
)
7323 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
7325 /* Don't do the adjustment for x86-64, as there
7326 the pcrel addressing is relative to the _next_
7327 insn, and that is taken care of in other code. */
7328 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
7330 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
7331 size
, i
.op
[n
].disps
, pcrel
,
7333 /* Check for "call/jmp *mem", "mov mem, %reg",
7334 "test %reg, mem" and "binop mem, %reg" where binop
7335 is one of adc, add, and, cmp, or, sbb, sub, xor
7336 instructions. Always generate R_386_GOT32X for
7337 "sym*GOT" operand in 32-bit mode. */
7338 if ((generate_relax_relocations
7341 && i
.rm
.regmem
== 5))
7343 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
7344 && ((i
.operands
== 1
7345 && i
.tm
.base_opcode
== 0xff
7346 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
7348 && (i
.tm
.base_opcode
== 0x8b
7349 || i
.tm
.base_opcode
== 0x85
7350 || (i
.tm
.base_opcode
& 0xc7) == 0x03))))
7354 fixP
->fx_tcbit
= i
.rex
!= 0;
7356 && (i
.base_reg
->reg_num
== RegRip
7357 || i
.base_reg
->reg_num
== RegEip
))
7358 fixP
->fx_tcbit2
= 1;
7361 fixP
->fx_tcbit2
= 1;
7369 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
7374 for (n
= 0; n
< i
.operands
; n
++)
7376 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
7377 if (i
.rounding
&& (int) n
== i
.rounding
->operand
)
7380 if (operand_type_check (i
.types
[n
], imm
))
7382 if (i
.op
[n
].imms
->X_op
== O_constant
)
7384 int size
= imm_size (n
);
7387 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
7389 p
= frag_more (size
);
7390 md_number_to_chars (p
, val
, size
);
7394 /* Not absolute_section.
7395 Need a 32-bit fixup (don't support 8bit
7396 non-absolute imms). Try to support other
7398 enum bfd_reloc_code_real reloc_type
;
7399 int size
= imm_size (n
);
7402 if (i
.types
[n
].bitfield
.imm32s
7403 && (i
.suffix
== QWORD_MNEM_SUFFIX
7404 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
7409 p
= frag_more (size
);
7410 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
7412 /* This is tough to explain. We end up with this one if we
7413 * have operands that look like
7414 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
7415 * obtain the absolute address of the GOT, and it is strongly
7416 * preferable from a performance point of view to avoid using
7417 * a runtime relocation for this. The actual sequence of
7418 * instructions often look something like:
7423 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
7425 * The call and pop essentially return the absolute address
7426 * of the label .L66 and store it in %ebx. The linker itself
7427 * will ultimately change the first operand of the addl so
7428 * that %ebx points to the GOT, but to keep things simple, the
7429 * .o file must have this operand set so that it generates not
7430 * the absolute address of .L66, but the absolute address of
7431 * itself. This allows the linker itself simply treat a GOTPC
7432 * relocation as asking for a pcrel offset to the GOT to be
7433 * added in, and the addend of the relocation is stored in the
7434 * operand field for the instruction itself.
7436 * Our job here is to fix the operand so that it would add
7437 * the correct offset so that %ebx would point to itself. The
7438 * thing that is tricky is that .-.L66 will point to the
7439 * beginning of the instruction, so we need to further modify
7440 * the operand so that it will point to itself. There are
7441 * other cases where you have something like:
7443 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
7445 * and here no correction would be required. Internally in
7446 * the assembler we treat operands of this form as not being
7447 * pcrel since the '.' is explicitly mentioned, and I wonder
7448 * whether it would simplify matters to do it this way. Who
7449 * knows. In earlier versions of the PIC patches, the
7450 * pcrel_adjust field was used to store the correction, but
7451 * since the expression is not pcrel, I felt it would be
7452 * confusing to do it this way. */
7454 if ((reloc_type
== BFD_RELOC_32
7455 || reloc_type
== BFD_RELOC_X86_64_32S
7456 || reloc_type
== BFD_RELOC_64
)
7458 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
7459 && (i
.op
[n
].imms
->X_op
== O_symbol
7460 || (i
.op
[n
].imms
->X_op
== O_add
7461 && ((symbol_get_value_expression
7462 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
7467 if (insn_start_frag
== frag_now
)
7468 add
= (p
- frag_now
->fr_literal
) - insn_start_off
;
7473 add
= insn_start_frag
->fr_fix
- insn_start_off
;
7474 for (fr
= insn_start_frag
->fr_next
;
7475 fr
&& fr
!= frag_now
; fr
= fr
->fr_next
)
7477 add
+= p
- frag_now
->fr_literal
;
7481 reloc_type
= BFD_RELOC_386_GOTPC
;
7483 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
7485 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
7486 i
.op
[n
].imms
->X_add_number
+= add
;
7488 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
7489 i
.op
[n
].imms
, 0, reloc_type
);
7495 /* x86_cons_fix_new is called via the expression parsing code when a
7496 reloc is needed. We use this hook to get the correct .got reloc. */
7497 static int cons_sign
= -1;
7500 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
7501 expressionS
*exp
, bfd_reloc_code_real_type r
)
7503 r
= reloc (len
, 0, cons_sign
, r
);
7506 if (exp
->X_op
== O_secrel
)
7508 exp
->X_op
= O_symbol
;
7509 r
= BFD_RELOC_32_SECREL
;
7513 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
7516 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
7517 purpose of the `.dc.a' internal pseudo-op. */
7520 x86_address_bytes (void)
7522 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
7524 return stdoutput
->arch_info
->bits_per_address
/ 8;
7527 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
7529 # define lex_got(reloc, adjust, types) NULL
7531 /* Parse operands of the form
7532 <symbol>@GOTOFF+<nnn>
7533 and similar .plt or .got references.
7535 If we find one, set up the correct relocation in RELOC and copy the
7536 input string, minus the `@GOTOFF' into a malloc'd buffer for
7537 parsing by the calling routine. Return this buffer, and if ADJUST
7538 is non-null set it to the length of the string we removed from the
7539 input line. Otherwise return NULL. */
7541 lex_got (enum bfd_reloc_code_real
*rel
,
7543 i386_operand_type
*types
)
7545 /* Some of the relocations depend on the size of what field is to
7546 be relocated. But in our callers i386_immediate and i386_displacement
7547 we don't yet know the operand size (this will be set by insn
7548 matching). Hence we record the word32 relocation here,
7549 and adjust the reloc according to the real size in reloc(). */
7550 static const struct {
7553 const enum bfd_reloc_code_real rel
[2];
7554 const i386_operand_type types64
;
7556 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7557 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
7559 OPERAND_TYPE_IMM32_64
},
7561 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
7562 BFD_RELOC_X86_64_PLTOFF64
},
7563 OPERAND_TYPE_IMM64
},
7564 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
7565 BFD_RELOC_X86_64_PLT32
},
7566 OPERAND_TYPE_IMM32_32S_DISP32
},
7567 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
7568 BFD_RELOC_X86_64_GOTPLT64
},
7569 OPERAND_TYPE_IMM64_DISP64
},
7570 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
7571 BFD_RELOC_X86_64_GOTOFF64
},
7572 OPERAND_TYPE_IMM64_DISP64
},
7573 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
7574 BFD_RELOC_X86_64_GOTPCREL
},
7575 OPERAND_TYPE_IMM32_32S_DISP32
},
7576 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
7577 BFD_RELOC_X86_64_TLSGD
},
7578 OPERAND_TYPE_IMM32_32S_DISP32
},
7579 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
7580 _dummy_first_bfd_reloc_code_real
},
7581 OPERAND_TYPE_NONE
},
7582 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
7583 BFD_RELOC_X86_64_TLSLD
},
7584 OPERAND_TYPE_IMM32_32S_DISP32
},
7585 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
7586 BFD_RELOC_X86_64_GOTTPOFF
},
7587 OPERAND_TYPE_IMM32_32S_DISP32
},
7588 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
7589 BFD_RELOC_X86_64_TPOFF32
},
7590 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7591 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
7592 _dummy_first_bfd_reloc_code_real
},
7593 OPERAND_TYPE_NONE
},
7594 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
7595 BFD_RELOC_X86_64_DTPOFF32
},
7596 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7597 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
7598 _dummy_first_bfd_reloc_code_real
},
7599 OPERAND_TYPE_NONE
},
7600 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
7601 _dummy_first_bfd_reloc_code_real
},
7602 OPERAND_TYPE_NONE
},
7603 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
7604 BFD_RELOC_X86_64_GOT32
},
7605 OPERAND_TYPE_IMM32_32S_64_DISP32
},
7606 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
7607 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
7608 OPERAND_TYPE_IMM32_32S_DISP32
},
7609 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
7610 BFD_RELOC_X86_64_TLSDESC_CALL
},
7611 OPERAND_TYPE_IMM32_32S_DISP32
},
7616 #if defined (OBJ_MAYBE_ELF)
7621 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
7622 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
7625 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
7627 int len
= gotrel
[j
].len
;
7628 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
7630 if (gotrel
[j
].rel
[object_64bit
] != 0)
7633 char *tmpbuf
, *past_reloc
;
7635 *rel
= gotrel
[j
].rel
[object_64bit
];
7639 if (flag_code
!= CODE_64BIT
)
7641 types
->bitfield
.imm32
= 1;
7642 types
->bitfield
.disp32
= 1;
7645 *types
= gotrel
[j
].types64
;
7648 if (j
!= 0 && GOT_symbol
== NULL
)
7649 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
7651 /* The length of the first part of our input line. */
7652 first
= cp
- input_line_pointer
;
7654 /* The second part goes from after the reloc token until
7655 (and including) an end_of_line char or comma. */
7656 past_reloc
= cp
+ 1 + len
;
7658 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
7660 second
= cp
+ 1 - past_reloc
;
7662 /* Allocate and copy string. The trailing NUL shouldn't
7663 be necessary, but be safe. */
7664 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
7665 memcpy (tmpbuf
, input_line_pointer
, first
);
7666 if (second
!= 0 && *past_reloc
!= ' ')
7667 /* Replace the relocation token with ' ', so that
7668 errors like foo@GOTOFF1 will be detected. */
7669 tmpbuf
[first
++] = ' ';
7671 /* Increment length by 1 if the relocation token is
7676 memcpy (tmpbuf
+ first
, past_reloc
, second
);
7677 tmpbuf
[first
+ second
] = '\0';
7681 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7682 gotrel
[j
].str
, 1 << (5 + object_64bit
));
7687 /* Might be a symbol version string. Don't as_bad here. */
7696 /* Parse operands of the form
7697 <symbol>@SECREL32+<nnn>
7699 If we find one, set up the correct relocation in RELOC and copy the
7700 input string, minus the `@SECREL32' into a malloc'd buffer for
7701 parsing by the calling routine. Return this buffer, and if ADJUST
7702 is non-null set it to the length of the string we removed from the
7703 input line. Otherwise return NULL.
7705 This function is copied from the ELF version above adjusted for PE targets. */
7708 lex_got (enum bfd_reloc_code_real
*rel ATTRIBUTE_UNUSED
,
7709 int *adjust ATTRIBUTE_UNUSED
,
7710 i386_operand_type
*types
)
7716 const enum bfd_reloc_code_real rel
[2];
7717 const i386_operand_type types64
;
7721 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
7722 BFD_RELOC_32_SECREL
},
7723 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7729 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
7730 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
7733 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
7735 int len
= gotrel
[j
].len
;
7737 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
7739 if (gotrel
[j
].rel
[object_64bit
] != 0)
7742 char *tmpbuf
, *past_reloc
;
7744 *rel
= gotrel
[j
].rel
[object_64bit
];
7750 if (flag_code
!= CODE_64BIT
)
7752 types
->bitfield
.imm32
= 1;
7753 types
->bitfield
.disp32
= 1;
7756 *types
= gotrel
[j
].types64
;
7759 /* The length of the first part of our input line. */
7760 first
= cp
- input_line_pointer
;
7762 /* The second part goes from after the reloc token until
7763 (and including) an end_of_line char or comma. */
7764 past_reloc
= cp
+ 1 + len
;
7766 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
7768 second
= cp
+ 1 - past_reloc
;
7770 /* Allocate and copy string. The trailing NUL shouldn't
7771 be necessary, but be safe. */
7772 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
7773 memcpy (tmpbuf
, input_line_pointer
, first
);
7774 if (second
!= 0 && *past_reloc
!= ' ')
7775 /* Replace the relocation token with ' ', so that
7776 errors like foo@SECLREL321 will be detected. */
7777 tmpbuf
[first
++] = ' ';
7778 memcpy (tmpbuf
+ first
, past_reloc
, second
);
7779 tmpbuf
[first
+ second
] = '\0';
7783 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7784 gotrel
[j
].str
, 1 << (5 + object_64bit
));
7789 /* Might be a symbol version string. Don't as_bad here. */
7795 bfd_reloc_code_real_type
7796 x86_cons (expressionS
*exp
, int size
)
7798 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
7800 intel_syntax
= -intel_syntax
;
7803 if (size
== 4 || (object_64bit
&& size
== 8))
7805 /* Handle @GOTOFF and the like in an expression. */
7807 char *gotfree_input_line
;
7810 save
= input_line_pointer
;
7811 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
7812 if (gotfree_input_line
)
7813 input_line_pointer
= gotfree_input_line
;
7817 if (gotfree_input_line
)
7819 /* expression () has merrily parsed up to the end of line,
7820 or a comma - in the wrong buffer. Transfer how far
7821 input_line_pointer has moved to the right buffer. */
7822 input_line_pointer
= (save
7823 + (input_line_pointer
- gotfree_input_line
)
7825 free (gotfree_input_line
);
7826 if (exp
->X_op
== O_constant
7827 || exp
->X_op
== O_absent
7828 || exp
->X_op
== O_illegal
7829 || exp
->X_op
== O_register
7830 || exp
->X_op
== O_big
)
7832 char c
= *input_line_pointer
;
7833 *input_line_pointer
= 0;
7834 as_bad (_("missing or invalid expression `%s'"), save
);
7835 *input_line_pointer
= c
;
7842 intel_syntax
= -intel_syntax
;
7845 i386_intel_simplify (exp
);
7851 signed_cons (int size
)
7853 if (flag_code
== CODE_64BIT
)
7861 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
7868 if (exp
.X_op
== O_symbol
)
7869 exp
.X_op
= O_secrel
;
7871 emit_expr (&exp
, 4);
7873 while (*input_line_pointer
++ == ',');
7875 input_line_pointer
--;
7876 demand_empty_rest_of_line ();
7880 /* Handle Vector operations. */
7883 check_VecOperations (char *op_string
, char *op_end
)
7885 const reg_entry
*mask
;
7890 && (op_end
== NULL
|| op_string
< op_end
))
7893 if (*op_string
== '{')
7897 /* Check broadcasts. */
7898 if (strncmp (op_string
, "1to", 3) == 0)
7903 goto duplicated_vec_op
;
7906 if (*op_string
== '8')
7907 bcst_type
= BROADCAST_1TO8
;
7908 else if (*op_string
== '4')
7909 bcst_type
= BROADCAST_1TO4
;
7910 else if (*op_string
== '2')
7911 bcst_type
= BROADCAST_1TO2
;
7912 else if (*op_string
== '1'
7913 && *(op_string
+1) == '6')
7915 bcst_type
= BROADCAST_1TO16
;
7920 as_bad (_("Unsupported broadcast: `%s'"), saved
);
7925 broadcast_op
.type
= bcst_type
;
7926 broadcast_op
.operand
= this_operand
;
7927 i
.broadcast
= &broadcast_op
;
7929 /* Check masking operation. */
7930 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
7932 /* k0 can't be used for write mask. */
7933 if (mask
->reg_num
== 0)
7935 as_bad (_("`%s' can't be used for write mask"),
7942 mask_op
.mask
= mask
;
7943 mask_op
.zeroing
= 0;
7944 mask_op
.operand
= this_operand
;
7950 goto duplicated_vec_op
;
7952 i
.mask
->mask
= mask
;
7954 /* Only "{z}" is allowed here. No need to check
7955 zeroing mask explicitly. */
7956 if (i
.mask
->operand
!= this_operand
)
7958 as_bad (_("invalid write mask `%s'"), saved
);
7965 /* Check zeroing-flag for masking operation. */
7966 else if (*op_string
== 'z')
7970 mask_op
.mask
= NULL
;
7971 mask_op
.zeroing
= 1;
7972 mask_op
.operand
= this_operand
;
7977 if (i
.mask
->zeroing
)
7980 as_bad (_("duplicated `%s'"), saved
);
7984 i
.mask
->zeroing
= 1;
7986 /* Only "{%k}" is allowed here. No need to check mask
7987 register explicitly. */
7988 if (i
.mask
->operand
!= this_operand
)
7990 as_bad (_("invalid zeroing-masking `%s'"),
7999 goto unknown_vec_op
;
8001 if (*op_string
!= '}')
8003 as_bad (_("missing `}' in `%s'"), saved
);
8010 /* We don't know this one. */
8011 as_bad (_("unknown vector operation: `%s'"), saved
);
8019 i386_immediate (char *imm_start
)
8021 char *save_input_line_pointer
;
8022 char *gotfree_input_line
;
8025 i386_operand_type types
;
8027 operand_type_set (&types
, ~0);
8029 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
8031 as_bad (_("at most %d immediate operands are allowed"),
8032 MAX_IMMEDIATE_OPERANDS
);
8036 exp
= &im_expressions
[i
.imm_operands
++];
8037 i
.op
[this_operand
].imms
= exp
;
8039 if (is_space_char (*imm_start
))
8042 save_input_line_pointer
= input_line_pointer
;
8043 input_line_pointer
= imm_start
;
8045 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
8046 if (gotfree_input_line
)
8047 input_line_pointer
= gotfree_input_line
;
8049 exp_seg
= expression (exp
);
8053 /* Handle vector operations. */
8054 if (*input_line_pointer
== '{')
8056 input_line_pointer
= check_VecOperations (input_line_pointer
,
8058 if (input_line_pointer
== NULL
)
8062 if (*input_line_pointer
)
8063 as_bad (_("junk `%s' after expression"), input_line_pointer
);
8065 input_line_pointer
= save_input_line_pointer
;
8066 if (gotfree_input_line
)
8068 free (gotfree_input_line
);
8070 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
8071 exp
->X_op
= O_illegal
;
8074 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
8078 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
8079 i386_operand_type types
, const char *imm_start
)
8081 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
8084 as_bad (_("missing or invalid immediate expression `%s'"),
8088 else if (exp
->X_op
== O_constant
)
8090 /* Size it properly later. */
8091 i
.types
[this_operand
].bitfield
.imm64
= 1;
8092 /* If not 64bit, sign extend val. */
8093 if (flag_code
!= CODE_64BIT
8094 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
8096 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
8098 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8099 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
8100 && exp_seg
!= absolute_section
8101 && exp_seg
!= text_section
8102 && exp_seg
!= data_section
8103 && exp_seg
!= bss_section
8104 && exp_seg
!= undefined_section
8105 && !bfd_is_com_section (exp_seg
))
8107 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
8111 else if (!intel_syntax
&& exp_seg
== reg_section
)
8114 as_bad (_("illegal immediate register operand %s"), imm_start
);
8119 /* This is an address. The size of the address will be
8120 determined later, depending on destination register,
8121 suffix, or the default for the section. */
8122 i
.types
[this_operand
].bitfield
.imm8
= 1;
8123 i
.types
[this_operand
].bitfield
.imm16
= 1;
8124 i
.types
[this_operand
].bitfield
.imm32
= 1;
8125 i
.types
[this_operand
].bitfield
.imm32s
= 1;
8126 i
.types
[this_operand
].bitfield
.imm64
= 1;
8127 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
8135 i386_scale (char *scale
)
8138 char *save
= input_line_pointer
;
8140 input_line_pointer
= scale
;
8141 val
= get_absolute_expression ();
8146 i
.log2_scale_factor
= 0;
8149 i
.log2_scale_factor
= 1;
8152 i
.log2_scale_factor
= 2;
8155 i
.log2_scale_factor
= 3;
8159 char sep
= *input_line_pointer
;
8161 *input_line_pointer
= '\0';
8162 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
8164 *input_line_pointer
= sep
;
8165 input_line_pointer
= save
;
8169 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
8171 as_warn (_("scale factor of %d without an index register"),
8172 1 << i
.log2_scale_factor
);
8173 i
.log2_scale_factor
= 0;
8175 scale
= input_line_pointer
;
8176 input_line_pointer
= save
;
8181 i386_displacement (char *disp_start
, char *disp_end
)
8185 char *save_input_line_pointer
;
8186 char *gotfree_input_line
;
8188 i386_operand_type bigdisp
, types
= anydisp
;
8191 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
8193 as_bad (_("at most %d displacement operands are allowed"),
8194 MAX_MEMORY_OPERANDS
);
8198 operand_type_set (&bigdisp
, 0);
8199 if ((i
.types
[this_operand
].bitfield
.jumpabsolute
)
8200 || (!current_templates
->start
->opcode_modifier
.jump
8201 && !current_templates
->start
->opcode_modifier
.jumpdword
))
8203 bigdisp
.bitfield
.disp32
= 1;
8204 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
8205 if (flag_code
== CODE_64BIT
)
8209 bigdisp
.bitfield
.disp32s
= 1;
8210 bigdisp
.bitfield
.disp64
= 1;
8213 else if ((flag_code
== CODE_16BIT
) ^ override
)
8215 bigdisp
.bitfield
.disp32
= 0;
8216 bigdisp
.bitfield
.disp16
= 1;
8221 /* For PC-relative branches, the width of the displacement
8222 is dependent upon data size, not address size. */
8223 override
= (i
.prefix
[DATA_PREFIX
] != 0);
8224 if (flag_code
== CODE_64BIT
)
8226 if (override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
8227 bigdisp
.bitfield
.disp16
= 1;
8230 bigdisp
.bitfield
.disp32
= 1;
8231 bigdisp
.bitfield
.disp32s
= 1;
8237 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
8239 : LONG_MNEM_SUFFIX
));
8240 bigdisp
.bitfield
.disp32
= 1;
8241 if ((flag_code
== CODE_16BIT
) ^ override
)
8243 bigdisp
.bitfield
.disp32
= 0;
8244 bigdisp
.bitfield
.disp16
= 1;
8248 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
8251 exp
= &disp_expressions
[i
.disp_operands
];
8252 i
.op
[this_operand
].disps
= exp
;
8254 save_input_line_pointer
= input_line_pointer
;
8255 input_line_pointer
= disp_start
;
8256 END_STRING_AND_SAVE (disp_end
);
8258 #ifndef GCC_ASM_O_HACK
8259 #define GCC_ASM_O_HACK 0
8262 END_STRING_AND_SAVE (disp_end
+ 1);
8263 if (i
.types
[this_operand
].bitfield
.baseIndex
8264 && displacement_string_end
[-1] == '+')
8266 /* This hack is to avoid a warning when using the "o"
8267 constraint within gcc asm statements.
8270 #define _set_tssldt_desc(n,addr,limit,type) \
8271 __asm__ __volatile__ ( \
8273 "movw %w1,2+%0\n\t" \
8275 "movb %b1,4+%0\n\t" \
8276 "movb %4,5+%0\n\t" \
8277 "movb $0,6+%0\n\t" \
8278 "movb %h1,7+%0\n\t" \
8280 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
8282 This works great except that the output assembler ends
8283 up looking a bit weird if it turns out that there is
8284 no offset. You end up producing code that looks like:
8297 So here we provide the missing zero. */
8299 *displacement_string_end
= '0';
8302 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
8303 if (gotfree_input_line
)
8304 input_line_pointer
= gotfree_input_line
;
8306 exp_seg
= expression (exp
);
8309 if (*input_line_pointer
)
8310 as_bad (_("junk `%s' after expression"), input_line_pointer
);
8312 RESTORE_END_STRING (disp_end
+ 1);
8314 input_line_pointer
= save_input_line_pointer
;
8315 if (gotfree_input_line
)
8317 free (gotfree_input_line
);
8319 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
8320 exp
->X_op
= O_illegal
;
8323 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
8325 RESTORE_END_STRING (disp_end
);
8331 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
8332 i386_operand_type types
, const char *disp_start
)
8334 i386_operand_type bigdisp
;
8337 /* We do this to make sure that the section symbol is in
8338 the symbol table. We will ultimately change the relocation
8339 to be relative to the beginning of the section. */
8340 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
8341 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
8342 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
8344 if (exp
->X_op
!= O_symbol
)
8347 if (S_IS_LOCAL (exp
->X_add_symbol
)
8348 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
8349 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
8350 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
8351 exp
->X_op
= O_subtract
;
8352 exp
->X_op_symbol
= GOT_symbol
;
8353 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
8354 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
8355 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
8356 i
.reloc
[this_operand
] = BFD_RELOC_64
;
8358 i
.reloc
[this_operand
] = BFD_RELOC_32
;
8361 else if (exp
->X_op
== O_absent
8362 || exp
->X_op
== O_illegal
8363 || exp
->X_op
== O_big
)
8366 as_bad (_("missing or invalid displacement expression `%s'"),
8371 else if (flag_code
== CODE_64BIT
8372 && !i
.prefix
[ADDR_PREFIX
]
8373 && exp
->X_op
== O_constant
)
8375 /* Since displacement is signed extended to 64bit, don't allow
8376 disp32 and turn off disp32s if they are out of range. */
8377 i
.types
[this_operand
].bitfield
.disp32
= 0;
8378 if (!fits_in_signed_long (exp
->X_add_number
))
8380 i
.types
[this_operand
].bitfield
.disp32s
= 0;
8381 if (i
.types
[this_operand
].bitfield
.baseindex
)
8383 as_bad (_("0x%lx out range of signed 32bit displacement"),
8384 (long) exp
->X_add_number
);
8390 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8391 else if (exp
->X_op
!= O_constant
8392 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
8393 && exp_seg
!= absolute_section
8394 && exp_seg
!= text_section
8395 && exp_seg
!= data_section
8396 && exp_seg
!= bss_section
8397 && exp_seg
!= undefined_section
8398 && !bfd_is_com_section (exp_seg
))
8400 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
8405 /* Check if this is a displacement only operand. */
8406 bigdisp
= i
.types
[this_operand
];
8407 bigdisp
.bitfield
.disp8
= 0;
8408 bigdisp
.bitfield
.disp16
= 0;
8409 bigdisp
.bitfield
.disp32
= 0;
8410 bigdisp
.bitfield
.disp32s
= 0;
8411 bigdisp
.bitfield
.disp64
= 0;
8412 if (operand_type_all_zero (&bigdisp
))
8413 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
8419 /* Make sure the memory operand we've been dealt is valid.
8420 Return 1 on success, 0 on a failure. */
8423 i386_index_check (const char *operand_string
)
8425 const char *kind
= "base/index";
8426 enum flag_code addr_mode
;
8428 if (i
.prefix
[ADDR_PREFIX
])
8429 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
8432 addr_mode
= flag_code
;
8434 #if INFER_ADDR_PREFIX
8435 if (i
.mem_operands
== 0)
8437 /* Infer address prefix from the first memory operand. */
8438 const reg_entry
*addr_reg
= i
.base_reg
;
8440 if (addr_reg
== NULL
)
8441 addr_reg
= i
.index_reg
;
8445 if (addr_reg
->reg_num
== RegEip
8446 || addr_reg
->reg_num
== RegEiz
8447 || addr_reg
->reg_type
.bitfield
.reg32
)
8448 addr_mode
= CODE_32BIT
;
8449 else if (flag_code
!= CODE_64BIT
8450 && addr_reg
->reg_type
.bitfield
.reg16
)
8451 addr_mode
= CODE_16BIT
;
8453 if (addr_mode
!= flag_code
)
8455 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
8457 /* Change the size of any displacement too. At most one
8458 of Disp16 or Disp32 is set.
8459 FIXME. There doesn't seem to be any real need for
8460 separate Disp16 and Disp32 flags. The same goes for
8461 Imm16 and Imm32. Removing them would probably clean
8462 up the code quite a lot. */
8463 if (flag_code
!= CODE_64BIT
8464 && (i
.types
[this_operand
].bitfield
.disp16
8465 || i
.types
[this_operand
].bitfield
.disp32
))
8466 i
.types
[this_operand
]
8467 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
8474 if (current_templates
->start
->opcode_modifier
.isstring
8475 && !current_templates
->start
->opcode_modifier
.immext
8476 && (current_templates
->end
[-1].opcode_modifier
.isstring
8479 /* Memory operands of string insns are special in that they only allow
8480 a single register (rDI, rSI, or rBX) as their memory address. */
8481 const reg_entry
*expected_reg
;
8482 static const char *di_si
[][2] =
8488 static const char *bx
[] = { "ebx", "bx", "rbx" };
8490 kind
= "string address";
8492 if (current_templates
->start
->opcode_modifier
.w
)
8494 i386_operand_type type
= current_templates
->end
[-1].operand_types
[0];
8496 if (!type
.bitfield
.baseindex
8497 || ((!i
.mem_operands
!= !intel_syntax
)
8498 && current_templates
->end
[-1].operand_types
[1]
8499 .bitfield
.baseindex
))
8500 type
= current_templates
->end
[-1].operand_types
[1];
8501 expected_reg
= hash_find (reg_hash
,
8502 di_si
[addr_mode
][type
.bitfield
.esseg
]);
8506 expected_reg
= hash_find (reg_hash
, bx
[addr_mode
]);
8508 if (i
.base_reg
!= expected_reg
8510 || operand_type_check (i
.types
[this_operand
], disp
))
8512 /* The second memory operand must have the same size as
8516 && !((addr_mode
== CODE_64BIT
8517 && i
.base_reg
->reg_type
.bitfield
.reg64
)
8518 || (addr_mode
== CODE_32BIT
8519 ? i
.base_reg
->reg_type
.bitfield
.reg32
8520 : i
.base_reg
->reg_type
.bitfield
.reg16
)))
8523 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
8525 intel_syntax
? '[' : '(',
8527 expected_reg
->reg_name
,
8528 intel_syntax
? ']' : ')');
8535 as_bad (_("`%s' is not a valid %s expression"),
8536 operand_string
, kind
);
8541 if (addr_mode
!= CODE_16BIT
)
8543 /* 32-bit/64-bit checks. */
8545 && (addr_mode
== CODE_64BIT
8546 ? !i
.base_reg
->reg_type
.bitfield
.reg64
8547 : !i
.base_reg
->reg_type
.bitfield
.reg32
)
8549 || (i
.base_reg
->reg_num
8550 != (addr_mode
== CODE_64BIT
? RegRip
: RegEip
))))
8552 && !i
.index_reg
->reg_type
.bitfield
.regxmm
8553 && !i
.index_reg
->reg_type
.bitfield
.regymm
8554 && !i
.index_reg
->reg_type
.bitfield
.regzmm
8555 && ((addr_mode
== CODE_64BIT
8556 ? !(i
.index_reg
->reg_type
.bitfield
.reg64
8557 || i
.index_reg
->reg_num
== RegRiz
)
8558 : !(i
.index_reg
->reg_type
.bitfield
.reg32
8559 || i
.index_reg
->reg_num
== RegEiz
))
8560 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
8565 /* 16-bit checks. */
8567 && (!i
.base_reg
->reg_type
.bitfield
.reg16
8568 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
8570 && (!i
.index_reg
->reg_type
.bitfield
.reg16
8571 || !i
.index_reg
->reg_type
.bitfield
.baseindex
8573 && i
.base_reg
->reg_num
< 6
8574 && i
.index_reg
->reg_num
>= 6
8575 && i
.log2_scale_factor
== 0))))
8582 /* Handle vector immediates. */
8585 RC_SAE_immediate (const char *imm_start
)
8587 unsigned int match_found
, j
;
8588 const char *pstr
= imm_start
;
8596 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
8598 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
8602 rc_op
.type
= RC_NamesTable
[j
].type
;
8603 rc_op
.operand
= this_operand
;
8604 i
.rounding
= &rc_op
;
8608 as_bad (_("duplicated `%s'"), imm_start
);
8611 pstr
+= RC_NamesTable
[j
].len
;
8621 as_bad (_("Missing '}': '%s'"), imm_start
);
8624 /* RC/SAE immediate string should contain nothing more. */;
8627 as_bad (_("Junk after '}': '%s'"), imm_start
);
8631 exp
= &im_expressions
[i
.imm_operands
++];
8632 i
.op
[this_operand
].imms
= exp
;
8634 exp
->X_op
= O_constant
;
8635 exp
->X_add_number
= 0;
8636 exp
->X_add_symbol
= (symbolS
*) 0;
8637 exp
->X_op_symbol
= (symbolS
*) 0;
8639 i
.types
[this_operand
].bitfield
.imm8
= 1;
8643 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
8647 i386_att_operand (char *operand_string
)
8651 char *op_string
= operand_string
;
8653 if (is_space_char (*op_string
))
8656 /* We check for an absolute prefix (differentiating,
8657 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
8658 if (*op_string
== ABSOLUTE_PREFIX
)
8661 if (is_space_char (*op_string
))
8663 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
8666 /* Check if operand is a register. */
8667 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
8669 i386_operand_type temp
;
8671 /* Check for a segment override by searching for ':' after a
8672 segment register. */
8674 if (is_space_char (*op_string
))
8676 if (*op_string
== ':'
8677 && (r
->reg_type
.bitfield
.sreg2
8678 || r
->reg_type
.bitfield
.sreg3
))
8683 i
.seg
[i
.mem_operands
] = &es
;
8686 i
.seg
[i
.mem_operands
] = &cs
;
8689 i
.seg
[i
.mem_operands
] = &ss
;
8692 i
.seg
[i
.mem_operands
] = &ds
;
8695 i
.seg
[i
.mem_operands
] = &fs
;
8698 i
.seg
[i
.mem_operands
] = &gs
;
8702 /* Skip the ':' and whitespace. */
8704 if (is_space_char (*op_string
))
8707 if (!is_digit_char (*op_string
)
8708 && !is_identifier_char (*op_string
)
8709 && *op_string
!= '('
8710 && *op_string
!= ABSOLUTE_PREFIX
)
8712 as_bad (_("bad memory operand `%s'"), op_string
);
8715 /* Handle case of %es:*foo. */
8716 if (*op_string
== ABSOLUTE_PREFIX
)
8719 if (is_space_char (*op_string
))
8721 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
8723 goto do_memory_reference
;
8726 /* Handle vector operations. */
8727 if (*op_string
== '{')
8729 op_string
= check_VecOperations (op_string
, NULL
);
8730 if (op_string
== NULL
)
8736 as_bad (_("junk `%s' after register"), op_string
);
8740 temp
.bitfield
.baseindex
= 0;
8741 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
8743 i
.types
[this_operand
].bitfield
.unspecified
= 0;
8744 i
.op
[this_operand
].regs
= r
;
8747 else if (*op_string
== REGISTER_PREFIX
)
8749 as_bad (_("bad register name `%s'"), op_string
);
8752 else if (*op_string
== IMMEDIATE_PREFIX
)
8755 if (i
.types
[this_operand
].bitfield
.jumpabsolute
)
8757 as_bad (_("immediate operand illegal with absolute jump"));
8760 if (!i386_immediate (op_string
))
8763 else if (RC_SAE_immediate (operand_string
))
8765 /* If it is a RC or SAE immediate, do nothing. */
8768 else if (is_digit_char (*op_string
)
8769 || is_identifier_char (*op_string
)
8770 || *op_string
== '"'
8771 || *op_string
== '(')
8773 /* This is a memory reference of some sort. */
8776 /* Start and end of displacement string expression (if found). */
8777 char *displacement_string_start
;
8778 char *displacement_string_end
;
8781 do_memory_reference
:
8782 if ((i
.mem_operands
== 1
8783 && !current_templates
->start
->opcode_modifier
.isstring
)
8784 || i
.mem_operands
== 2)
8786 as_bad (_("too many memory references for `%s'"),
8787 current_templates
->start
->name
);
8791 /* Check for base index form. We detect the base index form by
8792 looking for an ')' at the end of the operand, searching
8793 for the '(' matching it, and finding a REGISTER_PREFIX or ','
8795 base_string
= op_string
+ strlen (op_string
);
8797 /* Handle vector operations. */
8798 vop_start
= strchr (op_string
, '{');
8799 if (vop_start
&& vop_start
< base_string
)
8801 if (check_VecOperations (vop_start
, base_string
) == NULL
)
8803 base_string
= vop_start
;
8807 if (is_space_char (*base_string
))
8810 /* If we only have a displacement, set-up for it to be parsed later. */
8811 displacement_string_start
= op_string
;
8812 displacement_string_end
= base_string
+ 1;
8814 if (*base_string
== ')')
8817 unsigned int parens_balanced
= 1;
8818 /* We've already checked that the number of left & right ()'s are
8819 equal, so this loop will not be infinite. */
8823 if (*base_string
== ')')
8825 if (*base_string
== '(')
8828 while (parens_balanced
);
8830 temp_string
= base_string
;
8832 /* Skip past '(' and whitespace. */
8834 if (is_space_char (*base_string
))
8837 if (*base_string
== ','
8838 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
8841 displacement_string_end
= temp_string
;
8843 i
.types
[this_operand
].bitfield
.baseindex
= 1;
8847 base_string
= end_op
;
8848 if (is_space_char (*base_string
))
8852 /* There may be an index reg or scale factor here. */
8853 if (*base_string
== ',')
8856 if (is_space_char (*base_string
))
8859 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
8862 base_string
= end_op
;
8863 if (is_space_char (*base_string
))
8865 if (*base_string
== ',')
8868 if (is_space_char (*base_string
))
8871 else if (*base_string
!= ')')
8873 as_bad (_("expecting `,' or `)' "
8874 "after index register in `%s'"),
8879 else if (*base_string
== REGISTER_PREFIX
)
8881 end_op
= strchr (base_string
, ',');
8884 as_bad (_("bad register name `%s'"), base_string
);
8888 /* Check for scale factor. */
8889 if (*base_string
!= ')')
8891 char *end_scale
= i386_scale (base_string
);
8896 base_string
= end_scale
;
8897 if (is_space_char (*base_string
))
8899 if (*base_string
!= ')')
8901 as_bad (_("expecting `)' "
8902 "after scale factor in `%s'"),
8907 else if (!i
.index_reg
)
8909 as_bad (_("expecting index register or scale factor "
8910 "after `,'; got '%c'"),
8915 else if (*base_string
!= ')')
8917 as_bad (_("expecting `,' or `)' "
8918 "after base register in `%s'"),
8923 else if (*base_string
== REGISTER_PREFIX
)
8925 end_op
= strchr (base_string
, ',');
8928 as_bad (_("bad register name `%s'"), base_string
);
8933 /* If there's an expression beginning the operand, parse it,
8934 assuming displacement_string_start and
8935 displacement_string_end are meaningful. */
8936 if (displacement_string_start
!= displacement_string_end
)
8938 if (!i386_displacement (displacement_string_start
,
8939 displacement_string_end
))
8943 /* Special case for (%dx) while doing input/output op. */
8945 && operand_type_equal (&i
.base_reg
->reg_type
,
8946 ®16_inoutportreg
)
8948 && i
.log2_scale_factor
== 0
8949 && i
.seg
[i
.mem_operands
] == 0
8950 && !operand_type_check (i
.types
[this_operand
], disp
))
8952 i
.types
[this_operand
] = inoutportreg
;
8956 if (i386_index_check (operand_string
) == 0)
8958 i
.types
[this_operand
].bitfield
.mem
= 1;
8963 /* It's not a memory operand; argh! */
8964 as_bad (_("invalid char %s beginning operand %d `%s'"),
8965 output_invalid (*op_string
),
8970 return 1; /* Normal return. */
8973 /* Calculate the maximum variable size (i.e., excluding fr_fix)
8974 that an rs_machine_dependent frag may reach. */
8977 i386_frag_max_var (fragS
*frag
)
8979 /* The only relaxable frags are for jumps.
8980 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
8981 gas_assert (frag
->fr_type
== rs_machine_dependent
);
8982 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
8985 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8987 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
8989 /* STT_GNU_IFUNC symbol must go through PLT. */
8990 if ((symbol_get_bfdsym (fr_symbol
)->flags
8991 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
8994 if (!S_IS_EXTERNAL (fr_symbol
))
8995 /* Symbol may be weak or local. */
8996 return !S_IS_WEAK (fr_symbol
);
8998 /* Global symbols with non-default visibility can't be preempted. */
8999 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
9002 if (fr_var
!= NO_RELOC
)
9003 switch ((enum bfd_reloc_code_real
) fr_var
)
9005 case BFD_RELOC_386_PLT32
:
9006 case BFD_RELOC_X86_64_PLT32
:
9007 /* Symbol with PLT relocatin may be preempted. */
9013 /* Global symbols with default visibility in a shared library may be
9014 preempted by another definition. */
9019 /* md_estimate_size_before_relax()
9021 Called just before relax() for rs_machine_dependent frags. The x86
9022 assembler uses these frags to handle variable size jump
9025 Any symbol that is now undefined will not become defined.
9026 Return the correct fr_subtype in the frag.
9027 Return the initial "guess for variable size of frag" to caller.
9028 The guess is actually the growth beyond the fixed part. Whatever
9029 we do to grow the fixed or variable part contributes to our
9033 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
9035 /* We've already got fragP->fr_subtype right; all we have to do is
9036 check for un-relaxable symbols. On an ELF system, we can't relax
9037 an externally visible symbol, because it may be overridden by a
9039 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
9040 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9042 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
9045 #if defined (OBJ_COFF) && defined (TE_PE)
9046 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
9047 && S_IS_WEAK (fragP
->fr_symbol
))
9051 /* Symbol is undefined in this segment, or we need to keep a
9052 reloc so that weak symbols can be overridden. */
9053 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
9054 enum bfd_reloc_code_real reloc_type
;
9055 unsigned char *opcode
;
9058 if (fragP
->fr_var
!= NO_RELOC
)
9059 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
9061 reloc_type
= BFD_RELOC_16_PCREL
;
9063 reloc_type
= BFD_RELOC_32_PCREL
;
9065 old_fr_fix
= fragP
->fr_fix
;
9066 opcode
= (unsigned char *) fragP
->fr_opcode
;
9068 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
9071 /* Make jmp (0xeb) a (d)word displacement jump. */
9073 fragP
->fr_fix
+= size
;
9074 fix_new (fragP
, old_fr_fix
, size
,
9076 fragP
->fr_offset
, 1,
9082 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
9084 /* Negate the condition, and branch past an
9085 unconditional jump. */
9088 /* Insert an unconditional jump. */
9090 /* We added two extra opcode bytes, and have a two byte
9092 fragP
->fr_fix
+= 2 + 2;
9093 fix_new (fragP
, old_fr_fix
+ 2, 2,
9095 fragP
->fr_offset
, 1,
9102 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
9107 fixP
= fix_new (fragP
, old_fr_fix
, 1,
9109 fragP
->fr_offset
, 1,
9111 fixP
->fx_signed
= 1;
9115 /* This changes the byte-displacement jump 0x7N
9116 to the (d)word-displacement jump 0x0f,0x8N. */
9117 opcode
[1] = opcode
[0] + 0x10;
9118 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
9119 /* We've added an opcode byte. */
9120 fragP
->fr_fix
+= 1 + size
;
9121 fix_new (fragP
, old_fr_fix
+ 1, size
,
9123 fragP
->fr_offset
, 1,
9128 BAD_CASE (fragP
->fr_subtype
);
9132 return fragP
->fr_fix
- old_fr_fix
;
9135 /* Guess size depending on current relax state. Initially the relax
9136 state will correspond to a short jump and we return 1, because
9137 the variable part of the frag (the branch offset) is one byte
9138 long. However, we can relax a section more than once and in that
9139 case we must either set fr_subtype back to the unrelaxed state,
9140 or return the value for the appropriate branch. */
9141 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
9144 /* Called after relax() is finished.
9146 In: Address of frag.
9147 fr_type == rs_machine_dependent.
9148 fr_subtype is what the address relaxed to.
9150 Out: Any fixSs and constants are set up.
9151 Caller will turn frag into a ".space 0". */
9154 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
9157 unsigned char *opcode
;
9158 unsigned char *where_to_put_displacement
= NULL
;
9159 offsetT target_address
;
9160 offsetT opcode_address
;
9161 unsigned int extension
= 0;
9162 offsetT displacement_from_opcode_start
;
9164 opcode
= (unsigned char *) fragP
->fr_opcode
;
9166 /* Address we want to reach in file space. */
9167 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
9169 /* Address opcode resides at in file space. */
9170 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
9172 /* Displacement from opcode start to fill into instruction. */
9173 displacement_from_opcode_start
= target_address
- opcode_address
;
9175 if ((fragP
->fr_subtype
& BIG
) == 0)
9177 /* Don't have to change opcode. */
9178 extension
= 1; /* 1 opcode + 1 displacement */
9179 where_to_put_displacement
= &opcode
[1];
9183 if (no_cond_jump_promotion
9184 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
9185 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
9186 _("long jump required"));
9188 switch (fragP
->fr_subtype
)
9190 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
9191 extension
= 4; /* 1 opcode + 4 displacement */
9193 where_to_put_displacement
= &opcode
[1];
9196 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
9197 extension
= 2; /* 1 opcode + 2 displacement */
9199 where_to_put_displacement
= &opcode
[1];
9202 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
9203 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
9204 extension
= 5; /* 2 opcode + 4 displacement */
9205 opcode
[1] = opcode
[0] + 0x10;
9206 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
9207 where_to_put_displacement
= &opcode
[2];
9210 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
9211 extension
= 3; /* 2 opcode + 2 displacement */
9212 opcode
[1] = opcode
[0] + 0x10;
9213 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
9214 where_to_put_displacement
= &opcode
[2];
9217 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
9222 where_to_put_displacement
= &opcode
[3];
9226 BAD_CASE (fragP
->fr_subtype
);
9231 /* If size if less then four we are sure that the operand fits,
9232 but if it's 4, then it could be that the displacement is larger
9234 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
9236 && ((addressT
) (displacement_from_opcode_start
- extension
9237 + ((addressT
) 1 << 31))
9238 > (((addressT
) 2 << 31) - 1)))
9240 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
9241 _("jump target out of range"));
9242 /* Make us emit 0. */
9243 displacement_from_opcode_start
= extension
;
9245 /* Now put displacement after opcode. */
9246 md_number_to_chars ((char *) where_to_put_displacement
,
9247 (valueT
) (displacement_from_opcode_start
- extension
),
9248 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
9249 fragP
->fr_fix
+= extension
;
9252 /* Apply a fixup (fixP) to segment data, once it has been determined
9253 by our caller that we have all the info we need to fix it up.
9255 Parameter valP is the pointer to the value of the bits.
9257 On the 386, immediates, displacements, and data pointers are all in
9258 the same (little-endian) format, so we don't need to care about which
9262 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
9264 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
9265 valueT value
= *valP
;
9267 #if !defined (TE_Mach)
9270 switch (fixP
->fx_r_type
)
9276 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
9279 case BFD_RELOC_X86_64_32S
:
9280 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
9283 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
9286 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
9291 if (fixP
->fx_addsy
!= NULL
9292 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
9293 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
9294 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
9295 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
9296 && !use_rela_relocations
)
9298 /* This is a hack. There should be a better way to handle this.
9299 This covers for the fact that bfd_install_relocation will
9300 subtract the current location (for partial_inplace, PC relative
9301 relocations); see more below. */
9305 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
9308 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
9310 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9313 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
9316 || (symbol_section_p (fixP
->fx_addsy
)
9317 && sym_seg
!= absolute_section
))
9318 && !generic_force_reloc (fixP
))
9320 /* Yes, we add the values in twice. This is because
9321 bfd_install_relocation subtracts them out again. I think
9322 bfd_install_relocation is broken, but I don't dare change
9324 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
9328 #if defined (OBJ_COFF) && defined (TE_PE)
9329 /* For some reason, the PE format does not store a
9330 section address offset for a PC relative symbol. */
9331 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
9332 || S_IS_WEAK (fixP
->fx_addsy
))
9333 value
+= md_pcrel_from (fixP
);
9336 #if defined (OBJ_COFF) && defined (TE_PE)
9337 if (fixP
->fx_addsy
!= NULL
9338 && S_IS_WEAK (fixP
->fx_addsy
)
9339 /* PR 16858: Do not modify weak function references. */
9340 && ! fixP
->fx_pcrel
)
9342 #if !defined (TE_PEP)
9343 /* For x86 PE weak function symbols are neither PC-relative
9344 nor do they set S_IS_FUNCTION. So the only reliable way
9345 to detect them is to check the flags of their containing
9347 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
9348 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
9352 value
-= S_GET_VALUE (fixP
->fx_addsy
);
9356 /* Fix a few things - the dynamic linker expects certain values here,
9357 and we must not disappoint it. */
9358 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9359 if (IS_ELF
&& fixP
->fx_addsy
)
9360 switch (fixP
->fx_r_type
)
9362 case BFD_RELOC_386_PLT32
:
9363 case BFD_RELOC_X86_64_PLT32
:
9364 /* Make the jump instruction point to the address of the operand. At
9365 runtime we merely add the offset to the actual PLT entry. */
9369 case BFD_RELOC_386_TLS_GD
:
9370 case BFD_RELOC_386_TLS_LDM
:
9371 case BFD_RELOC_386_TLS_IE_32
:
9372 case BFD_RELOC_386_TLS_IE
:
9373 case BFD_RELOC_386_TLS_GOTIE
:
9374 case BFD_RELOC_386_TLS_GOTDESC
:
9375 case BFD_RELOC_X86_64_TLSGD
:
9376 case BFD_RELOC_X86_64_TLSLD
:
9377 case BFD_RELOC_X86_64_GOTTPOFF
:
9378 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
9379 value
= 0; /* Fully resolved at runtime. No addend. */
9381 case BFD_RELOC_386_TLS_LE
:
9382 case BFD_RELOC_386_TLS_LDO_32
:
9383 case BFD_RELOC_386_TLS_LE_32
:
9384 case BFD_RELOC_X86_64_DTPOFF32
:
9385 case BFD_RELOC_X86_64_DTPOFF64
:
9386 case BFD_RELOC_X86_64_TPOFF32
:
9387 case BFD_RELOC_X86_64_TPOFF64
:
9388 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
9391 case BFD_RELOC_386_TLS_DESC_CALL
:
9392 case BFD_RELOC_X86_64_TLSDESC_CALL
:
9393 value
= 0; /* Fully resolved at runtime. No addend. */
9394 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
9398 case BFD_RELOC_VTABLE_INHERIT
:
9399 case BFD_RELOC_VTABLE_ENTRY
:
9406 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
9408 #endif /* !defined (TE_Mach) */
9410 /* Are we finished with this relocation now? */
9411 if (fixP
->fx_addsy
== NULL
)
9413 #if defined (OBJ_COFF) && defined (TE_PE)
9414 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
9417 /* Remember value for tc_gen_reloc. */
9418 fixP
->fx_addnumber
= value
;
9419 /* Clear out the frag for now. */
9423 else if (use_rela_relocations
)
9425 fixP
->fx_no_overflow
= 1;
9426 /* Remember value for tc_gen_reloc. */
9427 fixP
->fx_addnumber
= value
;
9431 md_number_to_chars (p
, value
, fixP
->fx_size
);
9435 md_atof (int type
, char *litP
, int *sizeP
)
9437 /* This outputs the LITTLENUMs in REVERSE order;
9438 in accord with the bigendian 386. */
9439 return ieee_md_atof (type
, litP
, sizeP
, FALSE
);
/* Scratch buffer for output_invalid: big enough for "'c'" or
   "(0xNN)" plus the terminating NUL.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Format character C for an "invalid character" diagnostic: printable
   characters as 'c', others as (0xNN).  Returns a pointer to a static
   buffer, so the result is only valid until the next call.
   NOTE(review): the printable/non-printable test line was lost in the
   extraction; ISPRINT from safe-ctype.h is assumed — verify upstream.  */

static char *
output_invalid (int c)
{
  if (ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  return output_invalid_buf;
}
9456 /* REG_STRING starts *before* REGISTER_PREFIX. */
9458 static const reg_entry
*
9459 parse_real_register (char *reg_string
, char **end_op
)
9461 char *s
= reg_string
;
9463 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
9466 /* Skip possible REGISTER_PREFIX and possible whitespace. */
9467 if (*s
== REGISTER_PREFIX
)
9470 if (is_space_char (*s
))
9474 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
9476 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
9477 return (const reg_entry
*) NULL
;
9481 /* For naked regs, make sure that we are not dealing with an identifier.
9482 This prevents confusing an identifier like `eax_var' with register
9484 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
9485 return (const reg_entry
*) NULL
;
9489 r
= (const reg_entry
*) hash_find (reg_hash
, reg_name_given
);
9491 /* Handle floating point regs, allowing spaces in the (i) part. */
9492 if (r
== i386_regtab
/* %st is first entry of table */)
9494 if (is_space_char (*s
))
9499 if (is_space_char (*s
))
9501 if (*s
>= '0' && *s
<= '7')
9505 if (is_space_char (*s
))
9510 r
= (const reg_entry
*) hash_find (reg_hash
, "st(0)");
9515 /* We have "%st(" then garbage. */
9516 return (const reg_entry
*) NULL
;
9520 if (r
== NULL
|| allow_pseudo_reg
)
9523 if (operand_type_all_zero (&r
->reg_type
))
9524 return (const reg_entry
*) NULL
;
9526 if ((r
->reg_type
.bitfield
.reg32
9527 || r
->reg_type
.bitfield
.sreg3
9528 || r
->reg_type
.bitfield
.control
9529 || r
->reg_type
.bitfield
.debug
9530 || r
->reg_type
.bitfield
.test
)
9531 && !cpu_arch_flags
.bitfield
.cpui386
)
9532 return (const reg_entry
*) NULL
;
9534 if (r
->reg_type
.bitfield
.floatreg
9535 && !cpu_arch_flags
.bitfield
.cpu8087
9536 && !cpu_arch_flags
.bitfield
.cpu287
9537 && !cpu_arch_flags
.bitfield
.cpu387
)
9538 return (const reg_entry
*) NULL
;
9540 if (r
->reg_type
.bitfield
.regmmx
&& !cpu_arch_flags
.bitfield
.cpummx
)
9541 return (const reg_entry
*) NULL
;
9543 if (r
->reg_type
.bitfield
.regxmm
&& !cpu_arch_flags
.bitfield
.cpusse
)
9544 return (const reg_entry
*) NULL
;
9546 if (r
->reg_type
.bitfield
.regymm
&& !cpu_arch_flags
.bitfield
.cpuavx
)
9547 return (const reg_entry
*) NULL
;
9549 if ((r
->reg_type
.bitfield
.regzmm
|| r
->reg_type
.bitfield
.regmask
)
9550 && !cpu_arch_flags
.bitfield
.cpuavx512f
)
9551 return (const reg_entry
*) NULL
;
9553 /* Don't allow fake index register unless allow_index_reg isn't 0. */
9554 if (!allow_index_reg
9555 && (r
->reg_num
== RegEiz
|| r
->reg_num
== RegRiz
))
9556 return (const reg_entry
*) NULL
;
9558 /* Upper 16 vector register is only available with VREX in 64bit
9560 if ((r
->reg_flags
& RegVRex
))
9562 if (!cpu_arch_flags
.bitfield
.cpuvrex
9563 || flag_code
!= CODE_64BIT
)
9564 return (const reg_entry
*) NULL
;
9569 if (((r
->reg_flags
& (RegRex64
| RegRex
))
9570 || r
->reg_type
.bitfield
.reg64
)
9571 && (!cpu_arch_flags
.bitfield
.cpulm
9572 || !operand_type_equal (&r
->reg_type
, &control
))
9573 && flag_code
!= CODE_64BIT
)
9574 return (const reg_entry
*) NULL
;
9576 if (r
->reg_type
.bitfield
.sreg3
&& r
->reg_num
== RegFlat
&& !intel_syntax
)
9577 return (const reg_entry
*) NULL
;
9582 /* REG_STRING starts *before* REGISTER_PREFIX. */
9584 static const reg_entry
*
9585 parse_register (char *reg_string
, char **end_op
)
9589 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
9590 r
= parse_real_register (reg_string
, end_op
);
9595 char *save
= input_line_pointer
;
9599 input_line_pointer
= reg_string
;
9600 c
= get_symbol_name (®_string
);
9601 symbolP
= symbol_find (reg_string
);
9602 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
9604 const expressionS
*e
= symbol_get_value_expression (symbolP
);
9606 know (e
->X_op
== O_register
);
9607 know (e
->X_add_number
>= 0
9608 && (valueT
) e
->X_add_number
< i386_regtab_size
);
9609 r
= i386_regtab
+ e
->X_add_number
;
9610 if ((r
->reg_flags
& RegVRex
))
9612 *end_op
= input_line_pointer
;
9614 *input_line_pointer
= c
;
9615 input_line_pointer
= save
;
9621 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
9624 char *end
= input_line_pointer
;
9627 r
= parse_register (name
, &input_line_pointer
);
9628 if (r
&& end
<= input_line_pointer
)
9630 *nextcharP
= *input_line_pointer
;
9631 *input_line_pointer
= 0;
9632 e
->X_op
= O_register
;
9633 e
->X_add_number
= r
- i386_regtab
;
9636 input_line_pointer
= end
;
9638 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
9642 md_operand (expressionS
*e
)
9647 switch (*input_line_pointer
)
9649 case REGISTER_PREFIX
:
9650 r
= parse_real_register (input_line_pointer
, &end
);
9653 e
->X_op
= O_register
;
9654 e
->X_add_number
= r
- i386_regtab
;
9655 input_line_pointer
= end
;
9660 gas_assert (intel_syntax
);
9661 end
= input_line_pointer
++;
9663 if (*input_line_pointer
== ']')
9665 ++input_line_pointer
;
9666 e
->X_op_symbol
= make_expr_symbol (e
);
9667 e
->X_add_symbol
= NULL
;
9668 e
->X_add_number
= 0;
9674 input_line_pointer
= end
;
9681 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9682 const char *md_shortopts
= "kVQ:sqn";
9684 const char *md_shortopts
= "qn";
9687 #define OPTION_32 (OPTION_MD_BASE + 0)
9688 #define OPTION_64 (OPTION_MD_BASE + 1)
9689 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
9690 #define OPTION_MARCH (OPTION_MD_BASE + 3)
9691 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
9692 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
9693 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
9694 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
9695 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
9696 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
9697 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
9698 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
9699 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
9700 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
9701 #define OPTION_X32 (OPTION_MD_BASE + 14)
9702 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
9703 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
9704 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
9705 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
9706 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
9707 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
9708 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
9709 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
9710 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
9711 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
9712 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 25)
9714 struct option md_longopts
[] =
9716 {"32", no_argument
, NULL
, OPTION_32
},
9717 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9718 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9719 {"64", no_argument
, NULL
, OPTION_64
},
9721 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9722 {"x32", no_argument
, NULL
, OPTION_X32
},
9723 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
9725 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
9726 {"march", required_argument
, NULL
, OPTION_MARCH
},
9727 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
9728 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
9729 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
9730 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
9731 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
9732 {"mold-gcc", no_argument
, NULL
, OPTION_MOLD_GCC
},
9733 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
9734 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
9735 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
9736 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
9737 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
9738 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
9739 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
9740 # if defined (TE_PE) || defined (TE_PEP)
9741 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
9743 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
9744 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
9745 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
9746 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
9747 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
9748 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
9749 {NULL
, no_argument
, NULL
, 0}
9751 size_t md_longopts_size
= sizeof (md_longopts
);
9754 md_parse_option (int c
, const char *arg
)
9757 char *arch
, *next
, *saved
;
9762 optimize_align_code
= 0;
9769 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9770 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
9771 should be emitted or not. FIXME: Not implemented. */
9775 /* -V: SVR4 argument to print version ID. */
9777 print_version_id ();
9780 /* -k: Ignore for FreeBSD compatibility. */
9785 /* -s: On i386 Solaris, this tells the native assembler to use
9786 .stab instead of .stab.excl. We always use .stab anyhow. */
9789 case OPTION_MSHARED
:
9793 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9794 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9797 const char **list
, **l
;
9799 list
= bfd_target_list ();
9800 for (l
= list
; *l
!= NULL
; l
++)
9801 if (CONST_STRNEQ (*l
, "elf64-x86-64")
9802 || strcmp (*l
, "coff-x86-64") == 0
9803 || strcmp (*l
, "pe-x86-64") == 0
9804 || strcmp (*l
, "pei-x86-64") == 0
9805 || strcmp (*l
, "mach-o-x86-64") == 0)
9807 default_arch
= "x86_64";
9811 as_fatal (_("no compiled in support for x86_64"));
9817 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9821 const char **list
, **l
;
9823 list
= bfd_target_list ();
9824 for (l
= list
; *l
!= NULL
; l
++)
9825 if (CONST_STRNEQ (*l
, "elf32-x86-64"))
9827 default_arch
= "x86_64:32";
9831 as_fatal (_("no compiled in support for 32bit x86_64"));
9835 as_fatal (_("32bit x86_64 is only supported for ELF"));
9840 default_arch
= "i386";
9844 #ifdef SVR4_COMMENT_CHARS
9849 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
9851 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
9855 i386_comment_chars
= n
;
9861 saved
= xstrdup (arg
);
9863 /* Allow -march=+nosse. */
9869 as_fatal (_("invalid -march= option: `%s'"), arg
);
9870 next
= strchr (arch
, '+');
9873 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
9875 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
9878 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
9881 cpu_arch_name
= cpu_arch
[j
].name
;
9882 cpu_sub_arch_name
= NULL
;
9883 cpu_arch_flags
= cpu_arch
[j
].flags
;
9884 cpu_arch_isa
= cpu_arch
[j
].type
;
9885 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
9886 if (!cpu_arch_tune_set
)
9888 cpu_arch_tune
= cpu_arch_isa
;
9889 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
9893 else if (*cpu_arch
[j
].name
== '.'
9894 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
9896 /* ISA entension. */
9897 i386_cpu_flags flags
;
9899 flags
= cpu_flags_or (cpu_arch_flags
,
9902 if (!valid_iamcu_cpu_flags (&flags
))
9903 as_fatal (_("`%s' isn't valid for Intel MCU"), arch
);
9904 else if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
9906 if (cpu_sub_arch_name
)
9908 char *name
= cpu_sub_arch_name
;
9909 cpu_sub_arch_name
= concat (name
,
9911 (const char *) NULL
);
9915 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
9916 cpu_arch_flags
= flags
;
9917 cpu_arch_isa_flags
= flags
;
9923 if (j
>= ARRAY_SIZE (cpu_arch
))
9925 /* Disable an ISA entension. */
9926 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
9927 if (strcmp (arch
, cpu_noarch
[j
].name
) == 0)
9929 i386_cpu_flags flags
;
9931 flags
= cpu_flags_and_not (cpu_arch_flags
,
9932 cpu_noarch
[j
].flags
);
9933 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
9935 if (cpu_sub_arch_name
)
9937 char *name
= cpu_sub_arch_name
;
9938 cpu_sub_arch_name
= concat (arch
,
9939 (const char *) NULL
);
9943 cpu_sub_arch_name
= xstrdup (arch
);
9944 cpu_arch_flags
= flags
;
9945 cpu_arch_isa_flags
= flags
;
9950 if (j
>= ARRAY_SIZE (cpu_noarch
))
9951 j
= ARRAY_SIZE (cpu_arch
);
9954 if (j
>= ARRAY_SIZE (cpu_arch
))
9955 as_fatal (_("invalid -march= option: `%s'"), arg
);
9959 while (next
!= NULL
);
9965 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
9966 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
9968 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
9970 cpu_arch_tune_set
= 1;
9971 cpu_arch_tune
= cpu_arch
[j
].type
;
9972 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
9976 if (j
>= ARRAY_SIZE (cpu_arch
))
9977 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
9980 case OPTION_MMNEMONIC
:
9981 if (strcasecmp (arg
, "att") == 0)
9983 else if (strcasecmp (arg
, "intel") == 0)
9986 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
9989 case OPTION_MSYNTAX
:
9990 if (strcasecmp (arg
, "att") == 0)
9992 else if (strcasecmp (arg
, "intel") == 0)
9995 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
9998 case OPTION_MINDEX_REG
:
9999 allow_index_reg
= 1;
10002 case OPTION_MNAKED_REG
:
10003 allow_naked_reg
= 1;
10006 case OPTION_MOLD_GCC
:
10010 case OPTION_MSSE2AVX
:
10014 case OPTION_MSSE_CHECK
:
10015 if (strcasecmp (arg
, "error") == 0)
10016 sse_check
= check_error
;
10017 else if (strcasecmp (arg
, "warning") == 0)
10018 sse_check
= check_warning
;
10019 else if (strcasecmp (arg
, "none") == 0)
10020 sse_check
= check_none
;
10022 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
10025 case OPTION_MOPERAND_CHECK
:
10026 if (strcasecmp (arg
, "error") == 0)
10027 operand_check
= check_error
;
10028 else if (strcasecmp (arg
, "warning") == 0)
10029 operand_check
= check_warning
;
10030 else if (strcasecmp (arg
, "none") == 0)
10031 operand_check
= check_none
;
10033 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
10036 case OPTION_MAVXSCALAR
:
10037 if (strcasecmp (arg
, "128") == 0)
10038 avxscalar
= vex128
;
10039 else if (strcasecmp (arg
, "256") == 0)
10040 avxscalar
= vex256
;
10042 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
10045 case OPTION_MADD_BND_PREFIX
:
10046 add_bnd_prefix
= 1;
10049 case OPTION_MEVEXLIG
:
10050 if (strcmp (arg
, "128") == 0)
10051 evexlig
= evexl128
;
10052 else if (strcmp (arg
, "256") == 0)
10053 evexlig
= evexl256
;
10054 else if (strcmp (arg
, "512") == 0)
10055 evexlig
= evexl512
;
10057 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
10060 case OPTION_MEVEXRCIG
:
10061 if (strcmp (arg
, "rne") == 0)
10063 else if (strcmp (arg
, "rd") == 0)
10065 else if (strcmp (arg
, "ru") == 0)
10067 else if (strcmp (arg
, "rz") == 0)
10070 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
10073 case OPTION_MEVEXWIG
:
10074 if (strcmp (arg
, "0") == 0)
10076 else if (strcmp (arg
, "1") == 0)
10079 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
10082 # if defined (TE_PE) || defined (TE_PEP)
10083 case OPTION_MBIG_OBJ
:
10088 case OPTION_MOMIT_LOCK_PREFIX
:
10089 if (strcasecmp (arg
, "yes") == 0)
10090 omit_lock_prefix
= 1;
10091 else if (strcasecmp (arg
, "no") == 0)
10092 omit_lock_prefix
= 0;
10094 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
10097 case OPTION_MFENCE_AS_LOCK_ADD
:
10098 if (strcasecmp (arg
, "yes") == 0)
10100 else if (strcasecmp (arg
, "no") == 0)
10103 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
10106 case OPTION_MRELAX_RELOCATIONS
:
10107 if (strcasecmp (arg
, "yes") == 0)
10108 generate_relax_relocations
= 1;
10109 else if (strcasecmp (arg
, "no") == 0)
10110 generate_relax_relocations
= 0;
10112 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
10115 case OPTION_MAMD64
:
10119 case OPTION_MINTEL64
:
/* Line-wrapping template for --help architecture lists.
   NOTE(review): the literal was lost in the extraction; upstream uses
   an 80-space string — verify the exact width.  */
#define MESSAGE_TEMPLATE \
"                                                                                "

/* Append NAME (LEN bytes) to the usage line being built in MESSAGE at
   position P, flushing the line to STREAM and restarting at START
   when it would overflow.  *LEFT_P tracks remaining room.  Returns
   the new write position.  */

static char *
output_message (FILE *stream, char *p, char *message, char *start,
		int *left_p, const char *name, int len)
{
  int size = sizeof (MESSAGE_TEMPLATE);
  int left = *left_p;

  /* Reserve 2 spaces for ", " or ",\0" */
  left -= len + 2;

  /* Check if there is any room.  */
  if (left >= 0)
    {
      if (p != start)
	{
	  *p++ = ',';
	  *p++ = ' ';
	}
      p = mempcpy (p, name, len);
    }
  else
    {
      /* Output the current message now and start a new one.  */
      *p++ = ',';
      *p = '\0';
      fprintf (stream, "%s\n", message);
      p = start;
      left = size - (start - message) - len - 2;

      gas_assert (left >= 0);

      p = mempcpy (p, name, len);
    }

  *left_p = left;
  return p;
}
10171 show_arch (FILE *stream
, int ext
, int check
)
10173 static char message
[] = MESSAGE_TEMPLATE
;
10174 char *start
= message
+ 27;
10176 int size
= sizeof (MESSAGE_TEMPLATE
);
10183 left
= size
- (start
- message
);
10184 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
10186 /* Should it be skipped? */
10187 if (cpu_arch
[j
].skip
)
10190 name
= cpu_arch
[j
].name
;
10191 len
= cpu_arch
[j
].len
;
10194 /* It is an extension. Skip if we aren't asked to show it. */
10205 /* It is an processor. Skip if we show only extension. */
10208 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
10210 /* It is an impossible processor - skip. */
10214 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
10217 /* Display disabled extensions. */
10219 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
10221 name
= cpu_noarch
[j
].name
;
10222 len
= cpu_noarch
[j
].len
;
10223 p
= output_message (stream
, p
, message
, start
, &left
, name
,
10228 fprintf (stream
, "%s\n", message
);
/* md hook: print target-specific --help text to STREAM.
   NOTE(review): column alignment of the option descriptions was lost
   in the extraction and has been restored to the conventional layout —
   verify the exact spacing against the upstream source.  */

void
md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -Q                      ignored\n\
  -V                      print assembler version number\n\
  -k                      ignored\n"));
#endif
  fprintf (stream, _("\
  -n                      Do not optimize code alignment\n\
  -q                      quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s                      ignored\n"));
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide                do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide                ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
                          generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
                          EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU              optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx               encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning]\n\
                          check SSE instructions\n"));
  fprintf (stream, _("\
  -moperand-check=[none|error|warning]\n\
                          check operand combinations for validity\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexwig=[0|1]         encode EVEX instructions with specific EVEX.W value\n\
                           for EVEX.W bit ignored instructions\n"));
  fprintf (stream, _("\
  -mevexrcig=[rne|rd|ru|rz]\n\
                          encode EVEX instructions with specific EVEX.RC value\n\
                           for SAE-only ignored instructions\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg             support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg             don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -mold-gcc               support old (<= 2.8.1) versions of gcc\n"));
  fprintf (stream, _("\
  -madd-bnd-prefix        add BND prefix for all valid branches\n"));
  fprintf (stream, _("\
  -mshared                disable branch optimization for shared code\n"));
# if defined (TE_PE) || defined (TE_PEP)
  fprintf (stream, _("\
  -mbig-obj               generate big object files\n"));
#endif
  fprintf (stream, _("\
  -momit-lock-prefix=[no|yes]\n\
                          strip all lock prefixes\n"));
  fprintf (stream, _("\
  -mfence-as-lock-add=[no|yes]\n\
                          encode lfence, mfence and sfence as\n\
                           lock addl $0x0, (%%{re}sp)\n"));
  fprintf (stream, _("\
  -mrelax-relocations=[no|yes]\n\
                          generate relax relocations\n"));
  fprintf (stream, _("\
  -mamd64                 accept only AMD64 ISA\n"));
  fprintf (stream, _("\
  -mintel64               accept only Intel64 ISA\n"));
}
#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* Pick the target format to use.  Sets flag_code/x86_elf_abi from
   default_arch, applies Intel MCU defaults when needed, and returns
   the BFD target name for the configured object flavour.  */

const char *
i386_target_format (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else if (!strcmp (default_arch, "iamcu"))
    {
      update_code_flag (CODE_32BIT, 1);
      if (cpu_arch_isa == PROCESSOR_UNKNOWN)
	{
	  static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
	  cpu_arch_name = "iamcu";
	  cpu_sub_arch_name = NULL;
	  cpu_arch_flags = iamcu_flags;
	  cpu_arch_isa = PROCESSOR_IAMCU;
	  cpu_arch_isa_flags = iamcu_flags;
	  if (!cpu_arch_tune_set)
	    {
	      cpu_arch_tune = cpu_arch_isa;
	      cpu_arch_tune_flags = cpu_arch_isa_flags;
	    }
	}
      else if (cpu_arch_isa != PROCESSOR_IAMCU)
	as_fatal (_("Intel MCU doesn't support `%s' architecture"),
		  cpu_arch_name);
    }
  else
    as_fatal (_("unknown architecture"));

  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
      else
	return "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_IAMCU)
	  {
	    if (x86_elf_abi != I386_ABI)
	      as_fatal (_("Intel MCU is 32bit only"));
	    return ELF_TARGET_IAMCU_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}

#endif /* OBJ_MAYBE_ more than one */
10457 md_undefined_symbol (char *name
)
10459 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
10460 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
10461 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
10462 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
10466 if (symbol_find (name
))
10467 as_bad (_("GOT already in symbol table"));
10468 GOT_symbol
= symbol_new (name
, undefined_section
,
10469 (valueT
) 0, &zero_address_frag
);
10476 /* Round up a section size to the appropriate boundary. */
10479 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
10481 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10482 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
10484 /* For a.out, force the section size to be aligned. If we don't do
10485 this, BFD will align it for us, but it will not write out the
10486 final bytes of the section. This may be a bug in BFD, but it is
10487 easier to fix it here since that is how the other a.out targets
10491 align
= bfd_get_section_alignment (stdoutput
, segment
);
10492 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
10499 /* On the i386, PC-relative offsets are relative to the start of the
10500 next instruction. That is, the address of the offset, plus its
10501 size, since the offset is always the last part of the insn. */
10504 md_pcrel_from (fixS
*fixP
)
10506 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
10512 s_bss (int ignore ATTRIBUTE_UNUSED
)
10516 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10518 obj_elf_section_change_hook ();
10520 temp
= get_absolute_expression ();
10521 subseg_set (bss_section
, (subsegT
) temp
);
10522 demand_empty_rest_of_line ();
10528 i386_validate_fix (fixS
*fixp
)
10530 if (fixp
->fx_subsy
)
10532 if (fixp
->fx_subsy
== GOT_symbol
)
10534 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
10538 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10539 if (fixp
->fx_tcbit2
)
10540 fixp
->fx_r_type
= (fixp
->fx_tcbit
10541 ? BFD_RELOC_X86_64_REX_GOTPCRELX
10542 : BFD_RELOC_X86_64_GOTPCRELX
);
10545 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
10550 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
10552 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
10554 fixp
->fx_subsy
= 0;
10557 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10558 else if (!object_64bit
)
10560 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
10561 && fixp
->fx_tcbit2
)
10562 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
10568 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
10571 bfd_reloc_code_real_type code
;
10573 switch (fixp
->fx_r_type
)
10575 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10576 case BFD_RELOC_SIZE32
:
10577 case BFD_RELOC_SIZE64
:
10578 if (S_IS_DEFINED (fixp
->fx_addsy
)
10579 && !S_IS_EXTERNAL (fixp
->fx_addsy
))
10581 /* Resolve size relocation against local symbol to size of
10582 the symbol plus addend. */
10583 valueT value
= S_GET_SIZE (fixp
->fx_addsy
) + fixp
->fx_offset
;
10584 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
10585 && !fits_in_unsigned_long (value
))
10586 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10587 _("symbol size computation overflow"));
10588 fixp
->fx_addsy
= NULL
;
10589 fixp
->fx_subsy
= NULL
;
10590 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
10595 case BFD_RELOC_X86_64_PLT32
:
10596 case BFD_RELOC_X86_64_GOT32
:
10597 case BFD_RELOC_X86_64_GOTPCREL
:
10598 case BFD_RELOC_X86_64_GOTPCRELX
:
10599 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
10600 case BFD_RELOC_386_PLT32
:
10601 case BFD_RELOC_386_GOT32
:
10602 case BFD_RELOC_386_GOT32X
:
10603 case BFD_RELOC_386_GOTOFF
:
10604 case BFD_RELOC_386_GOTPC
:
10605 case BFD_RELOC_386_TLS_GD
:
10606 case BFD_RELOC_386_TLS_LDM
:
10607 case BFD_RELOC_386_TLS_LDO_32
:
10608 case BFD_RELOC_386_TLS_IE_32
:
10609 case BFD_RELOC_386_TLS_IE
:
10610 case BFD_RELOC_386_TLS_GOTIE
:
10611 case BFD_RELOC_386_TLS_LE_32
:
10612 case BFD_RELOC_386_TLS_LE
:
10613 case BFD_RELOC_386_TLS_GOTDESC
:
10614 case BFD_RELOC_386_TLS_DESC_CALL
:
10615 case BFD_RELOC_X86_64_TLSGD
:
10616 case BFD_RELOC_X86_64_TLSLD
:
10617 case BFD_RELOC_X86_64_DTPOFF32
:
10618 case BFD_RELOC_X86_64_DTPOFF64
:
10619 case BFD_RELOC_X86_64_GOTTPOFF
:
10620 case BFD_RELOC_X86_64_TPOFF32
:
10621 case BFD_RELOC_X86_64_TPOFF64
:
10622 case BFD_RELOC_X86_64_GOTOFF64
:
10623 case BFD_RELOC_X86_64_GOTPC32
:
10624 case BFD_RELOC_X86_64_GOT64
:
10625 case BFD_RELOC_X86_64_GOTPCREL64
:
10626 case BFD_RELOC_X86_64_GOTPC64
:
10627 case BFD_RELOC_X86_64_GOTPLT64
:
10628 case BFD_RELOC_X86_64_PLTOFF64
:
10629 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10630 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10631 case BFD_RELOC_RVA
:
10632 case BFD_RELOC_VTABLE_ENTRY
:
10633 case BFD_RELOC_VTABLE_INHERIT
:
10635 case BFD_RELOC_32_SECREL
:
10637 code
= fixp
->fx_r_type
;
10639 case BFD_RELOC_X86_64_32S
:
10640 if (!fixp
->fx_pcrel
)
10642 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
10643 code
= fixp
->fx_r_type
;
10647 if (fixp
->fx_pcrel
)
10649 switch (fixp
->fx_size
)
10652 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10653 _("can not do %d byte pc-relative relocation"),
10655 code
= BFD_RELOC_32_PCREL
;
10657 case 1: code
= BFD_RELOC_8_PCREL
; break;
10658 case 2: code
= BFD_RELOC_16_PCREL
; break;
10659 case 4: code
= BFD_RELOC_32_PCREL
; break;
10661 case 8: code
= BFD_RELOC_64_PCREL
; break;
10667 switch (fixp
->fx_size
)
10670 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10671 _("can not do %d byte relocation"),
10673 code
= BFD_RELOC_32
;
10675 case 1: code
= BFD_RELOC_8
; break;
10676 case 2: code
= BFD_RELOC_16
; break;
10677 case 4: code
= BFD_RELOC_32
; break;
10679 case 8: code
= BFD_RELOC_64
; break;
10686 if ((code
== BFD_RELOC_32
10687 || code
== BFD_RELOC_32_PCREL
10688 || code
== BFD_RELOC_X86_64_32S
)
10690 && fixp
->fx_addsy
== GOT_symbol
)
10693 code
= BFD_RELOC_386_GOTPC
;
10695 code
= BFD_RELOC_X86_64_GOTPC32
;
10697 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
10699 && fixp
->fx_addsy
== GOT_symbol
)
10701 code
= BFD_RELOC_X86_64_GOTPC64
;
10704 rel
= XNEW (arelent
);
10705 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
10706 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
10708 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
10710 if (!use_rela_relocations
)
10712 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
10713 vtable entry to be used in the relocation's section offset. */
10714 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
10715 rel
->address
= fixp
->fx_offset
;
10716 #if defined (OBJ_COFF) && defined (TE_PE)
10717 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
10718 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
10723 /* Use the rela in 64bit mode. */
10726 if (disallow_64bit_reloc
)
10729 case BFD_RELOC_X86_64_DTPOFF64
:
10730 case BFD_RELOC_X86_64_TPOFF64
:
10731 case BFD_RELOC_64_PCREL
:
10732 case BFD_RELOC_X86_64_GOTOFF64
:
10733 case BFD_RELOC_X86_64_GOT64
:
10734 case BFD_RELOC_X86_64_GOTPCREL64
:
10735 case BFD_RELOC_X86_64_GOTPC64
:
10736 case BFD_RELOC_X86_64_GOTPLT64
:
10737 case BFD_RELOC_X86_64_PLTOFF64
:
10738 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10739 _("cannot represent relocation type %s in x32 mode"),
10740 bfd_get_reloc_code_name (code
));
10746 if (!fixp
->fx_pcrel
)
10747 rel
->addend
= fixp
->fx_offset
;
10751 case BFD_RELOC_X86_64_PLT32
:
10752 case BFD_RELOC_X86_64_GOT32
:
10753 case BFD_RELOC_X86_64_GOTPCREL
:
10754 case BFD_RELOC_X86_64_GOTPCRELX
:
10755 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
10756 case BFD_RELOC_X86_64_TLSGD
:
10757 case BFD_RELOC_X86_64_TLSLD
:
10758 case BFD_RELOC_X86_64_GOTTPOFF
:
10759 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10760 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10761 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
10764 rel
->addend
= (section
->vma
10766 + fixp
->fx_addnumber
10767 + md_pcrel_from (fixp
));
10772 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
10773 if (rel
->howto
== NULL
)
10775 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
10776 _("cannot represent relocation type %s"),
10777 bfd_get_reloc_code_name (code
));
10778 /* Set howto to a garbage value so that we can keep going. */
10779 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
10780 gas_assert (rel
->howto
!= NULL
);
10786 #include "tc-i386-intel.c"
/* Parse the text at INPUT_LINE_POINTER as a register name and, on
   success, turn EXP into an O_constant holding that register's DWARF2
   number.  On an out-of-range register EXP is set to O_illegal.  The
   register parser is temporarily relaxed: registers may appear without
   the '%' prefix, '.' is accepted inside names, and pseudo registers
   are allowed while the expression is read.  */
void
tc_x86_parse_to_dw2regnum (expressionS *exp)
{
  /* Saved copies of the global parser state we override below.  */
  int saved_naked_reg;
  char saved_register_dot;

  saved_naked_reg = allow_naked_reg;
  allow_naked_reg = 1;
  saved_register_dot = register_chars['.'];
  /* Let '.' be part of a register token while parsing.  */
  register_chars['.'] = '.';
  allow_pseudo_reg = 1;
  expression_and_evaluate (exp);
  allow_pseudo_reg = 0;
  /* Restore the parser state we changed above.  */
  register_chars['.'] = saved_register_dot;
  allow_naked_reg = saved_naked_reg;

  if (exp->X_op == O_register && exp->X_add_number >= 0)
    {
      if ((addressT) exp->X_add_number < i386_regtab_size)
	{
	  /* Map the register-table index to its DWARF2 number.  The
	     dw2_regnum array is indexed by flag_code >> 1: one entry
	     for 16/32-bit modes, one for 64-bit mode.  */
	  exp->X_op = O_constant;
	  exp->X_add_number = i386_regtab[exp->X_add_number]
			      .dw2_regnum[flag_code >> 1];
	}
      else
	exp->X_op = O_illegal;
    }
}
/* Emit the initial CFI instructions for a function: define the CFA as
   the stack pointer plus the default offset, and record the return
   address column.  The stack pointer's DWARF2 register number is
   computed once per mode and cached.  */
void
tc_x86_frame_initial_instructions (void)
{
  /* Cached DWARF2 regno of the stack pointer, indexed by
     flag_code >> 1 (0: 16/32-bit "esp", 1: 64-bit "rsp").  A zero
     entry means "not yet computed".  */
  static unsigned int sp_regno[2];

  if (!sp_regno[flag_code >> 1])
    {
      char *saved_input = input_line_pointer;
      char sp[][4] = {"esp", "rsp"};
      expressionS exp;

      /* Feed "esp"/"rsp" through the register parser to obtain the
	 DWARF2 register number for the current mode.  */
      input_line_pointer = sp[flag_code >> 1];
      tc_x86_parse_to_dw2regnum (&exp);
      gas_assert (exp.X_op == O_constant);
      sp_regno[flag_code >> 1] = exp.X_add_number;
      input_line_pointer = saved_input;
    }

  cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
  cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
}
/* Return the size in bytes of a DWARF2 address: 4 for the x32 ABI
   (ELF32 objects with 64-bit registers), otherwise the target's
   natural address size as reported by BFD.  */
int
x86_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (x86_elf_abi == X86_64_X32_ABI)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
10850 i386_elf_section_type (const char *str
, size_t len
)
10852 if (flag_code
== CODE_64BIT
10853 && len
== sizeof ("unwind") - 1
10854 && strncmp (str
, "unwind", 6) == 0)
10855 return SHT_X86_64_UNWIND
;
10862 i386_solaris_fix_up_eh_frame (segT sec
)
10864 if (flag_code
== CODE_64BIT
)
10865 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
10871 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
10875 exp
.X_op
= O_secrel
;
10876 exp
.X_add_symbol
= symbol
;
10877 exp
.X_add_number
= 0;
10878 emit_expr (&exp
, size
);
10882 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10883 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
/* Handle the x86-64 specific 'l' flag in a .section directive, which
   requests SHF_X86_64_LARGE.  On failure, point *PTR_MSG at a
   diagnostic listing the accepted flag letters ('l' is only offered
   in 64-bit mode) and return -1.  */
bfd_vma
x86_64_section_letter (int letter, const char **ptr_msg)
{
  if (flag_code == CODE_64BIT)
    {
      if (letter == 'l')
	return SHF_X86_64_LARGE;

      *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
    }
  else
    *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
  return -1;
}
10901 x86_64_section_word (char *str
, size_t len
)
10903 if (len
== 5 && flag_code
== CODE_64BIT
&& CONST_STRNEQ (str
, "large"))
10904 return SHF_X86_64_LARGE
;
/* Implement the .largecomm pseudo-op.  Outside 64-bit mode this
   degrades to an ordinary .comm with a warning.  In 64-bit mode the
   symbol is placed in the large common section, with .lbss (created
   lazily on first use) standing in for .bss so that local symbols
   also land in a "large" section.  SMALL is unused; it exists only to
   match the pseudo-op handler signature.  */
static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      /* Created once and then reused by every .largecomm.  */
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      if (lbss_section == NULL)
	{
	  flagword applicable;
	  segT seg = now_seg;
	  subsegT subseg = now_subseg;

	  /* The .lbss section is for local .largecomm symbols.  */
	  lbss_section = subseg_new (".lbss", 0);
	  applicable = bfd_applicable_section_flags (stdoutput);
	  bfd_set_section_flags (stdoutput, lbss_section,
				 applicable & SEC_ALLOC);
	  seg_info (lbss_section)->bss = 1;

	  /* subseg_new switched the current section to .lbss; go back
	     to where we were before creating it.  */
	  subseg_set (seg, subseg);
	}

      /* Temporarily redirect common/bss handling at the large
	 sections so s_comm_internal places the symbol there.  */
      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      /* Restore the normal common/bss sections.  */
      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
10948 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */