1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
5 Free Software Foundation, Inc.
7 This file is part of GAS, the GNU Assembler.
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
02110-1301, USA.  */
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
32 #include "safe-ctype.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
/* Default tunables; a target or tc header may predefine these.  */
#ifndef REGISTER_WARNINGS
#define REGISTER_WARNINGS 1
#endif

#ifndef INFER_ADDR_PREFIX
#define INFER_ADDR_PREFIX 1
#endif

#ifndef DEFAULT_ARCH
#define DEFAULT_ARCH "i386"
#endif

#ifndef INLINE
#ifdef __GNUC__
#define INLINE __inline__
#else
#define INLINE
#endif
#endif
59 /* Prefixes will be emitted in the order defined below.
60 WAIT_PREFIX must be the first prefix since FWAIT is really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX, LOCK_PREFIX. */
#define REX_PREFIX	6       /* must come last.  */
#define MAX_PREFIXES	7	/* max prefixes per opcode */
/* we define the syntax here (modulo base,index,scale syntax) */
#define REGISTER_PREFIX '%'
#define IMMEDIATE_PREFIX '$'
#define ABSOLUTE_PREFIX '*'

/* these are the instruction mnemonic suffixes in AT&T syntax or
   memory operand size in Intel syntax.  */
#define WORD_MNEM_SUFFIX  'w'
#define BYTE_MNEM_SUFFIX  'b'
#define SHORT_MNEM_SUFFIX 's'
#define LONG_MNEM_SUFFIX  'l'
#define QWORD_MNEM_SUFFIX  'q'
#define XMMWORD_MNEM_SUFFIX  'x'
#define YMMWORD_MNEM_SUFFIX 'y'
/* Intel Syntax.  Use a non-ascii letter since it never appears
   in mnemonics.  */
#define LONG_DOUBLE_MNEM_SUFFIX '\1'

#define END_OF_INSN '\0'
94 'templates' is for grouping together 'template' structures for opcodes
95 of the same name. This is only used for storing the insns in the grand
96 ole hash table of insns.
97 The templates themselves start at START and range up to (but not including)
102 const insn_template
*start
;
103 const insn_template
*end
;
107 /* 386 operand encoding bytes: see 386 book for details of this. */
110 unsigned int regmem
; /* codes register or memory operand */
111 unsigned int reg
; /* codes register operand (or extended opcode) */
112 unsigned int mode
; /* how to interpret regmem & reg */
116 /* x86-64 extension prefix. */
117 typedef int rex_byte
;
119 /* 386 opcode byte to code indirect addressing. */
128 /* x86 arch names, types and features */
131 const char *name
; /* arch name */
132 unsigned int len
; /* arch string length */
133 enum processor_type type
; /* arch type */
134 i386_cpu_flags flags
; /* cpu feature flags */
135 unsigned int skip
; /* show_arch should skip this. */
136 unsigned int negated
; /* turn off indicated flags. */
140 static void update_code_flag (int, int);
141 static void set_code_flag (int);
142 static void set_16bit_gcc_code_flag (int);
143 static void set_intel_syntax (int);
144 static void set_intel_mnemonic (int);
145 static void set_allow_index_reg (int);
146 static void set_sse_check (int);
147 static void set_cpu_arch (int);
149 static void pe_directive_secrel (int);
151 static void signed_cons (int);
152 static char *output_invalid (int c
);
153 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
155 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
157 static int i386_att_operand (char *);
158 static int i386_intel_operand (char *, int);
159 static int i386_intel_simplify (expressionS
*);
160 static int i386_intel_parse_name (const char *, expressionS
*);
161 static const reg_entry
*parse_register (char *, char **);
162 static char *parse_insn (char *, char *);
163 static char *parse_operands (char *, const char *);
164 static void swap_operands (void);
165 static void swap_2_operands (int, int);
166 static void optimize_imm (void);
167 static void optimize_disp (void);
168 static const insn_template
*match_template (void);
169 static int check_string (void);
170 static int process_suffix (void);
171 static int check_byte_reg (void);
172 static int check_long_reg (void);
173 static int check_qword_reg (void);
174 static int check_word_reg (void);
175 static int finalize_imm (void);
176 static int process_operands (void);
177 static const seg_entry
*build_modrm_byte (void);
178 static void output_insn (void);
179 static void output_imm (fragS
*, offsetT
);
180 static void output_disp (fragS
*, offsetT
);
182 static void s_bss (int);
184 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
185 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
188 static const char *default_arch
= DEFAULT_ARCH
;
193 /* VEX prefix is either 2 byte or 3 byte. */
194 unsigned char bytes
[3];
196 /* Destination or source register specifier. */
197 const reg_entry
*register_specifier
;
200 /* 'md_assemble ()' gathers together information and puts it into a
207 const reg_entry
*regs
;
212 operand_size_mismatch
,
213 operand_type_mismatch
,
214 register_type_mismatch
,
215 number_of_operands_mismatch
,
216 invalid_instruction_suffix
,
219 unsupported_with_intel_mnemonic
,
222 invalid_vsib_address
,
223 unsupported_vector_index_register
228 /* TM holds the template for the insn were currently assembling. */
231 /* SUFFIX holds the instruction size suffix for byte, word, dword
232 or qword, if given. */
235 /* OPERANDS gives the number of given operands. */
236 unsigned int operands
;
238 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
239 of given register, displacement, memory operands and immediate
241 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
243 /* TYPES [i] is the type (see above #defines) which tells us how to
244 use OP[i] for the corresponding operand. */
245 i386_operand_type types
[MAX_OPERANDS
];
247 /* Displacement expression, immediate expression, or register for each
249 union i386_op op
[MAX_OPERANDS
];
251 /* Flags for operands. */
252 unsigned int flags
[MAX_OPERANDS
];
253 #define Operand_PCrel 1
255 /* Relocation type for operand */
256 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
258 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
259 the base index byte below. */
260 const reg_entry
*base_reg
;
261 const reg_entry
*index_reg
;
262 unsigned int log2_scale_factor
;
264 /* SEG gives the seg_entries of this insn. They are zero unless
265 explicit segment overrides are given. */
266 const seg_entry
*seg
[2];
268 /* PREFIX holds all the given prefix opcodes (usually null).
269 PREFIXES is the number of prefix opcodes. */
270 unsigned int prefixes
;
271 unsigned char prefix
[MAX_PREFIXES
];
273 /* RM and SIB are the modrm byte and the sib byte where the
274 addressing modes of this insn are encoded. */
280 /* Swap operand in encoding. */
281 unsigned int swap_operand
;
283 /* Force 32bit displacement in encoding. */
284 unsigned int disp32_encoding
;
287 enum i386_error error
;
290 typedef struct _i386_insn i386_insn
;
292 /* List of chars besides those in app.c:symbol_chars that can start an
293 operand. Used to prevent the scrubber eating vital white-space. */
294 const char extra_symbol_chars
[] = "*%-(["
#if (defined (TE_I386AIX)				\
     || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))	\
	 && !defined (TE_GNU)				\
	 && !defined (TE_LINUX)				\
	 && !defined (TE_NETWARE)			\
	 && !defined (TE_FreeBSD)			\
	 && !defined (TE_DragonFly)			\
	 && !defined (TE_NetBSD)))
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  The option
   --divide will remove '/' from this list.  */
const char *i386_comment_chars = "#/";
#define SVR4_COMMENT_CHARS 1
#define PREFIX_SEPARATOR '\\'

#else
const char *i386_comment_chars = "#";
#define PREFIX_SEPARATOR '/'
#endif
/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.
   Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.
   Also note that comments started like this one will always work if
   '/' isn't otherwise defined.  */
const char line_comment_chars[] = "#/";

const char line_separator_chars[] = ";";
/* Chars that can be used to separate mant from exp in floating point
   nums.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant,
   as in 0f12.456 or 0d1.2345e12.  */
const char FLT_CHARS[] = "fFdDxX";
/* Tables for lexical analysis.  Each is a 256-entry membership table,
   indexed by unsigned char, filled in at startup (initialization code
   is elsewhere in this file).  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];
static char digit_chars[256];
/* Lexical macros.  Each tests membership of X (any char-valued
   expression) in the corresponding table above.  The argument is
   parenthesized so expressions like 'c + 1' index correctly.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) (x)])
#define is_operand_char(x) (operand_chars[(unsigned char) (x)])
#define is_register_char(x) (register_chars[(unsigned char) (x)])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) (x)])
#define is_digit_char(x) (digit_chars[(unsigned char) (x)])
/* All non-digit non-letter characters that may occur in an operand.  */
static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).  */
static char save_stack[32];
static char *save_stack_p;
/* Temporarily NUL-terminate the token ending at S, pushing the
   smashed character onto SAVE_STACK.  */
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
/* Undo the most recent END_STRING_AND_SAVE on S.  */
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
373 /* The instruction we're assembling. */
376 /* Possible templates for current insn. */
377 static const templates
*current_templates
;
379 /* Per instruction expressionS buffers: max displacements & immediates. */
380 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
381 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
/* Current operand we are working on; -1 means none.  */
static int this_operand = -1;
386 /* We support four different modes. FLAG_CODE variable is used to distinguish
394 static enum flag_code flag_code
;
395 static unsigned int object_64bit
;
396 static unsigned int disallow_64bit_reloc
;
397 static int use_rela_relocations
= 0;
399 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
400 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
401 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
403 /* The ELF ABI to use. */
411 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
414 /* The names used to print error messages. */
415 static const char *flag_code_names
[] =
422 /* 1 for intel syntax,
424 static int intel_syntax
= 0;
426 /* 1 for intel mnemonic,
427 0 if att mnemonic. */
428 static int intel_mnemonic
= !SYSV386_COMPAT
;
430 /* 1 if support old (<= 2.8.1) versions of gcc. */
431 static int old_gcc
= OLDGCC_COMPAT
;
433 /* 1 if pseudo registers are permitted. */
434 static int allow_pseudo_reg
= 0;
436 /* 1 if register prefix % not required. */
437 static int allow_naked_reg
= 0;
439 /* 1 if pseudo index register, eiz/riz, is allowed . */
440 static int allow_index_reg
= 0;
/* Register prefix used for error message.  */
static const char *register_prefix = "%";

/* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
   leave, push, and pop instructions so that gcc has the same stack
   frame as in 32 bit mode.  */
static char stackop_size = '\0';

/* Non-zero to optimize code alignment.  */
int optimize_align_code = 1;

/* Non-zero to quieten some warnings.  */
static int quiet_warnings = 0;
465 static const char *cpu_arch_name
= NULL
;
466 static char *cpu_sub_arch_name
= NULL
;
468 /* CPU feature flags. */
469 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
471 /* If we have selected a cpu we are generating instructions for. */
472 static int cpu_arch_tune_set
= 0;
474 /* Cpu we are generating instructions for. */
475 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
477 /* CPU feature flags of cpu we are generating instructions for. */
478 static i386_cpu_flags cpu_arch_tune_flags
;
480 /* CPU instruction set architecture used. */
481 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
483 /* CPU feature flags of instruction set architecture used. */
484 i386_cpu_flags cpu_arch_isa_flags
;
486 /* If set, conditional jumps are not automatically promoted to handle
487 larger than a byte offset. */
488 static unsigned int no_cond_jump_promotion
= 0;
490 /* Encode SSE instructions with VEX prefix. */
491 static unsigned int sse2avx
;
493 /* Encode scalar AVX instructions with specific vector length. */
500 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
501 static symbolS
*GOT_symbol
;
503 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
504 unsigned int x86_dwarf2_return_column
;
506 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
507 int x86_cie_data_alignment
;
509 /* Interface to relax_segment.
510 There are 3 major relax states for 386 jump insns because the
511 different types of jumps add different sizes to frags when we're
512 figuring out what sort of jump to choose to reach a given label. */
515 #define UNCOND_JUMP 0
517 #define COND_JUMP86 2
522 #define SMALL16 (SMALL | CODE16)
524 #define BIG16 (BIG | CODE16)
528 #define INLINE __inline__
/* Pack a jump TYPE and displacement SIZE into a relax_substateT; the
   two decoders below invert this packing.  */
#define ENCODE_RELAX_STATE(type, size) \
  ((relax_substateT) (((type) << 2) | (size)))
/* Recover the jump type (inverse of the '<< 2' above).  NOTE(review):
   this macro's body line was lost from this chunk; restored as the
   arithmetic inverse of ENCODE_RELAX_STATE — confirm against the full
   file.  */
#define TYPE_FROM_RELAX_STATE(s) \
  ((s) >> 2)
/* Recover the displacement width in bytes from the low two bits.  */
#define DISP_SIZE_FROM_RELAX_STATE(s) \
    ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
541 /* This table is used by relax_frag to promote short jumps to long
542 ones where necessary. SMALL (short) jumps may be promoted to BIG
543 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
544 don't allow a short jump in a 32 bit code segment to be promoted to
545 a 16 bit offset jump because it's slower (requires data size
546 prefix), and doesn't work, unless the destination is in the bottom
547 64k of the code segment (The top 16 bits of eip are zeroed). */
549 const relax_typeS md_relax_table
[] =
552 1) most positive reach of this state,
553 2) most negative reach of this state,
554 3) how many bytes this mode will have in the variable part of the frag
555 4) which index into the table to try if we can't fit into this one. */
557 /* UNCOND_JUMP states. */
558 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
559 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
560 /* dword jmp adds 4 bytes to frag:
561 0 extra opcode bytes, 4 displacement bytes. */
563 /* word jmp adds 2 byte2 to frag:
564 0 extra opcode bytes, 2 displacement bytes. */
567 /* COND_JUMP states. */
568 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
570 /* dword conditionals adds 5 bytes to frag:
571 1 extra opcode byte, 4 displacement bytes. */
573 /* word conditionals add 3 bytes to frag:
574 1 extra opcode byte, 2 displacement bytes. */
577 /* COND_JUMP86 states. */
578 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
580 /* dword conditionals adds 5 bytes to frag:
581 1 extra opcode byte, 4 displacement bytes. */
583 /* word conditionals add 4 bytes to frag:
584 1 displacement byte and a 3 byte long branch insn. */
588 static const arch_entry cpu_arch
[] =
590 /* Do not replace the first two entries - i386_target_format()
591 relies on them being there in this order. */
592 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
593 CPU_GENERIC32_FLAGS
, 0, 0 },
594 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
595 CPU_GENERIC64_FLAGS
, 0, 0 },
596 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
597 CPU_NONE_FLAGS
, 0, 0 },
598 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
599 CPU_I186_FLAGS
, 0, 0 },
600 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
601 CPU_I286_FLAGS
, 0, 0 },
602 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
603 CPU_I386_FLAGS
, 0, 0 },
604 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
605 CPU_I486_FLAGS
, 0, 0 },
606 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
607 CPU_I586_FLAGS
, 0, 0 },
608 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
609 CPU_I686_FLAGS
, 0, 0 },
610 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
611 CPU_I586_FLAGS
, 0, 0 },
612 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
613 CPU_PENTIUMPRO_FLAGS
, 0, 0 },
614 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
615 CPU_P2_FLAGS
, 0, 0 },
616 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
617 CPU_P3_FLAGS
, 0, 0 },
618 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
619 CPU_P4_FLAGS
, 0, 0 },
620 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
621 CPU_CORE_FLAGS
, 0, 0 },
622 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
623 CPU_NOCONA_FLAGS
, 0, 0 },
624 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
625 CPU_CORE_FLAGS
, 1, 0 },
626 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
627 CPU_CORE_FLAGS
, 0, 0 },
628 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
629 CPU_CORE2_FLAGS
, 1, 0 },
630 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
631 CPU_CORE2_FLAGS
, 0, 0 },
632 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
633 CPU_COREI7_FLAGS
, 0, 0 },
634 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
635 CPU_L1OM_FLAGS
, 0, 0 },
636 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
637 CPU_K1OM_FLAGS
, 0, 0 },
638 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
639 CPU_K6_FLAGS
, 0, 0 },
640 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
641 CPU_K6_2_FLAGS
, 0, 0 },
642 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
643 CPU_ATHLON_FLAGS
, 0, 0 },
644 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
645 CPU_K8_FLAGS
, 1, 0 },
646 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
647 CPU_K8_FLAGS
, 0, 0 },
648 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
649 CPU_K8_FLAGS
, 0, 0 },
650 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
651 CPU_AMDFAM10_FLAGS
, 0, 0 },
652 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
653 CPU_BDVER1_FLAGS
, 0, 0 },
654 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
655 CPU_BDVER2_FLAGS
, 0, 0 },
656 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
657 CPU_8087_FLAGS
, 0, 0 },
658 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
659 CPU_287_FLAGS
, 0, 0 },
660 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
661 CPU_387_FLAGS
, 0, 0 },
662 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN
,
663 CPU_ANY87_FLAGS
, 0, 1 },
664 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
665 CPU_MMX_FLAGS
, 0, 0 },
666 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN
,
667 CPU_3DNOWA_FLAGS
, 0, 1 },
668 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
669 CPU_SSE_FLAGS
, 0, 0 },
670 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
671 CPU_SSE2_FLAGS
, 0, 0 },
672 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
673 CPU_SSE3_FLAGS
, 0, 0 },
674 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
675 CPU_SSSE3_FLAGS
, 0, 0 },
676 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
677 CPU_SSE4_1_FLAGS
, 0, 0 },
678 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
679 CPU_SSE4_2_FLAGS
, 0, 0 },
680 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
681 CPU_SSE4_2_FLAGS
, 0, 0 },
682 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN
,
683 CPU_ANY_SSE_FLAGS
, 0, 1 },
684 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
685 CPU_AVX_FLAGS
, 0, 0 },
686 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
687 CPU_AVX2_FLAGS
, 0, 0 },
688 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN
,
689 CPU_ANY_AVX_FLAGS
, 0, 1 },
690 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
691 CPU_VMX_FLAGS
, 0, 0 },
692 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
693 CPU_VMFUNC_FLAGS
, 0, 0 },
694 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
695 CPU_SMX_FLAGS
, 0, 0 },
696 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
697 CPU_XSAVE_FLAGS
, 0, 0 },
698 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
699 CPU_XSAVEOPT_FLAGS
, 0, 0 },
700 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
701 CPU_AES_FLAGS
, 0, 0 },
702 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
703 CPU_PCLMUL_FLAGS
, 0, 0 },
704 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
705 CPU_PCLMUL_FLAGS
, 1, 0 },
706 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
707 CPU_FSGSBASE_FLAGS
, 0, 0 },
708 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
709 CPU_RDRND_FLAGS
, 0, 0 },
710 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
711 CPU_F16C_FLAGS
, 0, 0 },
712 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
713 CPU_BMI2_FLAGS
, 0, 0 },
714 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
715 CPU_FMA_FLAGS
, 0, 0 },
716 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
717 CPU_FMA4_FLAGS
, 0, 0 },
718 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
719 CPU_XOP_FLAGS
, 0, 0 },
720 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
721 CPU_LWP_FLAGS
, 0, 0 },
722 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
723 CPU_MOVBE_FLAGS
, 0, 0 },
724 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
725 CPU_EPT_FLAGS
, 0, 0 },
726 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
727 CPU_LZCNT_FLAGS
, 0, 0 },
728 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
729 CPU_INVPCID_FLAGS
, 0, 0 },
730 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
731 CPU_CLFLUSH_FLAGS
, 0, 0 },
732 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
733 CPU_NOP_FLAGS
, 0, 0 },
734 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
735 CPU_SYSCALL_FLAGS
, 0, 0 },
736 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
737 CPU_RDTSCP_FLAGS
, 0, 0 },
738 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
739 CPU_3DNOW_FLAGS
, 0, 0 },
740 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
741 CPU_3DNOWA_FLAGS
, 0, 0 },
742 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
743 CPU_PADLOCK_FLAGS
, 0, 0 },
744 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
745 CPU_SVME_FLAGS
, 1, 0 },
746 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
747 CPU_SVME_FLAGS
, 0, 0 },
748 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
749 CPU_SSE4A_FLAGS
, 0, 0 },
750 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
751 CPU_ABM_FLAGS
, 0, 0 },
752 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
753 CPU_BMI_FLAGS
, 0, 0 },
754 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
755 CPU_TBM_FLAGS
, 0, 0 },
759 /* Like s_lcomm_internal in gas/read.c but the alignment string
760 is allowed to be optional. */
763 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
770 && *input_line_pointer
== ',')
772 align
= parse_align (needs_align
- 1);
774 if (align
== (addressT
) -1)
789 bss_alloc (symbolP
, size
, align
);
794 pe_lcomm (int needs_align
)
796 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
800 const pseudo_typeS md_pseudo_table
[] =
802 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
803 {"align", s_align_bytes
, 0},
805 {"align", s_align_ptwo
, 0},
807 {"arch", set_cpu_arch
, 0},
811 {"lcomm", pe_lcomm
, 1},
813 {"ffloat", float_cons
, 'f'},
814 {"dfloat", float_cons
, 'd'},
815 {"tfloat", float_cons
, 'x'},
817 {"slong", signed_cons
, 4},
818 {"noopt", s_ignore
, 0},
819 {"optim", s_ignore
, 0},
820 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
821 {"code16", set_code_flag
, CODE_16BIT
},
822 {"code32", set_code_flag
, CODE_32BIT
},
823 {"code64", set_code_flag
, CODE_64BIT
},
824 {"intel_syntax", set_intel_syntax
, 1},
825 {"att_syntax", set_intel_syntax
, 0},
826 {"intel_mnemonic", set_intel_mnemonic
, 1},
827 {"att_mnemonic", set_intel_mnemonic
, 0},
828 {"allow_index_reg", set_allow_index_reg
, 1},
829 {"disallow_index_reg", set_allow_index_reg
, 0},
830 {"sse_check", set_sse_check
, 0},
831 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
832 {"largecomm", handle_large_common
, 0},
834 {"file", (void (*) (int)) dwarf2_directive_file
, 0},
835 {"loc", dwarf2_directive_loc
, 0},
836 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
839 {"secrel32", pe_directive_secrel
, 0},
/* For interface with expression ().  */
extern char *input_line_pointer;

/* Hash table for instruction mnemonic lookup.  */
static struct hash_control *op_hash;

/* Hash table for register lookup.  */
static struct hash_control *reg_hash;
854 i386_align_code (fragS
*fragP
, int count
)
856 /* Various efficient no-op patterns for aligning code labels.
857 Note: Don't try to assemble the instructions in the comments.
858 0L and 0w are not legal. */
859 static const char f32_1
[] =
861 static const char f32_2
[] =
862 {0x66,0x90}; /* xchg %ax,%ax */
863 static const char f32_3
[] =
864 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
865 static const char f32_4
[] =
866 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
867 static const char f32_5
[] =
869 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
870 static const char f32_6
[] =
871 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
872 static const char f32_7
[] =
873 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
874 static const char f32_8
[] =
876 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
877 static const char f32_9
[] =
878 {0x89,0xf6, /* movl %esi,%esi */
879 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
880 static const char f32_10
[] =
881 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
882 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
883 static const char f32_11
[] =
884 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
885 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
886 static const char f32_12
[] =
887 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
888 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
889 static const char f32_13
[] =
890 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
891 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
892 static const char f32_14
[] =
893 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
894 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
895 static const char f16_3
[] =
896 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
897 static const char f16_4
[] =
898 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
899 static const char f16_5
[] =
901 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
902 static const char f16_6
[] =
903 {0x89,0xf6, /* mov %si,%si */
904 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
905 static const char f16_7
[] =
906 {0x8d,0x74,0x00, /* lea 0(%si),%si */
907 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
908 static const char f16_8
[] =
909 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
910 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
911 static const char jump_31
[] =
912 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
913 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
914 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
915 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
916 static const char *const f32_patt
[] = {
917 f32_1
, f32_2
, f32_3
, f32_4
, f32_5
, f32_6
, f32_7
, f32_8
,
918 f32_9
, f32_10
, f32_11
, f32_12
, f32_13
, f32_14
920 static const char *const f16_patt
[] = {
921 f32_1
, f32_2
, f16_3
, f16_4
, f16_5
, f16_6
, f16_7
, f16_8
924 static const char alt_3
[] =
926 /* nopl 0(%[re]ax) */
927 static const char alt_4
[] =
928 {0x0f,0x1f,0x40,0x00};
929 /* nopl 0(%[re]ax,%[re]ax,1) */
930 static const char alt_5
[] =
931 {0x0f,0x1f,0x44,0x00,0x00};
932 /* nopw 0(%[re]ax,%[re]ax,1) */
933 static const char alt_6
[] =
934 {0x66,0x0f,0x1f,0x44,0x00,0x00};
935 /* nopl 0L(%[re]ax) */
936 static const char alt_7
[] =
937 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
938 /* nopl 0L(%[re]ax,%[re]ax,1) */
939 static const char alt_8
[] =
940 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
941 /* nopw 0L(%[re]ax,%[re]ax,1) */
942 static const char alt_9
[] =
943 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
944 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
945 static const char alt_10
[] =
946 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
948 nopw %cs:0L(%[re]ax,%[re]ax,1) */
949 static const char alt_long_11
[] =
951 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
954 nopw %cs:0L(%[re]ax,%[re]ax,1) */
955 static const char alt_long_12
[] =
958 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
962 nopw %cs:0L(%[re]ax,%[re]ax,1) */
963 static const char alt_long_13
[] =
967 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
972 nopw %cs:0L(%[re]ax,%[re]ax,1) */
973 static const char alt_long_14
[] =
978 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
984 nopw %cs:0L(%[re]ax,%[re]ax,1) */
985 static const char alt_long_15
[] =
991 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
992 /* nopl 0(%[re]ax,%[re]ax,1)
993 nopw 0(%[re]ax,%[re]ax,1) */
994 static const char alt_short_11
[] =
995 {0x0f,0x1f,0x44,0x00,0x00,
996 0x66,0x0f,0x1f,0x44,0x00,0x00};
997 /* nopw 0(%[re]ax,%[re]ax,1)
998 nopw 0(%[re]ax,%[re]ax,1) */
999 static const char alt_short_12
[] =
1000 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1001 0x66,0x0f,0x1f,0x44,0x00,0x00};
1002 /* nopw 0(%[re]ax,%[re]ax,1)
1004 static const char alt_short_13
[] =
1005 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1006 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1009 static const char alt_short_14
[] =
1010 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1011 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1013 nopl 0L(%[re]ax,%[re]ax,1) */
1014 static const char alt_short_15
[] =
1015 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1016 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1017 static const char *const alt_short_patt
[] = {
1018 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1019 alt_9
, alt_10
, alt_short_11
, alt_short_12
, alt_short_13
,
1020 alt_short_14
, alt_short_15
1022 static const char *const alt_long_patt
[] = {
1023 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1024 alt_9
, alt_10
, alt_long_11
, alt_long_12
, alt_long_13
,
1025 alt_long_14
, alt_long_15
1028 /* Only align for at least a positive non-zero boundary. */
1029 if (count
<= 0 || count
> MAX_MEM_FOR_RS_ALIGN_CODE
)
1032 /* We need to decide which NOP sequence to use for 32bit and
1033 64bit. When -mtune= is used:
1035 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1036 PROCESSOR_GENERIC32, f32_patt will be used.
1037 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1038 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1039 PROCESSOR_GENERIC64, alt_long_patt will be used.
1040 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1041 PROCESSOR_AMDFAM10, and PROCESSOR_BD, alt_short_patt
1044 When -mtune= isn't used, alt_long_patt will be used if
1045 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1048 When -march= or .arch is used, we can't use anything beyond
1049 cpu_arch_isa_flags. */
1051 if (flag_code
== CODE_16BIT
)
1055 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1057 /* Adjust jump offset. */
1058 fragP
->fr_literal
[fragP
->fr_fix
+ 1] = count
- 2;
1061 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1062 f16_patt
[count
- 1], count
);
1066 const char *const *patt
= NULL
;
1068 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1070 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1071 switch (cpu_arch_tune
)
1073 case PROCESSOR_UNKNOWN
:
1074 /* We use cpu_arch_isa_flags to check if we SHOULD
1075 optimize with nops. */
1076 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1077 patt
= alt_long_patt
;
1081 case PROCESSOR_PENTIUM4
:
1082 case PROCESSOR_NOCONA
:
1083 case PROCESSOR_CORE
:
1084 case PROCESSOR_CORE2
:
1085 case PROCESSOR_COREI7
:
1086 case PROCESSOR_L1OM
:
1087 case PROCESSOR_K1OM
:
1088 case PROCESSOR_GENERIC64
:
1089 patt
= alt_long_patt
;
1092 case PROCESSOR_ATHLON
:
1094 case PROCESSOR_AMDFAM10
:
1096 patt
= alt_short_patt
;
1098 case PROCESSOR_I386
:
1099 case PROCESSOR_I486
:
1100 case PROCESSOR_PENTIUM
:
1101 case PROCESSOR_PENTIUMPRO
:
1102 case PROCESSOR_GENERIC32
:
1109 switch (fragP
->tc_frag_data
.tune
)
1111 case PROCESSOR_UNKNOWN
:
1112 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1113 PROCESSOR_UNKNOWN. */
1117 case PROCESSOR_I386
:
1118 case PROCESSOR_I486
:
1119 case PROCESSOR_PENTIUM
:
1121 case PROCESSOR_ATHLON
:
1123 case PROCESSOR_AMDFAM10
:
1125 case PROCESSOR_GENERIC32
:
1126 /* We use cpu_arch_isa_flags to check if we CAN optimize
1128 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1129 patt
= alt_short_patt
;
1133 case PROCESSOR_PENTIUMPRO
:
1134 case PROCESSOR_PENTIUM4
:
1135 case PROCESSOR_NOCONA
:
1136 case PROCESSOR_CORE
:
1137 case PROCESSOR_CORE2
:
1138 case PROCESSOR_COREI7
:
1139 case PROCESSOR_L1OM
:
1140 case PROCESSOR_K1OM
:
1141 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1142 patt
= alt_long_patt
;
1146 case PROCESSOR_GENERIC64
:
1147 patt
= alt_long_patt
;
1152 if (patt
== f32_patt
)
1154 /* If the padding is less than 15 bytes, we use the normal
1155 ones. Otherwise, we use a jump instruction and adjust
1159 /* For 64bit, the limit is 3 bytes. */
1160 if (flag_code
== CODE_64BIT
1161 && fragP
->tc_frag_data
.isa_flags
.bitfield
.cpulm
)
1166 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1167 patt
[count
- 1], count
);
1170 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1172 /* Adjust jump offset. */
1173 fragP
->fr_literal
[fragP
->fr_fix
+ 1] = count
- 2;
1178 /* Maximum length of an instruction is 15 byte. If the
1179 padding is greater than 15 bytes and we don't use jump,
1180 we have to break it into smaller pieces. */
1181 int padding
= count
;
1182 while (padding
> 15)
1185 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
+ padding
,
1190 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1191 patt
[padding
- 1], padding
);
1194 fragP
->fr_var
= count
;
1198 operand_type_all_zero (const union i386_operand_type
*x
)
1200 switch (ARRAY_SIZE(x
->array
))
1209 return !x
->array
[0];
1216 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1218 switch (ARRAY_SIZE(x
->array
))
1233 operand_type_equal (const union i386_operand_type
*x
,
1234 const union i386_operand_type
*y
)
1236 switch (ARRAY_SIZE(x
->array
))
1239 if (x
->array
[2] != y
->array
[2])
1242 if (x
->array
[1] != y
->array
[1])
1245 return x
->array
[0] == y
->array
[0];
1253 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1255 switch (ARRAY_SIZE(x
->array
))
1264 return !x
->array
[0];
1271 cpu_flags_set (union i386_cpu_flags
*x
, unsigned int v
)
1273 switch (ARRAY_SIZE(x
->array
))
1288 cpu_flags_equal (const union i386_cpu_flags
*x
,
1289 const union i386_cpu_flags
*y
)
1291 switch (ARRAY_SIZE(x
->array
))
1294 if (x
->array
[2] != y
->array
[2])
1297 if (x
->array
[1] != y
->array
[1])
1300 return x
->array
[0] == y
->array
[0];
1308 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1310 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1311 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1314 static INLINE i386_cpu_flags
1315 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1317 switch (ARRAY_SIZE (x
.array
))
1320 x
.array
[2] &= y
.array
[2];
1322 x
.array
[1] &= y
.array
[1];
1324 x
.array
[0] &= y
.array
[0];
1332 static INLINE i386_cpu_flags
1333 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1335 switch (ARRAY_SIZE (x
.array
))
1338 x
.array
[2] |= y
.array
[2];
1340 x
.array
[1] |= y
.array
[1];
1342 x
.array
[0] |= y
.array
[0];
1350 static INLINE i386_cpu_flags
1351 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1353 switch (ARRAY_SIZE (x
.array
))
1356 x
.array
[2] &= ~y
.array
[2];
1358 x
.array
[1] &= ~y
.array
[1];
1360 x
.array
[0] &= ~y
.array
[0];
1368 #define CPU_FLAGS_ARCH_MATCH 0x1
1369 #define CPU_FLAGS_64BIT_MATCH 0x2
1370 #define CPU_FLAGS_AES_MATCH 0x4
1371 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1372 #define CPU_FLAGS_AVX_MATCH 0x10
1374 #define CPU_FLAGS_32BIT_MATCH \
1375 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1376 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1377 #define CPU_FLAGS_PERFECT_MATCH \
1378 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1380 /* Return CPU flags match bits. */
1383 cpu_flags_match (const insn_template
*t
)
1385 i386_cpu_flags x
= t
->cpu_flags
;
1386 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1388 x
.bitfield
.cpu64
= 0;
1389 x
.bitfield
.cpuno64
= 0;
1391 if (cpu_flags_all_zero (&x
))
1393 /* This instruction is available on all archs. */
1394 match
|= CPU_FLAGS_32BIT_MATCH
;
1398 /* This instruction is available only on some archs. */
1399 i386_cpu_flags cpu
= cpu_arch_flags
;
1401 cpu
.bitfield
.cpu64
= 0;
1402 cpu
.bitfield
.cpuno64
= 0;
1403 cpu
= cpu_flags_and (x
, cpu
);
1404 if (!cpu_flags_all_zero (&cpu
))
1406 if (x
.bitfield
.cpuavx
)
1408 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1409 if (cpu
.bitfield
.cpuavx
)
1411 /* Check SSE2AVX. */
1412 if (!t
->opcode_modifier
.sse2avx
|| sse2avx
)
1414 match
|= (CPU_FLAGS_ARCH_MATCH
1415 | CPU_FLAGS_AVX_MATCH
);
1417 if (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1418 match
|= CPU_FLAGS_AES_MATCH
;
1420 if (!x
.bitfield
.cpupclmul
1421 || cpu
.bitfield
.cpupclmul
)
1422 match
|= CPU_FLAGS_PCLMUL_MATCH
;
1426 match
|= CPU_FLAGS_ARCH_MATCH
;
1429 match
|= CPU_FLAGS_32BIT_MATCH
;
1435 static INLINE i386_operand_type
1436 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1438 switch (ARRAY_SIZE (x
.array
))
1441 x
.array
[2] &= y
.array
[2];
1443 x
.array
[1] &= y
.array
[1];
1445 x
.array
[0] &= y
.array
[0];
1453 static INLINE i386_operand_type
1454 operand_type_or (i386_operand_type x
, i386_operand_type y
)
1456 switch (ARRAY_SIZE (x
.array
))
1459 x
.array
[2] |= y
.array
[2];
1461 x
.array
[1] |= y
.array
[1];
1463 x
.array
[0] |= y
.array
[0];
1471 static INLINE i386_operand_type
1472 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
1474 switch (ARRAY_SIZE (x
.array
))
1477 x
.array
[2] ^= y
.array
[2];
1479 x
.array
[1] ^= y
.array
[1];
1481 x
.array
[0] ^= y
.array
[0];
1489 static const i386_operand_type acc32
= OPERAND_TYPE_ACC32
;
1490 static const i386_operand_type acc64
= OPERAND_TYPE_ACC64
;
1491 static const i386_operand_type control
= OPERAND_TYPE_CONTROL
;
1492 static const i386_operand_type inoutportreg
1493 = OPERAND_TYPE_INOUTPORTREG
;
1494 static const i386_operand_type reg16_inoutportreg
1495 = OPERAND_TYPE_REG16_INOUTPORTREG
;
1496 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
1497 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
1498 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
1499 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
1500 static const i386_operand_type anydisp
1501 = OPERAND_TYPE_ANYDISP
;
1502 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
1503 static const i386_operand_type regymm
= OPERAND_TYPE_REGYMM
;
1504 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
1505 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
1506 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
1507 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
1508 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
1509 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
1510 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
1511 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
1512 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
1513 static const i386_operand_type vec_imm4
= OPERAND_TYPE_VEC_IMM4
;
1524 operand_type_check (i386_operand_type t
, enum operand_type c
)
1529 return (t
.bitfield
.reg8
1532 || t
.bitfield
.reg64
);
1535 return (t
.bitfield
.imm8
1539 || t
.bitfield
.imm32s
1540 || t
.bitfield
.imm64
);
1543 return (t
.bitfield
.disp8
1544 || t
.bitfield
.disp16
1545 || t
.bitfield
.disp32
1546 || t
.bitfield
.disp32s
1547 || t
.bitfield
.disp64
);
1550 return (t
.bitfield
.disp8
1551 || t
.bitfield
.disp16
1552 || t
.bitfield
.disp32
1553 || t
.bitfield
.disp32s
1554 || t
.bitfield
.disp64
1555 || t
.bitfield
.baseindex
);
1564 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1565 operand J for instruction template T. */
1568 match_reg_size (const insn_template
*t
, unsigned int j
)
1570 return !((i
.types
[j
].bitfield
.byte
1571 && !t
->operand_types
[j
].bitfield
.byte
)
1572 || (i
.types
[j
].bitfield
.word
1573 && !t
->operand_types
[j
].bitfield
.word
)
1574 || (i
.types
[j
].bitfield
.dword
1575 && !t
->operand_types
[j
].bitfield
.dword
)
1576 || (i
.types
[j
].bitfield
.qword
1577 && !t
->operand_types
[j
].bitfield
.qword
));
1580 /* Return 1 if there is no conflict in any size on operand J for
1581 instruction template T. */
1584 match_mem_size (const insn_template
*t
, unsigned int j
)
1586 return (match_reg_size (t
, j
)
1587 && !((i
.types
[j
].bitfield
.unspecified
1588 && !t
->operand_types
[j
].bitfield
.unspecified
)
1589 || (i
.types
[j
].bitfield
.fword
1590 && !t
->operand_types
[j
].bitfield
.fword
)
1591 || (i
.types
[j
].bitfield
.tbyte
1592 && !t
->operand_types
[j
].bitfield
.tbyte
)
1593 || (i
.types
[j
].bitfield
.xmmword
1594 && !t
->operand_types
[j
].bitfield
.xmmword
)
1595 || (i
.types
[j
].bitfield
.ymmword
1596 && !t
->operand_types
[j
].bitfield
.ymmword
)));
1599 /* Return 1 if there is no size conflict on any operands for
1600 instruction template T. */
1603 operand_size_match (const insn_template
*t
)
1608 /* Don't check jump instructions. */
1609 if (t
->opcode_modifier
.jump
1610 || t
->opcode_modifier
.jumpbyte
1611 || t
->opcode_modifier
.jumpdword
1612 || t
->opcode_modifier
.jumpintersegment
)
1615 /* Check memory and accumulator operand size. */
1616 for (j
= 0; j
< i
.operands
; j
++)
1618 if (t
->operand_types
[j
].bitfield
.anysize
)
1621 if (t
->operand_types
[j
].bitfield
.acc
&& !match_reg_size (t
, j
))
1627 if (i
.types
[j
].bitfield
.mem
&& !match_mem_size (t
, j
))
1636 else if (!t
->opcode_modifier
.d
&& !t
->opcode_modifier
.floatd
)
1639 i
.error
= operand_size_mismatch
;
1643 /* Check reverse. */
1644 gas_assert (i
.operands
== 2);
1647 for (j
= 0; j
< 2; j
++)
1649 if (t
->operand_types
[j
].bitfield
.acc
1650 && !match_reg_size (t
, j
? 0 : 1))
1653 if (i
.types
[j
].bitfield
.mem
1654 && !match_mem_size (t
, j
? 0 : 1))
1662 operand_type_match (i386_operand_type overlap
,
1663 i386_operand_type given
)
1665 i386_operand_type temp
= overlap
;
1667 temp
.bitfield
.jumpabsolute
= 0;
1668 temp
.bitfield
.unspecified
= 0;
1669 temp
.bitfield
.byte
= 0;
1670 temp
.bitfield
.word
= 0;
1671 temp
.bitfield
.dword
= 0;
1672 temp
.bitfield
.fword
= 0;
1673 temp
.bitfield
.qword
= 0;
1674 temp
.bitfield
.tbyte
= 0;
1675 temp
.bitfield
.xmmword
= 0;
1676 temp
.bitfield
.ymmword
= 0;
1677 if (operand_type_all_zero (&temp
))
1680 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
1681 && given
.bitfield
.jumpabsolute
== overlap
.bitfield
.jumpabsolute
)
1685 i
.error
= operand_type_mismatch
;
1689 /* If given types g0 and g1 are registers they must be of the same type
1690 unless the expected operand type register overlap is null.
1691 Note that Acc in a template matches every size of reg. */
1694 operand_type_register_match (i386_operand_type m0
,
1695 i386_operand_type g0
,
1696 i386_operand_type t0
,
1697 i386_operand_type m1
,
1698 i386_operand_type g1
,
1699 i386_operand_type t1
)
1701 if (!operand_type_check (g0
, reg
))
1704 if (!operand_type_check (g1
, reg
))
1707 if (g0
.bitfield
.reg8
== g1
.bitfield
.reg8
1708 && g0
.bitfield
.reg16
== g1
.bitfield
.reg16
1709 && g0
.bitfield
.reg32
== g1
.bitfield
.reg32
1710 && g0
.bitfield
.reg64
== g1
.bitfield
.reg64
)
1713 if (m0
.bitfield
.acc
)
1715 t0
.bitfield
.reg8
= 1;
1716 t0
.bitfield
.reg16
= 1;
1717 t0
.bitfield
.reg32
= 1;
1718 t0
.bitfield
.reg64
= 1;
1721 if (m1
.bitfield
.acc
)
1723 t1
.bitfield
.reg8
= 1;
1724 t1
.bitfield
.reg16
= 1;
1725 t1
.bitfield
.reg32
= 1;
1726 t1
.bitfield
.reg64
= 1;
1729 if (!(t0
.bitfield
.reg8
& t1
.bitfield
.reg8
)
1730 && !(t0
.bitfield
.reg16
& t1
.bitfield
.reg16
)
1731 && !(t0
.bitfield
.reg32
& t1
.bitfield
.reg32
)
1732 && !(t0
.bitfield
.reg64
& t1
.bitfield
.reg64
))
1735 i
.error
= register_type_mismatch
;
1740 static INLINE
unsigned int
1741 mode_from_disp_size (i386_operand_type t
)
1743 if (t
.bitfield
.disp8
)
1745 else if (t
.bitfield
.disp16
1746 || t
.bitfield
.disp32
1747 || t
.bitfield
.disp32s
)
1754 fits_in_signed_byte (offsetT num
)
1756 return (num
>= -128) && (num
<= 127);
1760 fits_in_unsigned_byte (offsetT num
)
1762 return (num
& 0xff) == num
;
1766 fits_in_unsigned_word (offsetT num
)
1768 return (num
& 0xffff) == num
;
1772 fits_in_signed_word (offsetT num
)
1774 return (-32768 <= num
) && (num
<= 32767);
1778 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED
)
1783 return (!(((offsetT
) -1 << 31) & num
)
1784 || (((offsetT
) -1 << 31) & num
) == ((offsetT
) -1 << 31));
1786 } /* fits_in_signed_long() */
1789 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED
)
1794 return (num
& (((offsetT
) 2 << 31) - 1)) == num
;
1796 } /* fits_in_unsigned_long() */
1799 fits_in_imm4 (offsetT num
)
1801 return (num
& 0xf) == num
;
1804 static i386_operand_type
1805 smallest_imm_type (offsetT num
)
1807 i386_operand_type t
;
1809 operand_type_set (&t
, 0);
1810 t
.bitfield
.imm64
= 1;
1812 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
1814 /* This code is disabled on the 486 because all the Imm1 forms
1815 in the opcode table are slower on the i486. They're the
1816 versions with the implicitly specified single-position
1817 displacement, which has another syntax if you really want to
1819 t
.bitfield
.imm1
= 1;
1820 t
.bitfield
.imm8
= 1;
1821 t
.bitfield
.imm8s
= 1;
1822 t
.bitfield
.imm16
= 1;
1823 t
.bitfield
.imm32
= 1;
1824 t
.bitfield
.imm32s
= 1;
1826 else if (fits_in_signed_byte (num
))
1828 t
.bitfield
.imm8
= 1;
1829 t
.bitfield
.imm8s
= 1;
1830 t
.bitfield
.imm16
= 1;
1831 t
.bitfield
.imm32
= 1;
1832 t
.bitfield
.imm32s
= 1;
1834 else if (fits_in_unsigned_byte (num
))
1836 t
.bitfield
.imm8
= 1;
1837 t
.bitfield
.imm16
= 1;
1838 t
.bitfield
.imm32
= 1;
1839 t
.bitfield
.imm32s
= 1;
1841 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
1843 t
.bitfield
.imm16
= 1;
1844 t
.bitfield
.imm32
= 1;
1845 t
.bitfield
.imm32s
= 1;
1847 else if (fits_in_signed_long (num
))
1849 t
.bitfield
.imm32
= 1;
1850 t
.bitfield
.imm32s
= 1;
1852 else if (fits_in_unsigned_long (num
))
1853 t
.bitfield
.imm32
= 1;
1859 offset_in_range (offsetT val
, int size
)
1865 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
1866 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
1867 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
1869 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
1875 /* If BFD64, sign extend val for 32bit address mode. */
1876 if (flag_code
!= CODE_64BIT
1877 || i
.prefix
[ADDR_PREFIX
])
1878 if ((val
& ~(((addressT
) 2 << 31) - 1)) == 0)
1879 val
= (val
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
1882 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
1884 char buf1
[40], buf2
[40];
1886 sprint_value (buf1
, val
);
1887 sprint_value (buf2
, val
& mask
);
1888 as_warn (_("%s shortened to %s"), buf1
, buf2
);
1902 a. PREFIX_EXIST if attempting to add a prefix where one from the
1903 same class already exists.
1904 b. PREFIX_LOCK if lock prefix is added.
1905 c. PREFIX_REP if rep/repne prefix is added.
1906 d. PREFIX_OTHER if other prefix is added.
1909 static enum PREFIX_GROUP
1910 add_prefix (unsigned int prefix
)
1912 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
1915 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
1916 && flag_code
== CODE_64BIT
)
1918 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
1919 || ((i
.prefix
[REX_PREFIX
] & (REX_R
| REX_X
| REX_B
))
1920 && (prefix
& (REX_R
| REX_X
| REX_B
))))
1931 case CS_PREFIX_OPCODE
:
1932 case DS_PREFIX_OPCODE
:
1933 case ES_PREFIX_OPCODE
:
1934 case FS_PREFIX_OPCODE
:
1935 case GS_PREFIX_OPCODE
:
1936 case SS_PREFIX_OPCODE
:
1940 case REPNE_PREFIX_OPCODE
:
1941 case REPE_PREFIX_OPCODE
:
1946 case LOCK_PREFIX_OPCODE
:
1955 case ADDR_PREFIX_OPCODE
:
1959 case DATA_PREFIX_OPCODE
:
1963 if (i
.prefix
[q
] != 0)
1971 i
.prefix
[q
] |= prefix
;
1974 as_bad (_("same type of prefix used twice"));
1980 update_code_flag (int value
, int check
)
1982 PRINTF_LIKE ((*as_error
));
1984 flag_code
= (enum flag_code
) value
;
1985 if (flag_code
== CODE_64BIT
)
1987 cpu_arch_flags
.bitfield
.cpu64
= 1;
1988 cpu_arch_flags
.bitfield
.cpuno64
= 0;
1992 cpu_arch_flags
.bitfield
.cpu64
= 0;
1993 cpu_arch_flags
.bitfield
.cpuno64
= 1;
1995 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
1998 as_error
= as_fatal
;
2001 (*as_error
) (_("64bit mode not supported on `%s'."),
2002 cpu_arch_name
? cpu_arch_name
: default_arch
);
2004 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2007 as_error
= as_fatal
;
2010 (*as_error
) (_("32bit mode not supported on `%s'."),
2011 cpu_arch_name
? cpu_arch_name
: default_arch
);
2013 stackop_size
= '\0';
/* Non-checking wrapper around update_code_flag: switch code size and
   let unavailable modes be reported non-fatally.  */
static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2023 set_16bit_gcc_code_flag (int new_code_flag
)
2025 flag_code
= (enum flag_code
) new_code_flag
;
2026 if (flag_code
!= CODE_16BIT
)
2028 cpu_arch_flags
.bitfield
.cpu64
= 0;
2029 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2030 stackop_size
= LONG_MNEM_SUFFIX
;
2034 set_intel_syntax (int syntax_flag
)
2036 /* Find out if register prefixing is specified. */
2037 int ask_naked_reg
= 0;
2040 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2042 char *string
= input_line_pointer
;
2043 int e
= get_symbol_end ();
2045 if (strcmp (string
, "prefix") == 0)
2047 else if (strcmp (string
, "noprefix") == 0)
2050 as_bad (_("bad argument to syntax directive."));
2051 *input_line_pointer
= e
;
2053 demand_empty_rest_of_line ();
2055 intel_syntax
= syntax_flag
;
2057 if (ask_naked_reg
== 0)
2058 allow_naked_reg
= (intel_syntax
2059 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2061 allow_naked_reg
= (ask_naked_reg
< 0);
2063 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2065 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2066 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2067 register_prefix
= allow_naked_reg
? "" : "%";
2071 set_intel_mnemonic (int mnemonic_flag
)
2073 intel_mnemonic
= mnemonic_flag
;
2077 set_allow_index_reg (int flag
)
2079 allow_index_reg
= flag
;
2083 set_sse_check (int dummy ATTRIBUTE_UNUSED
)
2087 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2089 char *string
= input_line_pointer
;
2090 int e
= get_symbol_end ();
2092 if (strcmp (string
, "none") == 0)
2093 sse_check
= sse_check_none
;
2094 else if (strcmp (string
, "warning") == 0)
2095 sse_check
= sse_check_warning
;
2096 else if (strcmp (string
, "error") == 0)
2097 sse_check
= sse_check_error
;
2099 as_bad (_("bad argument to sse_check directive."));
2100 *input_line_pointer
= e
;
2103 as_bad (_("missing argument for sse_check directive"));
2105 demand_empty_rest_of_line ();
2109 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2110 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2112 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2113 static const char *arch
;
2115 /* Intel LIOM is only supported on ELF. */
2121 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2122 use default_arch. */
2123 arch
= cpu_arch_name
;
2125 arch
= default_arch
;
2128 /* If we are targeting Intel L1OM, we must enable it. */
2129 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2130 || new_flag
.bitfield
.cpul1om
)
2133 /* If we are targeting Intel K1OM, we must enable it. */
2134 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2135 || new_flag
.bitfield
.cpuk1om
)
2138 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2143 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2147 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2149 char *string
= input_line_pointer
;
2150 int e
= get_symbol_end ();
2152 i386_cpu_flags flags
;
2154 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2156 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2158 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2162 cpu_arch_name
= cpu_arch
[j
].name
;
2163 cpu_sub_arch_name
= NULL
;
2164 cpu_arch_flags
= cpu_arch
[j
].flags
;
2165 if (flag_code
== CODE_64BIT
)
2167 cpu_arch_flags
.bitfield
.cpu64
= 1;
2168 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2172 cpu_arch_flags
.bitfield
.cpu64
= 0;
2173 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2175 cpu_arch_isa
= cpu_arch
[j
].type
;
2176 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2177 if (!cpu_arch_tune_set
)
2179 cpu_arch_tune
= cpu_arch_isa
;
2180 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2185 if (!cpu_arch
[j
].negated
)
2186 flags
= cpu_flags_or (cpu_arch_flags
,
2189 flags
= cpu_flags_and_not (cpu_arch_flags
,
2191 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2193 if (cpu_sub_arch_name
)
2195 char *name
= cpu_sub_arch_name
;
2196 cpu_sub_arch_name
= concat (name
,
2198 (const char *) NULL
);
2202 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2203 cpu_arch_flags
= flags
;
2204 cpu_arch_isa_flags
= flags
;
2206 *input_line_pointer
= e
;
2207 demand_empty_rest_of_line ();
2211 if (j
>= ARRAY_SIZE (cpu_arch
))
2212 as_bad (_("no such architecture: `%s'"), string
);
2214 *input_line_pointer
= e
;
2217 as_bad (_("missing cpu architecture"));
2219 no_cond_jump_promotion
= 0;
2220 if (*input_line_pointer
== ','
2221 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2223 char *string
= ++input_line_pointer
;
2224 int e
= get_symbol_end ();
2226 if (strcmp (string
, "nojumps") == 0)
2227 no_cond_jump_promotion
= 1;
2228 else if (strcmp (string
, "jumps") == 0)
2231 as_bad (_("no such architecture modifier: `%s'"), string
);
2233 *input_line_pointer
= e
;
2236 demand_empty_rest_of_line ();
2239 enum bfd_architecture
2242 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2244 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2245 || flag_code
!= CODE_64BIT
)
2246 as_fatal (_("Intel L1OM is 64bit ELF only"));
2247 return bfd_arch_l1om
;
2249 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2251 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2252 || flag_code
!= CODE_64BIT
)
2253 as_fatal (_("Intel K1OM is 64bit ELF only"));
2254 return bfd_arch_k1om
;
2257 return bfd_arch_i386
;
2263 if (!strncmp (default_arch
, "x86_64", 6))
2265 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2267 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2268 || default_arch
[6] != '\0')
2269 as_fatal (_("Intel L1OM is 64bit ELF only"));
2270 return bfd_mach_l1om
;
2272 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2274 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2275 || default_arch
[6] != '\0')
2276 as_fatal (_("Intel K1OM is 64bit ELF only"));
2277 return bfd_mach_k1om
;
2279 else if (default_arch
[6] == '\0')
2280 return bfd_mach_x86_64
;
2282 return bfd_mach_x64_32
;
2284 else if (!strcmp (default_arch
, "i386"))
2285 return bfd_mach_i386_i386
;
2287 as_fatal (_("unknown architecture"));
2293 const char *hash_err
;
2295 /* Initialize op_hash hash table. */
2296 op_hash
= hash_new ();
2299 const insn_template
*optab
;
2300 templates
*core_optab
;
2302 /* Setup for loop. */
2304 core_optab
= (templates
*) xmalloc (sizeof (templates
));
2305 core_optab
->start
= optab
;
2310 if (optab
->name
== NULL
2311 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
2313 /* different name --> ship out current template list;
2314 add to hash table; & begin anew. */
2315 core_optab
->end
= optab
;
2316 hash_err
= hash_insert (op_hash
,
2318 (void *) core_optab
);
2321 as_fatal (_("internal Error: Can't hash %s: %s"),
2325 if (optab
->name
== NULL
)
2327 core_optab
= (templates
*) xmalloc (sizeof (templates
));
2328 core_optab
->start
= optab
;
2333 /* Initialize reg_hash hash table. */
2334 reg_hash
= hash_new ();
2336 const reg_entry
*regtab
;
2337 unsigned int regtab_size
= i386_regtab_size
;
2339 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
2341 hash_err
= hash_insert (reg_hash
, regtab
->reg_name
, (void *) regtab
);
2343 as_fatal (_("internal Error: Can't hash %s: %s"),
2349 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2354 for (c
= 0; c
< 256; c
++)
2359 mnemonic_chars
[c
] = c
;
2360 register_chars
[c
] = c
;
2361 operand_chars
[c
] = c
;
2363 else if (ISLOWER (c
))
2365 mnemonic_chars
[c
] = c
;
2366 register_chars
[c
] = c
;
2367 operand_chars
[c
] = c
;
2369 else if (ISUPPER (c
))
2371 mnemonic_chars
[c
] = TOLOWER (c
);
2372 register_chars
[c
] = mnemonic_chars
[c
];
2373 operand_chars
[c
] = c
;
2376 if (ISALPHA (c
) || ISDIGIT (c
))
2377 identifier_chars
[c
] = c
;
2380 identifier_chars
[c
] = c
;
2381 operand_chars
[c
] = c
;
2386 identifier_chars
['@'] = '@';
2389 identifier_chars
['?'] = '?';
2390 operand_chars
['?'] = '?';
2392 digit_chars
['-'] = '-';
2393 mnemonic_chars
['_'] = '_';
2394 mnemonic_chars
['-'] = '-';
2395 mnemonic_chars
['.'] = '.';
2396 identifier_chars
['_'] = '_';
2397 identifier_chars
['.'] = '.';
2399 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
2400 operand_chars
[(unsigned char) *p
] = *p
;
2403 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2406 record_alignment (text_section
, 2);
2407 record_alignment (data_section
, 2);
2408 record_alignment (bss_section
, 2);
2412 if (flag_code
== CODE_64BIT
)
2414 #if defined (OBJ_COFF) && defined (TE_PE)
2415 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
2418 x86_dwarf2_return_column
= 16;
2420 x86_cie_data_alignment
= -8;
2424 x86_dwarf2_return_column
= 8;
2425 x86_cie_data_alignment
= -4;
2430 i386_print_statistics (FILE *file
)
2432 hash_print_statistics (file
, "i386 opcode", op_hash
);
2433 hash_print_statistics (file
, "i386 register", reg_hash
);
2438 /* Debugging routines for md_assemble. */
2439 static void pte (insn_template
*);
2440 static void pt (i386_operand_type
);
2441 static void pe (expressionS
*);
2442 static void ps (symbolS
*);
2445 pi (char *line
, i386_insn
*x
)
2449 fprintf (stdout
, "%s: template ", line
);
2451 fprintf (stdout
, " address: base %s index %s scale %x\n",
2452 x
->base_reg
? x
->base_reg
->reg_name
: "none",
2453 x
->index_reg
? x
->index_reg
->reg_name
: "none",
2454 x
->log2_scale_factor
);
2455 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
2456 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
2457 fprintf (stdout
, " sib: base %x index %x scale %x\n",
2458 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
2459 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
2460 (x
->rex
& REX_W
) != 0,
2461 (x
->rex
& REX_R
) != 0,
2462 (x
->rex
& REX_X
) != 0,
2463 (x
->rex
& REX_B
) != 0);
2464 for (j
= 0; j
< x
->operands
; j
++)
2466 fprintf (stdout
, " #%d: ", j
+ 1);
2468 fprintf (stdout
, "\n");
2469 if (x
->types
[j
].bitfield
.reg8
2470 || x
->types
[j
].bitfield
.reg16
2471 || x
->types
[j
].bitfield
.reg32
2472 || x
->types
[j
].bitfield
.reg64
2473 || x
->types
[j
].bitfield
.regmmx
2474 || x
->types
[j
].bitfield
.regxmm
2475 || x
->types
[j
].bitfield
.regymm
2476 || x
->types
[j
].bitfield
.sreg2
2477 || x
->types
[j
].bitfield
.sreg3
2478 || x
->types
[j
].bitfield
.control
2479 || x
->types
[j
].bitfield
.debug
2480 || x
->types
[j
].bitfield
.test
)
2481 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
2482 if (operand_type_check (x
->types
[j
], imm
))
2484 if (operand_type_check (x
->types
[j
], disp
))
2485 pe (x
->op
[j
].disps
);
2490 pte (insn_template
*t
)
2493 fprintf (stdout
, " %d operands ", t
->operands
);
2494 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
2495 if (t
->extension_opcode
!= None
)
2496 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
2497 if (t
->opcode_modifier
.d
)
2498 fprintf (stdout
, "D");
2499 if (t
->opcode_modifier
.w
)
2500 fprintf (stdout
, "W");
2501 fprintf (stdout
, "\n");
2502 for (j
= 0; j
< t
->operands
; j
++)
2504 fprintf (stdout
, " #%d type ", j
+ 1);
2505 pt (t
->operand_types
[j
]);
2506 fprintf (stdout
, "\n");
2513 fprintf (stdout
, " operation %d\n", e
->X_op
);
2514 fprintf (stdout
, " add_number %ld (%lx)\n",
2515 (long) e
->X_add_number
, (long) e
->X_add_number
);
2516 if (e
->X_add_symbol
)
2518 fprintf (stdout
, " add_symbol ");
2519 ps (e
->X_add_symbol
);
2520 fprintf (stdout
, "\n");
2524 fprintf (stdout
, " op_symbol ");
2525 ps (e
->X_op_symbol
);
2526 fprintf (stdout
, "\n");
2533 fprintf (stdout
, "%s type %s%s",
2535 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
2536 segment_name (S_GET_SEGMENT (s
)));
2539 static struct type_name
2541 i386_operand_type mask
;
2544 const type_names
[] =
2546 { OPERAND_TYPE_REG8
, "r8" },
2547 { OPERAND_TYPE_REG16
, "r16" },
2548 { OPERAND_TYPE_REG32
, "r32" },
2549 { OPERAND_TYPE_REG64
, "r64" },
2550 { OPERAND_TYPE_IMM8
, "i8" },
2551 { OPERAND_TYPE_IMM8
, "i8s" },
2552 { OPERAND_TYPE_IMM16
, "i16" },
2553 { OPERAND_TYPE_IMM32
, "i32" },
2554 { OPERAND_TYPE_IMM32S
, "i32s" },
2555 { OPERAND_TYPE_IMM64
, "i64" },
2556 { OPERAND_TYPE_IMM1
, "i1" },
2557 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
2558 { OPERAND_TYPE_DISP8
, "d8" },
2559 { OPERAND_TYPE_DISP16
, "d16" },
2560 { OPERAND_TYPE_DISP32
, "d32" },
2561 { OPERAND_TYPE_DISP32S
, "d32s" },
2562 { OPERAND_TYPE_DISP64
, "d64" },
2563 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
2564 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
2565 { OPERAND_TYPE_CONTROL
, "control reg" },
2566 { OPERAND_TYPE_TEST
, "test reg" },
2567 { OPERAND_TYPE_DEBUG
, "debug reg" },
2568 { OPERAND_TYPE_FLOATREG
, "FReg" },
2569 { OPERAND_TYPE_FLOATACC
, "FAcc" },
2570 { OPERAND_TYPE_SREG2
, "SReg2" },
2571 { OPERAND_TYPE_SREG3
, "SReg3" },
2572 { OPERAND_TYPE_ACC
, "Acc" },
2573 { OPERAND_TYPE_JUMPABSOLUTE
, "Jump Absolute" },
2574 { OPERAND_TYPE_REGMMX
, "rMMX" },
2575 { OPERAND_TYPE_REGXMM
, "rXMM" },
2576 { OPERAND_TYPE_REGYMM
, "rYMM" },
2577 { OPERAND_TYPE_ESSEG
, "es" },
2581 pt (i386_operand_type t
)
2584 i386_operand_type a
;
2586 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
2588 a
= operand_type_and (t
, type_names
[j
].mask
);
2589 if (!operand_type_all_zero (&a
))
2590 fprintf (stdout
, "%s, ", type_names
[j
].name
);
2595 #endif /* DEBUG386 */
/* Map a relocation request (SIZE in bytes, plus pcrel/sign flags and
   an explicit base reloc OTHER — their declarations are among the
   lines dropped by this extraction) onto a BFD reloc code.  When an
   explicit 64-bit-capable reloc was requested, widen x86-64
   GOT/GOTPC/GOTPCREL/TPOFF/DTPOFF forms as needed, then sanity-check
   the requested reloc against its howto entry (size, pc-relativity,
   signedness) and diagnose mismatches with as_bad.
   NOTE(review): switch headers, braces and the final fallback returns
   are missing from this extraction; code preserved verbatim.  */
2597 static bfd_reloc_code_real_type
2598 reloc (unsigned int size
,
2601 bfd_reloc_code_real_type other
)
2603 if (other
!= NO_RELOC
)
2605 reloc_howto_type
*rel
;
/* Widen explicitly-requested 32-bit x86-64 relocs to their 64-bit
   counterparts where a 64-bit field was asked for.  */
2610 case BFD_RELOC_X86_64_GOT32
:
2611 return BFD_RELOC_X86_64_GOT64
;
2613 case BFD_RELOC_X86_64_PLTOFF64
:
2614 return BFD_RELOC_X86_64_PLTOFF64
;
2616 case BFD_RELOC_X86_64_GOTPC32
:
2617 other
= BFD_RELOC_X86_64_GOTPC64
;
2619 case BFD_RELOC_X86_64_GOTPCREL
:
2620 other
= BFD_RELOC_X86_64_GOTPCREL64
;
2622 case BFD_RELOC_X86_64_TPOFF32
:
2623 other
= BFD_RELOC_X86_64_TPOFF64
;
2625 case BFD_RELOC_X86_64_DTPOFF32
:
2626 other
= BFD_RELOC_X86_64_DTPOFF64
;
2632 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2633 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
/* Validate the requested reloc against its BFD howto entry.  */
2636 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
2638 as_bad (_("unknown relocation (%u)"), other
);
2639 else if (size
!= bfd_get_reloc_size (rel
))
2640 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2641 bfd_get_reloc_size (rel
),
2643 else if (pcrel
&& !rel
->pc_relative
)
2644 as_bad (_("non-pc-relative relocation for pc-relative field"));
2645 else if ((rel
->complain_on_overflow
== complain_overflow_signed
2647 || (rel
->complain_on_overflow
== complain_overflow_unsigned
2649 as_bad (_("relocated field and relocation type differ in signedness"));
2658 as_bad (_("there are no unsigned pc-relative relocations"));
/* No explicit reloc: pick a generic pc-relative reloc by size.  */
2661 case 1: return BFD_RELOC_8_PCREL
;
2662 case 2: return BFD_RELOC_16_PCREL
;
2663 case 4: return BFD_RELOC_32_PCREL
;
2664 case 8: return BFD_RELOC_64_PCREL
;
2666 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
2673 case 4: return BFD_RELOC_X86_64_32S
;
/* Generic absolute relocs by size.  */
2678 case 1: return BFD_RELOC_8
;
2679 case 2: return BFD_RELOC_16
;
2680 case 4: return BFD_RELOC_32
;
2681 case 8: return BFD_RELOC_64
;
2683 as_bad (_("cannot do %s %u byte relocation"),
2684 sign
> 0 ? "signed" : "unsigned", size
);
2690 /* Here we decide which fixups can be adjusted to make them relative to
2691 the beginning of the section instead of the symbol. Basically we need
2692 to make sure that the dynamic relocations are done correctly, so in
2693 some cases we force the original symbol to be used. */
/* Decide whether fixup FIXP may be adjusted to be section-relative
   instead of symbol-relative.  Keeps the original symbol (presumably
   by returning 0 — the return statements are among the lines dropped
   by this extraction) for pc-relative references into SEC_MERGE
   sections under RELA, for the GOT_symbol pcrel trick, and for the
   long list of GOT/PLT/TLS/vtable reloc types below, all of which
   dynamic relocation processing must see against the real symbol.
   NOTE(review): interior lines missing; code preserved verbatim.  */
2696 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
2698 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2702 /* Don't adjust pc-relative references to merge sections in 64-bit
2704 if (use_rela_relocations
2705 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
2709 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2710 and changed later by validate_fix. */
2711 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
2712 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
2715 /* adjust_reloc_syms doesn't know about the GOT. */
2716 if (fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
2717 || fixP
->fx_r_type
== BFD_RELOC_386_PLT32
2718 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
2719 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
2720 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
2721 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
2722 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
2723 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
2724 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
2725 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
2726 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
2727 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
2728 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
2729 || fixP
->fx_r_type
== BFD_RELOC_X86_64_PLT32
2730 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
2731 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
2732 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
2733 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
2734 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
2735 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
2736 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
2737 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
2738 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
2739 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
2740 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
2741 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
2742 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
2743 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
/* Classify MNEMONIC for x87 operand handling.  Per the visible
   returns: 0 = not a math op (non-'f' mnemonics, fxsave/fxrstor),
   2 = integer form (fi*), 3 = control/environment op whose memory
   operand is not a float (fldcw/fldenv, fn* control ops, frstor,
   fsave, fst{c,d,e,s}*).  The remaining (missing) default presumably
   returns 1 for ordinary float ops — dropped by this extraction.
   NOTE(review): case labels and closing braces are missing; code
   preserved verbatim.  */
2750 intel_float_operand (const char *mnemonic
)
2752 /* Note that the value returned is meaningful only for opcodes with (memory)
2753 operands, hence the code here is free to improperly handle opcodes that
2754 have no operands (for better performance and smaller code). */
2756 if (mnemonic
[0] != 'f')
2757 return 0; /* non-math */
2759 switch (mnemonic
[1])
2761 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2762 the fs segment override prefix not currently handled because no
2763 call path can make opcodes without operands get here */
2765 return 2 /* integer op */;
2767 if (mnemonic
[2] == 'd' && (mnemonic
[3] == 'c' || mnemonic
[3] == 'e'))
2768 return 3; /* fldcw/fldenv */
2771 if (mnemonic
[2] != 'o' /* fnop */)
2772 return 3; /* non-waiting control op */
2775 if (mnemonic
[2] == 's')
2776 return 3; /* frstor/frstpm */
2779 if (mnemonic
[2] == 'a')
2780 return 3; /* fsave */
2781 if (mnemonic
[2] == 't')
2783 switch (mnemonic
[3])
2785 case 'c': /* fstcw */
2786 case 'd': /* fstdw */
2787 case 'e': /* fstenv */
2788 case 's': /* fsts[gw] */
2794 if (mnemonic
[2] == 'r' || mnemonic
[2] == 's')
2795 return 0; /* fxsave/fxrstor are not really math ops */
2802 /* Build the VEX prefix. */
2805 build_vex_prefix (const insn_template
*t
)
2807 unsigned int register_specifier
;
2808 unsigned int implied_prefix
;
2809 unsigned int vector_length
;
2811 /* Check register specifier. */
2812 if (i
.vex
.register_specifier
)
2814 register_specifier
= i
.vex
.register_specifier
->reg_num
;
2815 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
2816 register_specifier
+= 8;
2817 register_specifier
= ~register_specifier
& 0xf;
2820 register_specifier
= 0xf;
2822 /* Use 2-byte VEX prefix by swappping destination and source
2825 && i
.operands
== i
.reg_operands
2826 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
2827 && i
.tm
.opcode_modifier
.s
2830 unsigned int xchg
= i
.operands
- 1;
2831 union i386_op temp_op
;
2832 i386_operand_type temp_type
;
2834 temp_type
= i
.types
[xchg
];
2835 i
.types
[xchg
] = i
.types
[0];
2836 i
.types
[0] = temp_type
;
2837 temp_op
= i
.op
[xchg
];
2838 i
.op
[xchg
] = i
.op
[0];
2841 gas_assert (i
.rm
.mode
== 3);
2845 i
.rm
.regmem
= i
.rm
.reg
;
2848 /* Use the next insn. */
2852 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
2853 vector_length
= avxscalar
;
2855 vector_length
= i
.tm
.opcode_modifier
.vex
== VEX256
? 1 : 0;
2857 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
2862 case DATA_PREFIX_OPCODE
:
2865 case REPE_PREFIX_OPCODE
:
2868 case REPNE_PREFIX_OPCODE
:
2875 /* Use 2-byte VEX prefix if possible. */
2876 if (i
.tm
.opcode_modifier
.vexopcode
== VEX0F
2877 && i
.tm
.opcode_modifier
.vexw
!= VEXW1
2878 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
2880 /* 2-byte VEX prefix. */
2884 i
.vex
.bytes
[0] = 0xc5;
2886 /* Check the REX.R bit. */
2887 r
= (i
.rex
& REX_R
) ? 0 : 1;
2888 i
.vex
.bytes
[1] = (r
<< 7
2889 | register_specifier
<< 3
2890 | vector_length
<< 2
2895 /* 3-byte VEX prefix. */
2900 switch (i
.tm
.opcode_modifier
.vexopcode
)
2904 i
.vex
.bytes
[0] = 0xc4;
2908 i
.vex
.bytes
[0] = 0xc4;
2912 i
.vex
.bytes
[0] = 0xc4;
2916 i
.vex
.bytes
[0] = 0x8f;
2920 i
.vex
.bytes
[0] = 0x8f;
2924 i
.vex
.bytes
[0] = 0x8f;
2930 /* The high 3 bits of the second VEX byte are 1's compliment
2931 of RXB bits from REX. */
2932 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
2934 /* Check the REX.W bit. */
2935 w
= (i
.rex
& REX_W
) ? 1 : 0;
2936 if (i
.tm
.opcode_modifier
.vexw
)
2941 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
2945 i
.vex
.bytes
[2] = (w
<< 7
2946 | register_specifier
<< 3
2947 | vector_length
<< 2
/* Turn the template's extension_opcode into a fake 8-bit immediate
   operand for 3DNow!/SSE-style encodings where the opcode suffix sits
   in the immediate field.  For SSE3 (monitor/mwait-style) templates,
   first verify the fixed register operands are the expected sequence
   (reg_num == operand position) and diagnose otherwise; the lines
   that actually strip those operands were dropped by this extraction.
   NOTE(review): code lines preserved verbatim.  */
2953 process_immext (void)
2957 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
&& i
.operands
> 0)
2959 /* SSE3 Instructions have the fixed operands with an opcode
2960 suffix which is coded in the same place as an 8-bit immediate
2961 field would be. Here we check those operands and remove them
2965 for (x
= 0; x
< i
.operands
; x
++)
2966 if (i
.op
[x
].regs
->reg_num
!= x
)
2967 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2968 register_prefix
, i
.op
[x
].regs
->reg_name
, x
+ 1,
2974 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2975 which is coded in the same place as an 8-bit immediate field
2976 would be. Here we fake an 8-bit immediate operand from the
2977 opcode suffix stored in tm.extension_opcode.
2979 AVX instructions also use this encoding, for some of
2980 3 argument instructions. */
2982 gas_assert (i
.imm_operands
== 0
2984 || (i
.tm
.opcode_modifier
.vex
2985 && i
.operands
<= 4)));
/* Append the faked imm8 operand holding the extension opcode and
   clear extension_opcode so it is not applied twice.  */
2987 exp
= &im_expressions
[i
.imm_operands
++];
2988 i
.op
[i
.operands
].imms
= exp
;
2989 i
.types
[i
.operands
] = imm8
;
2991 exp
->X_op
= O_constant
;
2992 exp
->X_add_number
= i
.tm
.extension_opcode
;
2993 i
.tm
.extension_opcode
= None
;
2996 /* This is the guts of the machine-dependent assembler. LINE points to a
2997 machine dependent instruction. This function is supposed to emit
2998 the frags/bytes it assembles to. */
3001 md_assemble (char *line
)
3004 char mnemonic
[MAX_MNEM_SIZE
];
3005 const insn_template
*t
;
3007 /* Initialize globals. */
3008 memset (&i
, '\0', sizeof (i
));
3009 for (j
= 0; j
< MAX_OPERANDS
; j
++)
3010 i
.reloc
[j
] = NO_RELOC
;
3011 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
3012 memset (im_expressions
, '\0', sizeof (im_expressions
));
3013 save_stack_p
= save_stack
;
3015 /* First parse an instruction mnemonic & call i386_operand for the operands.
3016 We assume that the scrubber has arranged it so that line[0] is the valid
3017 start of a (possibly prefixed) mnemonic. */
3019 line
= parse_insn (line
, mnemonic
);
3023 line
= parse_operands (line
, mnemonic
);
3028 /* Now we've parsed the mnemonic into a set of templates, and have the
3029 operands at hand. */
3031 /* All intel opcodes have reversed operands except for "bound" and
3032 "enter". We also don't reverse intersegment "jmp" and "call"
3033 instructions with 2 immediate operands so that the immediate segment
3034 precedes the offset, as it does when in AT&T mode. */
3037 && (strcmp (mnemonic
, "bound") != 0)
3038 && (strcmp (mnemonic
, "invlpga") != 0)
3039 && !(operand_type_check (i
.types
[0], imm
)
3040 && operand_type_check (i
.types
[1], imm
)))
3043 /* The order of the immediates should be reversed
3044 for 2 immediates extrq and insertq instructions */
3045 if (i
.imm_operands
== 2
3046 && (strcmp (mnemonic
, "extrq") == 0
3047 || strcmp (mnemonic
, "insertq") == 0))
3048 swap_2_operands (0, 1);
3053 /* Don't optimize displacement for movabs since it only takes 64bit
3056 && !i
.disp32_encoding
3057 && (flag_code
!= CODE_64BIT
3058 || strcmp (mnemonic
, "movabs") != 0))
3061 /* Next, we find a template that matches the given insn,
3062 making sure the overlap of the given operands types is consistent
3063 with the template operand types. */
3065 if (!(t
= match_template ()))
3068 if (sse_check
!= sse_check_none
3069 && !i
.tm
.opcode_modifier
.noavx
3070 && (i
.tm
.cpu_flags
.bitfield
.cpusse
3071 || i
.tm
.cpu_flags
.bitfield
.cpusse2
3072 || i
.tm
.cpu_flags
.bitfield
.cpusse3
3073 || i
.tm
.cpu_flags
.bitfield
.cpussse3
3074 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
3075 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
))
3077 (sse_check
== sse_check_warning
3079 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
3082 /* Zap movzx and movsx suffix. The suffix has been set from
3083 "word ptr" or "byte ptr" on the source operand in Intel syntax
3084 or extracted from mnemonic in AT&T syntax. But we'll use
3085 the destination register to choose the suffix for encoding. */
3086 if ((i
.tm
.base_opcode
& ~9) == 0x0fb6)
3088 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3089 there is no suffix, the default will be byte extension. */
3090 if (i
.reg_operands
!= 2
3093 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
3098 if (i
.tm
.opcode_modifier
.fwait
)
3099 if (!add_prefix (FWAIT_OPCODE
))
3102 /* Check for lock without a lockable instruction. Destination operand
3103 must be memory unless it is xchg (0x86). */
3104 if (i
.prefix
[LOCK_PREFIX
]
3105 && (!i
.tm
.opcode_modifier
.islockable
3106 || i
.mem_operands
== 0
3107 || (i
.tm
.base_opcode
!= 0x86
3108 && !operand_type_check (i
.types
[i
.operands
- 1], anymem
))))
3110 as_bad (_("expecting lockable instruction after `lock'"));
3114 /* Check string instruction segment overrides. */
3115 if (i
.tm
.opcode_modifier
.isstring
&& i
.mem_operands
!= 0)
3117 if (!check_string ())
3119 i
.disp_operands
= 0;
3122 if (!process_suffix ())
3125 /* Update operand types. */
3126 for (j
= 0; j
< i
.operands
; j
++)
3127 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
3129 /* Make still unresolved immediate matches conform to size of immediate
3130 given in i.suffix. */
3131 if (!finalize_imm ())
3134 if (i
.types
[0].bitfield
.imm1
)
3135 i
.imm_operands
= 0; /* kludge for shift insns. */
3137 /* We only need to check those implicit registers for instructions
3138 with 3 operands or less. */
3139 if (i
.operands
<= 3)
3140 for (j
= 0; j
< i
.operands
; j
++)
3141 if (i
.types
[j
].bitfield
.inoutportreg
3142 || i
.types
[j
].bitfield
.shiftcount
3143 || i
.types
[j
].bitfield
.acc
3144 || i
.types
[j
].bitfield
.floatacc
)
3147 /* ImmExt should be processed after SSE2AVX. */
3148 if (!i
.tm
.opcode_modifier
.sse2avx
3149 && i
.tm
.opcode_modifier
.immext
)
3152 /* For insns with operands there are more diddles to do to the opcode. */
3155 if (!process_operands ())
3158 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
3160 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3161 as_warn (_("translating to `%sp'"), i
.tm
.name
);
3164 if (i
.tm
.opcode_modifier
.vex
)
3165 build_vex_prefix (t
);
3167 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3168 instructions may define INT_OPCODE as well, so avoid this corner
3169 case for those instructions that use MODRM. */
3170 if (i
.tm
.base_opcode
== INT_OPCODE
3171 && !i
.tm
.opcode_modifier
.modrm
3172 && i
.op
[0].imms
->X_add_number
== 3)
3174 i
.tm
.base_opcode
= INT3_OPCODE
;
3178 if ((i
.tm
.opcode_modifier
.jump
3179 || i
.tm
.opcode_modifier
.jumpbyte
3180 || i
.tm
.opcode_modifier
.jumpdword
)
3181 && i
.op
[0].disps
->X_op
== O_constant
)
3183 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3184 the absolute address given by the constant. Since ix86 jumps and
3185 calls are pc relative, we need to generate a reloc. */
3186 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
3187 i
.op
[0].disps
->X_op
= O_symbol
;
3190 if (i
.tm
.opcode_modifier
.rex64
)
3193 /* For 8 bit registers we need an empty rex prefix. Also if the
3194 instruction already has a prefix, we need to convert old
3195 registers to new ones. */
3197 if ((i
.types
[0].bitfield
.reg8
3198 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
3199 || (i
.types
[1].bitfield
.reg8
3200 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
3201 || ((i
.types
[0].bitfield
.reg8
3202 || i
.types
[1].bitfield
.reg8
)
3207 i
.rex
|= REX_OPCODE
;
3208 for (x
= 0; x
< 2; x
++)
3210 /* Look for 8 bit operand that uses old registers. */
3211 if (i
.types
[x
].bitfield
.reg8
3212 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
3214 /* In case it is "hi" register, give up. */
3215 if (i
.op
[x
].regs
->reg_num
> 3)
3216 as_bad (_("can't encode register '%s%s' in an "
3217 "instruction requiring REX prefix."),
3218 register_prefix
, i
.op
[x
].regs
->reg_name
);
3220 /* Otherwise it is equivalent to the extended register.
3221 Since the encoding doesn't change this is merely
3222 cosmetic cleanup for debug output. */
3224 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
3230 add_prefix (REX_OPCODE
| i
.rex
);
3232 /* We are ready to output the insn. */
3237 parse_insn (char *line
, char *mnemonic
)
3240 char *token_start
= l
;
3243 const insn_template
*t
;
3246 /* Non-zero if we found a prefix only acceptable with string insns. */
3247 const char *expecting_string_instruction
= NULL
;
3252 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
3257 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
3259 as_bad (_("no such instruction: `%s'"), token_start
);
3264 if (!is_space_char (*l
)
3265 && *l
!= END_OF_INSN
3267 || (*l
!= PREFIX_SEPARATOR
3270 as_bad (_("invalid character %s in mnemonic"),
3271 output_invalid (*l
));
3274 if (token_start
== l
)
3276 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
3277 as_bad (_("expecting prefix; got nothing"));
3279 as_bad (_("expecting mnemonic; got nothing"));
3283 /* Look up instruction (or prefix) via hash table. */
3284 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
3286 if (*l
!= END_OF_INSN
3287 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
3288 && current_templates
3289 && current_templates
->start
->opcode_modifier
.isprefix
)
3291 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
3293 as_bad ((flag_code
!= CODE_64BIT
3294 ? _("`%s' is only supported in 64-bit mode")
3295 : _("`%s' is not supported in 64-bit mode")),
3296 current_templates
->start
->name
);
3299 /* If we are in 16-bit mode, do not allow addr16 or data16.
3300 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3301 if ((current_templates
->start
->opcode_modifier
.size16
3302 || current_templates
->start
->opcode_modifier
.size32
)
3303 && flag_code
!= CODE_64BIT
3304 && (current_templates
->start
->opcode_modifier
.size32
3305 ^ (flag_code
== CODE_16BIT
)))
3307 as_bad (_("redundant %s prefix"),
3308 current_templates
->start
->name
);
3311 /* Add prefix, checking for repeated prefixes. */
3312 switch (add_prefix (current_templates
->start
->base_opcode
))
3317 expecting_string_instruction
= current_templates
->start
->name
;
3322 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3329 if (!current_templates
)
3331 /* Check if we should swap operand or force 32bit displacement in
3333 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
3335 else if (mnem_p
- 4 == dot_p
3339 i
.disp32_encoding
= 1;
3344 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
3347 if (!current_templates
)
3350 /* See if we can get a match by trimming off a suffix. */
3353 case WORD_MNEM_SUFFIX
:
3354 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
3355 i
.suffix
= SHORT_MNEM_SUFFIX
;
3357 case BYTE_MNEM_SUFFIX
:
3358 case QWORD_MNEM_SUFFIX
:
3359 i
.suffix
= mnem_p
[-1];
3361 current_templates
= (const templates
*) hash_find (op_hash
,
3364 case SHORT_MNEM_SUFFIX
:
3365 case LONG_MNEM_SUFFIX
:
3368 i
.suffix
= mnem_p
[-1];
3370 current_templates
= (const templates
*) hash_find (op_hash
,
3379 if (intel_float_operand (mnemonic
) == 1)
3380 i
.suffix
= SHORT_MNEM_SUFFIX
;
3382 i
.suffix
= LONG_MNEM_SUFFIX
;
3384 current_templates
= (const templates
*) hash_find (op_hash
,
3389 if (!current_templates
)
3391 as_bad (_("no such instruction: `%s'"), token_start
);
3396 if (current_templates
->start
->opcode_modifier
.jump
3397 || current_templates
->start
->opcode_modifier
.jumpbyte
)
3399 /* Check for a branch hint. We allow ",pt" and ",pn" for
3400 predict taken and predict not taken respectively.
3401 I'm not sure that branch hints actually do anything on loop
3402 and jcxz insns (JumpByte) for current Pentium4 chips. They
3403 may work in the future and it doesn't hurt to accept them
3405 if (l
[0] == ',' && l
[1] == 'p')
3409 if (!add_prefix (DS_PREFIX_OPCODE
))
3413 else if (l
[2] == 'n')
3415 if (!add_prefix (CS_PREFIX_OPCODE
))
3421 /* Any other comma loses. */
3424 as_bad (_("invalid character %s in mnemonic"),
3425 output_invalid (*l
));
3429 /* Check if instruction is supported on specified architecture. */
3431 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
3433 supported
|= cpu_flags_match (t
);
3434 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
3438 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
3440 as_bad (flag_code
== CODE_64BIT
3441 ? _("`%s' is not supported in 64-bit mode")
3442 : _("`%s' is only supported in 64-bit mode"),
3443 current_templates
->start
->name
);
3446 if (supported
!= CPU_FLAGS_PERFECT_MATCH
)
3448 as_bad (_("`%s' is not supported on `%s%s'"),
3449 current_templates
->start
->name
,
3450 cpu_arch_name
? cpu_arch_name
: default_arch
,
3451 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
3456 if (!cpu_arch_flags
.bitfield
.cpui386
3457 && (flag_code
!= CODE_16BIT
))
3459 as_warn (_("use .code16 to ensure correct addressing mode"));
3462 /* Check for rep/repne without a string instruction. */
3463 if (expecting_string_instruction
)
3465 static templates override
;
3467 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
3468 if (t
->opcode_modifier
.isstring
)
3470 if (t
>= current_templates
->end
)
3472 as_bad (_("expecting string instruction after `%s'"),
3473 expecting_string_instruction
);
3476 for (override
.start
= t
; t
< current_templates
->end
; ++t
)
3477 if (!t
->opcode_modifier
.isstring
)
3480 current_templates
= &override
;
/* Parse the operand list starting at L for instruction MNEMONIC.
   Splits at top-level commas (tracking parenthesis/bracket nesting
   in paren_not_balanced), hands each token to i386_intel_operand or
   i386_att_operand, and records results in the global `i' insn
   state.  Returns the updated scan pointer to md_assemble (which
   does `line = parse_operands (line, mnemonic)'); the error-return
   lines were dropped by this extraction.
   NOTE(review): code lines preserved verbatim.  */
3487 parse_operands (char *l
, const char *mnemonic
)
3491 /* 1 if operand is pending after ','. */
3492 unsigned int expecting_operand
= 0;
3494 /* Non-zero if operand parens not balanced. */
3495 unsigned int paren_not_balanced
;
3497 while (*l
!= END_OF_INSN
)
3499 /* Skip optional white space before operand. */
3500 if (is_space_char (*l
))
3502 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
)
3504 as_bad (_("invalid character %s before operand %d"),
3505 output_invalid (*l
),
3509 token_start
= l
; /* after white space */
3510 paren_not_balanced
= 0;
3511 while (paren_not_balanced
|| *l
!= ',')
3513 if (*l
== END_OF_INSN
)
3515 if (paren_not_balanced
)
3518 as_bad (_("unbalanced parenthesis in operand %d."),
3521 as_bad (_("unbalanced brackets in operand %d."),
3526 break; /* we are done */
3528 else if (!is_operand_char (*l
) && !is_space_char (*l
))
3530 as_bad (_("invalid character %s in operand %d"),
3531 output_invalid (*l
),
/* Nesting bookkeeping for '(' ')' and '[' ']' (the character tests
   themselves are among the dropped lines).  */
3538 ++paren_not_balanced
;
3540 --paren_not_balanced
;
3545 ++paren_not_balanced
;
3547 --paren_not_balanced
;
3551 if (l
!= token_start
)
3552 { /* Yes, we've read in another operand. */
3553 unsigned int operand_ok
;
3554 this_operand
= i
.operands
++;
3555 i
.types
[this_operand
].bitfield
.unspecified
= 1;
3556 if (i
.operands
> MAX_OPERANDS
)
3558 as_bad (_("spurious operands; (%d operands/instruction max)"),
3562 /* Now parse operand adding info to 'i' as we go along. */
3563 END_STRING_AND_SAVE (l
);
3567 i386_intel_operand (token_start
,
3568 intel_float_operand (mnemonic
));
3570 operand_ok
= i386_att_operand (token_start
);
3572 RESTORE_END_STRING (l
);
3578 if (expecting_operand
)
3580 expecting_operand_after_comma
:
3581 as_bad (_("expecting operand after ','; got nothing"));
3586 as_bad (_("expecting operand before ','; got nothing"));
3591 /* Now *l must be either ',' or END_OF_INSN. */
3594 if (*++l
== END_OF_INSN
)
3596 /* Just skip it, if it's \n complain. */
3597 goto expecting_operand_after_comma
;
3599 expecting_operand
= 1;
/* Exchange operands XCHG1 and XCHG2 of the instruction being built:
   swaps the corresponding entries of i.types[], i.op[] and
   i.reloc[].  NOTE(review): braces dropped by this extraction; code
   preserved verbatim.  */
3606 swap_2_operands (int xchg1
, int xchg2
)
3608 union i386_op temp_op
;
3609 i386_operand_type temp_type
;
3610 enum bfd_reloc_code_real temp_reloc
;
3612 temp_type
= i
.types
[xchg2
];
3613 i
.types
[xchg2
] = i
.types
[xchg1
];
3614 i
.types
[xchg1
] = temp_type
;
3615 temp_op
= i
.op
[xchg2
];
3616 i
.op
[xchg2
] = i
.op
[xchg1
];
3617 i
.op
[xchg1
] = temp_op
;
3618 temp_reloc
= i
.reloc
[xchg2
];
3619 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
3620 i
.reloc
[xchg1
] = temp_reloc
;
/* Reverse the order of the instruction's operands via pairwise
   swap_2_operands calls on the outer operand pairs (the switch on
   i.operands that selects which swaps run was dropped by this
   extraction), then, when there are two memory operands, swap the
   two segment overrides to follow their operands.  */
3624 swap_operands (void)
3630 swap_2_operands (1, i
.operands
- 2);
3633 swap_2_operands (0, i
.operands
- 1);
3639 if (i
.mem_operands
== 2)
3641 const seg_entry
*temp_seg
;
3642 temp_seg
= i
.seg
[0];
3643 i
.seg
[0] = i
.seg
[1];
3644 i
.seg
[1] = temp_seg
;
3648 /* Try to ensure constant immediates are represented in the smallest
3653 char guess_suffix
= 0;
3657 guess_suffix
= i
.suffix
;
3658 else if (i
.reg_operands
)
3660 /* Figure out a suffix from the last register operand specified.
3661 We can't do this properly yet, ie. excluding InOutPortReg,
3662 but the following works for instructions with immediates.
3663 In any case, we can't set i.suffix yet. */
3664 for (op
= i
.operands
; --op
>= 0;)
3665 if (i
.types
[op
].bitfield
.reg8
)
3667 guess_suffix
= BYTE_MNEM_SUFFIX
;
3670 else if (i
.types
[op
].bitfield
.reg16
)
3672 guess_suffix
= WORD_MNEM_SUFFIX
;
3675 else if (i
.types
[op
].bitfield
.reg32
)
3677 guess_suffix
= LONG_MNEM_SUFFIX
;
3680 else if (i
.types
[op
].bitfield
.reg64
)
3682 guess_suffix
= QWORD_MNEM_SUFFIX
;
3686 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
3687 guess_suffix
= WORD_MNEM_SUFFIX
;
3689 for (op
= i
.operands
; --op
>= 0;)
3690 if (operand_type_check (i
.types
[op
], imm
))
3692 switch (i
.op
[op
].imms
->X_op
)
3695 /* If a suffix is given, this operand may be shortened. */
3696 switch (guess_suffix
)
3698 case LONG_MNEM_SUFFIX
:
3699 i
.types
[op
].bitfield
.imm32
= 1;
3700 i
.types
[op
].bitfield
.imm64
= 1;
3702 case WORD_MNEM_SUFFIX
:
3703 i
.types
[op
].bitfield
.imm16
= 1;
3704 i
.types
[op
].bitfield
.imm32
= 1;
3705 i
.types
[op
].bitfield
.imm32s
= 1;
3706 i
.types
[op
].bitfield
.imm64
= 1;
3708 case BYTE_MNEM_SUFFIX
:
3709 i
.types
[op
].bitfield
.imm8
= 1;
3710 i
.types
[op
].bitfield
.imm8s
= 1;
3711 i
.types
[op
].bitfield
.imm16
= 1;
3712 i
.types
[op
].bitfield
.imm32
= 1;
3713 i
.types
[op
].bitfield
.imm32s
= 1;
3714 i
.types
[op
].bitfield
.imm64
= 1;
3718 /* If this operand is at most 16 bits, convert it
3719 to a signed 16 bit number before trying to see
3720 whether it will fit in an even smaller size.
3721 This allows a 16-bit operand such as $0xffe0 to
3722 be recognised as within Imm8S range. */
3723 if ((i
.types
[op
].bitfield
.imm16
)
3724 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
3726 i
.op
[op
].imms
->X_add_number
=
3727 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
3729 if ((i
.types
[op
].bitfield
.imm32
)
3730 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
3733 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
3734 ^ ((offsetT
) 1 << 31))
3735 - ((offsetT
) 1 << 31));
3738 = operand_type_or (i
.types
[op
],
3739 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
3741 /* We must avoid matching of Imm32 templates when 64bit
3742 only immediate is available. */
3743 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
3744 i
.types
[op
].bitfield
.imm32
= 0;
3751 /* Symbols and expressions. */
3753 /* Convert symbolic operand to proper sizes for matching, but don't
3754 prevent matching a set of insns that only supports sizes other
3755 than those matching the insn suffix. */
3757 i386_operand_type mask
, allowed
;
3758 const insn_template
*t
;
3760 operand_type_set (&mask
, 0);
3761 operand_type_set (&allowed
, 0);
3763 for (t
= current_templates
->start
;
3764 t
< current_templates
->end
;
3766 allowed
= operand_type_or (allowed
,
3767 t
->operand_types
[op
]);
3768 switch (guess_suffix
)
3770 case QWORD_MNEM_SUFFIX
:
3771 mask
.bitfield
.imm64
= 1;
3772 mask
.bitfield
.imm32s
= 1;
3774 case LONG_MNEM_SUFFIX
:
3775 mask
.bitfield
.imm32
= 1;
3777 case WORD_MNEM_SUFFIX
:
3778 mask
.bitfield
.imm16
= 1;
3780 case BYTE_MNEM_SUFFIX
:
3781 mask
.bitfield
.imm8
= 1;
3786 allowed
= operand_type_and (mask
, allowed
);
3787 if (!operand_type_all_zero (&allowed
))
3788 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
3795 /* Try to use the smallest displacement type too. */
3797 optimize_disp (void)
3801 for (op
= i
.operands
; --op
>= 0;)
3802 if (operand_type_check (i
.types
[op
], disp
))
3804 if (i
.op
[op
].disps
->X_op
== O_constant
)
3806 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
3808 if (i
.types
[op
].bitfield
.disp16
3809 && (op_disp
& ~(offsetT
) 0xffff) == 0)
3811 /* If this operand is at most 16 bits, convert
3812 to a signed 16 bit number and don't use 64bit
3814 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
3815 i
.types
[op
].bitfield
.disp64
= 0;
3817 if (i
.types
[op
].bitfield
.disp32
3818 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
3820 /* If this operand is at most 32 bits, convert
3821 to a signed 32 bit number and don't use 64bit
3823 op_disp
&= (((offsetT
) 2 << 31) - 1);
3824 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
3825 i
.types
[op
].bitfield
.disp64
= 0;
3827 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
3829 i
.types
[op
].bitfield
.disp8
= 0;
3830 i
.types
[op
].bitfield
.disp16
= 0;
3831 i
.types
[op
].bitfield
.disp32
= 0;
3832 i
.types
[op
].bitfield
.disp32s
= 0;
3833 i
.types
[op
].bitfield
.disp64
= 0;
3837 else if (flag_code
== CODE_64BIT
)
3839 if (fits_in_signed_long (op_disp
))
3841 i
.types
[op
].bitfield
.disp64
= 0;
3842 i
.types
[op
].bitfield
.disp32s
= 1;
3844 if (i
.prefix
[ADDR_PREFIX
]
3845 && fits_in_unsigned_long (op_disp
))
3846 i
.types
[op
].bitfield
.disp32
= 1;
3848 if ((i
.types
[op
].bitfield
.disp32
3849 || i
.types
[op
].bitfield
.disp32s
3850 || i
.types
[op
].bitfield
.disp16
)
3851 && fits_in_signed_byte (op_disp
))
3852 i
.types
[op
].bitfield
.disp8
= 1;
3854 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
3855 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
3857 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
3858 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
3859 i
.types
[op
].bitfield
.disp8
= 0;
3860 i
.types
[op
].bitfield
.disp16
= 0;
3861 i
.types
[op
].bitfield
.disp32
= 0;
3862 i
.types
[op
].bitfield
.disp32s
= 0;
3863 i
.types
[op
].bitfield
.disp64
= 0;
3866 /* We only support 64bit displacement on constants. */
3867 i
.types
[op
].bitfield
.disp64
= 0;
3871 /* Check if operands are valid for the instruction. */
/* Validate vector-addressing constraints for template T: a vector
   (xmm/ymm) index register is rejected when T has no VSIB byte; with
   a VSIB byte the index must be xmm (VecSIB128) or ymm (VecSIB256)
   and the base must not be RIP.  On violation the match-failure
   reason is recorded in i.error (the accompanying return statements
   were dropped by this extraction).  */
3874 check_VecOperands (const insn_template
*t
)
3876 /* Without VSIB byte, we can't have a vector register for index. */
3877 if (!t
->opcode_modifier
.vecsib
3879 && (i
.index_reg
->reg_type
.bitfield
.regxmm
3880 || i
.index_reg
->reg_type
.bitfield
.regymm
))
3882 i
.error
= unsupported_vector_index_register
;
3886 /* For VSIB byte, we need a vector register for index and no PC
3887 relative addressing is allowed. */
3888 if (t
->opcode_modifier
.vecsib
3890 || !((t
->opcode_modifier
.vecsib
== VecSIB128
3891 && i
.index_reg
->reg_type
.bitfield
.regxmm
)
3892 || (t
->opcode_modifier
.vecsib
== VecSIB256
3893 && i
.index_reg
->reg_type
.bitfield
.regymm
))
3894 || (i
.base_reg
&& i
.base_reg
->reg_num
== RegRip
)))
3896 i
.error
= invalid_vsib_address
;
3903 /* Check if operands are valid for the instruction. Update VEX
/* For VEX templates whose first operand type is Vec_Imm4, require
   that operand 0 be a constant fitting in 4 bits (fits_in_imm4),
   then retype it to vec_imm4 so update_imm will not complain about
   Imm8.  Non-VEX templates pass through untouched.  */
3907 VEX_check_operands (const insn_template
*t
)
3909 if (!t
->opcode_modifier
.vex
)
3912 /* Only check VEX_Imm4, which must be the first operand. */
3913 if (t
->operand_types
[0].bitfield
.vec_imm4
)
3915 if (i
.op
[0].imms
->X_op
!= O_constant
3916 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
3922 /* Turn off Imm8 so that update_imm won't complain. */
3923 i
.types
[0] = vec_imm4
;
3929 static const insn_template
*
3930 match_template (void)
3932 /* Points to template once we've found it. */
3933 const insn_template
*t
;
3934 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
3935 i386_operand_type overlap4
;
3936 unsigned int found_reverse_match
;
3937 i386_opcode_modifier suffix_check
;
3938 i386_operand_type operand_types
[MAX_OPERANDS
];
3939 int addr_prefix_disp
;
3941 unsigned int found_cpu_match
;
3942 unsigned int check_register
;
3944 #if MAX_OPERANDS != 5
3945 # error "MAX_OPERANDS must be 5."
3948 found_reverse_match
= 0;
3949 addr_prefix_disp
= -1;
3951 memset (&suffix_check
, 0, sizeof (suffix_check
));
3952 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
3953 suffix_check
.no_bsuf
= 1;
3954 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
3955 suffix_check
.no_wsuf
= 1;
3956 else if (i
.suffix
== SHORT_MNEM_SUFFIX
)
3957 suffix_check
.no_ssuf
= 1;
3958 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
3959 suffix_check
.no_lsuf
= 1;
3960 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
3961 suffix_check
.no_qsuf
= 1;
3962 else if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
3963 suffix_check
.no_ldsuf
= 1;
3965 /* Must have right number of operands. */
3966 i
.error
= number_of_operands_mismatch
;
3968 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
3970 addr_prefix_disp
= -1;
3972 if (i
.operands
!= t
->operands
)
3975 /* Check processor support. */
3976 i
.error
= unsupported
;
3977 found_cpu_match
= (cpu_flags_match (t
)
3978 == CPU_FLAGS_PERFECT_MATCH
);
3979 if (!found_cpu_match
)
3982 /* Check old gcc support. */
3983 i
.error
= old_gcc_only
;
3984 if (!old_gcc
&& t
->opcode_modifier
.oldgcc
)
3987 /* Check AT&T mnemonic. */
3988 i
.error
= unsupported_with_intel_mnemonic
;
3989 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
3992 /* Check AT&T/Intel syntax. */
3993 i
.error
= unsupported_syntax
;
3994 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
3995 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
3998 /* Check the suffix, except for some instructions in intel mode. */
3999 i
.error
= invalid_instruction_suffix
;
4000 if ((!intel_syntax
|| !t
->opcode_modifier
.ignoresize
)
4001 && ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
4002 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
4003 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
4004 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
4005 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
4006 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
)))
4009 if (!operand_size_match (t
))
4012 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4013 operand_types
[j
] = t
->operand_types
[j
];
4015 /* In general, don't allow 64-bit operands in 32-bit mode. */
4016 if (i
.suffix
== QWORD_MNEM_SUFFIX
4017 && flag_code
!= CODE_64BIT
4019 ? (!t
->opcode_modifier
.ignoresize
4020 && !intel_float_operand (t
->name
))
4021 : intel_float_operand (t
->name
) != 2)
4022 && ((!operand_types
[0].bitfield
.regmmx
4023 && !operand_types
[0].bitfield
.regxmm
4024 && !operand_types
[0].bitfield
.regymm
)
4025 || (!operand_types
[t
->operands
> 1].bitfield
.regmmx
4026 && !!operand_types
[t
->operands
> 1].bitfield
.regxmm
4027 && !!operand_types
[t
->operands
> 1].bitfield
.regymm
))
4028 && (t
->base_opcode
!= 0x0fc7
4029 || t
->extension_opcode
!= 1 /* cmpxchg8b */))
4032 /* In general, don't allow 32-bit operands on pre-386. */
4033 else if (i
.suffix
== LONG_MNEM_SUFFIX
4034 && !cpu_arch_flags
.bitfield
.cpui386
4036 ? (!t
->opcode_modifier
.ignoresize
4037 && !intel_float_operand (t
->name
))
4038 : intel_float_operand (t
->name
) != 2)
4039 && ((!operand_types
[0].bitfield
.regmmx
4040 && !operand_types
[0].bitfield
.regxmm
)
4041 || (!operand_types
[t
->operands
> 1].bitfield
.regmmx
4042 && !!operand_types
[t
->operands
> 1].bitfield
.regxmm
)))
4045 /* Do not verify operands when there are none. */
4049 /* We've found a match; break out of loop. */
4053 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4054 into Disp32/Disp16/Disp32 operand. */
4055 if (i
.prefix
[ADDR_PREFIX
] != 0)
4057 /* There should be only one Disp operand. */
4061 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4063 if (operand_types
[j
].bitfield
.disp16
)
4065 addr_prefix_disp
= j
;
4066 operand_types
[j
].bitfield
.disp32
= 1;
4067 operand_types
[j
].bitfield
.disp16
= 0;
4073 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4075 if (operand_types
[j
].bitfield
.disp32
)
4077 addr_prefix_disp
= j
;
4078 operand_types
[j
].bitfield
.disp32
= 0;
4079 operand_types
[j
].bitfield
.disp16
= 1;
4085 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4087 if (operand_types
[j
].bitfield
.disp64
)
4089 addr_prefix_disp
= j
;
4090 operand_types
[j
].bitfield
.disp64
= 0;
4091 operand_types
[j
].bitfield
.disp32
= 1;
4099 /* We check register size if needed. */
4100 check_register
= t
->opcode_modifier
.checkregsize
;
4101 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
4102 switch (t
->operands
)
4105 if (!operand_type_match (overlap0
, i
.types
[0]))
4109 /* xchg %eax, %eax is a special case. It is an aliase for nop
4110 only in 32bit mode and we can use opcode 0x90. In 64bit
4111 mode, we can't use 0x90 for xchg %eax, %eax since it should
4112 zero-extend %eax to %rax. */
4113 if (flag_code
== CODE_64BIT
4114 && t
->base_opcode
== 0x90
4115 && operand_type_equal (&i
.types
[0], &acc32
)
4116 && operand_type_equal (&i
.types
[1], &acc32
))
4120 /* If we swap operand in encoding, we either match
4121 the next one or reverse direction of operands. */
4122 if (t
->opcode_modifier
.s
)
4124 else if (t
->opcode_modifier
.d
)
4129 /* If we swap operand in encoding, we match the next one. */
4130 if (i
.swap_operand
&& t
->opcode_modifier
.s
)
4134 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
4135 if (!operand_type_match (overlap0
, i
.types
[0])
4136 || !operand_type_match (overlap1
, i
.types
[1])
4138 && !operand_type_register_match (overlap0
, i
.types
[0],
4140 overlap1
, i
.types
[1],
4143 /* Check if other direction is valid ... */
4144 if (!t
->opcode_modifier
.d
&& !t
->opcode_modifier
.floatd
)
4148 /* Try reversing direction of operands. */
4149 overlap0
= operand_type_and (i
.types
[0], operand_types
[1]);
4150 overlap1
= operand_type_and (i
.types
[1], operand_types
[0]);
4151 if (!operand_type_match (overlap0
, i
.types
[0])
4152 || !operand_type_match (overlap1
, i
.types
[1])
4154 && !operand_type_register_match (overlap0
,
4161 /* Does not match either direction. */
4164 /* found_reverse_match holds which of D or FloatDR
4166 if (t
->opcode_modifier
.d
)
4167 found_reverse_match
= Opcode_D
;
4168 else if (t
->opcode_modifier
.floatd
)
4169 found_reverse_match
= Opcode_FloatD
;
4171 found_reverse_match
= 0;
4172 if (t
->opcode_modifier
.floatr
)
4173 found_reverse_match
|= Opcode_FloatR
;
4177 /* Found a forward 2 operand match here. */
4178 switch (t
->operands
)
4181 overlap4
= operand_type_and (i
.types
[4],
4184 overlap3
= operand_type_and (i
.types
[3],
4187 overlap2
= operand_type_and (i
.types
[2],
4192 switch (t
->operands
)
4195 if (!operand_type_match (overlap4
, i
.types
[4])
4196 || !operand_type_register_match (overlap3
,
4204 if (!operand_type_match (overlap3
, i
.types
[3])
4206 && !operand_type_register_match (overlap2
,
4214 /* Here we make use of the fact that there are no
4215 reverse match 3 operand instructions, and all 3
4216 operand instructions only need to be checked for
4217 register consistency between operands 2 and 3. */
4218 if (!operand_type_match (overlap2
, i
.types
[2])
4220 && !operand_type_register_match (overlap1
,
4230 /* Found either forward/reverse 2, 3 or 4 operand match here:
4231 slip through to break. */
4233 if (!found_cpu_match
)
4235 found_reverse_match
= 0;
4239 /* Check if vector operands are valid. */
4240 if (check_VecOperands (t
))
4243 /* Check if VEX operands are valid. */
4244 if (VEX_check_operands (t
))
4247 /* We've found a match; break out of loop. */
4251 if (t
== current_templates
->end
)
4253 /* We found no match. */
4254 const char *err_msg
;
4259 case operand_size_mismatch
:
4260 err_msg
= _("operand size mismatch");
4262 case operand_type_mismatch
:
4263 err_msg
= _("operand type mismatch");
4265 case register_type_mismatch
:
4266 err_msg
= _("register type mismatch");
4268 case number_of_operands_mismatch
:
4269 err_msg
= _("number of operands mismatch");
4271 case invalid_instruction_suffix
:
4272 err_msg
= _("invalid instruction suffix");
4275 err_msg
= _("Imm4 isn't the first operand");
4278 err_msg
= _("only supported with old gcc");
4280 case unsupported_with_intel_mnemonic
:
4281 err_msg
= _("unsupported with Intel mnemonic");
4283 case unsupported_syntax
:
4284 err_msg
= _("unsupported syntax");
4287 err_msg
= _("unsupported");
4289 case invalid_vsib_address
:
4290 err_msg
= _("invalid VSIB address");
4292 case unsupported_vector_index_register
:
4293 err_msg
= _("unsupported vector index register");
4296 as_bad (_("%s for `%s'"), err_msg
,
4297 current_templates
->start
->name
);
4301 if (!quiet_warnings
)
4304 && (i
.types
[0].bitfield
.jumpabsolute
4305 != operand_types
[0].bitfield
.jumpabsolute
))
4307 as_warn (_("indirect %s without `*'"), t
->name
);
4310 if (t
->opcode_modifier
.isprefix
4311 && t
->opcode_modifier
.ignoresize
)
4313 /* Warn them that a data or address size prefix doesn't
4314 affect assembly of the next line of code. */
4315 as_warn (_("stand-alone `%s' prefix"), t
->name
);
4319 /* Copy the template we found. */
4322 if (addr_prefix_disp
!= -1)
4323 i
.tm
.operand_types
[addr_prefix_disp
]
4324 = operand_types
[addr_prefix_disp
];
4326 if (found_reverse_match
)
4328 /* If we found a reverse match we must alter the opcode
4329 direction bit. found_reverse_match holds bits to change
4330 (different for int & float insns). */
4332 i
.tm
.base_opcode
^= found_reverse_match
;
4334 i
.tm
.operand_types
[0] = operand_types
[1];
4335 i
.tm
.operand_types
[1] = operand_types
[0];
4344 int mem_op
= operand_type_check (i
.types
[0], anymem
) ? 0 : 1;
4345 if (i
.tm
.operand_types
[mem_op
].bitfield
.esseg
)
4347 if (i
.seg
[0] != NULL
&& i
.seg
[0] != &es
)
4349 as_bad (_("`%s' operand %d must use `%ses' segment"),
4355 /* There's only ever one segment override allowed per instruction.
4356 This instruction possibly has a legal segment override on the
4357 second operand, so copy the segment to where non-string
4358 instructions store it, allowing common code. */
4359 i
.seg
[0] = i
.seg
[1];
4361 else if (i
.tm
.operand_types
[mem_op
+ 1].bitfield
.esseg
)
4363 if (i
.seg
[1] != NULL
&& i
.seg
[1] != &es
)
4365 as_bad (_("`%s' operand %d must use `%ses' segment"),
4376 process_suffix (void)
4378 /* If matched instruction specifies an explicit instruction mnemonic
4380 if (i
.tm
.opcode_modifier
.size16
)
4381 i
.suffix
= WORD_MNEM_SUFFIX
;
4382 else if (i
.tm
.opcode_modifier
.size32
)
4383 i
.suffix
= LONG_MNEM_SUFFIX
;
4384 else if (i
.tm
.opcode_modifier
.size64
)
4385 i
.suffix
= QWORD_MNEM_SUFFIX
;
4386 else if (i
.reg_operands
)
4388 /* If there's no instruction mnemonic suffix we try to invent one
4389 based on register operands. */
4392 /* We take i.suffix from the last register operand specified,
4393 Destination register type is more significant than source
4394 register type. crc32 in SSE4.2 prefers source register
4396 if (i
.tm
.base_opcode
== 0xf20f38f1)
4398 if (i
.types
[0].bitfield
.reg16
)
4399 i
.suffix
= WORD_MNEM_SUFFIX
;
4400 else if (i
.types
[0].bitfield
.reg32
)
4401 i
.suffix
= LONG_MNEM_SUFFIX
;
4402 else if (i
.types
[0].bitfield
.reg64
)
4403 i
.suffix
= QWORD_MNEM_SUFFIX
;
4405 else if (i
.tm
.base_opcode
== 0xf20f38f0)
4407 if (i
.types
[0].bitfield
.reg8
)
4408 i
.suffix
= BYTE_MNEM_SUFFIX
;
4415 if (i
.tm
.base_opcode
== 0xf20f38f1
4416 || i
.tm
.base_opcode
== 0xf20f38f0)
4418 /* We have to know the operand size for crc32. */
4419 as_bad (_("ambiguous memory operand size for `%s`"),
4424 for (op
= i
.operands
; --op
>= 0;)
4425 if (!i
.tm
.operand_types
[op
].bitfield
.inoutportreg
)
4427 if (i
.types
[op
].bitfield
.reg8
)
4429 i
.suffix
= BYTE_MNEM_SUFFIX
;
4432 else if (i
.types
[op
].bitfield
.reg16
)
4434 i
.suffix
= WORD_MNEM_SUFFIX
;
4437 else if (i
.types
[op
].bitfield
.reg32
)
4439 i
.suffix
= LONG_MNEM_SUFFIX
;
4442 else if (i
.types
[op
].bitfield
.reg64
)
4444 i
.suffix
= QWORD_MNEM_SUFFIX
;
4450 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
4453 && i
.tm
.opcode_modifier
.ignoresize
4454 && i
.tm
.opcode_modifier
.no_bsuf
)
4456 else if (!check_byte_reg ())
4459 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
4462 && i
.tm
.opcode_modifier
.ignoresize
4463 && i
.tm
.opcode_modifier
.no_lsuf
)
4465 else if (!check_long_reg ())
4468 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
4471 && i
.tm
.opcode_modifier
.ignoresize
4472 && i
.tm
.opcode_modifier
.no_qsuf
)
4474 else if (!check_qword_reg ())
4477 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
4480 && i
.tm
.opcode_modifier
.ignoresize
4481 && i
.tm
.opcode_modifier
.no_wsuf
)
4483 else if (!check_word_reg ())
4486 else if (i
.suffix
== XMMWORD_MNEM_SUFFIX
4487 || i
.suffix
== YMMWORD_MNEM_SUFFIX
)
4489 /* Skip if the instruction has x/y suffix. match_template
4490 should check if it is a valid suffix. */
4492 else if (intel_syntax
&& i
.tm
.opcode_modifier
.ignoresize
)
4493 /* Do nothing if the instruction is going to ignore the prefix. */
4498 else if (i
.tm
.opcode_modifier
.defaultsize
4500 /* exclude fldenv/frstor/fsave/fstenv */
4501 && i
.tm
.opcode_modifier
.no_ssuf
)
4503 i
.suffix
= stackop_size
;
4505 else if (intel_syntax
4507 && (i
.tm
.operand_types
[0].bitfield
.jumpabsolute
4508 || i
.tm
.opcode_modifier
.jumpbyte
4509 || i
.tm
.opcode_modifier
.jumpintersegment
4510 || (i
.tm
.base_opcode
== 0x0f01 /* [ls][gi]dt */
4511 && i
.tm
.extension_opcode
<= 3)))
4516 if (!i
.tm
.opcode_modifier
.no_qsuf
)
4518 i
.suffix
= QWORD_MNEM_SUFFIX
;
4522 if (!i
.tm
.opcode_modifier
.no_lsuf
)
4523 i
.suffix
= LONG_MNEM_SUFFIX
;
4526 if (!i
.tm
.opcode_modifier
.no_wsuf
)
4527 i
.suffix
= WORD_MNEM_SUFFIX
;
4536 if (i
.tm
.opcode_modifier
.w
)
4538 as_bad (_("no instruction mnemonic suffix given and "
4539 "no register operands; can't size instruction"));
4545 unsigned int suffixes
;
4547 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
4548 if (!i
.tm
.opcode_modifier
.no_wsuf
)
4550 if (!i
.tm
.opcode_modifier
.no_lsuf
)
4552 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
4554 if (!i
.tm
.opcode_modifier
.no_ssuf
)
4556 if (!i
.tm
.opcode_modifier
.no_qsuf
)
4559 /* There are more than suffix matches. */
4560 if (i
.tm
.opcode_modifier
.w
4561 || ((suffixes
& (suffixes
- 1))
4562 && !i
.tm
.opcode_modifier
.defaultsize
4563 && !i
.tm
.opcode_modifier
.ignoresize
))
4565 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
4571 /* Change the opcode based on the operand size given by i.suffix;
4572 We don't need to change things for byte insns. */
4575 && i
.suffix
!= BYTE_MNEM_SUFFIX
4576 && i
.suffix
!= XMMWORD_MNEM_SUFFIX
4577 && i
.suffix
!= YMMWORD_MNEM_SUFFIX
)
4579 /* It's not a byte, select word/dword operation. */
4580 if (i
.tm
.opcode_modifier
.w
)
4582 if (i
.tm
.opcode_modifier
.shortform
)
4583 i
.tm
.base_opcode
|= 8;
4585 i
.tm
.base_opcode
|= 1;
4588 /* Now select between word & dword operations via the operand
4589 size prefix, except for instructions that will ignore this
4591 if (i
.tm
.opcode_modifier
.addrprefixop0
)
4593 /* The address size override prefix changes the size of the
4595 if ((flag_code
== CODE_32BIT
4596 && i
.op
->regs
[0].reg_type
.bitfield
.reg16
)
4597 || (flag_code
!= CODE_32BIT
4598 && i
.op
->regs
[0].reg_type
.bitfield
.reg32
))
4599 if (!add_prefix (ADDR_PREFIX_OPCODE
))
4602 else if (i
.suffix
!= QWORD_MNEM_SUFFIX
4603 && i
.suffix
!= LONG_DOUBLE_MNEM_SUFFIX
4604 && !i
.tm
.opcode_modifier
.ignoresize
4605 && !i
.tm
.opcode_modifier
.floatmf
4606 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
4607 || (flag_code
== CODE_64BIT
4608 && i
.tm
.opcode_modifier
.jumpbyte
)))
4610 unsigned int prefix
= DATA_PREFIX_OPCODE
;
4612 if (i
.tm
.opcode_modifier
.jumpbyte
) /* jcxz, loop */
4613 prefix
= ADDR_PREFIX_OPCODE
;
4615 if (!add_prefix (prefix
))
4619 /* Set mode64 for an operand. */
4620 if (i
.suffix
== QWORD_MNEM_SUFFIX
4621 && flag_code
== CODE_64BIT
4622 && !i
.tm
.opcode_modifier
.norex64
)
4624 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4625 need rex64. cmpxchg8b is also a special case. */
4626 if (! (i
.operands
== 2
4627 && i
.tm
.base_opcode
== 0x90
4628 && i
.tm
.extension_opcode
== None
4629 && operand_type_equal (&i
.types
[0], &acc64
)
4630 && operand_type_equal (&i
.types
[1], &acc64
))
4631 && ! (i
.operands
== 1
4632 && i
.tm
.base_opcode
== 0xfc7
4633 && i
.tm
.extension_opcode
== 1
4634 && !operand_type_check (i
.types
[0], reg
)
4635 && operand_type_check (i
.types
[0], anymem
)))
4639 /* Size floating point instruction. */
4640 if (i
.suffix
== LONG_MNEM_SUFFIX
)
4641 if (i
.tm
.opcode_modifier
.floatmf
)
4642 i
.tm
.base_opcode
^= 4;
4649 check_byte_reg (void)
4653 for (op
= i
.operands
; --op
>= 0;)
4655 /* If this is an eight bit register, it's OK. If it's the 16 or
4656 32 bit version of an eight bit register, we will just use the
4657 low portion, and that's OK too. */
4658 if (i
.types
[op
].bitfield
.reg8
)
4661 /* crc32 doesn't generate this warning. */
4662 if (i
.tm
.base_opcode
== 0xf20f38f0)
4665 if ((i
.types
[op
].bitfield
.reg16
4666 || i
.types
[op
].bitfield
.reg32
4667 || i
.types
[op
].bitfield
.reg64
)
4668 && i
.op
[op
].regs
->reg_num
< 4)
4670 /* Prohibit these changes in the 64bit mode, since the
4671 lowering is more complicated. */
4672 if (flag_code
== CODE_64BIT
4673 && !i
.tm
.operand_types
[op
].bitfield
.inoutportreg
)
4675 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4676 register_prefix
, i
.op
[op
].regs
->reg_name
,
4680 #if REGISTER_WARNINGS
4682 && !i
.tm
.operand_types
[op
].bitfield
.inoutportreg
)
4683 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4685 (i
.op
[op
].regs
+ (i
.types
[op
].bitfield
.reg16
4686 ? REGNAM_AL
- REGNAM_AX
4687 : REGNAM_AL
- REGNAM_EAX
))->reg_name
,
4689 i
.op
[op
].regs
->reg_name
,
4694 /* Any other register is bad. */
4695 if (i
.types
[op
].bitfield
.reg16
4696 || i
.types
[op
].bitfield
.reg32
4697 || i
.types
[op
].bitfield
.reg64
4698 || i
.types
[op
].bitfield
.regmmx
4699 || i
.types
[op
].bitfield
.regxmm
4700 || i
.types
[op
].bitfield
.regymm
4701 || i
.types
[op
].bitfield
.sreg2
4702 || i
.types
[op
].bitfield
.sreg3
4703 || i
.types
[op
].bitfield
.control
4704 || i
.types
[op
].bitfield
.debug
4705 || i
.types
[op
].bitfield
.test
4706 || i
.types
[op
].bitfield
.floatreg
4707 || i
.types
[op
].bitfield
.floatacc
)
4709 as_bad (_("`%s%s' not allowed with `%s%c'"),
4711 i
.op
[op
].regs
->reg_name
,
4721 check_long_reg (void)
4725 for (op
= i
.operands
; --op
>= 0;)
4726 /* Reject eight bit registers, except where the template requires
4727 them. (eg. movzb) */
4728 if (i
.types
[op
].bitfield
.reg8
4729 && (i
.tm
.operand_types
[op
].bitfield
.reg16
4730 || i
.tm
.operand_types
[op
].bitfield
.reg32
4731 || i
.tm
.operand_types
[op
].bitfield
.acc
))
4733 as_bad (_("`%s%s' not allowed with `%s%c'"),
4735 i
.op
[op
].regs
->reg_name
,
4740 /* Warn if the e prefix on a general reg is missing. */
4741 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
4742 && i
.types
[op
].bitfield
.reg16
4743 && (i
.tm
.operand_types
[op
].bitfield
.reg32
4744 || i
.tm
.operand_types
[op
].bitfield
.acc
))
4746 /* Prohibit these changes in the 64bit mode, since the
4747 lowering is more complicated. */
4748 if (flag_code
== CODE_64BIT
)
4750 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4751 register_prefix
, i
.op
[op
].regs
->reg_name
,
4755 #if REGISTER_WARNINGS
4757 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4759 (i
.op
[op
].regs
+ REGNAM_EAX
- REGNAM_AX
)->reg_name
,
4761 i
.op
[op
].regs
->reg_name
,
4765 /* Warn if the r prefix on a general reg is missing. */
4766 else if (i
.types
[op
].bitfield
.reg64
4767 && (i
.tm
.operand_types
[op
].bitfield
.reg32
4768 || i
.tm
.operand_types
[op
].bitfield
.acc
))
4771 && i
.tm
.opcode_modifier
.toqword
4772 && !i
.types
[0].bitfield
.regxmm
)
4774 /* Convert to QWORD. We want REX byte. */
4775 i
.suffix
= QWORD_MNEM_SUFFIX
;
4779 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4780 register_prefix
, i
.op
[op
].regs
->reg_name
,
4789 check_qword_reg (void)
4793 for (op
= i
.operands
; --op
>= 0; )
4794 /* Reject eight bit registers, except where the template requires
4795 them. (eg. movzb) */
4796 if (i
.types
[op
].bitfield
.reg8
4797 && (i
.tm
.operand_types
[op
].bitfield
.reg16
4798 || i
.tm
.operand_types
[op
].bitfield
.reg32
4799 || i
.tm
.operand_types
[op
].bitfield
.acc
))
4801 as_bad (_("`%s%s' not allowed with `%s%c'"),
4803 i
.op
[op
].regs
->reg_name
,
4808 /* Warn if the e prefix on a general reg is missing. */
4809 else if ((i
.types
[op
].bitfield
.reg16
4810 || i
.types
[op
].bitfield
.reg32
)
4811 && (i
.tm
.operand_types
[op
].bitfield
.reg32
4812 || i
.tm
.operand_types
[op
].bitfield
.acc
))
4814 /* Prohibit these changes in the 64bit mode, since the
4815 lowering is more complicated. */
4817 && i
.tm
.opcode_modifier
.todword
4818 && !i
.types
[0].bitfield
.regxmm
)
4820 /* Convert to DWORD. We don't want REX byte. */
4821 i
.suffix
= LONG_MNEM_SUFFIX
;
4825 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4826 register_prefix
, i
.op
[op
].regs
->reg_name
,
4835 check_word_reg (void)
4838 for (op
= i
.operands
; --op
>= 0;)
4839 /* Reject eight bit registers, except where the template requires
4840 them. (eg. movzb) */
4841 if (i
.types
[op
].bitfield
.reg8
4842 && (i
.tm
.operand_types
[op
].bitfield
.reg16
4843 || i
.tm
.operand_types
[op
].bitfield
.reg32
4844 || i
.tm
.operand_types
[op
].bitfield
.acc
))
4846 as_bad (_("`%s%s' not allowed with `%s%c'"),
4848 i
.op
[op
].regs
->reg_name
,
4853 /* Warn if the e prefix on a general reg is present. */
4854 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
4855 && i
.types
[op
].bitfield
.reg32
4856 && (i
.tm
.operand_types
[op
].bitfield
.reg16
4857 || i
.tm
.operand_types
[op
].bitfield
.acc
))
4859 /* Prohibit these changes in the 64bit mode, since the
4860 lowering is more complicated. */
4861 if (flag_code
== CODE_64BIT
)
4863 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4864 register_prefix
, i
.op
[op
].regs
->reg_name
,
4869 #if REGISTER_WARNINGS
4870 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4872 (i
.op
[op
].regs
+ REGNAM_AX
- REGNAM_EAX
)->reg_name
,
4874 i
.op
[op
].regs
->reg_name
,
4882 update_imm (unsigned int j
)
4884 i386_operand_type overlap
= i
.types
[j
];
4885 if ((overlap
.bitfield
.imm8
4886 || overlap
.bitfield
.imm8s
4887 || overlap
.bitfield
.imm16
4888 || overlap
.bitfield
.imm32
4889 || overlap
.bitfield
.imm32s
4890 || overlap
.bitfield
.imm64
)
4891 && !operand_type_equal (&overlap
, &imm8
)
4892 && !operand_type_equal (&overlap
, &imm8s
)
4893 && !operand_type_equal (&overlap
, &imm16
)
4894 && !operand_type_equal (&overlap
, &imm32
)
4895 && !operand_type_equal (&overlap
, &imm32s
)
4896 && !operand_type_equal (&overlap
, &imm64
))
4900 i386_operand_type temp
;
4902 operand_type_set (&temp
, 0);
4903 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
4905 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
4906 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
4908 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
4909 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
4910 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
4912 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
4913 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
4916 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
4919 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
4920 || operand_type_equal (&overlap
, &imm16_32
)
4921 || operand_type_equal (&overlap
, &imm16_32s
))
4923 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
4928 if (!operand_type_equal (&overlap
, &imm8
)
4929 && !operand_type_equal (&overlap
, &imm8s
)
4930 && !operand_type_equal (&overlap
, &imm16
)
4931 && !operand_type_equal (&overlap
, &imm32
)
4932 && !operand_type_equal (&overlap
, &imm32s
)
4933 && !operand_type_equal (&overlap
, &imm64
))
4935 as_bad (_("no instruction mnemonic suffix given; "
4936 "can't determine immediate size"));
4940 i
.types
[j
] = overlap
;
4950 /* Update the first 2 immediate operands. */
4951 n
= i
.operands
> 2 ? 2 : i
.operands
;
4954 for (j
= 0; j
< n
; j
++)
4955 if (update_imm (j
) == 0)
4958 /* The 3rd operand can't be immediate operand. */
4959 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
4966 bad_implicit_operand (int xmm
)
4968 const char *ireg
= xmm
? "xmm0" : "ymm0";
4971 as_bad (_("the last operand of `%s' must be `%s%s'"),
4972 i
.tm
.name
, register_prefix
, ireg
);
4974 as_bad (_("the first operand of `%s' must be `%s%s'"),
4975 i
.tm
.name
, register_prefix
, ireg
);
4980 process_operands (void)
4982 /* Default segment register this instruction will use for memory
4983 accesses. 0 means unknown. This is only for optimizing out
4984 unnecessary segment overrides. */
4985 const seg_entry
*default_seg
= 0;
4987 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
4989 unsigned int dupl
= i
.operands
;
4990 unsigned int dest
= dupl
- 1;
4993 /* The destination must be an xmm register. */
4994 gas_assert (i
.reg_operands
4995 && MAX_OPERANDS
> dupl
4996 && operand_type_equal (&i
.types
[dest
], ®xmm
));
4998 if (i
.tm
.opcode_modifier
.firstxmm0
)
5000 /* The first operand is implicit and must be xmm0. */
5001 gas_assert (operand_type_equal (&i
.types
[0], ®xmm
));
5002 if (i
.op
[0].regs
->reg_num
!= 0)
5003 return bad_implicit_operand (1);
5005 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
5007 /* Keep xmm0 for instructions with VEX prefix and 3
5013 /* We remove the first xmm0 and keep the number of
5014 operands unchanged, which in fact duplicates the
5016 for (j
= 1; j
< i
.operands
; j
++)
5018 i
.op
[j
- 1] = i
.op
[j
];
5019 i
.types
[j
- 1] = i
.types
[j
];
5020 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
5024 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
5026 gas_assert ((MAX_OPERANDS
- 1) > dupl
5027 && (i
.tm
.opcode_modifier
.vexsources
5030 /* Add the implicit xmm0 for instructions with VEX prefix
5032 for (j
= i
.operands
; j
> 0; j
--)
5034 i
.op
[j
] = i
.op
[j
- 1];
5035 i
.types
[j
] = i
.types
[j
- 1];
5036 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
5039 = (const reg_entry
*) hash_find (reg_hash
, "xmm0");
5040 i
.types
[0] = regxmm
;
5041 i
.tm
.operand_types
[0] = regxmm
;
5044 i
.reg_operands
+= 2;
5049 i
.op
[dupl
] = i
.op
[dest
];
5050 i
.types
[dupl
] = i
.types
[dest
];
5051 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
5060 i
.op
[dupl
] = i
.op
[dest
];
5061 i
.types
[dupl
] = i
.types
[dest
];
5062 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
5065 if (i
.tm
.opcode_modifier
.immext
)
5068 else if (i
.tm
.opcode_modifier
.firstxmm0
)
5072 /* The first operand is implicit and must be xmm0/ymm0. */
5073 gas_assert (i
.reg_operands
5074 && (operand_type_equal (&i
.types
[0], ®xmm
)
5075 || operand_type_equal (&i
.types
[0], ®ymm
)));
5076 if (i
.op
[0].regs
->reg_num
!= 0)
5077 return bad_implicit_operand (i
.types
[0].bitfield
.regxmm
);
5079 for (j
= 1; j
< i
.operands
; j
++)
5081 i
.op
[j
- 1] = i
.op
[j
];
5082 i
.types
[j
- 1] = i
.types
[j
];
5084 /* We need to adjust fields in i.tm since they are used by
5085 build_modrm_byte. */
5086 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
5093 else if (i
.tm
.opcode_modifier
.regkludge
)
5095 /* The imul $imm, %reg instruction is converted into
5096 imul $imm, %reg, %reg, and the clr %reg instruction
5097 is converted into xor %reg, %reg. */
5099 unsigned int first_reg_op
;
5101 if (operand_type_check (i
.types
[0], reg
))
5105 /* Pretend we saw the extra register operand. */
5106 gas_assert (i
.reg_operands
== 1
5107 && i
.op
[first_reg_op
+ 1].regs
== 0);
5108 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
5109 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
5114 if (i
.tm
.opcode_modifier
.shortform
)
5116 if (i
.types
[0].bitfield
.sreg2
5117 || i
.types
[0].bitfield
.sreg3
)
5119 if (i
.tm
.base_opcode
== POP_SEG_SHORT
5120 && i
.op
[0].regs
->reg_num
== 1)
5122 as_bad (_("you can't `pop %scs'"), register_prefix
);
5125 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
5126 if ((i
.op
[0].regs
->reg_flags
& RegRex
) != 0)
5131 /* The register or float register operand is in operand
5135 if (i
.types
[0].bitfield
.floatreg
5136 || operand_type_check (i
.types
[0], reg
))
5140 /* Register goes in low 3 bits of opcode. */
5141 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
5142 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
5144 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
5146 /* Warn about some common errors, but press on regardless.
5147 The first case can be generated by gcc (<= 2.8.1). */
5148 if (i
.operands
== 2)
5150 /* Reversed arguments on faddp, fsubp, etc. */
5151 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
5152 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
5153 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
5157 /* Extraneous `l' suffix on fp insn. */
5158 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
5159 register_prefix
, i
.op
[0].regs
->reg_name
);
5164 else if (i
.tm
.opcode_modifier
.modrm
)
5166 /* The opcode is completed (modulo i.tm.extension_opcode which
5167 must be put into the modrm byte). Now, we make the modrm and
5168 index base bytes based on all the info we've collected. */
5170 default_seg
= build_modrm_byte ();
5172 else if ((i
.tm
.base_opcode
& ~0x3) == MOV_AX_DISP32
)
5176 else if (i
.tm
.opcode_modifier
.isstring
)
5178 /* For the string instructions that allow a segment override
5179 on one of their operands, the default segment is ds. */
5183 if (i
.tm
.base_opcode
== 0x8d /* lea */
5186 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
5188 /* If a segment was explicitly specified, and the specified segment
5189 is not the default, use an opcode prefix to select it. If we
5190 never figured out what the default segment is, then default_seg
5191 will be zero at this point, and the specified segment prefix will
5193 if ((i
.seg
[0]) && (i
.seg
[0] != default_seg
))
5195 if (!add_prefix (i
.seg
[0]->seg_prefix
))
5201 static const seg_entry
*
5202 build_modrm_byte (void)
5204 const seg_entry
*default_seg
= 0;
5205 unsigned int source
, dest
;
5208 /* The first operand of instructions with VEX prefix and 3 sources
5209 must be VEX_Imm4. */
5210 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
5213 unsigned int nds
, reg_slot
;
5216 if (i
.tm
.opcode_modifier
.veximmext
5217 && i
.tm
.opcode_modifier
.immext
)
5219 dest
= i
.operands
- 2;
5220 gas_assert (dest
== 3);
5223 dest
= i
.operands
- 1;
5226 /* There are 2 kinds of instructions:
5227 1. 5 operands: 4 register operands or 3 register operands
5228 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5229 VexW0 or VexW1. The destination must be either XMM or YMM
5231 2. 4 operands: 4 register operands or 3 register operands
5232 plus 1 memory operand, VexXDS, and VexImmExt */
5233 gas_assert ((i
.reg_operands
== 4
5234 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
5235 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
5236 && (i
.tm
.opcode_modifier
.veximmext
5237 || (i
.imm_operands
== 1
5238 && i
.types
[0].bitfield
.vec_imm4
5239 && (i
.tm
.opcode_modifier
.vexw
== VEXW0
5240 || i
.tm
.opcode_modifier
.vexw
== VEXW1
)
5241 && (operand_type_equal (&i
.tm
.operand_types
[dest
], ®xmm
)
5242 || operand_type_equal (&i
.tm
.operand_types
[dest
], ®ymm
)))));
5244 if (i
.imm_operands
== 0)
5246 /* When there is no immediate operand, generate an 8bit
5247 immediate operand to encode the first operand. */
5248 exp
= &im_expressions
[i
.imm_operands
++];
5249 i
.op
[i
.operands
].imms
= exp
;
5250 i
.types
[i
.operands
] = imm8
;
5252 /* If VexW1 is set, the first operand is the source and
5253 the second operand is encoded in the immediate operand. */
5254 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
5265 /* FMA swaps REG and NDS. */
5266 if (i
.tm
.cpu_flags
.bitfield
.cpufma
)
5274 gas_assert (operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5276 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5278 exp
->X_op
= O_constant
;
5280 = ((i
.op
[reg_slot
].regs
->reg_num
5281 + ((i
.op
[reg_slot
].regs
->reg_flags
& RegRex
) ? 8 : 0))
5286 unsigned int imm_slot
;
5288 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
5290 /* If VexW0 is set, the third operand is the source and
5291 the second operand is encoded in the immediate
5298 /* VexW1 is set, the second operand is the source and
5299 the third operand is encoded in the immediate
5305 if (i
.tm
.opcode_modifier
.immext
)
5307 /* When ImmExt is set, the immdiate byte is the last
5309 imm_slot
= i
.operands
- 1;
5317 /* Turn on Imm8 so that output_imm will generate it. */
5318 i
.types
[imm_slot
].bitfield
.imm8
= 1;
5321 gas_assert (operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5323 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5325 i
.op
[imm_slot
].imms
->X_add_number
5326 |= ((i
.op
[reg_slot
].regs
->reg_num
5327 + ((i
.op
[reg_slot
].regs
->reg_flags
& RegRex
) ? 8 : 0))
5331 gas_assert (operand_type_equal (&i
.tm
.operand_types
[nds
], ®xmm
)
5332 || operand_type_equal (&i
.tm
.operand_types
[nds
],
5334 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
5339 /* i.reg_operands MUST be the number of real register operands;
5340 implicit registers do not count. If there are 3 register
5341 operands, it must be a instruction with VexNDS. For a
5342 instruction with VexNDD, the destination register is encoded
5343 in VEX prefix. If there are 4 register operands, it must be
5344 a instruction with VEX prefix and 3 sources. */
5345 if (i
.mem_operands
== 0
5346 && ((i
.reg_operands
== 2
5347 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
5348 || (i
.reg_operands
== 3
5349 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
5350 || (i
.reg_operands
== 4 && vex_3_sources
)))
5358 /* When there are 3 operands, one of them may be immediate,
5359 which may be the first or the last operand. Otherwise,
5360 the first operand must be shift count register (cl) or it
5361 is an instruction with VexNDS. */
5362 gas_assert (i
.imm_operands
== 1
5363 || (i
.imm_operands
== 0
5364 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
5365 || i
.types
[0].bitfield
.shiftcount
)));
5366 if (operand_type_check (i
.types
[0], imm
)
5367 || i
.types
[0].bitfield
.shiftcount
)
5373 /* When there are 4 operands, the first two must be 8bit
5374 immediate operands. The source operand will be the 3rd
5377 For instructions with VexNDS, if the first operand
5378 an imm8, the source operand is the 2nd one. If the last
5379 operand is imm8, the source operand is the first one. */
5380 gas_assert ((i
.imm_operands
== 2
5381 && i
.types
[0].bitfield
.imm8
5382 && i
.types
[1].bitfield
.imm8
)
5383 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
5384 && i
.imm_operands
== 1
5385 && (i
.types
[0].bitfield
.imm8
5386 || i
.types
[i
.operands
- 1].bitfield
.imm8
)));
5387 if (i
.imm_operands
== 2)
5391 if (i
.types
[0].bitfield
.imm8
)
5407 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
5409 /* For instructions with VexNDS, the register-only
5410 source operand must be 32/64bit integer, XMM or
5411 YMM register. It is encoded in VEX prefix. We
5412 need to clear RegMem bit before calling
5413 operand_type_equal. */
5415 i386_operand_type op
;
5418 /* Check register-only source operand when two source
5419 operands are swapped. */
5420 if (!i
.tm
.operand_types
[source
].bitfield
.baseindex
5421 && i
.tm
.operand_types
[dest
].bitfield
.baseindex
)
5429 op
= i
.tm
.operand_types
[vvvv
];
5430 op
.bitfield
.regmem
= 0;
5431 if ((dest
+ 1) >= i
.operands
5432 || (op
.bitfield
.reg32
!= 1
5433 && !op
.bitfield
.reg64
!= 1
5434 && !operand_type_equal (&op
, ®xmm
)
5435 && !operand_type_equal (&op
, ®ymm
)))
5437 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
5443 /* One of the register operands will be encoded in the i.tm.reg
5444 field, the other in the combined i.tm.mode and i.tm.regmem
5445 fields. If no form of this instruction supports a memory
5446 destination operand, then we assume the source operand may
5447 sometimes be a memory operand and so we need to store the
5448 destination in the i.rm.reg field. */
5449 if (!i
.tm
.operand_types
[dest
].bitfield
.regmem
5450 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
5452 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
5453 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
5454 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
5456 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
5461 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
5462 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
5463 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
5465 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
5468 if (flag_code
!= CODE_64BIT
&& (i
.rex
& (REX_R
| REX_B
)))
5470 if (!i
.types
[0].bitfield
.control
5471 && !i
.types
[1].bitfield
.control
)
5473 i
.rex
&= ~(REX_R
| REX_B
);
5474 add_prefix (LOCK_PREFIX_OPCODE
);
5478 { /* If it's not 2 reg operands... */
5483 unsigned int fake_zero_displacement
= 0;
5486 for (op
= 0; op
< i
.operands
; op
++)
5487 if (operand_type_check (i
.types
[op
], anymem
))
5489 gas_assert (op
< i
.operands
);
5491 if (i
.tm
.opcode_modifier
.vecsib
)
5493 if (i
.index_reg
->reg_num
== RegEiz
5494 || i
.index_reg
->reg_num
== RegRiz
)
5497 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
5500 i
.sib
.base
= NO_BASE_REGISTER
;
5501 i
.sib
.scale
= i
.log2_scale_factor
;
5502 i
.types
[op
].bitfield
.disp8
= 0;
5503 i
.types
[op
].bitfield
.disp16
= 0;
5504 i
.types
[op
].bitfield
.disp64
= 0;
5505 if (flag_code
!= CODE_64BIT
)
5507 /* Must be 32 bit */
5508 i
.types
[op
].bitfield
.disp32
= 1;
5509 i
.types
[op
].bitfield
.disp32s
= 0;
5513 i
.types
[op
].bitfield
.disp32
= 0;
5514 i
.types
[op
].bitfield
.disp32s
= 1;
5517 i
.sib
.index
= i
.index_reg
->reg_num
;
5518 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
5524 if (i
.base_reg
== 0)
5527 if (!i
.disp_operands
)
5529 fake_zero_displacement
= 1;
5530 /* Instructions with VSIB byte need 32bit displacement
5531 if there is no base register. */
5532 if (i
.tm
.opcode_modifier
.vecsib
)
5533 i
.types
[op
].bitfield
.disp32
= 1;
5535 if (i
.index_reg
== 0)
5537 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
5538 /* Operand is just <disp> */
5539 if (flag_code
== CODE_64BIT
)
5541 /* 64bit mode overwrites the 32bit absolute
5542 addressing by RIP relative addressing and
5543 absolute addressing is encoded by one of the
5544 redundant SIB forms. */
5545 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
5546 i
.sib
.base
= NO_BASE_REGISTER
;
5547 i
.sib
.index
= NO_INDEX_REGISTER
;
5548 i
.types
[op
] = ((i
.prefix
[ADDR_PREFIX
] == 0)
5549 ? disp32s
: disp32
);
5551 else if ((flag_code
== CODE_16BIT
)
5552 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
5554 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
5555 i
.types
[op
] = disp16
;
5559 i
.rm
.regmem
= NO_BASE_REGISTER
;
5560 i
.types
[op
] = disp32
;
5563 else if (!i
.tm
.opcode_modifier
.vecsib
)
5565 /* !i.base_reg && i.index_reg */
5566 if (i
.index_reg
->reg_num
== RegEiz
5567 || i
.index_reg
->reg_num
== RegRiz
)
5568 i
.sib
.index
= NO_INDEX_REGISTER
;
5570 i
.sib
.index
= i
.index_reg
->reg_num
;
5571 i
.sib
.base
= NO_BASE_REGISTER
;
5572 i
.sib
.scale
= i
.log2_scale_factor
;
5573 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
5574 i
.types
[op
].bitfield
.disp8
= 0;
5575 i
.types
[op
].bitfield
.disp16
= 0;
5576 i
.types
[op
].bitfield
.disp64
= 0;
5577 if (flag_code
!= CODE_64BIT
)
5579 /* Must be 32 bit */
5580 i
.types
[op
].bitfield
.disp32
= 1;
5581 i
.types
[op
].bitfield
.disp32s
= 0;
5585 i
.types
[op
].bitfield
.disp32
= 0;
5586 i
.types
[op
].bitfield
.disp32s
= 1;
5588 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
5592 /* RIP addressing for 64bit mode. */
5593 else if (i
.base_reg
->reg_num
== RegRip
||
5594 i
.base_reg
->reg_num
== RegEip
)
5596 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
5597 i
.rm
.regmem
= NO_BASE_REGISTER
;
5598 i
.types
[op
].bitfield
.disp8
= 0;
5599 i
.types
[op
].bitfield
.disp16
= 0;
5600 i
.types
[op
].bitfield
.disp32
= 0;
5601 i
.types
[op
].bitfield
.disp32s
= 1;
5602 i
.types
[op
].bitfield
.disp64
= 0;
5603 i
.flags
[op
] |= Operand_PCrel
;
5604 if (! i
.disp_operands
)
5605 fake_zero_displacement
= 1;
5607 else if (i
.base_reg
->reg_type
.bitfield
.reg16
)
5609 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
5610 switch (i
.base_reg
->reg_num
)
5613 if (i
.index_reg
== 0)
5615 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5616 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
5620 if (i
.index_reg
== 0)
5623 if (operand_type_check (i
.types
[op
], disp
) == 0)
5625 /* fake (%bp) into 0(%bp) */
5626 i
.types
[op
].bitfield
.disp8
= 1;
5627 fake_zero_displacement
= 1;
5630 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5631 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
5633 default: /* (%si) -> 4 or (%di) -> 5 */
5634 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
5636 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
5638 else /* i.base_reg and 32/64 bit mode */
5640 if (flag_code
== CODE_64BIT
5641 && operand_type_check (i
.types
[op
], disp
))
5643 i386_operand_type temp
;
5644 operand_type_set (&temp
, 0);
5645 temp
.bitfield
.disp8
= i
.types
[op
].bitfield
.disp8
;
5647 if (i
.prefix
[ADDR_PREFIX
] == 0)
5648 i
.types
[op
].bitfield
.disp32s
= 1;
5650 i
.types
[op
].bitfield
.disp32
= 1;
5653 if (!i
.tm
.opcode_modifier
.vecsib
)
5654 i
.rm
.regmem
= i
.base_reg
->reg_num
;
5655 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
5657 i
.sib
.base
= i
.base_reg
->reg_num
;
5658 /* x86-64 ignores REX prefix bit here to avoid decoder
5660 if ((i
.base_reg
->reg_num
& 7) == EBP_REG_NUM
)
5663 if (i
.disp_operands
== 0)
5665 fake_zero_displacement
= 1;
5666 i
.types
[op
].bitfield
.disp8
= 1;
5669 else if (i
.base_reg
->reg_num
== ESP_REG_NUM
)
5673 i
.sib
.scale
= i
.log2_scale_factor
;
5674 if (i
.index_reg
== 0)
5676 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
5677 /* <disp>(%esp) becomes two byte modrm with no index
5678 register. We've already stored the code for esp
5679 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5680 Any base register besides %esp will not use the
5681 extra modrm byte. */
5682 i
.sib
.index
= NO_INDEX_REGISTER
;
5684 else if (!i
.tm
.opcode_modifier
.vecsib
)
5686 if (i
.index_reg
->reg_num
== RegEiz
5687 || i
.index_reg
->reg_num
== RegRiz
)
5688 i
.sib
.index
= NO_INDEX_REGISTER
;
5690 i
.sib
.index
= i
.index_reg
->reg_num
;
5691 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
5692 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
5697 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5698 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
5701 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
5704 if (fake_zero_displacement
)
5706 /* Fakes a zero displacement assuming that i.types[op]
5707 holds the correct displacement size. */
5710 gas_assert (i
.op
[op
].disps
== 0);
5711 exp
= &disp_expressions
[i
.disp_operands
++];
5712 i
.op
[op
].disps
= exp
;
5713 exp
->X_op
= O_constant
;
5714 exp
->X_add_number
= 0;
5715 exp
->X_add_symbol
= (symbolS
*) 0;
5716 exp
->X_op_symbol
= (symbolS
*) 0;
5724 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
5726 if (operand_type_check (i
.types
[0], imm
))
5727 i
.vex
.register_specifier
= NULL
;
5730 /* VEX.vvvv encodes one of the sources when the first
5731 operand is not an immediate. */
5732 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
5733 i
.vex
.register_specifier
= i
.op
[0].regs
;
5735 i
.vex
.register_specifier
= i
.op
[1].regs
;
5738 /* Destination is a XMM register encoded in the ModRM.reg
5740 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
5741 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
5744 /* ModRM.rm and VEX.B encodes the other source. */
5745 if (!i
.mem_operands
)
5749 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
5750 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
5752 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
5754 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
5758 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
5760 i
.vex
.register_specifier
= i
.op
[2].regs
;
5761 if (!i
.mem_operands
)
5764 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
5765 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
5769 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5770 (if any) based on i.tm.extension_opcode. Again, we must be
5771 careful to make sure that segment/control/debug/test/MMX
5772 registers are coded into the i.rm.reg field. */
5773 else if (i
.reg_operands
)
5776 unsigned int vex_reg
= ~0;
5778 for (op
= 0; op
< i
.operands
; op
++)
5779 if (i
.types
[op
].bitfield
.reg8
5780 || i
.types
[op
].bitfield
.reg16
5781 || i
.types
[op
].bitfield
.reg32
5782 || i
.types
[op
].bitfield
.reg64
5783 || i
.types
[op
].bitfield
.regmmx
5784 || i
.types
[op
].bitfield
.regxmm
5785 || i
.types
[op
].bitfield
.regymm
5786 || i
.types
[op
].bitfield
.sreg2
5787 || i
.types
[op
].bitfield
.sreg3
5788 || i
.types
[op
].bitfield
.control
5789 || i
.types
[op
].bitfield
.debug
5790 || i
.types
[op
].bitfield
.test
)
5795 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
5797 /* For instructions with VexNDS, the register-only
5798 source operand is encoded in VEX prefix. */
5799 gas_assert (mem
!= (unsigned int) ~0);
5804 gas_assert (op
< i
.operands
);
5808 /* Check register-only source operand when two source
5809 operands are swapped. */
5810 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
5811 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
5815 gas_assert (mem
== (vex_reg
+ 1)
5816 && op
< i
.operands
);
5821 gas_assert (vex_reg
< i
.operands
);
5825 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
5827 /* For instructions with VexNDD, the register destination
5828 is encoded in VEX prefix. */
5829 if (i
.mem_operands
== 0)
5831 /* There is no memory operand. */
5832 gas_assert ((op
+ 2) == i
.operands
);
5837 /* There are only 2 operands. */
5838 gas_assert (op
< 2 && i
.operands
== 2);
5843 gas_assert (op
< i
.operands
);
5845 if (vex_reg
!= (unsigned int) ~0)
5847 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
5849 if (type
->bitfield
.reg32
!= 1
5850 && type
->bitfield
.reg64
!= 1
5851 && !operand_type_equal (type
, ®xmm
)
5852 && !operand_type_equal (type
, ®ymm
))
5855 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
5858 /* Don't set OP operand twice. */
5861 /* If there is an extension opcode to put here, the
5862 register number must be put into the regmem field. */
5863 if (i
.tm
.extension_opcode
!= None
)
5865 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
5866 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
5871 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
5872 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
5877 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5878 must set it to 3 to indicate this is a register operand
5879 in the regmem field. */
5880 if (!i
.mem_operands
)
5884 /* Fill in i.rm.reg field with extension opcode (if any). */
5885 if (i
.tm
.extension_opcode
!= None
)
5886 i
.rm
.reg
= i
.tm
.extension_opcode
;
5892 output_branch (void)
5898 relax_substateT subtype
;
5902 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
5903 size
= i
.disp32_encoding
? BIG
: SMALL
;
5906 if (i
.prefix
[DATA_PREFIX
] != 0)
5912 /* Pentium4 branch hints. */
5913 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
5914 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
5919 if (i
.prefix
[REX_PREFIX
] != 0)
5925 if (i
.prefixes
!= 0 && !intel_syntax
)
5926 as_warn (_("skipping prefixes on this instruction"));
5928 /* It's always a symbol; End frag & setup for relax.
5929 Make sure there is enough room in this frag for the largest
5930 instruction we may generate in md_convert_frag. This is 2
5931 bytes for the opcode and room for the prefix and largest
5933 frag_grow (prefix
+ 2 + 4);
5934 /* Prefix and 1 opcode byte go in fr_fix. */
5935 p
= frag_more (prefix
+ 1);
5936 if (i
.prefix
[DATA_PREFIX
] != 0)
5937 *p
++ = DATA_PREFIX_OPCODE
;
5938 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
5939 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
5940 *p
++ = i
.prefix
[SEG_PREFIX
];
5941 if (i
.prefix
[REX_PREFIX
] != 0)
5942 *p
++ = i
.prefix
[REX_PREFIX
];
5943 *p
= i
.tm
.base_opcode
;
5945 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
5946 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
5947 else if (cpu_arch_flags
.bitfield
.cpui386
)
5948 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
5950 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
5953 sym
= i
.op
[0].disps
->X_add_symbol
;
5954 off
= i
.op
[0].disps
->X_add_number
;
5956 if (i
.op
[0].disps
->X_op
!= O_constant
5957 && i
.op
[0].disps
->X_op
!= O_symbol
)
5959 /* Handle complex expressions. */
5960 sym
= make_expr_symbol (i
.op
[0].disps
);
5964 /* 1 possible extra opcode + 4 byte displacement go in var part.
5965 Pass reloc in fr_var. */
5966 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
5976 if (i
.tm
.opcode_modifier
.jumpbyte
)
5978 /* This is a loop or jecxz type instruction. */
5980 if (i
.prefix
[ADDR_PREFIX
] != 0)
5982 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE
);
5985 /* Pentium4 branch hints. */
5986 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
5987 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
5989 FRAG_APPEND_1_CHAR (i
.prefix
[SEG_PREFIX
]);
5998 if (flag_code
== CODE_16BIT
)
6001 if (i
.prefix
[DATA_PREFIX
] != 0)
6003 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE
);
6013 if (i
.prefix
[REX_PREFIX
] != 0)
6015 FRAG_APPEND_1_CHAR (i
.prefix
[REX_PREFIX
]);
6019 if (i
.prefixes
!= 0 && !intel_syntax
)
6020 as_warn (_("skipping prefixes on this instruction"));
6022 p
= frag_more (1 + size
);
6023 *p
++ = i
.tm
.base_opcode
;
6025 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
6026 i
.op
[0].disps
, 1, reloc (size
, 1, 1, i
.reloc
[0]));
6028 /* All jumps handled here are signed, but don't use a signed limit
6029 check for 32 and 16 bit jumps as we want to allow wrap around at
6030 4G and 64k respectively. */
6032 fixP
->fx_signed
= 1;
6036 output_interseg_jump (void)
6044 if (flag_code
== CODE_16BIT
)
6048 if (i
.prefix
[DATA_PREFIX
] != 0)
6054 if (i
.prefix
[REX_PREFIX
] != 0)
6064 if (i
.prefixes
!= 0 && !intel_syntax
)
6065 as_warn (_("skipping prefixes on this instruction"));
6067 /* 1 opcode; 2 segment; offset */
6068 p
= frag_more (prefix
+ 1 + 2 + size
);
6070 if (i
.prefix
[DATA_PREFIX
] != 0)
6071 *p
++ = DATA_PREFIX_OPCODE
;
6073 if (i
.prefix
[REX_PREFIX
] != 0)
6074 *p
++ = i
.prefix
[REX_PREFIX
];
6076 *p
++ = i
.tm
.base_opcode
;
6077 if (i
.op
[1].imms
->X_op
== O_constant
)
6079 offsetT n
= i
.op
[1].imms
->X_add_number
;
6082 && !fits_in_unsigned_word (n
)
6083 && !fits_in_signed_word (n
))
6085 as_bad (_("16-bit jump out of range"));
6088 md_number_to_chars (p
, n
, size
);
6091 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
6092 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
6093 if (i
.op
[0].imms
->X_op
!= O_constant
)
6094 as_bad (_("can't handle non absolute segment in `%s'"),
6096 md_number_to_chars (p
+ size
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
6102 fragS
*insn_start_frag
;
6103 offsetT insn_start_off
;
6105 /* Tie dwarf2 debug info to the address at the start of the insn.
6106 We can't do this after the insn has been output as the current
6107 frag may have been closed off. eg. by frag_var. */
6108 dwarf2_emit_insn (0);
6110 insn_start_frag
= frag_now
;
6111 insn_start_off
= frag_now_fix ();
6114 if (i
.tm
.opcode_modifier
.jump
)
6116 else if (i
.tm
.opcode_modifier
.jumpbyte
6117 || i
.tm
.opcode_modifier
.jumpdword
)
6119 else if (i
.tm
.opcode_modifier
.jumpintersegment
)
6120 output_interseg_jump ();
6123 /* Output normal instructions here. */
6127 unsigned int prefix
;
6129 /* Since the VEX prefix contains the implicit prefix, we don't
6130 need the explicit prefix. */
6131 if (!i
.tm
.opcode_modifier
.vex
)
6133 switch (i
.tm
.opcode_length
)
6136 if (i
.tm
.base_opcode
& 0xff000000)
6138 prefix
= (i
.tm
.base_opcode
>> 24) & 0xff;
6143 if ((i
.tm
.base_opcode
& 0xff0000) != 0)
6145 prefix
= (i
.tm
.base_opcode
>> 16) & 0xff;
6146 if (i
.tm
.cpu_flags
.bitfield
.cpupadlock
)
6149 if (prefix
!= REPE_PREFIX_OPCODE
6150 || (i
.prefix
[REP_PREFIX
]
6151 != REPE_PREFIX_OPCODE
))
6152 add_prefix (prefix
);
6155 add_prefix (prefix
);
6164 /* The prefix bytes. */
6165 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
6167 FRAG_APPEND_1_CHAR (*q
);
6170 if (i
.tm
.opcode_modifier
.vex
)
6172 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
6177 /* REX byte is encoded in VEX prefix. */
6181 FRAG_APPEND_1_CHAR (*q
);
6184 /* There should be no other prefixes for instructions
6189 /* Now the VEX prefix. */
6190 p
= frag_more (i
.vex
.length
);
6191 for (j
= 0; j
< i
.vex
.length
; j
++)
6192 p
[j
] = i
.vex
.bytes
[j
];
6195 /* Now the opcode; be careful about word order here! */
6196 if (i
.tm
.opcode_length
== 1)
6198 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
6202 switch (i
.tm
.opcode_length
)
6206 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
6216 /* Put out high byte first: can't use md_number_to_chars! */
6217 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
6218 *p
= i
.tm
.base_opcode
& 0xff;
6221 /* Now the modrm byte and sib byte (if present). */
6222 if (i
.tm
.opcode_modifier
.modrm
)
6224 FRAG_APPEND_1_CHAR ((i
.rm
.regmem
<< 0
6227 /* If i.rm.regmem == ESP (4)
6228 && i.rm.mode != (Register mode)
6230 ==> need second modrm byte. */
6231 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
6233 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.reg16
))
6234 FRAG_APPEND_1_CHAR ((i
.sib
.base
<< 0
6236 | i
.sib
.scale
<< 6));
6239 if (i
.disp_operands
)
6240 output_disp (insn_start_frag
, insn_start_off
);
6243 output_imm (insn_start_frag
, insn_start_off
);
6249 pi ("" /*line*/, &i
);
6251 #endif /* DEBUG386 */
6254 /* Return the size of the displacement operand N. */
6257 disp_size (unsigned int n
)
6260 if (i
.types
[n
].bitfield
.disp64
)
6262 else if (i
.types
[n
].bitfield
.disp8
)
6264 else if (i
.types
[n
].bitfield
.disp16
)
6269 /* Return the size of the immediate operand N. */
6272 imm_size (unsigned int n
)
6275 if (i
.types
[n
].bitfield
.imm64
)
6277 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
6279 else if (i
.types
[n
].bitfield
.imm16
)
6285 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
6290 for (n
= 0; n
< i
.operands
; n
++)
6292 if (operand_type_check (i
.types
[n
], disp
))
6294 if (i
.op
[n
].disps
->X_op
== O_constant
)
6296 int size
= disp_size (n
);
6299 val
= offset_in_range (i
.op
[n
].disps
->X_add_number
,
6301 p
= frag_more (size
);
6302 md_number_to_chars (p
, val
, size
);
6306 enum bfd_reloc_code_real reloc_type
;
6307 int size
= disp_size (n
);
6308 int sign
= i
.types
[n
].bitfield
.disp32s
;
6309 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
6311 /* We can't have 8 bit displacement here. */
6312 gas_assert (!i
.types
[n
].bitfield
.disp8
);
6314 /* The PC relative address is computed relative
6315 to the instruction boundary, so in case immediate
6316 fields follows, we need to adjust the value. */
6317 if (pcrel
&& i
.imm_operands
)
6322 for (n1
= 0; n1
< i
.operands
; n1
++)
6323 if (operand_type_check (i
.types
[n1
], imm
))
6325 /* Only one immediate is allowed for PC
6326 relative address. */
6327 gas_assert (sz
== 0);
6329 i
.op
[n
].disps
->X_add_number
-= sz
;
6331 /* We should find the immediate. */
6332 gas_assert (sz
!= 0);
6335 p
= frag_more (size
);
6336 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
6338 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
6339 && (((reloc_type
== BFD_RELOC_32
6340 || reloc_type
== BFD_RELOC_X86_64_32S
6341 || (reloc_type
== BFD_RELOC_64
6343 && (i
.op
[n
].disps
->X_op
== O_symbol
6344 || (i
.op
[n
].disps
->X_op
== O_add
6345 && ((symbol_get_value_expression
6346 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
6348 || reloc_type
== BFD_RELOC_32_PCREL
))
6352 if (insn_start_frag
== frag_now
)
6353 add
= (p
- frag_now
->fr_literal
) - insn_start_off
;
6358 add
= insn_start_frag
->fr_fix
- insn_start_off
;
6359 for (fr
= insn_start_frag
->fr_next
;
6360 fr
&& fr
!= frag_now
; fr
= fr
->fr_next
)
6362 add
+= p
- frag_now
->fr_literal
;
6367 reloc_type
= BFD_RELOC_386_GOTPC
;
6368 i
.op
[n
].imms
->X_add_number
+= add
;
6370 else if (reloc_type
== BFD_RELOC_64
)
6371 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
6373 /* Don't do the adjustment for x86-64, as there
6374 the pcrel addressing is relative to the _next_
6375 insn, and that is taken care of in other code. */
6376 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
6378 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
6379 i
.op
[n
].disps
, pcrel
, reloc_type
);
6386 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
6391 for (n
= 0; n
< i
.operands
; n
++)
6393 if (operand_type_check (i
.types
[n
], imm
))
6395 if (i
.op
[n
].imms
->X_op
== O_constant
)
6397 int size
= imm_size (n
);
6400 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
6402 p
= frag_more (size
);
6403 md_number_to_chars (p
, val
, size
);
6407 /* Not absolute_section.
6408 Need a 32-bit fixup (don't support 8bit
6409 non-absolute imms). Try to support other
6411 enum bfd_reloc_code_real reloc_type
;
6412 int size
= imm_size (n
);
6415 if (i
.types
[n
].bitfield
.imm32s
6416 && (i
.suffix
== QWORD_MNEM_SUFFIX
6417 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
6422 p
= frag_more (size
);
6423 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
6425 /* This is tough to explain. We end up with this one if we
6426 * have operands that look like
6427 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6428 * obtain the absolute address of the GOT, and it is strongly
6429 * preferable from a performance point of view to avoid using
6430 * a runtime relocation for this. The actual sequence of
6431 * instructions often look something like:
6436 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6438 * The call and pop essentially return the absolute address
6439 * of the label .L66 and store it in %ebx. The linker itself
6440 * will ultimately change the first operand of the addl so
6441 * that %ebx points to the GOT, but to keep things simple, the
6442 * .o file must have this operand set so that it generates not
6443 * the absolute address of .L66, but the absolute address of
6444 * itself. This allows the linker itself simply treat a GOTPC
6445 * relocation as asking for a pcrel offset to the GOT to be
6446 * added in, and the addend of the relocation is stored in the
6447 * operand field for the instruction itself.
6449 * Our job here is to fix the operand so that it would add
6450 * the correct offset so that %ebx would point to itself. The
6451 * thing that is tricky is that .-.L66 will point to the
6452 * beginning of the instruction, so we need to further modify
6453 * the operand so that it will point to itself. There are
6454 * other cases where you have something like:
6456 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6458 * and here no correction would be required. Internally in
6459 * the assembler we treat operands of this form as not being
6460 * pcrel since the '.' is explicitly mentioned, and I wonder
6461 * whether it would simplify matters to do it this way. Who
6462 * knows. In earlier versions of the PIC patches, the
6463 * pcrel_adjust field was used to store the correction, but
6464 * since the expression is not pcrel, I felt it would be
6465 * confusing to do it this way. */
6467 if ((reloc_type
== BFD_RELOC_32
6468 || reloc_type
== BFD_RELOC_X86_64_32S
6469 || reloc_type
== BFD_RELOC_64
)
6471 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
6472 && (i
.op
[n
].imms
->X_op
== O_symbol
6473 || (i
.op
[n
].imms
->X_op
== O_add
6474 && ((symbol_get_value_expression
6475 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
6480 if (insn_start_frag
== frag_now
)
6481 add
= (p
- frag_now
->fr_literal
) - insn_start_off
;
6486 add
= insn_start_frag
->fr_fix
- insn_start_off
;
6487 for (fr
= insn_start_frag
->fr_next
;
6488 fr
&& fr
!= frag_now
; fr
= fr
->fr_next
)
6490 add
+= p
- frag_now
->fr_literal
;
6494 reloc_type
= BFD_RELOC_386_GOTPC
;
6496 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
6498 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
6499 i
.op
[n
].imms
->X_add_number
+= add
;
6501 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
6502 i
.op
[n
].imms
, 0, reloc_type
);
6508 /* x86_cons_fix_new is called via the expression parsing code when a
6509 reloc is needed. We use this hook to get the correct .got reloc. */
6510 static enum bfd_reloc_code_real got_reloc
= NO_RELOC
;
6511 static int cons_sign
= -1;
6514 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
6517 enum bfd_reloc_code_real r
= reloc (len
, 0, cons_sign
, got_reloc
);
6519 got_reloc
= NO_RELOC
;
6522 if (exp
->X_op
== O_secrel
)
6524 exp
->X_op
= O_symbol
;
6525 r
= BFD_RELOC_32_SECREL
;
6529 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
6532 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6534 # define lex_got(reloc, adjust, types) NULL
6536 /* Parse operands of the form
6537 <symbol>@GOTOFF+<nnn>
6538 and similar .plt or .got references.
6540 If we find one, set up the correct relocation in RELOC and copy the
6541 input string, minus the `@GOTOFF' into a malloc'd buffer for
6542 parsing by the calling routine. Return this buffer, and if ADJUST
6543 is non-null set it to the length of the string we removed from the
6544 input line. Otherwise return NULL. */
6546 lex_got (enum bfd_reloc_code_real
*rel
,
6548 i386_operand_type
*types
)
6550 /* Some of the relocations depend on the size of what field is to
6551 be relocated. But in our callers i386_immediate and i386_displacement
6552 we don't yet know the operand size (this will be set by insn
6553 matching). Hence we record the word32 relocation here,
6554 and adjust the reloc according to the real size in reloc(). */
6555 static const struct {
6558 const enum bfd_reloc_code_real rel
[2];
6559 const i386_operand_type types64
;
6561 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
6562 BFD_RELOC_X86_64_PLTOFF64
},
6563 OPERAND_TYPE_IMM64
},
6564 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
6565 BFD_RELOC_X86_64_PLT32
},
6566 OPERAND_TYPE_IMM32_32S_DISP32
},
6567 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
6568 BFD_RELOC_X86_64_GOTPLT64
},
6569 OPERAND_TYPE_IMM64_DISP64
},
6570 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
6571 BFD_RELOC_X86_64_GOTOFF64
},
6572 OPERAND_TYPE_IMM64_DISP64
},
6573 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
6574 BFD_RELOC_X86_64_GOTPCREL
},
6575 OPERAND_TYPE_IMM32_32S_DISP32
},
6576 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
6577 BFD_RELOC_X86_64_TLSGD
},
6578 OPERAND_TYPE_IMM32_32S_DISP32
},
6579 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
6580 _dummy_first_bfd_reloc_code_real
},
6581 OPERAND_TYPE_NONE
},
6582 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
6583 BFD_RELOC_X86_64_TLSLD
},
6584 OPERAND_TYPE_IMM32_32S_DISP32
},
6585 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
6586 BFD_RELOC_X86_64_GOTTPOFF
},
6587 OPERAND_TYPE_IMM32_32S_DISP32
},
6588 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
6589 BFD_RELOC_X86_64_TPOFF32
},
6590 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
6591 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
6592 _dummy_first_bfd_reloc_code_real
},
6593 OPERAND_TYPE_NONE
},
6594 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
6595 BFD_RELOC_X86_64_DTPOFF32
},
6596 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
6597 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
6598 _dummy_first_bfd_reloc_code_real
},
6599 OPERAND_TYPE_NONE
},
6600 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
6601 _dummy_first_bfd_reloc_code_real
},
6602 OPERAND_TYPE_NONE
},
6603 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
6604 BFD_RELOC_X86_64_GOT32
},
6605 OPERAND_TYPE_IMM32_32S_64_DISP32
},
6606 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
6607 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
6608 OPERAND_TYPE_IMM32_32S_DISP32
},
6609 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
6610 BFD_RELOC_X86_64_TLSDESC_CALL
},
6611 OPERAND_TYPE_IMM32_32S_DISP32
},
6616 #if defined (OBJ_MAYBE_ELF)
6621 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
6622 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
6625 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
6627 int len
= gotrel
[j
].len
;
6628 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
6630 if (gotrel
[j
].rel
[object_64bit
] != 0)
6633 char *tmpbuf
, *past_reloc
;
6635 *rel
= gotrel
[j
].rel
[object_64bit
];
6641 if (flag_code
!= CODE_64BIT
)
6643 types
->bitfield
.imm32
= 1;
6644 types
->bitfield
.disp32
= 1;
6647 *types
= gotrel
[j
].types64
;
6650 if (GOT_symbol
== NULL
)
6651 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
6653 /* The length of the first part of our input line. */
6654 first
= cp
- input_line_pointer
;
6656 /* The second part goes from after the reloc token until
6657 (and including) an end_of_line char or comma. */
6658 past_reloc
= cp
+ 1 + len
;
6660 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
6662 second
= cp
+ 1 - past_reloc
;
6664 /* Allocate and copy string. The trailing NUL shouldn't
6665 be necessary, but be safe. */
6666 tmpbuf
= (char *) xmalloc (first
+ second
+ 2);
6667 memcpy (tmpbuf
, input_line_pointer
, first
);
6668 if (second
!= 0 && *past_reloc
!= ' ')
6669 /* Replace the relocation token with ' ', so that
6670 errors like foo@GOTOFF1 will be detected. */
6671 tmpbuf
[first
++] = ' ';
6672 memcpy (tmpbuf
+ first
, past_reloc
, second
);
6673 tmpbuf
[first
+ second
] = '\0';
6677 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6678 gotrel
[j
].str
, 1 << (5 + object_64bit
));
6683 /* Might be a symbol version string. Don't as_bad here. */
6689 x86_cons (expressionS
*exp
, int size
)
6691 intel_syntax
= -intel_syntax
;
6694 if (size
== 4 || (object_64bit
&& size
== 8))
6696 /* Handle @GOTOFF and the like in an expression. */
6698 char *gotfree_input_line
;
6701 save
= input_line_pointer
;
6702 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
6703 if (gotfree_input_line
)
6704 input_line_pointer
= gotfree_input_line
;
6708 if (gotfree_input_line
)
6710 /* expression () has merrily parsed up to the end of line,
6711 or a comma - in the wrong buffer. Transfer how far
6712 input_line_pointer has moved to the right buffer. */
6713 input_line_pointer
= (save
6714 + (input_line_pointer
- gotfree_input_line
)
6716 free (gotfree_input_line
);
6717 if (exp
->X_op
== O_constant
6718 || exp
->X_op
== O_absent
6719 || exp
->X_op
== O_illegal
6720 || exp
->X_op
== O_register
6721 || exp
->X_op
== O_big
)
6723 char c
= *input_line_pointer
;
6724 *input_line_pointer
= 0;
6725 as_bad (_("missing or invalid expression `%s'"), save
);
6726 *input_line_pointer
= c
;
6733 intel_syntax
= -intel_syntax
;
6736 i386_intel_simplify (exp
);
6740 signed_cons (int size
)
6742 if (flag_code
== CODE_64BIT
)
6750 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
6757 if (exp
.X_op
== O_symbol
)
6758 exp
.X_op
= O_secrel
;
6760 emit_expr (&exp
, 4);
6762 while (*input_line_pointer
++ == ',');
6764 input_line_pointer
--;
6765 demand_empty_rest_of_line ();
6770 i386_immediate (char *imm_start
)
6772 char *save_input_line_pointer
;
6773 char *gotfree_input_line
;
6776 i386_operand_type types
;
6778 operand_type_set (&types
, ~0);
6780 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
6782 as_bad (_("at most %d immediate operands are allowed"),
6783 MAX_IMMEDIATE_OPERANDS
);
6787 exp
= &im_expressions
[i
.imm_operands
++];
6788 i
.op
[this_operand
].imms
= exp
;
6790 if (is_space_char (*imm_start
))
6793 save_input_line_pointer
= input_line_pointer
;
6794 input_line_pointer
= imm_start
;
6796 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
6797 if (gotfree_input_line
)
6798 input_line_pointer
= gotfree_input_line
;
6800 exp_seg
= expression (exp
);
6803 if (*input_line_pointer
)
6804 as_bad (_("junk `%s' after expression"), input_line_pointer
);
6806 input_line_pointer
= save_input_line_pointer
;
6807 if (gotfree_input_line
)
6809 free (gotfree_input_line
);
6811 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
6812 exp
->X_op
= O_illegal
;
6815 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
6819 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
6820 i386_operand_type types
, const char *imm_start
)
6822 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
6825 as_bad (_("missing or invalid immediate expression `%s'"),
6829 else if (exp
->X_op
== O_constant
)
6831 /* Size it properly later. */
6832 i
.types
[this_operand
].bitfield
.imm64
= 1;
6833 /* If not 64bit, sign extend val. */
6834 if (flag_code
!= CODE_64BIT
6835 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
6837 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
6839 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6840 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
6841 && exp_seg
!= absolute_section
6842 && exp_seg
!= text_section
6843 && exp_seg
!= data_section
6844 && exp_seg
!= bss_section
6845 && exp_seg
!= undefined_section
6846 && !bfd_is_com_section (exp_seg
))
6848 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
6852 else if (!intel_syntax
&& exp
->X_op
== O_register
)
6855 as_bad (_("illegal immediate register operand %s"), imm_start
);
6860 /* This is an address. The size of the address will be
6861 determined later, depending on destination register,
6862 suffix, or the default for the section. */
6863 i
.types
[this_operand
].bitfield
.imm8
= 1;
6864 i
.types
[this_operand
].bitfield
.imm16
= 1;
6865 i
.types
[this_operand
].bitfield
.imm32
= 1;
6866 i
.types
[this_operand
].bitfield
.imm32s
= 1;
6867 i
.types
[this_operand
].bitfield
.imm64
= 1;
6868 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
6876 i386_scale (char *scale
)
6879 char *save
= input_line_pointer
;
6881 input_line_pointer
= scale
;
6882 val
= get_absolute_expression ();
6887 i
.log2_scale_factor
= 0;
6890 i
.log2_scale_factor
= 1;
6893 i
.log2_scale_factor
= 2;
6896 i
.log2_scale_factor
= 3;
6900 char sep
= *input_line_pointer
;
6902 *input_line_pointer
= '\0';
6903 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6905 *input_line_pointer
= sep
;
6906 input_line_pointer
= save
;
6910 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
6912 as_warn (_("scale factor of %d without an index register"),
6913 1 << i
.log2_scale_factor
);
6914 i
.log2_scale_factor
= 0;
6916 scale
= input_line_pointer
;
6917 input_line_pointer
= save
;
6922 i386_displacement (char *disp_start
, char *disp_end
)
6926 char *save_input_line_pointer
;
6927 char *gotfree_input_line
;
6929 i386_operand_type bigdisp
, types
= anydisp
;
6932 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
6934 as_bad (_("at most %d displacement operands are allowed"),
6935 MAX_MEMORY_OPERANDS
);
6939 operand_type_set (&bigdisp
, 0);
6940 if ((i
.types
[this_operand
].bitfield
.jumpabsolute
)
6941 || (!current_templates
->start
->opcode_modifier
.jump
6942 && !current_templates
->start
->opcode_modifier
.jumpdword
))
6944 bigdisp
.bitfield
.disp32
= 1;
6945 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
6946 if (flag_code
== CODE_64BIT
)
6950 bigdisp
.bitfield
.disp32s
= 1;
6951 bigdisp
.bitfield
.disp64
= 1;
6954 else if ((flag_code
== CODE_16BIT
) ^ override
)
6956 bigdisp
.bitfield
.disp32
= 0;
6957 bigdisp
.bitfield
.disp16
= 1;
6962 /* For PC-relative branches, the width of the displacement
6963 is dependent upon data size, not address size. */
6964 override
= (i
.prefix
[DATA_PREFIX
] != 0);
6965 if (flag_code
== CODE_64BIT
)
6967 if (override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
6968 bigdisp
.bitfield
.disp16
= 1;
6971 bigdisp
.bitfield
.disp32
= 1;
6972 bigdisp
.bitfield
.disp32s
= 1;
6978 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
6980 : LONG_MNEM_SUFFIX
));
6981 bigdisp
.bitfield
.disp32
= 1;
6982 if ((flag_code
== CODE_16BIT
) ^ override
)
6984 bigdisp
.bitfield
.disp32
= 0;
6985 bigdisp
.bitfield
.disp16
= 1;
6989 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
6992 exp
= &disp_expressions
[i
.disp_operands
];
6993 i
.op
[this_operand
].disps
= exp
;
6995 save_input_line_pointer
= input_line_pointer
;
6996 input_line_pointer
= disp_start
;
6997 END_STRING_AND_SAVE (disp_end
);
6999 #ifndef GCC_ASM_O_HACK
7000 #define GCC_ASM_O_HACK 0
7003 END_STRING_AND_SAVE (disp_end
+ 1);
7004 if (i
.types
[this_operand
].bitfield
.baseIndex
7005 && displacement_string_end
[-1] == '+')
7007 /* This hack is to avoid a warning when using the "o"
7008 constraint within gcc asm statements.
7011 #define _set_tssldt_desc(n,addr,limit,type) \
7012 __asm__ __volatile__ ( \
7014 "movw %w1,2+%0\n\t" \
7016 "movb %b1,4+%0\n\t" \
7017 "movb %4,5+%0\n\t" \
7018 "movb $0,6+%0\n\t" \
7019 "movb %h1,7+%0\n\t" \
7021 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7023 This works great except that the output assembler ends
7024 up looking a bit weird if it turns out that there is
7025 no offset. You end up producing code that looks like:
7038 So here we provide the missing zero. */
7040 *displacement_string_end
= '0';
7043 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
7044 if (gotfree_input_line
)
7045 input_line_pointer
= gotfree_input_line
;
7047 exp_seg
= expression (exp
);
7050 if (*input_line_pointer
)
7051 as_bad (_("junk `%s' after expression"), input_line_pointer
);
7053 RESTORE_END_STRING (disp_end
+ 1);
7055 input_line_pointer
= save_input_line_pointer
;
7056 if (gotfree_input_line
)
7058 free (gotfree_input_line
);
7060 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
7061 exp
->X_op
= O_illegal
;
7064 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
7066 RESTORE_END_STRING (disp_end
);
7072 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
7073 i386_operand_type types
, const char *disp_start
)
7075 i386_operand_type bigdisp
;
7078 /* We do this to make sure that the section symbol is in
7079 the symbol table. We will ultimately change the relocation
7080 to be relative to the beginning of the section. */
7081 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
7082 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
7083 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
7085 if (exp
->X_op
!= O_symbol
)
7088 if (S_IS_LOCAL (exp
->X_add_symbol
)
7089 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
7090 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
7091 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
7092 exp
->X_op
= O_subtract
;
7093 exp
->X_op_symbol
= GOT_symbol
;
7094 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
7095 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
7096 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
7097 i
.reloc
[this_operand
] = BFD_RELOC_64
;
7099 i
.reloc
[this_operand
] = BFD_RELOC_32
;
7102 else if (exp
->X_op
== O_absent
7103 || exp
->X_op
== O_illegal
7104 || exp
->X_op
== O_big
)
7107 as_bad (_("missing or invalid displacement expression `%s'"),
7112 else if (flag_code
== CODE_64BIT
7113 && !i
.prefix
[ADDR_PREFIX
]
7114 && exp
->X_op
== O_constant
)
7116 /* Since displacement is signed extended to 64bit, don't allow
7117 disp32 and turn off disp32s if they are out of range. */
7118 i
.types
[this_operand
].bitfield
.disp32
= 0;
7119 if (!fits_in_signed_long (exp
->X_add_number
))
7121 i
.types
[this_operand
].bitfield
.disp32s
= 0;
7122 if (i
.types
[this_operand
].bitfield
.baseindex
)
7124 as_bad (_("0x%lx out range of signed 32bit displacement"),
7125 (long) exp
->X_add_number
);
7131 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7132 else if (exp
->X_op
!= O_constant
7133 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
7134 && exp_seg
!= absolute_section
7135 && exp_seg
!= text_section
7136 && exp_seg
!= data_section
7137 && exp_seg
!= bss_section
7138 && exp_seg
!= undefined_section
7139 && !bfd_is_com_section (exp_seg
))
7141 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
7146 /* Check if this is a displacement only operand. */
7147 bigdisp
= i
.types
[this_operand
];
7148 bigdisp
.bitfield
.disp8
= 0;
7149 bigdisp
.bitfield
.disp16
= 0;
7150 bigdisp
.bitfield
.disp32
= 0;
7151 bigdisp
.bitfield
.disp32s
= 0;
7152 bigdisp
.bitfield
.disp64
= 0;
7153 if (operand_type_all_zero (&bigdisp
))
7154 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
7160 /* Make sure the memory operand we've been dealt is valid.
7161 Return 1 on success, 0 on a failure. */
7164 i386_index_check (const char *operand_string
)
7167 const char *kind
= "base/index";
7168 #if INFER_ADDR_PREFIX
7174 if (current_templates
->start
->opcode_modifier
.isstring
7175 && !current_templates
->start
->opcode_modifier
.immext
7176 && (current_templates
->end
[-1].opcode_modifier
.isstring
7179 /* Memory operands of string insns are special in that they only allow
7180 a single register (rDI, rSI, or rBX) as their memory address. */
7181 unsigned int expected
;
7183 kind
= "string address";
7185 if (current_templates
->start
->opcode_modifier
.w
)
7187 i386_operand_type type
= current_templates
->end
[-1].operand_types
[0];
7189 if (!type
.bitfield
.baseindex
7190 || ((!i
.mem_operands
!= !intel_syntax
)
7191 && current_templates
->end
[-1].operand_types
[1]
7192 .bitfield
.baseindex
))
7193 type
= current_templates
->end
[-1].operand_types
[1];
7194 expected
= type
.bitfield
.esseg
? 7 /* rDI */ : 6 /* rSI */;
7197 expected
= 3 /* rBX */;
7199 if (!i
.base_reg
|| i
.index_reg
7200 || operand_type_check (i
.types
[this_operand
], disp
))
7202 else if (!(flag_code
== CODE_64BIT
7203 ? i
.prefix
[ADDR_PREFIX
]
7204 ? i
.base_reg
->reg_type
.bitfield
.reg32
7205 : i
.base_reg
->reg_type
.bitfield
.reg64
7206 : (flag_code
== CODE_16BIT
) ^ !i
.prefix
[ADDR_PREFIX
]
7207 ? i
.base_reg
->reg_type
.bitfield
.reg32
7208 : i
.base_reg
->reg_type
.bitfield
.reg16
))
7210 else if (i
.base_reg
->reg_num
!= expected
)
7217 for (j
= 0; j
< i386_regtab_size
; ++j
)
7218 if ((flag_code
== CODE_64BIT
7219 ? i
.prefix
[ADDR_PREFIX
]
7220 ? i386_regtab
[j
].reg_type
.bitfield
.reg32
7221 : i386_regtab
[j
].reg_type
.bitfield
.reg64
7222 : (flag_code
== CODE_16BIT
) ^ !i
.prefix
[ADDR_PREFIX
]
7223 ? i386_regtab
[j
].reg_type
.bitfield
.reg32
7224 : i386_regtab
[j
].reg_type
.bitfield
.reg16
)
7225 && i386_regtab
[j
].reg_num
== expected
)
7227 gas_assert (j
< i386_regtab_size
);
7228 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7230 intel_syntax
? '[' : '(',
7232 i386_regtab
[j
].reg_name
,
7233 intel_syntax
? ']' : ')');
7237 else if (flag_code
== CODE_64BIT
)
7240 && ((i
.prefix
[ADDR_PREFIX
] == 0
7241 && !i
.base_reg
->reg_type
.bitfield
.reg64
)
7242 || (i
.prefix
[ADDR_PREFIX
]
7243 && !i
.base_reg
->reg_type
.bitfield
.reg32
))
7245 || i
.base_reg
->reg_num
!=
7246 (i
.prefix
[ADDR_PREFIX
] == 0 ? RegRip
: RegEip
)))
7248 && !(i
.index_reg
->reg_type
.bitfield
.regxmm
7249 || i
.index_reg
->reg_type
.bitfield
.regymm
)
7250 && (!i
.index_reg
->reg_type
.bitfield
.baseindex
7251 || (i
.prefix
[ADDR_PREFIX
] == 0
7252 && i
.index_reg
->reg_num
!= RegRiz
7253 && !i
.index_reg
->reg_type
.bitfield
.reg64
7255 || (i
.prefix
[ADDR_PREFIX
]
7256 && i
.index_reg
->reg_num
!= RegEiz
7257 && !i
.index_reg
->reg_type
.bitfield
.reg32
))))
7262 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[ADDR_PREFIX
] != 0))
7266 && (!i
.base_reg
->reg_type
.bitfield
.reg16
7267 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
7269 && (!i
.index_reg
->reg_type
.bitfield
.reg16
7270 || !i
.index_reg
->reg_type
.bitfield
.baseindex
7272 && i
.base_reg
->reg_num
< 6
7273 && i
.index_reg
->reg_num
>= 6
7274 && i
.log2_scale_factor
== 0))))
7281 && !i
.base_reg
->reg_type
.bitfield
.reg32
)
7283 && !i
.index_reg
->reg_type
.bitfield
.regxmm
7284 && !i
.index_reg
->reg_type
.bitfield
.regymm
7285 && ((!i
.index_reg
->reg_type
.bitfield
.reg32
7286 && i
.index_reg
->reg_num
!= RegEiz
)
7287 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
7293 #if INFER_ADDR_PREFIX
7294 if (!i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
7296 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
7298 /* Change the size of any displacement too. At most one of
7299 Disp16 or Disp32 is set.
7300 FIXME. There doesn't seem to be any real need for separate
7301 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7302 Removing them would probably clean up the code quite a lot. */
7303 if (flag_code
!= CODE_64BIT
7304 && (i
.types
[this_operand
].bitfield
.disp16
7305 || i
.types
[this_operand
].bitfield
.disp32
))
7306 i
.types
[this_operand
]
7307 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
7312 as_bad (_("`%s' is not a valid %s expression"),
7317 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7319 flag_code_names
[i
.prefix
[ADDR_PREFIX
]
7320 ? flag_code
== CODE_32BIT
7329 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7333 i386_att_operand (char *operand_string
)
7337 char *op_string
= operand_string
;
7339 if (is_space_char (*op_string
))
7342 /* We check for an absolute prefix (differentiating,
7343 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
7344 if (*op_string
== ABSOLUTE_PREFIX
)
7347 if (is_space_char (*op_string
))
7349 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
7352 /* Check if operand is a register. */
7353 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
7355 i386_operand_type temp
;
7357 /* Check for a segment override by searching for ':' after a
7358 segment register. */
7360 if (is_space_char (*op_string
))
7362 if (*op_string
== ':'
7363 && (r
->reg_type
.bitfield
.sreg2
7364 || r
->reg_type
.bitfield
.sreg3
))
7369 i
.seg
[i
.mem_operands
] = &es
;
7372 i
.seg
[i
.mem_operands
] = &cs
;
7375 i
.seg
[i
.mem_operands
] = &ss
;
7378 i
.seg
[i
.mem_operands
] = &ds
;
7381 i
.seg
[i
.mem_operands
] = &fs
;
7384 i
.seg
[i
.mem_operands
] = &gs
;
7388 /* Skip the ':' and whitespace. */
7390 if (is_space_char (*op_string
))
7393 if (!is_digit_char (*op_string
)
7394 && !is_identifier_char (*op_string
)
7395 && *op_string
!= '('
7396 && *op_string
!= ABSOLUTE_PREFIX
)
7398 as_bad (_("bad memory operand `%s'"), op_string
);
7401 /* Handle case of %es:*foo. */
7402 if (*op_string
== ABSOLUTE_PREFIX
)
7405 if (is_space_char (*op_string
))
7407 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
7409 goto do_memory_reference
;
7413 as_bad (_("junk `%s' after register"), op_string
);
7417 temp
.bitfield
.baseindex
= 0;
7418 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
7420 i
.types
[this_operand
].bitfield
.unspecified
= 0;
7421 i
.op
[this_operand
].regs
= r
;
7424 else if (*op_string
== REGISTER_PREFIX
)
7426 as_bad (_("bad register name `%s'"), op_string
);
7429 else if (*op_string
== IMMEDIATE_PREFIX
)
7432 if (i
.types
[this_operand
].bitfield
.jumpabsolute
)
7434 as_bad (_("immediate operand illegal with absolute jump"));
7437 if (!i386_immediate (op_string
))
7440 else if (is_digit_char (*op_string
)
7441 || is_identifier_char (*op_string
)
7442 || *op_string
== '(')
7444 /* This is a memory reference of some sort. */
7447 /* Start and end of displacement string expression (if found). */
7448 char *displacement_string_start
;
7449 char *displacement_string_end
;
7451 do_memory_reference
:
7452 if ((i
.mem_operands
== 1
7453 && !current_templates
->start
->opcode_modifier
.isstring
)
7454 || i
.mem_operands
== 2)
7456 as_bad (_("too many memory references for `%s'"),
7457 current_templates
->start
->name
);
7461 /* Check for base index form. We detect the base index form by
7462 looking for an ')' at the end of the operand, searching
7463 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7465 base_string
= op_string
+ strlen (op_string
);
7468 if (is_space_char (*base_string
))
7471 /* If we only have a displacement, set-up for it to be parsed later. */
7472 displacement_string_start
= op_string
;
7473 displacement_string_end
= base_string
+ 1;
7475 if (*base_string
== ')')
7478 unsigned int parens_balanced
= 1;
7479 /* We've already checked that the number of left & right ()'s are
7480 equal, so this loop will not be infinite. */
7484 if (*base_string
== ')')
7486 if (*base_string
== '(')
7489 while (parens_balanced
);
7491 temp_string
= base_string
;
7493 /* Skip past '(' and whitespace. */
7495 if (is_space_char (*base_string
))
7498 if (*base_string
== ','
7499 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
7502 displacement_string_end
= temp_string
;
7504 i
.types
[this_operand
].bitfield
.baseindex
= 1;
7508 base_string
= end_op
;
7509 if (is_space_char (*base_string
))
7513 /* There may be an index reg or scale factor here. */
7514 if (*base_string
== ',')
7517 if (is_space_char (*base_string
))
7520 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
7523 base_string
= end_op
;
7524 if (is_space_char (*base_string
))
7526 if (*base_string
== ',')
7529 if (is_space_char (*base_string
))
7532 else if (*base_string
!= ')')
7534 as_bad (_("expecting `,' or `)' "
7535 "after index register in `%s'"),
7540 else if (*base_string
== REGISTER_PREFIX
)
7542 as_bad (_("bad register name `%s'"), base_string
);
7546 /* Check for scale factor. */
7547 if (*base_string
!= ')')
7549 char *end_scale
= i386_scale (base_string
);
7554 base_string
= end_scale
;
7555 if (is_space_char (*base_string
))
7557 if (*base_string
!= ')')
7559 as_bad (_("expecting `)' "
7560 "after scale factor in `%s'"),
7565 else if (!i
.index_reg
)
7567 as_bad (_("expecting index register or scale factor "
7568 "after `,'; got '%c'"),
7573 else if (*base_string
!= ')')
7575 as_bad (_("expecting `,' or `)' "
7576 "after base register in `%s'"),
7581 else if (*base_string
== REGISTER_PREFIX
)
7583 as_bad (_("bad register name `%s'"), base_string
);
7588 /* If there's an expression beginning the operand, parse it,
7589 assuming displacement_string_start and
7590 displacement_string_end are meaningful. */
7591 if (displacement_string_start
!= displacement_string_end
)
7593 if (!i386_displacement (displacement_string_start
,
7594 displacement_string_end
))
7598 /* Special case for (%dx) while doing input/output op. */
7600 && operand_type_equal (&i
.base_reg
->reg_type
,
7601 ®16_inoutportreg
)
7603 && i
.log2_scale_factor
== 0
7604 && i
.seg
[i
.mem_operands
] == 0
7605 && !operand_type_check (i
.types
[this_operand
], disp
))
7607 i
.types
[this_operand
] = inoutportreg
;
7611 if (i386_index_check (operand_string
) == 0)
7613 i
.types
[this_operand
].bitfield
.mem
= 1;
7618 /* It's not a memory operand; argh! */
7619 as_bad (_("invalid char %s beginning operand %d `%s'"),
7620 output_invalid (*op_string
),
7625 return 1; /* Normal return. */
7628 /* md_estimate_size_before_relax()
7630 Called just before relax() for rs_machine_dependent frags. The x86
7631 assembler uses these frags to handle variable size jump
7634 Any symbol that is now undefined will not become defined.
7635 Return the correct fr_subtype in the frag.
7636 Return the initial "guess for variable size of frag" to caller.
7637 The guess is actually the growth beyond the fixed part. Whatever
7638 we do to grow the fixed or variable part contributes to our
7642 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
7644 /* We've already got fragP->fr_subtype right; all we have to do is
7645 check for un-relaxable symbols. On an ELF system, we can't relax
7646 an externally visible symbol, because it may be overridden by a
7648 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
7649 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7651 && (S_IS_EXTERNAL (fragP
->fr_symbol
)
7652 || S_IS_WEAK (fragP
->fr_symbol
)
7653 || ((symbol_get_bfdsym (fragP
->fr_symbol
)->flags
7654 & BSF_GNU_INDIRECT_FUNCTION
))))
7656 #if defined (OBJ_COFF) && defined (TE_PE)
7657 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
7658 && S_IS_WEAK (fragP
->fr_symbol
))
7662 /* Symbol is undefined in this segment, or we need to keep a
7663 reloc so that weak symbols can be overridden. */
7664 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
7665 enum bfd_reloc_code_real reloc_type
;
7666 unsigned char *opcode
;
7669 if (fragP
->fr_var
!= NO_RELOC
)
7670 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
7672 reloc_type
= BFD_RELOC_16_PCREL
;
7674 reloc_type
= BFD_RELOC_32_PCREL
;
7676 old_fr_fix
= fragP
->fr_fix
;
7677 opcode
= (unsigned char *) fragP
->fr_opcode
;
7679 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
7682 /* Make jmp (0xeb) a (d)word displacement jump. */
7684 fragP
->fr_fix
+= size
;
7685 fix_new (fragP
, old_fr_fix
, size
,
7687 fragP
->fr_offset
, 1,
7693 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
7695 /* Negate the condition, and branch past an
7696 unconditional jump. */
7699 /* Insert an unconditional jump. */
7701 /* We added two extra opcode bytes, and have a two byte
7703 fragP
->fr_fix
+= 2 + 2;
7704 fix_new (fragP
, old_fr_fix
+ 2, 2,
7706 fragP
->fr_offset
, 1,
7713 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
7718 fixP
= fix_new (fragP
, old_fr_fix
, 1,
7720 fragP
->fr_offset
, 1,
7722 fixP
->fx_signed
= 1;
7726 /* This changes the byte-displacement jump 0x7N
7727 to the (d)word-displacement jump 0x0f,0x8N. */
7728 opcode
[1] = opcode
[0] + 0x10;
7729 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
7730 /* We've added an opcode byte. */
7731 fragP
->fr_fix
+= 1 + size
;
7732 fix_new (fragP
, old_fr_fix
+ 1, size
,
7734 fragP
->fr_offset
, 1,
7739 BAD_CASE (fragP
->fr_subtype
);
7743 return fragP
->fr_fix
- old_fr_fix
;
7746 /* Guess size depending on current relax state. Initially the relax
7747 state will correspond to a short jump and we return 1, because
7748 the variable part of the frag (the branch offset) is one byte
7749 long. However, we can relax a section more than once and in that
7750 case we must either set fr_subtype back to the unrelaxed state,
7751 or return the value for the appropriate branch. */
7752 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
7755 /* Called after relax() is finished.
7757 In: Address of frag.
7758 fr_type == rs_machine_dependent.
7759 fr_subtype is what the address relaxed to.
7761 Out: Any fixSs and constants are set up.
7762 Caller will turn frag into a ".space 0". */
7765 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
7768 unsigned char *opcode
;
7769 unsigned char *where_to_put_displacement
= NULL
;
7770 offsetT target_address
;
7771 offsetT opcode_address
;
7772 unsigned int extension
= 0;
7773 offsetT displacement_from_opcode_start
;
7775 opcode
= (unsigned char *) fragP
->fr_opcode
;
7777 /* Address we want to reach in file space. */
7778 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
7780 /* Address opcode resides at in file space. */
7781 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
7783 /* Displacement from opcode start to fill into instruction. */
7784 displacement_from_opcode_start
= target_address
- opcode_address
;
7786 if ((fragP
->fr_subtype
& BIG
) == 0)
7788 /* Don't have to change opcode. */
7789 extension
= 1; /* 1 opcode + 1 displacement */
7790 where_to_put_displacement
= &opcode
[1];
7794 if (no_cond_jump_promotion
7795 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
7796 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
7797 _("long jump required"));
7799 switch (fragP
->fr_subtype
)
7801 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
7802 extension
= 4; /* 1 opcode + 4 displacement */
7804 where_to_put_displacement
= &opcode
[1];
7807 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
7808 extension
= 2; /* 1 opcode + 2 displacement */
7810 where_to_put_displacement
= &opcode
[1];
7813 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
7814 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
7815 extension
= 5; /* 2 opcode + 4 displacement */
7816 opcode
[1] = opcode
[0] + 0x10;
7817 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
7818 where_to_put_displacement
= &opcode
[2];
7821 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
7822 extension
= 3; /* 2 opcode + 2 displacement */
7823 opcode
[1] = opcode
[0] + 0x10;
7824 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
7825 where_to_put_displacement
= &opcode
[2];
7828 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
7833 where_to_put_displacement
= &opcode
[3];
7837 BAD_CASE (fragP
->fr_subtype
);
7842 /* If size if less then four we are sure that the operand fits,
7843 but if it's 4, then it could be that the displacement is larger
7845 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
7847 && ((addressT
) (displacement_from_opcode_start
- extension
7848 + ((addressT
) 1 << 31))
7849 > (((addressT
) 2 << 31) - 1)))
7851 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
7852 _("jump target out of range"));
7853 /* Make us emit 0. */
7854 displacement_from_opcode_start
= extension
;
7856 /* Now put displacement after opcode. */
7857 md_number_to_chars ((char *) where_to_put_displacement
,
7858 (valueT
) (displacement_from_opcode_start
- extension
),
7859 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
7860 fragP
->fr_fix
+= extension
;
7863 /* Apply a fixup (fixP) to segment data, once it has been determined
7864 by our caller that we have all the info we need to fix it up.
7866 Parameter valP is the pointer to the value of the bits.
7868 On the 386, immediates, displacements, and data pointers are all in
7869 the same (little-endian) format, so we don't need to care about which
7873 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
7875 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
7876 valueT value
= *valP
;
7878 #if !defined (TE_Mach)
7881 switch (fixP
->fx_r_type
)
7887 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
7890 case BFD_RELOC_X86_64_32S
:
7891 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
7894 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
7897 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
7902 if (fixP
->fx_addsy
!= NULL
7903 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
7904 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
7905 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
7906 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
7907 && !use_rela_relocations
)
7909 /* This is a hack. There should be a better way to handle this.
7910 This covers for the fact that bfd_install_relocation will
7911 subtract the current location (for partial_inplace, PC relative
7912 relocations); see more below. */
7916 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
7919 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
7921 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7924 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
7927 || (symbol_section_p (fixP
->fx_addsy
)
7928 && sym_seg
!= absolute_section
))
7929 && !generic_force_reloc (fixP
))
7931 /* Yes, we add the values in twice. This is because
7932 bfd_install_relocation subtracts them out again. I think
7933 bfd_install_relocation is broken, but I don't dare change
7935 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
7939 #if defined (OBJ_COFF) && defined (TE_PE)
7940 /* For some reason, the PE format does not store a
7941 section address offset for a PC relative symbol. */
7942 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
7943 || S_IS_WEAK (fixP
->fx_addsy
))
7944 value
+= md_pcrel_from (fixP
);
7947 #if defined (OBJ_COFF) && defined (TE_PE)
7948 if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
7950 value
-= S_GET_VALUE (fixP
->fx_addsy
);
7954 /* Fix a few things - the dynamic linker expects certain values here,
7955 and we must not disappoint it. */
7956 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7957 if (IS_ELF
&& fixP
->fx_addsy
)
7958 switch (fixP
->fx_r_type
)
7960 case BFD_RELOC_386_PLT32
:
7961 case BFD_RELOC_X86_64_PLT32
:
7962 /* Make the jump instruction point to the address of the operand. At
7963 runtime we merely add the offset to the actual PLT entry. */
7967 case BFD_RELOC_386_TLS_GD
:
7968 case BFD_RELOC_386_TLS_LDM
:
7969 case BFD_RELOC_386_TLS_IE_32
:
7970 case BFD_RELOC_386_TLS_IE
:
7971 case BFD_RELOC_386_TLS_GOTIE
:
7972 case BFD_RELOC_386_TLS_GOTDESC
:
7973 case BFD_RELOC_X86_64_TLSGD
:
7974 case BFD_RELOC_X86_64_TLSLD
:
7975 case BFD_RELOC_X86_64_GOTTPOFF
:
7976 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
7977 value
= 0; /* Fully resolved at runtime. No addend. */
7979 case BFD_RELOC_386_TLS_LE
:
7980 case BFD_RELOC_386_TLS_LDO_32
:
7981 case BFD_RELOC_386_TLS_LE_32
:
7982 case BFD_RELOC_X86_64_DTPOFF32
:
7983 case BFD_RELOC_X86_64_DTPOFF64
:
7984 case BFD_RELOC_X86_64_TPOFF32
:
7985 case BFD_RELOC_X86_64_TPOFF64
:
7986 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
7989 case BFD_RELOC_386_TLS_DESC_CALL
:
7990 case BFD_RELOC_X86_64_TLSDESC_CALL
:
7991 value
= 0; /* Fully resolved at runtime. No addend. */
7992 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
7996 case BFD_RELOC_386_GOT32
:
7997 case BFD_RELOC_X86_64_GOT32
:
7998 value
= 0; /* Fully resolved at runtime. No addend. */
8001 case BFD_RELOC_VTABLE_INHERIT
:
8002 case BFD_RELOC_VTABLE_ENTRY
:
8009 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8011 #endif /* !defined (TE_Mach) */
8013 /* Are we finished with this relocation now? */
8014 if (fixP
->fx_addsy
== NULL
)
8016 #if defined (OBJ_COFF) && defined (TE_PE)
8017 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
8020 /* Remember value for tc_gen_reloc. */
8021 fixP
->fx_addnumber
= value
;
8022 /* Clear out the frag for now. */
8026 else if (use_rela_relocations
)
8028 fixP
->fx_no_overflow
= 1;
8029 /* Remember value for tc_gen_reloc. */
8030 fixP
->fx_addnumber
= value
;
8034 md_number_to_chars (p
, value
, fixP
->fx_size
);
8038 md_atof (int type
, char *litP
, int *sizeP
)
8040 /* This outputs the LITTLENUMs in REVERSE order;
8041 in accord with the bigendian 386. */
8042 return ieee_md_atof (type
, litP
, sizeP
, FALSE
);
/* Buffer for output_invalid: large enough for "(0x%x)" of an
   unsigned char plus the NUL terminator.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Format an invalid input character C for a diagnostic: printable
   characters are quoted, others shown as a hex escape.  Returns a
   pointer to a static buffer (not reentrant).  */

static char *
output_invalid (int c)
{
  if (ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  return output_invalid_buf;
}
8059 /* REG_STRING starts *before* REGISTER_PREFIX. */
8061 static const reg_entry
*
8062 parse_real_register (char *reg_string
, char **end_op
)
8064 char *s
= reg_string
;
8066 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
8069 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8070 if (*s
== REGISTER_PREFIX
)
8073 if (is_space_char (*s
))
8077 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
8079 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
8080 return (const reg_entry
*) NULL
;
8084 /* For naked regs, make sure that we are not dealing with an identifier.
8085 This prevents confusing an identifier like `eax_var' with register
8087 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
8088 return (const reg_entry
*) NULL
;
8092 r
= (const reg_entry
*) hash_find (reg_hash
, reg_name_given
);
8094 /* Handle floating point regs, allowing spaces in the (i) part. */
8095 if (r
== i386_regtab
/* %st is first entry of table */)
8097 if (is_space_char (*s
))
8102 if (is_space_char (*s
))
8104 if (*s
>= '0' && *s
<= '7')
8108 if (is_space_char (*s
))
8113 r
= (const reg_entry
*) hash_find (reg_hash
, "st(0)");
8118 /* We have "%st(" then garbage. */
8119 return (const reg_entry
*) NULL
;
8123 if (r
== NULL
|| allow_pseudo_reg
)
8126 if (operand_type_all_zero (&r
->reg_type
))
8127 return (const reg_entry
*) NULL
;
8129 if ((r
->reg_type
.bitfield
.reg32
8130 || r
->reg_type
.bitfield
.sreg3
8131 || r
->reg_type
.bitfield
.control
8132 || r
->reg_type
.bitfield
.debug
8133 || r
->reg_type
.bitfield
.test
)
8134 && !cpu_arch_flags
.bitfield
.cpui386
)
8135 return (const reg_entry
*) NULL
;
8137 if (r
->reg_type
.bitfield
.floatreg
8138 && !cpu_arch_flags
.bitfield
.cpu8087
8139 && !cpu_arch_flags
.bitfield
.cpu287
8140 && !cpu_arch_flags
.bitfield
.cpu387
)
8141 return (const reg_entry
*) NULL
;
8143 if (r
->reg_type
.bitfield
.regmmx
&& !cpu_arch_flags
.bitfield
.cpummx
)
8144 return (const reg_entry
*) NULL
;
8146 if (r
->reg_type
.bitfield
.regxmm
&& !cpu_arch_flags
.bitfield
.cpusse
)
8147 return (const reg_entry
*) NULL
;
8149 if (r
->reg_type
.bitfield
.regymm
&& !cpu_arch_flags
.bitfield
.cpuavx
)
8150 return (const reg_entry
*) NULL
;
8152 /* Don't allow fake index register unless allow_index_reg isn't 0. */
8153 if (!allow_index_reg
8154 && (r
->reg_num
== RegEiz
|| r
->reg_num
== RegRiz
))
8155 return (const reg_entry
*) NULL
;
8157 if (((r
->reg_flags
& (RegRex64
| RegRex
))
8158 || r
->reg_type
.bitfield
.reg64
)
8159 && (!cpu_arch_flags
.bitfield
.cpulm
8160 || !operand_type_equal (&r
->reg_type
, &control
))
8161 && flag_code
!= CODE_64BIT
)
8162 return (const reg_entry
*) NULL
;
8164 if (r
->reg_type
.bitfield
.sreg3
&& r
->reg_num
== RegFlat
&& !intel_syntax
)
8165 return (const reg_entry
*) NULL
;
8170 /* REG_STRING starts *before* REGISTER_PREFIX. */
8172 static const reg_entry
*
8173 parse_register (char *reg_string
, char **end_op
)
8177 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
8178 r
= parse_real_register (reg_string
, end_op
);
8183 char *save
= input_line_pointer
;
8187 input_line_pointer
= reg_string
;
8188 c
= get_symbol_end ();
8189 symbolP
= symbol_find (reg_string
);
8190 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
8192 const expressionS
*e
= symbol_get_value_expression (symbolP
);
8194 know (e
->X_op
== O_register
);
8195 know (e
->X_add_number
>= 0
8196 && (valueT
) e
->X_add_number
< i386_regtab_size
);
8197 r
= i386_regtab
+ e
->X_add_number
;
8198 *end_op
= input_line_pointer
;
8200 *input_line_pointer
= c
;
8201 input_line_pointer
= save
;
8207 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
8210 char *end
= input_line_pointer
;
8213 r
= parse_register (name
, &input_line_pointer
);
8214 if (r
&& end
<= input_line_pointer
)
8216 *nextcharP
= *input_line_pointer
;
8217 *input_line_pointer
= 0;
8218 e
->X_op
= O_register
;
8219 e
->X_add_number
= r
- i386_regtab
;
8222 input_line_pointer
= end
;
8224 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
8228 md_operand (expressionS
*e
)
8233 switch (*input_line_pointer
)
8235 case REGISTER_PREFIX
:
8236 r
= parse_real_register (input_line_pointer
, &end
);
8239 e
->X_op
= O_register
;
8240 e
->X_add_number
= r
- i386_regtab
;
8241 input_line_pointer
= end
;
8246 gas_assert (intel_syntax
);
8247 end
= input_line_pointer
++;
8249 if (*input_line_pointer
== ']')
8251 ++input_line_pointer
;
8252 e
->X_op_symbol
= make_expr_symbol (e
);
8253 e
->X_add_symbol
= NULL
;
8254 e
->X_add_number
= 0;
8260 input_line_pointer
= end
;
8267 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8268 const char *md_shortopts
= "kVQ:sqn";
8270 const char *md_shortopts
= "qn";
8273 #define OPTION_32 (OPTION_MD_BASE + 0)
8274 #define OPTION_64 (OPTION_MD_BASE + 1)
8275 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8276 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8277 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8278 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8279 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8280 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8281 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8282 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8283 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8284 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8285 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8286 #define OPTION_X32 (OPTION_MD_BASE + 13)
8288 struct option md_longopts
[] =
8290 {"32", no_argument
, NULL
, OPTION_32
},
8291 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8292 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8293 {"64", no_argument
, NULL
, OPTION_64
},
8295 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8296 {"x32", no_argument
, NULL
, OPTION_X32
},
8298 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
8299 {"march", required_argument
, NULL
, OPTION_MARCH
},
8300 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
8301 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
8302 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
8303 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
8304 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
8305 {"mold-gcc", no_argument
, NULL
, OPTION_MOLD_GCC
},
8306 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
8307 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
8308 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
8309 {NULL
, no_argument
, NULL
, 0}
8311 size_t md_longopts_size
= sizeof (md_longopts
);
8314 md_parse_option (int c
, char *arg
)
8322 optimize_align_code
= 0;
8329 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8330 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8331 should be emitted or not. FIXME: Not implemented. */
8335 /* -V: SVR4 argument to print version ID. */
8337 print_version_id ();
8340 /* -k: Ignore for FreeBSD compatibility. */
8345 /* -s: On i386 Solaris, this tells the native assembler to use
8346 .stab instead of .stab.excl. We always use .stab anyhow. */
8349 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8350 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8353 const char **list
, **l
;
8355 list
= bfd_target_list ();
8356 for (l
= list
; *l
!= NULL
; l
++)
8357 if (CONST_STRNEQ (*l
, "elf64-x86-64")
8358 || strcmp (*l
, "coff-x86-64") == 0
8359 || strcmp (*l
, "pe-x86-64") == 0
8360 || strcmp (*l
, "pei-x86-64") == 0
8361 || strcmp (*l
, "mach-o-x86-64") == 0)
8363 default_arch
= "x86_64";
8367 as_fatal (_("no compiled in support for x86_64"));
8373 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8377 const char **list
, **l
;
8379 list
= bfd_target_list ();
8380 for (l
= list
; *l
!= NULL
; l
++)
8381 if (CONST_STRNEQ (*l
, "elf32-x86-64"))
8383 default_arch
= "x86_64:32";
8387 as_fatal (_("no compiled in support for 32bit x86_64"));
8391 as_fatal (_("32bit x86_64 is only supported for ELF"));
8396 default_arch
= "i386";
8400 #ifdef SVR4_COMMENT_CHARS
8405 n
= (char *) xmalloc (strlen (i386_comment_chars
) + 1);
8407 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
8411 i386_comment_chars
= n
;
8417 arch
= xstrdup (arg
);
8421 as_fatal (_("invalid -march= option: `%s'"), arg
);
8422 next
= strchr (arch
, '+');
8425 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
8427 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
8430 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
8433 cpu_arch_name
= cpu_arch
[j
].name
;
8434 cpu_sub_arch_name
= NULL
;
8435 cpu_arch_flags
= cpu_arch
[j
].flags
;
8436 cpu_arch_isa
= cpu_arch
[j
].type
;
8437 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
8438 if (!cpu_arch_tune_set
)
8440 cpu_arch_tune
= cpu_arch_isa
;
8441 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
8445 else if (*cpu_arch
[j
].name
== '.'
8446 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
8448 /* ISA entension. */
8449 i386_cpu_flags flags
;
8451 if (!cpu_arch
[j
].negated
)
8452 flags
= cpu_flags_or (cpu_arch_flags
,
8455 flags
= cpu_flags_and_not (cpu_arch_flags
,
8457 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
8459 if (cpu_sub_arch_name
)
8461 char *name
= cpu_sub_arch_name
;
8462 cpu_sub_arch_name
= concat (name
,
8464 (const char *) NULL
);
8468 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
8469 cpu_arch_flags
= flags
;
8470 cpu_arch_isa_flags
= flags
;
8476 if (j
>= ARRAY_SIZE (cpu_arch
))
8477 as_fatal (_("invalid -march= option: `%s'"), arg
);
8481 while (next
!= NULL
);
8486 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
8487 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
8489 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
8491 cpu_arch_tune_set
= 1;
8492 cpu_arch_tune
= cpu_arch
[j
].type
;
8493 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
8497 if (j
>= ARRAY_SIZE (cpu_arch
))
8498 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
8501 case OPTION_MMNEMONIC
:
8502 if (strcasecmp (arg
, "att") == 0)
8504 else if (strcasecmp (arg
, "intel") == 0)
8507 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
8510 case OPTION_MSYNTAX
:
8511 if (strcasecmp (arg
, "att") == 0)
8513 else if (strcasecmp (arg
, "intel") == 0)
8516 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
8519 case OPTION_MINDEX_REG
:
8520 allow_index_reg
= 1;
8523 case OPTION_MNAKED_REG
:
8524 allow_naked_reg
= 1;
8527 case OPTION_MOLD_GCC
:
8531 case OPTION_MSSE2AVX
:
8535 case OPTION_MSSE_CHECK
:
8536 if (strcasecmp (arg
, "error") == 0)
8537 sse_check
= sse_check_error
;
8538 else if (strcasecmp (arg
, "warning") == 0)
8539 sse_check
= sse_check_warning
;
8540 else if (strcasecmp (arg
, "none") == 0)
8541 sse_check
= sse_check_none
;
8543 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
8546 case OPTION_MAVXSCALAR
:
8547 if (strcasecmp (arg
, "128") == 0)
8549 else if (strcasecmp (arg
, "256") == 0)
8552 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
8561 #define MESSAGE_TEMPLATE \
8565 show_arch (FILE *stream
, int ext
, int check
)
8567 static char message
[] = MESSAGE_TEMPLATE
;
8568 char *start
= message
+ 27;
8570 int size
= sizeof (MESSAGE_TEMPLATE
);
8577 left
= size
- (start
- message
);
8578 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
8580 /* Should it be skipped? */
8581 if (cpu_arch
[j
].skip
)
8584 name
= cpu_arch
[j
].name
;
8585 len
= cpu_arch
[j
].len
;
8588 /* It is an extension. Skip if we aren't asked to show it. */
8599 /* It is an processor. Skip if we show only extension. */
8602 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
8604 /* It is an impossible processor - skip. */
8608 /* Reserve 2 spaces for ", " or ",\0" */
8611 /* Check if there is any room. */
8619 p
= mempcpy (p
, name
, len
);
8623 /* Output the current message now and start a new one. */
8626 fprintf (stream
, "%s\n", message
);
8628 left
= size
- (start
- message
) - len
- 2;
8630 gas_assert (left
>= 0);
8632 p
= mempcpy (p
, name
, len
);
8637 fprintf (stream
, "%s\n", message
);
8641 md_show_usage (FILE *stream
)
8643 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8644 fprintf (stream
, _("\
8646 -V print assembler version number\n\
8649 fprintf (stream
, _("\
8650 -n Do not optimize code alignment\n\
8651 -q quieten some warnings\n"));
8652 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8653 fprintf (stream
, _("\
8656 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8657 || defined (TE_PE) || defined (TE_PEP))
8658 fprintf (stream
, _("\
8659 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8661 #ifdef SVR4_COMMENT_CHARS
8662 fprintf (stream
, _("\
8663 --divide do not treat `/' as a comment character\n"));
8665 fprintf (stream
, _("\
8666 --divide ignored\n"));
8668 fprintf (stream
, _("\
8669 -march=CPU[,+EXTENSION...]\n\
8670 generate code for CPU and EXTENSION, CPU is one of:\n"));
8671 show_arch (stream
, 0, 1);
8672 fprintf (stream
, _("\
8673 EXTENSION is combination of:\n"));
8674 show_arch (stream
, 1, 0);
8675 fprintf (stream
, _("\
8676 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8677 show_arch (stream
, 0, 0);
8678 fprintf (stream
, _("\
8679 -msse2avx encode SSE instructions with VEX prefix\n"));
8680 fprintf (stream
, _("\
8681 -msse-check=[none|error|warning]\n\
8682 check SSE instructions\n"));
8683 fprintf (stream
, _("\
8684 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8686 fprintf (stream
, _("\
8687 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8688 fprintf (stream
, _("\
8689 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8690 fprintf (stream
, _("\
8691 -mindex-reg support pseudo index registers\n"));
8692 fprintf (stream
, _("\
8693 -mnaked-reg don't require `%%' prefix for registers\n"));
8694 fprintf (stream
, _("\
8695 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8698 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8699 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8700 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8702 /* Pick the target format to use. */
8705 i386_target_format (void)
8707 if (!strncmp (default_arch
, "x86_64", 6))
8709 update_code_flag (CODE_64BIT
, 1);
8710 if (default_arch
[6] == '\0')
8711 x86_elf_abi
= X86_64_ABI
;
8713 x86_elf_abi
= X86_64_X32_ABI
;
8715 else if (!strcmp (default_arch
, "i386"))
8716 update_code_flag (CODE_32BIT
, 1);
8718 as_fatal (_("unknown architecture"));
8720 if (cpu_flags_all_zero (&cpu_arch_isa_flags
))
8721 cpu_arch_isa_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
8722 if (cpu_flags_all_zero (&cpu_arch_tune_flags
))
8723 cpu_arch_tune_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
8725 switch (OUTPUT_FLAVOR
)
8727 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8728 case bfd_target_aout_flavour
:
8729 return AOUT_TARGET_FORMAT
;
8731 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8732 # if defined (TE_PE) || defined (TE_PEP)
8733 case bfd_target_coff_flavour
:
8734 return flag_code
== CODE_64BIT
? "pe-x86-64" : "pe-i386";
8735 # elif defined (TE_GO32)
8736 case bfd_target_coff_flavour
:
8739 case bfd_target_coff_flavour
:
8743 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8744 case bfd_target_elf_flavour
:
8748 switch (x86_elf_abi
)
8751 format
= ELF_TARGET_FORMAT
;
8754 use_rela_relocations
= 1;
8756 format
= ELF_TARGET_FORMAT64
;
8758 case X86_64_X32_ABI
:
8759 use_rela_relocations
= 1;
8761 disallow_64bit_reloc
= 1;
8762 format
= ELF_TARGET_FORMAT32
;
8765 if (cpu_arch_isa
== PROCESSOR_L1OM
)
8767 if (x86_elf_abi
!= X86_64_ABI
)
8768 as_fatal (_("Intel L1OM is 64bit only"));
8769 return ELF_TARGET_L1OM_FORMAT
;
8771 if (cpu_arch_isa
== PROCESSOR_K1OM
)
8773 if (x86_elf_abi
!= X86_64_ABI
)
8774 as_fatal (_("Intel K1OM is 64bit only"));
8775 return ELF_TARGET_K1OM_FORMAT
;
8781 #if defined (OBJ_MACH_O)
8782 case bfd_target_mach_o_flavour
:
8783 if (flag_code
== CODE_64BIT
)
8785 use_rela_relocations
= 1;
8787 return "mach-o-x86-64";
8790 return "mach-o-i386";
8798 #endif /* OBJ_MAYBE_ more than one */
8800 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8802 i386_elf_emit_arch_note (void)
8804 if (IS_ELF
&& cpu_arch_name
!= NULL
)
8807 asection
*seg
= now_seg
;
8808 subsegT subseg
= now_subseg
;
8809 Elf_Internal_Note i_note
;
8810 Elf_External_Note e_note
;
8811 asection
*note_secp
;
8814 /* Create the .note section. */
8815 note_secp
= subseg_new (".note", 0);
8816 bfd_set_section_flags (stdoutput
,
8818 SEC_HAS_CONTENTS
| SEC_READONLY
);
8820 /* Process the arch string. */
8821 len
= strlen (cpu_arch_name
);
8823 i_note
.namesz
= len
+ 1;
8825 i_note
.type
= NT_ARCH
;
8826 p
= frag_more (sizeof (e_note
.namesz
));
8827 md_number_to_chars (p
, (valueT
) i_note
.namesz
, sizeof (e_note
.namesz
));
8828 p
= frag_more (sizeof (e_note
.descsz
));
8829 md_number_to_chars (p
, (valueT
) i_note
.descsz
, sizeof (e_note
.descsz
));
8830 p
= frag_more (sizeof (e_note
.type
));
8831 md_number_to_chars (p
, (valueT
) i_note
.type
, sizeof (e_note
.type
));
8832 p
= frag_more (len
+ 1);
8833 strcpy (p
, cpu_arch_name
);
8835 frag_align (2, 0, 0);
8837 subseg_set (seg
, subseg
);
8843 md_undefined_symbol (char *name
)
8845 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
8846 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
8847 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
8848 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
8852 if (symbol_find (name
))
8853 as_bad (_("GOT already in symbol table"));
8854 GOT_symbol
= symbol_new (name
, undefined_section
,
8855 (valueT
) 0, &zero_address_frag
);
8862 /* Round up a section size to the appropriate boundary. */
8865 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
8867 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8868 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
8870 /* For a.out, force the section size to be aligned. If we don't do
8871 this, BFD will align it for us, but it will not write out the
8872 final bytes of the section. This may be a bug in BFD, but it is
8873 easier to fix it here since that is how the other a.out targets
8877 align
= bfd_get_section_alignment (stdoutput
, segment
);
8878 size
= ((size
+ (1 << align
) - 1) & ((valueT
) -1 << align
));
8885 /* On the i386, PC-relative offsets are relative to the start of the
8886 next instruction. That is, the address of the offset, plus its
8887 size, since the offset is always the last part of the insn. */
8890 md_pcrel_from (fixS
*fixP
)
8892 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
8898 s_bss (int ignore ATTRIBUTE_UNUSED
)
8902 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8904 obj_elf_section_change_hook ();
8906 temp
= get_absolute_expression ();
8907 subseg_set (bss_section
, (subsegT
) temp
);
8908 demand_empty_rest_of_line ();
8914 i386_validate_fix (fixS
*fixp
)
8916 if (fixp
->fx_subsy
&& fixp
->fx_subsy
== GOT_symbol
)
8918 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
8922 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
8927 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
8929 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
8936 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
8939 bfd_reloc_code_real_type code
;
8941 switch (fixp
->fx_r_type
)
8943 case BFD_RELOC_X86_64_PLT32
:
8944 case BFD_RELOC_X86_64_GOT32
:
8945 case BFD_RELOC_X86_64_GOTPCREL
:
8946 case BFD_RELOC_386_PLT32
:
8947 case BFD_RELOC_386_GOT32
:
8948 case BFD_RELOC_386_GOTOFF
:
8949 case BFD_RELOC_386_GOTPC
:
8950 case BFD_RELOC_386_TLS_GD
:
8951 case BFD_RELOC_386_TLS_LDM
:
8952 case BFD_RELOC_386_TLS_LDO_32
:
8953 case BFD_RELOC_386_TLS_IE_32
:
8954 case BFD_RELOC_386_TLS_IE
:
8955 case BFD_RELOC_386_TLS_GOTIE
:
8956 case BFD_RELOC_386_TLS_LE_32
:
8957 case BFD_RELOC_386_TLS_LE
:
8958 case BFD_RELOC_386_TLS_GOTDESC
:
8959 case BFD_RELOC_386_TLS_DESC_CALL
:
8960 case BFD_RELOC_X86_64_TLSGD
:
8961 case BFD_RELOC_X86_64_TLSLD
:
8962 case BFD_RELOC_X86_64_DTPOFF32
:
8963 case BFD_RELOC_X86_64_DTPOFF64
:
8964 case BFD_RELOC_X86_64_GOTTPOFF
:
8965 case BFD_RELOC_X86_64_TPOFF32
:
8966 case BFD_RELOC_X86_64_TPOFF64
:
8967 case BFD_RELOC_X86_64_GOTOFF64
:
8968 case BFD_RELOC_X86_64_GOTPC32
:
8969 case BFD_RELOC_X86_64_GOT64
:
8970 case BFD_RELOC_X86_64_GOTPCREL64
:
8971 case BFD_RELOC_X86_64_GOTPC64
:
8972 case BFD_RELOC_X86_64_GOTPLT64
:
8973 case BFD_RELOC_X86_64_PLTOFF64
:
8974 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
8975 case BFD_RELOC_X86_64_TLSDESC_CALL
:
8977 case BFD_RELOC_VTABLE_ENTRY
:
8978 case BFD_RELOC_VTABLE_INHERIT
:
8980 case BFD_RELOC_32_SECREL
:
8982 code
= fixp
->fx_r_type
;
8984 case BFD_RELOC_X86_64_32S
:
8985 if (!fixp
->fx_pcrel
)
8987 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8988 code
= fixp
->fx_r_type
;
8994 switch (fixp
->fx_size
)
8997 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
8998 _("can not do %d byte pc-relative relocation"),
9000 code
= BFD_RELOC_32_PCREL
;
9002 case 1: code
= BFD_RELOC_8_PCREL
; break;
9003 case 2: code
= BFD_RELOC_16_PCREL
; break;
9004 case 4: code
= BFD_RELOC_32_PCREL
; break;
9006 case 8: code
= BFD_RELOC_64_PCREL
; break;
9012 switch (fixp
->fx_size
)
9015 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
9016 _("can not do %d byte relocation"),
9018 code
= BFD_RELOC_32
;
9020 case 1: code
= BFD_RELOC_8
; break;
9021 case 2: code
= BFD_RELOC_16
; break;
9022 case 4: code
= BFD_RELOC_32
; break;
9024 case 8: code
= BFD_RELOC_64
; break;
9031 if ((code
== BFD_RELOC_32
9032 || code
== BFD_RELOC_32_PCREL
9033 || code
== BFD_RELOC_X86_64_32S
)
9035 && fixp
->fx_addsy
== GOT_symbol
)
9038 code
= BFD_RELOC_386_GOTPC
;
9040 code
= BFD_RELOC_X86_64_GOTPC32
;
9042 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
9044 && fixp
->fx_addsy
== GOT_symbol
)
9046 code
= BFD_RELOC_X86_64_GOTPC64
;
9049 rel
= (arelent
*) xmalloc (sizeof (arelent
));
9050 rel
->sym_ptr_ptr
= (asymbol
**) xmalloc (sizeof (asymbol
*));
9051 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
9053 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
9055 if (!use_rela_relocations
)
9057 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9058 vtable entry to be used in the relocation's section offset. */
9059 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
9060 rel
->address
= fixp
->fx_offset
;
9061 #if defined (OBJ_COFF) && defined (TE_PE)
9062 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
9063 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
9068 /* Use the rela in 64bit mode. */
9071 if (disallow_64bit_reloc
)
9074 case BFD_RELOC_X86_64_DTPOFF64
:
9075 case BFD_RELOC_X86_64_TPOFF64
:
9076 case BFD_RELOC_64_PCREL
:
9077 case BFD_RELOC_X86_64_GOTOFF64
:
9078 case BFD_RELOC_X86_64_GOT64
:
9079 case BFD_RELOC_X86_64_GOTPCREL64
:
9080 case BFD_RELOC_X86_64_GOTPC64
:
9081 case BFD_RELOC_X86_64_GOTPLT64
:
9082 case BFD_RELOC_X86_64_PLTOFF64
:
9083 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
9084 _("cannot represent relocation type %s in x32 mode"),
9085 bfd_get_reloc_code_name (code
));
9091 if (!fixp
->fx_pcrel
)
9092 rel
->addend
= fixp
->fx_offset
;
9096 case BFD_RELOC_X86_64_PLT32
:
9097 case BFD_RELOC_X86_64_GOT32
:
9098 case BFD_RELOC_X86_64_GOTPCREL
:
9099 case BFD_RELOC_X86_64_TLSGD
:
9100 case BFD_RELOC_X86_64_TLSLD
:
9101 case BFD_RELOC_X86_64_GOTTPOFF
:
9102 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
9103 case BFD_RELOC_X86_64_TLSDESC_CALL
:
9104 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
9107 rel
->addend
= (section
->vma
9109 + fixp
->fx_addnumber
9110 + md_pcrel_from (fixp
));
9115 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
9116 if (rel
->howto
== NULL
)
9118 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
9119 _("cannot represent relocation type %s"),
9120 bfd_get_reloc_code_name (code
));
9121 /* Set howto to a garbage value so that we can keep going. */
9122 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
9123 gas_assert (rel
->howto
!= NULL
);
9129 #include "tc-i386-intel.c"
9132 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
9134 int saved_naked_reg
;
9135 char saved_register_dot
;
9137 saved_naked_reg
= allow_naked_reg
;
9138 allow_naked_reg
= 1;
9139 saved_register_dot
= register_chars
['.'];
9140 register_chars
['.'] = '.';
9141 allow_pseudo_reg
= 1;
9142 expression_and_evaluate (exp
);
9143 allow_pseudo_reg
= 0;
9144 register_chars
['.'] = saved_register_dot
;
9145 allow_naked_reg
= saved_naked_reg
;
9147 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
9149 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
9151 exp
->X_op
= O_constant
;
9152 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
9153 .dw2_regnum
[flag_code
>> 1];
9156 exp
->X_op
= O_illegal
;
9161 tc_x86_frame_initial_instructions (void)
9163 static unsigned int sp_regno
[2];
9165 if (!sp_regno
[flag_code
>> 1])
9167 char *saved_input
= input_line_pointer
;
9168 char sp
[][4] = {"esp", "rsp"};
9171 input_line_pointer
= sp
[flag_code
>> 1];
9172 tc_x86_parse_to_dw2regnum (&exp
);
9173 gas_assert (exp
.X_op
== O_constant
);
9174 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
9175 input_line_pointer
= saved_input
;
9178 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
9179 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
9183 x86_dwarf2_addr_size (void)
9185 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9186 if (x86_elf_abi
== X86_64_X32_ABI
)
9189 return bfd_arch_bits_per_address (stdoutput
) / 8;
9193 i386_elf_section_type (const char *str
, size_t len
)
9195 if (flag_code
== CODE_64BIT
9196 && len
== sizeof ("unwind") - 1
9197 && strncmp (str
, "unwind", 6) == 0)
9198 return SHT_X86_64_UNWIND
;
9205 i386_solaris_fix_up_eh_frame (segT sec
)
9207 if (flag_code
== CODE_64BIT
)
9208 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
9214 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
9218 exp
.X_op
= O_secrel
;
9219 exp
.X_add_symbol
= symbol
;
9220 exp
.X_add_number
= 0;
9221 emit_expr (&exp
, size
);
9225 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9226 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9229 x86_64_section_letter (int letter
, char **ptr_msg
)
9231 if (flag_code
== CODE_64BIT
)
9234 return SHF_X86_64_LARGE
;
9236 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9239 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
9244 x86_64_section_word (char *str
, size_t len
)
9246 if (len
== 5 && flag_code
== CODE_64BIT
&& CONST_STRNEQ (str
, "large"))
9247 return SHF_X86_64_LARGE
;
9253 handle_large_common (int small ATTRIBUTE_UNUSED
)
9255 if (flag_code
!= CODE_64BIT
)
9257 s_comm_internal (0, elf_common_parse
);
9258 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9262 static segT lbss_section
;
9263 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
9264 asection
*saved_bss_section
= bss_section
;
9266 if (lbss_section
== NULL
)
9268 flagword applicable
;
9270 subsegT subseg
= now_subseg
;
9272 /* The .lbss section is for local .largecomm symbols. */
9273 lbss_section
= subseg_new (".lbss", 0);
9274 applicable
= bfd_applicable_section_flags (stdoutput
);
9275 bfd_set_section_flags (stdoutput
, lbss_section
,
9276 applicable
& SEC_ALLOC
);
9277 seg_info (lbss_section
)->bss
= 1;
9279 subseg_set (seg
, subseg
);
9282 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
9283 bss_section
= lbss_section
;
9285 s_comm_internal (0, elf_common_parse
);
9287 elf_com_section_ptr
= saved_com_section_ptr
;
9288 bss_section
= saved_bss_section
;
9291 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */