1/* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2016 Free Software Foundation, Inc.
3
4 This file is part of GAS, the GNU Assembler.
5
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
19 02110-1301, USA. */
20
21/* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
27
28#include "as.h"
29#include "safe-ctype.h"
30#include "subsegs.h"
31#include "dwarf2dbg.h"
32#include "dw2gencfi.h"
33#include "elf/x86-64.h"
34#include "opcodes/i386-init.h"
35
36#ifndef REGISTER_WARNINGS
37#define REGISTER_WARNINGS 1
38#endif
39
40#ifndef INFER_ADDR_PREFIX
41#define INFER_ADDR_PREFIX 1
42#endif
43
44#ifndef DEFAULT_ARCH
45#define DEFAULT_ARCH "i386"
46#endif
47
48#ifndef INLINE
49#if __GNUC__ >= 2
50#define INLINE __inline__
51#else
52#define INLINE
53#endif
54#endif
55
56/* Prefixes will be emitted in the order defined below.
57 WAIT_PREFIX must be the first prefix since FWAIT really is an
58 instruction, and so must come before any prefixes.
59 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
60 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
61#define WAIT_PREFIX 0
62#define SEG_PREFIX 1
63#define ADDR_PREFIX 2
64#define DATA_PREFIX 3
65#define REP_PREFIX 4
66#define HLE_PREFIX REP_PREFIX
67#define BND_PREFIX REP_PREFIX
68#define LOCK_PREFIX 5
69#define REX_PREFIX 6 /* must come last. */
70#define MAX_PREFIXES 7 /* max prefixes per opcode */
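/* Worked example (illustrative): for "lock addw $1, (%ebx)" assembled in
   32-bit mode, the operand-size prefix 0x66 lands in slot DATA_PREFIX and
   0xf0 in slot LOCK_PREFIX; since prefixes are emitted in slot order (see
   above), 0x66 comes out before 0xf0, followed by the opcode bytes.  */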
71
72/* we define the syntax here (modulo base,index,scale syntax) */
73#define REGISTER_PREFIX '%'
74#define IMMEDIATE_PREFIX '$'
75#define ABSOLUTE_PREFIX '*'
76
77/* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79#define WORD_MNEM_SUFFIX 'w'
80#define BYTE_MNEM_SUFFIX 'b'
81#define SHORT_MNEM_SUFFIX 's'
82#define LONG_MNEM_SUFFIX 'l'
83#define QWORD_MNEM_SUFFIX 'q'
84#define XMMWORD_MNEM_SUFFIX 'x'
85#define YMMWORD_MNEM_SUFFIX 'y'
86#define ZMMWORD_MNEM_SUFFIX 'z'
87/* Intel Syntax. Use a non-ascii letter since it never appears
88 in instructions. */
89#define LONG_DOUBLE_MNEM_SUFFIX '\1'
90
91#define END_OF_INSN '\0'
92
93/*
94 'templates' is for grouping together 'template' structures for opcodes
95 of the same name. This is only used for storing the insns in the grand
96 ole hash table of insns.
97 The templates themselves start at START and range up to (but not including)
98 END.
99 */
100typedef struct
101{
102 const insn_template *start;
103 const insn_template *end;
104}
105templates;
106
107/* 386 operand encoding bytes: see 386 book for details of this. */
108typedef struct
109{
110 unsigned int regmem; /* codes register or memory operand */
111 unsigned int reg; /* codes register operand (or extended opcode) */
112 unsigned int mode; /* how to interpret regmem & reg */
113}
114modrm_byte;
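/* Encoding note (illustrative): the three fields are packed into a single
   byte as (mode << 6) | (reg << 3) | regmem, e.g. mode = 3 (register
   direct), reg = 0, regmem = 1 encodes as 0xc1.  */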
115
116/* x86-64 extension prefix. */
117typedef int rex_byte;
118
119/* 386 opcode byte to code indirect addressing. */
120typedef struct
121{
122 unsigned base;
123 unsigned index;
124 unsigned scale;
125}
126sib_byte;
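/* Encoding note (illustrative): packed as (scale << 6) | (index << 3) | base,
   where SCALE holds log2 of the scale factor, e.g. base = 3 (%ebx),
   index = 2 (%edx), scale = 1 (i.e. *2) encodes as 0x53.  */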
127
128/* x86 arch names, types and features */
129typedef struct
130{
131 const char *name; /* arch name */
132 unsigned int len; /* arch string length */
133 enum processor_type type; /* arch type */
134 i386_cpu_flags flags; /* cpu feature flags */
135 unsigned int skip; /* show_arch should skip this. */
136 unsigned int negated; /* turn off indicated flags. */
137}
138arch_entry;
139
140static void update_code_flag (int, int);
141static void set_code_flag (int);
142static void set_16bit_gcc_code_flag (int);
143static void set_intel_syntax (int);
144static void set_intel_mnemonic (int);
145static void set_allow_index_reg (int);
146static void set_check (int);
147static void set_cpu_arch (int);
148#ifdef TE_PE
149static void pe_directive_secrel (int);
150#endif
151static void signed_cons (int);
152static char *output_invalid (int c);
153static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
154 const char *);
155static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
156 const char *);
157static int i386_att_operand (char *);
158static int i386_intel_operand (char *, int);
159static int i386_intel_simplify (expressionS *);
160static int i386_intel_parse_name (const char *, expressionS *);
161static const reg_entry *parse_register (char *, char **);
162static char *parse_insn (char *, char *);
163static char *parse_operands (char *, const char *);
164static void swap_operands (void);
165static void swap_2_operands (int, int);
166static void optimize_imm (void);
167static void optimize_disp (void);
168static const insn_template *match_template (void);
169static int check_string (void);
170static int process_suffix (void);
171static int check_byte_reg (void);
172static int check_long_reg (void);
173static int check_qword_reg (void);
174static int check_word_reg (void);
175static int finalize_imm (void);
176static int process_operands (void);
177static const seg_entry *build_modrm_byte (void);
178static void output_insn (void);
179static void output_imm (fragS *, offsetT);
180static void output_disp (fragS *, offsetT);
181#ifndef I386COFF
182static void s_bss (int);
183#endif
184#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
185static void handle_large_common (int small ATTRIBUTE_UNUSED);
186#endif
187
188static const char *default_arch = DEFAULT_ARCH;
189
190/* This struct describes rounding control and SAE in the instruction. */
191struct RC_Operation
192{
193 enum rc_type
194 {
195 rne = 0,
196 rd,
197 ru,
198 rz,
199 saeonly
200 } type;
201 int operand;
202};
203
204static struct RC_Operation rc_op;
205
206/* The struct describes masking, applied to OPERAND in the instruction.
207 MASK is a pointer to the corresponding mask register. ZEROING tells
208 whether merging or zeroing mask is used. */
209struct Mask_Operation
210{
211 const reg_entry *mask;
212 unsigned int zeroing;
213 /* The operand this masking is applied to. */
214 int operand;
215};
216
217static struct Mask_Operation mask_op;
218
219/* The struct describes broadcasting, applied to OPERAND. TYPE selects
220 the broadcast factor: no broadcast, {1to8}, or {1to16}. */
221struct Broadcast_Operation
222{
223 /* Type of broadcast: no broadcast, {1to8}, or {1to16}. */
224 int type;
225
226 /* Index of broadcasted operand. */
227 int operand;
228};
229
230static struct Broadcast_Operation broadcast_op;
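/* Illustrative example: an AT&T memory operand written as "(%rax){1to16}",
   as in "vaddps (%rax){1to16}, %zmm1, %zmm2", requests embedded broadcast;
   the index of that operand is what ends up in broadcast_op.operand.  */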
231
232/* VEX prefix. */
233typedef struct
234{
235 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
236 unsigned char bytes[4];
237 unsigned int length;
238 /* Destination or source register specifier. */
239 const reg_entry *register_specifier;
240} vex_prefix;
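/* Layout note (illustrative): a 2-byte VEX prefix is 0xc5 plus one payload
   byte, the 3-byte form is 0xc4 plus two, and an EVEX prefix is 0x62 plus
   three, which is why BYTES has room for four and LENGTH records how many
   are actually in use.  */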
241
242/* 'md_assemble ()' gathers together information and puts it into a
243 i386_insn. */
244
245union i386_op
246 {
247 expressionS *disps;
248 expressionS *imms;
249 const reg_entry *regs;
250 };
251
252enum i386_error
253 {
254 operand_size_mismatch,
255 operand_type_mismatch,
256 register_type_mismatch,
257 number_of_operands_mismatch,
258 invalid_instruction_suffix,
259 bad_imm4,
260 old_gcc_only,
261 unsupported_with_intel_mnemonic,
262 unsupported_syntax,
263 unsupported,
264 invalid_vsib_address,
265 invalid_vector_register_set,
266 unsupported_vector_index_register,
267 unsupported_broadcast,
268 broadcast_not_on_src_operand,
269 broadcast_needed,
270 unsupported_masking,
271 mask_not_on_destination,
272 no_default_mask,
273 unsupported_rc_sae,
274 rc_sae_operand_not_last_imm,
275 invalid_register_operand,
276 try_vector_disp8
277 };
278
279struct _i386_insn
280 {
281 /* TM holds the template for the insn we're currently assembling. */
282 insn_template tm;
283
284 /* SUFFIX holds the instruction size suffix for byte, word, dword
285 or qword, if given. */
286 char suffix;
287
288 /* OPERANDS gives the number of given operands. */
289 unsigned int operands;
290
291 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
292 of given register, displacement, memory operands and immediate
293 operands. */
294 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
295
296 /* TYPES [i] is the type (see above #defines) which tells us how to
297 use OP[i] for the corresponding operand. */
298 i386_operand_type types[MAX_OPERANDS];
299
300 /* Displacement expression, immediate expression, or register for each
301 operand. */
302 union i386_op op[MAX_OPERANDS];
303
304 /* Flags for operands. */
305 unsigned int flags[MAX_OPERANDS];
306#define Operand_PCrel 1
307
308 /* Relocation type for operand */
309 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
310
311 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
312 the base index byte below. */
313 const reg_entry *base_reg;
314 const reg_entry *index_reg;
315 unsigned int log2_scale_factor;
316
317 /* SEG gives the seg_entries of this insn. They are zero unless
318 explicit segment overrides are given. */
319 const seg_entry *seg[2];
320
321 /* PREFIX holds all the given prefix opcodes (usually null).
322 PREFIXES is the number of prefix opcodes. */
323 unsigned int prefixes;
324 unsigned char prefix[MAX_PREFIXES];
325
326 /* RM and SIB are the modrm byte and the sib byte where the
327 addressing modes of this insn are encoded. */
328 modrm_byte rm;
329 rex_byte rex;
330 rex_byte vrex;
331 sib_byte sib;
332 vex_prefix vex;
333
334 /* Masking attributes. */
335 struct Mask_Operation *mask;
336
337 /* Rounding control and SAE attributes. */
338 struct RC_Operation *rounding;
339
340 /* Broadcasting attributes. */
341 struct Broadcast_Operation *broadcast;
342
343 /* Compressed disp8*N attribute. */
344 unsigned int memshift;
345
346 /* Swap operand in encoding. */
347 unsigned int swap_operand;
348
349 /* Prefer 8bit or 32bit displacement in encoding. */
350 enum
351 {
352 disp_encoding_default = 0,
353 disp_encoding_8bit,
354 disp_encoding_32bit
355 } disp_encoding;
356
357 /* REP prefix. */
358 const char *rep_prefix;
359
360 /* HLE prefix. */
361 const char *hle_prefix;
362
363 /* Have BND prefix. */
364 const char *bnd_prefix;
365
366 /* Need VREX to support upper 16 registers. */
367 int need_vrex;
368
369 /* Error message. */
370 enum i386_error error;
371 };
372
373typedef struct _i386_insn i386_insn;
374
375/* Link each RC type with the corresponding string that is looked for
376 in the asm source. */
377struct RC_name
378{
379 enum rc_type type;
380 const char *name;
381 unsigned int len;
382};
383
384static const struct RC_name RC_NamesTable[] =
385{
386 { rne, STRING_COMMA_LEN ("rn-sae") },
387 { rd, STRING_COMMA_LEN ("rd-sae") },
388 { ru, STRING_COMMA_LEN ("ru-sae") },
389 { rz, STRING_COMMA_LEN ("rz-sae") },
390 { saeonly, STRING_COMMA_LEN ("sae") },
391};
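/* Illustrative use: an operand spelled "{rd-sae}" in the source matches the
   second entry above and yields type rd (round toward minus infinity),
   while a bare "{sae}" selects saeonly, i.e. suppress-all-exceptions with
   no rounding override.  */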
392
393/* List of chars besides those in app.c:symbol_chars that can start an
394 operand. Used to prevent the scrubber eating vital white-space. */
395const char extra_symbol_chars[] = "*%-([{"
396#ifdef LEX_AT
397 "@"
398#endif
399#ifdef LEX_QM
400 "?"
401#endif
402 ;
403
404#if (defined (TE_I386AIX) \
405 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
406 && !defined (TE_GNU) \
407 && !defined (TE_LINUX) \
408 && !defined (TE_NACL) \
409 && !defined (TE_NETWARE) \
410 && !defined (TE_FreeBSD) \
411 && !defined (TE_DragonFly) \
412 && !defined (TE_NetBSD)))
413/* This array holds the chars that always start a comment. If the
414 pre-processor is disabled, these aren't very useful. The option
415 --divide will remove '/' from this list. */
416const char *i386_comment_chars = "#/";
417#define SVR4_COMMENT_CHARS 1
418#define PREFIX_SEPARATOR '\\'
419
420#else
421const char *i386_comment_chars = "#";
422#define PREFIX_SEPARATOR '/'
423#endif
424
425/* This array holds the chars that only start a comment at the beginning of
426 a line. If the line seems to have the form '# 123 filename'
427 .line and .file directives will appear in the pre-processed output.
428 Note that input_file.c hand checks for '#' at the beginning of the
429 first line of the input file. This is because the compiler outputs
430 #NO_APP at the beginning of its output.
431 Also note that comments started like this one will always work if
432 '/' isn't otherwise defined. */
433const char line_comment_chars[] = "#/";
434
435const char line_separator_chars[] = ";";
436
437/* Chars that can be used to separate mant from exp in floating point
438 nums. */
439const char EXP_CHARS[] = "eE";
440
441/* Chars that mean this number is a floating point constant
442 As in 0f12.456
443 or 0d1.2345e12. */
444const char FLT_CHARS[] = "fFdDxX";
445
446/* Tables for lexical analysis. */
447static char mnemonic_chars[256];
448static char register_chars[256];
449static char operand_chars[256];
450static char identifier_chars[256];
451static char digit_chars[256];
452
453/* Lexical macros. */
454#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
455#define is_operand_char(x) (operand_chars[(unsigned char) x])
456#define is_register_char(x) (register_chars[(unsigned char) x])
457#define is_space_char(x) ((x) == ' ')
458#define is_identifier_char(x) (identifier_chars[(unsigned char) x])
459#define is_digit_char(x) (digit_chars[(unsigned char) x])
460
461/* All non-digit non-letter characters that may occur in an operand. */
462static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
463
464/* md_assemble() always leaves the strings it's passed unaltered. To
465 effect this we maintain a stack of saved characters that we've smashed
466 with '\0's (indicating end of strings for various sub-fields of the
467 assembler instruction). */
468static char save_stack[32];
469static char *save_stack_p;
470#define END_STRING_AND_SAVE(s) \
471 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
472#define RESTORE_END_STRING(s) \
473 do { *(s) = *--save_stack_p; } while (0)
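/* Typical usage pattern (illustrative sketch):

     char *end = strchr (op_string, ',');
     END_STRING_AND_SAVE (end);
     ... parse the now NUL-terminated piece ...
     RESTORE_END_STRING (end);

   Saves and restores must nest, since the smashed characters are kept on
   the small stack above.  */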
474
475/* The instruction we're assembling. */
476static i386_insn i;
477
478/* Possible templates for current insn. */
479static const templates *current_templates;
480
481/* Per instruction expressionS buffers: max displacements & immediates. */
482static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
483static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
484
485/* Current operand we are working on. */
486static int this_operand = -1;
487
488/* We support three different modes. FLAG_CODE variable is used to distinguish
489 these. */
490
491enum flag_code {
492 CODE_32BIT,
493 CODE_16BIT,
494 CODE_64BIT };
495
496static enum flag_code flag_code;
497static unsigned int object_64bit;
498static unsigned int disallow_64bit_reloc;
499static int use_rela_relocations = 0;
500
501#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
502 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
503 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
504
505/* The ELF ABI to use. */
506enum x86_elf_abi
507{
508 I386_ABI,
509 X86_64_ABI,
510 X86_64_X32_ABI
511};
512
513static enum x86_elf_abi x86_elf_abi = I386_ABI;
514#endif
515
516#if defined (TE_PE) || defined (TE_PEP)
517/* Use big object file format. */
518static int use_big_obj = 0;
519#endif
520
521#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
522/* 1 if generating code for a shared library. */
523static int shared = 0;
524#endif
525
526/* 1 for intel syntax,
527 0 if att syntax. */
528static int intel_syntax = 0;
529
530/* 1 for intel mnemonic,
531 0 if att mnemonic. */
532static int intel_mnemonic = !SYSV386_COMPAT;
533
534/* 1 if support old (<= 2.8.1) versions of gcc. */
535static int old_gcc = OLDGCC_COMPAT;
536
537/* 1 if pseudo registers are permitted. */
538static int allow_pseudo_reg = 0;
539
540/* 1 if register prefix % not required. */
541static int allow_naked_reg = 0;
542
543/* 1 if the assembler should add BND prefix for all control-transferring
544 instructions supporting it, even if this prefix wasn't specified
545 explicitly. */
546static int add_bnd_prefix = 0;
547
548/* 1 if pseudo index register, eiz/riz, is allowed. */
549static int allow_index_reg = 0;
550
551/* 1 if the assembler should ignore LOCK prefix, even if it was
552 specified explicitly. */
553static int omit_lock_prefix = 0;
554
555/* 1 if the assembler should encode lfence, mfence, and sfence as
556 "lock addl $0, (%{re}sp)". */
557static int avoid_fence = 0;
558
559/* 1 if the assembler should generate relax relocations. */
560
561static int generate_relax_relocations
562 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS;
563
564static enum check_kind
565 {
566 check_none = 0,
567 check_warning,
568 check_error
569 }
570sse_check, operand_check = check_warning;
571
572/* Register prefix used for error message. */
573static const char *register_prefix = "%";
574
575/* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
576 leave, push, and pop instructions so that gcc has the same stack
577 frame as in 32 bit mode. */
578static char stackop_size = '\0';
579
580/* Non-zero to optimize code alignment. */
581int optimize_align_code = 1;
582
583/* Non-zero to quieten some warnings. */
584static int quiet_warnings = 0;
585
586/* CPU name. */
587static const char *cpu_arch_name = NULL;
588static char *cpu_sub_arch_name = NULL;
589
590/* CPU feature flags. */
591static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
592
593/* If we have selected a cpu we are generating instructions for. */
594static int cpu_arch_tune_set = 0;
595
596/* Cpu we are generating instructions for. */
597enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
598
599/* CPU feature flags of cpu we are generating instructions for. */
600static i386_cpu_flags cpu_arch_tune_flags;
601
602/* CPU instruction set architecture used. */
603enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
604
605/* CPU feature flags of instruction set architecture used. */
606i386_cpu_flags cpu_arch_isa_flags;
607
608/* If set, conditional jumps are not automatically promoted to handle
609 offsets larger than a byte. */
610static unsigned int no_cond_jump_promotion = 0;
611
612/* Encode SSE instructions with VEX prefix. */
613static unsigned int sse2avx;
614
615/* Encode scalar AVX instructions with specific vector length. */
616static enum
617 {
618 vex128 = 0,
619 vex256
620 } avxscalar;
621
622/* Encode scalar EVEX LIG instructions with specific vector length. */
623static enum
624 {
625 evexl128 = 0,
626 evexl256,
627 evexl512
628 } evexlig;
629
630/* Encode EVEX WIG instructions with specific evex.w. */
631static enum
632 {
633 evexw0 = 0,
634 evexw1
635 } evexwig;
636
637/* Value to encode in EVEX RC bits, for SAE-only instructions. */
638static enum rc_type evexrcig = rne;
639
640/* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
641static symbolS *GOT_symbol;
642
643/* The dwarf2 return column, adjusted for 32 or 64 bit. */
644unsigned int x86_dwarf2_return_column;
645
646/* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
647int x86_cie_data_alignment;
648
649/* Interface to relax_segment.
650 There are 3 major relax states for 386 jump insns because the
651 different types of jumps add different sizes to frags when we're
652 figuring out what sort of jump to choose to reach a given label. */
653
654/* Types. */
655#define UNCOND_JUMP 0
656#define COND_JUMP 1
657#define COND_JUMP86 2
658
659/* Sizes. */
660#define CODE16 1
661#define SMALL 0
662#define SMALL16 (SMALL | CODE16)
663#define BIG 2
664#define BIG16 (BIG | CODE16)
665
666#ifndef INLINE
667#ifdef __GNUC__
668#define INLINE __inline__
669#else
670#define INLINE
671#endif
672#endif
673
674#define ENCODE_RELAX_STATE(type, size) \
675 ((relax_substateT) (((type) << 2) | (size)))
676#define TYPE_FROM_RELAX_STATE(s) \
677 ((s) >> 2)
678#define DISP_SIZE_FROM_RELAX_STATE(s) \
679 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
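/* Worked example (illustrative): ENCODE_RELAX_STATE (COND_JUMP, SMALL)
   is (1 << 2) | 0 == 4, i.e. md_relax_table[4] below.  When the target
   is out of byte range, relaxation follows that entry's fourth field to
   ENCODE_RELAX_STATE (COND_JUMP, BIG) == 6, for which
   DISP_SIZE_FROM_RELAX_STATE gives a 4-byte displacement.  */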
680
681/* This table is used by relax_frag to promote short jumps to long
682 ones where necessary. SMALL (short) jumps may be promoted to BIG
683 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
684 don't allow a short jump in a 32 bit code segment to be promoted to
685 a 16 bit offset jump because it's slower (requires data size
686 prefix), and doesn't work, unless the destination is in the bottom
687 64k of the code segment (The top 16 bits of eip are zeroed). */
688
689const relax_typeS md_relax_table[] =
690{
691 /* The fields are:
692 1) most positive reach of this state,
693 2) most negative reach of this state,
694 3) how many bytes this mode will have in the variable part of the frag
695 4) which index into the table to try if we can't fit into this one. */
696
697 /* UNCOND_JUMP states. */
698 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
699 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
700 /* dword jmp adds 4 bytes to frag:
701 0 extra opcode bytes, 4 displacement bytes. */
702 {0, 0, 4, 0},
703 /* word jmp adds 2 bytes to frag:
704 0 extra opcode bytes, 2 displacement bytes. */
705 {0, 0, 2, 0},
706
707 /* COND_JUMP states. */
708 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
709 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
710 /* dword conditionals add 5 bytes to frag:
711 1 extra opcode byte, 4 displacement bytes. */
712 {0, 0, 5, 0},
713 /* word conditionals add 3 bytes to frag:
714 1 extra opcode byte, 2 displacement bytes. */
715 {0, 0, 3, 0},
716
717 /* COND_JUMP86 states. */
718 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
719 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
720 /* dword conditionals add 5 bytes to frag:
721 1 extra opcode byte, 4 displacement bytes. */
722 {0, 0, 5, 0},
723 /* word conditionals add 4 bytes to frag:
724 1 displacement byte and a 3 byte long branch insn. */
725 {0, 0, 4, 0}
726};
727
728static const arch_entry cpu_arch[] =
729{
730 /* Do not replace the first two entries - i386_target_format()
731 relies on them being there in this order. */
732 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
733 CPU_GENERIC32_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
735 CPU_GENERIC64_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
737 CPU_NONE_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
739 CPU_I186_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
741 CPU_I286_FLAGS, 0, 0 },
742 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
743 CPU_I386_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
745 CPU_I486_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
747 CPU_I586_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
749 CPU_I686_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
751 CPU_I586_FLAGS, 0, 0 },
752 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
753 CPU_PENTIUMPRO_FLAGS, 0, 0 },
754 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
755 CPU_P2_FLAGS, 0, 0 },
756 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
757 CPU_P3_FLAGS, 0, 0 },
758 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
759 CPU_P4_FLAGS, 0, 0 },
760 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
761 CPU_CORE_FLAGS, 0, 0 },
762 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
763 CPU_NOCONA_FLAGS, 0, 0 },
764 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
765 CPU_CORE_FLAGS, 1, 0 },
766 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
767 CPU_CORE_FLAGS, 0, 0 },
768 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
769 CPU_CORE2_FLAGS, 1, 0 },
770 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
771 CPU_CORE2_FLAGS, 0, 0 },
772 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
773 CPU_COREI7_FLAGS, 0, 0 },
774 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
775 CPU_L1OM_FLAGS, 0, 0 },
776 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
777 CPU_K1OM_FLAGS, 0, 0 },
778 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU,
779 CPU_IAMCU_FLAGS, 0, 0 },
780 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
781 CPU_K6_FLAGS, 0, 0 },
782 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
783 CPU_K6_2_FLAGS, 0, 0 },
784 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
785 CPU_ATHLON_FLAGS, 0, 0 },
786 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
787 CPU_K8_FLAGS, 1, 0 },
788 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
789 CPU_K8_FLAGS, 0, 0 },
790 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
791 CPU_K8_FLAGS, 0, 0 },
792 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
793 CPU_AMDFAM10_FLAGS, 0, 0 },
794 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
795 CPU_BDVER1_FLAGS, 0, 0 },
796 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
797 CPU_BDVER2_FLAGS, 0, 0 },
798 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
799 CPU_BDVER3_FLAGS, 0, 0 },
800 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD,
801 CPU_BDVER4_FLAGS, 0, 0 },
802 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER,
803 CPU_ZNVER1_FLAGS, 0, 0 },
804 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
805 CPU_BTVER1_FLAGS, 0, 0 },
806 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
807 CPU_BTVER2_FLAGS, 0, 0 },
808 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
809 CPU_8087_FLAGS, 0, 0 },
810 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
811 CPU_287_FLAGS, 0, 0 },
812 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
813 CPU_387_FLAGS, 0, 0 },
814 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
815 CPU_ANY87_FLAGS, 0, 1 },
816 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
817 CPU_MMX_FLAGS, 0, 0 },
818 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
819 CPU_3DNOWA_FLAGS, 0, 1 },
820 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
821 CPU_SSE_FLAGS, 0, 0 },
822 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
823 CPU_SSE2_FLAGS, 0, 0 },
824 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
825 CPU_SSE3_FLAGS, 0, 0 },
826 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
827 CPU_SSSE3_FLAGS, 0, 0 },
828 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
829 CPU_SSE4_1_FLAGS, 0, 0 },
830 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
831 CPU_SSE4_2_FLAGS, 0, 0 },
832 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
833 CPU_SSE4_2_FLAGS, 0, 0 },
834 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
835 CPU_ANY_SSE_FLAGS, 0, 1 },
836 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
837 CPU_AVX_FLAGS, 0, 0 },
838 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
839 CPU_AVX2_FLAGS, 0, 0 },
840 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
841 CPU_AVX512F_FLAGS, 0, 0 },
842 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
843 CPU_AVX512CD_FLAGS, 0, 0 },
844 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
845 CPU_AVX512ER_FLAGS, 0, 0 },
846 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
847 CPU_AVX512PF_FLAGS, 0, 0 },
848 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN,
849 CPU_AVX512DQ_FLAGS, 0, 0 },
850 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN,
851 CPU_AVX512BW_FLAGS, 0, 0 },
852 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN,
853 CPU_AVX512VL_FLAGS, 0, 0 },
854 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
855 CPU_ANY_AVX_FLAGS, 0, 1 },
856 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
857 CPU_VMX_FLAGS, 0, 0 },
858 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
859 CPU_VMFUNC_FLAGS, 0, 0 },
860 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
861 CPU_SMX_FLAGS, 0, 0 },
862 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
863 CPU_XSAVE_FLAGS, 0, 0 },
864 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
865 CPU_XSAVEOPT_FLAGS, 0, 0 },
866 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN,
867 CPU_XSAVEC_FLAGS, 0, 0 },
868 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN,
869 CPU_XSAVES_FLAGS, 0, 0 },
870 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
871 CPU_AES_FLAGS, 0, 0 },
872 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
873 CPU_PCLMUL_FLAGS, 0, 0 },
874 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
875 CPU_PCLMUL_FLAGS, 1, 0 },
876 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
877 CPU_FSGSBASE_FLAGS, 0, 0 },
878 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
879 CPU_RDRND_FLAGS, 0, 0 },
880 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
881 CPU_F16C_FLAGS, 0, 0 },
882 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
883 CPU_BMI2_FLAGS, 0, 0 },
884 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
885 CPU_FMA_FLAGS, 0, 0 },
886 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
887 CPU_FMA4_FLAGS, 0, 0 },
888 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
889 CPU_XOP_FLAGS, 0, 0 },
890 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
891 CPU_LWP_FLAGS, 0, 0 },
892 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
893 CPU_MOVBE_FLAGS, 0, 0 },
894 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
895 CPU_CX16_FLAGS, 0, 0 },
896 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
897 CPU_EPT_FLAGS, 0, 0 },
898 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
899 CPU_LZCNT_FLAGS, 0, 0 },
900 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
901 CPU_HLE_FLAGS, 0, 0 },
902 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
903 CPU_RTM_FLAGS, 0, 0 },
904 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
905 CPU_INVPCID_FLAGS, 0, 0 },
906 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
907 CPU_CLFLUSH_FLAGS, 0, 0 },
908 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
909 CPU_NOP_FLAGS, 0, 0 },
910 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
911 CPU_SYSCALL_FLAGS, 0, 0 },
912 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
913 CPU_RDTSCP_FLAGS, 0, 0 },
914 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
915 CPU_3DNOW_FLAGS, 0, 0 },
916 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
917 CPU_3DNOWA_FLAGS, 0, 0 },
918 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
919 CPU_PADLOCK_FLAGS, 0, 0 },
920 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
921 CPU_SVME_FLAGS, 1, 0 },
922 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
923 CPU_SVME_FLAGS, 0, 0 },
924 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
925 CPU_SSE4A_FLAGS, 0, 0 },
926 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
927 CPU_ABM_FLAGS, 0, 0 },
928 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
929 CPU_BMI_FLAGS, 0, 0 },
930 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
931 CPU_TBM_FLAGS, 0, 0 },
932 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
933 CPU_ADX_FLAGS, 0, 0 },
934 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
935 CPU_RDSEED_FLAGS, 0, 0 },
936 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
937 CPU_PRFCHW_FLAGS, 0, 0 },
938 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
939 CPU_SMAP_FLAGS, 0, 0 },
940 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
941 CPU_MPX_FLAGS, 0, 0 },
942 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
943 CPU_SHA_FLAGS, 0, 0 },
944 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN,
945 CPU_CLFLUSHOPT_FLAGS, 0, 0 },
946 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN,
947 CPU_PREFETCHWT1_FLAGS, 0, 0 },
948 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN,
949 CPU_SE1_FLAGS, 0, 0 },
950 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN,
951 CPU_CLWB_FLAGS, 0, 0 },
952 { STRING_COMMA_LEN (".pcommit"), PROCESSOR_UNKNOWN,
953 CPU_PCOMMIT_FLAGS, 0, 0 },
954 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN,
955 CPU_AVX512IFMA_FLAGS, 0, 0 },
956 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN,
957 CPU_AVX512VBMI_FLAGS, 0, 0 },
958 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN,
959 CPU_CLZERO_FLAGS, 0, 0 },
960 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN,
961 CPU_MWAITX_FLAGS, 0, 0 },
962 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN,
963 CPU_OSPKE_FLAGS, 0, 0 },
964 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN,
965 CPU_RDPID_FLAGS, 0, 0 },
966};
967
968#ifdef I386COFF
969/* Like s_lcomm_internal in gas/read.c but the alignment string
970 is allowed to be optional. */
971
972static symbolS *
973pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
974{
975 addressT align = 0;
976
977 SKIP_WHITESPACE ();
978
979 if (needs_align
980 && *input_line_pointer == ',')
981 {
982 align = parse_align (needs_align - 1);
983
984 if (align == (addressT) -1)
985 return NULL;
986 }
987 else
988 {
989 if (size >= 8)
990 align = 3;
991 else if (size >= 4)
992 align = 2;
993 else if (size >= 2)
994 align = 1;
995 else
996 align = 0;
997 }
998
999 bss_alloc (symbolP, size, align);
1000 return symbolP;
1001}
1002
1003static void
1004pe_lcomm (int needs_align)
1005{
1006 s_comm_internal (needs_align * 2, pe_lcomm_internal);
1007}
1008#endif
1009
1010const pseudo_typeS md_pseudo_table[] =
1011{
1012#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1013 {"align", s_align_bytes, 0},
1014#else
1015 {"align", s_align_ptwo, 0},
1016#endif
1017 {"arch", set_cpu_arch, 0},
1018#ifndef I386COFF
1019 {"bss", s_bss, 0},
1020#else
1021 {"lcomm", pe_lcomm, 1},
1022#endif
1023 {"ffloat", float_cons, 'f'},
1024 {"dfloat", float_cons, 'd'},
1025 {"tfloat", float_cons, 'x'},
1026 {"value", cons, 2},
1027 {"slong", signed_cons, 4},
1028 {"noopt", s_ignore, 0},
1029 {"optim", s_ignore, 0},
1030 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
1031 {"code16", set_code_flag, CODE_16BIT},
1032 {"code32", set_code_flag, CODE_32BIT},
1033 {"code64", set_code_flag, CODE_64BIT},
1034 {"intel_syntax", set_intel_syntax, 1},
1035 {"att_syntax", set_intel_syntax, 0},
1036 {"intel_mnemonic", set_intel_mnemonic, 1},
1037 {"att_mnemonic", set_intel_mnemonic, 0},
1038 {"allow_index_reg", set_allow_index_reg, 1},
1039 {"disallow_index_reg", set_allow_index_reg, 0},
1040 {"sse_check", set_check, 0},
1041 {"operand_check", set_check, 1},
1042#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1043 {"largecomm", handle_large_common, 0},
1044#else
1045 {"file", (void (*) (int)) dwarf2_directive_file, 0},
1046 {"loc", dwarf2_directive_loc, 0},
1047 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
1048#endif
1049#ifdef TE_PE
1050 {"secrel32", pe_directive_secrel, 0},
1051#endif
1052 {0, 0, 0}
1053};
1054
1055/* For interface with expression (). */
1056extern char *input_line_pointer;
1057
1058/* Hash table for instruction mnemonic lookup. */
1059static struct hash_control *op_hash;
1060
1061/* Hash table for register lookup. */
1062static struct hash_control *reg_hash;
1063\f
1064void
1065i386_align_code (fragS *fragP, int count)
1066{
1067 /* Various efficient no-op patterns for aligning code labels.
1068 Note: Don't try to assemble the instructions in the comments.
1069 0L and 0w are not legal. */
1070 static const unsigned char f32_1[] =
1071 {0x90}; /* nop */
1072 static const unsigned char f32_2[] =
1073 {0x66,0x90}; /* xchg %ax,%ax */
1074 static const unsigned char f32_3[] =
1075 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1076 static const unsigned char f32_4[] =
1077 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1078 static const unsigned char f32_5[] =
1079 {0x90, /* nop */
1080 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1081 static const unsigned char f32_6[] =
1082 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1083 static const unsigned char f32_7[] =
1084 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1085 static const unsigned char f32_8[] =
1086 {0x90, /* nop */
1087 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1088 static const unsigned char f32_9[] =
1089 {0x89,0xf6, /* movl %esi,%esi */
1090 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1091 static const unsigned char f32_10[] =
1092 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
1093 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1094 static const unsigned char f32_11[] =
1095 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
1096 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1097 static const unsigned char f32_12[] =
1098 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
1099 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
1100 static const unsigned char f32_13[] =
1101 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
1102 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1103 static const unsigned char f32_14[] =
1104 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
1105 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1106 static const unsigned char f16_3[] =
1107 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1108 static const unsigned char f16_4[] =
1109 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
1110 static const unsigned char f16_5[] =
1111 {0x90, /* nop */
1112 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
1113 static const unsigned char f16_6[] =
1114 {0x89,0xf6, /* mov %si,%si */
1115 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1116 static const unsigned char f16_7[] =
1117 {0x8d,0x74,0x00, /* lea 0(%si),%si */
1118 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1119 static const unsigned char f16_8[] =
1120 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
1121 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1122 static const unsigned char jump_31[] =
1123 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
1124 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
1125 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
1126 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
1127 static const unsigned char *const f32_patt[] = {
1128 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
1129 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
1130 };
1131 static const unsigned char *const f16_patt[] = {
1132 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
1133 };
1134 /* nopl (%[re]ax) */
1135 static const unsigned char alt_3[] =
1136 {0x0f,0x1f,0x00};
1137 /* nopl 0(%[re]ax) */
1138 static const unsigned char alt_4[] =
1139 {0x0f,0x1f,0x40,0x00};
1140 /* nopl 0(%[re]ax,%[re]ax,1) */
1141 static const unsigned char alt_5[] =
1142 {0x0f,0x1f,0x44,0x00,0x00};
1143 /* nopw 0(%[re]ax,%[re]ax,1) */
1144 static const unsigned char alt_6[] =
1145 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1146 /* nopl 0L(%[re]ax) */
1147 static const unsigned char alt_7[] =
1148 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1149 /* nopl 0L(%[re]ax,%[re]ax,1) */
1150 static const unsigned char alt_8[] =
1151 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1152 /* nopw 0L(%[re]ax,%[re]ax,1) */
1153 static const unsigned char alt_9[] =
1154 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1155 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1156 static const unsigned char alt_10[] =
1157 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1158 static const unsigned char *const alt_patt[] = {
1159 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1160 alt_9, alt_10
1161 };
1162
1163 /* Only align if COUNT is positive and within range. */
1164 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1165 return;
1166
1167 /* We need to decide which NOP sequence to use for 32bit and
1168 64bit. When -mtune= is used:
1169
1170 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1171 PROCESSOR_GENERIC32, f32_patt will be used.
1172 2. For the rest, alt_patt will be used.
1173
1174 When -mtune= isn't used, alt_patt will be used if
1175 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1176 be used.
1177
1178 When -march= or .arch is used, we can't use anything beyond
1179 cpu_arch_isa_flags. */
1180
1181 if (flag_code == CODE_16BIT)
1182 {
1183 if (count > 8)
1184 {
1185 memcpy (fragP->fr_literal + fragP->fr_fix,
1186 jump_31, count);
1187 /* Adjust jump offset. */
1188 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1189 }
1190 else
1191 memcpy (fragP->fr_literal + fragP->fr_fix,
1192 f16_patt[count - 1], count);
1193 }
1194 else
1195 {
1196 const unsigned char *const *patt = NULL;
1197
1198 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1199 {
1200 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1201 switch (cpu_arch_tune)
1202 {
1203 case PROCESSOR_UNKNOWN:
1204 /* We use cpu_arch_isa_flags to check if we SHOULD
1205 optimize with nops. */
1206 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1207 patt = alt_patt;
1208 else
1209 patt = f32_patt;
1210 break;
1211 case PROCESSOR_PENTIUM4:
1212 case PROCESSOR_NOCONA:
1213 case PROCESSOR_CORE:
1214 case PROCESSOR_CORE2:
1215 case PROCESSOR_COREI7:
1216 case PROCESSOR_L1OM:
1217 case PROCESSOR_K1OM:
1218 case PROCESSOR_GENERIC64:
1219 case PROCESSOR_K6:
1220 case PROCESSOR_ATHLON:
1221 case PROCESSOR_K8:
1222 case PROCESSOR_AMDFAM10:
1223 case PROCESSOR_BD:
1224 case PROCESSOR_ZNVER:
1225 case PROCESSOR_BT:
1226 patt = alt_patt;
1227 break;
1228 case PROCESSOR_I386:
1229 case PROCESSOR_I486:
1230 case PROCESSOR_PENTIUM:
1231 case PROCESSOR_PENTIUMPRO:
1232 case PROCESSOR_IAMCU:
1233 case PROCESSOR_GENERIC32:
1234 patt = f32_patt;
1235 break;
1236 }
1237 }
1238 else
1239 {
1240 switch (fragP->tc_frag_data.tune)
1241 {
1242 case PROCESSOR_UNKNOWN:
1243 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1244 PROCESSOR_UNKNOWN. */
1245 abort ();
1246 break;
1247
1248 case PROCESSOR_I386:
1249 case PROCESSOR_I486:
1250 case PROCESSOR_PENTIUM:
1251 case PROCESSOR_IAMCU:
1252 case PROCESSOR_K6:
1253 case PROCESSOR_ATHLON:
1254 case PROCESSOR_K8:
1255 case PROCESSOR_AMDFAM10:
1256 case PROCESSOR_BD:
1257 case PROCESSOR_ZNVER:
1258 case PROCESSOR_BT:
1259 case PROCESSOR_GENERIC32:
1260 /* We use cpu_arch_isa_flags to check if we CAN optimize
1261 with nops. */
1262 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1263 patt = alt_patt;
1264 else
1265 patt = f32_patt;
1266 break;
1267 case PROCESSOR_PENTIUMPRO:
1268 case PROCESSOR_PENTIUM4:
1269 case PROCESSOR_NOCONA:
1270 case PROCESSOR_CORE:
1271 case PROCESSOR_CORE2:
1272 case PROCESSOR_COREI7:
1273 case PROCESSOR_L1OM:
1274 case PROCESSOR_K1OM:
1275 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1276 patt = alt_patt;
1277 else
1278 patt = f32_patt;
1279 break;
1280 case PROCESSOR_GENERIC64:
1281 patt = alt_patt;
1282 break;
1283 }
1284 }
1285
1286 if (patt == f32_patt)
1287 {
1288 /* If the padding is below the limit computed next (15 bytes, or 3
1289 bytes in 64-bit long mode), use the normal patterns. Otherwise,
1290 use a jump instruction and adjust its offset. */
1291 int limit;
1292
1293 /* For 64bit, the limit is 3 bytes. */
1294 if (flag_code == CODE_64BIT
1295 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1296 limit = 3;
1297 else
1298 limit = 15;
1299 if (count < limit)
1300 memcpy (fragP->fr_literal + fragP->fr_fix,
1301 patt[count - 1], count);
1302 else
1303 {
1304 memcpy (fragP->fr_literal + fragP->fr_fix,
1305 jump_31, count);
1306 /* Adjust jump offset. */
1307 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1308 }
1309 }
1310 else
1311 {
1312 /* The maximum length of a NOP pattern here is 10 bytes. If the
1313 padding is greater than 10 bytes and we don't use a jump,
1314 we have to break it into smaller pieces. */
1315 int padding = count;
1316 while (padding > 10)
1317 {
1318 padding -= 10;
1319 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1320 patt [9], 10);
1321 }
1322
1323 if (padding)
1324 memcpy (fragP->fr_literal + fragP->fr_fix,
1325 patt [padding - 1], padding);
1326 }
1327 }
1328 fragP->fr_var = count;
1329}
1330
1331static INLINE int
1332operand_type_all_zero (const union i386_operand_type *x)
1333{
1334 switch (ARRAY_SIZE(x->array))
1335 {
1336 case 3:
1337 if (x->array[2])
1338 return 0;
1339 case 2:
1340 if (x->array[1])
1341 return 0;
1342 case 1:
1343 return !x->array[0];
1344 default:
1345 abort ();
1346 }
1347}
1348
1349static INLINE void
1350operand_type_set (union i386_operand_type *x, unsigned int v)
1351{
1352 switch (ARRAY_SIZE(x->array))
1353 {
1354 case 3:
1355 x->array[2] = v;
1356 case 2:
1357 x->array[1] = v;
1358 case 1:
1359 x->array[0] = v;
1360 break;
1361 default:
1362 abort ();
1363 }
1364}
1365
1366static INLINE int
1367operand_type_equal (const union i386_operand_type *x,
1368 const union i386_operand_type *y)
1369{
1370 switch (ARRAY_SIZE(x->array))
1371 {
1372 case 3:
1373 if (x->array[2] != y->array[2])
1374 return 0;
1375 case 2:
1376 if (x->array[1] != y->array[1])
1377 return 0;
1378 case 1:
1379 return x->array[0] == y->array[0];
1380 break;
1381 default:
1382 abort ();
1383 }
1384}
1385
1386static INLINE int
1387cpu_flags_all_zero (const union i386_cpu_flags *x)
1388{
1389 switch (ARRAY_SIZE(x->array))
1390 {
1391 case 3:
1392 if (x->array[2])
1393 return 0;
1394 case 2:
1395 if (x->array[1])
1396 return 0;
1397 case 1:
1398 return !x->array[0];
1399 default:
1400 abort ();
1401 }
1402}
1403
1404static INLINE int
1405cpu_flags_equal (const union i386_cpu_flags *x,
1406 const union i386_cpu_flags *y)
1407{
1408 switch (ARRAY_SIZE(x->array))
1409 {
1410 case 3:
1411 if (x->array[2] != y->array[2])
1412 return 0;
1413 case 2:
1414 if (x->array[1] != y->array[1])
1415 return 0;
1416 case 1:
1417 return x->array[0] == y->array[0];
1418 break;
1419 default:
1420 abort ();
1421 }
1422}
1423
1424static INLINE int
1425cpu_flags_check_cpu64 (i386_cpu_flags f)
1426{
1427 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1428 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1429}
1430
1431static INLINE i386_cpu_flags
1432cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1433{
1434 switch (ARRAY_SIZE (x.array))
1435 {
1436 case 3:
1437 x.array [2] &= y.array [2];
1438 case 2:
1439 x.array [1] &= y.array [1];
1440 case 1:
1441 x.array [0] &= y.array [0];
1442 break;
1443 default:
1444 abort ();
1445 }
1446 return x;
1447}
1448
1449static INLINE i386_cpu_flags
1450cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1451{
1452 switch (ARRAY_SIZE (x.array))
1453 {
1454 case 3:
1455 x.array [2] |= y.array [2];
1456 case 2:
1457 x.array [1] |= y.array [1];
1458 case 1:
1459 x.array [0] |= y.array [0];
1460 break;
1461 default:
1462 abort ();
1463 }
1464 return x;
1465}
1466
1467static INLINE i386_cpu_flags
1468cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1469{
1470 switch (ARRAY_SIZE (x.array))
1471 {
1472 case 3:
1473 x.array [2] &= ~y.array [2];
1474 case 2:
1475 x.array [1] &= ~y.array [1];
1476 case 1:
1477 x.array [0] &= ~y.array [0];
1478 break;
1479 default:
1480 abort ();
1481 }
1482 return x;
1483}
1484
1485static int
1486valid_iamcu_cpu_flags (const i386_cpu_flags *flags)
1487{
1488 if (cpu_arch_isa == PROCESSOR_IAMCU)
1489 {
1490 static const i386_cpu_flags iamcu_flags = CPU_IAMCU_COMPAT_FLAGS;
1491 i386_cpu_flags compat_flags;
1492 compat_flags = cpu_flags_and_not (*flags, iamcu_flags);
1493 return cpu_flags_all_zero (&compat_flags);
1494 }
1495 else
1496 return 1;
1497}
1498
1499#define CPU_FLAGS_ARCH_MATCH 0x1
1500#define CPU_FLAGS_64BIT_MATCH 0x2
1501#define CPU_FLAGS_AES_MATCH 0x4
1502#define CPU_FLAGS_PCLMUL_MATCH 0x8
1503#define CPU_FLAGS_AVX_MATCH 0x10
1504
1505#define CPU_FLAGS_32BIT_MATCH \
1506 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1507 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1508#define CPU_FLAGS_PERFECT_MATCH \
1509 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1510
1511/* Return CPU flags match bits. */
1512
1513static int
1514cpu_flags_match (const insn_template *t)
1515{
1516 i386_cpu_flags x = t->cpu_flags;
1517 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1518
1519 x.bitfield.cpu64 = 0;
1520 x.bitfield.cpuno64 = 0;
1521
1522 if (cpu_flags_all_zero (&x))
1523 {
1524 /* This instruction is available on all archs. */
1525 match |= CPU_FLAGS_32BIT_MATCH;
1526 }
1527 else
1528 {
1529 /* This instruction is available only on some archs. */
1530 i386_cpu_flags cpu = cpu_arch_flags;
1531
1532 cpu.bitfield.cpu64 = 0;
1533 cpu.bitfield.cpuno64 = 0;
1534 cpu = cpu_flags_and (x, cpu);
1535 if (!cpu_flags_all_zero (&cpu))
1536 {
1537 if (x.bitfield.cpuavx)
1538 {
1539 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1540 if (cpu.bitfield.cpuavx)
1541 {
1542 /* Check SSE2AVX. */
1543 if (!t->opcode_modifier.sse2avx || sse2avx)
1544 {
1545 match |= (CPU_FLAGS_ARCH_MATCH
1546 | CPU_FLAGS_AVX_MATCH);
1547 /* Check AES. */
1548 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1549 match |= CPU_FLAGS_AES_MATCH;
1550 /* Check PCLMUL. */
1551 if (!x.bitfield.cpupclmul
1552 || cpu.bitfield.cpupclmul)
1553 match |= CPU_FLAGS_PCLMUL_MATCH;
1554 }
1555 }
1556 else
1557 match |= CPU_FLAGS_ARCH_MATCH;
1558 }
1559 else
1560 match |= CPU_FLAGS_32BIT_MATCH;
1561 }
1562 }
1563 return match;
1564}
1565
1566static INLINE i386_operand_type
1567operand_type_and (i386_operand_type x, i386_operand_type y)
1568{
1569 switch (ARRAY_SIZE (x.array))
1570 {
1571 case 3:
1572 x.array [2] &= y.array [2];
1573 case 2:
1574 x.array [1] &= y.array [1];
1575 case 1:
1576 x.array [0] &= y.array [0];
1577 break;
1578 default:
1579 abort ();
1580 }
1581 return x;
1582}
1583
1584static INLINE i386_operand_type
1585operand_type_or (i386_operand_type x, i386_operand_type y)
1586{
1587 switch (ARRAY_SIZE (x.array))
1588 {
1589 case 3:
1590 x.array [2] |= y.array [2];
1591 case 2:
1592 x.array [1] |= y.array [1];
1593 case 1:
1594 x.array [0] |= y.array [0];
1595 break;
1596 default:
1597 abort ();
1598 }
1599 return x;
1600}
1601
1602static INLINE i386_operand_type
1603operand_type_xor (i386_operand_type x, i386_operand_type y)
1604{
1605 switch (ARRAY_SIZE (x.array))
1606 {
1607 case 3:
1608 x.array [2] ^= y.array [2];
1609 case 2:
1610 x.array [1] ^= y.array [1];
1611 case 1:
1612 x.array [0] ^= y.array [0];
1613 break;
1614 default:
1615 abort ();
1616 }
1617 return x;
1618}
1619
1620static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1621static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1622static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1623static const i386_operand_type inoutportreg
1624 = OPERAND_TYPE_INOUTPORTREG;
1625static const i386_operand_type reg16_inoutportreg
1626 = OPERAND_TYPE_REG16_INOUTPORTREG;
1627static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1628static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1629static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1630static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1631static const i386_operand_type anydisp
1632 = OPERAND_TYPE_ANYDISP;
1633static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1634static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1635static const i386_operand_type regzmm = OPERAND_TYPE_REGZMM;
1636static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
1637static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1638static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1639static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1640static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1641static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1642static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1643static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1644static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1645static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1646static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1647
1648enum operand_type
1649{
1650 reg,
1651 imm,
1652 disp,
1653 anymem
1654};
1655
1656static INLINE int
1657operand_type_check (i386_operand_type t, enum operand_type c)
1658{
1659 switch (c)
1660 {
1661 case reg:
1662 return (t.bitfield.reg8
1663 || t.bitfield.reg16
1664 || t.bitfield.reg32
1665 || t.bitfield.reg64);
1666
1667 case imm:
1668 return (t.bitfield.imm8
1669 || t.bitfield.imm8s
1670 || t.bitfield.imm16
1671 || t.bitfield.imm32
1672 || t.bitfield.imm32s
1673 || t.bitfield.imm64);
1674
1675 case disp:
1676 return (t.bitfield.disp8
1677 || t.bitfield.disp16
1678 || t.bitfield.disp32
1679 || t.bitfield.disp32s
1680 || t.bitfield.disp64);
1681
1682 case anymem:
1683 return (t.bitfield.disp8
1684 || t.bitfield.disp16
1685 || t.bitfield.disp32
1686 || t.bitfield.disp32s
1687 || t.bitfield.disp64
1688 || t.bitfield.baseindex);
1689
1690 default:
1691 abort ();
1692 }
1693
1694 return 0;
1695}
1696
1697/* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1698 operand J for instruction template T. */
1699
1700static INLINE int
1701match_reg_size (const insn_template *t, unsigned int j)
1702{
1703 return !((i.types[j].bitfield.byte
1704 && !t->operand_types[j].bitfield.byte)
1705 || (i.types[j].bitfield.word
1706 && !t->operand_types[j].bitfield.word)
1707 || (i.types[j].bitfield.dword
1708 && !t->operand_types[j].bitfield.dword)
1709 || (i.types[j].bitfield.qword
1710 && !t->operand_types[j].bitfield.qword));
1711}
1712
1713/* Return 1 if there is no conflict in any size on operand J for
1714 instruction template T. */
1715
1716static INLINE int
1717match_mem_size (const insn_template *t, unsigned int j)
1718{
1719 return (match_reg_size (t, j)
1720 && !((i.types[j].bitfield.unspecified
1721 && !i.broadcast
1722 && !t->operand_types[j].bitfield.unspecified)
1723 || (i.types[j].bitfield.fword
1724 && !t->operand_types[j].bitfield.fword)
1725 || (i.types[j].bitfield.tbyte
1726 && !t->operand_types[j].bitfield.tbyte)
1727 || (i.types[j].bitfield.xmmword
1728 && !t->operand_types[j].bitfield.xmmword)
1729 || (i.types[j].bitfield.ymmword
1730 && !t->operand_types[j].bitfield.ymmword)
1731 || (i.types[j].bitfield.zmmword
1732 && !t->operand_types[j].bitfield.zmmword)));
1733}
1734
1735/* Return 1 if there is no size conflict on any operands for
1736 instruction template T. */
1737
1738static INLINE int
1739operand_size_match (const insn_template *t)
1740{
1741 unsigned int j;
1742 int match = 1;
1743
1744 /* Don't check jump instructions. */
1745 if (t->opcode_modifier.jump
1746 || t->opcode_modifier.jumpbyte
1747 || t->opcode_modifier.jumpdword
1748 || t->opcode_modifier.jumpintersegment)
1749 return match;
1750
1751 /* Check memory and accumulator operand size. */
1752 for (j = 0; j < i.operands; j++)
1753 {
1754 if (t->operand_types[j].bitfield.anysize)
1755 continue;
1756
1757 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1758 {
1759 match = 0;
1760 break;
1761 }
1762
1763 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1764 {
1765 match = 0;
1766 break;
1767 }
1768 }
1769
1770 if (match)
1771 return match;
1772 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1773 {
1774mismatch:
1775 i.error = operand_size_mismatch;
1776 return 0;
1777 }
1778
1779 /* Check reverse. */
1780 gas_assert (i.operands == 2);
1781
1782 match = 1;
1783 for (j = 0; j < 2; j++)
1784 {
1785 if (t->operand_types[j].bitfield.acc
1786 && !match_reg_size (t, j ? 0 : 1))
1787 goto mismatch;
1788
1789 if (i.types[j].bitfield.mem
1790 && !match_mem_size (t, j ? 0 : 1))
1791 goto mismatch;
1792 }
1793
1794 return match;
1795}
1796
1797static INLINE int
1798operand_type_match (i386_operand_type overlap,
1799 i386_operand_type given)
1800{
1801 i386_operand_type temp = overlap;
1802
1803 temp.bitfield.jumpabsolute = 0;
1804 temp.bitfield.unspecified = 0;
1805 temp.bitfield.byte = 0;
1806 temp.bitfield.word = 0;
1807 temp.bitfield.dword = 0;
1808 temp.bitfield.fword = 0;
1809 temp.bitfield.qword = 0;
1810 temp.bitfield.tbyte = 0;
1811 temp.bitfield.xmmword = 0;
1812 temp.bitfield.ymmword = 0;
1813 temp.bitfield.zmmword = 0;
1814 if (operand_type_all_zero (&temp))
1815 goto mismatch;
1816
1817 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1818 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1819 return 1;
1820
1821mismatch:
1822 i.error = operand_type_mismatch;
1823 return 0;
1824}
1825
1826/* If given types g0 and g1 are registers they must be of the same type
1827 unless the expected operand type register overlap is null.
1828 Note that Acc in a template matches every size of reg. */
1829
1830static INLINE int
1831operand_type_register_match (i386_operand_type m0,
1832 i386_operand_type g0,
1833 i386_operand_type t0,
1834 i386_operand_type m1,
1835 i386_operand_type g1,
1836 i386_operand_type t1)
1837{
1838 if (!operand_type_check (g0, reg))
1839 return 1;
1840
1841 if (!operand_type_check (g1, reg))
1842 return 1;
1843
1844 if (g0.bitfield.reg8 == g1.bitfield.reg8
1845 && g0.bitfield.reg16 == g1.bitfield.reg16
1846 && g0.bitfield.reg32 == g1.bitfield.reg32
1847 && g0.bitfield.reg64 == g1.bitfield.reg64)
1848 return 1;
1849
1850 if (m0.bitfield.acc)
1851 {
1852 t0.bitfield.reg8 = 1;
1853 t0.bitfield.reg16 = 1;
1854 t0.bitfield.reg32 = 1;
1855 t0.bitfield.reg64 = 1;
1856 }
1857
1858 if (m1.bitfield.acc)
1859 {
1860 t1.bitfield.reg8 = 1;
1861 t1.bitfield.reg16 = 1;
1862 t1.bitfield.reg32 = 1;
1863 t1.bitfield.reg64 = 1;
1864 }
1865
1866 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1867 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1868 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1869 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1870 return 1;
1871
1872 i.error = register_type_mismatch;
1873
1874 return 0;
1875}
1876
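/* Return the encoded number of register R: RegRex (the REX-extended
   registers) adds 8 and RegVRex (the EVEX high-16 registers) adds 16,
   giving the 0..31 numbering used by the AVX-512 encodings.  */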
1877static INLINE unsigned int
1878register_number (const reg_entry *r)
1879{
1880 unsigned int nr = r->reg_num;
1881
1882 if (r->reg_flags & RegRex)
1883 nr += 8;
1884
1885 if (r->reg_flags & RegVRex)
1886 nr += 16;
1887
1888 return nr;
1889}
1890
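/* Return the ModRM.mod value implied by the displacement type in T:
   1 for an 8-bit (or compressed vec_disp8) displacement, 2 for a
   16/32-bit displacement, 0 when no displacement bits are set.  */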
1891static INLINE unsigned int
1892mode_from_disp_size (i386_operand_type t)
1893{
1894 if (t.bitfield.disp8 || t.bitfield.vec_disp8)
1895 return 1;
1896 else if (t.bitfield.disp16
1897 || t.bitfield.disp32
1898 || t.bitfield.disp32s)
1899 return 2;
1900 else
1901 return 0;
1902}
1903
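/* The fits_in_signed_* helpers below use biased unsigned comparisons:
   adding 0x80 (resp. 0x8000, 0x80000000) maps the signed range onto
   [0, 0xff] (resp. 0xffff, 0xffffffff), so a single unsigned
   comparison checks both bounds at once.  */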
1904static INLINE int
1905fits_in_signed_byte (addressT num)
1906{
1907 return num + 0x80 <= 0xff;
1908}
1909
1910static INLINE int
1911fits_in_unsigned_byte (addressT num)
1912{
1913 return num <= 0xff;
1914}
1915
1916static INLINE int
1917fits_in_unsigned_word (addressT num)
1918{
1919 return num <= 0xffff;
1920}
1921
1922static INLINE int
1923fits_in_signed_word (addressT num)
1924{
1925 return num + 0x8000 <= 0xffff;
1926}
1927
1928static INLINE int
1929fits_in_signed_long (addressT num ATTRIBUTE_UNUSED)
1930{
1931#ifndef BFD64
1932 return 1;
1933#else
1934 return num + 0x80000000 <= 0xffffffff;
1935#endif
1936} /* fits_in_signed_long() */
1937
1938static INLINE int
1939fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED)
1940{
1941#ifndef BFD64
1942 return 1;
1943#else
1944 return num <= 0xffffffff;
1945#endif
1946} /* fits_in_unsigned_long() */
1947
1948static INLINE int
1949fits_in_vec_disp8 (offsetT num)
1950{
1951 int shift = i.memshift;
1952 unsigned int mask;
1953
1954 if (shift == -1)
1955 abort ();
1956
1957 mask = (1 << shift) - 1;
1958
1959 /* Return 0 if NUM isn't properly aligned. */
1960 if ((num & mask))
1961 return 0;
1962
1963 /* Check if NUM will fit in 8bit after shift. */
1964 return fits_in_signed_byte (num >> shift);
1965}
1966
1967static INLINE int
1968fits_in_imm4 (offsetT num)
1969{
1970 return (num & 0xf) == num;
1971}
1972
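/* Return the set of immediate operand types that NUM can be encoded as
   (Imm1/Imm8/Imm8S/Imm16/Imm32/Imm32S); Imm64 is always included since
   any value fits in 64 bits.  */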
1973static i386_operand_type
1974smallest_imm_type (offsetT num)
1975{
1976 i386_operand_type t;
1977
1978 operand_type_set (&t, 0);
1979 t.bitfield.imm64 = 1;
1980
1981 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1982 {
1983 /* This code is disabled on the 486 because all the Imm1 forms
1984 in the opcode table are slower on the i486. They're the
1985 versions with the implicitly specified single-position
1986 displacement, which has another syntax if you really want to
1987 use that form. */
1988 t.bitfield.imm1 = 1;
1989 t.bitfield.imm8 = 1;
1990 t.bitfield.imm8s = 1;
1991 t.bitfield.imm16 = 1;
1992 t.bitfield.imm32 = 1;
1993 t.bitfield.imm32s = 1;
1994 }
1995 else if (fits_in_signed_byte (num))
1996 {
1997 t.bitfield.imm8 = 1;
1998 t.bitfield.imm8s = 1;
1999 t.bitfield.imm16 = 1;
2000 t.bitfield.imm32 = 1;
2001 t.bitfield.imm32s = 1;
2002 }
2003 else if (fits_in_unsigned_byte (num))
2004 {
2005 t.bitfield.imm8 = 1;
2006 t.bitfield.imm16 = 1;
2007 t.bitfield.imm32 = 1;
2008 t.bitfield.imm32s = 1;
2009 }
2010 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
2011 {
2012 t.bitfield.imm16 = 1;
2013 t.bitfield.imm32 = 1;
2014 t.bitfield.imm32s = 1;
2015 }
2016 else if (fits_in_signed_long (num))
2017 {
2018 t.bitfield.imm32 = 1;
2019 t.bitfield.imm32s = 1;
2020 }
2021 else if (fits_in_unsigned_long (num))
2022 t.bitfield.imm32 = 1;
2023
2024 return t;
2025}
2026
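/* Mask VAL down to a SIZE-byte field, warning unless the discarded
   high bits are all zeros or all ones.  */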
2027static offsetT
2028offset_in_range (offsetT val, int size)
2029{
2030 addressT mask;
2031
2032 switch (size)
2033 {
2034 case 1: mask = ((addressT) 1 << 8) - 1; break;
2035 case 2: mask = ((addressT) 1 << 16) - 1; break;
2036 case 4: mask = ((addressT) 2 << 31) - 1; break;
2037#ifdef BFD64
2038 case 8: mask = ((addressT) 2 << 63) - 1; break;
2039#endif
2040 default: abort ();
2041 }
2042
2043#ifdef BFD64
2044 /* If BFD64, sign extend val for 32bit address mode. */
2045 if (flag_code != CODE_64BIT
2046 || i.prefix[ADDR_PREFIX])
2047 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
2048 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
2049#endif
2050
2051 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
2052 {
2053 char buf1[40], buf2[40];
2054
2055 sprint_value (buf1, val);
2056 sprint_value (buf2, val & mask);
2057 as_warn (_("%s shortened to %s"), buf1, buf2);
2058 }
2059 return val & mask;
2060}
2061
2062enum PREFIX_GROUP
2063{
2064 PREFIX_EXIST = 0,
2065 PREFIX_LOCK,
2066 PREFIX_REP,
2067 PREFIX_OTHER
2068};
2069
2070/* Returns
2071 a. PREFIX_EXIST if attempting to add a prefix where one from the
2072 same class already exists.
2073 b. PREFIX_LOCK if lock prefix is added.
2074 c. PREFIX_REP if rep/repne prefix is added.
2075 d. PREFIX_OTHER if other prefix is added.
2076 */
2077
2078static enum PREFIX_GROUP
2079add_prefix (unsigned int prefix)
2080{
2081 enum PREFIX_GROUP ret = PREFIX_OTHER;
2082 unsigned int q;
2083
2084 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
2085 && flag_code == CODE_64BIT)
2086 {
2087 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
2088 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
2089 && (prefix & (REX_R | REX_X | REX_B))))
2090 ret = PREFIX_EXIST;
2091 q = REX_PREFIX;
2092 }
2093 else
2094 {
2095 switch (prefix)
2096 {
2097 default:
2098 abort ();
2099
2100 case CS_PREFIX_OPCODE:
2101 case DS_PREFIX_OPCODE:
2102 case ES_PREFIX_OPCODE:
2103 case FS_PREFIX_OPCODE:
2104 case GS_PREFIX_OPCODE:
2105 case SS_PREFIX_OPCODE:
2106 q = SEG_PREFIX;
2107 break;
2108
2109 case REPNE_PREFIX_OPCODE:
2110 case REPE_PREFIX_OPCODE:
2111 q = REP_PREFIX;
2112 ret = PREFIX_REP;
2113 break;
2114
2115 case LOCK_PREFIX_OPCODE:
2116 q = LOCK_PREFIX;
2117 ret = PREFIX_LOCK;
2118 break;
2119
2120 case FWAIT_OPCODE:
2121 q = WAIT_PREFIX;
2122 break;
2123
2124 case ADDR_PREFIX_OPCODE:
2125 q = ADDR_PREFIX;
2126 break;
2127
2128 case DATA_PREFIX_OPCODE:
2129 q = DATA_PREFIX;
2130 break;
2131 }
2132 if (i.prefix[q] != 0)
2133 ret = PREFIX_EXIST;
2134 }
2135
2136 if (ret)
2137 {
2138 if (!i.prefix[q])
2139 ++i.prefixes;
2140 i.prefix[q] |= prefix;
2141 }
2142 else
2143 as_bad (_("same type of prefix used twice"));
2144
2145 return ret;
2146}
2147
2148static void
2149update_code_flag (int value, int check)
2150{
2151 PRINTF_LIKE ((*as_error));
2152
2153 flag_code = (enum flag_code) value;
2154 if (flag_code == CODE_64BIT)
2155 {
2156 cpu_arch_flags.bitfield.cpu64 = 1;
2157 cpu_arch_flags.bitfield.cpuno64 = 0;
2158 }
2159 else
2160 {
2161 cpu_arch_flags.bitfield.cpu64 = 0;
2162 cpu_arch_flags.bitfield.cpuno64 = 1;
2163 }
2164 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
2165 {
2166 if (check)
2167 as_error = as_fatal;
2168 else
2169 as_error = as_bad;
2170 (*as_error) (_("64bit mode not supported on `%s'."),
2171 cpu_arch_name ? cpu_arch_name : default_arch);
2172 }
2173 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2174 {
2175 if (check)
2176 as_error = as_fatal;
2177 else
2178 as_error = as_bad;
2179 (*as_error) (_("32bit mode not supported on `%s'."),
2180 cpu_arch_name ? cpu_arch_name : default_arch);
2181 }
2182 stackop_size = '\0';
2183}
2184
2185static void
2186set_code_flag (int value)
2187{
2188 update_code_flag (value, 0);
2189}
2190
2191static void
2192set_16bit_gcc_code_flag (int new_code_flag)
2193{
2194 flag_code = (enum flag_code) new_code_flag;
2195 if (flag_code != CODE_16BIT)
2196 abort ();
2197 cpu_arch_flags.bitfield.cpu64 = 0;
2198 cpu_arch_flags.bitfield.cpuno64 = 1;
2199 stackop_size = LONG_MNEM_SUFFIX;
2200}
2201
2202static void
2203set_intel_syntax (int syntax_flag)
2204{
2205 /* Find out if register prefixing is specified. */
2206 int ask_naked_reg = 0;
2207
2208 SKIP_WHITESPACE ();
2209 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2210 {
2211 char *string;
2212 int e = get_symbol_name (&string);
2213
2214 if (strcmp (string, "prefix") == 0)
2215 ask_naked_reg = 1;
2216 else if (strcmp (string, "noprefix") == 0)
2217 ask_naked_reg = -1;
2218 else
2219 as_bad (_("bad argument to syntax directive."));
2220 (void) restore_line_pointer (e);
2221 }
2222 demand_empty_rest_of_line ();
2223
2224 intel_syntax = syntax_flag;
2225
2226 if (ask_naked_reg == 0)
2227 allow_naked_reg = (intel_syntax
2228 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2229 else
2230 allow_naked_reg = (ask_naked_reg < 0);
2231
2232 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2233
2234 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2235 identifier_chars['$'] = intel_syntax ? '$' : 0;
2236 register_prefix = allow_naked_reg ? "" : "%";
2237}
2238
2239static void
2240set_intel_mnemonic (int mnemonic_flag)
2241{
2242 intel_mnemonic = mnemonic_flag;
2243}
2244
2245static void
2246set_allow_index_reg (int flag)
2247{
2248 allow_index_reg = flag;
2249}
2250
2251static void
2252set_check (int what)
2253{
2254 enum check_kind *kind;
2255 const char *str;
2256
2257 if (what)
2258 {
2259 kind = &operand_check;
2260 str = "operand";
2261 }
2262 else
2263 {
2264 kind = &sse_check;
2265 str = "sse";
2266 }
2267
2268 SKIP_WHITESPACE ();
2269
2270 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2271 {
2272 char *string;
2273 int e = get_symbol_name (&string);
2274
2275 if (strcmp (string, "none") == 0)
2276 *kind = check_none;
2277 else if (strcmp (string, "warning") == 0)
2278 *kind = check_warning;
2279 else if (strcmp (string, "error") == 0)
2280 *kind = check_error;
2281 else
2282 as_bad (_("bad argument to %s_check directive."), str);
2283 (void) restore_line_pointer (e);
2284 }
2285 else
2286 as_bad (_("missing argument for %s_check directive"), str);
2287
2288 demand_empty_rest_of_line ();
2289}
2290
2291static void
2292check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2293 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2294{
2295#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2296 static const char *arch;
2297
2298 /* Intel L1OM, K1OM and MCU are only supported on ELF. */

2299 if (!IS_ELF)
2300 return;
2301
2302 if (!arch)
2303 {
2304 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2305 use default_arch. */
2306 arch = cpu_arch_name;
2307 if (!arch)
2308 arch = default_arch;
2309 }
2310
2311 /* If we are targeting Intel MCU, we must enable it. */
2312 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_IAMCU
2313 || new_flag.bitfield.cpuiamcu)
2314 return;
2315
2316 /* If we are targeting Intel L1OM, we must enable it. */
2317 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2318 || new_flag.bitfield.cpul1om)
2319 return;
2320
2321 /* If we are targeting Intel K1OM, we must enable it. */
2322 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2323 || new_flag.bitfield.cpuk1om)
2324 return;
2325
2326 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2327#endif
2328}
2329
2330static void
2331set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2332{
2333 SKIP_WHITESPACE ();
2334
2335 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2336 {
2337 char *string;
2338 int e = get_symbol_name (&string);
2339 unsigned int j;
2340 i386_cpu_flags flags;
2341
2342 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2343 {
2344 if (strcmp (string, cpu_arch[j].name) == 0)
2345 {
2346 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2347
2348 if (*string != '.')
2349 {
2350 cpu_arch_name = cpu_arch[j].name;
2351 cpu_sub_arch_name = NULL;
2352 cpu_arch_flags = cpu_arch[j].flags;
2353 if (flag_code == CODE_64BIT)
2354 {
2355 cpu_arch_flags.bitfield.cpu64 = 1;
2356 cpu_arch_flags.bitfield.cpuno64 = 0;
2357 }
2358 else
2359 {
2360 cpu_arch_flags.bitfield.cpu64 = 0;
2361 cpu_arch_flags.bitfield.cpuno64 = 1;
2362 }
2363 cpu_arch_isa = cpu_arch[j].type;
2364 cpu_arch_isa_flags = cpu_arch[j].flags;
2365 if (!cpu_arch_tune_set)
2366 {
2367 cpu_arch_tune = cpu_arch_isa;
2368 cpu_arch_tune_flags = cpu_arch_isa_flags;
2369 }
2370 break;
2371 }
2372
2373 if (!cpu_arch[j].negated)
2374 flags = cpu_flags_or (cpu_arch_flags,
2375 cpu_arch[j].flags);
2376 else
2377 flags = cpu_flags_and_not (cpu_arch_flags,
2378 cpu_arch[j].flags);
2379
2380 if (!valid_iamcu_cpu_flags (&flags))
2381 as_fatal (_("`%s' isn't valid for Intel MCU"),
2382 cpu_arch[j].name);
2383 else if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2384 {
2385 if (cpu_sub_arch_name)
2386 {
2387 char *name = cpu_sub_arch_name;
2388 cpu_sub_arch_name = concat (name,
2389 cpu_arch[j].name,
2390 (const char *) NULL);
2391 free (name);
2392 }
2393 else
2394 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2395 cpu_arch_flags = flags;
2396 cpu_arch_isa_flags = flags;
2397 }
2398 (void) restore_line_pointer (e);
2399 demand_empty_rest_of_line ();
2400 return;
2401 }
2402 }
2403 if (j >= ARRAY_SIZE (cpu_arch))
2404 as_bad (_("no such architecture: `%s'"), string);
2405
2406 *input_line_pointer = e;
2407 }
2408 else
2409 as_bad (_("missing cpu architecture"));
2410
2411 no_cond_jump_promotion = 0;
2412 if (*input_line_pointer == ','
2413 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2414 {
2415 char *string;
2416 char e;
2417
2418 ++input_line_pointer;
2419 e = get_symbol_name (&string);
2420
2421 if (strcmp (string, "nojumps") == 0)
2422 no_cond_jump_promotion = 1;
2423 else if (strcmp (string, "jumps") == 0)
2424 ;
2425 else
2426 as_bad (_("no such architecture modifier: `%s'"), string);
2427
2428 (void) restore_line_pointer (e);
2429 }
2430
2431 demand_empty_rest_of_line ();
2432}
2433
2434enum bfd_architecture
2435i386_arch (void)
2436{
2437 if (cpu_arch_isa == PROCESSOR_L1OM)
2438 {
2439 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2440 || flag_code != CODE_64BIT)
2441 as_fatal (_("Intel L1OM is 64bit ELF only"));
2442 return bfd_arch_l1om;
2443 }
2444 else if (cpu_arch_isa == PROCESSOR_K1OM)
2445 {
2446 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2447 || flag_code != CODE_64BIT)
2448 as_fatal (_("Intel K1OM is 64bit ELF only"));
2449 return bfd_arch_k1om;
2450 }
2451 else if (cpu_arch_isa == PROCESSOR_IAMCU)
2452 {
2453 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2454 || flag_code == CODE_64BIT)
2455 as_fatal (_("Intel MCU is 32bit ELF only"));
2456 return bfd_arch_iamcu;
2457 }
2458 else
2459 return bfd_arch_i386;
2460}
2461
2462unsigned long
2463i386_mach (void)
2464{
2465 if (!strncmp (default_arch, "x86_64", 6))
2466 {
2467 if (cpu_arch_isa == PROCESSOR_L1OM)
2468 {
2469 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2470 || default_arch[6] != '\0')
2471 as_fatal (_("Intel L1OM is 64bit ELF only"));
2472 return bfd_mach_l1om;
2473 }
2474 else if (cpu_arch_isa == PROCESSOR_K1OM)
2475 {
2476 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2477 || default_arch[6] != '\0')
2478 as_fatal (_("Intel K1OM is 64bit ELF only"));
2479 return bfd_mach_k1om;
2480 }
2481 else if (default_arch[6] == '\0')
2482 return bfd_mach_x86_64;
2483 else
2484 return bfd_mach_x64_32;
2485 }
2486 else if (!strcmp (default_arch, "i386")
2487 || !strcmp (default_arch, "iamcu"))
2488 {
2489 if (cpu_arch_isa == PROCESSOR_IAMCU)
2490 {
2491 if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
2492 as_fatal (_("Intel MCU is 32bit ELF only"));
2493 return bfd_mach_i386_iamcu;
2494 }
2495 else
2496 return bfd_mach_i386_i386;
2497 }
2498 else
2499 as_fatal (_("unknown architecture"));
2500}
2501\f
2502void
2503md_begin (void)
2504{
2505 const char *hash_err;
2506
2507 /* Initialize op_hash hash table. */
2508 op_hash = hash_new ();
2509
2510 {
2511 const insn_template *optab;
2512 templates *core_optab;
2513
2514 /* Setup for loop. */
2515 optab = i386_optab;
2516 core_optab = XNEW (templates);
2517 core_optab->start = optab;
2518
2519 while (1)
2520 {
2521 ++optab;
2522 if (optab->name == NULL
2523 || strcmp (optab->name, (optab - 1)->name) != 0)
2524 {
2525 /* different name --> ship out current template list;
2526 add to hash table; & begin anew. */
2527 core_optab->end = optab;
2528 hash_err = hash_insert (op_hash,
2529 (optab - 1)->name,
2530 (void *) core_optab);
2531 if (hash_err)
2532 {
2533 as_fatal (_("can't hash %s: %s"),
2534 (optab - 1)->name,
2535 hash_err);
2536 }
2537 if (optab->name == NULL)
2538 break;
2539 core_optab = XNEW (templates);
2540 core_optab->start = optab;
2541 }
2542 }
2543 }
2544
2545 /* Initialize reg_hash hash table. */
2546 reg_hash = hash_new ();
2547 {
2548 const reg_entry *regtab;
2549 unsigned int regtab_size = i386_regtab_size;
2550
2551 for (regtab = i386_regtab; regtab_size--; regtab++)
2552 {
2553 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2554 if (hash_err)
2555 as_fatal (_("can't hash %s: %s"),
2556 regtab->reg_name,
2557 hash_err);
2558 }
2559 }
2560
2561 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2562 {
2563 int c;
2564 char *p;
2565
2566 for (c = 0; c < 256; c++)
2567 {
2568 if (ISDIGIT (c))
2569 {
2570 digit_chars[c] = c;
2571 mnemonic_chars[c] = c;
2572 register_chars[c] = c;
2573 operand_chars[c] = c;
2574 }
2575 else if (ISLOWER (c))
2576 {
2577 mnemonic_chars[c] = c;
2578 register_chars[c] = c;
2579 operand_chars[c] = c;
2580 }
2581 else if (ISUPPER (c))
2582 {
2583 mnemonic_chars[c] = TOLOWER (c);
2584 register_chars[c] = mnemonic_chars[c];
2585 operand_chars[c] = c;
2586 }
2587 else if (c == '{' || c == '}')
2588 operand_chars[c] = c;
2589
2590 if (ISALPHA (c) || ISDIGIT (c))
2591 identifier_chars[c] = c;
2592 else if (c >= 128)
2593 {
2594 identifier_chars[c] = c;
2595 operand_chars[c] = c;
2596 }
2597 }
2598
2599#ifdef LEX_AT
2600 identifier_chars['@'] = '@';
2601#endif
2602#ifdef LEX_QM
2603 identifier_chars['?'] = '?';
2604 operand_chars['?'] = '?';
2605#endif
2606 digit_chars['-'] = '-';
2607 mnemonic_chars['_'] = '_';
2608 mnemonic_chars['-'] = '-';
2609 mnemonic_chars['.'] = '.';
2610 identifier_chars['_'] = '_';
2611 identifier_chars['.'] = '.';
2612
2613 for (p = operand_special_chars; *p != '\0'; p++)
2614 operand_chars[(unsigned char) *p] = *p;
2615 }
2616
2617 if (flag_code == CODE_64BIT)
2618 {
2619#if defined (OBJ_COFF) && defined (TE_PE)
2620 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2621 ? 32 : 16);
2622#else
2623 x86_dwarf2_return_column = 16;
2624#endif
2625 x86_cie_data_alignment = -8;
2626 }
2627 else
2628 {
2629 x86_dwarf2_return_column = 8;
2630 x86_cie_data_alignment = -4;
2631 }
2632}
2633
2634void
2635i386_print_statistics (FILE *file)
2636{
2637 hash_print_statistics (file, "i386 opcode", op_hash);
2638 hash_print_statistics (file, "i386 register", reg_hash);
2639}
2640\f
2641#ifdef DEBUG386
2642
2643/* Debugging routines for md_assemble. */
2644static void pte (insn_template *);
2645static void pt (i386_operand_type);
2646static void pe (expressionS *);
2647static void ps (symbolS *);
2648
2649static void
2650pi (char *line, i386_insn *x)
2651{
2652 unsigned int j;
2653
2654 fprintf (stdout, "%s: template ", line);
2655 pte (&x->tm);
2656 fprintf (stdout, " address: base %s index %s scale %x\n",
2657 x->base_reg ? x->base_reg->reg_name : "none",
2658 x->index_reg ? x->index_reg->reg_name : "none",
2659 x->log2_scale_factor);
2660 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2661 x->rm.mode, x->rm.reg, x->rm.regmem);
2662 fprintf (stdout, " sib: base %x index %x scale %x\n",
2663 x->sib.base, x->sib.index, x->sib.scale);
2664 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2665 (x->rex & REX_W) != 0,
2666 (x->rex & REX_R) != 0,
2667 (x->rex & REX_X) != 0,
2668 (x->rex & REX_B) != 0);
2669 for (j = 0; j < x->operands; j++)
2670 {
2671 fprintf (stdout, " #%d: ", j + 1);
2672 pt (x->types[j]);
2673 fprintf (stdout, "\n");
2674 if (x->types[j].bitfield.reg8
2675 || x->types[j].bitfield.reg16
2676 || x->types[j].bitfield.reg32
2677 || x->types[j].bitfield.reg64
2678 || x->types[j].bitfield.regmmx
2679 || x->types[j].bitfield.regxmm
2680 || x->types[j].bitfield.regymm
2681 || x->types[j].bitfield.regzmm
2682 || x->types[j].bitfield.sreg2
2683 || x->types[j].bitfield.sreg3
2684 || x->types[j].bitfield.control
2685 || x->types[j].bitfield.debug
2686 || x->types[j].bitfield.test)
2687 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2688 if (operand_type_check (x->types[j], imm))
2689 pe (x->op[j].imms);
2690 if (operand_type_check (x->types[j], disp))
2691 pe (x->op[j].disps);
2692 }
2693}
2694
2695static void
2696pte (insn_template *t)
2697{
2698 unsigned int j;
2699 fprintf (stdout, " %d operands ", t->operands);
2700 fprintf (stdout, "opcode %x ", t->base_opcode);
2701 if (t->extension_opcode != None)
2702 fprintf (stdout, "ext %x ", t->extension_opcode);
2703 if (t->opcode_modifier.d)
2704 fprintf (stdout, "D");
2705 if (t->opcode_modifier.w)
2706 fprintf (stdout, "W");
2707 fprintf (stdout, "\n");
2708 for (j = 0; j < t->operands; j++)
2709 {
2710 fprintf (stdout, " #%d type ", j + 1);
2711 pt (t->operand_types[j]);
2712 fprintf (stdout, "\n");
2713 }
2714}
2715
2716static void
2717pe (expressionS *e)
2718{
2719 fprintf (stdout, " operation %d\n", e->X_op);
2720 fprintf (stdout, " add_number %ld (%lx)\n",
2721 (long) e->X_add_number, (long) e->X_add_number);
2722 if (e->X_add_symbol)
2723 {
2724 fprintf (stdout, " add_symbol ");
2725 ps (e->X_add_symbol);
2726 fprintf (stdout, "\n");
2727 }
2728 if (e->X_op_symbol)
2729 {
2730 fprintf (stdout, " op_symbol ");
2731 ps (e->X_op_symbol);
2732 fprintf (stdout, "\n");
2733 }
2734}
2735
2736static void
2737ps (symbolS *s)
2738{
2739 fprintf (stdout, "%s type %s%s",
2740 S_GET_NAME (s),
2741 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2742 segment_name (S_GET_SEGMENT (s)));
2743}
2744
2745static struct type_name
2746 {
2747 i386_operand_type mask;
2748 const char *name;
2749 }
2750const type_names[] =
2751{
2752 { OPERAND_TYPE_REG8, "r8" },
2753 { OPERAND_TYPE_REG16, "r16" },
2754 { OPERAND_TYPE_REG32, "r32" },
2755 { OPERAND_TYPE_REG64, "r64" },
2756 { OPERAND_TYPE_IMM8, "i8" },
2757 { OPERAND_TYPE_IMM8, "i8s" },
2758 { OPERAND_TYPE_IMM16, "i16" },
2759 { OPERAND_TYPE_IMM32, "i32" },
2760 { OPERAND_TYPE_IMM32S, "i32s" },
2761 { OPERAND_TYPE_IMM64, "i64" },
2762 { OPERAND_TYPE_IMM1, "i1" },
2763 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2764 { OPERAND_TYPE_DISP8, "d8" },
2765 { OPERAND_TYPE_DISP16, "d16" },
2766 { OPERAND_TYPE_DISP32, "d32" },
2767 { OPERAND_TYPE_DISP32S, "d32s" },
2768 { OPERAND_TYPE_DISP64, "d64" },
2769 { OPERAND_TYPE_VEC_DISP8, "Vector d8" },
2770 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2771 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2772 { OPERAND_TYPE_CONTROL, "control reg" },
2773 { OPERAND_TYPE_TEST, "test reg" },
2774 { OPERAND_TYPE_DEBUG, "debug reg" },
2775 { OPERAND_TYPE_FLOATREG, "FReg" },
2776 { OPERAND_TYPE_FLOATACC, "FAcc" },
2777 { OPERAND_TYPE_SREG2, "SReg2" },
2778 { OPERAND_TYPE_SREG3, "SReg3" },
2779 { OPERAND_TYPE_ACC, "Acc" },
2780 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2781 { OPERAND_TYPE_REGMMX, "rMMX" },
2782 { OPERAND_TYPE_REGXMM, "rXMM" },
2783 { OPERAND_TYPE_REGYMM, "rYMM" },
2784 { OPERAND_TYPE_REGZMM, "rZMM" },
2785 { OPERAND_TYPE_REGMASK, "Mask reg" },
2786 { OPERAND_TYPE_ESSEG, "es" },
2787};
2788
2789static void
2790pt (i386_operand_type t)
2791{
2792 unsigned int j;
2793 i386_operand_type a;
2794
2795 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2796 {
2797 a = operand_type_and (t, type_names[j].mask);
2798 if (!operand_type_all_zero (&a))
2799 fprintf (stdout, "%s, ", type_names[j].name);
2800 }
2801 fflush (stdout);
2802}
2803
2804#endif /* DEBUG386 */
2805\f
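/* Select the BFD relocation type for a fixup of SIZE bytes, pc-relative
   if PCREL, signed if SIGN > 0.  OTHER, if not NO_RELOC, is a relocation
   requested explicitly by the operand (e.g. @got); it is validated
   against SIZE and PCREL and, for 8-byte fields, widened to its 64-bit
   counterpart.  Returns NO_RELOC after reporting an error on failure.  */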
2806static bfd_reloc_code_real_type
2807reloc (unsigned int size,
2808 int pcrel,
2809 int sign,
2810 bfd_reloc_code_real_type other)
2811{
2812 if (other != NO_RELOC)
2813 {
2814 reloc_howto_type *rel;
2815
2816 if (size == 8)
2817 switch (other)
2818 {
2819 case BFD_RELOC_X86_64_GOT32:
2820 return BFD_RELOC_X86_64_GOT64;
2821 break;
2822 case BFD_RELOC_X86_64_GOTPLT64:
2823 return BFD_RELOC_X86_64_GOTPLT64;
2824 break;
2825 case BFD_RELOC_X86_64_PLTOFF64:
2826 return BFD_RELOC_X86_64_PLTOFF64;
2827 break;
2828 case BFD_RELOC_X86_64_GOTPC32:
2829 other = BFD_RELOC_X86_64_GOTPC64;
2830 break;
2831 case BFD_RELOC_X86_64_GOTPCREL:
2832 other = BFD_RELOC_X86_64_GOTPCREL64;
2833 break;
2834 case BFD_RELOC_X86_64_TPOFF32:
2835 other = BFD_RELOC_X86_64_TPOFF64;
2836 break;
2837 case BFD_RELOC_X86_64_DTPOFF32:
2838 other = BFD_RELOC_X86_64_DTPOFF64;
2839 break;
2840 default:
2841 break;
2842 }
2843
2844#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2845 if (other == BFD_RELOC_SIZE32)
2846 {
2847 if (size == 8)
2848 other = BFD_RELOC_SIZE64;
2849 if (pcrel)
2850 {
2851 as_bad (_("there are no pc-relative size relocations"));
2852 return NO_RELOC;
2853 }
2854 }
2855#endif
2856
2857 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2858 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2859 sign = -1;
2860
2861 rel = bfd_reloc_type_lookup (stdoutput, other);
2862 if (!rel)
2863 as_bad (_("unknown relocation (%u)"), other);
2864 else if (size != bfd_get_reloc_size (rel))
2865 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2866 bfd_get_reloc_size (rel),
2867 size);
2868 else if (pcrel && !rel->pc_relative)
2869 as_bad (_("non-pc-relative relocation for pc-relative field"));
2870 else if ((rel->complain_on_overflow == complain_overflow_signed
2871 && !sign)
2872 || (rel->complain_on_overflow == complain_overflow_unsigned
2873 && sign > 0))
2874 as_bad (_("relocated field and relocation type differ in signedness"));
2875 else
2876 return other;
2877 return NO_RELOC;
2878 }
2879
2880 if (pcrel)
2881 {
2882 if (!sign)
2883 as_bad (_("there are no unsigned pc-relative relocations"));
2884 switch (size)
2885 {
2886 case 1: return BFD_RELOC_8_PCREL;
2887 case 2: return BFD_RELOC_16_PCREL;
2888 case 4: return BFD_RELOC_32_PCREL;
2889 case 8: return BFD_RELOC_64_PCREL;
2890 }
2891 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2892 }
2893 else
2894 {
2895 if (sign > 0)
2896 switch (size)
2897 {
2898 case 4: return BFD_RELOC_X86_64_32S;
2899 }
2900 else
2901 switch (size)
2902 {
2903 case 1: return BFD_RELOC_8;
2904 case 2: return BFD_RELOC_16;
2905 case 4: return BFD_RELOC_32;
2906 case 8: return BFD_RELOC_64;
2907 }
2908 as_bad (_("cannot do %s %u byte relocation"),
2909 sign > 0 ? "signed" : "unsigned", size);
2910 }
2911
2912 return NO_RELOC;
2913}
2914
2915/* Here we decide which fixups can be adjusted to make them relative to
2916 the beginning of the section instead of the symbol. Basically we need
2917 to make sure that the dynamic relocations are done correctly, so in
2918 some cases we force the original symbol to be used. */
2919
2920int
2921tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2922{
2923#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2924 if (!IS_ELF)
2925 return 1;
2926
2927 /* Don't adjust pc-relative references to merge sections in 64-bit
2928 mode. */
2929 if (use_rela_relocations
2930 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2931 && fixP->fx_pcrel)
2932 return 0;
2933
2934 /* x86_64 GOTPCREL relocations are represented as 32bit PCrel
2935 relocations and changed later by validate_fix. */
2936 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2937 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2938 return 0;
2939
2940 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
2941 for size relocations. */
2942 if (fixP->fx_r_type == BFD_RELOC_SIZE32
2943 || fixP->fx_r_type == BFD_RELOC_SIZE64
2944 || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2945 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2946 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2947 || fixP->fx_r_type == BFD_RELOC_386_GOT32X
2948 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2949 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2950 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2951 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2952 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2953 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2954 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2955 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2956 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2957 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2958 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2959 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2960 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2961 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCRELX
2962 || fixP->fx_r_type == BFD_RELOC_X86_64_REX_GOTPCRELX
2963 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2964 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2965 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2966 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2967 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2968 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2969 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2970 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2971 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2972 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2973 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2974 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2975 return 0;
2976#endif
2977 return 1;
2978}
2979
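/* Classify FPU mnemonic MNEMONIC: return 0 for non-FPU mnemonics (and
   fxsave/fxrstor), 2 for the integer forms (fi*), 3 for control/state
   forms (fldcw, fldenv, fnstcw, fsave, frstor, fstcw, fstenv, fstsw, ...),
   and 1 for ordinary floating point arithmetic.  */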
2980static int
2981intel_float_operand (const char *mnemonic)
2982{
2983 /* Note that the value returned is meaningful only for opcodes with (memory)
2984 operands, hence the code here is free to improperly handle opcodes that
2985 have no operands (for better performance and smaller code). */
2986
2987 if (mnemonic[0] != 'f')
2988 return 0; /* non-math */
2989
2990 switch (mnemonic[1])
2991 {
2992 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2993 the fs segment override prefix are not currently handled because
2994 no call path can make opcodes without operands get here. */
2995 case 'i':
2996 return 2 /* integer op */;
2997 case 'l':
2998 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2999 return 3; /* fldcw/fldenv */
3000 break;
3001 case 'n':
3002 if (mnemonic[2] != 'o' /* fnop */)
3003 return 3; /* non-waiting control op */
3004 break;
3005 case 'r':
3006 if (mnemonic[2] == 's')
3007 return 3; /* frstor/frstpm */
3008 break;
3009 case 's':
3010 if (mnemonic[2] == 'a')
3011 return 3; /* fsave */
3012 if (mnemonic[2] == 't')
3013 {
3014 switch (mnemonic[3])
3015 {
3016 case 'c': /* fstcw */
3017 case 'd': /* fstdw */
3018 case 'e': /* fstenv */
3019 case 's': /* fsts[gw] */
3020 return 3;
3021 }
3022 }
3023 break;
3024 case 'x':
3025 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
3026 return 0; /* fxsave/fxrstor are not really math ops */
3027 break;
3028 }
3029
3030 return 1;
3031}
3032
3033/* Build the VEX prefix. */
3034
3035static void
3036build_vex_prefix (const insn_template *t)
3037{
3038 unsigned int register_specifier;
3039 unsigned int implied_prefix;
3040 unsigned int vector_length;
3041
3042 /* Check register specifier. */
3043 if (i.vex.register_specifier)
3044 {
3045 register_specifier =
3046 ~register_number (i.vex.register_specifier) & 0xf;
3047 gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
3048 }
3049 else
3050 register_specifier = 0xf;
3051
3052 /* Use 2-byte VEX prefix by swapping destination and source
3053 operands. */
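  /* The 2-byte VEX form can only encode REX.R, not REX.B/X/W; moving
     the extended register from the r/m field to the reg field turns
     the required REX_B into REX_R and lets the short prefix be used.  */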
3054 if (!i.swap_operand
3055 && i.operands == i.reg_operands
3056 && i.tm.opcode_modifier.vexopcode == VEX0F
3057 && i.tm.opcode_modifier.s
3058 && i.rex == REX_B)
3059 {
3060 unsigned int xchg = i.operands - 1;
3061 union i386_op temp_op;
3062 i386_operand_type temp_type;
3063
3064 temp_type = i.types[xchg];
3065 i.types[xchg] = i.types[0];
3066 i.types[0] = temp_type;
3067 temp_op = i.op[xchg];
3068 i.op[xchg] = i.op[0];
3069 i.op[0] = temp_op;
3070
3071 gas_assert (i.rm.mode == 3);
3072
3073 i.rex = REX_R;
3074 xchg = i.rm.regmem;
3075 i.rm.regmem = i.rm.reg;
3076 i.rm.reg = xchg;
3077
3078 /* Use the next insn. */
3079 i.tm = t[1];
3080 }
3081
3082 if (i.tm.opcode_modifier.vex == VEXScalar)
3083 vector_length = avxscalar;
3084 else
3085 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
3086
3087 switch ((i.tm.base_opcode >> 8) & 0xff)
3088 {
3089 case 0:
3090 implied_prefix = 0;
3091 break;
3092 case DATA_PREFIX_OPCODE:
3093 implied_prefix = 1;
3094 break;
3095 case REPE_PREFIX_OPCODE:
3096 implied_prefix = 2;
3097 break;
3098 case REPNE_PREFIX_OPCODE:
3099 implied_prefix = 3;
3100 break;
3101 default:
3102 abort ();
3103 }
3104
3105 /* Use 2-byte VEX prefix if possible. */
3106 if (i.tm.opcode_modifier.vexopcode == VEX0F
3107 && i.tm.opcode_modifier.vexw != VEXW1
3108 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
3109 {
3110 /* 2-byte VEX prefix. */
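      /* Byte 1 layout: bit 7 = ~R, bits 6:3 = ~vvvv, bit 2 = L,
	 bits 1:0 = pp (implied prefix).  */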
3111 unsigned int r;
3112
3113 i.vex.length = 2;
3114 i.vex.bytes[0] = 0xc5;
3115
3116 /* Check the REX.R bit. */
3117 r = (i.rex & REX_R) ? 0 : 1;
3118 i.vex.bytes[1] = (r << 7
3119 | register_specifier << 3
3120 | vector_length << 2
3121 | implied_prefix);
3122 }
3123 else
3124 {
3125 /* 3-byte VEX prefix. */
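      /* Byte layout: 0xC4 (or 0x8F for XOP), then ~R ~X ~B | m-mmmm,
	 then W | ~vvvv | L | pp.  */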
3126 unsigned int m, w;
3127
3128 i.vex.length = 3;
3129
3130 switch (i.tm.opcode_modifier.vexopcode)
3131 {
3132 case VEX0F:
3133 m = 0x1;
3134 i.vex.bytes[0] = 0xc4;
3135 break;
3136 case VEX0F38:
3137 m = 0x2;
3138 i.vex.bytes[0] = 0xc4;
3139 break;
3140 case VEX0F3A:
3141 m = 0x3;
3142 i.vex.bytes[0] = 0xc4;
3143 break;
3144 case XOP08:
3145 m = 0x8;
3146 i.vex.bytes[0] = 0x8f;
3147 break;
3148 case XOP09:
3149 m = 0x9;
3150 i.vex.bytes[0] = 0x8f;
3151 break;
3152 case XOP0A:
3153 m = 0xa;
3154 i.vex.bytes[0] = 0x8f;
3155 break;
3156 default:
3157 abort ();
3158 }
3159
3160 /* The high 3 bits of the second VEX byte are the one's complement
3161 of the RXB bits from REX. */
3162 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3163
3164 /* Check the REX.W bit. */
3165 w = (i.rex & REX_W) ? 1 : 0;
3166 if (i.tm.opcode_modifier.vexw == VEXW1)
3167 w = 1;
3168
3169 i.vex.bytes[2] = (w << 7
3170 | register_specifier << 3
3171 | vector_length << 2
3172 | implied_prefix);
3173 }
3174}
3175
3176/* Build the EVEX prefix. */
3177
3178static void
3179build_evex_prefix (void)
3180{
3181 unsigned int register_specifier;
3182 unsigned int implied_prefix;
3183 unsigned int m, w;
3184 rex_byte vrex_used = 0;
3185
3186 /* Check register specifier. */
3187 if (i.vex.register_specifier)
3188 {
3189 gas_assert ((i.vrex & REX_X) == 0);
3190
3191 register_specifier = i.vex.register_specifier->reg_num;
3192 if ((i.vex.register_specifier->reg_flags & RegRex))
3193 register_specifier += 8;
3194 /* The upper 16 registers are encoded in the fourth byte of the
3195 EVEX prefix. */
3196 if (!(i.vex.register_specifier->reg_flags & RegVRex))
3197 i.vex.bytes[3] = 0x8;
3198 register_specifier = ~register_specifier & 0xf;
3199 }
3200 else
3201 {
3202 register_specifier = 0xf;
3203
3204 /* Encode upper 16 vector index register in the fourth byte of
3205 the EVEX prefix. */
3206 if (!(i.vrex & REX_X))
3207 i.vex.bytes[3] = 0x8;
3208 else
3209 vrex_used |= REX_X;
3210 }
3211
3212 switch ((i.tm.base_opcode >> 8) & 0xff)
3213 {
3214 case 0:
3215 implied_prefix = 0;
3216 break;
3217 case DATA_PREFIX_OPCODE:
3218 implied_prefix = 1;
3219 break;
3220 case REPE_PREFIX_OPCODE:
3221 implied_prefix = 2;
3222 break;
3223 case REPNE_PREFIX_OPCODE:
3224 implied_prefix = 3;
3225 break;
3226 default:
3227 abort ();
3228 }
3229
3230 /* 4 byte EVEX prefix. */
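  /* Byte layout: 0x62, P0 = ~R ~X ~B ~R' 0 0 m m, P1 = W ~vvvv 1 pp,
     P2 = z L'L b ~V' aaa.  */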
3231 i.vex.length = 4;
3232 i.vex.bytes[0] = 0x62;
3233
3234 /* mmmm bits. */
3235 switch (i.tm.opcode_modifier.vexopcode)
3236 {
3237 case VEX0F:
3238 m = 1;
3239 break;
3240 case VEX0F38:
3241 m = 2;
3242 break;
3243 case VEX0F3A:
3244 m = 3;
3245 break;
3246 default:
3247 abort ();
3248 break;
3249 }
3250
3251 /* The high 3 bits of the second EVEX byte are the one's complement
3252 of the RXB bits from REX. */
3253 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3254
3255 /* The fifth bit of the second EVEX byte is the one's complement of
3256 the REX_R bit in VREX. */
3257 if (!(i.vrex & REX_R))
3258 i.vex.bytes[1] |= 0x10;
3259 else
3260 vrex_used |= REX_R;
3261
3262 if ((i.reg_operands + i.imm_operands) == i.operands)
3263 {
3264 /* When all operands are registers, the REX_X bit in REX is not
3265 used. We reuse it to encode the upper 16 registers, which is
3266 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3267 as its one's complement. */
3268 if ((i.vrex & REX_B))
3269 {
3270 vrex_used |= REX_B;
3271 i.vex.bytes[1] &= ~0x40;
3272 }
3273 }
3274
3275 /* EVEX instructions shouldn't need the REX prefix. */
3276 i.vrex &= ~vrex_used;
3277 gas_assert (i.vrex == 0);
3278
3279 /* Check the REX.W bit. */
3280 w = (i.rex & REX_W) ? 1 : 0;
3281 if (i.tm.opcode_modifier.vexw)
3282 {
3283 if (i.tm.opcode_modifier.vexw == VEXW1)
3284 w = 1;
3285 }
3286 /* If w is not set it means we are dealing with a WIG instruction. */
3287 else if (!w)
3288 {
3289 if (evexwig == evexw1)
3290 w = 1;
3291 }
3292
3293 /* Encode the U bit. */
3294 implied_prefix |= 0x4;
3295
3296 /* The third byte of the EVEX prefix. */
3297 i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);
3298
3299 /* The fourth byte of the EVEX prefix. */
3300 /* The zeroing-masking bit. */
3301 if (i.mask && i.mask->zeroing)
3302 i.vex.bytes[3] |= 0x80;
3303
3304 /* With no rounding control, L'L encodes the vector length and b the broadcast; with RC those bits hold the rounding mode instead. */
3305 if (!i.rounding)
3306 {
3307 /* Encode the vector length. */
3308 unsigned int vec_length;
3309
3310 switch (i.tm.opcode_modifier.evex)
3311 {
3312 case EVEXLIG: /* LL' is ignored */
3313 vec_length = evexlig << 5;
3314 break;
3315 case EVEX128:
3316 vec_length = 0 << 5;
3317 break;
3318 case EVEX256:
3319 vec_length = 1 << 5;
3320 break;
3321 case EVEX512:
3322 vec_length = 2 << 5;
3323 break;
3324 default:
3325 abort ();
3326 break;
3327 }
3328 i.vex.bytes[3] |= vec_length;
3329 /* Encode the broadcast bit. */
3330 if (i.broadcast)
3331 i.vex.bytes[3] |= 0x10;
3332 }
3333 else
3334 {
3335 if (i.rounding->type != saeonly)
3336 i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
3337 else
3338 i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
3339 }
3340
3341 if (i.mask && i.mask->mask)
3342 i.vex.bytes[3] |= i.mask->mask->reg_num;
3343}
3344
3345static void
3346process_immext (void)
3347{
3348 expressionS *exp;
3349
3350 if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
3351 && i.operands > 0)
3352 {
3353 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3354 with an opcode suffix which is coded in the same place as an
3355 8-bit immediate field would be.
3356 Here we check those operands and remove them afterwards. */
3357 unsigned int x;
3358
3359 for (x = 0; x < i.operands; x++)
3360 if (register_number (i.op[x].regs) != x)
3361 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3362 register_prefix, i.op[x].regs->reg_name, x + 1,
3363 i.tm.name);
3364
3365 i.operands = 0;
3366 }
3367
3368 if (i.tm.cpu_flags.bitfield.cpumwaitx && i.operands > 0)
3369 {
3370 /* MONITORX/MWAITX instructions have fixed operands with an opcode
3371 suffix which is coded in the same place as an 8-bit immediate
3372 field would be.
3373 Here we check those operands and remove them afterwards. */
3374 unsigned int x;
3375
3376 if (i.operands != 3)
3377 abort();
3378
3379 for (x = 0; x < 2; x++)
3380 if (register_number (i.op[x].regs) != x)
3381 goto bad_register_operand;
3382
3383 /* Check for third operand for mwaitx/monitorx insn. */
3384 if (register_number (i.op[x].regs)
3385 != (x + (i.tm.extension_opcode == 0xfb)))
3386 {
3387bad_register_operand:
3388 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3389 register_prefix, i.op[x].regs->reg_name, x+1,
3390 i.tm.name);
3391 }
3392
3393 i.operands = 0;
3394 }
3395
3396 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3397 which is coded in the same place as an 8-bit immediate field
3398 would be. Here we fake an 8-bit immediate operand from the
3399 opcode suffix stored in tm.extension_opcode.
3400
3401 AVX instructions also use this encoding for some
3402 3-argument instructions. */
3403
3404 gas_assert (i.imm_operands <= 1
3405 && (i.operands <= 2
3406 || ((i.tm.opcode_modifier.vex
3407 || i.tm.opcode_modifier.evex)
3408 && i.operands <= 4)));
3409
3410 exp = &im_expressions[i.imm_operands++];
3411 i.op[i.operands].imms = exp;
3412 i.types[i.operands] = imm8;
3413 i.operands++;
3414 exp->X_op = O_constant;
3415 exp->X_add_number = i.tm.extension_opcode;
3416 i.tm.extension_opcode = None;
3417}
3418
3419
3420static int
3421check_hle (void)
3422{
3423 switch (i.tm.opcode_modifier.hleprefixok)
3424 {
3425 default:
3426 abort ();
3427 case HLEPrefixNone:
3428 as_bad (_("invalid instruction `%s' after `%s'"),
3429 i.tm.name, i.hle_prefix);
3430 return 0;
3431 case HLEPrefixLock:
3432 if (i.prefix[LOCK_PREFIX])
3433 return 1;
3434 as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
3435 return 0;
3436 case HLEPrefixAny:
3437 return 1;
3438 case HLEPrefixRelease:
3439 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3440 {
3441 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3442 i.tm.name);
3443 return 0;
3444 }
3445 if (i.mem_operands == 0
3446 || !operand_type_check (i.types[i.operands - 1], anymem))
3447 {
3448 as_bad (_("memory destination needed for instruction `%s'"
3449 " after `xrelease'"), i.tm.name);
3450 return 0;
3451 }
3452 return 1;
3453 }
3454}
3455
3456/* This is the guts of the machine-dependent assembler. LINE points to a
3457 machine dependent instruction. This function is supposed to emit
3458 the frags/bytes it assembles to. */
3459
3460void
3461md_assemble (char *line)
3462{
3463 unsigned int j;
3464 char mnemonic[MAX_MNEM_SIZE];
3465 const insn_template *t;
3466
3467 /* Initialize globals. */
3468 memset (&i, '\0', sizeof (i));
3469 for (j = 0; j < MAX_OPERANDS; j++)
3470 i.reloc[j] = NO_RELOC;
3471 memset (disp_expressions, '\0', sizeof (disp_expressions));
3472 memset (im_expressions, '\0', sizeof (im_expressions));
3473 save_stack_p = save_stack;
3474
3475 /* First parse an instruction mnemonic & call i386_operand for the operands.
3476 We assume that the scrubber has arranged it so that line[0] is the valid
3477 start of a (possibly prefixed) mnemonic. */
3478
3479 line = parse_insn (line, mnemonic);
3480 if (line == NULL)
3481 return;
3482
3483 line = parse_operands (line, mnemonic);
3484 this_operand = -1;
3485 if (line == NULL)
3486 return;
3487
3488 /* Now we've parsed the mnemonic into a set of templates, and have the
3489 operands at hand. */
3490
3491 /* All intel opcodes have reversed operands except for "bound" and
3492 "enter". We also don't reverse intersegment "jmp" and "call"
3493 instructions with 2 immediate operands so that the immediate segment
3494 precedes the offset, as it does when in AT&T mode. */
3495 if (intel_syntax
3496 && i.operands > 1
3497 && (strcmp (mnemonic, "bound") != 0)
3498 && (strcmp (mnemonic, "invlpga") != 0)
3499 && !(operand_type_check (i.types[0], imm)
3500 && operand_type_check (i.types[1], imm)))
3501 swap_operands ();
3502
3503 /* The order of the immediates should be reversed
3504 for 2 immediates extrq and insertq instructions */
3505 if (i.imm_operands == 2
3506 && (strcmp (mnemonic, "extrq") == 0
3507 || strcmp (mnemonic, "insertq") == 0))
3508 swap_2_operands (0, 1);
3509
3510 if (i.imm_operands)
3511 optimize_imm ();
3512
3513 /* Don't optimize displacement for movabs since it only takes 64bit
3514 displacement. */
3515 if (i.disp_operands
3516 && i.disp_encoding != disp_encoding_32bit
3517 && (flag_code != CODE_64BIT
3518 || strcmp (mnemonic, "movabs") != 0))
3519 optimize_disp ();
3520
3521 /* Next, we find a template that matches the given insn,
3522 making sure the overlap of the given operands types is consistent
3523 with the template operand types. */
3524
3525 if (!(t = match_template ()))
3526 return;
3527
3528 if (sse_check != check_none
3529 && !i.tm.opcode_modifier.noavx
3530 && (i.tm.cpu_flags.bitfield.cpusse
3531 || i.tm.cpu_flags.bitfield.cpusse2
3532 || i.tm.cpu_flags.bitfield.cpusse3
3533 || i.tm.cpu_flags.bitfield.cpussse3
3534 || i.tm.cpu_flags.bitfield.cpusse4_1
3535 || i.tm.cpu_flags.bitfield.cpusse4_2))
3536 {
3537 (sse_check == check_warning
3538 ? as_warn
3539 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3540 }
3541
3542 /* Zap movzx and movsx suffix. The suffix has been set from
3543 "word ptr" or "byte ptr" on the source operand in Intel syntax
3544 or extracted from mnemonic in AT&T syntax. But we'll use
3545 the destination register to choose the suffix for encoding. */
3546 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3547 {
3548 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3549 there is no suffix, the default will be byte extension. */
3550 if (i.reg_operands != 2
3551 && !i.suffix
3552 && intel_syntax)
3553 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3554
3555 i.suffix = 0;
3556 }
3557
3558 if (i.tm.opcode_modifier.fwait)
3559 if (!add_prefix (FWAIT_OPCODE))
3560 return;
3561
3562 /* Check if REP prefix is OK. */
3563 if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
3564 {
3565 as_bad (_("invalid instruction `%s' after `%s'"),
3566 i.tm.name, i.rep_prefix);
3567 return;
3568 }
3569
3570 /* Check for lock without a lockable instruction. Destination operand
3571 must be memory unless it is xchg (0x86). */
3572 if (i.prefix[LOCK_PREFIX]
3573 && (!i.tm.opcode_modifier.islockable
3574 || i.mem_operands == 0
3575 || (i.tm.base_opcode != 0x86
3576 && !operand_type_check (i.types[i.operands - 1], anymem))))
3577 {
3578 as_bad (_("expecting lockable instruction after `lock'"));
3579 return;
3580 }
3581
3582 /* Check if HLE prefix is OK. */
3583 if (i.hle_prefix && !check_hle ())
3584 return;
3585
3586 /* Check BND prefix. */
3587 if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
3588 as_bad (_("expecting valid branch instruction after `bnd'"));
3589
3590 if (i.tm.cpu_flags.bitfield.cpumpx
3591 && flag_code == CODE_64BIT
3592 && i.prefix[ADDR_PREFIX])
3593 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
3594
3595 /* Insert BND prefix. */
3596 if (add_bnd_prefix
3597 && i.tm.opcode_modifier.bndprefixok
3598 && !i.prefix[BND_PREFIX])
3599 add_prefix (BND_PREFIX_OPCODE);
3600
3601 /* Check string instruction segment overrides. */
3602 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3603 {
3604 if (!check_string ())
3605 return;
3606 i.disp_operands = 0;
3607 }
3608
3609 if (!process_suffix ())
3610 return;
3611
3612 /* Update operand types. */
3613 for (j = 0; j < i.operands; j++)
3614 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3615
3616 /* Make still unresolved immediate matches conform to size of immediate
3617 given in i.suffix. */
3618 if (!finalize_imm ())
3619 return;
3620
3621 if (i.types[0].bitfield.imm1)
3622 i.imm_operands = 0; /* kludge for shift insns. */
3623
3624 /* We only need to check those implicit registers for instructions
3625 with 3 operands or less. */
3626 if (i.operands <= 3)
3627 for (j = 0; j < i.operands; j++)
3628 if (i.types[j].bitfield.inoutportreg
3629 || i.types[j].bitfield.shiftcount
3630 || i.types[j].bitfield.acc
3631 || i.types[j].bitfield.floatacc)
3632 i.reg_operands--;
3633
3634 /* ImmExt should be processed after SSE2AVX. */
3635 if (!i.tm.opcode_modifier.sse2avx
3636 && i.tm.opcode_modifier.immext)
3637 process_immext ();
3638
3639 /* For insns with operands there are more diddles to do to the opcode. */
3640 if (i.operands)
3641 {
3642 if (!process_operands ())
3643 return;
3644 }
3645 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3646 {
3647 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3648 as_warn (_("translating to `%sp'"), i.tm.name);
3649 }
3650
3651 if (i.tm.opcode_modifier.vex || i.tm.opcode_modifier.evex)
3652 {
3653 if (flag_code == CODE_16BIT)
3654 {
3655 as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
3656 i.tm.name);
3657 return;
3658 }
3659
3660 if (i.tm.opcode_modifier.vex)
3661 build_vex_prefix (t);
3662 else
3663 build_evex_prefix ();
3664 }
3665
3666 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3667 instructions may define INT_OPCODE as well, so avoid this corner
3668 case for those instructions that use MODRM. */
3669 if (i.tm.base_opcode == INT_OPCODE
3670 && !i.tm.opcode_modifier.modrm
3671 && i.op[0].imms->X_add_number == 3)
3672 {
3673 i.tm.base_opcode = INT3_OPCODE;
3674 i.imm_operands = 0;
3675 }
3676
3677 if ((i.tm.opcode_modifier.jump
3678 || i.tm.opcode_modifier.jumpbyte
3679 || i.tm.opcode_modifier.jumpdword)
3680 && i.op[0].disps->X_op == O_constant)
3681 {
3682 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3683 the absolute address given by the constant. Since ix86 jumps and
3684 calls are pc relative, we need to generate a reloc. */
3685 i.op[0].disps->X_add_symbol = &abs_symbol;
3686 i.op[0].disps->X_op = O_symbol;
3687 }
3688
3689 if (i.tm.opcode_modifier.rex64)
3690 i.rex |= REX_W;
3691
3692 /* For 8 bit registers we need an empty rex prefix. Also if the
3693 instruction already has a REX prefix, we need to convert old
3694 registers to new ones. */
3695
3696 if ((i.types[0].bitfield.reg8
3697 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3698 || (i.types[1].bitfield.reg8
3699 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3700 || ((i.types[0].bitfield.reg8
3701 || i.types[1].bitfield.reg8)
3702 && i.rex != 0))
3703 {
3704 int x;
3705
3706 i.rex |= REX_OPCODE;
3707 for (x = 0; x < 2; x++)
3708 {
3709 /* Look for 8 bit operand that uses old registers. */
3710 if (i.types[x].bitfield.reg8
3711 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3712 {
3713 /* In case it is a "high byte" register (ah/ch/dh/bh), give up. */
3714 if (i.op[x].regs->reg_num > 3)
3715 as_bad (_("can't encode register '%s%s' in an "
3716 "instruction requiring REX prefix."),
3717 register_prefix, i.op[x].regs->reg_name);
3718
3719 /* Otherwise it is equivalent to the extended register.
3720 Since the encoding doesn't change this is merely
3721 cosmetic cleanup for debug output. */
3722
3723 i.op[x].regs = i.op[x].regs + 8;
3724 }
3725 }
3726 }
3727
3728 if (i.rex != 0)
3729 add_prefix (REX_OPCODE | i.rex);
3730
3731 /* We are ready to output the insn. */
3732 output_insn ();
3733}
3734
3735static char *
3736parse_insn (char *line, char *mnemonic)
3737{
3738 char *l = line;
3739 char *token_start = l;
3740 char *mnem_p;
3741 int supported;
3742 const insn_template *t;
3743 char *dot_p = NULL;
3744
3745 while (1)
3746 {
3747 mnem_p = mnemonic;
3748 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3749 {
3750 if (*mnem_p == '.')
3751 dot_p = mnem_p;
3752 mnem_p++;
3753 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3754 {
3755 as_bad (_("no such instruction: `%s'"), token_start);
3756 return NULL;
3757 }
3758 l++;
3759 }
3760 if (!is_space_char (*l)
3761 && *l != END_OF_INSN
3762 && (intel_syntax
3763 || (*l != PREFIX_SEPARATOR
3764 && *l != ',')))
3765 {
3766 as_bad (_("invalid character %s in mnemonic"),
3767 output_invalid (*l));
3768 return NULL;
3769 }
3770 if (token_start == l)
3771 {
3772 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3773 as_bad (_("expecting prefix; got nothing"));
3774 else
3775 as_bad (_("expecting mnemonic; got nothing"));
3776 return NULL;
3777 }
3778
3779 /* Look up instruction (or prefix) via hash table. */
3780 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3781
3782 if (*l != END_OF_INSN
3783 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3784 && current_templates
3785 && current_templates->start->opcode_modifier.isprefix)
3786 {
3787 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3788 {
3789 as_bad ((flag_code != CODE_64BIT
3790 ? _("`%s' is only supported in 64-bit mode")
3791 : _("`%s' is not supported in 64-bit mode")),
3792 current_templates->start->name);
3793 return NULL;
3794 }
3795 /* If we are in 16-bit mode, do not allow addr16 or data16.
3796 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3797 if ((current_templates->start->opcode_modifier.size16
3798 || current_templates->start->opcode_modifier.size32)
3799 && flag_code != CODE_64BIT
3800 && (current_templates->start->opcode_modifier.size32
3801 ^ (flag_code == CODE_16BIT)))
3802 {
3803 as_bad (_("redundant %s prefix"),
3804 current_templates->start->name);
3805 return NULL;
3806 }
3807 /* Add prefix, checking for repeated prefixes. */
3808 switch (add_prefix (current_templates->start->base_opcode))
3809 {
3810 case PREFIX_EXIST:
3811 return NULL;
3812 case PREFIX_REP:
3813 if (current_templates->start->cpu_flags.bitfield.cpuhle)
3814 i.hle_prefix = current_templates->start->name;
3815 else if (current_templates->start->cpu_flags.bitfield.cpumpx)
3816 i.bnd_prefix = current_templates->start->name;
3817 else
3818 i.rep_prefix = current_templates->start->name;
3819 break;
3820 default:
3821 break;
3822 }
3823 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3824 token_start = ++l;
3825 }
3826 else
3827 break;
3828 }
3829
3830 if (!current_templates)
3831 {
3832 /* Check if we should swap operand or force 32bit displacement in
3833 encoding. */
3834 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3835 i.swap_operand = 1;
3836 else if (mnem_p - 3 == dot_p
3837 && dot_p[1] == 'd'
3838 && dot_p[2] == '8')
3839 i.disp_encoding = disp_encoding_8bit;
3840 else if (mnem_p - 4 == dot_p
3841 && dot_p[1] == 'd'
3842 && dot_p[2] == '3'
3843 && dot_p[3] == '2')
3844 i.disp_encoding = disp_encoding_32bit;
3845 else
3846 goto check_suffix;
3847 mnem_p = dot_p;
3848 *dot_p = '\0';
3849 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3850 }
3851
3852 if (!current_templates)
3853 {
3854check_suffix:
3855 /* See if we can get a match by trimming off a suffix. */
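      /* Note the unusual placement of the BYTE/QWORD case labels inside
	 the WORD case's if/else: all three share the code that strips
	 the suffix and re-looks up the mnemonic.  */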
3856 switch (mnem_p[-1])
3857 {
3858 case WORD_MNEM_SUFFIX:
3859 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3860 i.suffix = SHORT_MNEM_SUFFIX;
3861 else
3862 case BYTE_MNEM_SUFFIX:
3863 case QWORD_MNEM_SUFFIX:
3864 i.suffix = mnem_p[-1];
3865 mnem_p[-1] = '\0';
3866 current_templates = (const templates *) hash_find (op_hash,
3867 mnemonic);
3868 break;
3869 case SHORT_MNEM_SUFFIX:
3870 case LONG_MNEM_SUFFIX:
3871 if (!intel_syntax)
3872 {
3873 i.suffix = mnem_p[-1];
3874 mnem_p[-1] = '\0';
3875 current_templates = (const templates *) hash_find (op_hash,
3876 mnemonic);
3877 }
3878 break;
3879
3880 /* Intel Syntax. */
3881 case 'd':
3882 if (intel_syntax)
3883 {
3884 if (intel_float_operand (mnemonic) == 1)
3885 i.suffix = SHORT_MNEM_SUFFIX;
3886 else
3887 i.suffix = LONG_MNEM_SUFFIX;
3888 mnem_p[-1] = '\0';
3889 current_templates = (const templates *) hash_find (op_hash,
3890 mnemonic);
3891 }
3892 break;
3893 }
3894 if (!current_templates)
3895 {
3896 as_bad (_("no such instruction: `%s'"), token_start);
3897 return NULL;
3898 }
3899 }
3900
3901 if (current_templates->start->opcode_modifier.jump
3902 || current_templates->start->opcode_modifier.jumpbyte)
3903 {
3904 /* Check for a branch hint. We allow ",pt" and ",pn" for
3905 predict taken and predict not taken respectively.
3906 I'm not sure that branch hints actually do anything on loop
3907 and jcxz insns (JumpByte) for current Pentium4 chips. They
3908 may work in the future and it doesn't hurt to accept them
3909 now. */
3910 if (l[0] == ',' && l[1] == 'p')
3911 {
3912 if (l[2] == 't')
3913 {
3914 if (!add_prefix (DS_PREFIX_OPCODE))
3915 return NULL;
3916 l += 3;
3917 }
3918 else if (l[2] == 'n')
3919 {
3920 if (!add_prefix (CS_PREFIX_OPCODE))
3921 return NULL;
3922 l += 3;
3923 }
3924 }
3925 }
3926 /* Any other comma loses. */
3927 if (*l == ',')
3928 {
3929 as_bad (_("invalid character %s in mnemonic"),
3930 output_invalid (*l));
3931 return NULL;
3932 }
3933
3934 /* Check if instruction is supported on specified architecture. */
3935 supported = 0;
3936 for (t = current_templates->start; t < current_templates->end; ++t)
3937 {
3938 supported |= cpu_flags_match (t);
3939 if (supported == CPU_FLAGS_PERFECT_MATCH)
3940 goto skip;
3941 }
3942
3943 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3944 {
3945 as_bad (flag_code == CODE_64BIT
3946 ? _("`%s' is not supported in 64-bit mode")
3947 : _("`%s' is only supported in 64-bit mode"),
3948 current_templates->start->name);
3949 return NULL;
3950 }
3951 if (supported != CPU_FLAGS_PERFECT_MATCH)
3952 {
3953 as_bad (_("`%s' is not supported on `%s%s'"),
3954 current_templates->start->name,
3955 cpu_arch_name ? cpu_arch_name : default_arch,
3956 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3957 return NULL;
3958 }
3959
3960skip:
3961 if (!cpu_arch_flags.bitfield.cpui386
3962 && (flag_code != CODE_16BIT))
3963 {
3964 as_warn (_("use .code16 to ensure correct addressing mode"));
3965 }
3966
3967 return l;
3968}
3969
3970static char *
3971parse_operands (char *l, const char *mnemonic)
3972{
3973 char *token_start;
3974
3975 /* 1 if operand is pending after ','. */
3976 unsigned int expecting_operand = 0;
3977
3978 /* Non-zero if operand parens not balanced. */
3979 unsigned int paren_not_balanced;
3980
3981 while (*l != END_OF_INSN)
3982 {
3983 /* Skip optional white space before operand. */
3984 if (is_space_char (*l))
3985 ++l;
3986 if (!is_operand_char (*l) && *l != END_OF_INSN && *l != '"')
3987 {
3988 as_bad (_("invalid character %s before operand %d"),
3989 output_invalid (*l),
3990 i.operands + 1);
3991 return NULL;
3992 }
3993 token_start = l; /* After white space. */
3994 paren_not_balanced = 0;
3995 while (paren_not_balanced || *l != ',')
3996 {
3997 if (*l == END_OF_INSN)
3998 {
3999 if (paren_not_balanced)
4000 {
4001 if (!intel_syntax)
4002 as_bad (_("unbalanced parenthesis in operand %d."),
4003 i.operands + 1);
4004 else
4005 as_bad (_("unbalanced brackets in operand %d."),
4006 i.operands + 1);
4007 return NULL;
4008 }
4009 else
4010 break; /* we are done */
4011 }
4012 else if (!is_operand_char (*l) && !is_space_char (*l) && *l != '"')
4013 {
4014 as_bad (_("invalid character %s in operand %d"),
4015 output_invalid (*l),
4016 i.operands + 1);
4017 return NULL;
4018 }
4019 if (!intel_syntax)
4020 {
4021 if (*l == '(')
4022 ++paren_not_balanced;
4023 if (*l == ')')
4024 --paren_not_balanced;
4025 }
4026 else
4027 {
4028 if (*l == '[')
4029 ++paren_not_balanced;
4030 if (*l == ']')
4031 --paren_not_balanced;
4032 }
4033 l++;
4034 }
4035 if (l != token_start)
4036 { /* Yes, we've read in another operand. */
4037 unsigned int operand_ok;
4038 this_operand = i.operands++;
4039 i.types[this_operand].bitfield.unspecified = 1;
4040 if (i.operands > MAX_OPERANDS)
4041 {
4042 as_bad (_("spurious operands; (%d operands/instruction max)"),
4043 MAX_OPERANDS);
4044 return NULL;
4045 }
4046 /* Now parse operand adding info to 'i' as we go along. */
4047 END_STRING_AND_SAVE (l);
4048
4049 if (intel_syntax)
4050 operand_ok =
4051 i386_intel_operand (token_start,
4052 intel_float_operand (mnemonic));
4053 else
4054 operand_ok = i386_att_operand (token_start);
4055
4056 RESTORE_END_STRING (l);
4057 if (!operand_ok)
4058 return NULL;
4059 }
4060 else
4061 {
4062 if (expecting_operand)
4063 {
4064 expecting_operand_after_comma:
4065 as_bad (_("expecting operand after ','; got nothing"));
4066 return NULL;
4067 }
4068 if (*l == ',')
4069 {
4070 as_bad (_("expecting operand before ','; got nothing"));
4071 return NULL;
4072 }
4073 }
4074
4075 /* Now *l must be either ',' or END_OF_INSN. */
4076 if (*l == ',')
4077 {
4078 if (*++l == END_OF_INSN)
4079 {
4080 /* Skip the comma; complain if nothing follows it.  */
4081 goto expecting_operand_after_comma;
4082 }
4083 expecting_operand = 1;
4084 }
4085 }
4086 return l;
4087}
4088
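/* Exchange everything recorded for operands XCHG1 and XCHG2 (type,
   value and relocation), and keep any mask, broadcast or rounding
   annotation attached to the operand it belongs to.  */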
4089static void
4090swap_2_operands (int xchg1, int xchg2)
4091{
4092 union i386_op temp_op;
4093 i386_operand_type temp_type;
4094 enum bfd_reloc_code_real temp_reloc;
4095
4096 temp_type = i.types[xchg2];
4097 i.types[xchg2] = i.types[xchg1];
4098 i.types[xchg1] = temp_type;
4099 temp_op = i.op[xchg2];
4100 i.op[xchg2] = i.op[xchg1];
4101 i.op[xchg1] = temp_op;
4102 temp_reloc = i.reloc[xchg2];
4103 i.reloc[xchg2] = i.reloc[xchg1];
4104 i.reloc[xchg1] = temp_reloc;
4105
4106 if (i.mask)
4107 {
4108 if (i.mask->operand == xchg1)
4109 i.mask->operand = xchg2;
4110 else if (i.mask->operand == xchg2)
4111 i.mask->operand = xchg1;
4112 }
4113 if (i.broadcast)
4114 {
4115 if (i.broadcast->operand == xchg1)
4116 i.broadcast->operand = xchg2;
4117 else if (i.broadcast->operand == xchg2)
4118 i.broadcast->operand = xchg1;
4119 }
4120 if (i.rounding)
4121 {
4122 if (i.rounding->operand == xchg1)
4123 i.rounding->operand = xchg2;
4124 else if (i.rounding->operand == xchg2)
4125 i.rounding->operand = xchg1;
4126 }
4127}
4128
4129static void
4130swap_operands (void)
4131{
4132 switch (i.operands)
4133 {
4134 case 5:
4135 case 4:
4136 swap_2_operands (1, i.operands - 2);
4137 case 3:
4138 case 2:
4139 swap_2_operands (0, i.operands - 1);
4140 break;
4141 default:
4142 abort ();
4143 }
4144
4145 if (i.mem_operands == 2)
4146 {
4147 const seg_entry *temp_seg;
4148 temp_seg = i.seg[0];
4149 i.seg[0] = i.seg[1];
4150 i.seg[1] = temp_seg;
4151 }
4152}
4153
4154/* Try to ensure constant immediates are represented in the smallest
4155 opcode possible. */
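/* For example, `add $1, %eax' can then match the sign-extended Imm8
   form (opcode 0x83) instead of requiring a four-byte immediate
   (opcode 0x81).  */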
4156static void
4157optimize_imm (void)
4158{
4159 char guess_suffix = 0;
4160 int op;
4161
4162 if (i.suffix)
4163 guess_suffix = i.suffix;
4164 else if (i.reg_operands)
4165 {
4166 /* Figure out a suffix from the last register operand specified.
4167 We can't do this properly yet, i.e. excluding InOutPortReg,
4168 but the following works for instructions with immediates.
4169 In any case, we can't set i.suffix yet. */
4170 for (op = i.operands; --op >= 0;)
4171 if (i.types[op].bitfield.reg8)
4172 {
4173 guess_suffix = BYTE_MNEM_SUFFIX;
4174 break;
4175 }
4176 else if (i.types[op].bitfield.reg16)
4177 {
4178 guess_suffix = WORD_MNEM_SUFFIX;
4179 break;
4180 }
4181 else if (i.types[op].bitfield.reg32)
4182 {
4183 guess_suffix = LONG_MNEM_SUFFIX;
4184 break;
4185 }
4186 else if (i.types[op].bitfield.reg64)
4187 {
4188 guess_suffix = QWORD_MNEM_SUFFIX;
4189 break;
4190 }
4191 }
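/* With no register operands to infer a size from, the effective
   operand size is 16 bits either in 16-bit mode without a data size
   prefix, or in 32/64-bit mode with one; the XOR below captures
   exactly that.  */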
4192 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4193 guess_suffix = WORD_MNEM_SUFFIX;
4194
4195 for (op = i.operands; --op >= 0;)
4196 if (operand_type_check (i.types[op], imm))
4197 {
4198 switch (i.op[op].imms->X_op)
4199 {
4200 case O_constant:
4201 /* If a suffix is given, this operand may be shortened. */
4202 switch (guess_suffix)
4203 {
4204 case LONG_MNEM_SUFFIX:
4205 i.types[op].bitfield.imm32 = 1;
4206 i.types[op].bitfield.imm64 = 1;
4207 break;
4208 case WORD_MNEM_SUFFIX:
4209 i.types[op].bitfield.imm16 = 1;
4210 i.types[op].bitfield.imm32 = 1;
4211 i.types[op].bitfield.imm32s = 1;
4212 i.types[op].bitfield.imm64 = 1;
4213 break;
4214 case BYTE_MNEM_SUFFIX:
4215 i.types[op].bitfield.imm8 = 1;
4216 i.types[op].bitfield.imm8s = 1;
4217 i.types[op].bitfield.imm16 = 1;
4218 i.types[op].bitfield.imm32 = 1;
4219 i.types[op].bitfield.imm32s = 1;
4220 i.types[op].bitfield.imm64 = 1;
4221 break;
4222 }
4223
4224 /* If this operand is at most 16 bits, convert it
4225 to a signed 16 bit number before trying to see
4226 whether it will fit in an even smaller size.
4227 This allows a 16-bit operand such as $0xffe0 to
4228 be recognised as within Imm8S range. */
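/* The ((x & 0xffff) ^ 0x8000) - 0x8000 form below sign-extends bit 15,
   e.g. turning 0xffe0 into -0x20.  */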
4229 if ((i.types[op].bitfield.imm16)
4230 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
4231 {
4232 i.op[op].imms->X_add_number =
4233 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
4234 }
4235#ifdef BFD64
4236 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
4237 if ((i.types[op].bitfield.imm32)
4238 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
4239 == 0))
4240 {
4241 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
4242 ^ ((offsetT) 1 << 31))
4243 - ((offsetT) 1 << 31));
4244 }
4245#endif
4246 i.types[op]
4247 = operand_type_or (i.types[op],
4248 smallest_imm_type (i.op[op].imms->X_add_number));
4249
4250 /* We must avoid matching Imm32 templates when only a 64-bit
4251 immediate is available. */
4252 if (guess_suffix == QWORD_MNEM_SUFFIX)
4253 i.types[op].bitfield.imm32 = 0;
4254 break;
4255
4256 case O_absent:
4257 case O_register:
4258 abort ();
4259
4260 /* Symbols and expressions. */
4261 default:
4262 /* Convert symbolic operand to proper sizes for matching, but don't
4263 prevent matching a set of insns that only supports sizes other
4264 than those matching the insn suffix. */
4265 {
4266 i386_operand_type mask, allowed;
4267 const insn_template *t;
4268
4269 operand_type_set (&mask, 0);
4270 operand_type_set (&allowed, 0);
4271
4272 for (t = current_templates->start;
4273 t < current_templates->end;
4274 ++t)
4275 allowed = operand_type_or (allowed,
4276 t->operand_types[op]);
4277 switch (guess_suffix)
4278 {
4279 case QWORD_MNEM_SUFFIX:
4280 mask.bitfield.imm64 = 1;
4281 mask.bitfield.imm32s = 1;
4282 break;
4283 case LONG_MNEM_SUFFIX:
4284 mask.bitfield.imm32 = 1;
4285 break;
4286 case WORD_MNEM_SUFFIX:
4287 mask.bitfield.imm16 = 1;
4288 break;
4289 case BYTE_MNEM_SUFFIX:
4290 mask.bitfield.imm8 = 1;
4291 break;
4292 default:
4293 break;
4294 }
4295 allowed = operand_type_and (mask, allowed);
4296 if (!operand_type_all_zero (&allowed))
4297 i.types[op] = operand_type_and (i.types[op], mask);
4298 }
4299 break;
4300 }
4301 }
4302}
4303
4304/* Try to use the smallest displacement type too. */
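/* For example, a constant displacement of -8 lets `-8(%ebp)' be encoded
   with a one-byte disp8 instead of a full disp32.  */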
4305static void
4306optimize_disp (void)
4307{
4308 int op;
4309
4310 for (op = i.operands; --op >= 0;)
4311 if (operand_type_check (i.types[op], disp))
4312 {
4313 if (i.op[op].disps->X_op == O_constant)
4314 {
4315 offsetT op_disp = i.op[op].disps->X_add_number;
4316
4317 if (i.types[op].bitfield.disp16
4318 && (op_disp & ~(offsetT) 0xffff) == 0)
4319 {
4320 /* If this operand is at most 16 bits, convert
4321 to a signed 16 bit number and don't use 64bit
4322 displacement. */
4323 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
4324 i.types[op].bitfield.disp64 = 0;
4325 }
4326#ifdef BFD64
4327 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
4328 if (i.types[op].bitfield.disp32
4329 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
4330 {
4331 /* If this operand is at most 32 bits, convert
4332 to a signed 32 bit number and don't use 64bit
4333 displacement. */
4334 op_disp &= (((offsetT) 2 << 31) - 1);
4335 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
4336 i.types[op].bitfield.disp64 = 0;
4337 }
4338#endif
4339 if (!op_disp && i.types[op].bitfield.baseindex)
4340 {
4341 i.types[op].bitfield.disp8 = 0;
4342 i.types[op].bitfield.disp16 = 0;
4343 i.types[op].bitfield.disp32 = 0;
4344 i.types[op].bitfield.disp32s = 0;
4345 i.types[op].bitfield.disp64 = 0;
4346 i.op[op].disps = 0;
4347 i.disp_operands--;
4348 }
4349 else if (flag_code == CODE_64BIT)
4350 {
4351 if (fits_in_signed_long (op_disp))
4352 {
4353 i.types[op].bitfield.disp64 = 0;
4354 i.types[op].bitfield.disp32s = 1;
4355 }
4356 if (i.prefix[ADDR_PREFIX]
4357 && fits_in_unsigned_long (op_disp))
4358 i.types[op].bitfield.disp32 = 1;
4359 }
4360 if ((i.types[op].bitfield.disp32
4361 || i.types[op].bitfield.disp32s
4362 || i.types[op].bitfield.disp16)
4363 && fits_in_signed_byte (op_disp))
4364 i.types[op].bitfield.disp8 = 1;
4365 }
4366 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
4367 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
4368 {
4369 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
4370 i.op[op].disps, 0, i.reloc[op]);
4371 i.types[op].bitfield.disp8 = 0;
4372 i.types[op].bitfield.disp16 = 0;
4373 i.types[op].bitfield.disp32 = 0;
4374 i.types[op].bitfield.disp32s = 0;
4375 i.types[op].bitfield.disp64 = 0;
4376 }
4377 else
4378 /* We only support 64bit displacement on constants. */
4379 i.types[op].bitfield.disp64 = 0;
4380 }
4381}
4382
4383/* Check if operands are valid for the instruction. */
4384
4385static int
4386check_VecOperands (const insn_template *t)
4387{
4388 unsigned int op;
4389
4390 /* Without VSIB byte, we can't have a vector register for index. */
4391 if (!t->opcode_modifier.vecsib
4392 && i.index_reg
4393 && (i.index_reg->reg_type.bitfield.regxmm
4394 || i.index_reg->reg_type.bitfield.regymm
4395 || i.index_reg->reg_type.bitfield.regzmm))
4396 {
4397 i.error = unsupported_vector_index_register;
4398 return 1;
4399 }
4400
4401 /* Check if default mask is allowed. */
4402 if (t->opcode_modifier.nodefmask
4403 && (!i.mask || i.mask->mask->reg_num == 0))
4404 {
4405 i.error = no_default_mask;
4406 return 1;
4407 }
4408
4409 /* For VSIB byte, we need a vector register for index, and all vector
4410 registers must be distinct. */
4411 if (t->opcode_modifier.vecsib)
4412 {
4413 if (!i.index_reg
4414 || !((t->opcode_modifier.vecsib == VecSIB128
4415 && i.index_reg->reg_type.bitfield.regxmm)
4416 || (t->opcode_modifier.vecsib == VecSIB256
4417 && i.index_reg->reg_type.bitfield.regymm)
4418 || (t->opcode_modifier.vecsib == VecSIB512
4419 && i.index_reg->reg_type.bitfield.regzmm)))
4420 {
4421 i.error = invalid_vsib_address;
4422 return 1;
4423 }
4424
4425 gas_assert (i.reg_operands == 2 || i.mask);
4426 if (i.reg_operands == 2 && !i.mask)
4427 {
4428 gas_assert (i.types[0].bitfield.regxmm
4429 || i.types[0].bitfield.regymm);
4430 gas_assert (i.types[2].bitfield.regxmm
4431 || i.types[2].bitfield.regymm);
4432 if (operand_check == check_none)
4433 return 0;
4434 if (register_number (i.op[0].regs)
4435 != register_number (i.index_reg)
4436 && register_number (i.op[2].regs)
4437 != register_number (i.index_reg)
4438 && register_number (i.op[0].regs)
4439 != register_number (i.op[2].regs))
4440 return 0;
4441 if (operand_check == check_error)
4442 {
4443 i.error = invalid_vector_register_set;
4444 return 1;
4445 }
4446 as_warn (_("mask, index, and destination registers should be distinct"));
4447 }
4448 else if (i.reg_operands == 1 && i.mask)
4449 {
4450 if ((i.types[1].bitfield.regymm
4451 || i.types[1].bitfield.regzmm)
4452 && (register_number (i.op[1].regs)
4453 == register_number (i.index_reg)))
4454 {
4455 if (operand_check == check_error)
4456 {
4457 i.error = invalid_vector_register_set;
4458 return 1;
4459 }
4460 if (operand_check != check_none)
4461 as_warn (_("index and destination registers should be distinct"));
4462 }
4463 }
4464 }
4465
4466 /* Check if broadcast is supported by the instruction and is applied
4467 to the memory operand. */
4468 if (i.broadcast)
4469 {
4470 int broadcasted_opnd_size;
4471
4472 /* Check that the specified broadcast is supported by this instruction,
4473 and that it's applied to a memory operand of DWORD or QWORD type,
4474 depending on VecESize. */
4475 if (i.broadcast->type != t->opcode_modifier.broadcast
4476 || !i.types[i.broadcast->operand].bitfield.mem
4477 || (t->opcode_modifier.vecesize == 0
4478 && !i.types[i.broadcast->operand].bitfield.dword
4479 && !i.types[i.broadcast->operand].bitfield.unspecified)
4480 || (t->opcode_modifier.vecesize == 1
4481 && !i.types[i.broadcast->operand].bitfield.qword
4482 && !i.types[i.broadcast->operand].bitfield.unspecified))
4483 goto bad_broadcast;
4484
4485 broadcasted_opnd_size = t->opcode_modifier.vecesize ? 64 : 32;
4486 if (i.broadcast->type == BROADCAST_1TO16)
4487 broadcasted_opnd_size <<= 4; /* Broadcast 1to16. */
4488 else if (i.broadcast->type == BROADCAST_1TO8)
4489 broadcasted_opnd_size <<= 3; /* Broadcast 1to8. */
4490 else if (i.broadcast->type == BROADCAST_1TO4)
4491 broadcasted_opnd_size <<= 2; /* Broadcast 1to4. */
4492 else if (i.broadcast->type == BROADCAST_1TO2)
4493 broadcasted_opnd_size <<= 1; /* Broadcast 1to2. */
4494 else
4495 goto bad_broadcast;
4496
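/* E.g. a {1to16} broadcast of 32-bit elements covers 32 << 4 == 512
   bits and must therefore pair with a ZMM (zmmword) operand.  */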
4497 if ((broadcasted_opnd_size == 256
4498 && !t->operand_types[i.broadcast->operand].bitfield.ymmword)
4499 || (broadcasted_opnd_size == 512
4500 && !t->operand_types[i.broadcast->operand].bitfield.zmmword))
4501 {
4502 bad_broadcast:
4503 i.error = unsupported_broadcast;
4504 return 1;
4505 }
4506 }
4507 /* If this instruction supports broadcast, check that an operand of
4508 one-element size isn't specified without it. */
4509 else if (t->opcode_modifier.broadcast && i.mem_operands)
4510 {
4511 /* Find memory operand. */
4512 for (op = 0; op < i.operands; op++)
4513 if (operand_type_check (i.types[op], anymem))
4514 break;
4515 gas_assert (op < i.operands);
4516 /* Check size of the memory operand. */
4517 if ((t->opcode_modifier.vecesize == 0
4518 && i.types[op].bitfield.dword)
4519 || (t->opcode_modifier.vecesize == 1
4520 && i.types[op].bitfield.qword))
4521 {
4522 i.error = broadcast_needed;
4523 return 1;
4524 }
4525 }
4526
4527 /* Check if requested masking is supported. */
4528 if (i.mask
4529 && (!t->opcode_modifier.masking
4530 || (i.mask->zeroing
4531 && t->opcode_modifier.masking == MERGING_MASKING)))
4532 {
4533 i.error = unsupported_masking;
4534 return 1;
4535 }
4536
4537 /* Check if masking is applied to dest operand. */
4538 if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
4539 {
4540 i.error = mask_not_on_destination;
4541 return 1;
4542 }
4543
4544 /* Check RC/SAE. */
4545 if (i.rounding)
4546 {
4547 if ((i.rounding->type != saeonly
4548 && !t->opcode_modifier.staticrounding)
4549 || (i.rounding->type == saeonly
4550 && (t->opcode_modifier.staticrounding
4551 || !t->opcode_modifier.sae)))
4552 {
4553 i.error = unsupported_rc_sae;
4554 return 1;
4555 }
4556 /* If the instruction has several immediate operands and one of
4557 them is rounding, the rounding operand should be the last
4558 immediate operand. */
4559 if (i.imm_operands > 1
4560 && i.rounding->operand != (int) (i.imm_operands - 1))
4561 {
4562 i.error = rc_sae_operand_not_last_imm;
4563 return 1;
4564 }
4565 }
4566
4567 /* Check vector Disp8 operand. */
4568 if (t->opcode_modifier.disp8memshift)
4569 {
4570 if (i.broadcast)
4571 i.memshift = t->opcode_modifier.vecesize ? 3 : 2;
4572 else
4573 i.memshift = t->opcode_modifier.disp8memshift;
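/* With AVX512's compressed displacement (disp8*N) the displacement
   byte is implicitly scaled by 1 << i.memshift bytes, so larger
   aligned offsets still fit in a single byte.  */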
4574
4575 for (op = 0; op < i.operands; op++)
4576 if (operand_type_check (i.types[op], disp)
4577 && i.op[op].disps->X_op == O_constant)
4578 {
4579 offsetT value = i.op[op].disps->X_add_number;
4580 int vec_disp8_ok
4581 = (i.disp_encoding != disp_encoding_32bit
4582 && fits_in_vec_disp8 (value));
4583 if (t->operand_types [op].bitfield.vec_disp8)
4584 {
4585 if (vec_disp8_ok)
4586 i.types[op].bitfield.vec_disp8 = 1;
4587 else
4588 {
4589 /* Vector insn can only have Vec_Disp8/Disp32 in
4590 32/64bit modes, and Vec_Disp8/Disp16 in 16bit
4591 mode. */
4592 i.types[op].bitfield.disp8 = 0;
4593 if (flag_code != CODE_16BIT)
4594 i.types[op].bitfield.disp16 = 0;
4595 }
4596 }
4597 else if (flag_code != CODE_16BIT)
4598 {
4599 /* One form of this instruction supports vector Disp8.
4600 Try vector Disp8 if we need to use Disp32. */
4601 if (vec_disp8_ok && !fits_in_signed_byte (value))
4602 {
4603 i.error = try_vector_disp8;
4604 return 1;
4605 }
4606 }
4607 }
4608 }
4609 else
4610 i.memshift = -1;
4611
4612 return 0;
4613}
4614
4615/* Check if operands are valid for the instruction. Update VEX
4616 operand types. */
4617
4618static int
4619VEX_check_operands (const insn_template *t)
4620{
4621 /* VREX is only valid with EVEX prefix. */
4622 if (i.need_vrex && !t->opcode_modifier.evex)
4623 {
4624 i.error = invalid_register_operand;
4625 return 1;
4626 }
4627
4628 if (!t->opcode_modifier.vex)
4629 return 0;
4630
4631 /* Only check VEX_Imm4, which must be the first operand. */
4632 if (t->operand_types[0].bitfield.vec_imm4)
4633 {
4634 if (i.op[0].imms->X_op != O_constant
4635 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4636 {
4637 i.error = bad_imm4;
4638 return 1;
4639 }
4640
4641 /* Turn off Imm8 so that update_imm won't complain. */
4642 i.types[0] = vec_imm4;
4643 }
4644
4645 return 0;
4646}
4647
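/* Search the templates collected for this mnemonic for one whose
   operand count, operand types, suffix and CPU requirements match what
   was parsed.  On success copy the template into i.tm and return it;
   on failure issue a diagnostic and return NULL.  */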
4648static const insn_template *
4649match_template (void)
4650{
4651 /* Points to template once we've found it. */
4652 const insn_template *t;
4653 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4654 i386_operand_type overlap4;
4655 unsigned int found_reverse_match;
4656 i386_opcode_modifier suffix_check;
4657 i386_operand_type operand_types [MAX_OPERANDS];
4658 int addr_prefix_disp;
4659 unsigned int j;
4660 unsigned int found_cpu_match;
4661 unsigned int check_register;
4662 enum i386_error specific_error = 0;
4663
4664#if MAX_OPERANDS != 5
4665# error "MAX_OPERANDS must be 5."
4666#endif
4667
4668 found_reverse_match = 0;
4669 addr_prefix_disp = -1;
4670
4671 memset (&suffix_check, 0, sizeof (suffix_check));
4672 if (i.suffix == BYTE_MNEM_SUFFIX)
4673 suffix_check.no_bsuf = 1;
4674 else if (i.suffix == WORD_MNEM_SUFFIX)
4675 suffix_check.no_wsuf = 1;
4676 else if (i.suffix == SHORT_MNEM_SUFFIX)
4677 suffix_check.no_ssuf = 1;
4678 else if (i.suffix == LONG_MNEM_SUFFIX)
4679 suffix_check.no_lsuf = 1;
4680 else if (i.suffix == QWORD_MNEM_SUFFIX)
4681 suffix_check.no_qsuf = 1;
4682 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4683 suffix_check.no_ldsuf = 1;
4684
4685 /* Must have right number of operands. */
4686 i.error = number_of_operands_mismatch;
4687
4688 for (t = current_templates->start; t < current_templates->end; t++)
4689 {
4690 addr_prefix_disp = -1;
4691
4692 if (i.operands != t->operands)
4693 continue;
4694
4695 /* Check processor support. */
4696 i.error = unsupported;
4697 found_cpu_match = (cpu_flags_match (t)
4698 == CPU_FLAGS_PERFECT_MATCH);
4699 if (!found_cpu_match)
4700 continue;
4701
4702 /* Check old gcc support. */
4703 i.error = old_gcc_only;
4704 if (!old_gcc && t->opcode_modifier.oldgcc)
4705 continue;
4706
4707 /* Check AT&T mnemonic. */
4708 i.error = unsupported_with_intel_mnemonic;
4709 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4710 continue;
4711
4712 /* Check AT&T/Intel syntax. */
4713 i.error = unsupported_syntax;
4714 if ((intel_syntax && t->opcode_modifier.attsyntax)
4715 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4716 continue;
4717
4718 /* Check the suffix, except for some instructions in intel mode. */
4719 i.error = invalid_instruction_suffix;
4720 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4721 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4722 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4723 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4724 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4725 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4726 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4727 continue;
4728
4729 if (!operand_size_match (t))
4730 continue;
4731
4732 for (j = 0; j < MAX_OPERANDS; j++)
4733 operand_types[j] = t->operand_types[j];
4734
4735 /* In general, don't allow 64-bit operands in 32-bit mode. */
4736 if (i.suffix == QWORD_MNEM_SUFFIX
4737 && flag_code != CODE_64BIT
4738 && (intel_syntax
4739 ? (!t->opcode_modifier.ignoresize
4740 && !intel_float_operand (t->name))
4741 : intel_float_operand (t->name) != 2)
4742 && ((!operand_types[0].bitfield.regmmx
4743 && !operand_types[0].bitfield.regxmm
4744 && !operand_types[0].bitfield.regymm
4745 && !operand_types[0].bitfield.regzmm)
4746 || (!operand_types[t->operands > 1].bitfield.regmmx
4747 && operand_types[t->operands > 1].bitfield.regxmm
4748 && operand_types[t->operands > 1].bitfield.regymm
4749 && operand_types[t->operands > 1].bitfield.regzmm))
4750 && (t->base_opcode != 0x0fc7
4751 || t->extension_opcode != 1 /* cmpxchg8b */))
4752 continue;
4753
4754 /* In general, don't allow 32-bit operands on pre-386. */
4755 else if (i.suffix == LONG_MNEM_SUFFIX
4756 && !cpu_arch_flags.bitfield.cpui386
4757 && (intel_syntax
4758 ? (!t->opcode_modifier.ignoresize
4759 && !intel_float_operand (t->name))
4760 : intel_float_operand (t->name) != 2)
4761 && ((!operand_types[0].bitfield.regmmx
4762 && !operand_types[0].bitfield.regxmm)
4763 || (!operand_types[t->operands > 1].bitfield.regmmx
4764 && operand_types[t->operands > 1].bitfield.regxmm)))
4765 continue;
4766
4767 /* Do not verify operands when there are none. */
4768 else
4769 {
4770 if (!t->operands)
4771 /* We've found a match; break out of loop. */
4772 break;
4773 }
4774
4775 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4776 into Disp32/Disp16/Disp32 operand. */
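/* For example, in 64-bit mode a 0x67 prefix makes the moffs form of
   mov use a 32-bit rather than a 64-bit absolute address, so the
   template's Disp64 operand must be treated as Disp32.  */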
4777 if (i.prefix[ADDR_PREFIX] != 0)
4778 {
4779 /* There should be only one Disp operand. */
4780 switch (flag_code)
4781 {
4782 case CODE_16BIT:
4783 for (j = 0; j < MAX_OPERANDS; j++)
4784 {
4785 if (operand_types[j].bitfield.disp16)
4786 {
4787 addr_prefix_disp = j;
4788 operand_types[j].bitfield.disp32 = 1;
4789 operand_types[j].bitfield.disp16 = 0;
4790 break;
4791 }
4792 }
4793 break;
4794 case CODE_32BIT:
4795 for (j = 0; j < MAX_OPERANDS; j++)
4796 {
4797 if (operand_types[j].bitfield.disp32)
4798 {
4799 addr_prefix_disp = j;
4800 operand_types[j].bitfield.disp32 = 0;
4801 operand_types[j].bitfield.disp16 = 1;
4802 break;
4803 }
4804 }
4805 break;
4806 case CODE_64BIT:
4807 for (j = 0; j < MAX_OPERANDS; j++)
4808 {
4809 if (operand_types[j].bitfield.disp64)
4810 {
4811 addr_prefix_disp = j;
4812 operand_types[j].bitfield.disp64 = 0;
4813 operand_types[j].bitfield.disp32 = 1;
4814 break;
4815 }
4816 }
4817 break;
4818 }
4819 }
4820
4821 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
4822 if (i.reloc[0] == BFD_RELOC_386_GOT32 && t->base_opcode == 0xa0)
4823 continue;
4824
4825 /* We check register size if needed. */
4826 check_register = t->opcode_modifier.checkregsize;
4827 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4828 switch (t->operands)
4829 {
4830 case 1:
4831 if (!operand_type_match (overlap0, i.types[0]))
4832 continue;
4833 break;
4834 case 2:
4835 /* xchg %eax, %eax is a special case. It is an alias for nop
4836 only in 32bit mode and we can use opcode 0x90. In 64bit
4837 mode, we can't use 0x90 for xchg %eax, %eax since it should
4838 zero-extend %eax to %rax. */
4839 if (flag_code == CODE_64BIT
4840 && t->base_opcode == 0x90
4841 && operand_type_equal (&i.types [0], &acc32)
4842 && operand_type_equal (&i.types [1], &acc32))
4843 continue;
4844 if (i.swap_operand)
4845 {
4846 /* If we swap operands in encoding, we either match
4847 the next one or reverse the direction of operands. */
4848 if (t->opcode_modifier.s)
4849 continue;
4850 else if (t->opcode_modifier.d)
4851 goto check_reverse;
4852 }
4853
4854 case 3:
4855 /* If we swap operands in encoding, we match the next one. */
4856 if (i.swap_operand && t->opcode_modifier.s)
4857 continue;
4858 case 4:
4859 case 5:
4860 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4861 if (!operand_type_match (overlap0, i.types[0])
4862 || !operand_type_match (overlap1, i.types[1])
4863 || (check_register
4864 && !operand_type_register_match (overlap0, i.types[0],
4865 operand_types[0],
4866 overlap1, i.types[1],
4867 operand_types[1])))
4868 {
4869 /* Check if other direction is valid ... */
4870 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4871 continue;
4872
4873check_reverse:
4874 /* Try reversing direction of operands. */
4875 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4876 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4877 if (!operand_type_match (overlap0, i.types[0])
4878 || !operand_type_match (overlap1, i.types[1])
4879 || (check_register
4880 && !operand_type_register_match (overlap0,
4881 i.types[0],
4882 operand_types[1],
4883 overlap1,
4884 i.types[1],
4885 operand_types[0])))
4886 {
4887 /* Does not match either direction. */
4888 continue;
4889 }
4890 /* found_reverse_match holds which of D or FloatDR
4891 we've found. */
4892 if (t->opcode_modifier.d)
4893 found_reverse_match = Opcode_D;
4894 else if (t->opcode_modifier.floatd)
4895 found_reverse_match = Opcode_FloatD;
4896 else
4897 found_reverse_match = 0;
4898 if (t->opcode_modifier.floatr)
4899 found_reverse_match |= Opcode_FloatR;
4900 }
4901 else
4902 {
4903 /* Found a forward 2 operand match here. */
4904 switch (t->operands)
4905 {
4906 case 5:
4907 overlap4 = operand_type_and (i.types[4],
4908 operand_types[4]);
4909 case 4:
4910 overlap3 = operand_type_and (i.types[3],
4911 operand_types[3]);
4912 case 3:
4913 overlap2 = operand_type_and (i.types[2],
4914 operand_types[2]);
4915 break;
4916 }
4917
4918 switch (t->operands)
4919 {
4920 case 5:
4921 if (!operand_type_match (overlap4, i.types[4])
4922 || !operand_type_register_match (overlap3,
4923 i.types[3],
4924 operand_types[3],
4925 overlap4,
4926 i.types[4],
4927 operand_types[4]))
4928 continue;
4929 case 4:
4930 if (!operand_type_match (overlap3, i.types[3])
4931 || (check_register
4932 && !operand_type_register_match (overlap2,
4933 i.types[2],
4934 operand_types[2],
4935 overlap3,
4936 i.types[3],
4937 operand_types[3])))
4938 continue;
4939 case 3:
4940 /* Here we make use of the fact that there are no
4941 reverse match 3 operand instructions, and all 3
4942 operand instructions only need to be checked for
4943 register consistency between operands 2 and 3. */
4944 if (!operand_type_match (overlap2, i.types[2])
4945 || (check_register
4946 && !operand_type_register_match (overlap1,
4947 i.types[1],
4948 operand_types[1],
4949 overlap2,
4950 i.types[2],
4951 operand_types[2])))
4952 continue;
4953 break;
4954 }
4955 }
4956 /* Found either forward/reverse 2, 3 or 4 operand match here:
4957 slip through to break. */
4958 }
4959 if (!found_cpu_match)
4960 {
4961 found_reverse_match = 0;
4962 continue;
4963 }
4964
4965 /* Check if vector and VEX operands are valid. */
4966 if (check_VecOperands (t) || VEX_check_operands (t))
4967 {
4968 specific_error = i.error;
4969 continue;
4970 }
4971
4972 /* We've found a match; break out of loop. */
4973 break;
4974 }
4975
4976 if (t == current_templates->end)
4977 {
4978 /* We found no match. */
4979 const char *err_msg;
4980 switch (specific_error ? specific_error : i.error)
4981 {
4982 default:
4983 abort ();
4984 case operand_size_mismatch:
4985 err_msg = _("operand size mismatch");
4986 break;
4987 case operand_type_mismatch:
4988 err_msg = _("operand type mismatch");
4989 break;
4990 case register_type_mismatch:
4991 err_msg = _("register type mismatch");
4992 break;
4993 case number_of_operands_mismatch:
4994 err_msg = _("number of operands mismatch");
4995 break;
4996 case invalid_instruction_suffix:
4997 err_msg = _("invalid instruction suffix");
4998 break;
4999 case bad_imm4:
5000 err_msg = _("constant doesn't fit in 4 bits");
5001 break;
5002 case old_gcc_only:
5003 err_msg = _("only supported with old gcc");
5004 break;
5005 case unsupported_with_intel_mnemonic:
5006 err_msg = _("unsupported with Intel mnemonic");
5007 break;
5008 case unsupported_syntax:
5009 err_msg = _("unsupported syntax");
5010 break;
5011 case unsupported:
5012 as_bad (_("unsupported instruction `%s'"),
5013 current_templates->start->name);
5014 return NULL;
5015 case invalid_vsib_address:
5016 err_msg = _("invalid VSIB address");
5017 break;
5018 case invalid_vector_register_set:
5019 err_msg = _("mask, index, and destination registers must be distinct");
5020 break;
5021 case unsupported_vector_index_register:
5022 err_msg = _("unsupported vector index register");
5023 break;
5024 case unsupported_broadcast:
5025 err_msg = _("unsupported broadcast");
5026 break;
5027 case broadcast_not_on_src_operand:
5028 err_msg = _("broadcast not on source memory operand");
5029 break;
5030 case broadcast_needed:
5031 err_msg = _("broadcast is needed for operand of such type");
5032 break;
5033 case unsupported_masking:
5034 err_msg = _("unsupported masking");
5035 break;
5036 case mask_not_on_destination:
5037 err_msg = _("mask not on destination operand");
5038 break;
5039 case no_default_mask:
5040 err_msg = _("default mask isn't allowed");
5041 break;
5042 case unsupported_rc_sae:
5043 err_msg = _("unsupported static rounding/sae");
5044 break;
5045 case rc_sae_operand_not_last_imm:
5046 if (intel_syntax)
5047 err_msg = _("RC/SAE operand must precede immediate operands");
5048 else
5049 err_msg = _("RC/SAE operand must follow immediate operands");
5050 break;
5051 case invalid_register_operand:
5052 err_msg = _("invalid register operand");
5053 break;
5054 }
5055 as_bad (_("%s for `%s'"), err_msg,
5056 current_templates->start->name);
5057 return NULL;
5058 }
5059
5060 if (!quiet_warnings)
5061 {
5062 if (!intel_syntax
5063 && (i.types[0].bitfield.jumpabsolute
5064 != operand_types[0].bitfield.jumpabsolute))
5065 {
5066 as_warn (_("indirect %s without `*'"), t->name);
5067 }
5068
5069 if (t->opcode_modifier.isprefix
5070 && t->opcode_modifier.ignoresize)
5071 {
5072 /* Warn them that a data or address size prefix doesn't
5073 affect assembly of the next line of code. */
5074 as_warn (_("stand-alone `%s' prefix"), t->name);
5075 }
5076 }
5077
5078 /* Copy the template we found. */
5079 i.tm = *t;
5080
5081 if (addr_prefix_disp != -1)
5082 i.tm.operand_types[addr_prefix_disp]
5083 = operand_types[addr_prefix_disp];
5084
5085 if (found_reverse_match)
5086 {
5087 /* If we found a reverse match we must alter the opcode
5088 direction bit. found_reverse_match holds bits to change
5089 (different for int & float insns). */
5090
5091 i.tm.base_opcode ^= found_reverse_match;
5092
5093 i.tm.operand_types[0] = operand_types[1];
5094 i.tm.operand_types[1] = operand_types[0];
5095 }
5096
5097 return t;
5098}
5099
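/* String instructions such as movs and stos hard-wire one memory
   operand to the ES segment.  Reject an explicit override that names
   any other segment, and move a legal override on the other operand to
   the slot where common code expects it.  */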
5100static int
5101check_string (void)
5102{
5103 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
5104 if (i.tm.operand_types[mem_op].bitfield.esseg)
5105 {
5106 if (i.seg[0] != NULL && i.seg[0] != &es)
5107 {
5108 as_bad (_("`%s' operand %d must use `%ses' segment"),
5109 i.tm.name,
5110 mem_op + 1,
5111 register_prefix);
5112 return 0;
5113 }
5114 /* There's only ever one segment override allowed per instruction.
5115 This instruction possibly has a legal segment override on the
5116 second operand, so copy the segment to where non-string
5117 instructions store it, allowing common code. */
5118 i.seg[0] = i.seg[1];
5119 }
5120 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
5121 {
5122 if (i.seg[1] != NULL && i.seg[1] != &es)
5123 {
5124 as_bad (_("`%s' operand %d must use `%ses' segment"),
5125 i.tm.name,
5126 mem_op + 2,
5127 register_prefix);
5128 return 0;
5129 }
5130 }
5131 return 1;
5132}
5133
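/* Work out the operand size suffix, either from the template, the
   register operands or the current mode, and apply its consequences:
   the opcode's W bit, operand/address size prefixes and REX.W.
   Return 0 on failure.  */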
5134static int
5135process_suffix (void)
5136{
5137 /* If matched instruction specifies an explicit instruction mnemonic
5138 suffix, use it. */
5139 if (i.tm.opcode_modifier.size16)
5140 i.suffix = WORD_MNEM_SUFFIX;
5141 else if (i.tm.opcode_modifier.size32)
5142 i.suffix = LONG_MNEM_SUFFIX;
5143 else if (i.tm.opcode_modifier.size64)
5144 i.suffix = QWORD_MNEM_SUFFIX;
5145 else if (i.reg_operands)
5146 {
5147 /* If there's no instruction mnemonic suffix we try to invent one
5148 based on register operands. */
5149 if (!i.suffix)
5150 {
5151 /* We take i.suffix from the last register operand specified.
5152 The destination register type is more significant than the source
5153 register type; crc32 in SSE4.2, however, prefers the source
5154 register type. */
5155 if (i.tm.base_opcode == 0xf20f38f1)
5156 {
5157 if (i.types[0].bitfield.reg16)
5158 i.suffix = WORD_MNEM_SUFFIX;
5159 else if (i.types[0].bitfield.reg32)
5160 i.suffix = LONG_MNEM_SUFFIX;
5161 else if (i.types[0].bitfield.reg64)
5162 i.suffix = QWORD_MNEM_SUFFIX;
5163 }
5164 else if (i.tm.base_opcode == 0xf20f38f0)
5165 {
5166 if (i.types[0].bitfield.reg8)
5167 i.suffix = BYTE_MNEM_SUFFIX;
5168 }
5169
5170 if (!i.suffix)
5171 {
5172 int op;
5173
5174 if (i.tm.base_opcode == 0xf20f38f1
5175 || i.tm.base_opcode == 0xf20f38f0)
5176 {
5177 /* We have to know the operand size for crc32. */
5178 as_bad (_("ambiguous memory operand size for `%s'"),
5179 i.tm.name);
5180 return 0;
5181 }
5182
5183 for (op = i.operands; --op >= 0;)
5184 if (!i.tm.operand_types[op].bitfield.inoutportreg)
5185 {
5186 if (i.types[op].bitfield.reg8)
5187 {
5188 i.suffix = BYTE_MNEM_SUFFIX;
5189 break;
5190 }
5191 else if (i.types[op].bitfield.reg16)
5192 {
5193 i.suffix = WORD_MNEM_SUFFIX;
5194 break;
5195 }
5196 else if (i.types[op].bitfield.reg32)
5197 {
5198 i.suffix = LONG_MNEM_SUFFIX;
5199 break;
5200 }
5201 else if (i.types[op].bitfield.reg64)
5202 {
5203 i.suffix = QWORD_MNEM_SUFFIX;
5204 break;
5205 }
5206 }
5207 }
5208 }
5209 else if (i.suffix == BYTE_MNEM_SUFFIX)
5210 {
5211 if (intel_syntax
5212 && i.tm.opcode_modifier.ignoresize
5213 && i.tm.opcode_modifier.no_bsuf)
5214 i.suffix = 0;
5215 else if (!check_byte_reg ())
5216 return 0;
5217 }
5218 else if (i.suffix == LONG_MNEM_SUFFIX)
5219 {
5220 if (intel_syntax
5221 && i.tm.opcode_modifier.ignoresize
5222 && i.tm.opcode_modifier.no_lsuf)
5223 i.suffix = 0;
5224 else if (!check_long_reg ())
5225 return 0;
5226 }
5227 else if (i.suffix == QWORD_MNEM_SUFFIX)
5228 {
5229 if (intel_syntax
5230 && i.tm.opcode_modifier.ignoresize
5231 && i.tm.opcode_modifier.no_qsuf)
5232 i.suffix = 0;
5233 else if (!check_qword_reg ())
5234 return 0;
5235 }
5236 else if (i.suffix == WORD_MNEM_SUFFIX)
5237 {
5238 if (intel_syntax
5239 && i.tm.opcode_modifier.ignoresize
5240 && i.tm.opcode_modifier.no_wsuf)
5241 i.suffix = 0;
5242 else if (!check_word_reg ())
5243 return 0;
5244 }
5245 else if (i.suffix == XMMWORD_MNEM_SUFFIX
5246 || i.suffix == YMMWORD_MNEM_SUFFIX
5247 || i.suffix == ZMMWORD_MNEM_SUFFIX)
5248 {
5249 /* Skip if the instruction has x/y/z suffix. match_template
5250 should check if it is a valid suffix. */
5251 }
5252 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
5253 /* Do nothing if the instruction is going to ignore the prefix. */
5254 ;
5255 else
5256 abort ();
5257 }
5258 else if (i.tm.opcode_modifier.defaultsize
5259 && !i.suffix
5260 /* exclude fldenv/frstor/fsave/fstenv */
5261 && i.tm.opcode_modifier.no_ssuf)
5262 {
5263 i.suffix = stackop_size;
5264 }
5265 else if (intel_syntax
5266 && !i.suffix
5267 && (i.tm.operand_types[0].bitfield.jumpabsolute
5268 || i.tm.opcode_modifier.jumpbyte
5269 || i.tm.opcode_modifier.jumpintersegment
5270 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
5271 && i.tm.extension_opcode <= 3)))
5272 {
5273 switch (flag_code)
5274 {
5275 case CODE_64BIT:
5276 if (!i.tm.opcode_modifier.no_qsuf)
5277 {
5278 i.suffix = QWORD_MNEM_SUFFIX;
5279 break;
5280 }
5281 case CODE_32BIT:
5282 if (!i.tm.opcode_modifier.no_lsuf)
5283 i.suffix = LONG_MNEM_SUFFIX;
5284 break;
5285 case CODE_16BIT:
5286 if (!i.tm.opcode_modifier.no_wsuf)
5287 i.suffix = WORD_MNEM_SUFFIX;
5288 break;
5289 }
5290 }
5291
5292 if (!i.suffix)
5293 {
5294 if (!intel_syntax)
5295 {
5296 if (i.tm.opcode_modifier.w)
5297 {
5298 as_bad (_("no instruction mnemonic suffix given and "
5299 "no register operands; can't size instruction"));
5300 return 0;
5301 }
5302 }
5303 else
5304 {
5305 unsigned int suffixes;
5306
5307 suffixes = !i.tm.opcode_modifier.no_bsuf;
5308 if (!i.tm.opcode_modifier.no_wsuf)
5309 suffixes |= 1 << 1;
5310 if (!i.tm.opcode_modifier.no_lsuf)
5311 suffixes |= 1 << 2;
5312 if (!i.tm.opcode_modifier.no_ldsuf)
5313 suffixes |= 1 << 3;
5314 if (!i.tm.opcode_modifier.no_ssuf)
5315 suffixes |= 1 << 4;
5316 if (!i.tm.opcode_modifier.no_qsuf)
5317 suffixes |= 1 << 5;
5318
5319 /* More than one suffix is possible: `suffixes & (suffixes - 1)' is nonzero exactly when more than one bit is set. */
5320 if (i.tm.opcode_modifier.w
5321 || ((suffixes & (suffixes - 1))
5322 && !i.tm.opcode_modifier.defaultsize
5323 && !i.tm.opcode_modifier.ignoresize))
5324 {
5325 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
5326 return 0;
5327 }
5328 }
5329 }
5330
5331 /* Change the opcode based on the operand size given by i.suffix;
5332 We don't need to change things for byte insns. */
5333
5334 if (i.suffix
5335 && i.suffix != BYTE_MNEM_SUFFIX
5336 && i.suffix != XMMWORD_MNEM_SUFFIX
5337 && i.suffix != YMMWORD_MNEM_SUFFIX
5338 && i.suffix != ZMMWORD_MNEM_SUFFIX)
5339 {
5340 /* It's not a byte; select a word/dword operation. */
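/* E.g. `movb %al, (%ebx)' is opcode 0x88 while `movl %eax, (%ebx)' is
   0x89; only this low bit differs.  */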
5341 if (i.tm.opcode_modifier.w)
5342 {
5343 if (i.tm.opcode_modifier.shortform)
5344 i.tm.base_opcode |= 8;
5345 else
5346 i.tm.base_opcode |= 1;
5347 }
5348
5349 /* Now select between word & dword operations via the operand
5350 size prefix, except for instructions that will ignore this
5351 prefix anyway. */
5352 if (i.tm.opcode_modifier.addrprefixop0)
5353 {
5354 /* The address size override prefix changes the size of the
5355 first operand. */
5356 if ((flag_code == CODE_32BIT
5357 && i.op->regs[0].reg_type.bitfield.reg16)
5358 || (flag_code != CODE_32BIT
5359 && i.op->regs[0].reg_type.bitfield.reg32))
5360 if (!add_prefix (ADDR_PREFIX_OPCODE))
5361 return 0;
5362 }
5363 else if (i.suffix != QWORD_MNEM_SUFFIX
5364 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
5365 && !i.tm.opcode_modifier.ignoresize
5366 && !i.tm.opcode_modifier.floatmf
5367 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
5368 || (flag_code == CODE_64BIT
5369 && i.tm.opcode_modifier.jumpbyte)))
5370 {
5371 unsigned int prefix = DATA_PREFIX_OPCODE;
5372
5373 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
5374 prefix = ADDR_PREFIX_OPCODE;
5375
5376 if (!add_prefix (prefix))
5377 return 0;
5378 }
5379
5380 /* Set mode64 for an operand. */
5381 if (i.suffix == QWORD_MNEM_SUFFIX
5382 && flag_code == CODE_64BIT
5383 && !i.tm.opcode_modifier.norex64)
5384 {
5385 /* Special case for xchg %rax,%rax. It is a NOP and doesn't
5386 need rex64. cmpxchg8b is also a special case. */
5387 if (! (i.operands == 2
5388 && i.tm.base_opcode == 0x90
5389 && i.tm.extension_opcode == None
5390 && operand_type_equal (&i.types [0], &acc64)
5391 && operand_type_equal (&i.types [1], &acc64))
5392 && ! (i.operands == 1
5393 && i.tm.base_opcode == 0xfc7
5394 && i.tm.extension_opcode == 1
5395 && !operand_type_check (i.types [0], reg)
5396 && operand_type_check (i.types [0], anymem)))
5397 i.rex |= REX_W;
5398 }
5399
5400 /* Size floating point instruction. */
5401 if (i.suffix == LONG_MNEM_SUFFIX)
5402 if (i.tm.opcode_modifier.floatmf)
5403 i.tm.base_opcode ^= 4;
5404 }
5405
5406 return 1;
5407}
5408
5409static int
5410check_byte_reg (void)
5411{
5412 int op;
5413
5414 for (op = i.operands; --op >= 0;)
5415 {
5416 /* If this is an eight bit register, it's OK. If it's the 16 or
5417 32 bit version of an eight bit register, we will just use the
5418 low portion, and that's OK too. */
5419 if (i.types[op].bitfield.reg8)
5420 continue;
5421
5422 /* I/O port address operands are OK too. */
5423 if (i.tm.operand_types[op].bitfield.inoutportreg)
5424 continue;
5425
5426 /* crc32 doesn't generate this warning. */
5427 if (i.tm.base_opcode == 0xf20f38f0)
5428 continue;
5429
5430 if ((i.types[op].bitfield.reg16
5431 || i.types[op].bitfield.reg32
5432 || i.types[op].bitfield.reg64)
5433 && i.op[op].regs->reg_num < 4
5434 /* Prohibit these changes in 64bit mode, since the lowering
5435 would be more complicated. */
5436 && flag_code != CODE_64BIT)
5437 {
5438#if REGISTER_WARNINGS
5439 if (!quiet_warnings)
5440 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5441 register_prefix,
5442 (i.op[op].regs + (i.types[op].bitfield.reg16
5443 ? REGNAM_AL - REGNAM_AX
5444 : REGNAM_AL - REGNAM_EAX))->reg_name,
5445 register_prefix,
5446 i.op[op].regs->reg_name,
5447 i.suffix);
5448#endif
5449 continue;
5450 }
5451 /* Any other register is bad. */
5452 if (i.types[op].bitfield.reg16
5453 || i.types[op].bitfield.reg32
5454 || i.types[op].bitfield.reg64
5455 || i.types[op].bitfield.regmmx
5456 || i.types[op].bitfield.regxmm
5457 || i.types[op].bitfield.regymm
5458 || i.types[op].bitfield.regzmm
5459 || i.types[op].bitfield.sreg2
5460 || i.types[op].bitfield.sreg3
5461 || i.types[op].bitfield.control
5462 || i.types[op].bitfield.debug
5463 || i.types[op].bitfield.test
5464 || i.types[op].bitfield.floatreg
5465 || i.types[op].bitfield.floatacc)
5466 {
5467 as_bad (_("`%s%s' not allowed with `%s%c'"),
5468 register_prefix,
5469 i.op[op].regs->reg_name,
5470 i.tm.name,
5471 i.suffix);
5472 return 0;
5473 }
5474 }
5475 return 1;
5476}
5477
5478static int
5479check_long_reg (void)
5480{
5481 int op;
5482
5483 for (op = i.operands; --op >= 0;)
5484 /* Reject eight bit registers, except where the template requires
5485 them. (e.g. movzb) */
5486 if (i.types[op].bitfield.reg8
5487 && (i.tm.operand_types[op].bitfield.reg16
5488 || i.tm.operand_types[op].bitfield.reg32
5489 || i.tm.operand_types[op].bitfield.acc))
5490 {
5491 as_bad (_("`%s%s' not allowed with `%s%c'"),
5492 register_prefix,
5493 i.op[op].regs->reg_name,
5494 i.tm.name,
5495 i.suffix);
5496 return 0;
5497 }
5498 /* Warn if the e prefix on a general reg is missing. */
5499 else if ((!quiet_warnings || flag_code == CODE_64BIT)
5500 && i.types[op].bitfield.reg16
5501 && (i.tm.operand_types[op].bitfield.reg32
5502 || i.tm.operand_types[op].bitfield.acc))
5503 {
5504 /* Prohibit these changes in 64-bit mode, since the
5505 lowering is more complicated. */
5506 if (flag_code == CODE_64BIT)
5507 {
5508 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5509 register_prefix, i.op[op].regs->reg_name,
5510 i.suffix);
5511 return 0;
5512 }
5513#if REGISTER_WARNINGS
5514 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5515 register_prefix,
5516 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
5517 register_prefix, i.op[op].regs->reg_name, i.suffix);
5518#endif
5519 }
5520 /* Warn if the r prefix on a general reg is present. */
5521 else if (i.types[op].bitfield.reg64
5522 && (i.tm.operand_types[op].bitfield.reg32
5523 || i.tm.operand_types[op].bitfield.acc))
5524 {
5525 if (intel_syntax
5526 && i.tm.opcode_modifier.toqword
5527 && !i.types[0].bitfield.regxmm)
5528 {
5529 /* Convert to QWORD. We want REX byte. */
5530 i.suffix = QWORD_MNEM_SUFFIX;
5531 }
5532 else
5533 {
5534 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5535 register_prefix, i.op[op].regs->reg_name,
5536 i.suffix);
5537 return 0;
5538 }
5539 }
5540 return 1;
5541}
5542
5543static int
5544check_qword_reg (void)
5545{
5546 int op;
5547
5548 for (op = i.operands; --op >= 0; )
5549 /* Reject eight bit registers, except where the template requires
5550 them. (e.g. movzb) */
5551 if (i.types[op].bitfield.reg8
5552 && (i.tm.operand_types[op].bitfield.reg16
5553 || i.tm.operand_types[op].bitfield.reg32
5554 || i.tm.operand_types[op].bitfield.acc))
5555 {
5556 as_bad (_("`%s%s' not allowed with `%s%c'"),
5557 register_prefix,
5558 i.op[op].regs->reg_name,
5559 i.tm.name,
5560 i.suffix);
5561 return 0;
5562 }
5563 /* Warn if the r prefix on a general reg is missing. */
5564 else if ((i.types[op].bitfield.reg16
5565 || i.types[op].bitfield.reg32)
5566 && (i.tm.operand_types[op].bitfield.reg32
5567 || i.tm.operand_types[op].bitfield.acc))
5568 {
5569 /* 16/32-bit registers can't take a `q' suffix; convert to DWORD when
5570 the template allows it (Intel syntax, ToDword), else it's an error. */
5571 if (intel_syntax
5572 && i.tm.opcode_modifier.todword
5573 && !i.types[0].bitfield.regxmm)
5574 {
5575 /* Convert to DWORD. We don't want REX byte. */
5576 i.suffix = LONG_MNEM_SUFFIX;
5577 }
5578 else
5579 {
5580 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5581 register_prefix, i.op[op].regs->reg_name,
5582 i.suffix);
5583 return 0;
5584 }
5585 }
5586 return 1;
5587}
5588
5589static int
5590check_word_reg (void)
5591{
5592 int op;
5593 for (op = i.operands; --op >= 0;)
5594 /* Reject eight bit registers, except where the template requires
5595 them. (e.g. movzb) */
5596 if (i.types[op].bitfield.reg8
5597 && (i.tm.operand_types[op].bitfield.reg16
5598 || i.tm.operand_types[op].bitfield.reg32
5599 || i.tm.operand_types[op].bitfield.acc))
5600 {
5601 as_bad (_("`%s%s' not allowed with `%s%c'"),
5602 register_prefix,
5603 i.op[op].regs->reg_name,
5604 i.tm.name,
5605 i.suffix);
5606 return 0;
5607 }
5608 /* Warn if the e or r prefix on a general reg is present. */
5609 else if ((!quiet_warnings || flag_code == CODE_64BIT)
5610 && (i.types[op].bitfield.reg32
5611 || i.types[op].bitfield.reg64)
5612 && (i.tm.operand_types[op].bitfield.reg16
5613 || i.tm.operand_types[op].bitfield.acc))
5614 {
5615 /* Prohibit these changes in 64-bit mode, since the
5616 lowering is more complicated. */
5617 if (flag_code == CODE_64BIT)
5618 {
5619 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5620 register_prefix, i.op[op].regs->reg_name,
5621 i.suffix);
5622 return 0;
5623 }
5624#if REGISTER_WARNINGS
5625 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5626 register_prefix,
5627 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
5628 register_prefix, i.op[op].regs->reg_name, i.suffix);
5629#endif
5630 }
5631 return 1;
5632}
5633
5634static int
5635update_imm (unsigned int j)
5636{
5637 i386_operand_type overlap = i.types[j];
5638 if ((overlap.bitfield.imm8
5639 || overlap.bitfield.imm8s
5640 || overlap.bitfield.imm16
5641 || overlap.bitfield.imm32
5642 || overlap.bitfield.imm32s
5643 || overlap.bitfield.imm64)
5644 && !operand_type_equal (&overlap, &imm8)
5645 && !operand_type_equal (&overlap, &imm8s)
5646 && !operand_type_equal (&overlap, &imm16)
5647 && !operand_type_equal (&overlap, &imm32)
5648 && !operand_type_equal (&overlap, &imm32s)
5649 && !operand_type_equal (&overlap, &imm64))
5650 {
5651 if (i.suffix)
5652 {
5653 i386_operand_type temp;
5654
5655 operand_type_set (&temp, 0);
5656 if (i.suffix == BYTE_MNEM_SUFFIX)
5657 {
5658 temp.bitfield.imm8 = overlap.bitfield.imm8;
5659 temp.bitfield.imm8s = overlap.bitfield.imm8s;
5660 }
5661 else if (i.suffix == WORD_MNEM_SUFFIX)
5662 temp.bitfield.imm16 = overlap.bitfield.imm16;
5663 else if (i.suffix == QWORD_MNEM_SUFFIX)
5664 {
5665 temp.bitfield.imm64 = overlap.bitfield.imm64;
5666 temp.bitfield.imm32s = overlap.bitfield.imm32s;
5667 }
5668 else
5669 temp.bitfield.imm32 = overlap.bitfield.imm32;
5670 overlap = temp;
5671 }
5672 else if (operand_type_equal (&overlap, &imm16_32_32s)
5673 || operand_type_equal (&overlap, &imm16_32)
5674 || operand_type_equal (&overlap, &imm16_32s))
5675 {
5676 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5677 overlap = imm16;
5678 else
5679 overlap = imm32s;
5680 }
5681 if (!operand_type_equal (&overlap, &imm8)
5682 && !operand_type_equal (&overlap, &imm8s)
5683 && !operand_type_equal (&overlap, &imm16)
5684 && !operand_type_equal (&overlap, &imm32)
5685 && !operand_type_equal (&overlap, &imm32s)
5686 && !operand_type_equal (&overlap, &imm64))
5687 {
5688 as_bad (_("no instruction mnemonic suffix given; "
5689 "can't determine immediate size"));
5690 return 0;
5691 }
5692 }
5693 i.types[j] = overlap;
5694
5695 return 1;
5696}
5697
5698static int
5699finalize_imm (void)
5700{
5701 unsigned int j, n;
5702
5703 /* Update the first 2 immediate operands. */
5704 n = i.operands > 2 ? 2 : i.operands;
5705 if (n)
5706 {
5707 for (j = 0; j < n; j++)
5708 if (update_imm (j) == 0)
5709 return 0;
5710
5711 /* The 3rd operand can't be an immediate operand. */
5712 gas_assert (operand_type_check (i.types[2], imm) == 0);
5713 }
5714
5715 return 1;
5716}
5717
5718static int
5719bad_implicit_operand (int xmm)
5720{
5721 const char *ireg = xmm ? "xmm0" : "ymm0";
5722
5723 if (intel_syntax)
5724 as_bad (_("the last operand of `%s' must be `%s%s'"),
5725 i.tm.name, register_prefix, ireg);
5726 else
5727 as_bad (_("the first operand of `%s' must be `%s%s'"),
5728 i.tm.name, register_prefix, ireg);
5729 return 0;
5730}
5731
5732static int
5733process_operands (void)
5734{
5735 /* Default segment register this instruction will use for memory
5736 accesses. 0 means unknown. This is only for optimizing out
5737 unnecessary segment overrides. */
5738 const seg_entry *default_seg = 0;
5739
5740 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5741 {
5742 unsigned int dupl = i.operands;
5743 unsigned int dest = dupl - 1;
5744 unsigned int j;
5745
5746 /* The destination must be an xmm register. */
5747 gas_assert (i.reg_operands
5748 && MAX_OPERANDS > dupl
5749 && operand_type_equal (&i.types[dest], &regxmm));
5750
5751 if (i.tm.opcode_modifier.firstxmm0)
5752 {
5753 /* The first operand is implicit and must be xmm0. */
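/* E.g. blendvps, blendvpd and pblendvb use %xmm0 as a fixed selector
   register, so only register number 0 is accepted here.  */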
5754 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5755 if (register_number (i.op[0].regs) != 0)
5756 return bad_implicit_operand (1);
5757
5758 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5759 {
5760 /* Keep xmm0 for instructions with VEX prefix and 3
5761 sources. */
5762 goto duplicate;
5763 }
5764 else
5765 {
5766 /* We remove the first xmm0 and keep the number of
5767 operands unchanged, which in fact duplicates the
5768 destination. */
5769 for (j = 1; j < i.operands; j++)
5770 {
5771 i.op[j - 1] = i.op[j];
5772 i.types[j - 1] = i.types[j];
5773 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5774 }
5775 }
5776 }
5777 else if (i.tm.opcode_modifier.implicit1stxmm0)
5778 {
5779 gas_assert ((MAX_OPERANDS - 1) > dupl
5780 && (i.tm.opcode_modifier.vexsources
5781 == VEX3SOURCES));
5782
5783 /* Add the implicit xmm0 for instructions with VEX prefix
5784 and 3 sources. */
5785 for (j = i.operands; j > 0; j--)
5786 {
5787 i.op[j] = i.op[j - 1];
5788 i.types[j] = i.types[j - 1];
5789 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5790 }
5791 i.op[0].regs
5792 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5793 i.types[0] = regxmm;
5794 i.tm.operand_types[0] = regxmm;
5795
5796 i.operands += 2;
5797 i.reg_operands += 2;
5798 i.tm.operands += 2;
5799
5800 dupl++;
5801 dest++;
5802 i.op[dupl] = i.op[dest];
5803 i.types[dupl] = i.types[dest];
5804 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5805 }
5806 else
5807 {
5808duplicate:
5809 i.operands++;
5810 i.reg_operands++;
5811 i.tm.operands++;
5812
5813 i.op[dupl] = i.op[dest];
5814 i.types[dupl] = i.types[dest];
5815 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5816 }
5817
5818 if (i.tm.opcode_modifier.immext)
5819 process_immext ();
5820 }
5821 else if (i.tm.opcode_modifier.firstxmm0)
5822 {
5823 unsigned int j;
5824
5825 /* The first operand is implicit and must be xmm0/ymm0/zmm0. */
5826 gas_assert (i.reg_operands
5827 && (operand_type_equal (&i.types[0], &regxmm)
5828 || operand_type_equal (&i.types[0], &regymm)
5829 || operand_type_equal (&i.types[0], &regzmm)));
5830 if (register_number (i.op[0].regs) != 0)
5831 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5832
5833 for (j = 1; j < i.operands; j++)
5834 {
5835 i.op[j - 1] = i.op[j];
5836 i.types[j - 1] = i.types[j];
5837
5838 /* We need to adjust fields in i.tm since they are used by
5839 build_modrm_byte. */
5840 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5841 }
5842
5843 i.operands--;
5844 i.reg_operands--;
5845 i.tm.operands--;
5846 }
5847 else if (i.tm.opcode_modifier.regkludge)
5848 {
5849 /* The imul $imm, %reg instruction is converted into
5850 imul $imm, %reg, %reg, and the clr %reg instruction
5851 is converted into xor %reg, %reg. */
5852
5853 unsigned int first_reg_op;
5854
5855 if (operand_type_check (i.types[0], reg))
5856 first_reg_op = 0;
5857 else
5858 first_reg_op = 1;
5859 /* Pretend we saw the extra register operand. */
5860 gas_assert (i.reg_operands == 1
5861 && i.op[first_reg_op + 1].regs == 0);
5862 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5863 i.types[first_reg_op + 1] = i.types[first_reg_op];
5864 i.operands++;
5865 i.reg_operands++;
5866 }
5867
5868 if (i.tm.opcode_modifier.shortform)
5869 {
5870 if (i.types[0].bitfield.sreg2
5871 || i.types[0].bitfield.sreg3)
5872 {
5873 if (i.tm.base_opcode == POP_SEG_SHORT
5874 && i.op[0].regs->reg_num == 1)
5875 {
5876 as_bad (_("you can't `pop %scs'"), register_prefix);
5877 return 0;
5878 }
5879 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5880 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5881 i.rex |= REX_B;
5882 }
5883 else
5884 {
5885 /* The register or float register operand is in operand
5886 0 or 1. */
5887 unsigned int op;
5888
5889 if (i.types[0].bitfield.floatreg
5890 || operand_type_check (i.types[0], reg))
5891 op = 0;
5892 else
5893 op = 1;
5894 /* Register goes in low 3 bits of opcode. */
5895 i.tm.base_opcode |= i.op[op].regs->reg_num;
5896 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5897 i.rex |= REX_B;
5898 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5899 {
5900 /* Warn about some common errors, but press on regardless.
5901 The first case can be generated by gcc (<= 2.8.1). */
5902 if (i.operands == 2)
5903 {
5904 /* Reversed arguments on faddp, fsubp, etc. */
5905 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5906 register_prefix, i.op[!intel_syntax].regs->reg_name,
5907 register_prefix, i.op[intel_syntax].regs->reg_name);
5908 }
5909 else
5910 {
5911 /* Extraneous `l' suffix on fp insn. */
5912 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5913 register_prefix, i.op[0].regs->reg_name);
5914 }
5915 }
5916 }
5917 }
5918 else if (i.tm.opcode_modifier.modrm)
5919 {
5920 /* The opcode is completed (modulo i.tm.extension_opcode which
5921 must be put into the modrm byte). Now, we make the modrm and
5922 index base bytes based on all the info we've collected. */
5923
5924 default_seg = build_modrm_byte ();
5925 }
5926 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5927 {
5928 default_seg = &ds;
5929 }
5930 else if (i.tm.opcode_modifier.isstring)
5931 {
5932 /* For the string instructions that allow a segment override
5933 on one of their operands, the default segment is ds. */
5934 default_seg = &ds;
5935 }
5936
5937 if (i.tm.base_opcode == 0x8d /* lea */
5938 && i.seg[0]
5939 && !quiet_warnings)
5940 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5941
5942 /* If a segment was explicitly specified, and the specified segment
5943 is not the default, use an opcode prefix to select it. If we
5944 never figured out what the default segment is, then default_seg
5945 will be zero at this point, and the specified segment prefix will
5946 always be used. */
5947 if ((i.seg[0]) && (i.seg[0] != default_seg))
5948 {
5949 if (!add_prefix (i.seg[0]->seg_prefix))
5950 return 0;
5951 }
5952 return 1;
5953}
5954
5955static const seg_entry *
5956build_modrm_byte (void)
5957{
5958 const seg_entry *default_seg = 0;
5959 unsigned int source, dest;
5960 int vex_3_sources;
5961
5962 /* The first operand of instructions with VEX prefix and 3 sources
5963 must be VEX_Imm4. */
5964 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5965 if (vex_3_sources)
5966 {
5967 unsigned int nds, reg_slot;
5968 expressionS *exp;
5969
5970 if (i.tm.opcode_modifier.veximmext
5971 && i.tm.opcode_modifier.immext)
5972 {
5973 dest = i.operands - 2;
5974 gas_assert (dest == 3);
5975 }
5976 else
5977 dest = i.operands - 1;
5978 nds = dest - 1;
5979
5980 /* There are 2 kinds of instructions:
5981 1. 5 operands: 4 register operands or 3 register operands
5982 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5983 VexW0 or VexW1. The destination must be an XMM, YMM or
5984 ZMM register.
5985 2. 4 operands: 4 register operands or 3 register operands
5986 plus 1 memory operand, VexXDS, and VexImmExt */
5987 gas_assert ((i.reg_operands == 4
5988 || (i.reg_operands == 3 && i.mem_operands == 1))
5989 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5990 && (i.tm.opcode_modifier.veximmext
5991 || (i.imm_operands == 1
5992 && i.types[0].bitfield.vec_imm4
5993 && (i.tm.opcode_modifier.vexw == VEXW0
5994 || i.tm.opcode_modifier.vexw == VEXW1)
5995 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5996 || operand_type_equal (&i.tm.operand_types[dest], &regymm)
5997 || operand_type_equal (&i.tm.operand_types[dest], &regzmm)))));
5998
5999 if (i.imm_operands == 0)
6000 {
6001 /* When there is no immediate operand, generate an 8bit
6002 immediate operand to encode the first operand. */
6003 exp = &im_expressions[i.imm_operands++];
6004 i.op[i.operands].imms = exp;
6005 i.types[i.operands] = imm8;
6006 i.operands++;
6007 /* If VexW1 is set, the first operand is the source and
6008 the second operand is encoded in the immediate operand. */
6009 if (i.tm.opcode_modifier.vexw == VEXW1)
6010 {
6011 source = 0;
6012 reg_slot = 1;
6013 }
6014 else
6015 {
6016 source = 1;
6017 reg_slot = 0;
6018 }
6019
6020 /* FMA swaps REG and NDS. */
6021 if (i.tm.cpu_flags.bitfield.cpufma)
6022 {
6023 unsigned int tmp;
6024 tmp = reg_slot;
6025 reg_slot = nds;
6026 nds = tmp;
6027 }
6028
6029 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
6030 &regxmm)
6031 || operand_type_equal (&i.tm.operand_types[reg_slot],
6032 &regymm)
6033 || operand_type_equal (&i.tm.operand_types[reg_slot],
6034 &regzmm));
6035 exp->X_op = O_constant;
6036 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
6037 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
6038 }
6039 else
6040 {
6041 unsigned int imm_slot;
6042
6043 if (i.tm.opcode_modifier.vexw == VEXW0)
6044 {
6045 /* If VexW0 is set, the third operand is the source and
6046 the second operand is encoded in the immediate
6047 operand. */
6048 source = 2;
6049 reg_slot = 1;
6050 }
6051 else
6052 {
6053 /* VexW1 is set, the second operand is the source and
6054 the third operand is encoded in the immediate
6055 operand. */
6056 source = 1;
6057 reg_slot = 2;
6058 }
6059
6060 if (i.tm.opcode_modifier.immext)
6061 {
6062 /* When ImmExt is set, the immediate byte is the last
6063 operand. */
6064 imm_slot = i.operands - 1;
6065 source--;
6066 reg_slot--;
6067 }
6068 else
6069 {
6070 imm_slot = 0;
6071
6072 /* Turn on Imm8 so that output_imm will generate it. */
6073 i.types[imm_slot].bitfield.imm8 = 1;
6074 }
6075
6076 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
6077 &regxmm)
6078 || operand_type_equal (&i.tm.operand_types[reg_slot],
6079 &regymm)
6080 || operand_type_equal (&i.tm.operand_types[reg_slot],
6081 &regzmm));
6082 i.op[imm_slot].imms->X_add_number
6083 |= register_number (i.op[reg_slot].regs) << 4;
6084 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
6085 }
6086
6087 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
6088 || operand_type_equal (&i.tm.operand_types[nds],
6089 &regymm)
6090 || operand_type_equal (&i.tm.operand_types[nds],
6091 &regzmm));
6092 i.vex.register_specifier = i.op[nds].regs;
6093 }
6094 else
6095 source = dest = 0;
6096
6097 /* i.reg_operands MUST be the number of real register operands;
6098 implicit registers do not count. If there are 3 register
6099 operands, it must be an instruction with VexNDS. For an
6100 instruction with VexNDD, the destination register is encoded
6101 in the VEX prefix. If there are 4 register operands, it must be
6102 an instruction with a VEX prefix and 3 sources. */
6103 if (i.mem_operands == 0
6104 && ((i.reg_operands == 2
6105 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
6106 || (i.reg_operands == 3
6107 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
6108 || (i.reg_operands == 4 && vex_3_sources)))
6109 {
6110 switch (i.operands)
6111 {
6112 case 2:
6113 source = 0;
6114 break;
6115 case 3:
6116 /* When there are 3 operands, one of them may be an immediate,
6117 which may be the first or the last operand. Otherwise,
6118 the first operand must be the shift count register (%cl) or
6119 it is an instruction with VexNDS. */
6120 gas_assert (i.imm_operands == 1
6121 || (i.imm_operands == 0
6122 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
6123 || i.types[0].bitfield.shiftcount)));
6124 if (operand_type_check (i.types[0], imm)
6125 || i.types[0].bitfield.shiftcount)
6126 source = 1;
6127 else
6128 source = 0;
6129 break;
6130 case 4:
6131 /* When there are 4 operands, the first two must be 8bit
6132 immediate operands. The source operand will be the 3rd
6133 one.
6134
6135 For instructions with VexNDS, if the first operand is
6136 an imm8, the source operand is the 2nd one. If the last
6137 operand is an imm8, the source operand is the first one. */
6138 gas_assert ((i.imm_operands == 2
6139 && i.types[0].bitfield.imm8
6140 && i.types[1].bitfield.imm8)
6141 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
6142 && i.imm_operands == 1
6143 && (i.types[0].bitfield.imm8
6144 || i.types[i.operands - 1].bitfield.imm8
6145 || i.rounding)));
6146 if (i.imm_operands == 2)
6147 source = 2;
6148 else
6149 {
6150 if (i.types[0].bitfield.imm8)
6151 source = 1;
6152 else
6153 source = 0;
6154 }
6155 break;
6156 case 5:
6157 if (i.tm.opcode_modifier.evex)
6158 {
6159 /* For EVEX instructions, when there are 5 operands, the
6160 first one must be an immediate operand. If the second one
6161 is an immediate operand, the source operand is the 3rd
6162 one. If the last one is an immediate operand, the source
6163 operand is the 2nd one. */
6164 gas_assert (i.imm_operands == 2
6165 && i.tm.opcode_modifier.sae
6166 && operand_type_check (i.types[0], imm));
6167 if (operand_type_check (i.types[1], imm))
6168 source = 2;
6169 else if (operand_type_check (i.types[4], imm))
6170 source = 1;
6171 else
6172 abort ();
6173 }
6174 break;
6175 default:
6176 abort ();
6177 }
6178
6179 if (!vex_3_sources)
6180 {
6181 dest = source + 1;
6182
6183 /* The RC/SAE operand could be between DEST and SRC. That happens
6184 when one operand is a GPR and the other one is an XMM/YMM/ZMM
6185 register. */
6186 if (i.rounding && i.rounding->operand == (int) dest)
6187 dest++;
6188
6189 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6190 {
6191 /* For instructions with VexNDS, the register-only source
6192 operand must be a 32/64bit integer, XMM, YMM or ZMM
6193 register. It is encoded in the VEX prefix. We need to
6194 clear the RegMem bit before calling operand_type_equal. */
6195
6196 i386_operand_type op;
6197 unsigned int vvvv;
6198
6199 /* Check register-only source operand when two source
6200 operands are swapped. */
6201 if (!i.tm.operand_types[source].bitfield.baseindex
6202 && i.tm.operand_types[dest].bitfield.baseindex)
6203 {
6204 vvvv = source;
6205 source = dest;
6206 }
6207 else
6208 vvvv = dest;
6209
6210 op = i.tm.operand_types[vvvv];
6211 op.bitfield.regmem = 0;
6212 if ((dest + 1) >= i.operands
6213 || (!op.bitfield.reg32
6214 && !op.bitfield.reg64
6215 && !operand_type_equal (&op, &regxmm)
6216 && !operand_type_equal (&op, &regymm)
6217 && !operand_type_equal (&op, &regzmm)
6218 && !operand_type_equal (&op, &regmask)))
6219 abort ();
6220 i.vex.register_specifier = i.op[vvvv].regs;
6221 dest++;
6222 }
6223 }
6224
6225 i.rm.mode = 3;
6226 /* One of the register operands will be encoded in the i.rm.reg
6227 field, the other in the combined i.rm.mode and i.rm.regmem
6228 fields. If no form of this instruction supports a memory
6229 destination operand, then we assume the source operand may
6230 sometimes be a memory operand and so we need to store the
6231 destination in the i.rm.reg field. */
6232 if (!i.tm.operand_types[dest].bitfield.regmem
6233 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
6234 {
6235 i.rm.reg = i.op[dest].regs->reg_num;
6236 i.rm.regmem = i.op[source].regs->reg_num;
6237 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6238 i.rex |= REX_R;
6239 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6240 i.vrex |= REX_R;
6241 if ((i.op[source].regs->reg_flags & RegRex) != 0)
6242 i.rex |= REX_B;
6243 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6244 i.vrex |= REX_B;
6245 }
6246 else
6247 {
6248 i.rm.reg = i.op[source].regs->reg_num;
6249 i.rm.regmem = i.op[dest].regs->reg_num;
6250 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6251 i.rex |= REX_B;
6252 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6253 i.vrex |= REX_B;
6254 if ((i.op[source].regs->reg_flags & RegRex) != 0)
6255 i.rex |= REX_R;
6256 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6257 i.vrex |= REX_R;
6258 }
6259 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
6260 {
6261 if (!i.types[0].bitfield.control
6262 && !i.types[1].bitfield.control)
6263 abort ();
6264 i.rex &= ~(REX_R | REX_B);
6265 add_prefix (LOCK_PREFIX_OPCODE);
6266 }
6267 }
6268 else
6269 { /* If it's not 2 reg operands... */
6270 unsigned int mem;
6271
6272 if (i.mem_operands)
6273 {
6274 unsigned int fake_zero_displacement = 0;
6275 unsigned int op;
6276
6277 for (op = 0; op < i.operands; op++)
6278 if (operand_type_check (i.types[op], anymem))
6279 break;
6280 gas_assert (op < i.operands);
6281
6282 if (i.tm.opcode_modifier.vecsib)
6283 {
6284 if (i.index_reg->reg_num == RegEiz
6285 || i.index_reg->reg_num == RegRiz)
6286 abort ();
6287
6288 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6289 if (!i.base_reg)
6290 {
6291 i.sib.base = NO_BASE_REGISTER;
6292 i.sib.scale = i.log2_scale_factor;
6293 /* No Vec_Disp8 if there is no base. */
6294 i.types[op].bitfield.vec_disp8 = 0;
6295 i.types[op].bitfield.disp8 = 0;
6296 i.types[op].bitfield.disp16 = 0;
6297 i.types[op].bitfield.disp64 = 0;
6298 if (flag_code != CODE_64BIT)
6299 {
6300 /* Must be 32 bit */
6301 i.types[op].bitfield.disp32 = 1;
6302 i.types[op].bitfield.disp32s = 0;
6303 }
6304 else
6305 {
6306 i.types[op].bitfield.disp32 = 0;
6307 i.types[op].bitfield.disp32s = 1;
6308 }
6309 }
6310 i.sib.index = i.index_reg->reg_num;
6311 if ((i.index_reg->reg_flags & RegRex) != 0)
6312 i.rex |= REX_X;
6313 if ((i.index_reg->reg_flags & RegVRex) != 0)
6314 i.vrex |= REX_X;
6315 }
6316
6317 default_seg = &ds;
6318
6319 if (i.base_reg == 0)
6320 {
6321 i.rm.mode = 0;
6322 if (!i.disp_operands)
6323 {
6324 fake_zero_displacement = 1;
6325 /* Instructions with VSIB byte need 32bit displacement
6326 if there is no base register. */
6327 if (i.tm.opcode_modifier.vecsib)
6328 i.types[op].bitfield.disp32 = 1;
6329 }
6330 if (i.index_reg == 0)
6331 {
6332 gas_assert (!i.tm.opcode_modifier.vecsib);
6333 /* Operand is just <disp> */
6334 if (flag_code == CODE_64BIT)
6335 {
6336 /* 64bit mode replaces 32bit absolute
6337 addressing with RIP relative addressing;
6338 absolute addressing is encoded by one of the
6339 redundant SIB forms. */
6340 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6341 i.sib.base = NO_BASE_REGISTER;
6342 i.sib.index = NO_INDEX_REGISTER;
6343 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
6344 ? disp32s : disp32);
6345 }
6346 else if ((flag_code == CODE_16BIT)
6347 ^ (i.prefix[ADDR_PREFIX] != 0))
6348 {
6349 i.rm.regmem = NO_BASE_REGISTER_16;
6350 i.types[op] = disp16;
6351 }
6352 else
6353 {
6354 i.rm.regmem = NO_BASE_REGISTER;
6355 i.types[op] = disp32;
6356 }
6357 }
6358 else if (!i.tm.opcode_modifier.vecsib)
6359 {
6360 /* !i.base_reg && i.index_reg */
6361 if (i.index_reg->reg_num == RegEiz
6362 || i.index_reg->reg_num == RegRiz)
6363 i.sib.index = NO_INDEX_REGISTER;
6364 else
6365 i.sib.index = i.index_reg->reg_num;
6366 i.sib.base = NO_BASE_REGISTER;
6367 i.sib.scale = i.log2_scale_factor;
6368 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6369 /* No Vec_Disp8 if there is no base. */
6370 i.types[op].bitfield.vec_disp8 = 0;
6371 i.types[op].bitfield.disp8 = 0;
6372 i.types[op].bitfield.disp16 = 0;
6373 i.types[op].bitfield.disp64 = 0;
6374 if (flag_code != CODE_64BIT)
6375 {
6376 /* Must be 32 bit */
6377 i.types[op].bitfield.disp32 = 1;
6378 i.types[op].bitfield.disp32s = 0;
6379 }
6380 else
6381 {
6382 i.types[op].bitfield.disp32 = 0;
6383 i.types[op].bitfield.disp32s = 1;
6384 }
6385 if ((i.index_reg->reg_flags & RegRex) != 0)
6386 i.rex |= REX_X;
6387 }
6388 }
6389 /* RIP addressing for 64bit mode. */
6390 else if (i.base_reg->reg_num == RegRip ||
6391 i.base_reg->reg_num == RegEip)
6392 {
6393 gas_assert (!i.tm.opcode_modifier.vecsib);
6394 i.rm.regmem = NO_BASE_REGISTER;
6395 i.types[op].bitfield.disp8 = 0;
6396 i.types[op].bitfield.disp16 = 0;
6397 i.types[op].bitfield.disp32 = 0;
6398 i.types[op].bitfield.disp32s = 1;
6399 i.types[op].bitfield.disp64 = 0;
6400 i.types[op].bitfield.vec_disp8 = 0;
6401 i.flags[op] |= Operand_PCrel;
6402 if (! i.disp_operands)
6403 fake_zero_displacement = 1;
6404 }
6405 else if (i.base_reg->reg_type.bitfield.reg16)
6406 {
6407 gas_assert (!i.tm.opcode_modifier.vecsib);
6408 switch (i.base_reg->reg_num)
6409 {
6410 case 3: /* (%bx) */
6411 if (i.index_reg == 0)
6412 i.rm.regmem = 7;
6413 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
6414 i.rm.regmem = i.index_reg->reg_num - 6;
6415 break;
6416 case 5: /* (%bp) */
6417 default_seg = &ss;
6418 if (i.index_reg == 0)
6419 {
6420 i.rm.regmem = 6;
6421 if (operand_type_check (i.types[op], disp) == 0)
6422 {
6423 /* fake (%bp) into 0(%bp) */
6424 if (i.tm.operand_types[op].bitfield.vec_disp8)
6425 i.types[op].bitfield.vec_disp8 = 1;
6426 else
6427 i.types[op].bitfield.disp8 = 1;
6428 fake_zero_displacement = 1;
6429 }
6430 }
6431 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
6432 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
6433 break;
6434 default: /* (%si) -> 4 or (%di) -> 5 */
6435 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
6436 }
6437 i.rm.mode = mode_from_disp_size (i.types[op]);
6438 }
6439 else /* i.base_reg and 32/64 bit mode */
6440 {
6441 if (flag_code == CODE_64BIT
6442 && operand_type_check (i.types[op], disp))
6443 {
6444 i386_operand_type temp;
6445 operand_type_set (&temp, 0);
6446 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
6447 temp.bitfield.vec_disp8
6448 = i.types[op].bitfield.vec_disp8;
6449 i.types[op] = temp;
6450 if (i.prefix[ADDR_PREFIX] == 0)
6451 i.types[op].bitfield.disp32s = 1;
6452 else
6453 i.types[op].bitfield.disp32 = 1;
6454 }
6455
6456 if (!i.tm.opcode_modifier.vecsib)
6457 i.rm.regmem = i.base_reg->reg_num;
6458 if ((i.base_reg->reg_flags & RegRex) != 0)
6459 i.rex |= REX_B;
6460 i.sib.base = i.base_reg->reg_num;
6461 /* x86-64 ignores REX prefix bit here to avoid decoder
6462 complications. */
6463 if (!(i.base_reg->reg_flags & RegRex)
6464 && (i.base_reg->reg_num == EBP_REG_NUM
6465 || i.base_reg->reg_num == ESP_REG_NUM))
6466 default_seg = &ss;
6467 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
6468 {
6469 fake_zero_displacement = 1;
6470 if (i.tm.operand_types [op].bitfield.vec_disp8)
6471 i.types[op].bitfield.vec_disp8 = 1;
6472 else
6473 i.types[op].bitfield.disp8 = 1;
6474 }
6475 i.sib.scale = i.log2_scale_factor;
6476 if (i.index_reg == 0)
6477 {
6478 gas_assert (!i.tm.opcode_modifier.vecsib);
6479 /* <disp>(%esp) becomes a two-byte modrm with no index
6480 register. We've already stored the code for esp
6481 in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
6482 Any base register besides %esp will not use the
6483 extra modrm byte. */
6484 i.sib.index = NO_INDEX_REGISTER;
6485 }
6486 else if (!i.tm.opcode_modifier.vecsib)
6487 {
6488 if (i.index_reg->reg_num == RegEiz
6489 || i.index_reg->reg_num == RegRiz)
6490 i.sib.index = NO_INDEX_REGISTER;
6491 else
6492 i.sib.index = i.index_reg->reg_num;
6493 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6494 if ((i.index_reg->reg_flags & RegRex) != 0)
6495 i.rex |= REX_X;
6496 }
6497
6498 if (i.disp_operands
6499 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
6500 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
6501 i.rm.mode = 0;
6502 else
6503 {
6504 if (!fake_zero_displacement
6505 && !i.disp_operands
6506 && i.disp_encoding)
6507 {
6508 fake_zero_displacement = 1;
6509 if (i.disp_encoding == disp_encoding_8bit)
6510 i.types[op].bitfield.disp8 = 1;
6511 else
6512 i.types[op].bitfield.disp32 = 1;
6513 }
6514 i.rm.mode = mode_from_disp_size (i.types[op]);
6515 }
6516 }
6517
6518 if (fake_zero_displacement)
6519 {
6520 /* Fakes a zero displacement assuming that i.types[op]
6521 holds the correct displacement size. */
6522 expressionS *exp;
6523
6524 gas_assert (i.op[op].disps == 0);
6525 exp = &disp_expressions[i.disp_operands++];
6526 i.op[op].disps = exp;
6527 exp->X_op = O_constant;
6528 exp->X_add_number = 0;
6529 exp->X_add_symbol = (symbolS *) 0;
6530 exp->X_op_symbol = (symbolS *) 0;
6531 }
6532
6533 mem = op;
6534 }
6535 else
6536 mem = ~0;
6537
6538 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
6539 {
6540 if (operand_type_check (i.types[0], imm))
6541 i.vex.register_specifier = NULL;
6542 else
6543 {
6544 /* VEX.vvvv encodes one of the sources when the first
6545 operand is not an immediate. */
6546 if (i.tm.opcode_modifier.vexw == VEXW0)
6547 i.vex.register_specifier = i.op[0].regs;
6548 else
6549 i.vex.register_specifier = i.op[1].regs;
6550 }
6551
6552 /* Destination is an XMM register encoded in the ModRM.reg
6553 field and the VEX.R bit. */
6554 i.rm.reg = i.op[2].regs->reg_num;
6555 if ((i.op[2].regs->reg_flags & RegRex) != 0)
6556 i.rex |= REX_R;
6557
6558 /* ModRM.rm and VEX.B encode the other source. */
6559 if (!i.mem_operands)
6560 {
6561 i.rm.mode = 3;
6562
6563 if (i.tm.opcode_modifier.vexw == VEXW0)
6564 i.rm.regmem = i.op[1].regs->reg_num;
6565 else
6566 i.rm.regmem = i.op[0].regs->reg_num;
6567
6568 if ((i.op[1].regs->reg_flags & RegRex) != 0)
6569 i.rex |= REX_B;
6570 }
6571 }
6572 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
6573 {
6574 i.vex.register_specifier = i.op[2].regs;
6575 if (!i.mem_operands)
6576 {
6577 i.rm.mode = 3;
6578 i.rm.regmem = i.op[1].regs->reg_num;
6579 if ((i.op[1].regs->reg_flags & RegRex) != 0)
6580 i.rex |= REX_B;
6581 }
6582 }
6583 /* Fill in i.rm.reg or i.rm.regmem field with register operand
6584 (if any) based on i.tm.extension_opcode. Again, we must be
6585 careful to make sure that segment/control/debug/test/MMX
6586 registers are coded into the i.rm.reg field. */
6587 else if (i.reg_operands)
6588 {
6589 unsigned int op;
6590 unsigned int vex_reg = ~0;
6591
6592 for (op = 0; op < i.operands; op++)
6593 if (i.types[op].bitfield.reg8
6594 || i.types[op].bitfield.reg16
6595 || i.types[op].bitfield.reg32
6596 || i.types[op].bitfield.reg64
6597 || i.types[op].bitfield.regmmx
6598 || i.types[op].bitfield.regxmm
6599 || i.types[op].bitfield.regymm
6600 || i.types[op].bitfield.regbnd
6601 || i.types[op].bitfield.regzmm
6602 || i.types[op].bitfield.regmask
6603 || i.types[op].bitfield.sreg2
6604 || i.types[op].bitfield.sreg3
6605 || i.types[op].bitfield.control
6606 || i.types[op].bitfield.debug
6607 || i.types[op].bitfield.test)
6608 break;
6609
6610 if (vex_3_sources)
6611 op = dest;
6612 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6613 {
6614 /* For instructions with VexNDS, the register-only
6615 source operand is encoded in VEX prefix. */
6616 gas_assert (mem != (unsigned int) ~0);
6617
6618 if (op > mem)
6619 {
6620 vex_reg = op++;
6621 gas_assert (op < i.operands);
6622 }
6623 else
6624 {
6625 /* Check register-only source operand when two source
6626 operands are swapped. */
6627 if (!i.tm.operand_types[op].bitfield.baseindex
6628 && i.tm.operand_types[op + 1].bitfield.baseindex)
6629 {
6630 vex_reg = op;
6631 op += 2;
6632 gas_assert (mem == (vex_reg + 1)
6633 && op < i.operands);
6634 }
6635 else
6636 {
6637 vex_reg = op + 1;
6638 gas_assert (vex_reg < i.operands);
6639 }
6640 }
6641 }
6642 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
6643 {
6644 /* For instructions with VexNDD, the register destination
6645 is encoded in VEX prefix. */
6646 if (i.mem_operands == 0)
6647 {
6648 /* There is no memory operand. */
6649 gas_assert ((op + 2) == i.operands);
6650 vex_reg = op + 1;
6651 }
6652 else
6653 {
6654 /* There are only 2 operands. */
6655 gas_assert (op < 2 && i.operands == 2);
6656 vex_reg = 1;
6657 }
6658 }
6659 else
6660 gas_assert (op < i.operands);
6661
6662 if (vex_reg != (unsigned int) ~0)
6663 {
6664 i386_operand_type *type = &i.tm.operand_types[vex_reg];
6665
6666 if (type->bitfield.reg32 != 1
6667 && type->bitfield.reg64 != 1
6668 && !operand_type_equal (type, &regxmm)
6669 && !operand_type_equal (type, &regymm)
6670 && !operand_type_equal (type, &regzmm)
6671 && !operand_type_equal (type, &regmask))
6672 abort ();
6673
6674 i.vex.register_specifier = i.op[vex_reg].regs;
6675 }
6676
6677 /* Don't set OP operand twice. */
6678 if (vex_reg != op)
6679 {
6680 /* If there is an extension opcode to put here, the
6681 register number must be put into the regmem field. */
6682 if (i.tm.extension_opcode != None)
6683 {
6684 i.rm.regmem = i.op[op].regs->reg_num;
6685 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6686 i.rex |= REX_B;
6687 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
6688 i.vrex |= REX_B;
6689 }
6690 else
6691 {
6692 i.rm.reg = i.op[op].regs->reg_num;
6693 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6694 i.rex |= REX_R;
6695 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
6696 i.vrex |= REX_R;
6697 }
6698 }
6699
6700 /* Now, if no memory operand has set i.rm.mode to 0, 1 or 2, we
6701 must set it to 3 to indicate that this is a register operand
6702 in the regmem field. */
6703 if (!i.mem_operands)
6704 i.rm.mode = 3;
6705 }
6706
6707 /* Fill in i.rm.reg field with extension opcode (if any). */
6708 if (i.tm.extension_opcode != None)
6709 i.rm.reg = i.tm.extension_opcode;
6710 }
6711 return default_seg;
6712}
6713
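/* Illustrative note: output_branch handles the relaxable PC-relative
   branches.  For example, `jne target' is initially emitted in its short
   form (opcode 0x75 with a rel8 displacement) inside a machine-dependent
   frag; md_convert_frag may later widen it to the near form (0x0f 0x85
   with a rel32 displacement) if the displacement does not fit.  */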
6714static void
6715output_branch (void)
6716{
6717 char *p;
6718 int size;
6719 int code16;
6720 int prefix;
6721 relax_substateT subtype;
6722 symbolS *sym;
6723 offsetT off;
6724
6725 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
6726 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
6727
6728 prefix = 0;
6729 if (i.prefix[DATA_PREFIX] != 0)
6730 {
6731 prefix = 1;
6732 i.prefixes -= 1;
6733 code16 ^= CODE16;
6734 }
6735 /* Pentium4 branch hints. */
6736 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6737 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6738 {
6739 prefix++;
6740 i.prefixes--;
6741 }
6742 if (i.prefix[REX_PREFIX] != 0)
6743 {
6744 prefix++;
6745 i.prefixes--;
6746 }
6747
6748 /* BND prefixed jump. */
6749 if (i.prefix[BND_PREFIX] != 0)
6750 {
6751 FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
6752 i.prefixes -= 1;
6753 }
6754
6755 if (i.prefixes != 0 && !intel_syntax)
6756 as_warn (_("skipping prefixes on this instruction"));
6757
6758 /* It's always a symbol; end the frag and set up for relaxation.
6759 Make sure there is enough room in this frag for the largest
6760 instruction we may generate in md_convert_frag. This is 2
6761 bytes for the opcode and room for the prefix and largest
6762 displacement. */
6763 frag_grow (prefix + 2 + 4);
6764 /* Prefix and 1 opcode byte go in fr_fix. */
6765 p = frag_more (prefix + 1);
6766 if (i.prefix[DATA_PREFIX] != 0)
6767 *p++ = DATA_PREFIX_OPCODE;
6768 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6769 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6770 *p++ = i.prefix[SEG_PREFIX];
6771 if (i.prefix[REX_PREFIX] != 0)
6772 *p++ = i.prefix[REX_PREFIX];
6773 *p = i.tm.base_opcode;
6774
6775 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6776 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6777 else if (cpu_arch_flags.bitfield.cpui386)
6778 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6779 else
6780 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6781 subtype |= code16;
6782
6783 sym = i.op[0].disps->X_add_symbol;
6784 off = i.op[0].disps->X_add_number;
6785
6786 if (i.op[0].disps->X_op != O_constant
6787 && i.op[0].disps->X_op != O_symbol)
6788 {
6789 /* Handle complex expressions. */
6790 sym = make_expr_symbol (i.op[0].disps);
6791 off = 0;
6792 }
6793
6794 /* 1 possible extra opcode + 4 byte displacement go in var part.
6795 Pass reloc in fr_var. */
6796 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
6797}
6798
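/* Illustrative note: output_jump covers the non-relaxable jumps.  A
   `jumpbyte' instruction such as `jecxz target' (opcode 0xe3) always
   takes an 8-bit displacement, while a `jumpdword' one such as a direct
   `call' takes a 16- or 32-bit displacement depending on the operand
   size.  */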
6799static void
6800output_jump (void)
6801{
6802 char *p;
6803 int size;
6804 fixS *fixP;
6805
6806 if (i.tm.opcode_modifier.jumpbyte)
6807 {
6808 /* This is a loop or jecxz type instruction. */
6809 size = 1;
6810 if (i.prefix[ADDR_PREFIX] != 0)
6811 {
6812 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6813 i.prefixes -= 1;
6814 }
6815 /* Pentium4 branch hints. */
6816 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6817 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6818 {
6819 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6820 i.prefixes--;
6821 }
6822 }
6823 else
6824 {
6825 int code16;
6826
6827 code16 = 0;
6828 if (flag_code == CODE_16BIT)
6829 code16 = CODE16;
6830
6831 if (i.prefix[DATA_PREFIX] != 0)
6832 {
6833 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6834 i.prefixes -= 1;
6835 code16 ^= CODE16;
6836 }
6837
6838 size = 4;
6839 if (code16)
6840 size = 2;
6841 }
6842
6843 if (i.prefix[REX_PREFIX] != 0)
6844 {
6845 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6846 i.prefixes -= 1;
6847 }
6848
6849 /* BND prefixed jump. */
6850 if (i.prefix[BND_PREFIX] != 0)
6851 {
6852 FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
6853 i.prefixes -= 1;
6854 }
6855
6856 if (i.prefixes != 0 && !intel_syntax)
6857 as_warn (_("skipping prefixes on this instruction"));
6858
6859 p = frag_more (i.tm.opcode_length + size);
6860 switch (i.tm.opcode_length)
6861 {
6862 case 2:
6863 *p++ = i.tm.base_opcode >> 8;
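  /* Fall through.  */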
6864 case 1:
6865 *p++ = i.tm.base_opcode;
6866 break;
6867 default:
6868 abort ();
6869 }
6870
6871 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6872 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6873
6874 /* All jumps handled here are signed, but don't use a signed limit
6875 check for 32 and 16 bit jumps as we want to allow wrap around at
6876 4G and 64k respectively. */
6877 if (size == 1)
6878 fixP->fx_signed = 1;
6879}
6880
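/* Illustrative note: a direct far jump such as `ljmp $0x08, $target' is
   emitted here as opcode 0xea, followed by the 16- or 32-bit offset and
   then the 16-bit segment selector, matching the frag_more (prefix + 1
   + 2 + size) reservation below.  */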
6881static void
6882output_interseg_jump (void)
6883{
6884 char *p;
6885 int size;
6886 int prefix;
6887 int code16;
6888
6889 code16 = 0;
6890 if (flag_code == CODE_16BIT)
6891 code16 = CODE16;
6892
6893 prefix = 0;
6894 if (i.prefix[DATA_PREFIX] != 0)
6895 {
6896 prefix = 1;
6897 i.prefixes -= 1;
6898 code16 ^= CODE16;
6899 }
6900 if (i.prefix[REX_PREFIX] != 0)
6901 {
6902 prefix++;
6903 i.prefixes -= 1;
6904 }
6905
6906 size = 4;
6907 if (code16)
6908 size = 2;
6909
6910 if (i.prefixes != 0 && !intel_syntax)
6911 as_warn (_("skipping prefixes on this instruction"));
6912
6913 /* 1 opcode; 2 segment; offset */
6914 p = frag_more (prefix + 1 + 2 + size);
6915
6916 if (i.prefix[DATA_PREFIX] != 0)
6917 *p++ = DATA_PREFIX_OPCODE;
6918
6919 if (i.prefix[REX_PREFIX] != 0)
6920 *p++ = i.prefix[REX_PREFIX];
6921
6922 *p++ = i.tm.base_opcode;
6923 if (i.op[1].imms->X_op == O_constant)
6924 {
6925 offsetT n = i.op[1].imms->X_add_number;
6926
6927 if (size == 2
6928 && !fits_in_unsigned_word (n)
6929 && !fits_in_signed_word (n))
6930 {
6931 as_bad (_("16-bit jump out of range"));
6932 return;
6933 }
6934 md_number_to_chars (p, n, size);
6935 }
6936 else
6937 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6938 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6939 if (i.op[0].imms->X_op != O_constant)
6940 as_bad (_("can't handle non absolute segment in `%s'"),
6941 i.tm.name);
6942 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6943}
6944
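/* Illustrative note: for a normal (non-jump) instruction the bytes are
   emitted below in this order: legacy prefixes, VEX/EVEX prefix (if
   any), opcode byte(s), ModRM, optional SIB, displacement(s),
   immediate(s).  */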
6945static void
6946output_insn (void)
6947{
6948 fragS *insn_start_frag;
6949 offsetT insn_start_off;
6950
6951 /* Tie dwarf2 debug info to the address at the start of the insn.
6952 We can't do this after the insn has been output as the current
6953 frag may have been closed off, e.g. by frag_var. */
6954 dwarf2_emit_insn (0);
6955
6956 insn_start_frag = frag_now;
6957 insn_start_off = frag_now_fix ();
6958
6959 /* Output jumps. */
6960 if (i.tm.opcode_modifier.jump)
6961 output_branch ();
6962 else if (i.tm.opcode_modifier.jumpbyte
6963 || i.tm.opcode_modifier.jumpdword)
6964 output_jump ();
6965 else if (i.tm.opcode_modifier.jumpintersegment)
6966 output_interseg_jump ();
6967 else
6968 {
6969 /* Output normal instructions here. */
6970 char *p;
6971 unsigned char *q;
6972 unsigned int j;
6973 unsigned int prefix;
6974
6975 if (avoid_fence
6976 && i.tm.base_opcode == 0xfae
6977 && i.operands == 1
6978 && i.imm_operands == 1
6979 && (i.op[0].imms->X_add_number == 0xe8
6980 || i.op[0].imms->X_add_number == 0xf0
6981 || i.op[0].imms->X_add_number == 0xf8))
6982 {
6983 /* Encode lfence, mfence, and sfence as
6984 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
6985 offsetT val = 0x240483f0ULL;
6986 p = frag_more (5);
6987 md_number_to_chars (p, val, 5);
6988 return;
6989 }
6990
6991 /* Some processors fail on the LOCK prefix. This option makes the
6992 assembler ignore the LOCK prefix and serves as a workaround. */
6993 if (omit_lock_prefix)
6994 {
6995 if (i.tm.base_opcode == LOCK_PREFIX_OPCODE)
6996 return;
6997 i.prefix[LOCK_PREFIX] = 0;
6998 }
6999
7000 /* Since the VEX/EVEX prefix contains the implicit prefix, we
7001 don't need the explicit prefix. */
7002 if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
7003 {
7004 switch (i.tm.opcode_length)
7005 {
7006 case 3:
7007 if (i.tm.base_opcode & 0xff000000)
7008 {
7009 prefix = (i.tm.base_opcode >> 24) & 0xff;
7010 goto check_prefix;
7011 }
7012 break;
7013 case 2:
7014 if ((i.tm.base_opcode & 0xff0000) != 0)
7015 {
7016 prefix = (i.tm.base_opcode >> 16) & 0xff;
7017 if (i.tm.cpu_flags.bitfield.cpupadlock)
7018 {
7019check_prefix:
7020 if (prefix != REPE_PREFIX_OPCODE
7021 || (i.prefix[REP_PREFIX]
7022 != REPE_PREFIX_OPCODE))
7023 add_prefix (prefix);
7024 }
7025 else
7026 add_prefix (prefix);
7027 }
7028 break;
7029 case 1:
7030 break;
7031 default:
7032 abort ();
7033 }
7034
7035#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7036 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
7037 R_X86_64_GOTTPOFF relocation so that linker can safely
7038 perform IE->LE optimization. */
7039 if (x86_elf_abi == X86_64_X32_ABI
7040 && i.operands == 2
7041 && i.reloc[0] == BFD_RELOC_X86_64_GOTTPOFF
7042 && i.prefix[REX_PREFIX] == 0)
7043 add_prefix (REX_OPCODE);
7044#endif
7045
7046 /* The prefix bytes. */
7047 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
7048 if (*q)
7049 FRAG_APPEND_1_CHAR (*q);
7050 }
7051 else
7052 {
7053 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
7054 if (*q)
7055 switch (j)
7056 {
7057 case REX_PREFIX:
7058 /* REX byte is encoded in VEX prefix. */
7059 break;
7060 case SEG_PREFIX:
7061 case ADDR_PREFIX:
7062 FRAG_APPEND_1_CHAR (*q);
7063 break;
7064 default:
7065 /* There should be no other prefixes for instructions
7066 with VEX prefix. */
7067 abort ();
7068 }
7069
7070 /* For EVEX instructions i.vrex should become 0 after
7071 build_evex_prefix. For VEX instructions upper 16 registers
7072 aren't available, so VREX should be 0. */
7073 if (i.vrex)
7074 abort ();
7075 /* Now the VEX prefix. */
7076 p = frag_more (i.vex.length);
7077 for (j = 0; j < i.vex.length; j++)
7078 p[j] = i.vex.bytes[j];
7079 }
7080
7081 /* Now the opcode; be careful about word order here! */
7082 if (i.tm.opcode_length == 1)
7083 {
7084 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
7085 }
7086 else
7087 {
7088 switch (i.tm.opcode_length)
7089 {
7090 case 4:
7091 p = frag_more (4);
7092 *p++ = (i.tm.base_opcode >> 24) & 0xff;
7093 *p++ = (i.tm.base_opcode >> 16) & 0xff;
7094 break;
7095 case 3:
7096 p = frag_more (3);
7097 *p++ = (i.tm.base_opcode >> 16) & 0xff;
7098 break;
7099 case 2:
7100 p = frag_more (2);
7101 break;
7102 default:
7103 abort ();
7104 break;
7105 }
7106
7107 /* Put out high byte first: can't use md_number_to_chars! */
7108 *p++ = (i.tm.base_opcode >> 8) & 0xff;
7109 *p = i.tm.base_opcode & 0xff;
7110 }
7111
7112 /* Now the modrm byte and sib byte (if present). */
7113 if (i.tm.opcode_modifier.modrm)
7114 {
7115 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
7116 | i.rm.reg << 3
7117 | i.rm.mode << 6));
7118 /* If i.rm.regmem == ESP (4)
7119 && i.rm.mode != (Register mode)
7120 && not 16 bit
7121 ==> need second modrm byte. */
7122 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
7123 && i.rm.mode != 3
7124 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
7125 FRAG_APPEND_1_CHAR ((i.sib.base << 0
7126 | i.sib.index << 3
7127 | i.sib.scale << 6));
7128 }
7129
7130 if (i.disp_operands)
7131 output_disp (insn_start_frag, insn_start_off);
7132
7133 if (i.imm_operands)
7134 output_imm (insn_start_frag, insn_start_off);
7135 }
7136
7137#ifdef DEBUG386
7138 if (flag_debug)
7139 {
7140 pi ("" /*line*/, &i);
7141 }
7142#endif /* DEBUG386 */
7143}
7144
7145/* Return the size of the displacement operand N. */
7146
7147static int
7148disp_size (unsigned int n)
7149{
7150 int size = 4;
7151
7152 /* Vec_Disp8 has to be 8bit. */
7153 if (i.types[n].bitfield.vec_disp8)
7154 size = 1;
7155 else if (i.types[n].bitfield.disp64)
7156 size = 8;
7157 else if (i.types[n].bitfield.disp8)
7158 size = 1;
7159 else if (i.types[n].bitfield.disp16)
7160 size = 2;
7161 return size;
7162}
7163
7164/* Return the size of the immediate operand N. */
7165
7166static int
7167imm_size (unsigned int n)
7168{
7169 int size = 4;
7170 if (i.types[n].bitfield.imm64)
7171 size = 8;
7172 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
7173 size = 1;
7174 else if (i.types[n].bitfield.imm16)
7175 size = 2;
7176 return size;
7177}
7178
7179static void
7180output_disp (fragS *insn_start_frag, offsetT insn_start_off)
7181{
7182 char *p;
7183 unsigned int n;
7184
7185 for (n = 0; n < i.operands; n++)
7186 {
7187 if (i.types[n].bitfield.vec_disp8
7188 || operand_type_check (i.types[n], disp))
7189 {
7190 if (i.op[n].disps->X_op == O_constant)
7191 {
7192 int size = disp_size (n);
7193 offsetT val = i.op[n].disps->X_add_number;
7194
7195 if (i.types[n].bitfield.vec_disp8)
7196 val >>= i.memshift;
7197 val = offset_in_range (val, size);
7198 p = frag_more (size);
7199 md_number_to_chars (p, val, size);
7200 }
7201 else
7202 {
7203 enum bfd_reloc_code_real reloc_type;
7204 int size = disp_size (n);
7205 int sign = i.types[n].bitfield.disp32s;
7206 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
7207 fixS *fixP;
7208
7209 /* We can't have 8 bit displacement here. */
7210 gas_assert (!i.types[n].bitfield.disp8);
7211
7212 /* The PC relative address is computed relative
7213 to the instruction boundary, so in case immediate
7214 fields follow, we need to adjust the value. */
7215 if (pcrel && i.imm_operands)
7216 {
7217 unsigned int n1;
7218 int sz = 0;
7219
7220 for (n1 = 0; n1 < i.operands; n1++)
7221 if (operand_type_check (i.types[n1], imm))
7222 {
7223 /* Only one immediate is allowed for PC
7224 relative address. */
7225 gas_assert (sz == 0);
7226 sz = imm_size (n1);
7227 i.op[n].disps->X_add_number -= sz;
7228 }
7229 /* We should find the immediate. */
7230 gas_assert (sz != 0);
7231 }
7232
7233 p = frag_more (size);
7234 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
7235 if (GOT_symbol
7236 && GOT_symbol == i.op[n].disps->X_add_symbol
7237 && (((reloc_type == BFD_RELOC_32
7238 || reloc_type == BFD_RELOC_X86_64_32S
7239 || (reloc_type == BFD_RELOC_64
7240 && object_64bit))
7241 && (i.op[n].disps->X_op == O_symbol
7242 || (i.op[n].disps->X_op == O_add
7243 && ((symbol_get_value_expression
7244 (i.op[n].disps->X_op_symbol)->X_op)
7245 == O_subtract))))
7246 || reloc_type == BFD_RELOC_32_PCREL))
7247 {
7248 offsetT add;
7249
7250 if (insn_start_frag == frag_now)
7251 add = (p - frag_now->fr_literal) - insn_start_off;
7252 else
7253 {
7254 fragS *fr;
7255
7256 add = insn_start_frag->fr_fix - insn_start_off;
7257 for (fr = insn_start_frag->fr_next;
7258 fr && fr != frag_now; fr = fr->fr_next)
7259 add += fr->fr_fix;
7260 add += p - frag_now->fr_literal;
7261 }
7262
7263 if (!object_64bit)
7264 {
7265 reloc_type = BFD_RELOC_386_GOTPC;
7266 i.op[n].imms->X_add_number += add;
7267 }
7268 else if (reloc_type == BFD_RELOC_64)
7269 reloc_type = BFD_RELOC_X86_64_GOTPC64;
7270 else
7271 /* Don't do the adjustment for x86-64, as there
7272 the pcrel addressing is relative to the _next_
7273 insn, and that is taken care of in other code. */
7274 reloc_type = BFD_RELOC_X86_64_GOTPC32;
7275 }
7276 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal,
7277 size, i.op[n].disps, pcrel,
7278 reloc_type);
7279 /* Check for "call/jmp *mem", "mov mem, %reg",
7280 "test %reg, mem" and "binop mem, %reg" where binop
7281 is one of adc, add, and, cmp, or, sbb, sub, xor
7282 instructions. Always generate R_386_GOT32X for
7283 "sym*GOT" operand in 32-bit mode. */
7284 if ((generate_relax_relocations
7285 || (!object_64bit
7286 && i.rm.mode == 0
7287 && i.rm.regmem == 5))
7288 && (i.rm.mode == 2
7289 || (i.rm.mode == 0 && i.rm.regmem == 5))
7290 && ((i.operands == 1
7291 && i.tm.base_opcode == 0xff
7292 && (i.rm.reg == 2 || i.rm.reg == 4))
7293 || (i.operands == 2
7294 && (i.tm.base_opcode == 0x8b
7295 || i.tm.base_opcode == 0x85
7296 || (i.tm.base_opcode & 0xc7) == 0x03))))
7297 {
7298 if (object_64bit)
7299 {
7300 fixP->fx_tcbit = i.rex != 0;
7301 if (i.base_reg
7302 && (i.base_reg->reg_num == RegRip
7303 || i.base_reg->reg_num == RegEip))
7304 fixP->fx_tcbit2 = 1;
7305 }
7306 else
7307 fixP->fx_tcbit2 = 1;
7308 }
7309 }
7310 }
7311 }
7312}
7313
7314static void
7315output_imm (fragS *insn_start_frag, offsetT insn_start_off)
7316{
7317 char *p;
7318 unsigned int n;
7319
7320 for (n = 0; n < i.operands; n++)
7321 {
7322 /* Skip the SAE/RC Imm operand in EVEX; it is already handled. */
7323 if (i.rounding && (int) n == i.rounding->operand)
7324 continue;
7325
7326 if (operand_type_check (i.types[n], imm))
7327 {
7328 if (i.op[n].imms->X_op == O_constant)
7329 {
7330 int size = imm_size (n);
7331 offsetT val;
7332
7333 val = offset_in_range (i.op[n].imms->X_add_number,
7334 size);
7335 p = frag_more (size);
7336 md_number_to_chars (p, val, size);
7337 }
7338 else
7339 {
7340 /* Not absolute_section.
7341 Need a 32-bit fixup (don't support 8bit
7342 non-absolute imms). Try to support other
7343 sizes ... */
7344 enum bfd_reloc_code_real reloc_type;
7345 int size = imm_size (n);
7346 int sign;
7347
7348 if (i.types[n].bitfield.imm32s
7349 && (i.suffix == QWORD_MNEM_SUFFIX
7350 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
7351 sign = 1;
7352 else
7353 sign = 0;
7354
7355 p = frag_more (size);
7356 reloc_type = reloc (size, 0, sign, i.reloc[n]);
7357
7358 /* This is tough to explain. We end up with this one if we
7359 * have operands that look like
7360 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
7361 * obtain the absolute address of the GOT, and it is strongly
7362 * preferable from a performance point of view to avoid using
7363 * a runtime relocation for this. The actual sequence of
7364 * instructions often looks something like:
7365 *
7366 * call .L66
7367 * .L66:
7368 * popl %ebx
7369 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
7370 *
7371 * The call and pop essentially return the absolute address
7372 * of the label .L66 and store it in %ebx. The linker itself
7373 * will ultimately change the first operand of the addl so
7374 * that %ebx points to the GOT, but to keep things simple, the
7375 * .o file must have this operand set so that it generates not
7376 * the absolute address of .L66, but the absolute address of
7377 * itself. This allows the linker itself to simply treat a GOTPC
7378 * relocation as asking for a pcrel offset to the GOT to be
7379 * added in, and the addend of the relocation is stored in the
7380 * operand field for the instruction itself.
7381 *
7382 * Our job here is to fix the operand so that it would add
7383 * the correct offset so that %ebx would point to itself. The
7384 * thing that is tricky is that .-.L66 will point to the
7385 * beginning of the instruction, so we need to further modify
7386 * the operand so that it will point to itself. There are
7387 * other cases where you have something like:
7388 *
7389 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
7390 *
7391 * and here no correction would be required. Internally in
7392 * the assembler we treat operands of this form as not being
7393 * pcrel since the '.' is explicitly mentioned, and I wonder
7394 * whether it would simplify matters to do it this way. Who
7395 * knows. In earlier versions of the PIC patches, the
7396 * pcrel_adjust field was used to store the correction, but
7397 * since the expression is not pcrel, I felt it would be
7398 * confusing to do it this way. */
7399
7400 if ((reloc_type == BFD_RELOC_32
7401 || reloc_type == BFD_RELOC_X86_64_32S
7402 || reloc_type == BFD_RELOC_64)
7403 && GOT_symbol
7404 && GOT_symbol == i.op[n].imms->X_add_symbol
7405 && (i.op[n].imms->X_op == O_symbol
7406 || (i.op[n].imms->X_op == O_add
7407 && ((symbol_get_value_expression
7408 (i.op[n].imms->X_op_symbol)->X_op)
7409 == O_subtract))))
7410 {
7411 offsetT add;
7412
7413 if (insn_start_frag == frag_now)
7414 add = (p - frag_now->fr_literal) - insn_start_off;
7415 else
7416 {
7417 fragS *fr;
7418
7419 add = insn_start_frag->fr_fix - insn_start_off;
7420 for (fr = insn_start_frag->fr_next;
7421 fr && fr != frag_now; fr = fr->fr_next)
7422 add += fr->fr_fix;
7423 add += p - frag_now->fr_literal;
7424 }
7425
7426 if (!object_64bit)
7427 reloc_type = BFD_RELOC_386_GOTPC;
7428 else if (size == 4)
7429 reloc_type = BFD_RELOC_X86_64_GOTPC32;
7430 else if (size == 8)
7431 reloc_type = BFD_RELOC_X86_64_GOTPC64;
7432 i.op[n].imms->X_add_number += add;
7433 }
7434 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
7435 i.op[n].imms, 0, reloc_type);
7436 }
7437 }
7438 }
7439}
7440\f
7441/* x86_cons_fix_new is called via the expression parsing code when a
7442 reloc is needed. We use this hook to get the correct .got reloc. */
7443static int cons_sign = -1;
7444
7445void
7446x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
7447 expressionS *exp, bfd_reloc_code_real_type r)
7448{
7449 r = reloc (len, 0, cons_sign, r);
7450
7451#ifdef TE_PE
7452 if (exp->X_op == O_secrel)
7453 {
7454 exp->X_op = O_symbol;
7455 r = BFD_RELOC_32_SECREL;
7456 }
7457#endif
7458
7459 fix_new_exp (frag, off, len, exp, 0, r);
7460}
7461
7462/* Export the ABI address size for use by TC_ADDRESS_BYTES for the
7463 purpose of the `.dc.a' internal pseudo-op. */
7464
7465int
7466x86_address_bytes (void)
7467{
7468 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
7469 return 4;
7470 return stdoutput->arch_info->bits_per_address / 8;
7471}
7472
7473#if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
7474 || defined (LEX_AT)
7475# define lex_got(reloc, adjust, types) NULL
7476#else
7477/* Parse operands of the form
7478 <symbol>@GOTOFF+<nnn>
7479 and similar .plt or .got references.
7480
7481 If we find one, set up the correct relocation in RELOC and copy the
7482 input string, minus the `@GOTOFF' into a malloc'd buffer for
7483 parsing by the calling routine. Return this buffer, and if ADJUST
7484 is non-null set it to the length of the string we removed from the
7485 input line. Otherwise return NULL. */
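/* A worked example (illustrative only): given the operand text
   `foo@GOTOFF+4' in 32-bit mode, *REL is set to BFD_RELOC_386_GOTOFF and
   the returned buffer contains `foo +4' -- the reloc token is replaced
   by a space so that malformed suffixes such as `foo@GOTOFF1' are still
   diagnosed.  */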
7486static char *
7487lex_got (enum bfd_reloc_code_real *rel,
7488 int *adjust,
7489 i386_operand_type *types)
7490{
7491 /* Some of the relocations depend on the size of the field to
7492 be relocated. But in our callers i386_immediate and i386_displacement
7493 we don't yet know the operand size (this will be set by insn
7494 matching). Hence we record the word32 relocation here,
7495 and adjust the reloc according to the real size in reloc(). */
7496 static const struct {
7497 const char *str;
7498 int len;
7499 const enum bfd_reloc_code_real rel[2];
7500 const i386_operand_type types64;
7501 } gotrel[] = {
7502#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7503 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32,
7504 BFD_RELOC_SIZE32 },
7505 OPERAND_TYPE_IMM32_64 },
7506#endif
7507 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
7508 BFD_RELOC_X86_64_PLTOFF64 },
7509 OPERAND_TYPE_IMM64 },
7510 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
7511 BFD_RELOC_X86_64_PLT32 },
7512 OPERAND_TYPE_IMM32_32S_DISP32 },
7513 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
7514 BFD_RELOC_X86_64_GOTPLT64 },
7515 OPERAND_TYPE_IMM64_DISP64 },
7516 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
7517 BFD_RELOC_X86_64_GOTOFF64 },
7518 OPERAND_TYPE_IMM64_DISP64 },
7519 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
7520 BFD_RELOC_X86_64_GOTPCREL },
7521 OPERAND_TYPE_IMM32_32S_DISP32 },
7522 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
7523 BFD_RELOC_X86_64_TLSGD },
7524 OPERAND_TYPE_IMM32_32S_DISP32 },
7525 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
7526 _dummy_first_bfd_reloc_code_real },
7527 OPERAND_TYPE_NONE },
7528 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
7529 BFD_RELOC_X86_64_TLSLD },
7530 OPERAND_TYPE_IMM32_32S_DISP32 },
7531 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
7532 BFD_RELOC_X86_64_GOTTPOFF },
7533 OPERAND_TYPE_IMM32_32S_DISP32 },
7534 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
7535 BFD_RELOC_X86_64_TPOFF32 },
7536 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
7537 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
7538 _dummy_first_bfd_reloc_code_real },
7539 OPERAND_TYPE_NONE },
7540 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
7541 BFD_RELOC_X86_64_DTPOFF32 },
7542 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
7543 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
7544 _dummy_first_bfd_reloc_code_real },
7545 OPERAND_TYPE_NONE },
7546 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
7547 _dummy_first_bfd_reloc_code_real },
7548 OPERAND_TYPE_NONE },
7549 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
7550 BFD_RELOC_X86_64_GOT32 },
7551 OPERAND_TYPE_IMM32_32S_64_DISP32 },
7552 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
7553 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
7554 OPERAND_TYPE_IMM32_32S_DISP32 },
7555 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
7556 BFD_RELOC_X86_64_TLSDESC_CALL },
7557 OPERAND_TYPE_IMM32_32S_DISP32 },
7558 };
7559 char *cp;
7560 unsigned int j;
7561
7562#if defined (OBJ_MAYBE_ELF)
7563 if (!IS_ELF)
7564 return NULL;
7565#endif
7566
7567 for (cp = input_line_pointer; *cp != '@'; cp++)
7568 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
7569 return NULL;
7570
7571 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
7572 {
7573 int len = gotrel[j].len;
7574 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
7575 {
7576 if (gotrel[j].rel[object_64bit] != 0)
7577 {
7578 int first, second;
7579 char *tmpbuf, *past_reloc;
7580
7581 *rel = gotrel[j].rel[object_64bit];
7582
7583 if (types)
7584 {
7585 if (flag_code != CODE_64BIT)
7586 {
7587 types->bitfield.imm32 = 1;
7588 types->bitfield.disp32 = 1;
7589 }
7590 else
7591 *types = gotrel[j].types64;
7592 }
7593
7594 if (j != 0 && GOT_symbol == NULL)
7595 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
7596
7597 /* The length of the first part of our input line. */
7598 first = cp - input_line_pointer;
7599
7600 /* The second part goes from after the reloc token until
7601 (and including) an end_of_line char or comma. */
7602 past_reloc = cp + 1 + len;
7603 cp = past_reloc;
7604 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
7605 ++cp;
7606 second = cp + 1 - past_reloc;
7607
7608 /* Allocate and copy string. The trailing NUL shouldn't
7609 be necessary, but be safe. */
7610 tmpbuf = XNEWVEC (char, first + second + 2);
7611 memcpy (tmpbuf, input_line_pointer, first);
7612 if (second != 0 && *past_reloc != ' ')
7613 /* Replace the relocation token with ' ', so that
7614 errors like foo@GOTOFF1 will be detected. */
7615 tmpbuf[first++] = ' ';
7616 else
7617 /* Increment length by 1 if the relocation token is
7618 removed. */
7619 len++;
7620 if (adjust)
7621 *adjust = len;
7622 memcpy (tmpbuf + first, past_reloc, second);
7623 tmpbuf[first + second] = '\0';
7624 return tmpbuf;
7625 }
7626
7627 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7628 gotrel[j].str, 1 << (5 + object_64bit));
7629 return NULL;
7630 }
7631 }
7632
7633 /* Might be a symbol version string. Don't as_bad here. */
7634 return NULL;
7635}
7636#endif
7637
7638#ifdef TE_PE
7639#ifdef lex_got
7640#undef lex_got
7641#endif
7642/* Parse operands of the form
7643 <symbol>@SECREL32+<nnn>
7644
7645 If we find one, set up the correct relocation in RELOC and copy the
7646 input string, minus the `@SECREL32' into a malloc'd buffer for
7647 parsing by the calling routine. Return this buffer, and if ADJUST
7648 is non-null set it to the length of the string we removed from the
7649 input line. Otherwise return NULL.
7650
7651 This function is copied from the ELF version above, adjusted for PE targets. */
7652
7653static char *
7654lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
7655 int *adjust ATTRIBUTE_UNUSED,
7656 i386_operand_type *types)
7657{
7658 static const struct
7659 {
7660 const char *str;
7661 int len;
7662 const enum bfd_reloc_code_real rel[2];
7663 const i386_operand_type types64;
7664 }
7665 gotrel[] =
7666 {
7667 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
7668 BFD_RELOC_32_SECREL },
7669 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
7670 };
7671
7672 char *cp;
7673 unsigned j;
7674
7675 for (cp = input_line_pointer; *cp != '@'; cp++)
7676 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
7677 return NULL;
7678
7679 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
7680 {
7681 int len = gotrel[j].len;
7682
7683 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
7684 {
7685 if (gotrel[j].rel[object_64bit] != 0)
7686 {
7687 int first, second;
7688 char *tmpbuf, *past_reloc;
7689
7690 *rel = gotrel[j].rel[object_64bit];
7691 if (adjust)
7692 *adjust = len;
7693
7694 if (types)
7695 {
7696 if (flag_code != CODE_64BIT)
7697 {
7698 types->bitfield.imm32 = 1;
7699 types->bitfield.disp32 = 1;
7700 }
7701 else
7702 *types = gotrel[j].types64;
7703 }
7704
7705 /* The length of the first part of our input line. */
7706 first = cp - input_line_pointer;
7707
7708 /* The second part goes from after the reloc token until
7709 (and including) an end_of_line char or comma. */
7710 past_reloc = cp + 1 + len;
7711 cp = past_reloc;
7712 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
7713 ++cp;
7714 second = cp + 1 - past_reloc;
7715
7716 /* Allocate and copy string. The trailing NUL shouldn't
7717 be necessary, but be safe. */
7718 tmpbuf = XNEWVEC (char, first + second + 2);
7719 memcpy (tmpbuf, input_line_pointer, first);
7720 if (second != 0 && *past_reloc != ' ')
7721 /* Replace the relocation token with ' ', so that
7722 errors like foo@SECREL321 will be detected. */
7723 tmpbuf[first++] = ' ';
7724 memcpy (tmpbuf + first, past_reloc, second);
7725 tmpbuf[first + second] = '\0';
7726 return tmpbuf;
7727 }
7728
7729 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7730 gotrel[j].str, 1 << (5 + object_64bit));
7731 return NULL;
7732 }
7733 }
7734
7735 /* Might be a symbol version string. Don't as_bad here. */
7736 return NULL;
7737}
7738
7739#endif /* TE_PE */
7740
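/* Illustrative note: x86_cons lets data directives carry relocation
   suffixes, e.g. `.long foo@GOTOFF'; lex_got rewrites the expression
   text and the returned relocation is applied to the emitted value.  */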
7741bfd_reloc_code_real_type
7742x86_cons (expressionS *exp, int size)
7743{
7744 bfd_reloc_code_real_type got_reloc = NO_RELOC;
7745
7746 intel_syntax = -intel_syntax;
7747
7748 exp->X_md = 0;
7749 if (size == 4 || (object_64bit && size == 8))
7750 {
7751 /* Handle @GOTOFF and the like in an expression. */
7752 char *save;
7753 char *gotfree_input_line;
7754 int adjust = 0;
7755
7756 save = input_line_pointer;
7757 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
7758 if (gotfree_input_line)
7759 input_line_pointer = gotfree_input_line;
7760
7761 expression (exp);
7762
7763 if (gotfree_input_line)
7764 {
7765 /* expression () has merrily parsed up to the end of line,
7766 or a comma - in the wrong buffer. Transfer how far
7767 input_line_pointer has moved to the right buffer. */
7768 input_line_pointer = (save
7769 + (input_line_pointer - gotfree_input_line)
7770 + adjust);
7771 free (gotfree_input_line);
7772 if (exp->X_op == O_constant
7773 || exp->X_op == O_absent
7774 || exp->X_op == O_illegal
7775 || exp->X_op == O_register
7776 || exp->X_op == O_big)
7777 {
7778 char c = *input_line_pointer;
7779 *input_line_pointer = 0;
7780 as_bad (_("missing or invalid expression `%s'"), save);
7781 *input_line_pointer = c;
7782 }
7783 }
7784 }
7785 else
7786 expression (exp);
7787
7788 intel_syntax = -intel_syntax;
7789
7790 if (intel_syntax)
7791 i386_intel_simplify (exp);
7792
7793 return got_reloc;
7794}
7795
7796static void
7797signed_cons (int size)
7798{
7799 if (flag_code == CODE_64BIT)
7800 cons_sign = 1;
7801 cons (size);
7802 cons_sign = -1;
7803}
7804
7805#ifdef TE_PE
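/* Presumably the handler for the PE-only `.secrel32' pseudo-op: each
   expression in the comma-separated list is emitted as a 32-bit
   section-relative value (O_secrel becomes BFD_RELOC_32_SECREL in
   x86_cons_fix_new above).  */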
7806static void
7807pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
7808{
7809 expressionS exp;
7810
7811 do
7812 {
7813 expression (&exp);
7814 if (exp.X_op == O_symbol)
7815 exp.X_op = O_secrel;
7816
7817 emit_expr (&exp, 4);
7818 }
7819 while (*input_line_pointer++ == ',');
7820
7821 input_line_pointer--;
7822 demand_empty_rest_of_line ();
7823}
7824#endif
7825
7826/* Handle Vector operations. */
7827
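/* Illustrative note: the AVX-512 decorations parsed here look like
   `vaddps (%rax){1to16}, %zmm1, %zmm2' for an embedded broadcast and
   `vmovaps %zmm0, %zmm1{%k2}{z}' for masking with zeroing; `{%k0}' is
   rejected because k0 cannot be used as a write mask.  */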
7828static char *
7829check_VecOperations (char *op_string, char *op_end)
7830{
7831 const reg_entry *mask;
7832 const char *saved;
7833 char *end_op;
7834
7835 while (*op_string
7836 && (op_end == NULL || op_string < op_end))
7837 {
7838 saved = op_string;
7839 if (*op_string == '{')
7840 {
7841 op_string++;
7842
7843 /* Check broadcasts. */
7844 if (strncmp (op_string, "1to", 3) == 0)
7845 {
7846 int bcst_type;
7847
7848 if (i.broadcast)
7849 goto duplicated_vec_op;
7850
7851 op_string += 3;
7852 if (*op_string == '8')
7853 bcst_type = BROADCAST_1TO8;
7854 else if (*op_string == '4')
7855 bcst_type = BROADCAST_1TO4;
7856 else if (*op_string == '2')
7857 bcst_type = BROADCAST_1TO2;
7858 else if (*op_string == '1'
7859 && *(op_string+1) == '6')
7860 {
7861 bcst_type = BROADCAST_1TO16;
7862 op_string++;
7863 }
7864 else
7865 {
7866 as_bad (_("Unsupported broadcast: `%s'"), saved);
7867 return NULL;
7868 }
7869 op_string++;
7870
7871 broadcast_op.type = bcst_type;
7872 broadcast_op.operand = this_operand;
7873 i.broadcast = &broadcast_op;
7874 }
7875 /* Check masking operation. */
7876 else if ((mask = parse_register (op_string, &end_op)) != NULL)
7877 {
7878 /* k0 can't be used for write mask. */
7879 if (mask->reg_num == 0)
7880 {
7881 as_bad (_("`%s' can't be used for write mask"),
7882 op_string);
7883 return NULL;
7884 }
7885
7886 if (!i.mask)
7887 {
7888 mask_op.mask = mask;
7889 mask_op.zeroing = 0;
7890 mask_op.operand = this_operand;
7891 i.mask = &mask_op;
7892 }
7893 else
7894 {
7895 if (i.mask->mask)
7896 goto duplicated_vec_op;
7897
7898 i.mask->mask = mask;
7899
7900 /* Only "{z}" is allowed here. No need to check
7901 zeroing mask explicitly. */
7902 if (i.mask->operand != this_operand)
7903 {
7904 as_bad (_("invalid write mask `%s'"), saved);
7905 return NULL;
7906 }
7907 }
7908
7909 op_string = end_op;
7910 }
7911 /* Check zeroing-flag for masking operation. */
7912 else if (*op_string == 'z')
7913 {
7914 if (!i.mask)
7915 {
7916 mask_op.mask = NULL;
7917 mask_op.zeroing = 1;
7918 mask_op.operand = this_operand;
7919 i.mask = &mask_op;
7920 }
7921 else
7922 {
7923 if (i.mask->zeroing)
7924 {
7925 duplicated_vec_op:
7926 as_bad (_("duplicated `%s'"), saved);
7927 return NULL;
7928 }
7929
7930 i.mask->zeroing = 1;
7931
7932 /* Only "{%k}" is allowed here. No need to check mask
7933 register explicitly. */
7934 if (i.mask->operand != this_operand)
7935 {
7936 as_bad (_("invalid zeroing-masking `%s'"),
7937 saved);
7938 return NULL;
7939 }
7940 }
7941
7942 op_string++;
7943 }
7944 else
7945 goto unknown_vec_op;
7946
7947 if (*op_string != '}')
7948 {
7949 as_bad (_("missing `}' in `%s'"), saved);
7950 return NULL;
7951 }
7952 op_string++;
7953 continue;
7954 }
7955 unknown_vec_op:
7956 /* We don't know this one. */
7957 as_bad (_("unknown vector operation: `%s'"), saved);
7958 return NULL;
7959 }
7960
7961 return op_string;
7962}
7963
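/* Illustrative note: i386_immediate parses the text following the `$'
   immediate prefix in AT&T syntax, e.g. the `0x100' in
   `movl $0x100, %eax', or a symbolic immediate such as `$foo'.  */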
7964static int
7965i386_immediate (char *imm_start)
7966{
7967 char *save_input_line_pointer;
7968 char *gotfree_input_line;
7969 segT exp_seg = 0;
7970 expressionS *exp;
7971 i386_operand_type types;
7972
7973 operand_type_set (&types, ~0);
7974
7975 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
7976 {
7977 as_bad (_("at most %d immediate operands are allowed"),
7978 MAX_IMMEDIATE_OPERANDS);
7979 return 0;
7980 }
7981
7982 exp = &im_expressions[i.imm_operands++];
7983 i.op[this_operand].imms = exp;
7984
7985 if (is_space_char (*imm_start))
7986 ++imm_start;
7987
7988 save_input_line_pointer = input_line_pointer;
7989 input_line_pointer = imm_start;
7990
7991 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7992 if (gotfree_input_line)
7993 input_line_pointer = gotfree_input_line;
7994
7995 exp_seg = expression (exp);
7996
7997 SKIP_WHITESPACE ();
7998
7999 /* Handle vector operations. */
8000 if (*input_line_pointer == '{')
8001 {
8002 input_line_pointer = check_VecOperations (input_line_pointer,
8003 NULL);
8004 if (input_line_pointer == NULL)
8005 return 0;
8006 }
8007
8008 if (*input_line_pointer)
8009 as_bad (_("junk `%s' after expression"), input_line_pointer);
8010
8011 input_line_pointer = save_input_line_pointer;
8012 if (gotfree_input_line)
8013 {
8014 free (gotfree_input_line);
8015
8016 if (exp->X_op == O_constant || exp->X_op == O_register)
8017 exp->X_op = O_illegal;
8018 }
8019
8020 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
8021}
8022
8023static int
8024i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
8025 i386_operand_type types, const char *imm_start)
8026{
8027 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
8028 {
8029 if (imm_start)
8030 as_bad (_("missing or invalid immediate expression `%s'"),
8031 imm_start);
8032 return 0;
8033 }
8034 else if (exp->X_op == O_constant)
8035 {
8036 /* Size it properly later. */
8037 i.types[this_operand].bitfield.imm64 = 1;
8038 /* If not 64bit, sign extend val. */
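      /* Worked example (illustrative): for the 32-bit value
	 0xfffffffe, XOR with (1 << 31) gives 0x7ffffffe, and
	 subtracting (1 << 31) yields 0xfffffffffffffffe, i.e. -2,
	 the value sign-extended from 32 to 64 bits.  */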
8039 if (flag_code != CODE_64BIT
8040 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
8041 exp->X_add_number
8042 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
8043 }
8044#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8045 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
8046 && exp_seg != absolute_section
8047 && exp_seg != text_section
8048 && exp_seg != data_section
8049 && exp_seg != bss_section
8050 && exp_seg != undefined_section
8051 && !bfd_is_com_section (exp_seg))
8052 {
8053 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
8054 return 0;
8055 }
8056#endif
8057 else if (!intel_syntax && exp_seg == reg_section)
8058 {
8059 if (imm_start)
8060 as_bad (_("illegal immediate register operand %s"), imm_start);
8061 return 0;
8062 }
8063 else
8064 {
8065 /* This is an address. The size of the address will be
8066 determined later, depending on destination register,
8067 suffix, or the default for the section. */
8068 i.types[this_operand].bitfield.imm8 = 1;
8069 i.types[this_operand].bitfield.imm16 = 1;
8070 i.types[this_operand].bitfield.imm32 = 1;
8071 i.types[this_operand].bitfield.imm32s = 1;
8072 i.types[this_operand].bitfield.imm64 = 1;
8073 i.types[this_operand] = operand_type_and (i.types[this_operand],
8074 types);
8075 }
8076
8077 return 1;
8078}
8079
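/* Parse the scale factor of a base/index memory operand.  For
   illustration (AT&T syntax): in "movl 8(%ebx,%esi,4), %eax" the "4"
   is handed to this routine and becomes log2_scale_factor == 2; only
   1, 2, 4 and 8 are accepted, and a scale without an index register
   only provokes a warning.  */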
8080static char *
8081i386_scale (char *scale)
8082{
8083 offsetT val;
8084 char *save = input_line_pointer;
8085
8086 input_line_pointer = scale;
8087 val = get_absolute_expression ();
8088
8089 switch (val)
8090 {
8091 case 1:
8092 i.log2_scale_factor = 0;
8093 break;
8094 case 2:
8095 i.log2_scale_factor = 1;
8096 break;
8097 case 4:
8098 i.log2_scale_factor = 2;
8099 break;
8100 case 8:
8101 i.log2_scale_factor = 3;
8102 break;
8103 default:
8104 {
8105 char sep = *input_line_pointer;
8106
8107 *input_line_pointer = '\0';
8108 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
8109 scale);
8110 *input_line_pointer = sep;
8111 input_line_pointer = save;
8112 return NULL;
8113 }
8114 }
8115 if (i.log2_scale_factor != 0 && i.index_reg == 0)
8116 {
8117 as_warn (_("scale factor of %d without an index register"),
8118 1 << i.log2_scale_factor);
8119 i.log2_scale_factor = 0;
8120 }
8121 scale = input_line_pointer;
8122 input_line_pointer = save;
8123 return scale;
8124}
8125
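/* Parse the displacement part of a memory operand or branch target,
   recording which displacement widths are still possible.  Illustrative
   examples (AT&T syntax): "-8(%ebp)", "foo(%rip)" and the target of
   "jmp foo" all pass through here; for PC-relative branches the width
   follows the data size (a "w" suffix or data-size prefix selects
   disp16), while for ordinary memory operands it follows the address
   size.  */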
8126static int
8127i386_displacement (char *disp_start, char *disp_end)
8128{
8129 expressionS *exp;
8130 segT exp_seg = 0;
8131 char *save_input_line_pointer;
8132 char *gotfree_input_line;
8133 int override;
8134 i386_operand_type bigdisp, types = anydisp;
8135 int ret;
8136
8137 if (i.disp_operands == MAX_MEMORY_OPERANDS)
8138 {
8139 as_bad (_("at most %d displacement operands are allowed"),
8140 MAX_MEMORY_OPERANDS);
8141 return 0;
8142 }
8143
8144 operand_type_set (&bigdisp, 0);
8145 if ((i.types[this_operand].bitfield.jumpabsolute)
8146 || (!current_templates->start->opcode_modifier.jump
8147 && !current_templates->start->opcode_modifier.jumpdword))
8148 {
8149 bigdisp.bitfield.disp32 = 1;
8150 override = (i.prefix[ADDR_PREFIX] != 0);
8151 if (flag_code == CODE_64BIT)
8152 {
8153 if (!override)
8154 {
8155 bigdisp.bitfield.disp32s = 1;
8156 bigdisp.bitfield.disp64 = 1;
8157 }
8158 }
8159 else if ((flag_code == CODE_16BIT) ^ override)
8160 {
8161 bigdisp.bitfield.disp32 = 0;
8162 bigdisp.bitfield.disp16 = 1;
8163 }
8164 }
8165 else
8166 {
8167 /* For PC-relative branches, the width of the displacement
8168 is dependent upon data size, not address size. */
8169 override = (i.prefix[DATA_PREFIX] != 0);
8170 if (flag_code == CODE_64BIT)
8171 {
8172 if (override || i.suffix == WORD_MNEM_SUFFIX)
8173 bigdisp.bitfield.disp16 = 1;
8174 else
8175 {
8176 bigdisp.bitfield.disp32 = 1;
8177 bigdisp.bitfield.disp32s = 1;
8178 }
8179 }
8180 else
8181 {
8182 if (!override)
8183 override = (i.suffix == (flag_code != CODE_16BIT
8184 ? WORD_MNEM_SUFFIX
8185 : LONG_MNEM_SUFFIX));
8186 bigdisp.bitfield.disp32 = 1;
8187 if ((flag_code == CODE_16BIT) ^ override)
8188 {
8189 bigdisp.bitfield.disp32 = 0;
8190 bigdisp.bitfield.disp16 = 1;
8191 }
8192 }
8193 }
8194 i.types[this_operand] = operand_type_or (i.types[this_operand],
8195 bigdisp);
8196
8197 exp = &disp_expressions[i.disp_operands];
8198 i.op[this_operand].disps = exp;
8199 i.disp_operands++;
8200 save_input_line_pointer = input_line_pointer;
8201 input_line_pointer = disp_start;
8202 END_STRING_AND_SAVE (disp_end);
8203
8204#ifndef GCC_ASM_O_HACK
8205#define GCC_ASM_O_HACK 0
8206#endif
8207#if GCC_ASM_O_HACK
8208 END_STRING_AND_SAVE (disp_end + 1);
8209 if (i.types[this_operand].bitfield.baseIndex
8210 && displacement_string_end[-1] == '+')
8211 {
8212 /* This hack is to avoid a warning when using the "o"
8213 constraint within gcc asm statements.
8214 For instance:
8215
8216 #define _set_tssldt_desc(n,addr,limit,type) \
8217 __asm__ __volatile__ ( \
8218 "movw %w2,%0\n\t" \
8219 "movw %w1,2+%0\n\t" \
8220 "rorl $16,%1\n\t" \
8221 "movb %b1,4+%0\n\t" \
8222 "movb %4,5+%0\n\t" \
8223 "movb $0,6+%0\n\t" \
8224 "movb %h1,7+%0\n\t" \
8225 "rorl $16,%1" \
8226 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
8227
8228 This works great except that the output assembly ends
8229 up looking a bit weird if it turns out that there is
8230 no offset. You end up producing code that looks like:
8231
8232 #APP
8233 movw $235,(%eax)
8234 movw %dx,2+(%eax)
8235 rorl $16,%edx
8236 movb %dl,4+(%eax)
8237 movb $137,5+(%eax)
8238 movb $0,6+(%eax)
8239 movb %dh,7+(%eax)
8240 rorl $16,%edx
8241 #NO_APP
8242
8243 So here we provide the missing zero. */
8244
8245 *displacement_string_end = '0';
8246 }
8247#endif
8248 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
8249 if (gotfree_input_line)
8250 input_line_pointer = gotfree_input_line;
8251
8252 exp_seg = expression (exp);
8253
8254 SKIP_WHITESPACE ();
8255 if (*input_line_pointer)
8256 as_bad (_("junk `%s' after expression"), input_line_pointer);
8257#if GCC_ASM_O_HACK
8258 RESTORE_END_STRING (disp_end + 1);
8259#endif
8260 input_line_pointer = save_input_line_pointer;
8261 if (gotfree_input_line)
8262 {
8263 free (gotfree_input_line);
8264
8265 if (exp->X_op == O_constant || exp->X_op == O_register)
8266 exp->X_op = O_illegal;
8267 }
8268
8269 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
8270
8271 RESTORE_END_STRING (disp_end);
8272
8273 return ret;
8274}
8275
8276static int
8277i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
8278 i386_operand_type types, const char *disp_start)
8279{
8280 i386_operand_type bigdisp;
8281 int ret = 1;
8282
8283 /* We do this to make sure that the section symbol is in
8284 the symbol table. We will ultimately change the relocation
8285 to be relative to the beginning of the section. */
8286 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
8287 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
8288 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8289 {
8290 if (exp->X_op != O_symbol)
8291 goto inv_disp;
8292
8293 if (S_IS_LOCAL (exp->X_add_symbol)
8294 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
8295 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
8296 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
8297 exp->X_op = O_subtract;
8298 exp->X_op_symbol = GOT_symbol;
8299 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
8300 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
8301 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8302 i.reloc[this_operand] = BFD_RELOC_64;
8303 else
8304 i.reloc[this_operand] = BFD_RELOC_32;
8305 }
8306
8307 else if (exp->X_op == O_absent
8308 || exp->X_op == O_illegal
8309 || exp->X_op == O_big)
8310 {
8311 inv_disp:
8312 as_bad (_("missing or invalid displacement expression `%s'"),
8313 disp_start);
8314 ret = 0;
8315 }
8316
8317 else if (flag_code == CODE_64BIT
8318 && !i.prefix[ADDR_PREFIX]
8319 && exp->X_op == O_constant)
8320 {
8321 /* Since the displacement is sign-extended to 64 bits, don't allow
8322 disp32, and turn off disp32s if the value is out of range. */
8323 i.types[this_operand].bitfield.disp32 = 0;
8324 if (!fits_in_signed_long (exp->X_add_number))
8325 {
8326 i.types[this_operand].bitfield.disp32s = 0;
8327 if (i.types[this_operand].bitfield.baseindex)
8328 {
8329 as_bad (_("0x%lx out of range of signed 32bit displacement"),
8330 (long) exp->X_add_number);
8331 ret = 0;
8332 }
8333 }
8334 }
8335
8336#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8337 else if (exp->X_op != O_constant
8338 && OUTPUT_FLAVOR == bfd_target_aout_flavour
8339 && exp_seg != absolute_section
8340 && exp_seg != text_section
8341 && exp_seg != data_section
8342 && exp_seg != bss_section
8343 && exp_seg != undefined_section
8344 && !bfd_is_com_section (exp_seg))
8345 {
8346 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
8347 ret = 0;
8348 }
8349#endif
8350
8351 /* Check if this is a displacement only operand. */
8352 bigdisp = i.types[this_operand];
8353 bigdisp.bitfield.disp8 = 0;
8354 bigdisp.bitfield.disp16 = 0;
8355 bigdisp.bitfield.disp32 = 0;
8356 bigdisp.bitfield.disp32s = 0;
8357 bigdisp.bitfield.disp64 = 0;
8358 if (operand_type_all_zero (&bigdisp))
8359 i.types[this_operand] = operand_type_and (i.types[this_operand],
8360 types);
8361
8362 return ret;
8363}
8364
8365/* Make sure the memory operand we've been dealt is valid.
8366 Return 1 on success, 0 on a failure. */
8367
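/* Illustrative cases: in 32-bit code "(%eax,%ebx,2)" is accepted, while
   "(%ax)" falls into the 16-bit checks and is rejected because valid
   16-bit addressing only combines %bx/%bp with %si/%di; string
   instructions such as "movs" additionally require their implied
   (%edi)/(%esi)-style addresses, which the string special case below
   verifies.  */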
8368static int
8369i386_index_check (const char *operand_string)
8370{
8371 const char *kind = "base/index";
8372 enum flag_code addr_mode;
8373
8374 if (i.prefix[ADDR_PREFIX])
8375 addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
8376 else
8377 {
8378 addr_mode = flag_code;
8379
8380#if INFER_ADDR_PREFIX
8381 if (i.mem_operands == 0)
8382 {
8383 /* Infer address prefix from the first memory operand. */
8384 const reg_entry *addr_reg = i.base_reg;
8385
8386 if (addr_reg == NULL)
8387 addr_reg = i.index_reg;
8388
8389 if (addr_reg)
8390 {
8391 if (addr_reg->reg_num == RegEip
8392 || addr_reg->reg_num == RegEiz
8393 || addr_reg->reg_type.bitfield.reg32)
8394 addr_mode = CODE_32BIT;
8395 else if (flag_code != CODE_64BIT
8396 && addr_reg->reg_type.bitfield.reg16)
8397 addr_mode = CODE_16BIT;
8398
8399 if (addr_mode != flag_code)
8400 {
8401 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
8402 i.prefixes += 1;
8403 /* Change the size of any displacement too. At most one
8404 of Disp16 or Disp32 is set.
8405 FIXME. There doesn't seem to be any real need for
8406 separate Disp16 and Disp32 flags. The same goes for
8407 Imm16 and Imm32. Removing them would probably clean
8408 up the code quite a lot. */
8409 if (flag_code != CODE_64BIT
8410 && (i.types[this_operand].bitfield.disp16
8411 || i.types[this_operand].bitfield.disp32))
8412 i.types[this_operand]
8413 = operand_type_xor (i.types[this_operand], disp16_32);
8414 }
8415 }
8416 }
8417#endif
8418 }
8419
8420 if (current_templates->start->opcode_modifier.isstring
8421 && !current_templates->start->opcode_modifier.immext
8422 && (current_templates->end[-1].opcode_modifier.isstring
8423 || i.mem_operands))
8424 {
8425 /* Memory operands of string insns are special in that they only allow
8426 a single register (rDI, rSI, or rBX) as their memory address. */
8427 const reg_entry *expected_reg;
8428 static const char *di_si[][2] =
8429 {
8430 { "esi", "edi" },
8431 { "si", "di" },
8432 { "rsi", "rdi" }
8433 };
8434 static const char *bx[] = { "ebx", "bx", "rbx" };
8435
8436 kind = "string address";
8437
8438 if (current_templates->start->opcode_modifier.w)
8439 {
8440 i386_operand_type type = current_templates->end[-1].operand_types[0];
8441
8442 if (!type.bitfield.baseindex
8443 || ((!i.mem_operands != !intel_syntax)
8444 && current_templates->end[-1].operand_types[1]
8445 .bitfield.baseindex))
8446 type = current_templates->end[-1].operand_types[1];
8447 expected_reg = hash_find (reg_hash,
8448 di_si[addr_mode][type.bitfield.esseg]);
8449
8450 }
8451 else
8452 expected_reg = hash_find (reg_hash, bx[addr_mode]);
8453
8454 if (i.base_reg != expected_reg
8455 || i.index_reg
8456 || operand_type_check (i.types[this_operand], disp))
8457 {
8458 /* The second memory operand must have the same size as
8459 the first one. */
8460 if (i.mem_operands
8461 && i.base_reg
8462 && !((addr_mode == CODE_64BIT
8463 && i.base_reg->reg_type.bitfield.reg64)
8464 || (addr_mode == CODE_32BIT
8465 ? i.base_reg->reg_type.bitfield.reg32
8466 : i.base_reg->reg_type.bitfield.reg16)))
8467 goto bad_address;
8468
8469 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
8470 operand_string,
8471 intel_syntax ? '[' : '(',
8472 register_prefix,
8473 expected_reg->reg_name,
8474 intel_syntax ? ']' : ')');
8475 return 1;
8476 }
8477 else
8478 return 1;
8479
8480bad_address:
8481 as_bad (_("`%s' is not a valid %s expression"),
8482 operand_string, kind);
8483 return 0;
8484 }
8485 else
8486 {
8487 if (addr_mode != CODE_16BIT)
8488 {
8489 /* 32-bit/64-bit checks. */
8490 if ((i.base_reg
8491 && (addr_mode == CODE_64BIT
8492 ? !i.base_reg->reg_type.bitfield.reg64
8493 : !i.base_reg->reg_type.bitfield.reg32)
8494 && (i.index_reg
8495 || (i.base_reg->reg_num
8496 != (addr_mode == CODE_64BIT ? RegRip : RegEip))))
8497 || (i.index_reg
8498 && !i.index_reg->reg_type.bitfield.regxmm
8499 && !i.index_reg->reg_type.bitfield.regymm
8500 && !i.index_reg->reg_type.bitfield.regzmm
8501 && ((addr_mode == CODE_64BIT
8502 ? !(i.index_reg->reg_type.bitfield.reg64
8503 || i.index_reg->reg_num == RegRiz)
8504 : !(i.index_reg->reg_type.bitfield.reg32
8505 || i.index_reg->reg_num == RegEiz))
8506 || !i.index_reg->reg_type.bitfield.baseindex)))
8507 goto bad_address;
8508 }
8509 else
8510 {
8511 /* 16-bit checks. */
8512 if ((i.base_reg
8513 && (!i.base_reg->reg_type.bitfield.reg16
8514 || !i.base_reg->reg_type.bitfield.baseindex))
8515 || (i.index_reg
8516 && (!i.index_reg->reg_type.bitfield.reg16
8517 || !i.index_reg->reg_type.bitfield.baseindex
8518 || !(i.base_reg
8519 && i.base_reg->reg_num < 6
8520 && i.index_reg->reg_num >= 6
8521 && i.log2_scale_factor == 0))))
8522 goto bad_address;
8523 }
8524 }
8525 return 1;
8526}
8527
8528/* Handle vector immediates. */
8529
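/* Illustrative examples of what RC_NamesTable is matched against below:
   rounding-control / suppress-all-exceptions decorators written as a
   pseudo-operand, e.g. "{rn-sae}" in "vaddps {rn-sae}, %zmm0, %zmm1,
   %zmm2" (the exact spellings come from the table).  A match records
   i.rounding and consumes an imm8 operand slot with value 0.  */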
8530static int
8531RC_SAE_immediate (const char *imm_start)
8532{
8533 unsigned int match_found, j;
8534 const char *pstr = imm_start;
8535 expressionS *exp;
8536
8537 if (*pstr != '{')
8538 return 0;
8539
8540 pstr++;
8541 match_found = 0;
8542 for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
8543 {
8544 if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
8545 {
8546 if (!i.rounding)
8547 {
8548 rc_op.type = RC_NamesTable[j].type;
8549 rc_op.operand = this_operand;
8550 i.rounding = &rc_op;
8551 }
8552 else
8553 {
8554 as_bad (_("duplicated `%s'"), imm_start);
8555 return 0;
8556 }
8557 pstr += RC_NamesTable[j].len;
8558 match_found = 1;
8559 break;
8560 }
8561 }
8562 if (!match_found)
8563 return 0;
8564
8565 if (*pstr++ != '}')
8566 {
8567 as_bad (_("Missing '}': '%s'"), imm_start);
8568 return 0;
8569 }
8570 /* RC/SAE immediate string should contain nothing more. */
8571 if (*pstr != 0)
8572 {
8573 as_bad (_("Junk after '}': '%s'"), imm_start);
8574 return 0;
8575 }
8576
8577 exp = &im_expressions[i.imm_operands++];
8578 i.op[this_operand].imms = exp;
8579
8580 exp->X_op = O_constant;
8581 exp->X_add_number = 0;
8582 exp->X_add_symbol = (symbolS *) 0;
8583 exp->X_op_symbol = (symbolS *) 0;
8584
8585 i.types[this_operand].bitfield.imm8 = 1;
8586 return 1;
8587}
8588
8589/* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
8590 on error. */
8591
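/* Illustrative operand forms dispatched below (AT&T syntax): "%eax"
   (register), "%es:foo" (segment override followed by a memory
   reference), "$42" (immediate), "{rn-sae}" (RC/SAE pseudo-immediate),
   and memory references such as "foo", "(%ebx)" or "-4(%ebp,%esi,2)";
   a leading '*' marks an absolute jump/call target.  */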
8592static int
8593i386_att_operand (char *operand_string)
8594{
8595 const reg_entry *r;
8596 char *end_op;
8597 char *op_string = operand_string;
8598
8599 if (is_space_char (*op_string))
8600 ++op_string;
8601
8602 /* We check for an absolute prefix (differentiating,
8603 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
8604 if (*op_string == ABSOLUTE_PREFIX)
8605 {
8606 ++op_string;
8607 if (is_space_char (*op_string))
8608 ++op_string;
8609 i.types[this_operand].bitfield.jumpabsolute = 1;
8610 }
8611
8612 /* Check if operand is a register. */
8613 if ((r = parse_register (op_string, &end_op)) != NULL)
8614 {
8615 i386_operand_type temp;
8616
8617 /* Check for a segment override by searching for ':' after a
8618 segment register. */
8619 op_string = end_op;
8620 if (is_space_char (*op_string))
8621 ++op_string;
8622 if (*op_string == ':'
8623 && (r->reg_type.bitfield.sreg2
8624 || r->reg_type.bitfield.sreg3))
8625 {
8626 switch (r->reg_num)
8627 {
8628 case 0:
8629 i.seg[i.mem_operands] = &es;
8630 break;
8631 case 1:
8632 i.seg[i.mem_operands] = &cs;
8633 break;
8634 case 2:
8635 i.seg[i.mem_operands] = &ss;
8636 break;
8637 case 3:
8638 i.seg[i.mem_operands] = &ds;
8639 break;
8640 case 4:
8641 i.seg[i.mem_operands] = &fs;
8642 break;
8643 case 5:
8644 i.seg[i.mem_operands] = &gs;
8645 break;
8646 }
8647
8648 /* Skip the ':' and whitespace. */
8649 ++op_string;
8650 if (is_space_char (*op_string))
8651 ++op_string;
8652
8653 if (!is_digit_char (*op_string)
8654 && !is_identifier_char (*op_string)
8655 && *op_string != '('
8656 && *op_string != ABSOLUTE_PREFIX)
8657 {
8658 as_bad (_("bad memory operand `%s'"), op_string);
8659 return 0;
8660 }
8661 /* Handle case of %es:*foo. */
8662 if (*op_string == ABSOLUTE_PREFIX)
8663 {
8664 ++op_string;
8665 if (is_space_char (*op_string))
8666 ++op_string;
8667 i.types[this_operand].bitfield.jumpabsolute = 1;
8668 }
8669 goto do_memory_reference;
8670 }
8671
8672 /* Handle vector operations. */
8673 if (*op_string == '{')
8674 {
8675 op_string = check_VecOperations (op_string, NULL);
8676 if (op_string == NULL)
8677 return 0;
8678 }
8679
8680 if (*op_string)
8681 {
8682 as_bad (_("junk `%s' after register"), op_string);
8683 return 0;
8684 }
8685 temp = r->reg_type;
8686 temp.bitfield.baseindex = 0;
8687 i.types[this_operand] = operand_type_or (i.types[this_operand],
8688 temp);
8689 i.types[this_operand].bitfield.unspecified = 0;
8690 i.op[this_operand].regs = r;
8691 i.reg_operands++;
8692 }
8693 else if (*op_string == REGISTER_PREFIX)
8694 {
8695 as_bad (_("bad register name `%s'"), op_string);
8696 return 0;
8697 }
8698 else if (*op_string == IMMEDIATE_PREFIX)
8699 {
8700 ++op_string;
8701 if (i.types[this_operand].bitfield.jumpabsolute)
8702 {
8703 as_bad (_("immediate operand illegal with absolute jump"));
8704 return 0;
8705 }
8706 if (!i386_immediate (op_string))
8707 return 0;
8708 }
8709 else if (RC_SAE_immediate (operand_string))
8710 {
8711 /* If it is a RC or SAE immediate, do nothing. */
8712 ;
8713 }
8714 else if (is_digit_char (*op_string)
8715 || is_identifier_char (*op_string)
8716 || *op_string == '"'
8717 || *op_string == '(')
8718 {
8719 /* This is a memory reference of some sort. */
8720 char *base_string;
8721
8722 /* Start and end of displacement string expression (if found). */
8723 char *displacement_string_start;
8724 char *displacement_string_end;
8725 char *vop_start;
8726
8727 do_memory_reference:
8728 if ((i.mem_operands == 1
8729 && !current_templates->start->opcode_modifier.isstring)
8730 || i.mem_operands == 2)
8731 {
8732 as_bad (_("too many memory references for `%s'"),
8733 current_templates->start->name);
8734 return 0;
8735 }
8736
8737 /* Check for base index form. We detect the base index form by
8738 looking for an ')' at the end of the operand, searching
8739 for the '(' matching it, and finding a REGISTER_PREFIX or ','
8740 after the '('. */
8741 base_string = op_string + strlen (op_string);
8742
8743 /* Handle vector operations. */
8744 vop_start = strchr (op_string, '{');
8745 if (vop_start && vop_start < base_string)
8746 {
8747 if (check_VecOperations (vop_start, base_string) == NULL)
8748 return 0;
8749 base_string = vop_start;
8750 }
8751
8752 --base_string;
8753 if (is_space_char (*base_string))
8754 --base_string;
8755
8756 /* If we only have a displacement, set up for it to be parsed later. */
8757 displacement_string_start = op_string;
8758 displacement_string_end = base_string + 1;
8759
8760 if (*base_string == ')')
8761 {
8762 char *temp_string;
8763 unsigned int parens_balanced = 1;
8764 /* We've already checked that the number of left & right ()'s are
8765 equal, so this loop will not be infinite. */
8766 do
8767 {
8768 base_string--;
8769 if (*base_string == ')')
8770 parens_balanced++;
8771 if (*base_string == '(')
8772 parens_balanced--;
8773 }
8774 while (parens_balanced);
8775
8776 temp_string = base_string;
8777
8778 /* Skip past '(' and whitespace. */
8779 ++base_string;
8780 if (is_space_char (*base_string))
8781 ++base_string;
8782
8783 if (*base_string == ','
8784 || ((i.base_reg = parse_register (base_string, &end_op))
8785 != NULL))
8786 {
8787 displacement_string_end = temp_string;
8788
8789 i.types[this_operand].bitfield.baseindex = 1;
8790
8791 if (i.base_reg)
8792 {
8793 base_string = end_op;
8794 if (is_space_char (*base_string))
8795 ++base_string;
8796 }
8797
8798 /* There may be an index reg or scale factor here. */
8799 if (*base_string == ',')
8800 {
8801 ++base_string;
8802 if (is_space_char (*base_string))
8803 ++base_string;
8804
8805 if ((i.index_reg = parse_register (base_string, &end_op))
8806 != NULL)
8807 {
8808 base_string = end_op;
8809 if (is_space_char (*base_string))
8810 ++base_string;
8811 if (*base_string == ',')
8812 {
8813 ++base_string;
8814 if (is_space_char (*base_string))
8815 ++base_string;
8816 }
8817 else if (*base_string != ')')
8818 {
8819 as_bad (_("expecting `,' or `)' "
8820 "after index register in `%s'"),
8821 operand_string);
8822 return 0;
8823 }
8824 }
8825 else if (*base_string == REGISTER_PREFIX)
8826 {
8827 end_op = strchr (base_string, ',');
8828 if (end_op)
8829 *end_op = '\0';
8830 as_bad (_("bad register name `%s'"), base_string);
8831 return 0;
8832 }
8833
8834 /* Check for scale factor. */
8835 if (*base_string != ')')
8836 {
8837 char *end_scale = i386_scale (base_string);
8838
8839 if (!end_scale)
8840 return 0;
8841
8842 base_string = end_scale;
8843 if (is_space_char (*base_string))
8844 ++base_string;
8845 if (*base_string != ')')
8846 {
8847 as_bad (_("expecting `)' "
8848 "after scale factor in `%s'"),
8849 operand_string);
8850 return 0;
8851 }
8852 }
8853 else if (!i.index_reg)
8854 {
8855 as_bad (_("expecting index register or scale factor "
8856 "after `,'; got '%c'"),
8857 *base_string);
8858 return 0;
8859 }
8860 }
8861 else if (*base_string != ')')
8862 {
8863 as_bad (_("expecting `,' or `)' "
8864 "after base register in `%s'"),
8865 operand_string);
8866 return 0;
8867 }
8868 }
8869 else if (*base_string == REGISTER_PREFIX)
8870 {
8871 end_op = strchr (base_string, ',');
8872 if (end_op)
8873 *end_op = '\0';
8874 as_bad (_("bad register name `%s'"), base_string);
8875 return 0;
8876 }
8877 }
8878
8879 /* If there's an expression beginning the operand, parse it,
8880 assuming displacement_string_start and
8881 displacement_string_end are meaningful. */
8882 if (displacement_string_start != displacement_string_end)
8883 {
8884 if (!i386_displacement (displacement_string_start,
8885 displacement_string_end))
8886 return 0;
8887 }
8888
8889 /* Special case for (%dx) while doing input/output op. */
8890 if (i.base_reg
8891 && operand_type_equal (&i.base_reg->reg_type,
8892 &reg16_inoutportreg)
8893 && i.index_reg == 0
8894 && i.log2_scale_factor == 0
8895 && i.seg[i.mem_operands] == 0
8896 && !operand_type_check (i.types[this_operand], disp))
8897 {
8898 i.types[this_operand] = inoutportreg;
8899 return 1;
8900 }
8901
8902 if (i386_index_check (operand_string) == 0)
8903 return 0;
8904 i.types[this_operand].bitfield.mem = 1;
8905 i.mem_operands++;
8906 }
8907 else
8908 {
8909 /* It's not a memory operand; argh! */
8910 as_bad (_("invalid char %s beginning operand %d `%s'"),
8911 output_invalid (*op_string),
8912 this_operand + 1,
8913 op_string);
8914 return 0;
8915 }
8916 return 1; /* Normal return. */
8917}
8918\f
8919/* Calculate the maximum variable size (i.e., excluding fr_fix)
8920 that an rs_machine_dependent frag may reach. */
8921
8922unsigned int
8923i386_frag_max_var (fragS *frag)
8924{
8925 /* The only relaxable frags are for jumps.
8926 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
8927 gas_assert (frag->fr_type == rs_machine_dependent);
8928 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
8929}
8930
8931#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8932static int
8933elf_symbol_resolved_in_segment_p (symbolS *fr_symbol, offsetT fr_var)
8934{
8935 /* STT_GNU_IFUNC symbol must go through PLT. */
8936 if ((symbol_get_bfdsym (fr_symbol)->flags
8937 & BSF_GNU_INDIRECT_FUNCTION) != 0)
8938 return 0;
8939
8940 if (!S_IS_EXTERNAL (fr_symbol))
8941 /* Symbol may be weak or local. */
8942 return !S_IS_WEAK (fr_symbol);
8943
8944 /* Global symbols with non-default visibility can't be preempted. */
8945 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol)) != STV_DEFAULT)
8946 return 1;
8947
8948 if (fr_var != NO_RELOC)
8949 switch ((enum bfd_reloc_code_real) fr_var)
8950 {
8951 case BFD_RELOC_386_PLT32:
8952 case BFD_RELOC_X86_64_PLT32:
8953 /* Symbol with PLT relocation may be preempted. */
8954 return 0;
8955 default:
8956 abort ();
8957 }
8958
8959 /* Global symbols with default visibility in a shared library may be
8960 preempted by another definition. */
8961 return !shared;
8962}
8963#endif
8964
8965/* md_estimate_size_before_relax()
8966
8967 Called just before relax() for rs_machine_dependent frags. The x86
8968 assembler uses these frags to handle variable size jump
8969 instructions.
8970
8971 Any symbol that is now undefined will not become defined.
8972 Return the correct fr_subtype in the frag.
8973 Return the initial "guess for variable size of frag" to caller.
8974 The guess is actually the growth beyond the fixed part. Whatever
8975 we do to grow the fixed or variable part contributes to our
8976 returned value. */
8977
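/* Illustrative outcome when the target cannot be resolved in this
   segment: a short "jmp" (0xEB, disp8) is rewritten as 0xE9 with a 16-
   or 32-bit displacement, and a short "je" (0x74, disp8) becomes
   0x0F 0x84 with a (d)word displacement; for COND_JUMP86 in 16-bit
   code the condition is inverted and a jump over an 0xE9 is emitted
   instead.  */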
8978int
8979md_estimate_size_before_relax (fragS *fragP, segT segment)
8980{
8981 /* We've already got fragP->fr_subtype right; all we have to do is
8982 check for un-relaxable symbols. On an ELF system, we can't relax
8983 an externally visible symbol, because it may be overridden by a
8984 shared library. */
8985 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
8986#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8987 || (IS_ELF
8988 && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol,
8989 fragP->fr_var))
8990#endif
8991#if defined (OBJ_COFF) && defined (TE_PE)
8992 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
8993 && S_IS_WEAK (fragP->fr_symbol))
8994#endif
8995 )
8996 {
8997 /* Symbol is undefined in this segment, or we need to keep a
8998 reloc so that weak symbols can be overridden. */
8999 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
9000 enum bfd_reloc_code_real reloc_type;
9001 unsigned char *opcode;
9002 int old_fr_fix;
9003
9004 if (fragP->fr_var != NO_RELOC)
9005 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
9006 else if (size == 2)
9007 reloc_type = BFD_RELOC_16_PCREL;
9008 else
9009 reloc_type = BFD_RELOC_32_PCREL;
9010
9011 old_fr_fix = fragP->fr_fix;
9012 opcode = (unsigned char *) fragP->fr_opcode;
9013
9014 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
9015 {
9016 case UNCOND_JUMP:
9017 /* Make jmp (0xeb) a (d)word displacement jump. */
9018 opcode[0] = 0xe9;
9019 fragP->fr_fix += size;
9020 fix_new (fragP, old_fr_fix, size,
9021 fragP->fr_symbol,
9022 fragP->fr_offset, 1,
9023 reloc_type);
9024 break;
9025
9026 case COND_JUMP86:
9027 if (size == 2
9028 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
9029 {
9030 /* Negate the condition, and branch past an
9031 unconditional jump. */
9032 opcode[0] ^= 1;
9033 opcode[1] = 3;
9034 /* Insert an unconditional jump. */
9035 opcode[2] = 0xe9;
9036 /* We added two extra opcode bytes, and have a two byte
9037 offset. */
9038 fragP->fr_fix += 2 + 2;
9039 fix_new (fragP, old_fr_fix + 2, 2,
9040 fragP->fr_symbol,
9041 fragP->fr_offset, 1,
9042 reloc_type);
9043 break;
9044 }
9045 /* Fall through. */
9046
9047 case COND_JUMP:
9048 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
9049 {
9050 fixS *fixP;
9051
9052 fragP->fr_fix += 1;
9053 fixP = fix_new (fragP, old_fr_fix, 1,
9054 fragP->fr_symbol,
9055 fragP->fr_offset, 1,
9056 BFD_RELOC_8_PCREL);
9057 fixP->fx_signed = 1;
9058 break;
9059 }
9060
9061 /* This changes the byte-displacement jump 0x7N
9062 to the (d)word-displacement jump 0x0f,0x8N. */
9063 opcode[1] = opcode[0] + 0x10;
9064 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
9065 /* We've added an opcode byte. */
9066 fragP->fr_fix += 1 + size;
9067 fix_new (fragP, old_fr_fix + 1, size,
9068 fragP->fr_symbol,
9069 fragP->fr_offset, 1,
9070 reloc_type);
9071 break;
9072
9073 default:
9074 BAD_CASE (fragP->fr_subtype);
9075 break;
9076 }
9077 frag_wane (fragP);
9078 return fragP->fr_fix - old_fr_fix;
9079 }
9080
9081 /* Guess size depending on current relax state. Initially the relax
9082 state will correspond to a short jump and we return 1, because
9083 the variable part of the frag (the branch offset) is one byte
9084 long. However, we can relax a section more than once and in that
9085 case we must either set fr_subtype back to the unrelaxed state,
9086 or return the value for the appropriate branch. */
9087 return md_relax_table[fragP->fr_subtype].rlx_length;
9088}
9089
9090/* Called after relax() is finished.
9091
9092 In: Address of frag.
9093 fr_type == rs_machine_dependent.
9094 fr_subtype is what the address relaxed to.
9095
9096 Out: Any fixSs and constants are set up.
9097 Caller will turn frag into a ".space 0". */
9098
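/* Note on the arithmetic below (illustrative): the value written into
   the displacement field is displacement_from_opcode_start - extension,
   i.e. target_address minus the address of the byte following the
   relaxed instruction; x86 relative jumps are taken relative to the
   next instruction, so subtracting "extension" converts the
   opcode-relative distance into the encoding the CPU expects.  */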
9099void
9100md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
9101 fragS *fragP)
9102{
9103 unsigned char *opcode;
9104 unsigned char *where_to_put_displacement = NULL;
9105 offsetT target_address;
9106 offsetT opcode_address;
9107 unsigned int extension = 0;
9108 offsetT displacement_from_opcode_start;
9109
9110 opcode = (unsigned char *) fragP->fr_opcode;
9111
9112 /* Address we want to reach in file space. */
9113 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
9114
9115 /* Address opcode resides at in file space. */
9116 opcode_address = fragP->fr_address + fragP->fr_fix;
9117
9118 /* Displacement from opcode start to fill into instruction. */
9119 displacement_from_opcode_start = target_address - opcode_address;
9120
9121 if ((fragP->fr_subtype & BIG) == 0)
9122 {
9123 /* Don't have to change opcode. */
9124 extension = 1; /* 1 opcode + 1 displacement */
9125 where_to_put_displacement = &opcode[1];
9126 }
9127 else
9128 {
9129 if (no_cond_jump_promotion
9130 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
9131 as_warn_where (fragP->fr_file, fragP->fr_line,
9132 _("long jump required"));
9133
9134 switch (fragP->fr_subtype)
9135 {
9136 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
9137 extension = 4; /* 1 opcode + 4 displacement */
9138 opcode[0] = 0xe9;
9139 where_to_put_displacement = &opcode[1];
9140 break;
9141
9142 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
9143 extension = 2; /* 1 opcode + 2 displacement */
9144 opcode[0] = 0xe9;
9145 where_to_put_displacement = &opcode[1];
9146 break;
9147
9148 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
9149 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
9150 extension = 5; /* 2 opcode + 4 displacement */
9151 opcode[1] = opcode[0] + 0x10;
9152 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
9153 where_to_put_displacement = &opcode[2];
9154 break;
9155
9156 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
9157 extension = 3; /* 2 opcode + 2 displacement */
9158 opcode[1] = opcode[0] + 0x10;
9159 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
9160 where_to_put_displacement = &opcode[2];
9161 break;
9162
9163 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
9164 extension = 4;
9165 opcode[0] ^= 1;
9166 opcode[1] = 3;
9167 opcode[2] = 0xe9;
9168 where_to_put_displacement = &opcode[3];
9169 break;
9170
9171 default:
9172 BAD_CASE (fragP->fr_subtype);
9173 break;
9174 }
9175 }
9176
9177 /* If size is less than four we are sure that the operand fits,
9178 but if it's 4, then it could be that the displacement is larger
9179 than +/- 2GB. */
9180 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
9181 && object_64bit
9182 && ((addressT) (displacement_from_opcode_start - extension
9183 + ((addressT) 1 << 31))
9184 > (((addressT) 2 << 31) - 1)))
9185 {
9186 as_bad_where (fragP->fr_file, fragP->fr_line,
9187 _("jump target out of range"));
9188 /* Make us emit 0. */
9189 displacement_from_opcode_start = extension;
9190 }
9191 /* Now put displacement after opcode. */
9192 md_number_to_chars ((char *) where_to_put_displacement,
9193 (valueT) (displacement_from_opcode_start - extension),
9194 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
9195 fragP->fr_fix += extension;
9196}
9197\f
9198/* Apply a fixup (fixP) to segment data, once it has been determined
9199 by our caller that we have all the info we need to fix it up.
9200
9201 Parameter valP is the pointer to the value of the bits.
9202
9203 On the 386, immediates, displacements, and data pointers are all in
9204 the same (little-endian) format, so we don't need to care about which
9205 we are handling. */
9206
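/* Illustrative effects below: a fixup recorded as pc-relative with
   BFD_RELOC_32 is retyped as BFD_RELOC_32_PCREL, and for
   BFD_RELOC_386_PLT32 / BFD_RELOC_X86_64_PLT32 the stored value
   becomes -4, compensating for the displacement being taken from the
   end of the 4-byte field.  */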
9207void
9208md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
9209{
9210 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
9211 valueT value = *valP;
9212
9213#if !defined (TE_Mach)
9214 if (fixP->fx_pcrel)
9215 {
9216 switch (fixP->fx_r_type)
9217 {
9218 default:
9219 break;
9220
9221 case BFD_RELOC_64:
9222 fixP->fx_r_type = BFD_RELOC_64_PCREL;
9223 break;
9224 case BFD_RELOC_32:
9225 case BFD_RELOC_X86_64_32S:
9226 fixP->fx_r_type = BFD_RELOC_32_PCREL;
9227 break;
9228 case BFD_RELOC_16:
9229 fixP->fx_r_type = BFD_RELOC_16_PCREL;
9230 break;
9231 case BFD_RELOC_8:
9232 fixP->fx_r_type = BFD_RELOC_8_PCREL;
9233 break;
9234 }
9235 }
9236
9237 if (fixP->fx_addsy != NULL
9238 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
9239 || fixP->fx_r_type == BFD_RELOC_64_PCREL
9240 || fixP->fx_r_type == BFD_RELOC_16_PCREL
9241 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
9242 && !use_rela_relocations)
9243 {
9244 /* This is a hack. There should be a better way to handle this.
9245 This covers for the fact that bfd_install_relocation will
9246 subtract the current location (for partial_inplace, PC relative
9247 relocations); see more below. */
9248#ifndef OBJ_AOUT
9249 if (IS_ELF
9250#ifdef TE_PE
9251 || OUTPUT_FLAVOR == bfd_target_coff_flavour
9252#endif
9253 )
9254 value += fixP->fx_where + fixP->fx_frag->fr_address;
9255#endif
9256#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9257 if (IS_ELF)
9258 {
9259 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
9260
9261 if ((sym_seg == seg
9262 || (symbol_section_p (fixP->fx_addsy)
9263 && sym_seg != absolute_section))
9264 && !generic_force_reloc (fixP))
9265 {
9266 /* Yes, we add the values in twice. This is because
9267 bfd_install_relocation subtracts them out again. I think
9268 bfd_install_relocation is broken, but I don't dare change
9269 it. FIXME. */
9270 value += fixP->fx_where + fixP->fx_frag->fr_address;
9271 }
9272 }
9273#endif
9274#if defined (OBJ_COFF) && defined (TE_PE)
9275 /* For some reason, the PE format does not store a
9276 section address offset for a PC relative symbol. */
9277 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
9278 || S_IS_WEAK (fixP->fx_addsy))
9279 value += md_pcrel_from (fixP);
9280#endif
9281 }
9282#if defined (OBJ_COFF) && defined (TE_PE)
9283 if (fixP->fx_addsy != NULL
9284 && S_IS_WEAK (fixP->fx_addsy)
9285 /* PR 16858: Do not modify weak function references. */
9286 && ! fixP->fx_pcrel)
9287 {
9288#if !defined (TE_PEP)
9289 /* For x86 PE, weak function symbols are neither PC-relative
9290 nor do they set S_IS_FUNCTION. So the only reliable way
9291 to detect them is to check the flags of their containing
9292 section. */
9293 if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
9294 && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
9295 ;
9296 else
9297#endif
9298 value -= S_GET_VALUE (fixP->fx_addsy);
9299 }
9300#endif
9301
9302 /* Fix a few things - the dynamic linker expects certain values here,
9303 and we must not disappoint it. */
9304#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9305 if (IS_ELF && fixP->fx_addsy)
9306 switch (fixP->fx_r_type)
9307 {
9308 case BFD_RELOC_386_PLT32:
9309 case BFD_RELOC_X86_64_PLT32:
9310 /* Make the jump instruction point to the address of the operand. At
9311 runtime we merely add the offset to the actual PLT entry. */
9312 value = -4;
9313 break;
9314
9315 case BFD_RELOC_386_TLS_GD:
9316 case BFD_RELOC_386_TLS_LDM:
9317 case BFD_RELOC_386_TLS_IE_32:
9318 case BFD_RELOC_386_TLS_IE:
9319 case BFD_RELOC_386_TLS_GOTIE:
9320 case BFD_RELOC_386_TLS_GOTDESC:
9321 case BFD_RELOC_X86_64_TLSGD:
9322 case BFD_RELOC_X86_64_TLSLD:
9323 case BFD_RELOC_X86_64_GOTTPOFF:
9324 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9325 value = 0; /* Fully resolved at runtime. No addend. */
9326 /* Fallthrough */
9327 case BFD_RELOC_386_TLS_LE:
9328 case BFD_RELOC_386_TLS_LDO_32:
9329 case BFD_RELOC_386_TLS_LE_32:
9330 case BFD_RELOC_X86_64_DTPOFF32:
9331 case BFD_RELOC_X86_64_DTPOFF64:
9332 case BFD_RELOC_X86_64_TPOFF32:
9333 case BFD_RELOC_X86_64_TPOFF64:
9334 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9335 break;
9336
9337 case BFD_RELOC_386_TLS_DESC_CALL:
9338 case BFD_RELOC_X86_64_TLSDESC_CALL:
9339 value = 0; /* Fully resolved at runtime. No addend. */
9340 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9341 fixP->fx_done = 0;
9342 return;
9343
9344 case BFD_RELOC_VTABLE_INHERIT:
9345 case BFD_RELOC_VTABLE_ENTRY:
9346 fixP->fx_done = 0;
9347 return;
9348
9349 default:
9350 break;
9351 }
9352#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
9353 *valP = value;
9354#endif /* !defined (TE_Mach) */
9355
9356 /* Are we finished with this relocation now? */
9357 if (fixP->fx_addsy == NULL)
9358 fixP->fx_done = 1;
9359#if defined (OBJ_COFF) && defined (TE_PE)
9360 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
9361 {
9362 fixP->fx_done = 0;
9363 /* Remember value for tc_gen_reloc. */
9364 fixP->fx_addnumber = value;
9365 /* Clear out the frag for now. */
9366 value = 0;
9367 }
9368#endif
9369 else if (use_rela_relocations)
9370 {
9371 fixP->fx_no_overflow = 1;
9372 /* Remember value for tc_gen_reloc. */
9373 fixP->fx_addnumber = value;
9374 value = 0;
9375 }
9376
9377 md_number_to_chars (p, value, fixP->fx_size);
9378}
9379\f
9380const char *
9381md_atof (int type, char *litP, int *sizeP)
9382{
9383 /* This outputs the LITTLENUMs in REVERSE order,
9384 as required by the little-endian 386. */
9385 return ieee_md_atof (type, litP, sizeP, FALSE);
9386}
9387\f
9388static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
9389
9390static char *
9391output_invalid (int c)
9392{
9393 if (ISPRINT (c))
9394 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
9395 "'%c'", c);
9396 else
9397 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
9398 "(0x%x)", (unsigned char) c);
9399 return output_invalid_buf;
9400}
9401
9402/* REG_STRING starts *before* REGISTER_PREFIX. */
9403
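/* Examples of what is accepted here (illustrative): "%eax", "%st(3)"
   (spaces are allowed inside the parentheses, e.g. "%st ( 3 )"), and,
   with -mnaked-reg, plain "eax" as long as it is not part of a longer
   identifier such as "eax_var".  Registers the selected .arch does not
   provide (say %xmm0 without SSE) are rejected.  */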
9404static const reg_entry *
9405parse_real_register (char *reg_string, char **end_op)
9406{
9407 char *s = reg_string;
9408 char *p;
9409 char reg_name_given[MAX_REG_NAME_SIZE + 1];
9410 const reg_entry *r;
9411
9412 /* Skip possible REGISTER_PREFIX and possible whitespace. */
9413 if (*s == REGISTER_PREFIX)
9414 ++s;
9415
9416 if (is_space_char (*s))
9417 ++s;
9418
9419 p = reg_name_given;
9420 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
9421 {
9422 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
9423 return (const reg_entry *) NULL;
9424 s++;
9425 }
9426
9427 /* For naked regs, make sure that we are not dealing with an identifier.
9428 This prevents confusing an identifier like `eax_var' with register
9429 `eax'. */
9430 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
9431 return (const reg_entry *) NULL;
9432
9433 *end_op = s;
9434
9435 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
9436
9437 /* Handle floating point regs, allowing spaces in the (i) part. */
9438 if (r == i386_regtab /* %st is first entry of table */)
9439 {
9440 if (is_space_char (*s))
9441 ++s;
9442 if (*s == '(')
9443 {
9444 ++s;
9445 if (is_space_char (*s))
9446 ++s;
9447 if (*s >= '0' && *s <= '7')
9448 {
9449 int fpr = *s - '0';
9450 ++s;
9451 if (is_space_char (*s))
9452 ++s;
9453 if (*s == ')')
9454 {
9455 *end_op = s + 1;
9456 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
9457 know (r);
9458 return r + fpr;
9459 }
9460 }
9461 /* We have "%st(" then garbage. */
9462 return (const reg_entry *) NULL;
9463 }
9464 }
9465
9466 if (r == NULL || allow_pseudo_reg)
9467 return r;
9468
9469 if (operand_type_all_zero (&r->reg_type))
9470 return (const reg_entry *) NULL;
9471
9472 if ((r->reg_type.bitfield.reg32
9473 || r->reg_type.bitfield.sreg3
9474 || r->reg_type.bitfield.control
9475 || r->reg_type.bitfield.debug
9476 || r->reg_type.bitfield.test)
9477 && !cpu_arch_flags.bitfield.cpui386)
9478 return (const reg_entry *) NULL;
9479
9480 if (r->reg_type.bitfield.floatreg
9481 && !cpu_arch_flags.bitfield.cpu8087
9482 && !cpu_arch_flags.bitfield.cpu287
9483 && !cpu_arch_flags.bitfield.cpu387)
9484 return (const reg_entry *) NULL;
9485
9486 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
9487 return (const reg_entry *) NULL;
9488
9489 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
9490 return (const reg_entry *) NULL;
9491
9492 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
9493 return (const reg_entry *) NULL;
9494
9495 if ((r->reg_type.bitfield.regzmm || r->reg_type.bitfield.regmask)
9496 && !cpu_arch_flags.bitfield.cpuavx512f)
9497 return (const reg_entry *) NULL;
9498
9499 /* Don't allow a fake index register unless allow_index_reg is nonzero. */
9500 if (!allow_index_reg
9501 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
9502 return (const reg_entry *) NULL;
9503
9504 /* The upper 16 vector registers are only available with VREX in 64bit
9505 mode. */
9506 if ((r->reg_flags & RegVRex))
9507 {
9508 if (!cpu_arch_flags.bitfield.cpuvrex
9509 || flag_code != CODE_64BIT)
9510 return (const reg_entry *) NULL;
9511
9512 i.need_vrex = 1;
9513 }
9514
9515 if (((r->reg_flags & (RegRex64 | RegRex))
9516 || r->reg_type.bitfield.reg64)
9517 && (!cpu_arch_flags.bitfield.cpulm
9518 || !operand_type_equal (&r->reg_type, &control))
9519 && flag_code != CODE_64BIT)
9520 return (const reg_entry *) NULL;
9521
9522 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
9523 return (const reg_entry *) NULL;
9524
9525 return r;
9526}
9527
9528/* REG_STRING starts *before* REGISTER_PREFIX. */
9529
9530static const reg_entry *
9531parse_register (char *reg_string, char **end_op)
9532{
9533 const reg_entry *r;
9534
9535 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
9536 r = parse_real_register (reg_string, end_op);
9537 else
9538 r = NULL;
9539 if (!r)
9540 {
9541 char *save = input_line_pointer;
9542 char c;
9543 symbolS *symbolP;
9544
9545 input_line_pointer = reg_string;
9546 c = get_symbol_name (&reg_string);
9547 symbolP = symbol_find (reg_string);
9548 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
9549 {
9550 const expressionS *e = symbol_get_value_expression (symbolP);
9551
9552 know (e->X_op == O_register);
9553 know (e->X_add_number >= 0
9554 && (valueT) e->X_add_number < i386_regtab_size);
9555 r = i386_regtab + e->X_add_number;
9556 if ((r->reg_flags & RegVRex))
9557 i.need_vrex = 1;
9558 *end_op = input_line_pointer;
9559 }
9560 *input_line_pointer = c;
9561 input_line_pointer = save;
9562 }
9563 return r;
9564}
9565
9566int
9567i386_parse_name (char *name, expressionS *e, char *nextcharP)
9568{
9569 const reg_entry *r;
9570 char *end = input_line_pointer;
9571
9572 *end = *nextcharP;
9573 r = parse_register (name, &input_line_pointer);
9574 if (r && end <= input_line_pointer)
9575 {
9576 *nextcharP = *input_line_pointer;
9577 *input_line_pointer = 0;
9578 e->X_op = O_register;
9579 e->X_add_number = r - i386_regtab;
9580 return 1;
9581 }
9582 input_line_pointer = end;
9583 *end = 0;
9584 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
9585}
9586
9587void
9588md_operand (expressionS *e)
9589{
9590 char *end;
9591 const reg_entry *r;
9592
9593 switch (*input_line_pointer)
9594 {
9595 case REGISTER_PREFIX:
9596 r = parse_real_register (input_line_pointer, &end);
9597 if (r)
9598 {
9599 e->X_op = O_register;
9600 e->X_add_number = r - i386_regtab;
9601 input_line_pointer = end;
9602 }
9603 break;
9604
9605 case '[':
9606 gas_assert (intel_syntax);
9607 end = input_line_pointer++;
9608 expression (e);
9609 if (*input_line_pointer == ']')
9610 {
9611 ++input_line_pointer;
9612 e->X_op_symbol = make_expr_symbol (e);
9613 e->X_add_symbol = NULL;
9614 e->X_add_number = 0;
9615 e->X_op = O_index;
9616 }
9617 else
9618 {
9619 e->X_op = O_absent;
9620 input_line_pointer = end;
9621 }
9622 break;
9623 }
9624}
9625
9626\f
9627#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9628const char *md_shortopts = "kVQ:sqn";
9629#else
9630const char *md_shortopts = "qn";
9631#endif
9632
9633#define OPTION_32 (OPTION_MD_BASE + 0)
9634#define OPTION_64 (OPTION_MD_BASE + 1)
9635#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
9636#define OPTION_MARCH (OPTION_MD_BASE + 3)
9637#define OPTION_MTUNE (OPTION_MD_BASE + 4)
9638#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
9639#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
9640#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
9641#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
9642#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
9643#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
9644#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
9645#define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
9646#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
9647#define OPTION_X32 (OPTION_MD_BASE + 14)
9648#define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
9649#define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
9650#define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
9651#define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
9652#define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
9653#define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
9654#define OPTION_MSHARED (OPTION_MD_BASE + 21)
9655#define OPTION_MAMD64 (OPTION_MD_BASE + 22)
9656#define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
9657#define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
9658#define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 25)
9659
9660struct option md_longopts[] =
9661{
9662 {"32", no_argument, NULL, OPTION_32},
9663#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9664 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9665 {"64", no_argument, NULL, OPTION_64},
9666#endif
9667#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9668 {"x32", no_argument, NULL, OPTION_X32},
9669 {"mshared", no_argument, NULL, OPTION_MSHARED},
9670#endif
9671 {"divide", no_argument, NULL, OPTION_DIVIDE},
9672 {"march", required_argument, NULL, OPTION_MARCH},
9673 {"mtune", required_argument, NULL, OPTION_MTUNE},
9674 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
9675 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
9676 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
9677 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
9678 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
9679 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
9680 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
9681 {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
9682 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
9683 {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
9684 {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
9685 {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
9686# if defined (TE_PE) || defined (TE_PEP)
9687 {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
9688#endif
9689 {"momit-lock-prefix", required_argument, NULL, OPTION_MOMIT_LOCK_PREFIX},
9690 {"mfence-as-lock-add", required_argument, NULL, OPTION_MFENCE_AS_LOCK_ADD},
9691 {"mrelax-relocations", required_argument, NULL, OPTION_MRELAX_RELOCATIONS},
9692 {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG},
9693 {"mamd64", no_argument, NULL, OPTION_MAMD64},
9694 {"mintel64", no_argument, NULL, OPTION_MINTEL64},
9695 {NULL, no_argument, NULL, 0}
9696};
9697size_t md_longopts_size = sizeof (md_longopts);
9698
9699int
9700md_parse_option (int c, const char *arg)
9701{
9702 unsigned int j;
9703 char *arch, *next;
9704
9705 switch (c)
9706 {
9707 case 'n':
9708 optimize_align_code = 0;
9709 break;
9710
9711 case 'q':
9712 quiet_warnings = 1;
9713 break;
9714
9715#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9716 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
9717 should be emitted or not. FIXME: Not implemented. */
9718 case 'Q':
9719 break;
9720
9721 /* -V: SVR4 argument to print version ID. */
9722 case 'V':
9723 print_version_id ();
9724 break;
9725
9726 /* -k: Ignore for FreeBSD compatibility. */
9727 case 'k':
9728 break;
9729
9730 case 's':
9731 /* -s: On i386 Solaris, this tells the native assembler to use
9732 .stab instead of .stab.excl. We always use .stab anyhow. */
9733 break;
9734
9735 case OPTION_MSHARED:
9736 shared = 1;
9737 break;
9738#endif
9739#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9740 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9741 case OPTION_64:
9742 {
9743 const char **list, **l;
9744
9745 list = bfd_target_list ();
9746 for (l = list; *l != NULL; l++)
9747 if (CONST_STRNEQ (*l, "elf64-x86-64")
9748 || strcmp (*l, "coff-x86-64") == 0
9749 || strcmp (*l, "pe-x86-64") == 0
9750 || strcmp (*l, "pei-x86-64") == 0
9751 || strcmp (*l, "mach-o-x86-64") == 0)
9752 {
9753 default_arch = "x86_64";
9754 break;
9755 }
9756 if (*l == NULL)
9757 as_fatal (_("no compiled in support for x86_64"));
9758 free (list);
9759 }
9760 break;
9761#endif
9762
9763#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9764 case OPTION_X32:
9765 if (IS_ELF)
9766 {
9767 const char **list, **l;
9768
9769 list = bfd_target_list ();
9770 for (l = list; *l != NULL; l++)
9771 if (CONST_STRNEQ (*l, "elf32-x86-64"))
9772 {
9773 default_arch = "x86_64:32";
9774 break;
9775 }
9776 if (*l == NULL)
9777 as_fatal (_("no compiled in support for 32bit x86_64"));
9778 free (list);
9779 }
9780 else
9781 as_fatal (_("32bit x86_64 is only supported for ELF"));
9782 break;
9783#endif
9784
9785 case OPTION_32:
9786 default_arch = "i386";
9787 break;
9788
9789 case OPTION_DIVIDE:
9790#ifdef SVR4_COMMENT_CHARS
9791 {
9792 char *n, *t;
9793 const char *s;
9794
9795 n = XNEWVEC (char, strlen (i386_comment_chars) + 1);
9796 t = n;
9797 for (s = i386_comment_chars; *s != '\0'; s++)
9798 if (*s != '/')
9799 *t++ = *s;
9800 *t = '\0';
9801 i386_comment_chars = n;
9802 }
9803#endif
9804 break;
9805
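    /* -march accepts a processor name optionally followed by "+ext"
       items, each matched against cpu_arch[].  For illustration
       (assuming both entries exist in the table), "-march=core2+avx"
       selects the core2 base ISA and then ORs in the AVX feature
       flags; negated ".no..." entries instead clear flags.  */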
9806 case OPTION_MARCH:
9807 arch = xstrdup (arg);
9808 do
9809 {
9810 if (*arch == '.')
9811 as_fatal (_("invalid -march= option: `%s'"), arg);
9812 next = strchr (arch, '+');
9813 if (next)
9814 *next++ = '\0';
9815 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
9816 {
9817 if (strcmp (arch, cpu_arch [j].name) == 0)
9818 {
9819 /* Processor. */
9820 if (! cpu_arch[j].flags.bitfield.cpui386)
9821 continue;
9822
9823 cpu_arch_name = cpu_arch[j].name;
9824 cpu_sub_arch_name = NULL;
9825 cpu_arch_flags = cpu_arch[j].flags;
9826 cpu_arch_isa = cpu_arch[j].type;
9827 cpu_arch_isa_flags = cpu_arch[j].flags;
9828 if (!cpu_arch_tune_set)
9829 {
9830 cpu_arch_tune = cpu_arch_isa;
9831 cpu_arch_tune_flags = cpu_arch_isa_flags;
9832 }
9833 break;
9834 }
9835 else if (*cpu_arch [j].name == '.'
9836 && strcmp (arch, cpu_arch [j].name + 1) == 0)
9837 {
9838 /* ISA extension. */
9839 i386_cpu_flags flags;
9840
9841 if (!cpu_arch[j].negated)
9842 flags = cpu_flags_or (cpu_arch_flags,
9843 cpu_arch[j].flags);
9844 else
9845 flags = cpu_flags_and_not (cpu_arch_flags,
9846 cpu_arch[j].flags);
9847
9848 if (!valid_iamcu_cpu_flags (&flags))
9849 as_fatal (_("`%s' isn't valid for Intel MCU"), arch);
9850 else if (!cpu_flags_equal (&flags, &cpu_arch_flags))
9851 {
9852 if (cpu_sub_arch_name)
9853 {
9854 char *name = cpu_sub_arch_name;
9855 cpu_sub_arch_name = concat (name,
9856 cpu_arch[j].name,
9857 (const char *) NULL);
9858 free (name);
9859 }
9860 else
9861 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
9862 cpu_arch_flags = flags;
9863 cpu_arch_isa_flags = flags;
9864 }
9865 break;
9866 }
9867 }
9868
9869 if (j >= ARRAY_SIZE (cpu_arch))
9870 as_fatal (_("invalid -march= option: `%s'"), arg);
9871
9872 arch = next;
9873 }
9874 while (next != NULL );
9875 break;
9876
9877 case OPTION_MTUNE:
9878 if (*arg == '.')
9879 as_fatal (_("invalid -mtune= option: `%s'"), arg);
9880 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
9881 {
9882 if (strcmp (arg, cpu_arch [j].name) == 0)
9883 {
9884 cpu_arch_tune_set = 1;
9885 cpu_arch_tune = cpu_arch [j].type;
9886 cpu_arch_tune_flags = cpu_arch[j].flags;
9887 break;
9888 }
9889 }
9890 if (j >= ARRAY_SIZE (cpu_arch))
9891 as_fatal (_("invalid -mtune= option: `%s'"), arg);
9892 break;
9893
9894 case OPTION_MMNEMONIC:
9895 if (strcasecmp (arg, "att") == 0)
9896 intel_mnemonic = 0;
9897 else if (strcasecmp (arg, "intel") == 0)
9898 intel_mnemonic = 1;
9899 else
9900 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
9901 break;
9902
9903 case OPTION_MSYNTAX:
9904 if (strcasecmp (arg, "att") == 0)
9905 intel_syntax = 0;
9906 else if (strcasecmp (arg, "intel") == 0)
9907 intel_syntax = 1;
9908 else
9909 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
9910 break;
9911
9912 case OPTION_MINDEX_REG:
9913 allow_index_reg = 1;
9914 break;
9915
9916 case OPTION_MNAKED_REG:
9917 allow_naked_reg = 1;
9918 break;
9919
9920 case OPTION_MOLD_GCC:
9921 old_gcc = 1;
9922 break;
9923
9924 case OPTION_MSSE2AVX:
9925 sse2avx = 1;
9926 break;
9927
9928 case OPTION_MSSE_CHECK:
9929 if (strcasecmp (arg, "error") == 0)
9930 sse_check = check_error;
9931 else if (strcasecmp (arg, "warning") == 0)
9932 sse_check = check_warning;
9933 else if (strcasecmp (arg, "none") == 0)
9934 sse_check = check_none;
9935 else
9936 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
9937 break;
9938
9939 case OPTION_MOPERAND_CHECK:
9940 if (strcasecmp (arg, "error") == 0)
9941 operand_check = check_error;
9942 else if (strcasecmp (arg, "warning") == 0)
9943 operand_check = check_warning;
9944 else if (strcasecmp (arg, "none") == 0)
9945 operand_check = check_none;
9946 else
9947 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
9948 break;
9949
9950 case OPTION_MAVXSCALAR:
9951 if (strcasecmp (arg, "128") == 0)
9952 avxscalar = vex128;
9953 else if (strcasecmp (arg, "256") == 0)
9954 avxscalar = vex256;
9955 else
9956 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
9957 break;
9958
9959 case OPTION_MADD_BND_PREFIX:
9960 add_bnd_prefix = 1;
9961 break;
9962
9963 case OPTION_MEVEXLIG:
9964 if (strcmp (arg, "128") == 0)
9965 evexlig = evexl128;
9966 else if (strcmp (arg, "256") == 0)
9967 evexlig = evexl256;
9968 else if (strcmp (arg, "512") == 0)
9969 evexlig = evexl512;
9970 else
9971 as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
9972 break;
9973
9974 case OPTION_MEVEXRCIG:
9975 if (strcmp (arg, "rne") == 0)
9976 evexrcig = rne;
9977 else if (strcmp (arg, "rd") == 0)
9978 evexrcig = rd;
9979 else if (strcmp (arg, "ru") == 0)
9980 evexrcig = ru;
9981 else if (strcmp (arg, "rz") == 0)
9982 evexrcig = rz;
9983 else
9984 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg);
9985 break;
9986
9987 case OPTION_MEVEXWIG:
9988 if (strcmp (arg, "0") == 0)
9989 evexwig = evexw0;
9990 else if (strcmp (arg, "1") == 0)
9991 evexwig = evexw1;
9992 else
9993 as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
9994 break;
9995
9996# if defined (TE_PE) || defined (TE_PEP)
9997 case OPTION_MBIG_OBJ:
9998 use_big_obj = 1;
9999 break;
10000#endif
10001
10002 case OPTION_MOMIT_LOCK_PREFIX:
10003 if (strcasecmp (arg, "yes") == 0)
10004 omit_lock_prefix = 1;
10005 else if (strcasecmp (arg, "no") == 0)
10006 omit_lock_prefix = 0;
10007 else
10008 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg);
10009 break;
10010
10011 case OPTION_MFENCE_AS_LOCK_ADD:
10012 if (strcasecmp (arg, "yes") == 0)
10013 avoid_fence = 1;
10014 else if (strcasecmp (arg, "no") == 0)
10015 avoid_fence = 0;
10016 else
10017 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg);
10018 break;
10019
10020 case OPTION_MRELAX_RELOCATIONS:
10021 if (strcasecmp (arg, "yes") == 0)
10022 generate_relax_relocations = 1;
10023 else if (strcasecmp (arg, "no") == 0)
10024 generate_relax_relocations = 0;
10025 else
10026 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg);
10027 break;
10028
10029 case OPTION_MAMD64:
10030 cpu_arch_flags.bitfield.cpuamd64 = 1;
10031 cpu_arch_flags.bitfield.cpuintel64 = 0;
10032 cpu_arch_isa_flags.bitfield.cpuamd64 = 1;
10033 cpu_arch_isa_flags.bitfield.cpuintel64 = 0;
10034 break;
10035
10036 case OPTION_MINTEL64:
10037 cpu_arch_flags.bitfield.cpuamd64 = 0;
10038 cpu_arch_flags.bitfield.cpuintel64 = 1;
10039 cpu_arch_isa_flags.bitfield.cpuamd64 = 0;
10040 cpu_arch_isa_flags.bitfield.cpuintel64 = 1;
10041 break;
10042
10043 default:
10044 return 0;
10045 }
10046 return 1;
10047}
10048
10049#define MESSAGE_TEMPLATE \
10050" "
10051
10052static void
10053show_arch (FILE *stream, int ext, int check)
10054{
10055 static char message[] = MESSAGE_TEMPLATE;
10056 char *start = message + 27;
10057 char *p;
10058 int size = sizeof (MESSAGE_TEMPLATE);
10059 int left;
10060 const char *name;
10061 int len;
10062 unsigned int j;
10063
10064 p = start;
10065 left = size - (start - message);
10066 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
10067 {
10068 /* Should it be skipped? */
10069 if (cpu_arch [j].skip)
10070 continue;
10071
10072 name = cpu_arch [j].name;
10073 len = cpu_arch [j].len;
10074 if (*name == '.')
10075 {
10076 /* It is an extension. Skip if we aren't asked to show it. */
10077 if (ext)
10078 {
10079 name++;
10080 len--;
10081 }
10082 else
10083 continue;
10084 }
10085 else if (ext)
10086 {
10087	  /* It is a processor.  Skip if we only show extensions.  */
10088 continue;
10089 }
10090 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
10091 {
10092 /* It is an impossible processor - skip. */
10093 continue;
10094 }
10095
10096 /* Reserve 2 spaces for ", " or ",\0" */
10097 left -= len + 2;
10098
10099 /* Check if there is any room. */
10100 if (left >= 0)
10101 {
10102 if (p != start)
10103 {
10104 *p++ = ',';
10105 *p++ = ' ';
10106 }
10107 p = mempcpy (p, name, len);
10108 }
10109 else
10110 {
10111 /* Output the current message now and start a new one. */
10112 *p++ = ',';
10113 *p = '\0';
10114 fprintf (stream, "%s\n", message);
10115 p = start;
10116 left = size - (start - message) - len - 2;
10117
10118 gas_assert (left >= 0);
10119
10120 p = mempcpy (p, name, len);
10121 }
10122 }
10123
10124 *p = '\0';
10125 fprintf (stream, "%s\n", message);
10126}
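/* A worked example of the wrapping above: each name costs len + 2
   bytes (", " or the final ",\0"); once `left' would go negative the
   current line is printed and a fresh one is started at column 27,
   matching the indentation of the option descriptions emitted by
   md_show_usage below.  */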
10127
10128void
10129md_show_usage (FILE *stream)
10130{
10131#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10132 fprintf (stream, _("\
10133 -Q ignored\n\
10134 -V print assembler version number\n\
10135 -k ignored\n"));
10136#endif
10137 fprintf (stream, _("\
10138 -n Do not optimize code alignment\n\
10139 -q quieten some warnings\n"));
10140#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10141 fprintf (stream, _("\
10142 -s ignored\n"));
10143#endif
10144#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
10145 || defined (TE_PE) || defined (TE_PEP))
10146 fprintf (stream, _("\
10147 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
10148#endif
10149#ifdef SVR4_COMMENT_CHARS
10150 fprintf (stream, _("\
10151 --divide do not treat `/' as a comment character\n"));
10152#else
10153 fprintf (stream, _("\
10154 --divide ignored\n"));
10155#endif
10156 fprintf (stream, _("\
10157 -march=CPU[,+EXTENSION...]\n\
10158 generate code for CPU and EXTENSION, CPU is one of:\n"));
10159 show_arch (stream, 0, 1);
10160 fprintf (stream, _("\
10161 EXTENSION is combination of:\n"));
10162 show_arch (stream, 1, 0);
10163 fprintf (stream, _("\
10164 -mtune=CPU optimize for CPU, CPU is one of:\n"));
10165 show_arch (stream, 0, 0);
10166 fprintf (stream, _("\
10167 -msse2avx encode SSE instructions with VEX prefix\n"));
10168 fprintf (stream, _("\
10169 -msse-check=[none|error|warning]\n\
10170 check SSE instructions\n"));
10171 fprintf (stream, _("\
10172 -moperand-check=[none|error|warning]\n\
10173 check operand combinations for validity\n"));
10174 fprintf (stream, _("\
10175 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
10176 length\n"));
10177 fprintf (stream, _("\
10178 -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
10179 length\n"));
10180 fprintf (stream, _("\
10181 -mevexwig=[0|1] encode EVEX instructions with specific EVEX.W value\n\
10182 for EVEX.W bit ignored instructions\n"));
10183 fprintf (stream, _("\
10184 -mevexrcig=[rne|rd|ru|rz]\n\
10185 encode EVEX instructions with specific EVEX.RC value\n\
10186 for SAE-only ignored instructions\n"));
10187 fprintf (stream, _("\
10188 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
10189 fprintf (stream, _("\
10190 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
10191 fprintf (stream, _("\
10192 -mindex-reg support pseudo index registers\n"));
10193 fprintf (stream, _("\
10194 -mnaked-reg don't require `%%' prefix for registers\n"));
10195 fprintf (stream, _("\
10196 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
10197 fprintf (stream, _("\
10198 -madd-bnd-prefix add BND prefix for all valid branches\n"));
10199 fprintf (stream, _("\
10200 -mshared disable branch optimization for shared code\n"));
10201# if defined (TE_PE) || defined (TE_PEP)
10202 fprintf (stream, _("\
10203 -mbig-obj generate big object files\n"));
10204#endif
10205 fprintf (stream, _("\
10206 -momit-lock-prefix=[no|yes]\n\
10207 strip all lock prefixes\n"));
10208 fprintf (stream, _("\
10209 -mfence-as-lock-add=[no|yes]\n\
10210 encode lfence, mfence and sfence as\n\
10211 lock addl $0x0, (%%{re}sp)\n"));
10212 fprintf (stream, _("\
10213 -mrelax-relocations=[no|yes]\n\
10214 generate relax relocations\n"));
10215 fprintf (stream, _("\
10216 -mamd64 accept only AMD64 ISA\n"));
10217 fprintf (stream, _("\
10218 -mintel64 accept only Intel64 ISA\n"));
10219}
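/* Purely illustrative command line exercising several of the options
   listed above (the file names are hypothetical, and the extension
   spelling follows the "-march=CPU[,+EXTENSION...]" form shown in the
   usage text):

       as --64 -march=corei7+avx2 -mtune=generic64 \
          -msyntax=intel -mmnemonic=intel -o foo.o foo.s  */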
10220
10221#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
10222 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
10223 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
10224
10225/* Pick the target format to use. */
10226
10227const char *
10228i386_target_format (void)
10229{
10230 if (!strncmp (default_arch, "x86_64", 6))
10231 {
10232 update_code_flag (CODE_64BIT, 1);
10233 if (default_arch[6] == '\0')
10234 x86_elf_abi = X86_64_ABI;
10235 else
10236 x86_elf_abi = X86_64_X32_ABI;
10237 }
10238 else if (!strcmp (default_arch, "i386"))
10239 update_code_flag (CODE_32BIT, 1);
10240 else if (!strcmp (default_arch, "iamcu"))
10241 {
10242 update_code_flag (CODE_32BIT, 1);
10243 if (cpu_arch_isa == PROCESSOR_UNKNOWN)
10244 {
10245 static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
10246 cpu_arch_name = "iamcu";
10247 cpu_sub_arch_name = NULL;
10248 cpu_arch_flags = iamcu_flags;
10249 cpu_arch_isa = PROCESSOR_IAMCU;
10250 cpu_arch_isa_flags = iamcu_flags;
10251 if (!cpu_arch_tune_set)
10252 {
10253 cpu_arch_tune = cpu_arch_isa;
10254 cpu_arch_tune_flags = cpu_arch_isa_flags;
10255 }
10256 }
10257 else
10258 as_fatal (_("Intel MCU doesn't support `%s' architecture"),
10259 cpu_arch_name);
10260 }
10261 else
10262 as_fatal (_("unknown architecture"));
10263
10264 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
10265 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
10266 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
10267 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
10268
10269 switch (OUTPUT_FLAVOR)
10270 {
10271#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
10272 case bfd_target_aout_flavour:
10273 return AOUT_TARGET_FORMAT;
10274#endif
10275#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
10276# if defined (TE_PE) || defined (TE_PEP)
10277 case bfd_target_coff_flavour:
10278 if (flag_code == CODE_64BIT)
10279 return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
10280 else
10281 return "pe-i386";
10282# elif defined (TE_GO32)
10283 case bfd_target_coff_flavour:
10284 return "coff-go32";
10285# else
10286 case bfd_target_coff_flavour:
10287 return "coff-i386";
10288# endif
10289#endif
10290#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
10291 case bfd_target_elf_flavour:
10292 {
10293 const char *format;
10294
10295 switch (x86_elf_abi)
10296 {
10297 default:
10298 format = ELF_TARGET_FORMAT;
10299 break;
10300 case X86_64_ABI:
10301 use_rela_relocations = 1;
10302 object_64bit = 1;
10303 format = ELF_TARGET_FORMAT64;
10304 break;
10305 case X86_64_X32_ABI:
10306 use_rela_relocations = 1;
10307 object_64bit = 1;
10308 disallow_64bit_reloc = 1;
10309 format = ELF_TARGET_FORMAT32;
10310 break;
10311 }
10312 if (cpu_arch_isa == PROCESSOR_L1OM)
10313 {
10314 if (x86_elf_abi != X86_64_ABI)
10315 as_fatal (_("Intel L1OM is 64bit only"));
10316 return ELF_TARGET_L1OM_FORMAT;
10317 }
10318 else if (cpu_arch_isa == PROCESSOR_K1OM)
10319 {
10320 if (x86_elf_abi != X86_64_ABI)
10321 as_fatal (_("Intel K1OM is 64bit only"));
10322 return ELF_TARGET_K1OM_FORMAT;
10323 }
10324 else if (cpu_arch_isa == PROCESSOR_IAMCU)
10325 {
10326 if (x86_elf_abi != I386_ABI)
10327 as_fatal (_("Intel MCU is 32bit only"));
10328 return ELF_TARGET_IAMCU_FORMAT;
10329 }
10330 else
10331 return format;
10332 }
10333#endif
10334#if defined (OBJ_MACH_O)
10335 case bfd_target_mach_o_flavour:
10336 if (flag_code == CODE_64BIT)
10337 {
10338 use_rela_relocations = 1;
10339 object_64bit = 1;
10340 return "mach-o-x86-64";
10341 }
10342 else
10343 return "mach-o-i386";
10344#endif
10345 default:
10346 abort ();
10347 return NULL;
10348 }
10349}
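/* For an ELF configuration this resolves, e.g., to ELF_TARGET_FORMAT64
   with rela relocations for "--64", to ELF_TARGET_FORMAT32 with 64-bit
   relocations disallowed for "--x32", and to the plain 32-bit
   ELF_TARGET_FORMAT otherwise; the L1OM/K1OM/IAMCU checks then swap in
   their dedicated target names.  */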
10350
10351#endif /* OBJ_MAYBE_ more than one */
10352\f
10353symbolS *
10354md_undefined_symbol (char *name)
10355{
10356 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
10357 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
10358 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
10359 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
10360 {
10361 if (!GOT_symbol)
10362 {
10363 if (symbol_find (name))
10364 as_bad (_("GOT already in symbol table"));
10365 GOT_symbol = symbol_new (name, undefined_section,
10366 (valueT) 0, &zero_address_frag);
10367	}
10368 return GOT_symbol;
10369 }
10370 return 0;
10371}
10372
10373/* Round up a section size to the appropriate boundary. */
10374
10375valueT
10376md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
10377{
10378#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10379 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
10380 {
10381 /* For a.out, force the section size to be aligned. If we don't do
10382 this, BFD will align it for us, but it will not write out the
10383 final bytes of the section. This may be a bug in BFD, but it is
10384 easier to fix it here since that is how the other a.out targets
10385 work. */
10386 int align;
10387
10388 align = bfd_get_section_alignment (stdoutput, segment);
10389 size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
10390 }
10391#endif
10392
10393 return size;
10394}
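/* Worked example of the a.out rounding above: with a section alignment
   of 2 (4-byte alignment) and size == 10, the expression evaluates to
   (10 + 3) & ~3 == 12, padding the section to the next 4-byte
   boundary.  */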
10395
10396/* On the i386, PC-relative offsets are relative to the start of the
10397 next instruction. That is, the address of the offset, plus its
10398 size, since the offset is always the last part of the insn. */
10399
10400long
10401md_pcrel_from (fixS *fixP)
10402{
10403 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
10404}
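/* Worked example: for a 4-byte displacement located at address 0x100
   (fr_address + fx_where == 0x100, fx_size == 4) this returns 0x104,
   the address of the next instruction, which is the base the CPU uses
   for pc-relative operands.  */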
10405
10406#ifndef I386COFF
10407
10408static void
10409s_bss (int ignore ATTRIBUTE_UNUSED)
10410{
10411 int temp;
10412
10413#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10414 if (IS_ELF)
10415 obj_elf_section_change_hook ();
10416#endif
10417 temp = get_absolute_expression ();
10418 subseg_set (bss_section, (subsegT) temp);
10419 demand_empty_rest_of_line ();
10420}
10421
10422#endif
10423
10424void
10425i386_validate_fix (fixS *fixp)
10426{
10427 if (fixp->fx_subsy)
10428 {
10429 if (fixp->fx_subsy == GOT_symbol)
10430 {
10431 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
10432 {
10433 if (!object_64bit)
10434 abort ();
10435#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10436 if (fixp->fx_tcbit2)
10437 fixp->fx_r_type = (fixp->fx_tcbit
10438 ? BFD_RELOC_X86_64_REX_GOTPCRELX
10439 : BFD_RELOC_X86_64_GOTPCRELX);
10440 else
10441#endif
10442 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
10443 }
10444 else
10445 {
10446 if (!object_64bit)
10447 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
10448 else
10449 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
10450 }
10451 fixp->fx_subsy = 0;
10452 }
10453 }
10454#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10455 else if (!object_64bit)
10456 {
10457 if (fixp->fx_r_type == BFD_RELOC_386_GOT32
10458 && fixp->fx_tcbit2)
10459 fixp->fx_r_type = BFD_RELOC_386_GOT32X;
10460 }
10461#endif
10462}
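/* Illustrative: in 32-bit code a relaxable GOT load such as
   "movl foo@GOT(%ebx), %eax" (fx_tcbit2 set earlier) has its
   BFD_RELOC_386_GOT32 upgraded to BFD_RELOC_386_GOT32X here, and an
   expression subtracting _GLOBAL_OFFSET_TABLE_ is rewritten to the
   appropriate GOTOFF/GOTPCREL-style relocation.  */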
10463
10464arelent *
10465tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
10466{
10467 arelent *rel;
10468 bfd_reloc_code_real_type code;
10469
10470 switch (fixp->fx_r_type)
10471 {
10472#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10473 case BFD_RELOC_SIZE32:
10474 case BFD_RELOC_SIZE64:
10475 if (S_IS_DEFINED (fixp->fx_addsy)
10476 && !S_IS_EXTERNAL (fixp->fx_addsy))
10477 {
10478 /* Resolve size relocation against local symbol to size of
10479 the symbol plus addend. */
10480 valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
10481 if (fixp->fx_r_type == BFD_RELOC_SIZE32
10482 && !fits_in_unsigned_long (value))
10483 as_bad_where (fixp->fx_file, fixp->fx_line,
10484 _("symbol size computation overflow"));
10485 fixp->fx_addsy = NULL;
10486 fixp->fx_subsy = NULL;
10487 md_apply_fix (fixp, (valueT *) &value, NULL);
10488 return NULL;
10489 }
10490#endif
10491
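      /* Fall through - size relocations against global symbols keep
	 their original relocation type, like the cases below.  */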
10492 case BFD_RELOC_X86_64_PLT32:
10493 case BFD_RELOC_X86_64_GOT32:
10494 case BFD_RELOC_X86_64_GOTPCREL:
10495 case BFD_RELOC_X86_64_GOTPCRELX:
10496 case BFD_RELOC_X86_64_REX_GOTPCRELX:
10497 case BFD_RELOC_386_PLT32:
10498 case BFD_RELOC_386_GOT32:
10499 case BFD_RELOC_386_GOT32X:
10500 case BFD_RELOC_386_GOTOFF:
10501 case BFD_RELOC_386_GOTPC:
10502 case BFD_RELOC_386_TLS_GD:
10503 case BFD_RELOC_386_TLS_LDM:
10504 case BFD_RELOC_386_TLS_LDO_32:
10505 case BFD_RELOC_386_TLS_IE_32:
10506 case BFD_RELOC_386_TLS_IE:
10507 case BFD_RELOC_386_TLS_GOTIE:
10508 case BFD_RELOC_386_TLS_LE_32:
10509 case BFD_RELOC_386_TLS_LE:
10510 case BFD_RELOC_386_TLS_GOTDESC:
10511 case BFD_RELOC_386_TLS_DESC_CALL:
10512 case BFD_RELOC_X86_64_TLSGD:
10513 case BFD_RELOC_X86_64_TLSLD:
10514 case BFD_RELOC_X86_64_DTPOFF32:
10515 case BFD_RELOC_X86_64_DTPOFF64:
10516 case BFD_RELOC_X86_64_GOTTPOFF:
10517 case BFD_RELOC_X86_64_TPOFF32:
10518 case BFD_RELOC_X86_64_TPOFF64:
10519 case BFD_RELOC_X86_64_GOTOFF64:
10520 case BFD_RELOC_X86_64_GOTPC32:
10521 case BFD_RELOC_X86_64_GOT64:
10522 case BFD_RELOC_X86_64_GOTPCREL64:
10523 case BFD_RELOC_X86_64_GOTPC64:
10524 case BFD_RELOC_X86_64_GOTPLT64:
10525 case BFD_RELOC_X86_64_PLTOFF64:
10526 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
10527 case BFD_RELOC_X86_64_TLSDESC_CALL:
10528 case BFD_RELOC_RVA:
10529 case BFD_RELOC_VTABLE_ENTRY:
10530 case BFD_RELOC_VTABLE_INHERIT:
10531#ifdef TE_PE
10532 case BFD_RELOC_32_SECREL:
10533#endif
10534 code = fixp->fx_r_type;
10535 break;
10536 case BFD_RELOC_X86_64_32S:
10537 if (!fixp->fx_pcrel)
10538 {
10539 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
10540 code = fixp->fx_r_type;
10541 break;
10542 }
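      /* Fall through - a pc-relative 32S fixup is converted by the
	 default case below according to its size.  */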
10543 default:
10544 if (fixp->fx_pcrel)
10545 {
10546 switch (fixp->fx_size)
10547 {
10548 default:
10549 as_bad_where (fixp->fx_file, fixp->fx_line,
10550 _("can not do %d byte pc-relative relocation"),
10551 fixp->fx_size);
10552 code = BFD_RELOC_32_PCREL;
10553 break;
10554 case 1: code = BFD_RELOC_8_PCREL; break;
10555 case 2: code = BFD_RELOC_16_PCREL; break;
10556 case 4: code = BFD_RELOC_32_PCREL; break;
10557#ifdef BFD64
10558 case 8: code = BFD_RELOC_64_PCREL; break;
10559#endif
10560 }
10561 }
10562 else
10563 {
10564 switch (fixp->fx_size)
10565 {
10566 default:
10567 as_bad_where (fixp->fx_file, fixp->fx_line,
10568 _("can not do %d byte relocation"),
10569 fixp->fx_size);
10570 code = BFD_RELOC_32;
10571 break;
10572 case 1: code = BFD_RELOC_8; break;
10573 case 2: code = BFD_RELOC_16; break;
10574 case 4: code = BFD_RELOC_32; break;
10575#ifdef BFD64
10576 case 8: code = BFD_RELOC_64; break;
10577#endif
10578 }
10579 }
10580 break;
10581 }
10582
10583 if ((code == BFD_RELOC_32
10584 || code == BFD_RELOC_32_PCREL
10585 || code == BFD_RELOC_X86_64_32S)
10586 && GOT_symbol
10587 && fixp->fx_addsy == GOT_symbol)
10588 {
10589 if (!object_64bit)
10590 code = BFD_RELOC_386_GOTPC;
10591 else
10592 code = BFD_RELOC_X86_64_GOTPC32;
10593 }
10594 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
10595 && GOT_symbol
10596 && fixp->fx_addsy == GOT_symbol)
10597 {
10598 code = BFD_RELOC_X86_64_GOTPC64;
10599 }
10600
10601 rel = XNEW (arelent);
10602 rel->sym_ptr_ptr = XNEW (asymbol *);
10603 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
10604
10605 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
10606
10607 if (!use_rela_relocations)
10608 {
10609 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
10610 vtable entry to be used in the relocation's section offset. */
10611 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
10612 rel->address = fixp->fx_offset;
10613#if defined (OBJ_COFF) && defined (TE_PE)
10614 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
10615 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
10616 else
10617#endif
10618 rel->addend = 0;
10619 }
10620  /* Use rela relocations in 64-bit mode.  */
10621 else
10622 {
10623 if (disallow_64bit_reloc)
10624 switch (code)
10625 {
10626 case BFD_RELOC_X86_64_DTPOFF64:
10627 case BFD_RELOC_X86_64_TPOFF64:
10628 case BFD_RELOC_64_PCREL:
10629 case BFD_RELOC_X86_64_GOTOFF64:
10630 case BFD_RELOC_X86_64_GOT64:
10631 case BFD_RELOC_X86_64_GOTPCREL64:
10632 case BFD_RELOC_X86_64_GOTPC64:
10633 case BFD_RELOC_X86_64_GOTPLT64:
10634 case BFD_RELOC_X86_64_PLTOFF64:
10635 as_bad_where (fixp->fx_file, fixp->fx_line,
10636 _("cannot represent relocation type %s in x32 mode"),
10637 bfd_get_reloc_code_name (code));
10638 break;
10639 default:
10640 break;
10641 }
10642
10643 if (!fixp->fx_pcrel)
10644 rel->addend = fixp->fx_offset;
10645 else
10646 switch (code)
10647 {
10648 case BFD_RELOC_X86_64_PLT32:
10649 case BFD_RELOC_X86_64_GOT32:
10650 case BFD_RELOC_X86_64_GOTPCREL:
10651 case BFD_RELOC_X86_64_GOTPCRELX:
10652 case BFD_RELOC_X86_64_REX_GOTPCRELX:
10653 case BFD_RELOC_X86_64_TLSGD:
10654 case BFD_RELOC_X86_64_TLSLD:
10655 case BFD_RELOC_X86_64_GOTTPOFF:
10656 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
10657 case BFD_RELOC_X86_64_TLSDESC_CALL:
10658 rel->addend = fixp->fx_offset - fixp->fx_size;
10659 break;
10660 default:
10661 rel->addend = (section->vma
10662 - fixp->fx_size
10663 + fixp->fx_addnumber
10664 + md_pcrel_from (fixp));
10665 break;
10666 }
10667 }
10668
10669 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
10670 if (rel->howto == NULL)
10671 {
10672 as_bad_where (fixp->fx_file, fixp->fx_line,
10673 _("cannot represent relocation type %s"),
10674 bfd_get_reloc_code_name (code));
10675 /* Set howto to a garbage value so that we can keep going. */
10676 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
10677 gas_assert (rel->howto != NULL);
10678 }
10679
10680 return rel;
10681}
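/* For instance, an ordinary 4-byte pc-relative fixup with no special
   r_type takes the default path above and becomes BFD_RELOC_32_PCREL;
   if its symbol is _GLOBAL_OFFSET_TABLE_ it is then rewritten to
   BFD_RELOC_386_GOTPC (or BFD_RELOC_X86_64_GOTPC32 for 64-bit
   output).  */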
10682
10683#include "tc-i386-intel.c"
10684
10685void
10686tc_x86_parse_to_dw2regnum (expressionS *exp)
10687{
10688 int saved_naked_reg;
10689 char saved_register_dot;
10690
10691 saved_naked_reg = allow_naked_reg;
10692 allow_naked_reg = 1;
10693 saved_register_dot = register_chars['.'];
10694 register_chars['.'] = '.';
10695 allow_pseudo_reg = 1;
10696 expression_and_evaluate (exp);
10697 allow_pseudo_reg = 0;
10698 register_chars['.'] = saved_register_dot;
10699 allow_naked_reg = saved_naked_reg;
10700
10701 if (exp->X_op == O_register && exp->X_add_number >= 0)
10702 {
10703 if ((addressT) exp->X_add_number < i386_regtab_size)
10704 {
10705 exp->X_op = O_constant;
10706 exp->X_add_number = i386_regtab[exp->X_add_number]
10707 .dw2_regnum[flag_code >> 1];
10708 }
10709 else
10710 exp->X_op = O_illegal;
10711 }
10712}
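/* Example, assuming the usual psABI DWARF register numbering: parsing
   "rsp" in 64-bit code yields the constant 7, "esp" in 32-bit code
   yields 4, and a register number outside i386_regtab is marked
   O_illegal.  */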
10713
10714void
10715tc_x86_frame_initial_instructions (void)
10716{
10717 static unsigned int sp_regno[2];
10718
10719 if (!sp_regno[flag_code >> 1])
10720 {
10721 char *saved_input = input_line_pointer;
10722 char sp[][4] = {"esp", "rsp"};
10723 expressionS exp;
10724
10725 input_line_pointer = sp[flag_code >> 1];
10726 tc_x86_parse_to_dw2regnum (&exp);
10727 gas_assert (exp.X_op == O_constant);
10728 sp_regno[flag_code >> 1] = exp.X_add_number;
10729 input_line_pointer = saved_input;
10730 }
10731
10732 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
10733 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
10734}
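/* Assuming x86_cie_data_alignment is -8 in 64-bit code (-4 in 32-bit),
   this emits the CIE initial instructions "def_cfa rsp, 8" and
   "offset return-address at CFA-8", i.e. the CFA is the stack pointer
   value from before the return address was pushed.  */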
10735
10736int
10737x86_dwarf2_addr_size (void)
10738{
10739#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
10740 if (x86_elf_abi == X86_64_X32_ABI)
10741 return 4;
10742#endif
10743 return bfd_arch_bits_per_address (stdoutput) / 8;
10744}
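/* E.g. x32 objects get 4-byte DWARF addresses even though the target
   architecture is 64-bit; otherwise this is simply 4 for i386 and 8
   for x86-64.  */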
10745
10746int
10747i386_elf_section_type (const char *str, size_t len)
10748{
10749 if (flag_code == CODE_64BIT
10750 && len == sizeof ("unwind") - 1
10751 && strncmp (str, "unwind", 6) == 0)
10752 return SHT_X86_64_UNWIND;
10753
10754 return -1;
10755}
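/* Illustrative: a 64-bit `.section .foo,"a",@unwind' directive gets
   section type SHT_X86_64_UNWIND from this hook; anything else returns
   -1 and falls back to the generic ELF handling.  */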
10756
10757#ifdef TE_SOLARIS
10758void
10759i386_solaris_fix_up_eh_frame (segT sec)
10760{
10761 if (flag_code == CODE_64BIT)
10762 elf_section_type (sec) = SHT_X86_64_UNWIND;
10763}
10764#endif
10765
10766#ifdef TE_PE
10767void
10768tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10769{
10770 expressionS exp;
10771
10772 exp.X_op = O_secrel;
10773 exp.X_add_symbol = symbol;
10774 exp.X_add_number = 0;
10775 emit_expr (&exp, size);
10776}
10777#endif
10778
10779#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10780/* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
10781
10782bfd_vma
10783x86_64_section_letter (int letter, const char **ptr_msg)
10784{
10785 if (flag_code == CODE_64BIT)
10786 {
10787 if (letter == 'l')
10788 return SHF_X86_64_LARGE;
10789
10790 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
10791 }
10792 else
10793 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
10794 return -1;
10795}
10796
10797bfd_vma
10798x86_64_section_word (char *str, size_t len)
10799{
10800 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
10801 return SHF_X86_64_LARGE;
10802
10803 return -1;
10804}
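/* Illustrative: in 64-bit code a directive such as
   .section .lbss,"awl",@nobits (flag letter 'l'), or one spelling the
   word "large", requests SHF_X86_64_LARGE via the two hooks above.  */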
10805
10806static void
10807handle_large_common (int small ATTRIBUTE_UNUSED)
10808{
10809 if (flag_code != CODE_64BIT)
10810 {
10811 s_comm_internal (0, elf_common_parse);
10812 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
10813 }
10814 else
10815 {
10816 static segT lbss_section;
10817 asection *saved_com_section_ptr = elf_com_section_ptr;
10818 asection *saved_bss_section = bss_section;
10819
10820 if (lbss_section == NULL)
10821 {
10822 flagword applicable;
10823 segT seg = now_seg;
10824 subsegT subseg = now_subseg;
10825
10826 /* The .lbss section is for local .largecomm symbols. */
10827 lbss_section = subseg_new (".lbss", 0);
10828 applicable = bfd_applicable_section_flags (stdoutput);
10829 bfd_set_section_flags (stdoutput, lbss_section,
10830 applicable & SEC_ALLOC);
10831 seg_info (lbss_section)->bss = 1;
10832
10833 subseg_set (seg, subseg);
10834 }
10835
10836 elf_com_section_ptr = &_bfd_elf_large_com_section;
10837 bss_section = lbss_section;
10838
10839 s_comm_internal (0, elf_common_parse);
10840
10841 elf_com_section_ptr = saved_com_section_ptr;
10842 bss_section = saved_bss_section;
10843 }
10844}
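/* Illustrative use of the directive handled above (64-bit only):

       .largecomm big_buf, 0x100000, 32

   defines a large common symbol placed via the .lbss/large-common
   machinery; in 32-bit code the directive degrades to an ordinary
   .comm with a warning, as implemented above.  */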
10845#endif /* OBJ_ELF || OBJ_MAYBE_ELF */