/* tc-i386.c -- Assemble code for the Intel 80386
   Copyright (C) 1989-2020 Free Software Foundation, Inc.

   This file is part of GAS, the GNU Assembler.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GAS; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */

/* Intel 80386 machine specific gas.
   Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
   x86_64 support by Jan Hubicka (jh@suse.cz)
   VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
   Bugs & suggestions are completely welcome.  This is free software.
   Please help us make it better.  */

#include "as.h"
#include "safe-ctype.h"
#include "subsegs.h"
#include "dwarf2dbg.h"
#include "dw2gencfi.h"
#include "elf/x86-64.h"
#include "opcodes/i386-init.h"

#ifdef HAVE_LIMITS_H
#include <limits.h>
#else
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifndef INT_MAX
#define INT_MAX (int) (((unsigned) (-1)) >> 1)
#endif
#endif

#ifndef INFER_ADDR_PREFIX
#define INFER_ADDR_PREFIX 1
#endif

#ifndef DEFAULT_ARCH
#define DEFAULT_ARCH "i386"
#endif

#ifndef INLINE
#if __GNUC__ >= 2
#define INLINE __inline__
#else
#define INLINE
#endif
#endif

/* Prefixes will be emitted in the order defined below.
   WAIT_PREFIX must be the first prefix since FWAIT really is an
   instruction, and so must come before any prefixes.
   The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
   REP_PREFIX/HLE_PREFIX, LOCK_PREFIX.  */
#define WAIT_PREFIX 0
#define SEG_PREFIX 1
#define ADDR_PREFIX 2
#define DATA_PREFIX 3
#define REP_PREFIX 4
#define HLE_PREFIX REP_PREFIX
#define BND_PREFIX REP_PREFIX
#define LOCK_PREFIX 5
#define REX_PREFIX 6	/* must come last.  */
#define MAX_PREFIXES 7	/* max prefixes per opcode */
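
/* Illustrative example (not part of the upstream comments): for
   "lock addl $1, %fs:(%eax)" in 32-bit mode the parser records 0x64
   in i.prefix[SEG_PREFIX] and 0xf0 in i.prefix[LOCK_PREFIX], so the
   instruction comes out as 64 f0 83 00 01 -- segment override first,
   LOCK last, matching the emission order defined above.  */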

/* we define the syntax here (modulo base,index,scale syntax) */
#define REGISTER_PREFIX '%'
#define IMMEDIATE_PREFIX '$'
#define ABSOLUTE_PREFIX '*'

/* these are the instruction mnemonic suffixes in AT&T syntax or
   memory operand size in Intel syntax.  */
#define WORD_MNEM_SUFFIX 'w'
#define BYTE_MNEM_SUFFIX 'b'
#define SHORT_MNEM_SUFFIX 's'
#define LONG_MNEM_SUFFIX 'l'
#define QWORD_MNEM_SUFFIX 'q'
/* Intel Syntax.  Use a non-ascii letter since it never appears
   in instructions.  */
#define LONG_DOUBLE_MNEM_SUFFIX '\1'

#define END_OF_INSN '\0'

/* This matches the C -> StaticRounding alias in the opcode table.  */
#define commutative staticrounding

/*
  'templates' is for grouping together 'template' structures for opcodes
  of the same name.  This is only used for storing the insns in the grand
  ole hash table of insns.
  The templates themselves start at START and range up to (but not including)
  END.
  */
typedef struct
{
  const insn_template *start;
  const insn_template *end;
}
templates;

/* 386 operand encoding bytes:  see 386 book for details of this.  */
typedef struct
{
  unsigned int regmem;	/* codes register or memory operand */
  unsigned int reg;	/* codes register operand (or extended opcode) */
  unsigned int mode;	/* how to interpret regmem & reg */
}
modrm_byte;
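
/* Worked example (illustrative): for "movl %eax, %ebx" (opcode 0x89 /r)
   the fields are mode = 3 (register direct), reg = 0 (%eax, the source)
   and regmem = 3 (%ebx), giving the ModRM byte 11 000 011 = 0xc3 and
   the encoding 89 c3.  */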

/* x86-64 extension prefix.  */
typedef int rex_byte;

/* 386 opcode byte to code indirect addressing.  */
typedef struct
{
  unsigned base;
  unsigned index;
  unsigned scale;
}
sib_byte;
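
/* Worked example (illustrative): the memory operand (%eax,%ebx,4)
   encodes as base = 0 (%eax), index = 3 (%ebx) and scale = 2
   (log2 of 4), i.e. the SIB byte 10 011 000 = 0x98.  */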

/* x86 arch names, types and features */
typedef struct
{
  const char *name;		/* arch name */
  unsigned int len;		/* arch string length */
  enum processor_type type;	/* arch type */
  i386_cpu_flags flags;		/* cpu feature flags */
  unsigned int skip;		/* show_arch should skip this. */
}
arch_entry;

/* Used to turn off indicated flags.  */
typedef struct
{
  const char *name;		/* arch name */
  unsigned int len;		/* arch string length */
  i386_cpu_flags flags;		/* cpu feature flags */
}
noarch_entry;

static void update_code_flag (int, int);
static void set_code_flag (int);
static void set_16bit_gcc_code_flag (int);
static void set_intel_syntax (int);
static void set_intel_mnemonic (int);
static void set_allow_index_reg (int);
static void set_check (int);
static void set_cpu_arch (int);
#ifdef TE_PE
static void pe_directive_secrel (int);
#endif
static void signed_cons (int);
static char *output_invalid (int c);
static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
				    const char *);
static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
				       const char *);
static int i386_att_operand (char *);
static int i386_intel_operand (char *, int);
static int i386_intel_simplify (expressionS *);
static int i386_intel_parse_name (const char *, expressionS *);
static const reg_entry *parse_register (char *, char **);
static char *parse_insn (char *, char *);
static char *parse_operands (char *, const char *);
static void swap_operands (void);
static void swap_2_operands (int, int);
static enum flag_code i386_addressing_mode (void);
static void optimize_imm (void);
static void optimize_disp (void);
static const insn_template *match_template (char);
static int check_string (void);
static int process_suffix (void);
static int check_byte_reg (void);
static int check_long_reg (void);
static int check_qword_reg (void);
static int check_word_reg (void);
static int finalize_imm (void);
static int process_operands (void);
static const seg_entry *build_modrm_byte (void);
static void output_insn (void);
static void output_imm (fragS *, offsetT);
static void output_disp (fragS *, offsetT);
#ifndef I386COFF
static void s_bss (int);
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
static void handle_large_common (int small ATTRIBUTE_UNUSED);

/* GNU_PROPERTY_X86_ISA_1_USED.  */
static unsigned int x86_isa_1_used;
/* GNU_PROPERTY_X86_FEATURE_2_USED.  */
static unsigned int x86_feature_2_used;
/* Generate x86 used ISA and feature properties.  */
static unsigned int x86_used_note = DEFAULT_X86_USED_NOTE;
#endif

static const char *default_arch = DEFAULT_ARCH;

/* This struct describes rounding control and SAE in the instruction.  */
struct RC_Operation
{
  enum rc_type
    {
      rne = 0,
      rd,
      ru,
      rz,
      saeonly
    } type;
  int operand;
};

static struct RC_Operation rc_op;

/* The struct describes masking, applied to OPERAND in the instruction.
   MASK is a pointer to the corresponding mask register.  ZEROING tells
   whether merging or zeroing mask is used.  */
struct Mask_Operation
{
  const reg_entry *mask;
  unsigned int zeroing;
  /* The operand this operation is applied to.  */
  int operand;
};

static struct Mask_Operation mask_op;

/* The struct describes broadcasting, applied to OPERAND.  FACTOR is
   broadcast factor.  */
struct Broadcast_Operation
{
  /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}.  */
  int type;

  /* Index of broadcasted operand.  */
  int operand;

  /* Number of bytes to broadcast.  */
  int bytes;
};

static struct Broadcast_Operation broadcast_op;

/* VEX prefix.  */
typedef struct
{
  /* VEX prefix is either 2 byte or 3 byte.  EVEX is 4 byte.  */
  unsigned char bytes[4];
  unsigned int length;
  /* Destination or source register specifier.  */
  const reg_entry *register_specifier;
} vex_prefix;
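
/* Illustrative example (not from the upstream comments): for
   "vaddps %xmm0, %xmm1, %xmm2" the two-byte VEX form applies, so
   length is 2, bytes[] starts c5 f0, and register_specifier points
   at %xmm1 (stored inverted in VEX.vvvv); the full encoding is
   c5 f0 58 d0.  */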

/* 'md_assemble ()' gathers together information and puts it into an
   i386_insn.  */

union i386_op
{
  expressionS *disps;
  expressionS *imms;
  const reg_entry *regs;
};

enum i386_error
{
  operand_size_mismatch,
  operand_type_mismatch,
  register_type_mismatch,
  number_of_operands_mismatch,
  invalid_instruction_suffix,
  bad_imm4,
  unsupported_with_intel_mnemonic,
  unsupported_syntax,
  unsupported,
  invalid_vsib_address,
  invalid_vector_register_set,
  unsupported_vector_index_register,
  unsupported_broadcast,
  broadcast_needed,
  unsupported_masking,
  mask_not_on_destination,
  no_default_mask,
  unsupported_rc_sae,
  rc_sae_operand_not_last_imm,
  invalid_register_operand,
};

struct _i386_insn
{
  /* TM holds the template for the insn we're currently assembling.  */
  insn_template tm;

  /* SUFFIX holds the instruction size suffix for byte, word, dword
     or qword, if given.  */
  char suffix;

  /* OPERANDS gives the number of given operands.  */
  unsigned int operands;

  /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
     of given register, displacement, memory operands and immediate
     operands.  */
  unsigned int reg_operands, disp_operands, mem_operands, imm_operands;

  /* TYPES [i] is the type (see above #defines) which tells us how to
     use OP[i] for the corresponding operand.  */
  i386_operand_type types[MAX_OPERANDS];

  /* Displacement expression, immediate expression, or register for each
     operand.  */
  union i386_op op[MAX_OPERANDS];

  /* Flags for operands.  */
  unsigned int flags[MAX_OPERANDS];
#define Operand_PCrel 1
#define Operand_Mem   2

  /* Relocation type for operand */
  enum bfd_reloc_code_real reloc[MAX_OPERANDS];

  /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
     the base index byte below.  */
  const reg_entry *base_reg;
  const reg_entry *index_reg;
  unsigned int log2_scale_factor;

  /* SEG gives the seg_entries of this insn.  They are zero unless
     explicit segment overrides are given.  */
  const seg_entry *seg[2];

  /* Copied first memory operand string, for re-checking.  */
  char *memop1_string;

  /* PREFIX holds all the given prefix opcodes (usually null).
     PREFIXES is the number of prefix opcodes.  */
  unsigned int prefixes;
  unsigned char prefix[MAX_PREFIXES];

  /* Register is in low 3 bits of opcode.  */
  bfd_boolean short_form;

  /* The operand to a branch insn indicates an absolute branch.  */
  bfd_boolean jumpabsolute;

  /* Has MMX register operands.  */
  bfd_boolean has_regmmx;

  /* Has XMM register operands.  */
  bfd_boolean has_regxmm;

  /* Has YMM register operands.  */
  bfd_boolean has_regymm;

  /* Has ZMM register operands.  */
  bfd_boolean has_regzmm;

  /* Has GOTPC or TLS relocation.  */
  bfd_boolean has_gotpc_tls_reloc;

  /* RM and SIB are the modrm byte and the sib byte where the
     addressing modes of this insn are encoded.  */
  modrm_byte rm;
  rex_byte rex;
  rex_byte vrex;
  sib_byte sib;
  vex_prefix vex;

  /* Masking attributes.  */
  struct Mask_Operation *mask;

  /* Rounding control and SAE attributes.  */
  struct RC_Operation *rounding;

  /* Broadcasting attributes.  */
  struct Broadcast_Operation *broadcast;

  /* Compressed disp8*N attribute.  */
  unsigned int memshift;

  /* Prefer load or store in encoding.  */
  enum
    {
      dir_encoding_default = 0,
      dir_encoding_load,
      dir_encoding_store,
      dir_encoding_swap
    } dir_encoding;

  /* Prefer 8bit or 32bit displacement in encoding.  */
  enum
    {
      disp_encoding_default = 0,
      disp_encoding_8bit,
      disp_encoding_32bit
    } disp_encoding;

  /* Prefer the REX byte in encoding.  */
  bfd_boolean rex_encoding;

  /* Disable instruction size optimization.  */
  bfd_boolean no_optimize;

  /* How to encode vector instructions.  */
  enum
    {
      vex_encoding_default = 0,
      vex_encoding_vex,
      vex_encoding_vex3,
      vex_encoding_evex
    } vec_encoding;

  /* REP prefix.  */
  const char *rep_prefix;

  /* HLE prefix.  */
  const char *hle_prefix;

  /* Have BND prefix.  */
  const char *bnd_prefix;

  /* Have NOTRACK prefix.  */
  const char *notrack_prefix;

  /* Error message.  */
  enum i386_error error;
};

typedef struct _i386_insn i386_insn;

/* Link RC type with corresponding string, that'll be looked for in
   asm.  */
struct RC_name
{
  enum rc_type type;
  const char *name;
  unsigned int len;
};

static const struct RC_name RC_NamesTable[] =
{
  { rne, STRING_COMMA_LEN ("rn-sae") },
  { rd, STRING_COMMA_LEN ("rd-sae") },
  { ru, STRING_COMMA_LEN ("ru-sae") },
  { rz, STRING_COMMA_LEN ("rz-sae") },
  { saeonly, STRING_COMMA_LEN ("sae") },
};
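
/* Usage sketch (illustrative): these strings are what appears between
   braces in AT&T source, e.g. "vaddps {rn-sae}, %zmm1, %zmm2, %zmm3"
   selects rne rounding, while "{sae}" alone requests
   suppress-all-exceptions without rounding control.  */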

/* List of chars besides those in app.c:symbol_chars that can start an
   operand.  Used to prevent the scrubber eating vital white-space.  */
const char extra_symbol_chars[] = "*%-([{}"
#ifdef LEX_AT
	"@"
#endif
#ifdef LEX_QM
	"?"
#endif
	;

#if (defined (TE_I386AIX)				\
     || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))	\
	 && !defined (TE_GNU)				\
	 && !defined (TE_LINUX)				\
	 && !defined (TE_NACL)				\
	 && !defined (TE_FreeBSD)			\
	 && !defined (TE_DragonFly)			\
	 && !defined (TE_NetBSD)))
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  The option
   --divide will remove '/' from this list.  */
const char *i386_comment_chars = "#/";
#define SVR4_COMMENT_CHARS 1
#define PREFIX_SEPARATOR '\\'

#else
const char *i386_comment_chars = "#";
#define PREFIX_SEPARATOR '/'
#endif

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.
   Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.
   Also note that comments started like this one will always work if
   '/' isn't otherwise defined.  */
const char line_comment_chars[] = "#/";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant from exp in floating point
   nums.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant
   As in 0f12.456
   or 0d1.2345e12.  */
const char FLT_CHARS[] = "fFdDxX";

/* Tables for lexical analysis.  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];
static char digit_chars[256];

/* Lexical macros.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
#define is_operand_char(x) (operand_chars[(unsigned char) x])
#define is_register_char(x) (register_chars[(unsigned char) x])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) x])
#define is_digit_char(x) (digit_chars[(unsigned char) x])

/* All non-digit non-letter characters that may occur in an operand.  */
static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";

/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).  */
static char save_stack[32];
static char *save_stack_p;
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
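
/* Usage sketch (illustrative): a routine that wants to scan just one
   comma-separated operand as a NUL-terminated string can do

     END_STRING_AND_SAVE (comma_pos);	(temporarily plant a '\0')
     ... parse the operand in place ...
     RESTORE_END_STRING (comma_pos);	(put the ',' back)

   which is how md_assemble () keeps its promise to leave the input
   unaltered.  */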

/* The instruction we're assembling.  */
static i386_insn i;

/* Possible templates for current insn.  */
static const templates *current_templates;

/* Per instruction expressionS buffers: max displacements & immediates.  */
static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];

/* Current operand we are working on.  */
static int this_operand = -1;

/* We support four different modes.  FLAG_CODE variable is used to distinguish
   these.  */

enum flag_code {
	CODE_32BIT,
	CODE_16BIT,
	CODE_64BIT };

static enum flag_code flag_code;
static unsigned int object_64bit;
static unsigned int disallow_64bit_reloc;
static int use_rela_relocations = 0;
/* __tls_get_addr/___tls_get_addr symbol for TLS.  */
static const char *tls_get_addr;

#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* The ELF ABI to use.  */
enum x86_elf_abi
{
  I386_ABI,
  X86_64_ABI,
  X86_64_X32_ABI
};

static enum x86_elf_abi x86_elf_abi = I386_ABI;
#endif

#if defined (TE_PE) || defined (TE_PEP)
/* Use big object file format.  */
static int use_big_obj = 0;
#endif

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* 1 if generating code for a shared library.  */
static int shared = 0;
#endif

/* 1 for intel syntax,
   0 if att syntax.  */
static int intel_syntax = 0;

static enum x86_64_isa
{
  amd64 = 1,	/* AMD64 ISA.  */
  intel64	/* Intel64 ISA.  */
} isa64;

/* 1 for intel mnemonic,
   0 if att mnemonic.  */
static int intel_mnemonic = !SYSV386_COMPAT;

/* 1 if pseudo registers are permitted.  */
static int allow_pseudo_reg = 0;

/* 1 if register prefix % not required.  */
static int allow_naked_reg = 0;

/* 1 if the assembler should add BND prefix for all control-transferring
   instructions supporting it, even if this prefix wasn't specified
   explicitly.  */
static int add_bnd_prefix = 0;

/* 1 if pseudo index register, eiz/riz, is allowed.  */
static int allow_index_reg = 0;

/* 1 if the assembler should ignore LOCK prefix, even if it was
   specified explicitly.  */
static int omit_lock_prefix = 0;

/* 1 if the assembler should encode lfence, mfence, and sfence as
   "lock addl $0, (%{re}sp)".  */
static int avoid_fence = 0;

/* Type of the previous instruction.  */
static struct
  {
    segT seg;
    const char *file;
    const char *name;
    unsigned int line;
    enum last_insn_kind
      {
	last_insn_other = 0,
	last_insn_directive,
	last_insn_prefix
      } kind;
  } last_insn;

/* 1 if the assembler should generate relax relocations.  */

static int generate_relax_relocations
  = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS;

static enum check_kind
  {
    check_none = 0,
    check_warning,
    check_error
  }
sse_check, operand_check = check_warning;

/* Non-zero if branches should be aligned within power of 2 boundary.  */
static int align_branch_power = 0;

/* Types of branches to align.  */
enum align_branch_kind
  {
    align_branch_none = 0,
    align_branch_jcc = 1,
    align_branch_fused = 2,
    align_branch_jmp = 3,
    align_branch_call = 4,
    align_branch_indirect = 5,
    align_branch_ret = 6
  };

/* Type bits of branches to align.  */
enum align_branch_bit
  {
    align_branch_jcc_bit = 1 << align_branch_jcc,
    align_branch_fused_bit = 1 << align_branch_fused,
    align_branch_jmp_bit = 1 << align_branch_jmp,
    align_branch_call_bit = 1 << align_branch_call,
    align_branch_indirect_bit = 1 << align_branch_indirect,
    align_branch_ret_bit = 1 << align_branch_ret
  };

static unsigned int align_branch = (align_branch_jcc_bit
				    | align_branch_fused_bit
				    | align_branch_jmp_bit);

/* The maximum padding size for fused jcc.  CMP like instruction can
   be 9 bytes and jcc can be 6 bytes.  Leave room just in case for
   prefixes (9 + 6 = 15, so 20 leaves 5 spare bytes).  */
#define MAX_FUSED_JCC_PADDING_SIZE 20

/* The maximum number of prefixes added for an instruction.  */
static unsigned int align_branch_prefix_size = 5;

/* Optimization:
   1. Clear the REX_W bit with register operand if possible.
   2. Above plus use 128bit vector instruction to clear the full vector
      register.
 */
static int optimize = 0;

/* Optimization:
   1. Clear the REX_W bit with register operand if possible.
   2. Above plus use 128bit vector instruction to clear the full vector
      register.
   3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
      "testb $imm7,%r8".
 */
static int optimize_for_space = 0;

/* Register prefix used for error message.  */
static const char *register_prefix = "%";

/* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
   leave, push, and pop instructions so that gcc has the same stack
   frame as in 32 bit mode.  */
static char stackop_size = '\0';

/* Non-zero to optimize code alignment.  */
int optimize_align_code = 1;

/* Non-zero to quieten some warnings.  */
static int quiet_warnings = 0;

/* CPU name.  */
static const char *cpu_arch_name = NULL;
static char *cpu_sub_arch_name = NULL;

/* CPU feature flags.  */
static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;

/* If we have selected a cpu we are generating instructions for.  */
static int cpu_arch_tune_set = 0;

/* Cpu we are generating instructions for.  */
enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;

/* CPU feature flags of cpu we are generating instructions for.  */
static i386_cpu_flags cpu_arch_tune_flags;

/* CPU instruction set architecture used.  */
enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;

/* CPU feature flags of instruction set architecture used.  */
i386_cpu_flags cpu_arch_isa_flags;

/* If set, conditional jumps are not automatically promoted to handle
   larger than a byte offset.  */
static unsigned int no_cond_jump_promotion = 0;

/* Encode SSE instructions with VEX prefix.  */
static unsigned int sse2avx;

/* Encode scalar AVX instructions with specific vector length.  */
static enum
  {
    vex128 = 0,
    vex256
  } avxscalar;

/* Encode VEX WIG instructions with specific vex.w.  */
static enum
  {
    vexw0 = 0,
    vexw1
  } vexwig;

/* Encode scalar EVEX LIG instructions with specific vector length.  */
static enum
  {
    evexl128 = 0,
    evexl256,
    evexl512
  } evexlig;

/* Encode EVEX WIG instructions with specific evex.w.  */
static enum
  {
    evexw0 = 0,
    evexw1
  } evexwig;

/* Value to encode in EVEX RC bits, for SAE-only instructions.  */
static enum rc_type evexrcig = rne;

/* Pre-defined "_GLOBAL_OFFSET_TABLE_".  */
static symbolS *GOT_symbol;

/* The dwarf2 return column, adjusted for 32 or 64 bit.  */
unsigned int x86_dwarf2_return_column;

/* The dwarf2 data alignment, adjusted for 32 or 64 bit.  */
int x86_cie_data_alignment;

/* Interface to relax_segment.
   There are 3 major relax states for 386 jump insns because the
   different types of jumps add different sizes to frags when we're
   figuring out what sort of jump to choose to reach a given label.

   BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
   branches which are handled by md_estimate_size_before_relax() and
   i386_generic_table_relax_frag().  */

/* Types.  */
#define UNCOND_JUMP 0
#define COND_JUMP 1
#define COND_JUMP86 2
#define BRANCH_PADDING 3
#define BRANCH_PREFIX 4
#define FUSED_JCC_PADDING 5

/* Sizes.  */
#define CODE16	1
#define SMALL	0
#define SMALL16 (SMALL | CODE16)
#define BIG	2
#define BIG16	(BIG | CODE16)

#ifndef INLINE
#ifdef __GNUC__
#define INLINE __inline__
#else
#define INLINE
#endif
#endif

#define ENCODE_RELAX_STATE(type, size) \
  ((relax_substateT) (((type) << 2) | (size)))
#define TYPE_FROM_RELAX_STATE(s) \
  ((s) >> 2)
#define DISP_SIZE_FROM_RELAX_STATE(s) \
    ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
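
/* Worked example (illustrative): ENCODE_RELAX_STATE (COND_JUMP, BIG)
   yields (1 << 2) | 2 = 6; TYPE_FROM_RELAX_STATE (6) recovers
   COND_JUMP, and DISP_SIZE_FROM_RELAX_STATE (6) gives the 4-byte
   displacement of the promoted jump.  */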

/* This table is used by relax_frag to promote short jumps to long
   ones where necessary.  SMALL (short) jumps may be promoted to BIG
   (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long).  We
   don't allow a short jump in a 32 bit code segment to be promoted to
   a 16 bit offset jump because it's slower (requires data size
   prefix), and doesn't work, unless the destination is in the bottom
   64k of the code segment (The top 16 bits of eip are zeroed).  */

const relax_typeS md_relax_table[] =
{
  /* The fields are:
     1) most positive reach of this state,
     2) most negative reach of this state,
     3) how many bytes this mode will have in the variable part of the frag
     4) which index into the table to try if we can't fit into this one.  */

  /* UNCOND_JUMP states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
  /* dword jmp adds 4 bytes to frag:
     0 extra opcode bytes, 4 displacement bytes.  */
  {0, 0, 4, 0},
  /* word jmp adds 2 bytes to frag:
     0 extra opcode bytes, 2 displacement bytes.  */
  {0, 0, 2, 0},

  /* COND_JUMP states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
  /* dword conditionals add 5 bytes to frag:
     1 extra opcode byte, 4 displacement bytes.  */
  {0, 0, 5, 0},
  /* word conditionals add 3 bytes to frag:
     1 extra opcode byte, 2 displacement bytes.  */
  {0, 0, 3, 0},

  /* COND_JUMP86 states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
  /* dword conditionals add 5 bytes to frag:
     1 extra opcode byte, 4 displacement bytes.  */
  {0, 0, 5, 0},
  /* word conditionals add 4 bytes to frag:
     1 displacement byte and a 3 byte long branch insn.  */
  {0, 0, 4, 0}
};
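
/* Worked example (illustrative): a "je" whose target is 200 bytes away
   starts in ENCODE_RELAX_STATE (COND_JUMP, SMALL); 200 exceeds the
   127-byte forward reach above, so relax_frag promotes it to the BIG
   state and the frag grows by 5 bytes (one extra opcode byte for the
   two-byte 0f 84 form plus a 4-byte displacement).  */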

static const arch_entry cpu_arch[] =
{
  /* Do not replace the first two entries - i386_target_format()
     relies on them being there in this order.  */
  { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
    CPU_GENERIC32_FLAGS, 0 },
  { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
    CPU_GENERIC64_FLAGS, 0 },
  { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
    CPU_NONE_FLAGS, 0 },
  { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
    CPU_I186_FLAGS, 0 },
  { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
    CPU_I286_FLAGS, 0 },
  { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
    CPU_I386_FLAGS, 0 },
  { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
    CPU_I486_FLAGS, 0 },
  { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
    CPU_I586_FLAGS, 0 },
  { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
    CPU_I686_FLAGS, 0 },
  { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
    CPU_I586_FLAGS, 0 },
  { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
    CPU_PENTIUMPRO_FLAGS, 0 },
  { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
    CPU_P2_FLAGS, 0 },
  { STRING_COMMA_LEN ("pentiumiii"), PROCESSOR_PENTIUMPRO,
    CPU_P3_FLAGS, 0 },
  { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
    CPU_P4_FLAGS, 0 },
  { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
    CPU_CORE_FLAGS, 0 },
  { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
    CPU_NOCONA_FLAGS, 0 },
  { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
    CPU_CORE_FLAGS, 1 },
  { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
    CPU_CORE_FLAGS, 0 },
  { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
    CPU_CORE2_FLAGS, 1 },
  { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
    CPU_CORE2_FLAGS, 0 },
  { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
    CPU_COREI7_FLAGS, 0 },
  { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
    CPU_L1OM_FLAGS, 0 },
  { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
    CPU_K1OM_FLAGS, 0 },
  { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU,
    CPU_IAMCU_FLAGS, 0 },
  { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
    CPU_K6_FLAGS, 0 },
  { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
    CPU_K6_2_FLAGS, 0 },
  { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
    CPU_ATHLON_FLAGS, 0 },
  { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
    CPU_K8_FLAGS, 1 },
  { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
    CPU_K8_FLAGS, 0 },
  { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
    CPU_K8_FLAGS, 0 },
  { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
    CPU_AMDFAM10_FLAGS, 0 },
  { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
    CPU_BDVER1_FLAGS, 0 },
  { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
    CPU_BDVER2_FLAGS, 0 },
  { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
    CPU_BDVER3_FLAGS, 0 },
  { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD,
    CPU_BDVER4_FLAGS, 0 },
  { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER,
    CPU_ZNVER1_FLAGS, 0 },
  { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER,
    CPU_ZNVER2_FLAGS, 0 },
  { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
    CPU_BTVER1_FLAGS, 0 },
  { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
    CPU_BTVER2_FLAGS, 0 },
  { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
    CPU_8087_FLAGS, 0 },
  { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
    CPU_287_FLAGS, 0 },
  { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
    CPU_387_FLAGS, 0 },
  { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN,
    CPU_687_FLAGS, 0 },
  { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN,
    CPU_CMOV_FLAGS, 0 },
  { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN,
    CPU_FXSR_FLAGS, 0 },
  { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
    CPU_MMX_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
    CPU_SSE_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
    CPU_SSE2_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
    CPU_SSE3_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
    CPU_SSE4A_FLAGS, 0 },
  { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
    CPU_SSSE3_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
    CPU_SSE4_1_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
    CPU_SSE4_2_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
    CPU_SSE4_2_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
    CPU_AVX_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
    CPU_AVX2_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
    CPU_AVX512F_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
    CPU_AVX512CD_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
    CPU_AVX512ER_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
    CPU_AVX512PF_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN,
    CPU_AVX512DQ_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN,
    CPU_AVX512BW_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN,
    CPU_AVX512VL_FLAGS, 0 },
  { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
    CPU_VMX_FLAGS, 0 },
  { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
    CPU_VMFUNC_FLAGS, 0 },
  { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
    CPU_SMX_FLAGS, 0 },
  { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
    CPU_XSAVE_FLAGS, 0 },
  { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
    CPU_XSAVEOPT_FLAGS, 0 },
  { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN,
    CPU_XSAVEC_FLAGS, 0 },
  { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN,
    CPU_XSAVES_FLAGS, 0 },
  { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
    CPU_AES_FLAGS, 0 },
  { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
    CPU_PCLMUL_FLAGS, 0 },
  { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
    CPU_PCLMUL_FLAGS, 1 },
  { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
    CPU_FSGSBASE_FLAGS, 0 },
  { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
    CPU_RDRND_FLAGS, 0 },
  { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
    CPU_F16C_FLAGS, 0 },
  { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
    CPU_BMI2_FLAGS, 0 },
  { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
    CPU_FMA_FLAGS, 0 },
  { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
    CPU_FMA4_FLAGS, 0 },
  { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
    CPU_XOP_FLAGS, 0 },
  { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
    CPU_LWP_FLAGS, 0 },
  { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
    CPU_MOVBE_FLAGS, 0 },
  { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
    CPU_CX16_FLAGS, 0 },
  { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
    CPU_EPT_FLAGS, 0 },
  { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
    CPU_LZCNT_FLAGS, 0 },
  { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
    CPU_HLE_FLAGS, 0 },
  { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
    CPU_RTM_FLAGS, 0 },
  { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
    CPU_INVPCID_FLAGS, 0 },
  { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
    CPU_CLFLUSH_FLAGS, 0 },
  { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
    CPU_NOP_FLAGS, 0 },
  { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
    CPU_SYSCALL_FLAGS, 0 },
  { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
    CPU_RDTSCP_FLAGS, 0 },
  { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
    CPU_3DNOW_FLAGS, 0 },
  { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
    CPU_3DNOWA_FLAGS, 0 },
  { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
    CPU_PADLOCK_FLAGS, 0 },
  { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
    CPU_SVME_FLAGS, 1 },
  { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
    CPU_SVME_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
    CPU_SSE4A_FLAGS, 0 },
  { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
    CPU_ABM_FLAGS, 0 },
  { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
    CPU_BMI_FLAGS, 0 },
  { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
    CPU_TBM_FLAGS, 0 },
  { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
    CPU_ADX_FLAGS, 0 },
  { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
    CPU_RDSEED_FLAGS, 0 },
  { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
    CPU_PRFCHW_FLAGS, 0 },
  { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
    CPU_SMAP_FLAGS, 0 },
  { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
    CPU_MPX_FLAGS, 0 },
  { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
    CPU_SHA_FLAGS, 0 },
  { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN,
    CPU_CLFLUSHOPT_FLAGS, 0 },
  { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN,
    CPU_PREFETCHWT1_FLAGS, 0 },
  { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN,
    CPU_SE1_FLAGS, 0 },
  { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN,
    CPU_CLWB_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN,
    CPU_AVX512IFMA_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN,
    CPU_AVX512VBMI_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN,
    CPU_AVX512_4FMAPS_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN,
    CPU_AVX512_4VNNIW_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN,
    CPU_AVX512_VPOPCNTDQ_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN,
    CPU_AVX512_VBMI2_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN,
    CPU_AVX512_VNNI_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN,
    CPU_AVX512_BITALG_FLAGS, 0 },
  { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN,
    CPU_CLZERO_FLAGS, 0 },
  { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN,
    CPU_MWAITX_FLAGS, 0 },
  { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN,
    CPU_OSPKE_FLAGS, 0 },
  { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN,
    CPU_RDPID_FLAGS, 0 },
  { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN,
    CPU_PTWRITE_FLAGS, 0 },
  { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN,
    CPU_IBT_FLAGS, 0 },
  { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN,
    CPU_SHSTK_FLAGS, 0 },
  { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN,
    CPU_GFNI_FLAGS, 0 },
  { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN,
    CPU_VAES_FLAGS, 0 },
  { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN,
    CPU_VPCLMULQDQ_FLAGS, 0 },
  { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN,
    CPU_WBNOINVD_FLAGS, 0 },
  { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN,
    CPU_PCONFIG_FLAGS, 0 },
  { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN,
    CPU_WAITPKG_FLAGS, 0 },
  { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN,
    CPU_CLDEMOTE_FLAGS, 0 },
  { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN,
    CPU_MOVDIRI_FLAGS, 0 },
  { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN,
    CPU_MOVDIR64B_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN,
    CPU_AVX512_BF16_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN,
    CPU_AVX512_VP2INTERSECT_FLAGS, 0 },
  { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN,
    CPU_ENQCMD_FLAGS, 0 },
  { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN,
    CPU_RDPRU_FLAGS, 0 },
  { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN,
    CPU_MCOMMIT_FLAGS, 0 },
};

static const noarch_entry cpu_noarch[] =
{
  { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS },
  { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS },
  { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS },
  { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS },
  { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS },
  { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS },
  { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS },
  { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS },
  { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS },
  { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS },
  { STRING_COMMA_LEN ("nosse4a"), CPU_ANY_SSE4A_FLAGS },
  { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS },
  { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS },
  { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS },
  { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS },
  { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS },
  { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS },
  { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS },
  { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS },
  { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS },
  { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS },
  { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS },
  { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS },
  { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS },
  { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS },
  { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS },
  { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS },
  { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS },
  { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS },
  { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS },
  { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS },
  { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS },
  { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS },
  { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS },
  { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS },
  { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS },
  { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS },
  { STRING_COMMA_LEN ("noavx512_vp2intersect"), CPU_ANY_AVX512_VP2INTERSECT_FLAGS },
  { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS },
};
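
/* Usage sketch (illustrative): sub-features are toggled by name with
   the .arch directive, e.g. ".arch .avx2" enables AVX2 via cpu_arch[]
   above, while ".arch .noavx2" applies the CPU_ANY_AVX2_FLAGS mask
   from cpu_noarch[] to turn AVX2 (and everything built on it) off.  */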

#ifdef I386COFF
/* Like s_lcomm_internal in gas/read.c but the alignment string
   is allowed to be optional.  */

static symbolS *
pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
{
  addressT align = 0;

  SKIP_WHITESPACE ();

  if (needs_align
      && *input_line_pointer == ',')
    {
      align = parse_align (needs_align - 1);

      if (align == (addressT) -1)
	return NULL;
    }
  else
    {
      if (size >= 8)
	align = 3;
      else if (size >= 4)
	align = 2;
      else if (size >= 2)
	align = 1;
      else
	align = 0;
    }

  bss_alloc (symbolP, size, align);
  return symbolP;
}

static void
pe_lcomm (int needs_align)
{
  s_comm_internal (needs_align * 2, pe_lcomm_internal);
}
#endif

const pseudo_typeS md_pseudo_table[] =
{
#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
  {"align", s_align_bytes, 0},
#else
  {"align", s_align_ptwo, 0},
#endif
  {"arch", set_cpu_arch, 0},
#ifndef I386COFF
  {"bss", s_bss, 0},
#else
  {"lcomm", pe_lcomm, 1},
#endif
  {"ffloat", float_cons, 'f'},
  {"dfloat", float_cons, 'd'},
  {"tfloat", float_cons, 'x'},
  {"value", cons, 2},
  {"slong", signed_cons, 4},
  {"noopt", s_ignore, 0},
  {"optim", s_ignore, 0},
  {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
  {"code16", set_code_flag, CODE_16BIT},
  {"code32", set_code_flag, CODE_32BIT},
#ifdef BFD64
  {"code64", set_code_flag, CODE_64BIT},
#endif
  {"intel_syntax", set_intel_syntax, 1},
  {"att_syntax", set_intel_syntax, 0},
  {"intel_mnemonic", set_intel_mnemonic, 1},
  {"att_mnemonic", set_intel_mnemonic, 0},
  {"allow_index_reg", set_allow_index_reg, 1},
  {"disallow_index_reg", set_allow_index_reg, 0},
  {"sse_check", set_check, 0},
  {"operand_check", set_check, 1},
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"largecomm", handle_large_common, 0},
#else
  {"file", dwarf2_directive_file, 0},
  {"loc", dwarf2_directive_loc, 0},
  {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
#endif
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  {0, 0, 0}
};

/* For interface with expression ().  */
extern char *input_line_pointer;

/* Hash table for instruction mnemonic lookup.  */
static struct hash_control *op_hash;

/* Hash table for register lookup.  */
static struct hash_control *reg_hash;
\f
/* Various efficient no-op patterns for aligning code labels.
   Note: Don't try to assemble the instructions in the comments.
   0L and 0w are not legal.  */
static const unsigned char f32_1[] =
  {0x90};				/* nop */
static const unsigned char f32_2[] =
  {0x66,0x90};				/* xchg %ax,%ax */
static const unsigned char f32_3[] =
  {0x8d,0x76,0x00};			/* leal 0(%esi),%esi */
static const unsigned char f32_4[] =
  {0x8d,0x74,0x26,0x00};		/* leal 0(%esi,1),%esi */
static const unsigned char f32_6[] =
  {0x8d,0xb6,0x00,0x00,0x00,0x00};	/* leal 0L(%esi),%esi */
static const unsigned char f32_7[] =
  {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
static const unsigned char f16_3[] =
  {0x8d,0x74,0x00};			/* lea 0(%si),%si */
static const unsigned char f16_4[] =
  {0x8d,0xb4,0x00,0x00};		/* lea 0W(%si),%si */
static const unsigned char jump_disp8[] =
  {0xeb};				/* jmp disp8 */
static const unsigned char jump32_disp32[] =
  {0xe9};				/* jmp disp32 */
static const unsigned char jump16_disp32[] =
  {0x66,0xe9};				/* jmp disp32 */
/* 32-bit NOPs patterns.  */
static const unsigned char *const f32_patt[] = {
  f32_1, f32_2, f32_3, f32_4, NULL, f32_6, f32_7
};
/* 16-bit NOPs patterns.  */
static const unsigned char *const f16_patt[] = {
  f32_1, f32_2, f16_3, f16_4
};
/* nopl (%[re]ax) */
static const unsigned char alt_3[] =
  {0x0f,0x1f,0x00};
/* nopl 0(%[re]ax) */
static const unsigned char alt_4[] =
  {0x0f,0x1f,0x40,0x00};
/* nopl 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_5[] =
  {0x0f,0x1f,0x44,0x00,0x00};
/* nopw 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_6[] =
  {0x66,0x0f,0x1f,0x44,0x00,0x00};
/* nopl 0L(%[re]ax) */
static const unsigned char alt_7[] =
  {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
/* nopl 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_8[] =
  {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* nopw 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_9[] =
  {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* nopw %cs:0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_10[] =
  {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* data16 nopw %cs:0L(%eax,%eax,1) */
static const unsigned char alt_11[] =
  {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* 32-bit and 64-bit NOPs patterns.  */
static const unsigned char *const alt_patt[] = {
  f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
  alt_9, alt_10, alt_11
};

/* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
   size of a single NOP instruction MAX_SINGLE_NOP_SIZE.  */

static void
i386_output_nops (char *where, const unsigned char *const *patt,
		  int count, int max_single_nop_size)
{
  /* Place the longer NOP first.  */
  int last;
  int offset;
  const unsigned char *nops;

  if (max_single_nop_size < 1)
    {
      as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
		max_single_nop_size);
      return;
    }

  nops = patt[max_single_nop_size - 1];

  /* Use the smaller one if the requested one isn't available.  */
  if (nops == NULL)
    {
      max_single_nop_size--;
      nops = patt[max_single_nop_size - 1];
    }

  last = count % max_single_nop_size;

  count -= last;
  for (offset = 0; offset < count; offset += max_single_nop_size)
    memcpy (where + offset, nops, max_single_nop_size);

  if (last)
    {
      nops = patt[last - 1];
      if (nops == NULL)
	{
	  /* Use the smaller one plus one-byte NOP if the needed one
	     isn't available.  */
	  last--;
	  nops = patt[last - 1];
	  memcpy (where + offset, nops, last);
	  where[offset + last] = *patt[0];
	}
      else
	memcpy (where + offset, nops, last);
    }
}
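
/* Worked example (illustrative): with f32_patt, count == 7 and
   max_single_nop_size == 4 emit the 4-byte lea followed by the 3-byte
   lea (7 % 4 == 3).  A request hitting the missing 5-byte f32 entry
   (patt[4] == NULL) falls back to the 4-byte pattern plus a one-byte
   0x90 nop.  */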

static INLINE int
fits_in_imm7 (offsetT num)
{
  return (num & 0x7f) == num;
}

static INLINE int
fits_in_imm31 (offsetT num)
{
  return (num & 0x7fffffff) == num;
}

/* Generate COUNT bytes of NOPs to WHERE with the maximum size of a
   single NOP instruction LIMIT.  */
1443
1444 void
1445 i386_generate_nops (fragS *fragP, char *where, offsetT count, int limit)
1446 {
1447 const unsigned char *const *patt = NULL;
1448 int max_single_nop_size;
1449 /* Maximum number of NOPs before switching to jump over NOPs. */
1450 int max_number_of_nops;
1451
1452 switch (fragP->fr_type)
1453 {
1454 case rs_fill_nop:
1455 case rs_align_code:
1456 break;
1457 case rs_machine_dependent:
1458 /* Allow NOP padding for jumps and calls. */
1459 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PADDING
1460 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == FUSED_JCC_PADDING)
1461 break;
1462 /* Fall through. */
1463 default:
1464 return;
1465 }
1466
1467 /* We need to decide which NOP sequence to use for 32bit and
1468 64bit. When -mtune= is used:
1469
1470 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1471 PROCESSOR_GENERIC32, f32_patt will be used.
1472 2. For the rest, alt_patt will be used.
1473
1474 When -mtune= isn't used, alt_patt will be used if
1475 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1476 be used.
1477
1478 When -march= or .arch is used, we can't use anything beyond
1479 cpu_arch_isa_flags. */
1480
1481 if (flag_code == CODE_16BIT)
1482 {
1483 patt = f16_patt;
1484 max_single_nop_size = sizeof (f16_patt) / sizeof (f16_patt[0]);
1485 /* Limit number of NOPs to 2 in 16-bit mode. */
1486 max_number_of_nops = 2;
1487 }
1488 else
1489 {
1490 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1491 {
1492 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1493 switch (cpu_arch_tune)
1494 {
1495 case PROCESSOR_UNKNOWN:
1496 /* We use cpu_arch_isa_flags to check if we SHOULD
1497 optimize with nops. */
1498 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1499 patt = alt_patt;
1500 else
1501 patt = f32_patt;
1502 break;
1503 case PROCESSOR_PENTIUM4:
1504 case PROCESSOR_NOCONA:
1505 case PROCESSOR_CORE:
1506 case PROCESSOR_CORE2:
1507 case PROCESSOR_COREI7:
1508 case PROCESSOR_L1OM:
1509 case PROCESSOR_K1OM:
1510 case PROCESSOR_GENERIC64:
1511 case PROCESSOR_K6:
1512 case PROCESSOR_ATHLON:
1513 case PROCESSOR_K8:
1514 case PROCESSOR_AMDFAM10:
1515 case PROCESSOR_BD:
1516 case PROCESSOR_ZNVER:
1517 case PROCESSOR_BT:
1518 patt = alt_patt;
1519 break;
1520 case PROCESSOR_I386:
1521 case PROCESSOR_I486:
1522 case PROCESSOR_PENTIUM:
1523 case PROCESSOR_PENTIUMPRO:
1524 case PROCESSOR_IAMCU:
1525 case PROCESSOR_GENERIC32:
1526 patt = f32_patt;
1527 break;
1528 }
1529 }
1530 else
1531 {
1532 switch (fragP->tc_frag_data.tune)
1533 {
1534 case PROCESSOR_UNKNOWN:
1535 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1536 PROCESSOR_UNKNOWN. */
1537 abort ();
1538 break;
1539
1540 case PROCESSOR_I386:
1541 case PROCESSOR_I486:
1542 case PROCESSOR_PENTIUM:
1543 case PROCESSOR_IAMCU:
1544 case PROCESSOR_K6:
1545 case PROCESSOR_ATHLON:
1546 case PROCESSOR_K8:
1547 case PROCESSOR_AMDFAM10:
1548 case PROCESSOR_BD:
1549 case PROCESSOR_ZNVER:
1550 case PROCESSOR_BT:
1551 case PROCESSOR_GENERIC32:
1552 /* We use cpu_arch_isa_flags to check if we CAN optimize
1553 with nops. */
1554 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1555 patt = alt_patt;
1556 else
1557 patt = f32_patt;
1558 break;
1559 case PROCESSOR_PENTIUMPRO:
1560 case PROCESSOR_PENTIUM4:
1561 case PROCESSOR_NOCONA:
1562 case PROCESSOR_CORE:
1563 case PROCESSOR_CORE2:
1564 case PROCESSOR_COREI7:
1565 case PROCESSOR_L1OM:
1566 case PROCESSOR_K1OM:
1567 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1568 patt = alt_patt;
1569 else
1570 patt = f32_patt;
1571 break;
1572 case PROCESSOR_GENERIC64:
1573 patt = alt_patt;
1574 break;
1575 }
1576 }
1577
1578 if (patt == f32_patt)
1579 {
1580 max_single_nop_size = sizeof (f32_patt) / sizeof (f32_patt[0]);
1581 /* Limit number of NOPs to 2 for older processors. */
1582 max_number_of_nops = 2;
1583 }
1584 else
1585 {
1586 max_single_nop_size = sizeof (alt_patt) / sizeof (alt_patt[0]);
1587 /* Limit number of NOPs to 7 for newer processors. */
1588 max_number_of_nops = 7;
1589 }
1590 }
1591
1592 if (limit == 0)
1593 limit = max_single_nop_size;
1594
1595 if (fragP->fr_type == rs_fill_nop)
1596 {
1597 /* Output NOPs for .nop directive. */
1598 if (limit > max_single_nop_size)
1599 {
1600 as_bad_where (fragP->fr_file, fragP->fr_line,
1601 _("invalid single nop size: %d "
1602 "(expect within [0, %d])"),
1603 limit, max_single_nop_size);
1604 return;
1605 }
1606 }
1607 else if (fragP->fr_type != rs_machine_dependent)
1608 fragP->fr_var = count;
1609
1610 if ((count / max_single_nop_size) > max_number_of_nops)
1611 {
1612 /* Generate jump over NOPs. */
1613 offsetT disp = count - 2;
1614 if (fits_in_imm7 (disp))
1615 {
1616 /* Use "jmp disp8" if possible. */
1617 count = disp;
1618 where[0] = jump_disp8[0];
1619 where[1] = count;
1620 where += 2;
1621 }
1622 else
1623 {
1624 unsigned int size_of_jump;
1625
1626 if (flag_code == CODE_16BIT)
1627 {
1628 where[0] = jump16_disp32[0];
1629 where[1] = jump16_disp32[1];
1630 size_of_jump = 2;
1631 }
1632 else
1633 {
1634 where[0] = jump32_disp32[0];
1635 size_of_jump = 1;
1636 }
1637
1638 count -= size_of_jump + 4;
1639 if (!fits_in_imm31 (count))
1640 {
1641 as_bad_where (fragP->fr_file, fragP->fr_line,
1642 _("jump over nop padding out of range"));
1643 return;
1644 }
1645
1646 md_number_to_chars (where + size_of_jump, count, 4);
1647 where += size_of_jump + 4;
1648 }
1649 }
1650
1651 /* Generate multiple NOPs. */
1652 i386_output_nops (where, patt, count, limit);
1653 }
1654
1655 static INLINE int
1656 operand_type_all_zero (const union i386_operand_type *x)
1657 {
1658 switch (ARRAY_SIZE(x->array))
1659 {
1660 case 3:
1661 if (x->array[2])
1662 return 0;
1663 /* Fall through. */
1664 case 2:
1665 if (x->array[1])
1666 return 0;
1667 /* Fall through. */
1668 case 1:
1669 return !x->array[0];
1670 default:
1671 abort ();
1672 }
1673 }
1674
1675 static INLINE void
1676 operand_type_set (union i386_operand_type *x, unsigned int v)
1677 {
1678 switch (ARRAY_SIZE(x->array))
1679 {
1680 case 3:
1681 x->array[2] = v;
1682 /* Fall through. */
1683 case 2:
1684 x->array[1] = v;
1685 /* Fall through. */
1686 case 1:
1687 x->array[0] = v;
1688 /* Fall through. */
1689 break;
1690 default:
1691 abort ();
1692 }
1693
1694 x->bitfield.class = ClassNone;
1695 x->bitfield.instance = InstanceNone;
1696 }
1697
1698 static INLINE int
1699 operand_type_equal (const union i386_operand_type *x,
1700 const union i386_operand_type *y)
1701 {
1702 switch (ARRAY_SIZE(x->array))
1703 {
1704 case 3:
1705 if (x->array[2] != y->array[2])
1706 return 0;
1707 /* Fall through. */
1708 case 2:
1709 if (x->array[1] != y->array[1])
1710 return 0;
1711 /* Fall through. */
1712 case 1:
1713 return x->array[0] == y->array[0];
1714 break;
1715 default:
1716 abort ();
1717 }
1718 }
1719
1720 static INLINE int
1721 cpu_flags_all_zero (const union i386_cpu_flags *x)
1722 {
1723 switch (ARRAY_SIZE(x->array))
1724 {
1725 case 4:
1726 if (x->array[3])
1727 return 0;
1728 /* Fall through. */
1729 case 3:
1730 if (x->array[2])
1731 return 0;
1732 /* Fall through. */
1733 case 2:
1734 if (x->array[1])
1735 return 0;
1736 /* Fall through. */
1737 case 1:
1738 return !x->array[0];
1739 default:
1740 abort ();
1741 }
1742 }
1743
1744 static INLINE int
1745 cpu_flags_equal (const union i386_cpu_flags *x,
1746 const union i386_cpu_flags *y)
1747 {
1748 switch (ARRAY_SIZE(x->array))
1749 {
1750 case 4:
1751 if (x->array[3] != y->array[3])
1752 return 0;
1753 /* Fall through. */
1754 case 3:
1755 if (x->array[2] != y->array[2])
1756 return 0;
1757 /* Fall through. */
1758 case 2:
1759 if (x->array[1] != y->array[1])
1760 return 0;
1761 /* Fall through. */
1762 case 1:
1763 return x->array[0] == y->array[0];
1764 break;
1765 default:
1766 abort ();
1767 }
1768 }
1769
1770 static INLINE int
1771 cpu_flags_check_cpu64 (i386_cpu_flags f)
1772 {
1773 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1774 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1775 }
1776
1777 static INLINE i386_cpu_flags
1778 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1779 {
1780 switch (ARRAY_SIZE (x.array))
1781 {
1782 case 4:
1783 x.array [3] &= y.array [3];
1784 /* Fall through. */
1785 case 3:
1786 x.array [2] &= y.array [2];
1787 /* Fall through. */
1788 case 2:
1789 x.array [1] &= y.array [1];
1790 /* Fall through. */
1791 case 1:
1792 x.array [0] &= y.array [0];
1793 break;
1794 default:
1795 abort ();
1796 }
1797 return x;
1798 }
1799
1800 static INLINE i386_cpu_flags
1801 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1802 {
1803 switch (ARRAY_SIZE (x.array))
1804 {
1805 case 4:
1806 x.array [3] |= y.array [3];
1807 /* Fall through. */
1808 case 3:
1809 x.array [2] |= y.array [2];
1810 /* Fall through. */
1811 case 2:
1812 x.array [1] |= y.array [1];
1813 /* Fall through. */
1814 case 1:
1815 x.array [0] |= y.array [0];
1816 break;
1817 default:
1818 abort ();
1819 }
1820 return x;
1821 }
1822
1823 static INLINE i386_cpu_flags
1824 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1825 {
1826 switch (ARRAY_SIZE (x.array))
1827 {
1828 case 4:
1829 x.array [3] &= ~y.array [3];
1830 /* Fall through. */
1831 case 3:
1832 x.array [2] &= ~y.array [2];
1833 /* Fall through. */
1834 case 2:
1835 x.array [1] &= ~y.array [1];
1836 /* Fall through. */
1837 case 1:
1838 x.array [0] &= ~y.array [0];
1839 break;
1840 default:
1841 abort ();
1842 }
1843 return x;
1844 }
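
/* Illustration only (not used by the assembler itself): these helpers
   treat i386_cpu_flags as a fixed-width bit set. With hypothetical
   values a = {CpuSSE2, CpuAVX} and b = {CpuAVX},
   cpu_flags_and (a, b) is {CpuAVX}, cpu_flags_or (a, b) is
   {CpuSSE2, CpuAVX}, and cpu_flags_and_not (a, b) is {CpuSSE2}. */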
1845
1846 static const i386_cpu_flags avx512 = CPU_ANY_AVX512F_FLAGS;
1847
1848 #define CPU_FLAGS_ARCH_MATCH 0x1
1849 #define CPU_FLAGS_64BIT_MATCH 0x2
1850
1851 #define CPU_FLAGS_PERFECT_MATCH \
1852 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1853
1854 /* Return CPU flags match bits. */
1855
1856 static int
1857 cpu_flags_match (const insn_template *t)
1858 {
1859 i386_cpu_flags x = t->cpu_flags;
1860 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1861
1862 x.bitfield.cpu64 = 0;
1863 x.bitfield.cpuno64 = 0;
1864
1865 if (cpu_flags_all_zero (&x))
1866 {
1867 /* This instruction is available on all archs. */
1868 match |= CPU_FLAGS_ARCH_MATCH;
1869 }
1870 else
1871 {
1872 /* This instruction is available only on some archs. */
1873 i386_cpu_flags cpu = cpu_arch_flags;
1874
1875 /* AVX512VL is not a standalone feature - match it and then strip it. */
1876 if (x.bitfield.cpuavx512vl && !cpu.bitfield.cpuavx512vl)
1877 return match;
1878 x.bitfield.cpuavx512vl = 0;
1879
1880 cpu = cpu_flags_and (x, cpu);
1881 if (!cpu_flags_all_zero (&cpu))
1882 {
1883 if (x.bitfield.cpuavx)
1884 {
1885 /* We need to check a few extra flags with AVX. */
1886 if (cpu.bitfield.cpuavx
1887 && (!t->opcode_modifier.sse2avx || sse2avx)
1888 && (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1889 && (!x.bitfield.cpugfni || cpu.bitfield.cpugfni)
1890 && (!x.bitfield.cpupclmul || cpu.bitfield.cpupclmul))
1891 match |= CPU_FLAGS_ARCH_MATCH;
1892 }
1893 else if (x.bitfield.cpuavx512f)
1894 {
1895 /* We need to check a few extra flags with AVX512F. */
1896 if (cpu.bitfield.cpuavx512f
1897 && (!x.bitfield.cpugfni || cpu.bitfield.cpugfni)
1898 && (!x.bitfield.cpuvaes || cpu.bitfield.cpuvaes)
1899 && (!x.bitfield.cpuvpclmulqdq || cpu.bitfield.cpuvpclmulqdq))
1900 match |= CPU_FLAGS_ARCH_MATCH;
1901 }
1902 else
1903 match |= CPU_FLAGS_ARCH_MATCH;
1904 }
1905 }
1906 return match;
1907 }
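
/* Informal example: a template whose cpu_flags require CpuAVX2 gets
   CPU_FLAGS_PERFECT_MATCH once ".arch .avx2" (or a covering -march)
   is in effect, while a template carrying Cpu64 that is assembled
   outside 64-bit mode loses CPU_FLAGS_64BIT_MATCH and is rejected. */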
1908
1909 static INLINE i386_operand_type
1910 operand_type_and (i386_operand_type x, i386_operand_type y)
1911 {
1912 if (x.bitfield.class != y.bitfield.class)
1913 x.bitfield.class = ClassNone;
1914 if (x.bitfield.instance != y.bitfield.instance)
1915 x.bitfield.instance = InstanceNone;
1916
1917 switch (ARRAY_SIZE (x.array))
1918 {
1919 case 3:
1920 x.array [2] &= y.array [2];
1921 /* Fall through. */
1922 case 2:
1923 x.array [1] &= y.array [1];
1924 /* Fall through. */
1925 case 1:
1926 x.array [0] &= y.array [0];
1927 break;
1928 default:
1929 abort ();
1930 }
1931 return x;
1932 }
1933
1934 static INLINE i386_operand_type
1935 operand_type_and_not (i386_operand_type x, i386_operand_type y)
1936 {
1937 gas_assert (y.bitfield.class == ClassNone);
1938 gas_assert (y.bitfield.instance == InstanceNone);
1939
1940 switch (ARRAY_SIZE (x.array))
1941 {
1942 case 3:
1943 x.array [2] &= ~y.array [2];
1944 /* Fall through. */
1945 case 2:
1946 x.array [1] &= ~y.array [1];
1947 /* Fall through. */
1948 case 1:
1949 x.array [0] &= ~y.array [0];
1950 break;
1951 default:
1952 abort ();
1953 }
1954 return x;
1955 }
1956
1957 static INLINE i386_operand_type
1958 operand_type_or (i386_operand_type x, i386_operand_type y)
1959 {
1960 gas_assert (x.bitfield.class == ClassNone ||
1961 y.bitfield.class == ClassNone ||
1962 x.bitfield.class == y.bitfield.class);
1963 gas_assert (x.bitfield.instance == InstanceNone ||
1964 y.bitfield.instance == InstanceNone ||
1965 x.bitfield.instance == y.bitfield.instance);
1966
1967 switch (ARRAY_SIZE (x.array))
1968 {
1969 case 3:
1970 x.array [2] |= y.array [2];
1971 /* Fall through. */
1972 case 2:
1973 x.array [1] |= y.array [1];
1974 /* Fall through. */
1975 case 1:
1976 x.array [0] |= y.array [0];
1977 break;
1978 default:
1979 abort ();
1980 }
1981 return x;
1982 }
1983
1984 static INLINE i386_operand_type
1985 operand_type_xor (i386_operand_type x, i386_operand_type y)
1986 {
1987 gas_assert (y.bitfield.class == ClassNone);
1988 gas_assert (y.bitfield.instance == InstanceNone);
1989
1990 switch (ARRAY_SIZE (x.array))
1991 {
1992 case 3:
1993 x.array [2] ^= y.array [2];
1994 /* Fall through. */
1995 case 2:
1996 x.array [1] ^= y.array [1];
1997 /* Fall through. */
1998 case 1:
1999 x.array [0] ^= y.array [0];
2000 break;
2001 default:
2002 abort ();
2003 }
2004 return x;
2005 }
2006
2007 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
2008 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
2009 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
2010 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
2011 static const i386_operand_type anydisp = OPERAND_TYPE_ANYDISP;
2012 static const i386_operand_type anyimm = OPERAND_TYPE_ANYIMM;
2013 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
2014 static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
2015 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
2016 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
2017 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
2018 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
2019 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
2020 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
2021 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
2022 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
2023 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
2024
2025 enum operand_type
2026 {
2027 reg,
2028 imm,
2029 disp,
2030 anymem
2031 };
2032
2033 static INLINE int
2034 operand_type_check (i386_operand_type t, enum operand_type c)
2035 {
2036 switch (c)
2037 {
2038 case reg:
2039 return t.bitfield.class == Reg;
2040
2041 case imm:
2042 return (t.bitfield.imm8
2043 || t.bitfield.imm8s
2044 || t.bitfield.imm16
2045 || t.bitfield.imm32
2046 || t.bitfield.imm32s
2047 || t.bitfield.imm64);
2048
2049 case disp:
2050 return (t.bitfield.disp8
2051 || t.bitfield.disp16
2052 || t.bitfield.disp32
2053 || t.bitfield.disp32s
2054 || t.bitfield.disp64);
2055
2056 case anymem:
2057 return (t.bitfield.disp8
2058 || t.bitfield.disp16
2059 || t.bitfield.disp32
2060 || t.bitfield.disp32s
2061 || t.bitfield.disp64
2062 || t.bitfield.baseindex);
2063
2064 default:
2065 abort ();
2066 }
2067
2068 return 0;
2069 }
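
/* For instance, operand_type_check (t, anymem) is how memory-capable
   operands are recognized: any of the disp* bits or baseindex makes
   it true, while a plain register type yields 0. */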
2070
2071 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
2072 between operand GIVEN and operand WANTED for instruction template T. */
2073
2074 static INLINE int
2075 match_operand_size (const insn_template *t, unsigned int wanted,
2076 unsigned int given)
2077 {
2078 return !((i.types[given].bitfield.byte
2079 && !t->operand_types[wanted].bitfield.byte)
2080 || (i.types[given].bitfield.word
2081 && !t->operand_types[wanted].bitfield.word)
2082 || (i.types[given].bitfield.dword
2083 && !t->operand_types[wanted].bitfield.dword)
2084 || (i.types[given].bitfield.qword
2085 && !t->operand_types[wanted].bitfield.qword)
2086 || (i.types[given].bitfield.tbyte
2087 && !t->operand_types[wanted].bitfield.tbyte));
2088 }
2089
2090 /* Return 1 if there is no conflict in SIMD register between operand
2091 GIVEN and operand WANTED for instruction template T. */
2092
2093 static INLINE int
2094 match_simd_size (const insn_template *t, unsigned int wanted,
2095 unsigned int given)
2096 {
2097 return !((i.types[given].bitfield.xmmword
2098 && !t->operand_types[wanted].bitfield.xmmword)
2099 || (i.types[given].bitfield.ymmword
2100 && !t->operand_types[wanted].bitfield.ymmword)
2101 || (i.types[given].bitfield.zmmword
2102 && !t->operand_types[wanted].bitfield.zmmword));
2103 }
2104
2105 /* Return 1 if there is no conflict in any size between operand GIVEN
2106 and operand WANTED for instruction template T. */
2107
2108 static INLINE int
2109 match_mem_size (const insn_template *t, unsigned int wanted,
2110 unsigned int given)
2111 {
2112 return (match_operand_size (t, wanted, given)
2113 && !((i.types[given].bitfield.unspecified
2114 && !i.broadcast
2115 && !t->operand_types[wanted].bitfield.unspecified)
2116 || (i.types[given].bitfield.fword
2117 && !t->operand_types[wanted].bitfield.fword)
2118 /* For scalar opcode templates to allow register and memory
2119 operands at the same time, some special casing is needed
2120 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2121 down-conversion vpmov*. */
2122 || ((t->operand_types[wanted].bitfield.class == RegSIMD
2123 && !t->opcode_modifier.broadcast
2124 && (t->operand_types[wanted].bitfield.byte
2125 || t->operand_types[wanted].bitfield.word
2126 || t->operand_types[wanted].bitfield.dword
2127 || t->operand_types[wanted].bitfield.qword))
2128 ? (i.types[given].bitfield.xmmword
2129 || i.types[given].bitfield.ymmword
2130 || i.types[given].bitfield.zmmword)
2131 : !match_simd_size (t, wanted, given))));
2132 }
2133
2134 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2135 operands for instruction template T, and it has MATCH_REVERSE set if there
2136 is no size conflict on any operands for the template with operands reversed
2137 (and the template allows for reversing in the first place). */
2138
2139 #define MATCH_STRAIGHT 1
2140 #define MATCH_REVERSE 2
2141
2142 static INLINE unsigned int
2143 operand_size_match (const insn_template *t)
2144 {
2145 unsigned int j, match = MATCH_STRAIGHT;
2146
2147 /* Don't check non-absolute jump instructions. */
2148 if (t->opcode_modifier.jump
2149 && t->opcode_modifier.jump != JUMP_ABSOLUTE)
2150 return match;
2151
2152 /* Check memory and accumulator operand size. */
2153 for (j = 0; j < i.operands; j++)
2154 {
2155 if (i.types[j].bitfield.class != Reg
2156 && i.types[j].bitfield.class != RegSIMD
2157 && t->opcode_modifier.anysize)
2158 continue;
2159
2160 if (t->operand_types[j].bitfield.class == Reg
2161 && !match_operand_size (t, j, j))
2162 {
2163 match = 0;
2164 break;
2165 }
2166
2167 if (t->operand_types[j].bitfield.class == RegSIMD
2168 && !match_simd_size (t, j, j))
2169 {
2170 match = 0;
2171 break;
2172 }
2173
2174 if (t->operand_types[j].bitfield.instance == Accum
2175 && (!match_operand_size (t, j, j) || !match_simd_size (t, j, j)))
2176 {
2177 match = 0;
2178 break;
2179 }
2180
2181 if ((i.flags[j] & Operand_Mem) && !match_mem_size (t, j, j))
2182 {
2183 match = 0;
2184 break;
2185 }
2186 }
2187
2188 if (!t->opcode_modifier.d)
2189 {
2190 mismatch:
2191 if (!match)
2192 i.error = operand_size_mismatch;
2193 return match;
2194 }
2195
2196 /* Check reverse. */
2197 gas_assert (i.operands >= 2 && i.operands <= 3);
2198
2199 for (j = 0; j < i.operands; j++)
2200 {
2201 unsigned int given = i.operands - j - 1;
2202
2203 if (t->operand_types[j].bitfield.class == Reg
2204 && !match_operand_size (t, j, given))
2205 goto mismatch;
2206
2207 if (t->operand_types[j].bitfield.class == RegSIMD
2208 && !match_simd_size (t, j, given))
2209 goto mismatch;
2210
2211 if (t->operand_types[j].bitfield.instance == Accum
2212 && (!match_operand_size (t, j, given)
2213 || !match_simd_size (t, j, given)))
2214 goto mismatch;
2215
2216 if ((i.flags[given] & Operand_Mem) && !match_mem_size (t, j, given))
2217 goto mismatch;
2218 }
2219
2220 return match | MATCH_REVERSE;
2221 }
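
/* Informal example: for a template with the D (direction) bit, such
   as the register<->register forms of "mov", operands whose sizes
   match in both orders yield MATCH_STRAIGHT | MATCH_REVERSE, allowing
   the reversed template form to be chosen later if needed. */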
2222
2223 static INLINE int
2224 operand_type_match (i386_operand_type overlap,
2225 i386_operand_type given)
2226 {
2227 i386_operand_type temp = overlap;
2228
2229 temp.bitfield.unspecified = 0;
2230 temp.bitfield.byte = 0;
2231 temp.bitfield.word = 0;
2232 temp.bitfield.dword = 0;
2233 temp.bitfield.fword = 0;
2234 temp.bitfield.qword = 0;
2235 temp.bitfield.tbyte = 0;
2236 temp.bitfield.xmmword = 0;
2237 temp.bitfield.ymmword = 0;
2238 temp.bitfield.zmmword = 0;
2239 if (operand_type_all_zero (&temp))
2240 goto mismatch;
2241
2242 if (given.bitfield.baseindex == overlap.bitfield.baseindex)
2243 return 1;
2244
2245 mismatch:
2246 i.error = operand_type_mismatch;
2247 return 0;
2248 }
2249
2250 /* If given types g0 and g1 are registers, they must be of the same type
2251 unless the expected operand type register overlap is null.
2252 Some Intel syntax memory operand size checking also happens here. */
2253
2254 static INLINE int
2255 operand_type_register_match (i386_operand_type g0,
2256 i386_operand_type t0,
2257 i386_operand_type g1,
2258 i386_operand_type t1)
2259 {
2260 if (g0.bitfield.class != Reg
2261 && g0.bitfield.class != RegSIMD
2262 && (!operand_type_check (g0, anymem)
2263 || g0.bitfield.unspecified
2264 || (t0.bitfield.class != Reg
2265 && t0.bitfield.class != RegSIMD)))
2266 return 1;
2267
2268 if (g1.bitfield.class != Reg
2269 && g1.bitfield.class != RegSIMD
2270 && (!operand_type_check (g1, anymem)
2271 || g1.bitfield.unspecified
2272 || (t1.bitfield.class != Reg
2273 && t1.bitfield.class != RegSIMD)))
2274 return 1;
2275
2276 if (g0.bitfield.byte == g1.bitfield.byte
2277 && g0.bitfield.word == g1.bitfield.word
2278 && g0.bitfield.dword == g1.bitfield.dword
2279 && g0.bitfield.qword == g1.bitfield.qword
2280 && g0.bitfield.xmmword == g1.bitfield.xmmword
2281 && g0.bitfield.ymmword == g1.bitfield.ymmword
2282 && g0.bitfield.zmmword == g1.bitfield.zmmword)
2283 return 1;
2284
2285 if (!(t0.bitfield.byte & t1.bitfield.byte)
2286 && !(t0.bitfield.word & t1.bitfield.word)
2287 && !(t0.bitfield.dword & t1.bitfield.dword)
2288 && !(t0.bitfield.qword & t1.bitfield.qword)
2289 && !(t0.bitfield.xmmword & t1.bitfield.xmmword)
2290 && !(t0.bitfield.ymmword & t1.bitfield.ymmword)
2291 && !(t0.bitfield.zmmword & t1.bitfield.zmmword))
2292 return 1;
2293
2294 i.error = register_type_mismatch;
2295
2296 return 0;
2297 }
2298
2299 static INLINE unsigned int
2300 register_number (const reg_entry *r)
2301 {
2302 unsigned int nr = r->reg_num;
2303
2304 if (r->reg_flags & RegRex)
2305 nr += 8;
2306
2307 if (r->reg_flags & RegVRex)
2308 nr += 16;
2309
2310 return nr;
2311 }
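
/* E.g. %r9 has reg_num 1 with RegRex set, giving 9; an AVX512
   register such as %xmm17 has reg_num 1 with RegVRex set, giving
   17. */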
2312
2313 static INLINE unsigned int
2314 mode_from_disp_size (i386_operand_type t)
2315 {
2316 if (t.bitfield.disp8)
2317 return 1;
2318 else if (t.bitfield.disp16
2319 || t.bitfield.disp32
2320 || t.bitfield.disp32s)
2321 return 2;
2322 else
2323 return 0;
2324 }
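
/* The value returned above is the ModR/M "mod" field: 1 selects the
   disp8 form, 2 the disp16/disp32 form, and 0 means no
   displacement. */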
2325
2326 static INLINE int
2327 fits_in_signed_byte (addressT num)
2328 {
2329 return num + 0x80 <= 0xff;
2330 }
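
/* The bias trick above relies on unsigned wraparound: adding 0x80
   maps the signed range [-0x80, 0x7f] onto [0, 0xff], so one unsigned
   comparison suffices. E.g. (addressT) -1 + 0x80 == 0x7f fits, while
   0x80 + 0x80 == 0x100 does not. The word and long variants below
   use the same idea with wider biases. */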
2331
2332 static INLINE int
2333 fits_in_unsigned_byte (addressT num)
2334 {
2335 return num <= 0xff;
2336 }
2337
2338 static INLINE int
2339 fits_in_unsigned_word (addressT num)
2340 {
2341 return num <= 0xffff;
2342 }
2343
2344 static INLINE int
2345 fits_in_signed_word (addressT num)
2346 {
2347 return num + 0x8000 <= 0xffff;
2348 }
2349
2350 static INLINE int
2351 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED)
2352 {
2353 #ifndef BFD64
2354 return 1;
2355 #else
2356 return num + 0x80000000 <= 0xffffffff;
2357 #endif
2358 } /* fits_in_signed_long() */
2359
2360 static INLINE int
2361 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED)
2362 {
2363 #ifndef BFD64
2364 return 1;
2365 #else
2366 return num <= 0xffffffff;
2367 #endif
2368 } /* fits_in_unsigned_long() */
2369
2370 static INLINE int
2371 fits_in_disp8 (offsetT num)
2372 {
2373 int shift = i.memshift;
2374 unsigned int mask;
2375
2376 if (shift == -1)
2377 abort ();
2378
2379 mask = (1 << shift) - 1;
2380
2381 /* Return 0 if NUM isn't properly aligned. */
2382 if ((num & mask))
2383 return 0;
2384
2385 /* Check if NUM will fit in 8bit after shift. */
2386 return fits_in_signed_byte (num >> shift);
2387 }
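
/* Worked example for EVEX compressed disp8*N: with i.memshift == 2
   (4-byte granularity), a displacement of 0x100 shifts down to 0x40
   and fits in a disp8; 0x102 fails the alignment mask, and 0x4000 is
   still out of range after shifting. */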
2388
2389 static INLINE int
2390 fits_in_imm4 (offsetT num)
2391 {
2392 return (num & 0xf) == num;
2393 }
2394
2395 static i386_operand_type
2396 smallest_imm_type (offsetT num)
2397 {
2398 i386_operand_type t;
2399
2400 operand_type_set (&t, 0);
2401 t.bitfield.imm64 = 1;
2402
2403 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
2404 {
2405 /* This code is disabled on the 486 because all the Imm1 forms
2406 in the opcode table are slower on the i486. They're the
2407 versions with the implicitly specified single-position
2408 displacement, which has another syntax if you really want to
2409 use that form. */
2410 t.bitfield.imm1 = 1;
2411 t.bitfield.imm8 = 1;
2412 t.bitfield.imm8s = 1;
2413 t.bitfield.imm16 = 1;
2414 t.bitfield.imm32 = 1;
2415 t.bitfield.imm32s = 1;
2416 }
2417 else if (fits_in_signed_byte (num))
2418 {
2419 t.bitfield.imm8 = 1;
2420 t.bitfield.imm8s = 1;
2421 t.bitfield.imm16 = 1;
2422 t.bitfield.imm32 = 1;
2423 t.bitfield.imm32s = 1;
2424 }
2425 else if (fits_in_unsigned_byte (num))
2426 {
2427 t.bitfield.imm8 = 1;
2428 t.bitfield.imm16 = 1;
2429 t.bitfield.imm32 = 1;
2430 t.bitfield.imm32s = 1;
2431 }
2432 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
2433 {
2434 t.bitfield.imm16 = 1;
2435 t.bitfield.imm32 = 1;
2436 t.bitfield.imm32s = 1;
2437 }
2438 else if (fits_in_signed_long (num))
2439 {
2440 t.bitfield.imm32 = 1;
2441 t.bitfield.imm32s = 1;
2442 }
2443 else if (fits_in_unsigned_long (num))
2444 t.bitfield.imm32 = 1;
2445
2446 return t;
2447 }
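
/* E.g. smallest_imm_type (0x42) sets imm8, imm8s, imm16, imm32, and
   imm32s (plus the unconditional imm64), letting the matcher pick the
   narrowest encoding a template supports; 0x12345678 only gets the
   imm32, imm32s, and imm64 bits. */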
2448
2449 static offsetT
2450 offset_in_range (offsetT val, int size)
2451 {
2452 addressT mask;
2453
2454 switch (size)
2455 {
2456 case 1: mask = ((addressT) 1 << 8) - 1; break;
2457 case 2: mask = ((addressT) 1 << 16) - 1; break;
2458 case 4: mask = ((addressT) 2 << 31) - 1; break;
2459 #ifdef BFD64
2460 case 8: mask = ((addressT) 2 << 63) - 1; break;
2461 #endif
2462 default: abort ();
2463 }
2464
2465 #ifdef BFD64
2466 /* If BFD64, sign extend val for 32bit address mode. */
2467 if (flag_code != CODE_64BIT
2468 || i.prefix[ADDR_PREFIX])
2469 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
2470 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
2471 #endif
2472
2473 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
2474 {
2475 char buf1[40], buf2[40];
2476
2477 sprint_value (buf1, val);
2478 sprint_value (buf2, val & mask);
2479 as_warn (_("%s shortened to %s"), buf1, buf2);
2480 }
2481 return val & mask;
2482 }
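
/* E.g. offset_in_range (0x1234, 1) warns that 0x1234 is shortened to
   0x34, whereas offset_in_range (-1, 1) is silent because every
   discarded bit is a copy of the sign bit. */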
2483
2484 enum PREFIX_GROUP
2485 {
2486 PREFIX_EXIST = 0,
2487 PREFIX_LOCK,
2488 PREFIX_REP,
2489 PREFIX_DS,
2490 PREFIX_OTHER
2491 };
2492
2493 /* Returns
2494 a. PREFIX_EXIST if attempting to add a prefix where one from the
2495 same class already exists.
2496 b. PREFIX_LOCK if lock prefix is added.
2497 c. PREFIX_REP if rep/repne prefix is added.
2498 d. PREFIX_DS if ds prefix is added.
2499 e. PREFIX_OTHER if other prefix is added.
2500 */
2501
2502 static enum PREFIX_GROUP
2503 add_prefix (unsigned int prefix)
2504 {
2505 enum PREFIX_GROUP ret = PREFIX_OTHER;
2506 unsigned int q;
2507
2508 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
2509 && flag_code == CODE_64BIT)
2510 {
2511 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
2512 || (i.prefix[REX_PREFIX] & prefix & REX_R)
2513 || (i.prefix[REX_PREFIX] & prefix & REX_X)
2514 || (i.prefix[REX_PREFIX] & prefix & REX_B))
2515 ret = PREFIX_EXIST;
2516 q = REX_PREFIX;
2517 }
2518 else
2519 {
2520 switch (prefix)
2521 {
2522 default:
2523 abort ();
2524
2525 case DS_PREFIX_OPCODE:
2526 ret = PREFIX_DS;
2527 /* Fall through. */
2528 case CS_PREFIX_OPCODE:
2529 case ES_PREFIX_OPCODE:
2530 case FS_PREFIX_OPCODE:
2531 case GS_PREFIX_OPCODE:
2532 case SS_PREFIX_OPCODE:
2533 q = SEG_PREFIX;
2534 break;
2535
2536 case REPNE_PREFIX_OPCODE:
2537 case REPE_PREFIX_OPCODE:
2538 q = REP_PREFIX;
2539 ret = PREFIX_REP;
2540 break;
2541
2542 case LOCK_PREFIX_OPCODE:
2543 q = LOCK_PREFIX;
2544 ret = PREFIX_LOCK;
2545 break;
2546
2547 case FWAIT_OPCODE:
2548 q = WAIT_PREFIX;
2549 break;
2550
2551 case ADDR_PREFIX_OPCODE:
2552 q = ADDR_PREFIX;
2553 break;
2554
2555 case DATA_PREFIX_OPCODE:
2556 q = DATA_PREFIX;
2557 break;
2558 }
2559 if (i.prefix[q] != 0)
2560 ret = PREFIX_EXIST;
2561 }
2562
2563 if (ret)
2564 {
2565 if (!i.prefix[q])
2566 ++i.prefixes;
2567 i.prefix[q] |= prefix;
2568 }
2569 else
2570 as_bad (_("same type of prefix used twice"));
2571
2572 return ret;
2573 }
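
/* For example, add_prefix (LOCK_PREFIX_OPCODE) returns PREFIX_LOCK
   the first time and PREFIX_EXIST (plus the "same type of prefix used
   twice" diagnostic) if a lock prefix was already recorded for this
   insn. */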
2574
2575 static void
2576 update_code_flag (int value, int check)
2577 {
2578 PRINTF_LIKE ((*as_error));
2579
2580 flag_code = (enum flag_code) value;
2581 if (flag_code == CODE_64BIT)
2582 {
2583 cpu_arch_flags.bitfield.cpu64 = 1;
2584 cpu_arch_flags.bitfield.cpuno64 = 0;
2585 }
2586 else
2587 {
2588 cpu_arch_flags.bitfield.cpu64 = 0;
2589 cpu_arch_flags.bitfield.cpuno64 = 1;
2590 }
2591 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
2592 {
2593 if (check)
2594 as_error = as_fatal;
2595 else
2596 as_error = as_bad;
2597 (*as_error) (_("64bit mode not supported on `%s'."),
2598 cpu_arch_name ? cpu_arch_name : default_arch);
2599 }
2600 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2601 {
2602 if (check)
2603 as_error = as_fatal;
2604 else
2605 as_error = as_bad;
2606 (*as_error) (_("32bit mode not supported on `%s'."),
2607 cpu_arch_name ? cpu_arch_name : default_arch);
2608 }
2609 stackop_size = '\0';
2610 }
2611
2612 static void
2613 set_code_flag (int value)
2614 {
2615 update_code_flag (value, 0);
2616 }
2617
2618 static void
2619 set_16bit_gcc_code_flag (int new_code_flag)
2620 {
2621 flag_code = (enum flag_code) new_code_flag;
2622 if (flag_code != CODE_16BIT)
2623 abort ();
2624 cpu_arch_flags.bitfield.cpu64 = 0;
2625 cpu_arch_flags.bitfield.cpuno64 = 1;
2626 stackop_size = LONG_MNEM_SUFFIX;
2627 }
2628
2629 static void
2630 set_intel_syntax (int syntax_flag)
2631 {
2632 /* Find out if register prefixing is specified. */
2633 int ask_naked_reg = 0;
2634
2635 SKIP_WHITESPACE ();
2636 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2637 {
2638 char *string;
2639 int e = get_symbol_name (&string);
2640
2641 if (strcmp (string, "prefix") == 0)
2642 ask_naked_reg = 1;
2643 else if (strcmp (string, "noprefix") == 0)
2644 ask_naked_reg = -1;
2645 else
2646 as_bad (_("bad argument to syntax directive."));
2647 (void) restore_line_pointer (e);
2648 }
2649 demand_empty_rest_of_line ();
2650
2651 intel_syntax = syntax_flag;
2652
2653 if (ask_naked_reg == 0)
2654 allow_naked_reg = (intel_syntax
2655 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2656 else
2657 allow_naked_reg = (ask_naked_reg < 0);
2658
2659 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2660
2661 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2662 identifier_chars['$'] = intel_syntax ? '$' : 0;
2663 register_prefix = allow_naked_reg ? "" : "%";
2664 }
2665
2666 static void
2667 set_intel_mnemonic (int mnemonic_flag)
2668 {
2669 intel_mnemonic = mnemonic_flag;
2670 }
2671
2672 static void
2673 set_allow_index_reg (int flag)
2674 {
2675 allow_index_reg = flag;
2676 }
2677
2678 static void
2679 set_check (int what)
2680 {
2681 enum check_kind *kind;
2682 const char *str;
2683
2684 if (what)
2685 {
2686 kind = &operand_check;
2687 str = "operand";
2688 }
2689 else
2690 {
2691 kind = &sse_check;
2692 str = "sse";
2693 }
2694
2695 SKIP_WHITESPACE ();
2696
2697 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2698 {
2699 char *string;
2700 int e = get_symbol_name (&string);
2701
2702 if (strcmp (string, "none") == 0)
2703 *kind = check_none;
2704 else if (strcmp (string, "warning") == 0)
2705 *kind = check_warning;
2706 else if (strcmp (string, "error") == 0)
2707 *kind = check_error;
2708 else
2709 as_bad (_("bad argument to %s_check directive."), str);
2710 (void) restore_line_pointer (e);
2711 }
2712 else
2713 as_bad (_("missing argument for %s_check directive"), str);
2714
2715 demand_empty_rest_of_line ();
2716 }
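
/* Directive usage sketch: ".sse_check warning" or ".operand_check
   error" select the diagnostic level; "none" disables the check. */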
2717
2718 static void
2719 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2720 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2721 {
2722 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2723 static const char *arch;
2724
2725 /* Intel L1OM/K1OM and MCU are only supported on ELF. */
2726 if (!IS_ELF)
2727 return;
2728
2729 if (!arch)
2730 {
2731 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2732 use default_arch. */
2733 arch = cpu_arch_name;
2734 if (!arch)
2735 arch = default_arch;
2736 }
2737
2738 /* If we are targeting Intel MCU, we must enable it. */
2739 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_IAMCU
2740 || new_flag.bitfield.cpuiamcu)
2741 return;
2742
2743 /* If we are targeting Intel L1OM, we must enable it. */
2744 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2745 || new_flag.bitfield.cpul1om)
2746 return;
2747
2748 /* If we are targeting Intel K1OM, we must enable it. */
2749 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2750 || new_flag.bitfield.cpuk1om)
2751 return;
2752
2753 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2754 #endif
2755 }
2756
2757 static void
2758 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2759 {
2760 SKIP_WHITESPACE ();
2761
2762 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2763 {
2764 char *string;
2765 int e = get_symbol_name (&string);
2766 unsigned int j;
2767 i386_cpu_flags flags;
2768
2769 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2770 {
2771 if (strcmp (string, cpu_arch[j].name) == 0)
2772 {
2773 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2774
2775 if (*string != '.')
2776 {
2777 cpu_arch_name = cpu_arch[j].name;
2778 cpu_sub_arch_name = NULL;
2779 cpu_arch_flags = cpu_arch[j].flags;
2780 if (flag_code == CODE_64BIT)
2781 {
2782 cpu_arch_flags.bitfield.cpu64 = 1;
2783 cpu_arch_flags.bitfield.cpuno64 = 0;
2784 }
2785 else
2786 {
2787 cpu_arch_flags.bitfield.cpu64 = 0;
2788 cpu_arch_flags.bitfield.cpuno64 = 1;
2789 }
2790 cpu_arch_isa = cpu_arch[j].type;
2791 cpu_arch_isa_flags = cpu_arch[j].flags;
2792 if (!cpu_arch_tune_set)
2793 {
2794 cpu_arch_tune = cpu_arch_isa;
2795 cpu_arch_tune_flags = cpu_arch_isa_flags;
2796 }
2797 break;
2798 }
2799
2800 flags = cpu_flags_or (cpu_arch_flags,
2801 cpu_arch[j].flags);
2802
2803 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2804 {
2805 if (cpu_sub_arch_name)
2806 {
2807 char *name = cpu_sub_arch_name;
2808 cpu_sub_arch_name = concat (name,
2809 cpu_arch[j].name,
2810 (const char *) NULL);
2811 free (name);
2812 }
2813 else
2814 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2815 cpu_arch_flags = flags;
2816 cpu_arch_isa_flags = flags;
2817 }
2818 else
2819 cpu_arch_isa_flags
2820 = cpu_flags_or (cpu_arch_isa_flags,
2821 cpu_arch[j].flags);
2822 (void) restore_line_pointer (e);
2823 demand_empty_rest_of_line ();
2824 return;
2825 }
2826 }
2827
2828 if (*string == '.' && j >= ARRAY_SIZE (cpu_arch))
2829 {
2830 /* Disable an ISA extension. */
2831 for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
2832 if (strcmp (string + 1, cpu_noarch [j].name) == 0)
2833 {
2834 flags = cpu_flags_and_not (cpu_arch_flags,
2835 cpu_noarch[j].flags);
2836 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2837 {
2838 if (cpu_sub_arch_name)
2839 {
2840 char *name = cpu_sub_arch_name;
2841 cpu_sub_arch_name = concat (name, string,
2842 (const char *) NULL);
2843 free (name);
2844 }
2845 else
2846 cpu_sub_arch_name = xstrdup (string);
2847 cpu_arch_flags = flags;
2848 cpu_arch_isa_flags = flags;
2849 }
2850 (void) restore_line_pointer (e);
2851 demand_empty_rest_of_line ();
2852 return;
2853 }
2854
2855 j = ARRAY_SIZE (cpu_arch);
2856 }
2857
2858 if (j >= ARRAY_SIZE (cpu_arch))
2859 as_bad (_("no such architecture: `%s'"), string);
2860
2861 *input_line_pointer = e;
2862 }
2863 else
2864 as_bad (_("missing cpu architecture"));
2865
2866 no_cond_jump_promotion = 0;
2867 if (*input_line_pointer == ','
2868 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2869 {
2870 char *string;
2871 char e;
2872
2873 ++input_line_pointer;
2874 e = get_symbol_name (&string);
2875
2876 if (strcmp (string, "nojumps") == 0)
2877 no_cond_jump_promotion = 1;
2878 else if (strcmp (string, "jumps") == 0)
2879 ;
2880 else
2881 as_bad (_("no such architecture modifier: `%s'"), string);
2882
2883 (void) restore_line_pointer (e);
2884 }
2885
2886 demand_empty_rest_of_line ();
2887 }
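
/* Directive usage sketch:

     .arch i686, nojumps   # select a base architecture
     .arch .avx2           # enable one ISA extension
     .arch .noavx2         # disable it again via cpu_noarch

   Extension names begin with '.'; "jumps"/"nojumps" controls
   conditional jump promotion. */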
2888
2889 enum bfd_architecture
2890 i386_arch (void)
2891 {
2892 if (cpu_arch_isa == PROCESSOR_L1OM)
2893 {
2894 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2895 || flag_code != CODE_64BIT)
2896 as_fatal (_("Intel L1OM is 64bit ELF only"));
2897 return bfd_arch_l1om;
2898 }
2899 else if (cpu_arch_isa == PROCESSOR_K1OM)
2900 {
2901 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2902 || flag_code != CODE_64BIT)
2903 as_fatal (_("Intel K1OM is 64bit ELF only"));
2904 return bfd_arch_k1om;
2905 }
2906 else if (cpu_arch_isa == PROCESSOR_IAMCU)
2907 {
2908 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2909 || flag_code == CODE_64BIT)
2910 as_fatal (_("Intel MCU is 32bit ELF only"));
2911 return bfd_arch_iamcu;
2912 }
2913 else
2914 return bfd_arch_i386;
2915 }
2916
2917 unsigned long
2918 i386_mach (void)
2919 {
2920 if (!strncmp (default_arch, "x86_64", 6))
2921 {
2922 if (cpu_arch_isa == PROCESSOR_L1OM)
2923 {
2924 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2925 || default_arch[6] != '\0')
2926 as_fatal (_("Intel L1OM is 64bit ELF only"));
2927 return bfd_mach_l1om;
2928 }
2929 else if (cpu_arch_isa == PROCESSOR_K1OM)
2930 {
2931 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2932 || default_arch[6] != '\0')
2933 as_fatal (_("Intel K1OM is 64bit ELF only"));
2934 return bfd_mach_k1om;
2935 }
2936 else if (default_arch[6] == '\0')
2937 return bfd_mach_x86_64;
2938 else
2939 return bfd_mach_x64_32;
2940 }
2941 else if (!strcmp (default_arch, "i386")
2942 || !strcmp (default_arch, "iamcu"))
2943 {
2944 if (cpu_arch_isa == PROCESSOR_IAMCU)
2945 {
2946 if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
2947 as_fatal (_("Intel MCU is 32bit ELF only"));
2948 return bfd_mach_i386_iamcu;
2949 }
2950 else
2951 return bfd_mach_i386_i386;
2952 }
2953 else
2954 as_fatal (_("unknown architecture"));
2955 }
2956 \f
2957 void
2958 md_begin (void)
2959 {
2960 const char *hash_err;
2961
2962 /* Support pseudo prefixes like {disp32}. */
2963 lex_type ['{'] = LEX_BEGIN_NAME;
2964
2965 /* Initialize op_hash hash table. */
2966 op_hash = hash_new ();
2967
2968 {
2969 const insn_template *optab;
2970 templates *core_optab;
2971
2972 /* Setup for loop. */
2973 optab = i386_optab;
2974 core_optab = XNEW (templates);
2975 core_optab->start = optab;
2976
2977 while (1)
2978 {
2979 ++optab;
2980 if (optab->name == NULL
2981 || strcmp (optab->name, (optab - 1)->name) != 0)
2982 {
2983 /* different name --> ship out current template list;
2984 add to hash table; & begin anew. */
2985 core_optab->end = optab;
2986 hash_err = hash_insert (op_hash,
2987 (optab - 1)->name,
2988 (void *) core_optab);
2989 if (hash_err)
2990 {
2991 as_fatal (_("can't hash %s: %s"),
2992 (optab - 1)->name,
2993 hash_err);
2994 }
2995 if (optab->name == NULL)
2996 break;
2997 core_optab = XNEW (templates);
2998 core_optab->start = optab;
2999 }
3000 }
3001 }
3002
3003 /* Initialize reg_hash hash table. */
3004 reg_hash = hash_new ();
3005 {
3006 const reg_entry *regtab;
3007 unsigned int regtab_size = i386_regtab_size;
3008
3009 for (regtab = i386_regtab; regtab_size--; regtab++)
3010 {
3011 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
3012 if (hash_err)
3013 as_fatal (_("can't hash %s: %s"),
3014 regtab->reg_name,
3015 hash_err);
3016 }
3017 }
3018
3019 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3020 {
3021 int c;
3022 char *p;
3023
3024 for (c = 0; c < 256; c++)
3025 {
3026 if (ISDIGIT (c))
3027 {
3028 digit_chars[c] = c;
3029 mnemonic_chars[c] = c;
3030 register_chars[c] = c;
3031 operand_chars[c] = c;
3032 }
3033 else if (ISLOWER (c))
3034 {
3035 mnemonic_chars[c] = c;
3036 register_chars[c] = c;
3037 operand_chars[c] = c;
3038 }
3039 else if (ISUPPER (c))
3040 {
3041 mnemonic_chars[c] = TOLOWER (c);
3042 register_chars[c] = mnemonic_chars[c];
3043 operand_chars[c] = c;
3044 }
3045 else if (c == '{' || c == '}')
3046 {
3047 mnemonic_chars[c] = c;
3048 operand_chars[c] = c;
3049 }
3050
3051 if (ISALPHA (c) || ISDIGIT (c))
3052 identifier_chars[c] = c;
3053 else if (c >= 128)
3054 {
3055 identifier_chars[c] = c;
3056 operand_chars[c] = c;
3057 }
3058 }
3059
3060 #ifdef LEX_AT
3061 identifier_chars['@'] = '@';
3062 #endif
3063 #ifdef LEX_QM
3064 identifier_chars['?'] = '?';
3065 operand_chars['?'] = '?';
3066 #endif
3067 digit_chars['-'] = '-';
3068 mnemonic_chars['_'] = '_';
3069 mnemonic_chars['-'] = '-';
3070 mnemonic_chars['.'] = '.';
3071 identifier_chars['_'] = '_';
3072 identifier_chars['.'] = '.';
3073
3074 for (p = operand_special_chars; *p != '\0'; p++)
3075 operand_chars[(unsigned char) *p] = *p;
3076 }
3077
3078 if (flag_code == CODE_64BIT)
3079 {
3080 #if defined (OBJ_COFF) && defined (TE_PE)
3081 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
3082 ? 32 : 16);
3083 #else
3084 x86_dwarf2_return_column = 16;
3085 #endif
3086 x86_cie_data_alignment = -8;
3087 }
3088 else
3089 {
3090 x86_dwarf2_return_column = 8;
3091 x86_cie_data_alignment = -4;
3092 }
3093
3094 /* NB: FUSED_JCC_PADDING frag must have sufficient room so that it
3095 can be turned into BRANCH_PREFIX frag. */
3096 if (align_branch_prefix_size > MAX_FUSED_JCC_PADDING_SIZE)
3097 abort ();
3098 }
3099
3100 void
3101 i386_print_statistics (FILE *file)
3102 {
3103 hash_print_statistics (file, "i386 opcode", op_hash);
3104 hash_print_statistics (file, "i386 register", reg_hash);
3105 }
3106 \f
3107 #ifdef DEBUG386
3108
3109 /* Debugging routines for md_assemble. */
3110 static void pte (insn_template *);
3111 static void pt (i386_operand_type);
3112 static void pe (expressionS *);
3113 static void ps (symbolS *);
3114
3115 static void
3116 pi (const char *line, i386_insn *x)
3117 {
3118 unsigned int j;
3119
3120 fprintf (stdout, "%s: template ", line);
3121 pte (&x->tm);
3122 fprintf (stdout, " address: base %s index %s scale %x\n",
3123 x->base_reg ? x->base_reg->reg_name : "none",
3124 x->index_reg ? x->index_reg->reg_name : "none",
3125 x->log2_scale_factor);
3126 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
3127 x->rm.mode, x->rm.reg, x->rm.regmem);
3128 fprintf (stdout, " sib: base %x index %x scale %x\n",
3129 x->sib.base, x->sib.index, x->sib.scale);
3130 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
3131 (x->rex & REX_W) != 0,
3132 (x->rex & REX_R) != 0,
3133 (x->rex & REX_X) != 0,
3134 (x->rex & REX_B) != 0);
3135 for (j = 0; j < x->operands; j++)
3136 {
3137 fprintf (stdout, " #%d: ", j + 1);
3138 pt (x->types[j]);
3139 fprintf (stdout, "\n");
3140 if (x->types[j].bitfield.class == Reg
3141 || x->types[j].bitfield.class == RegMMX
3142 || x->types[j].bitfield.class == RegSIMD
3143 || x->types[j].bitfield.class == SReg
3144 || x->types[j].bitfield.class == RegCR
3145 || x->types[j].bitfield.class == RegDR
3146 || x->types[j].bitfield.class == RegTR)
3147 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
3148 if (operand_type_check (x->types[j], imm))
3149 pe (x->op[j].imms);
3150 if (operand_type_check (x->types[j], disp))
3151 pe (x->op[j].disps);
3152 }
3153 }
3154
3155 static void
3156 pte (insn_template *t)
3157 {
3158 unsigned int j;
3159 fprintf (stdout, " %d operands ", t->operands);
3160 fprintf (stdout, "opcode %x ", t->base_opcode);
3161 if (t->extension_opcode != None)
3162 fprintf (stdout, "ext %x ", t->extension_opcode);
3163 if (t->opcode_modifier.d)
3164 fprintf (stdout, "D");
3165 if (t->opcode_modifier.w)
3166 fprintf (stdout, "W");
3167 fprintf (stdout, "\n");
3168 for (j = 0; j < t->operands; j++)
3169 {
3170 fprintf (stdout, " #%d type ", j + 1);
3171 pt (t->operand_types[j]);
3172 fprintf (stdout, "\n");
3173 }
3174 }
3175
3176 static void
3177 pe (expressionS *e)
3178 {
3179 fprintf (stdout, " operation %d\n", e->X_op);
3180 fprintf (stdout, " add_number %ld (%lx)\n",
3181 (long) e->X_add_number, (long) e->X_add_number);
3182 if (e->X_add_symbol)
3183 {
3184 fprintf (stdout, " add_symbol ");
3185 ps (e->X_add_symbol);
3186 fprintf (stdout, "\n");
3187 }
3188 if (e->X_op_symbol)
3189 {
3190 fprintf (stdout, " op_symbol ");
3191 ps (e->X_op_symbol);
3192 fprintf (stdout, "\n");
3193 }
3194 }
3195
3196 static void
3197 ps (symbolS *s)
3198 {
3199 fprintf (stdout, "%s type %s%s",
3200 S_GET_NAME (s),
3201 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
3202 segment_name (S_GET_SEGMENT (s)));
3203 }
3204
3205 static struct type_name
3206 {
3207 i386_operand_type mask;
3208 const char *name;
3209 }
3210 const type_names[] =
3211 {
3212 { OPERAND_TYPE_REG8, "r8" },
3213 { OPERAND_TYPE_REG16, "r16" },
3214 { OPERAND_TYPE_REG32, "r32" },
3215 { OPERAND_TYPE_REG64, "r64" },
3216 { OPERAND_TYPE_ACC8, "acc8" },
3217 { OPERAND_TYPE_ACC16, "acc16" },
3218 { OPERAND_TYPE_ACC32, "acc32" },
3219 { OPERAND_TYPE_ACC64, "acc64" },
3220 { OPERAND_TYPE_IMM8, "i8" },
3221 { OPERAND_TYPE_IMM8S, "i8s" },
3222 { OPERAND_TYPE_IMM16, "i16" },
3223 { OPERAND_TYPE_IMM32, "i32" },
3224 { OPERAND_TYPE_IMM32S, "i32s" },
3225 { OPERAND_TYPE_IMM64, "i64" },
3226 { OPERAND_TYPE_IMM1, "i1" },
3227 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
3228 { OPERAND_TYPE_DISP8, "d8" },
3229 { OPERAND_TYPE_DISP16, "d16" },
3230 { OPERAND_TYPE_DISP32, "d32" },
3231 { OPERAND_TYPE_DISP32S, "d32s" },
3232 { OPERAND_TYPE_DISP64, "d64" },
3233 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
3234 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
3235 { OPERAND_TYPE_CONTROL, "control reg" },
3236 { OPERAND_TYPE_TEST, "test reg" },
3237 { OPERAND_TYPE_DEBUG, "debug reg" },
3238 { OPERAND_TYPE_FLOATREG, "FReg" },
3239 { OPERAND_TYPE_FLOATACC, "FAcc" },
3240 { OPERAND_TYPE_SREG, "SReg" },
3241 { OPERAND_TYPE_REGMMX, "rMMX" },
3242 { OPERAND_TYPE_REGXMM, "rXMM" },
3243 { OPERAND_TYPE_REGYMM, "rYMM" },
3244 { OPERAND_TYPE_REGZMM, "rZMM" },
3245 { OPERAND_TYPE_REGMASK, "Mask reg" },
3246 };
3247
3248 static void
3249 pt (i386_operand_type t)
3250 {
3251 unsigned int j;
3252 i386_operand_type a;
3253
3254 for (j = 0; j < ARRAY_SIZE (type_names); j++)
3255 {
3256 a = operand_type_and (t, type_names[j].mask);
3257 if (operand_type_equal (&a, &type_names[j].mask))
3258 fprintf (stdout, "%s, ", type_names[j].name);
3259 }
3260 fflush (stdout);
3261 }
3262
3263 #endif /* DEBUG386 */
3264 \f
3265 static bfd_reloc_code_real_type
3266 reloc (unsigned int size,
3267 int pcrel,
3268 int sign,
3269 bfd_reloc_code_real_type other)
3270 {
3271 if (other != NO_RELOC)
3272 {
3273 reloc_howto_type *rel;
3274
3275 if (size == 8)
3276 switch (other)
3277 {
3278 case BFD_RELOC_X86_64_GOT32:
3279 return BFD_RELOC_X86_64_GOT64;
3281 case BFD_RELOC_X86_64_GOTPLT64:
3282 return BFD_RELOC_X86_64_GOTPLT64;
3284 case BFD_RELOC_X86_64_PLTOFF64:
3285 return BFD_RELOC_X86_64_PLTOFF64;
3287 case BFD_RELOC_X86_64_GOTPC32:
3288 other = BFD_RELOC_X86_64_GOTPC64;
3289 break;
3290 case BFD_RELOC_X86_64_GOTPCREL:
3291 other = BFD_RELOC_X86_64_GOTPCREL64;
3292 break;
3293 case BFD_RELOC_X86_64_TPOFF32:
3294 other = BFD_RELOC_X86_64_TPOFF64;
3295 break;
3296 case BFD_RELOC_X86_64_DTPOFF32:
3297 other = BFD_RELOC_X86_64_DTPOFF64;
3298 break;
3299 default:
3300 break;
3301 }
3302
3303 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3304 if (other == BFD_RELOC_SIZE32)
3305 {
3306 if (size == 8)
3307 other = BFD_RELOC_SIZE64;
3308 if (pcrel)
3309 {
3310 as_bad (_("there are no pc-relative size relocations"));
3311 return NO_RELOC;
3312 }
3313 }
3314 #endif
3315
3316 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3317 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
3318 sign = -1;
3319
3320 rel = bfd_reloc_type_lookup (stdoutput, other);
3321 if (!rel)
3322 as_bad (_("unknown relocation (%u)"), other);
3323 else if (size != bfd_get_reloc_size (rel))
3324 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3325 bfd_get_reloc_size (rel),
3326 size);
3327 else if (pcrel && !rel->pc_relative)
3328 as_bad (_("non-pc-relative relocation for pc-relative field"));
3329 else if ((rel->complain_on_overflow == complain_overflow_signed
3330 && !sign)
3331 || (rel->complain_on_overflow == complain_overflow_unsigned
3332 && sign > 0))
3333 as_bad (_("relocated field and relocation type differ in signedness"));
3334 else
3335 return other;
3336 return NO_RELOC;
3337 }
3338
3339 if (pcrel)
3340 {
3341 if (!sign)
3342 as_bad (_("there are no unsigned pc-relative relocations"));
3343 switch (size)
3344 {
3345 case 1: return BFD_RELOC_8_PCREL;
3346 case 2: return BFD_RELOC_16_PCREL;
3347 case 4: return BFD_RELOC_32_PCREL;
3348 case 8: return BFD_RELOC_64_PCREL;
3349 }
3350 as_bad (_("cannot do %u byte pc-relative relocation"), size);
3351 }
3352 else
3353 {
3354 if (sign > 0)
3355 switch (size)
3356 {
3357 case 4: return BFD_RELOC_X86_64_32S;
3358 }
3359 else
3360 switch (size)
3361 {
3362 case 1: return BFD_RELOC_8;
3363 case 2: return BFD_RELOC_16;
3364 case 4: return BFD_RELOC_32;
3365 case 8: return BFD_RELOC_64;
3366 }
3367 as_bad (_("cannot do %s %u byte relocation"),
3368 sign > 0 ? "signed" : "unsigned", size);
3369 }
3370
3371 return NO_RELOC;
3372 }
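
/* Example: reloc (4, 1, 1, NO_RELOC) yields BFD_RELOC_32_PCREL for a
   signed 4-byte pc-relative field, while reloc (4, 0, 1, NO_RELOC)
   in 64-bit code yields BFD_RELOC_X86_64_32S. */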
3373
3374 /* Here we decide which fixups can be adjusted to make them relative to
3375 the beginning of the section instead of the symbol. Basically we need
3376 to make sure that the dynamic relocations are done correctly, so in
3377 some cases we force the original symbol to be used. */
3378
3379 int
3380 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
3381 {
3382 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3383 if (!IS_ELF)
3384 return 1;
3385
3386 /* Don't adjust pc-relative references to merge sections in 64-bit
3387 mode. */
3388 if (use_rela_relocations
3389 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
3390 && fixP->fx_pcrel)
3391 return 0;
3392
3393 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3394 and changed later by validate_fix. */
3395 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
3396 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
3397 return 0;
3398
3399 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3400 for size relocations. */
3401 if (fixP->fx_r_type == BFD_RELOC_SIZE32
3402 || fixP->fx_r_type == BFD_RELOC_SIZE64
3403 || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
3404 || fixP->fx_r_type == BFD_RELOC_386_GOT32
3405 || fixP->fx_r_type == BFD_RELOC_386_GOT32X
3406 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
3407 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
3408 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
3409 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
3410 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
3411 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
3412 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
3413 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
3414 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
3415 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
3416 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
3417 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
3418 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCRELX
3419 || fixP->fx_r_type == BFD_RELOC_X86_64_REX_GOTPCRELX
3420 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
3421 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
3422 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
3423 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
3424 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
3425 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
3426 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
3427 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
3428 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
3429 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
3430 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
3431 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
3432 return 0;
3433 #endif
3434 return 1;
3435 }
3436
3437 static int
3438 intel_float_operand (const char *mnemonic)
3439 {
3440 /* Note that the value returned is meaningful only for opcodes with (memory)
3441 operands, hence the code here is free to improperly handle opcodes that
3442 have no operands (for better performance and smaller code). */
3443
3444 if (mnemonic[0] != 'f')
3445 return 0; /* non-math */
3446
3447 switch (mnemonic[1])
3448 {
3449 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
3450 the fs segment override prefix are not currently handled because no
3451 call path can make opcodes without operands get here. */
3452 case 'i':
3453 return 2 /* integer op */;
3454 case 'l':
3455 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
3456 return 3; /* fldcw/fldenv */
3457 break;
3458 case 'n':
3459 if (mnemonic[2] != 'o' /* fnop */)
3460 return 3; /* non-waiting control op */
3461 break;
3462 case 'r':
3463 if (mnemonic[2] == 's')
3464 return 3; /* frstor/frstpm */
3465 break;
3466 case 's':
3467 if (mnemonic[2] == 'a')
3468 return 3; /* fsave */
3469 if (mnemonic[2] == 't')
3470 {
3471 switch (mnemonic[3])
3472 {
3473 case 'c': /* fstcw */
3474 case 'd': /* fstdw */
3475 case 'e': /* fstenv */
3476 case 's': /* fsts[gw] */
3477 return 3;
3478 }
3479 }
3480 break;
3481 case 'x':
3482 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
3483 return 0; /* fxsave/fxrstor are not really math ops */
3484 break;
3485 }
3486
3487 return 1;
3488 }
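
/* E.g. "fiadd" returns 2 (integer operand), "fldcw" returns 3
   (control op with a 2-byte memory operand), and "fadd" returns 1
   (ordinary floating point operand). */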
3489
3490 /* Build the VEX prefix. */
3491
3492 static void
3493 build_vex_prefix (const insn_template *t)
3494 {
3495 unsigned int register_specifier;
3496 unsigned int implied_prefix;
3497 unsigned int vector_length;
3498 unsigned int w;
3499
3500 /* Check register specifier. */
3501 if (i.vex.register_specifier)
3502 {
3503 register_specifier =
3504 ~register_number (i.vex.register_specifier) & 0xf;
3505 gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
3506 }
3507 else
3508 register_specifier = 0xf;
3509
3510 /* Use 2-byte VEX prefix by swapping destination and source operand
3511 if there are more than 1 register operand. */
3512 if (i.reg_operands > 1
3513 && i.vec_encoding != vex_encoding_vex3
3514 && i.dir_encoding == dir_encoding_default
3515 && i.operands == i.reg_operands
3516 && operand_type_equal (&i.types[0], &i.types[i.operands - 1])
3517 && i.tm.opcode_modifier.vexopcode == VEX0F
3518 && (i.tm.opcode_modifier.load || i.tm.opcode_modifier.d)
3519 && i.rex == REX_B)
3520 {
3521 unsigned int xchg = i.operands - 1;
3522 union i386_op temp_op;
3523 i386_operand_type temp_type;
3524
3525 temp_type = i.types[xchg];
3526 i.types[xchg] = i.types[0];
3527 i.types[0] = temp_type;
3528 temp_op = i.op[xchg];
3529 i.op[xchg] = i.op[0];
3530 i.op[0] = temp_op;
3531
3532 gas_assert (i.rm.mode == 3);
3533
3534 i.rex = REX_R;
3535 xchg = i.rm.regmem;
3536 i.rm.regmem = i.rm.reg;
3537 i.rm.reg = xchg;
3538
3539 if (i.tm.opcode_modifier.d)
3540 i.tm.base_opcode ^= (i.tm.base_opcode & 0xee) != 0x6e
3541 ? Opcode_SIMD_FloatD : Opcode_SIMD_IntD;
3542 else /* Use the next insn. */
3543 i.tm = t[1];
3544 }
3545
3546 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3547 are no memory operands and at least 3 register ones. */
3548 if (i.reg_operands >= 3
3549 && i.vec_encoding != vex_encoding_vex3
3550 && i.reg_operands == i.operands - i.imm_operands
3551 && i.tm.opcode_modifier.vex
3552 && i.tm.opcode_modifier.commutative
3553 && (i.tm.opcode_modifier.sse2avx || optimize > 1)
3554 && i.rex == REX_B
3555 && i.vex.register_specifier
3556 && !(i.vex.register_specifier->reg_flags & RegRex))
3557 {
3558 unsigned int xchg = i.operands - i.reg_operands;
3559 union i386_op temp_op;
3560 i386_operand_type temp_type;
3561
3562 gas_assert (i.tm.opcode_modifier.vexopcode == VEX0F);
3563 gas_assert (!i.tm.opcode_modifier.sae);
3564 gas_assert (operand_type_equal (&i.types[i.operands - 2],
3565 &i.types[i.operands - 3]));
3566 gas_assert (i.rm.mode == 3);
3567
3568 temp_type = i.types[xchg];
3569 i.types[xchg] = i.types[xchg + 1];
3570 i.types[xchg + 1] = temp_type;
3571 temp_op = i.op[xchg];
3572 i.op[xchg] = i.op[xchg + 1];
3573 i.op[xchg + 1] = temp_op;
3574
3575 i.rex = 0;
3576 xchg = i.rm.regmem | 8;
3577 i.rm.regmem = ~register_specifier & 0xf;
3578 gas_assert (!(i.rm.regmem & 8));
3579 i.vex.register_specifier += xchg - i.rm.regmem;
3580 register_specifier = ~xchg & 0xf;
3581 }
3582
3583 if (i.tm.opcode_modifier.vex == VEXScalar)
3584 vector_length = avxscalar;
3585 else if (i.tm.opcode_modifier.vex == VEX256)
3586 vector_length = 1;
3587 else
3588 {
3589 unsigned int op;
3590
3591 /* Determine vector length from the last multi-length vector
3592 operand. */
3593 vector_length = 0;
3594 for (op = t->operands; op--;)
3595 if (t->operand_types[op].bitfield.xmmword
3596 && t->operand_types[op].bitfield.ymmword
3597 && i.types[op].bitfield.ymmword)
3598 {
3599 vector_length = 1;
3600 break;
3601 }
3602 }
3603
3604 switch ((i.tm.base_opcode >> 8) & 0xff)
3605 {
3606 case 0:
3607 implied_prefix = 0;
3608 break;
3609 case DATA_PREFIX_OPCODE:
3610 implied_prefix = 1;
3611 break;
3612 case REPE_PREFIX_OPCODE:
3613 implied_prefix = 2;
3614 break;
3615 case REPNE_PREFIX_OPCODE:
3616 implied_prefix = 3;
3617 break;
3618 default:
3619 abort ();
3620 }
3621
3622 /* Check the REX.W bit and VEXW. */
3623 if (i.tm.opcode_modifier.vexw == VEXWIG)
3624 w = (vexwig == vexw1 || (i.rex & REX_W)) ? 1 : 0;
3625 else if (i.tm.opcode_modifier.vexw)
3626 w = i.tm.opcode_modifier.vexw == VEXW1 ? 1 : 0;
3627 else
3628 w = (flag_code == CODE_64BIT ? i.rex & REX_W : vexwig == vexw1) ? 1 : 0;
3629
3630 /* Use 2-byte VEX prefix if possible. */
3631 if (w == 0
3632 && i.vec_encoding != vex_encoding_vex3
3633 && i.tm.opcode_modifier.vexopcode == VEX0F
3634 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
3635 {
3636 /* 2-byte VEX prefix. */
3637 unsigned int r;
3638
3639 i.vex.length = 2;
3640 i.vex.bytes[0] = 0xc5;
3641
3642 /* Check the REX.R bit. */
3643 r = (i.rex & REX_R) ? 0 : 1;
3644 i.vex.bytes[1] = (r << 7
3645 | register_specifier << 3
3646 | vector_length << 2
3647 | implied_prefix);
3648 }
3649 else
3650 {
3651 /* 3-byte VEX prefix. */
3652 unsigned int m;
3653
3654 i.vex.length = 3;
3655
3656 switch (i.tm.opcode_modifier.vexopcode)
3657 {
3658 case VEX0F:
3659 m = 0x1;
3660 i.vex.bytes[0] = 0xc4;
3661 break;
3662 case VEX0F38:
3663 m = 0x2;
3664 i.vex.bytes[0] = 0xc4;
3665 break;
3666 case VEX0F3A:
3667 m = 0x3;
3668 i.vex.bytes[0] = 0xc4;
3669 break;
3670 case XOP08:
3671 m = 0x8;
3672 i.vex.bytes[0] = 0x8f;
3673 break;
3674 case XOP09:
3675 m = 0x9;
3676 i.vex.bytes[0] = 0x8f;
3677 break;
3678 case XOP0A:
3679 m = 0xa;
3680 i.vex.bytes[0] = 0x8f;
3681 break;
3682 default:
3683 abort ();
3684 }
3685
3686 /* The high 3 bits of the second VEX byte are 1's complement
3687 of RXB bits from REX. */
3688 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3689
3690 i.vex.bytes[2] = (w << 7
3691 | register_specifier << 3
3692 | vector_length << 2
3693 | implied_prefix);
3694 }
3695 }
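
/* Sketch of the encodings built above (informal; see the SDM for the
   authoritative layout):

     2-byte VEX: 0xC5 | R vvvv L pp
     3-byte VEX: 0xC4 | R X B m-mmmm | W vvvv L pp

   e.g. "vaddps %xmm2, %xmm1, %xmm0" assembles to c5 f0 58 c2, with
   0xf0 packing R=1 (inverted), vvvv=~1, L=0 (128-bit), pp=00. */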
3696
3697 static INLINE bfd_boolean
3698 is_evex_encoding (const insn_template *t)
3699 {
3700 return t->opcode_modifier.evex || t->opcode_modifier.disp8memshift
3701 || t->opcode_modifier.broadcast || t->opcode_modifier.masking
3702 || t->opcode_modifier.sae;
3703 }
3704
3705 static INLINE bfd_boolean
3706 is_any_vex_encoding (const insn_template *t)
3707 {
3708 return t->opcode_modifier.vex || t->opcode_modifier.vexopcode
3709 || is_evex_encoding (t);
3710 }
3711
3712 /* Build the EVEX prefix. */
3713
3714 static void
3715 build_evex_prefix (void)
3716 {
3717 unsigned int register_specifier;
3718 unsigned int implied_prefix;
3719 unsigned int m, w;
3720 rex_byte vrex_used = 0;
3721
3722 /* Check register specifier. */
3723 if (i.vex.register_specifier)
3724 {
3725 gas_assert ((i.vrex & REX_X) == 0);
3726
3727 register_specifier = i.vex.register_specifier->reg_num;
3728 if ((i.vex.register_specifier->reg_flags & RegRex))
3729 register_specifier += 8;
3730 /* The upper 16 registers are encoded in the fourth byte of the
3731 EVEX prefix. */
3732 if (!(i.vex.register_specifier->reg_flags & RegVRex))
3733 i.vex.bytes[3] = 0x8;
3734 register_specifier = ~register_specifier & 0xf;
3735 }
3736 else
3737 {
3738 register_specifier = 0xf;
3739
3740 /* Encode upper 16 vector index register in the fourth byte of
3741 the EVEX prefix. */
3742 if (!(i.vrex & REX_X))
3743 i.vex.bytes[3] = 0x8;
3744 else
3745 vrex_used |= REX_X;
3746 }
3747
3748 switch ((i.tm.base_opcode >> 8) & 0xff)
3749 {
3750 case 0:
3751 implied_prefix = 0;
3752 break;
3753 case DATA_PREFIX_OPCODE:
3754 implied_prefix = 1;
3755 break;
3756 case REPE_PREFIX_OPCODE:
3757 implied_prefix = 2;
3758 break;
3759 case REPNE_PREFIX_OPCODE:
3760 implied_prefix = 3;
3761 break;
3762 default:
3763 abort ();
3764 }
3765
3766 /* 4 byte EVEX prefix. */
3767 i.vex.length = 4;
3768 i.vex.bytes[0] = 0x62;
3769
3770 /* mmmm bits. */
3771 switch (i.tm.opcode_modifier.vexopcode)
3772 {
3773 case VEX0F:
3774 m = 1;
3775 break;
3776 case VEX0F38:
3777 m = 2;
3778 break;
3779 case VEX0F3A:
3780 m = 3;
3781 break;
3782 default:
3783 abort ();
3784 break;
3785 }
3786
3787 /* The high 3 bits of the second EVEX byte are 1's complement of RXB
3788 bits from REX. */
3789 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3790
3791 /* The fifth bit of the second EVEX byte is 1's complement of the
3792 REX_R bit in VREX. */
3793 if (!(i.vrex & REX_R))
3794 i.vex.bytes[1] |= 0x10;
3795 else
3796 vrex_used |= REX_R;
3797
3798 if ((i.reg_operands + i.imm_operands) == i.operands)
3799 {
3800 /* When all operands are registers, the REX_X bit in REX is not
3801 used. We reuse it to encode the upper 16 registers, which is
3802 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3803 as 1's complement. */
3804 if ((i.vrex & REX_B))
3805 {
3806 vrex_used |= REX_B;
3807 i.vex.bytes[1] &= ~0x40;
3808 }
3809 }
3810
3811 /* EVEX instructions shouldn't need the REX prefix. */
3812 i.vrex &= ~vrex_used;
3813 gas_assert (i.vrex == 0);
3814
3815 /* Check the REX.W bit and VEXW. */
3816 if (i.tm.opcode_modifier.vexw == VEXWIG)
3817 w = (evexwig == evexw1 || (i.rex & REX_W)) ? 1 : 0;
3818 else if (i.tm.opcode_modifier.vexw)
3819 w = i.tm.opcode_modifier.vexw == VEXW1 ? 1 : 0;
3820 else
3821 w = (flag_code == CODE_64BIT ? i.rex & REX_W : evexwig == evexw1) ? 1 : 0;
3822
3823 /* Encode the U bit. */
3824 implied_prefix |= 0x4;
3825
3826 /* The third byte of the EVEX prefix. */
3827 i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);
3828
3829 /* The fourth byte of the EVEX prefix. */
3830 /* The zeroing-masking bit. */
3831 if (i.mask && i.mask->zeroing)
3832 i.vex.bytes[3] |= 0x80;
3833
3834 /* Don't always set the broadcast bit if there is no RC. */
3835 if (!i.rounding)
3836 {
3837 /* Encode the vector length. */
3838 unsigned int vec_length;
3839
3840 if (!i.tm.opcode_modifier.evex
3841 || i.tm.opcode_modifier.evex == EVEXDYN)
3842 {
3843 unsigned int op;
3844
3845 /* Determine vector length from the last multi-length vector
3846 operand. */
3847 vec_length = 0;
3848 for (op = i.operands; op--;)
3849 if (i.tm.operand_types[op].bitfield.xmmword
3850 + i.tm.operand_types[op].bitfield.ymmword
3851 + i.tm.operand_types[op].bitfield.zmmword > 1)
3852 {
3853 if (i.types[op].bitfield.zmmword)
3854 {
3855 i.tm.opcode_modifier.evex = EVEX512;
3856 break;
3857 }
3858 else if (i.types[op].bitfield.ymmword)
3859 {
3860 i.tm.opcode_modifier.evex = EVEX256;
3861 break;
3862 }
3863 else if (i.types[op].bitfield.xmmword)
3864 {
3865 i.tm.opcode_modifier.evex = EVEX128;
3866 break;
3867 }
3868 else if (i.broadcast && (int) op == i.broadcast->operand)
3869 {
3870 switch (i.broadcast->bytes)
3871 {
3872 case 64:
3873 i.tm.opcode_modifier.evex = EVEX512;
3874 break;
3875 case 32:
3876 i.tm.opcode_modifier.evex = EVEX256;
3877 break;
3878 case 16:
3879 i.tm.opcode_modifier.evex = EVEX128;
3880 break;
3881 default:
3882 abort ();
3883 }
3884 break;
3885 }
3886 }
3887
3888 if (op >= MAX_OPERANDS)
3889 abort ();
3890 }
3891
3892 switch (i.tm.opcode_modifier.evex)
3893 {
3894 case EVEXLIG: /* LL' is ignored */
3895 vec_length = evexlig << 5;
3896 break;
3897 case EVEX128:
3898 vec_length = 0 << 5;
3899 break;
3900 case EVEX256:
3901 vec_length = 1 << 5;
3902 break;
3903 case EVEX512:
3904 vec_length = 2 << 5;
3905 break;
3906 default:
3907 abort ();
3908 break;
3909 }
3910 i.vex.bytes[3] |= vec_length;
3911 /* Encode the broadcast bit. */
3912 if (i.broadcast)
3913 i.vex.bytes[3] |= 0x10;
3914 }
3915 else
3916 {
3917 if (i.rounding->type != saeonly)
3918 i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
3919 else
3920 i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
3921 }
3922
3923 if (i.mask && i.mask->mask)
3924 i.vex.bytes[3] |= i.mask->mask->reg_num;
3925 }
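
/* For reference, a sketch of the resulting 4-byte EVEX prefix ("~"
   marks bits that are stored inverted; some bits are filled in earlier
   in this function):
     byte 0: 0x62
     byte 1: ~R ~X ~B ~R' 0 0 m m
     byte 2: W ~v ~v ~v ~v 1 p p
     byte 3: z L' L b ~V' a a a  */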
3926
3927 static void
3928 process_immext (void)
3929 {
3930 expressionS *exp;
3931
3932 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3933 which is coded in the same place as an 8-bit immediate field
3934 would be. Here we fake an 8-bit immediate operand from the
3935 opcode suffix stored in tm.extension_opcode.
3936
3937 AVX instructions also use this encoding, for some
3938 3-operand instructions. */
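
/* E.g. the 3DNow! insn "pfadd %mm1, %mm0" is encoded as 0x0f 0x0f /r
   with suffix byte 0x9e held in tm.extension_opcode (one illustrative
   case; the actual values come from the opcode table). */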
3939
3940 gas_assert (i.imm_operands <= 1
3941 && (i.operands <= 2
3942 || (is_any_vex_encoding (&i.tm)
3943 && i.operands <= 4)));
3944
3945 exp = &im_expressions[i.imm_operands++];
3946 i.op[i.operands].imms = exp;
3947 i.types[i.operands] = imm8;
3948 i.operands++;
3949 exp->X_op = O_constant;
3950 exp->X_add_number = i.tm.extension_opcode;
3951 i.tm.extension_opcode = None;
3952 }
3953
3954
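/* Check whether the HLE prefix recorded in i.hle_prefix ("xacquire" or
   "xrelease") is legal for the matched template, e.g. "xacquire lock
   incl (%rax)".  Return 1 if it is, and 0 after issuing a diagnostic
   if it is not. */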
3955 static int
3956 check_hle (void)
3957 {
3958 switch (i.tm.opcode_modifier.hleprefixok)
3959 {
3960 default:
3961 abort ();
3962 case HLEPrefixNone:
3963 as_bad (_("invalid instruction `%s' after `%s'"),
3964 i.tm.name, i.hle_prefix);
3965 return 0;
3966 case HLEPrefixLock:
3967 if (i.prefix[LOCK_PREFIX])
3968 return 1;
3969 as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
3970 return 0;
3971 case HLEPrefixAny:
3972 return 1;
3973 case HLEPrefixRelease:
3974 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3975 {
3976 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3977 i.tm.name);
3978 return 0;
3979 }
3980 if (i.mem_operands == 0 || !(i.flags[i.operands - 1] & Operand_Mem))
3981 {
3982 as_bad (_("memory destination needed for instruction `%s'"
3983 " after `xrelease'"), i.tm.name);
3984 return 0;
3985 }
3986 return 1;
3987 }
3988 }
3989
3990 /* Try the shortest encoding by shortening operand size. */
3991
3992 static void
3993 optimize_encoding (void)
3994 {
3995 unsigned int j;
3996
3997 if (optimize_for_space
3998 && !is_any_vex_encoding (&i.tm)
3999 && i.reg_operands == 1
4000 && i.imm_operands == 1
4001 && !i.types[1].bitfield.byte
4002 && i.op[0].imms->X_op == O_constant
4003 && fits_in_imm7 (i.op[0].imms->X_add_number)
4004 && (i.tm.base_opcode == 0xa8
4005 || (i.tm.base_opcode == 0xf6
4006 && i.tm.extension_opcode == 0x0)))
4007 {
4008 /* Optimize: -Os:
4009 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
4010 */
4011 unsigned int base_regnum = i.op[1].regs->reg_num;
4012 if (flag_code == CODE_64BIT || base_regnum < 4)
4013 {
4014 i.types[1].bitfield.byte = 1;
4015 /* Ignore the suffix. */
4016 i.suffix = 0;
4017 /* Convert to byte registers. */
4018 if (i.types[1].bitfield.word)
4019 j = 16;
4020 else if (i.types[1].bitfield.dword)
4021 j = 32;
4022 else
4023 j = 48;
4024 if (!(i.op[1].regs->reg_flags & RegRex) && base_regnum < 4)
4025 j += 8;
4026 i.op[1].regs -= j;
4027 }
4028 }
4029 else if (flag_code == CODE_64BIT
4030 && !is_any_vex_encoding (&i.tm)
4031 && ((i.types[1].bitfield.qword
4032 && i.reg_operands == 1
4033 && i.imm_operands == 1
4034 && i.op[0].imms->X_op == O_constant
4035 && ((i.tm.base_opcode == 0xb8
4036 && i.tm.extension_opcode == None
4037 && fits_in_unsigned_long (i.op[0].imms->X_add_number))
4038 || (fits_in_imm31 (i.op[0].imms->X_add_number)
4039 && ((i.tm.base_opcode == 0x24
4040 || i.tm.base_opcode == 0xa8)
4041 || (i.tm.base_opcode == 0x80
4042 && i.tm.extension_opcode == 0x4)
4043 || ((i.tm.base_opcode == 0xf6
4044 || (i.tm.base_opcode | 1) == 0xc7)
4045 && i.tm.extension_opcode == 0x0)))
4046 || (fits_in_imm7 (i.op[0].imms->X_add_number)
4047 && i.tm.base_opcode == 0x83
4048 && i.tm.extension_opcode == 0x4)))
4049 || (i.types[0].bitfield.qword
4050 && ((i.reg_operands == 2
4051 && i.op[0].regs == i.op[1].regs
4052 && (i.tm.base_opcode == 0x30
4053 || i.tm.base_opcode == 0x28))
4054 || (i.reg_operands == 1
4055 && i.operands == 1
4056 && i.tm.base_opcode == 0x30)))))
4057 {
4058 /* Optimize: -O:
4059 andq $imm31, %r64 -> andl $imm31, %r32
4060 andq $imm7, %r64 -> andl $imm7, %r32
4061 testq $imm31, %r64 -> testl $imm31, %r32
4062 xorq %r64, %r64 -> xorl %r32, %r32
4063 subq %r64, %r64 -> subl %r32, %r32
4064 movq $imm31, %r64 -> movl $imm31, %r32
4065 movq $imm32, %r64 -> movl $imm32, %r32
4066 */
4067 i.tm.opcode_modifier.norex64 = 1;
4068 if (i.tm.base_opcode == 0xb8 || (i.tm.base_opcode | 1) == 0xc7)
4069 {
4070 /* Handle
4071 movq $imm31, %r64 -> movl $imm31, %r32
4072 movq $imm32, %r64 -> movl $imm32, %r32
4073 */
4074 i.tm.operand_types[0].bitfield.imm32 = 1;
4075 i.tm.operand_types[0].bitfield.imm32s = 0;
4076 i.tm.operand_types[0].bitfield.imm64 = 0;
4077 i.types[0].bitfield.imm32 = 1;
4078 i.types[0].bitfield.imm32s = 0;
4079 i.types[0].bitfield.imm64 = 0;
4080 i.types[1].bitfield.dword = 1;
4081 i.types[1].bitfield.qword = 0;
4082 if ((i.tm.base_opcode | 1) == 0xc7)
4083 {
4084 /* Handle
4085 movq $imm31, %r64 -> movl $imm31, %r32
4086 */
4087 i.tm.base_opcode = 0xb8;
4088 i.tm.extension_opcode = None;
4089 i.tm.opcode_modifier.w = 0;
4090 i.tm.opcode_modifier.modrm = 0;
4091 }
4092 }
4093 }
4094 else if (optimize > 1
4095 && !optimize_for_space
4096 && !is_any_vex_encoding (&i.tm)
4097 && i.reg_operands == 2
4098 && i.op[0].regs == i.op[1].regs
4099 && ((i.tm.base_opcode & ~(Opcode_D | 1)) == 0x8
4100 || (i.tm.base_opcode & ~(Opcode_D | 1)) == 0x20)
4101 && (flag_code != CODE_64BIT || !i.types[0].bitfield.dword))
4102 {
4103 /* Optimize: -O2:
4104 andb %rN, %rN -> testb %rN, %rN
4105 andw %rN, %rN -> testw %rN, %rN
4106 andq %rN, %rN -> testq %rN, %rN
4107 orb %rN, %rN -> testb %rN, %rN
4108 orw %rN, %rN -> testw %rN, %rN
4109 orq %rN, %rN -> testq %rN, %rN
4110
4111 and outside of 64-bit mode
4112
4113 andl %rN, %rN -> testl %rN, %rN
4114 orl %rN, %rN -> testl %rN, %rN
4115 */
4116 i.tm.base_opcode = 0x84 | (i.tm.base_opcode & 1);
4117 }
4118 else if (i.reg_operands == 3
4119 && i.op[0].regs == i.op[1].regs
4120 && !i.types[2].bitfield.xmmword
4121 && (i.tm.opcode_modifier.vex
4122 || ((!i.mask || i.mask->zeroing)
4123 && !i.rounding
4124 && is_evex_encoding (&i.tm)
4125 && (i.vec_encoding != vex_encoding_evex
4126 || cpu_arch_isa_flags.bitfield.cpuavx512vl
4127 || i.tm.cpu_flags.bitfield.cpuavx512vl
4128 || (i.tm.operand_types[2].bitfield.zmmword
4129 && i.types[2].bitfield.ymmword))))
4130 && ((i.tm.base_opcode == 0x55
4131 || i.tm.base_opcode == 0x6655
4132 || i.tm.base_opcode == 0x66df
4133 || i.tm.base_opcode == 0x57
4134 || i.tm.base_opcode == 0x6657
4135 || i.tm.base_opcode == 0x66ef
4136 || i.tm.base_opcode == 0x66f8
4137 || i.tm.base_opcode == 0x66f9
4138 || i.tm.base_opcode == 0x66fa
4139 || i.tm.base_opcode == 0x66fb
4140 || i.tm.base_opcode == 0x42
4141 || i.tm.base_opcode == 0x6642
4142 || i.tm.base_opcode == 0x47
4143 || i.tm.base_opcode == 0x6647)
4144 && i.tm.extension_opcode == None))
4145 {
4146 /* Optimize: -O1:
4147 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4148 vpsubq and vpsubw:
4149 EVEX VOP %zmmM, %zmmM, %zmmN
4150 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4151 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4152 EVEX VOP %ymmM, %ymmM, %ymmN
4153 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4154 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4155 VEX VOP %ymmM, %ymmM, %ymmN
4156 -> VEX VOP %xmmM, %xmmM, %xmmN
4157 VOP, one of vpandn and vpxor:
4158 VEX VOP %ymmM, %ymmM, %ymmN
4159 -> VEX VOP %xmmM, %xmmM, %xmmN
4160 VOP, one of vpandnd and vpandnq:
4161 EVEX VOP %zmmM, %zmmM, %zmmN
4162 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4163 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4164 EVEX VOP %ymmM, %ymmM, %ymmN
4165 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4166 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4167 VOP, one of vpxord and vpxorq:
4168 EVEX VOP %zmmM, %zmmM, %zmmN
4169 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4170 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4171 EVEX VOP %ymmM, %ymmM, %ymmN
4172 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4173 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4174 VOP, one of kxord and kxorq:
4175 VEX VOP %kM, %kM, %kN
4176 -> VEX kxorw %kM, %kM, %kN
4177 VOP, one of kandnd and kandnq:
4178 VEX VOP %kM, %kM, %kN
4179 -> VEX kandnw %kM, %kM, %kN
4180 */
4181 if (is_evex_encoding (&i.tm))
4182 {
4183 if (i.vec_encoding != vex_encoding_evex)
4184 {
4185 i.tm.opcode_modifier.vex = VEX128;
4186 i.tm.opcode_modifier.vexw = VEXW0;
4187 i.tm.opcode_modifier.evex = 0;
4188 }
4189 else if (optimize > 1)
4190 i.tm.opcode_modifier.evex = EVEX128;
4191 else
4192 return;
4193 }
4194 else if (i.tm.operand_types[0].bitfield.class == RegMask)
4195 {
4196 i.tm.base_opcode &= 0xff;
4197 i.tm.opcode_modifier.vexw = VEXW0;
4198 }
4199 else
4200 i.tm.opcode_modifier.vex = VEX128;
4201
4202 if (i.tm.opcode_modifier.vex)
4203 for (j = 0; j < 3; j++)
4204 {
4205 i.types[j].bitfield.xmmword = 1;
4206 i.types[j].bitfield.ymmword = 0;
4207 }
4208 }
4209 else if (i.vec_encoding != vex_encoding_evex
4210 && !i.types[0].bitfield.zmmword
4211 && !i.types[1].bitfield.zmmword
4212 && !i.mask
4213 && !i.broadcast
4214 && is_evex_encoding (&i.tm)
4215 && ((i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0x666f
4216 || (i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf36f
4217 || (i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf26f
4218 || (i.tm.base_opcode & ~4) == 0x66db
4219 || (i.tm.base_opcode & ~4) == 0x66eb)
4220 && i.tm.extension_opcode == None)
4221 {
4222 /* Optimize: -O1:
4223 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4224 vmovdqu32 and vmovdqu64:
4225 EVEX VOP %xmmM, %xmmN
4226 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4227 EVEX VOP %ymmM, %ymmN
4228 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4229 EVEX VOP %xmmM, mem
4230 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4231 EVEX VOP %ymmM, mem
4232 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4233 EVEX VOP mem, %xmmN
4234 -> VEX vmovdqa|vmovdqu mem, %xmmN (N < 16)
4235 EVEX VOP mem, %ymmN
4236 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4237 VOP, one of vpand, vpandn, vpor, vpxor:
4238 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4239 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4240 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4241 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4242 EVEX VOP{d,q} mem, %xmmM, %xmmN
4243 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4244 EVEX VOP{d,q} mem, %ymmM, %ymmN
4245 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4246 */
4247 for (j = 0; j < i.operands; j++)
4248 if (operand_type_check (i.types[j], disp)
4249 && i.op[j].disps->X_op == O_constant)
4250 {
4251 /* The VEX prefix has 2 or 3 bytes while the EVEX prefix has 4;
4252 but EVEX Disp8 is 1 byte while VEX Disp32 is 4 bytes, so we
4253 choose EVEX Disp8 over VEX Disp32. */
4254 int evex_disp8, vex_disp8;
4255 unsigned int memshift = i.memshift;
4256 offsetT n = i.op[j].disps->X_add_number;
4257
4258 evex_disp8 = fits_in_disp8 (n);
4259 i.memshift = 0;
4260 vex_disp8 = fits_in_disp8 (n);
4261 if (evex_disp8 != vex_disp8)
4262 {
4263 i.memshift = memshift;
4264 return;
4265 }
4266
4267 i.types[j].bitfield.disp8 = vex_disp8;
4268 break;
4269 }
4270 if ((i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf26f)
4271 i.tm.base_opcode ^= 0xf36f ^ 0xf26f;
4272 i.tm.opcode_modifier.vex
4273 = i.types[0].bitfield.ymmword ? VEX256 : VEX128;
4274 i.tm.opcode_modifier.vexw = VEXW0;
4275 /* VPAND, VPOR, and VPXOR are commutative. */
4276 if (i.reg_operands == 3 && i.tm.base_opcode != 0x66df)
4277 i.tm.opcode_modifier.commutative = 1;
4278 i.tm.opcode_modifier.evex = 0;
4279 i.tm.opcode_modifier.masking = 0;
4280 i.tm.opcode_modifier.broadcast = 0;
4281 i.tm.opcode_modifier.disp8memshift = 0;
4282 i.memshift = 0;
4283 if (j < i.operands)
4284 i.types[j].bitfield.disp8
4285 = fits_in_disp8 (i.op[j].disps->X_add_number);
4286 }
4287 }
4288
4289 /* This is the guts of the machine-dependent assembler. LINE points to a
4290 machine dependent instruction. This function is supposed to emit
4291 the frags/bytes it assembles to. */
4292
4293 void
4294 md_assemble (char *line)
4295 {
4296 unsigned int j;
4297 char mnemonic[MAX_MNEM_SIZE], mnem_suffix;
4298 const insn_template *t;
4299
4300 /* Initialize globals. */
4301 memset (&i, '\0', sizeof (i));
4302 for (j = 0; j < MAX_OPERANDS; j++)
4303 i.reloc[j] = NO_RELOC;
4304 memset (disp_expressions, '\0', sizeof (disp_expressions));
4305 memset (im_expressions, '\0', sizeof (im_expressions));
4306 save_stack_p = save_stack;
4307
4308 /* First parse an instruction mnemonic & call i386_operand for the operands.
4309 We assume that the scrubber has arranged it so that line[0] is the valid
4310 start of a (possibly prefixed) mnemonic. */
4311
4312 line = parse_insn (line, mnemonic);
4313 if (line == NULL)
4314 return;
4315 mnem_suffix = i.suffix;
4316
4317 line = parse_operands (line, mnemonic);
4318 this_operand = -1;
4319 xfree (i.memop1_string);
4320 i.memop1_string = NULL;
4321 if (line == NULL)
4322 return;
4323
4324 /* Now we've parsed the mnemonic into a set of templates, and have the
4325 operands at hand. */
4326
4327 /* All Intel opcodes have reversed operands except for "bound", "enter",
4328 "invlpga", "monitor*", and "mwait*". We also don't reverse intersegment "jmp"
4329 and "call" instructions with 2 immediate operands so that the immediate
4330 segment precedes the offset, as it does when in AT&T mode. */
4331 if (intel_syntax
4332 && i.operands > 1
4333 && (strcmp (mnemonic, "bound") != 0)
4334 && (strcmp (mnemonic, "invlpga") != 0)
4335 && (strncmp (mnemonic, "monitor", 7) != 0)
4336 && (strncmp (mnemonic, "mwait", 5) != 0)
4337 && !(operand_type_check (i.types[0], imm)
4338 && operand_type_check (i.types[1], imm)))
4339 swap_operands ();
4340
4341 /* The order of the immediates should be reversed for the
4342 two-immediate "extrq" and "insertq" instructions. */
4343 if (i.imm_operands == 2
4344 && (strcmp (mnemonic, "extrq") == 0
4345 || strcmp (mnemonic, "insertq") == 0))
4346 swap_2_operands (0, 1);
4347
4348 if (i.imm_operands)
4349 optimize_imm ();
4350
4351 /* Don't optimize the displacement for movabs since it only takes a
4352 64-bit displacement. */
4353 if (i.disp_operands
4354 && i.disp_encoding != disp_encoding_32bit
4355 && (flag_code != CODE_64BIT
4356 || strcmp (mnemonic, "movabs") != 0))
4357 optimize_disp ();
4358
4359 /* Next, we find a template that matches the given insn,
4360 making sure the overlap of the given operands types is consistent
4361 with the template operand types. */
4362
4363 if (!(t = match_template (mnem_suffix)))
4364 return;
4365
4366 if (sse_check != check_none
4367 && !i.tm.opcode_modifier.noavx
4368 && !i.tm.cpu_flags.bitfield.cpuavx
4369 && !i.tm.cpu_flags.bitfield.cpuavx512f
4370 && (i.tm.cpu_flags.bitfield.cpusse
4371 || i.tm.cpu_flags.bitfield.cpusse2
4372 || i.tm.cpu_flags.bitfield.cpusse3
4373 || i.tm.cpu_flags.bitfield.cpussse3
4374 || i.tm.cpu_flags.bitfield.cpusse4_1
4375 || i.tm.cpu_flags.bitfield.cpusse4_2
4376 || i.tm.cpu_flags.bitfield.cpusse4a
4377 || i.tm.cpu_flags.bitfield.cpupclmul
4378 || i.tm.cpu_flags.bitfield.cpuaes
4379 || i.tm.cpu_flags.bitfield.cpusha
4380 || i.tm.cpu_flags.bitfield.cpugfni))
4381 {
4382 (sse_check == check_warning
4383 ? as_warn
4384 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
4385 }
4386
4387 if (i.tm.opcode_modifier.fwait)
4388 if (!add_prefix (FWAIT_OPCODE))
4389 return;
4390
4391 /* Check if REP prefix is OK. */
4392 if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
4393 {
4394 as_bad (_("invalid instruction `%s' after `%s'"),
4395 i.tm.name, i.rep_prefix);
4396 return;
4397 }
4398
4399 /* Check for lock without a lockable instruction. Destination operand
4400 must be memory unless it is xchg (0x86). */
4401 if (i.prefix[LOCK_PREFIX]
4402 && (!i.tm.opcode_modifier.islockable
4403 || i.mem_operands == 0
4404 || (i.tm.base_opcode != 0x86
4405 && !(i.flags[i.operands - 1] & Operand_Mem))))
4406 {
4407 as_bad (_("expecting lockable instruction after `lock'"));
4408 return;
4409 }
4410
4411 /* Check for data size prefix on VEX/XOP/EVEX encoded insns. */
4412 if (i.prefix[DATA_PREFIX] && is_any_vex_encoding (&i.tm))
4413 {
4414 as_bad (_("data size prefix invalid with `%s'"), i.tm.name);
4415 return;
4416 }
4417
4418 /* Check if HLE prefix is OK. */
4419 if (i.hle_prefix && !check_hle ())
4420 return;
4421
4422 /* Check BND prefix. */
4423 if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
4424 as_bad (_("expecting valid branch instruction after `bnd'"));
4425
4426 /* Check NOTRACK prefix. */
4427 if (i.notrack_prefix && !i.tm.opcode_modifier.notrackprefixok)
4428 as_bad (_("expecting indirect branch instruction after `notrack'"));
4429
4430 if (i.tm.cpu_flags.bitfield.cpumpx)
4431 {
4432 if (flag_code == CODE_64BIT && i.prefix[ADDR_PREFIX])
4433 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
4434 else if (flag_code != CODE_16BIT
4435 ? i.prefix[ADDR_PREFIX]
4436 : i.mem_operands && !i.prefix[ADDR_PREFIX])
4437 as_bad (_("16-bit address isn't allowed in MPX instructions"));
4438 }
4439
4440 /* Insert BND prefix. */
4441 if (add_bnd_prefix && i.tm.opcode_modifier.bndprefixok)
4442 {
4443 if (!i.prefix[BND_PREFIX])
4444 add_prefix (BND_PREFIX_OPCODE);
4445 else if (i.prefix[BND_PREFIX] != BND_PREFIX_OPCODE)
4446 {
4447 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
4448 i.prefix[BND_PREFIX] = BND_PREFIX_OPCODE;
4449 }
4450 }
4451
4452 /* Check string instruction segment overrides. */
4453 if (i.tm.opcode_modifier.isstring >= IS_STRING_ES_OP0)
4454 {
4455 gas_assert (i.mem_operands);
4456 if (!check_string ())
4457 return;
4458 i.disp_operands = 0;
4459 }
4460
4461 if (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize)
4462 optimize_encoding ();
4463
4464 if (!process_suffix ())
4465 return;
4466
4467 /* Update operand types. */
4468 for (j = 0; j < i.operands; j++)
4469 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
4470
4471 /* Make still unresolved immediate matches conform to size of immediate
4472 given in i.suffix. */
4473 if (!finalize_imm ())
4474 return;
4475
4476 if (i.types[0].bitfield.imm1)
4477 i.imm_operands = 0; /* kludge for shift insns. */
4478
4479 /* We only need to check those implicit registers for instructions
4480 with 3 operands or less. */
4481 if (i.operands <= 3)
4482 for (j = 0; j < i.operands; j++)
4483 if (i.types[j].bitfield.instance != InstanceNone
4484 && !i.types[j].bitfield.xmmword)
4485 i.reg_operands--;
4486
4487 /* ImmExt should be processed after SSE2AVX. */
4488 if (!i.tm.opcode_modifier.sse2avx
4489 && i.tm.opcode_modifier.immext)
4490 process_immext ();
4491
4492 /* For insns with operands there are more diddles to do to the opcode. */
4493 if (i.operands)
4494 {
4495 if (!process_operands ())
4496 return;
4497 }
4498 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
4499 {
4500 /* UnixWare fsub with no args is an alias for fsubp; likewise fadd -> faddp, etc. */
4501 as_warn (_("translating to `%sp'"), i.tm.name);
4502 }
4503
4504 if (is_any_vex_encoding (&i.tm))
4505 {
4506 if (!cpu_arch_flags.bitfield.cpui286)
4507 {
4508 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
4509 i.tm.name);
4510 return;
4511 }
4512
4513 if (i.tm.opcode_modifier.vex)
4514 build_vex_prefix (t);
4515 else
4516 build_evex_prefix ();
4517 }
4518
4519 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
4520 instructions may define INT_OPCODE as well, so avoid this corner
4521 case for those instructions that use MODRM. */
4522 if (i.tm.base_opcode == INT_OPCODE
4523 && !i.tm.opcode_modifier.modrm
4524 && i.op[0].imms->X_add_number == 3)
4525 {
4526 i.tm.base_opcode = INT3_OPCODE;
4527 i.imm_operands = 0;
4528 }
4529
4530 if ((i.tm.opcode_modifier.jump == JUMP
4531 || i.tm.opcode_modifier.jump == JUMP_BYTE
4532 || i.tm.opcode_modifier.jump == JUMP_DWORD)
4533 && i.op[0].disps->X_op == O_constant)
4534 {
4535 /* Convert "jmp constant" (and "call constant") to a jump (call) to
4536 the absolute address given by the constant. Since ix86 jumps and
4537 calls are pc relative, we need to generate a reloc. */
4538 i.op[0].disps->X_add_symbol = &abs_symbol;
4539 i.op[0].disps->X_op = O_symbol;
4540 }
4541
4542 if (i.tm.opcode_modifier.rex64)
4543 i.rex |= REX_W;
4544
4545 /* For 8-bit registers we need an empty REX prefix. Also, if the
4546 instruction already has a REX prefix, we need to convert legacy
4547 byte registers to the REX-encoded forms. */
4548
4549 if ((i.types[0].bitfield.class == Reg && i.types[0].bitfield.byte
4550 && (i.op[0].regs->reg_flags & RegRex64) != 0)
4551 || (i.types[1].bitfield.class == Reg && i.types[1].bitfield.byte
4552 && (i.op[1].regs->reg_flags & RegRex64) != 0)
4553 || (((i.types[0].bitfield.class == Reg && i.types[0].bitfield.byte)
4554 || (i.types[1].bitfield.class == Reg && i.types[1].bitfield.byte))
4555 && i.rex != 0))
4556 {
4557 int x;
4558
4559 i.rex |= REX_OPCODE;
4560 for (x = 0; x < 2; x++)
4561 {
4562 /* Look for 8 bit operand that uses old registers. */
4563 if (i.types[x].bitfield.class == Reg && i.types[x].bitfield.byte
4564 && (i.op[x].regs->reg_flags & RegRex64) == 0)
4565 {
4566 gas_assert (!(i.op[x].regs->reg_flags & RegRex));
4567 /* If it is a "hi" byte register (%ah, %ch, %dh, %bh), give up. */
4568 if (i.op[x].regs->reg_num > 3)
4569 as_bad (_("can't encode register '%s%s' in an "
4570 "instruction requiring REX prefix."),
4571 register_prefix, i.op[x].regs->reg_name);
4572
4573 /* Otherwise it is equivalent to the extended register.
4574 Since the encoding doesn't change this is merely
4575 cosmetic cleanup for debug output. */
4576
4577 i.op[x].regs = i.op[x].regs + 8;
4578 }
4579 }
4580 }
4581
4582 if (i.rex == 0 && i.rex_encoding)
4583 {
4584 /* Check if we can add a REX_OPCODE byte. Look for an 8-bit operand
4585 that uses a legacy register. If it is a "hi" register, don't add
4586 the REX_OPCODE byte. */
4587 int x;
4588 for (x = 0; x < 2; x++)
4589 if (i.types[x].bitfield.class == Reg
4590 && i.types[x].bitfield.byte
4591 && (i.op[x].regs->reg_flags & RegRex64) == 0
4592 && i.op[x].regs->reg_num > 3)
4593 {
4594 gas_assert (!(i.op[x].regs->reg_flags & RegRex));
4595 i.rex_encoding = FALSE;
4596 break;
4597 }
4598
4599 if (i.rex_encoding)
4600 i.rex = REX_OPCODE;
4601 }
4602
4603 if (i.rex != 0)
4604 add_prefix (REX_OPCODE | i.rex);
4605
4606 /* We are ready to output the insn. */
4607 output_insn ();
4608
4609 last_insn.seg = now_seg;
4610
4611 if (i.tm.opcode_modifier.isprefix)
4612 {
4613 last_insn.kind = last_insn_prefix;
4614 last_insn.name = i.tm.name;
4615 last_insn.file = as_where (&last_insn.line);
4616 }
4617 else
4618 last_insn.kind = last_insn_other;
4619 }
4620
4621 static char *
4622 parse_insn (char *line, char *mnemonic)
4623 {
4624 char *l = line;
4625 char *token_start = l;
4626 char *mnem_p;
4627 int supported;
4628 const insn_template *t;
4629 char *dot_p = NULL;
4630
4631 while (1)
4632 {
4633 mnem_p = mnemonic;
4634 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
4635 {
4636 if (*mnem_p == '.')
4637 dot_p = mnem_p;
4638 mnem_p++;
4639 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
4640 {
4641 as_bad (_("no such instruction: `%s'"), token_start);
4642 return NULL;
4643 }
4644 l++;
4645 }
4646 if (!is_space_char (*l)
4647 && *l != END_OF_INSN
4648 && (intel_syntax
4649 || (*l != PREFIX_SEPARATOR
4650 && *l != ',')))
4651 {
4652 as_bad (_("invalid character %s in mnemonic"),
4653 output_invalid (*l));
4654 return NULL;
4655 }
4656 if (token_start == l)
4657 {
4658 if (!intel_syntax && *l == PREFIX_SEPARATOR)
4659 as_bad (_("expecting prefix; got nothing"));
4660 else
4661 as_bad (_("expecting mnemonic; got nothing"));
4662 return NULL;
4663 }
4664
4665 /* Look up instruction (or prefix) via hash table. */
4666 current_templates = (const templates *) hash_find (op_hash, mnemonic);
4667
4668 if (*l != END_OF_INSN
4669 && (!is_space_char (*l) || l[1] != END_OF_INSN)
4670 && current_templates
4671 && current_templates->start->opcode_modifier.isprefix)
4672 {
4673 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
4674 {
4675 as_bad ((flag_code != CODE_64BIT
4676 ? _("`%s' is only supported in 64-bit mode")
4677 : _("`%s' is not supported in 64-bit mode")),
4678 current_templates->start->name);
4679 return NULL;
4680 }
4681 /* If we are in 16-bit mode, do not allow addr16 or data16.
4682 Similarly, in 32-bit mode, do not allow addr32 or data32. */
4683 if ((current_templates->start->opcode_modifier.size == SIZE16
4684 || current_templates->start->opcode_modifier.size == SIZE32)
4685 && flag_code != CODE_64BIT
4686 && ((current_templates->start->opcode_modifier.size == SIZE32)
4687 ^ (flag_code == CODE_16BIT)))
4688 {
4689 as_bad (_("redundant %s prefix"),
4690 current_templates->start->name);
4691 return NULL;
4692 }
4693 if (current_templates->start->opcode_length == 0)
4694 {
4695 /* Handle pseudo prefixes. */
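/* E.g. "{vex3} vmovaps %xmm0, %xmm1" forces the 3-byte VEX form,
   and "{nooptimize}" suppresses optimize_encoding below. */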
4696 switch (current_templates->start->base_opcode)
4697 {
4698 case 0x0:
4699 /* {disp8} */
4700 i.disp_encoding = disp_encoding_8bit;
4701 break;
4702 case 0x1:
4703 /* {disp32} */
4704 i.disp_encoding = disp_encoding_32bit;
4705 break;
4706 case 0x2:
4707 /* {load} */
4708 i.dir_encoding = dir_encoding_load;
4709 break;
4710 case 0x3:
4711 /* {store} */
4712 i.dir_encoding = dir_encoding_store;
4713 break;
4714 case 0x4:
4715 /* {vex} */
4716 i.vec_encoding = vex_encoding_vex;
4717 break;
4718 case 0x5:
4719 /* {vex3} */
4720 i.vec_encoding = vex_encoding_vex3;
4721 break;
4722 case 0x6:
4723 /* {evex} */
4724 i.vec_encoding = vex_encoding_evex;
4725 break;
4726 case 0x7:
4727 /* {rex} */
4728 i.rex_encoding = TRUE;
4729 break;
4730 case 0x8:
4731 /* {nooptimize} */
4732 i.no_optimize = TRUE;
4733 break;
4734 default:
4735 abort ();
4736 }
4737 }
4738 else
4739 {
4740 /* Add prefix, checking for repeated prefixes. */
4741 switch (add_prefix (current_templates->start->base_opcode))
4742 {
4743 case PREFIX_EXIST:
4744 return NULL;
4745 case PREFIX_DS:
4746 if (current_templates->start->cpu_flags.bitfield.cpuibt)
4747 i.notrack_prefix = current_templates->start->name;
4748 break;
4749 case PREFIX_REP:
4750 if (current_templates->start->cpu_flags.bitfield.cpuhle)
4751 i.hle_prefix = current_templates->start->name;
4752 else if (current_templates->start->cpu_flags.bitfield.cpumpx)
4753 i.bnd_prefix = current_templates->start->name;
4754 else
4755 i.rep_prefix = current_templates->start->name;
4756 break;
4757 default:
4758 break;
4759 }
4760 }
4761 /* Skip past PREFIX_SEPARATOR and reset token_start. */
4762 token_start = ++l;
4763 }
4764 else
4765 break;
4766 }
4767
4768 if (!current_templates)
4769 {
4770 /* Deprecated functionality (new code should use pseudo-prefixes instead):
4771 Check if we should swap operands or force an 8-bit or 32-bit
4772 displacement in the encoding. */
4773 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
4774 i.dir_encoding = dir_encoding_swap;
4775 else if (mnem_p - 3 == dot_p
4776 && dot_p[1] == 'd'
4777 && dot_p[2] == '8')
4778 i.disp_encoding = disp_encoding_8bit;
4779 else if (mnem_p - 4 == dot_p
4780 && dot_p[1] == 'd'
4781 && dot_p[2] == '3'
4782 && dot_p[3] == '2')
4783 i.disp_encoding = disp_encoding_32bit;
4784 else
4785 goto check_suffix;
4786 mnem_p = dot_p;
4787 *dot_p = '\0';
4788 current_templates = (const templates *) hash_find (op_hash, mnemonic);
4789 }
4790
4791 if (!current_templates)
4792 {
4793 check_suffix:
4794 if (mnem_p > mnemonic)
4795 {
4796 /* See if we can get a match by trimming off a suffix. */
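/* E.g. in AT&T syntax "movl" is looked up as "mov" with i.suffix
   set to 'l'. */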
4797 switch (mnem_p[-1])
4798 {
4799 case WORD_MNEM_SUFFIX:
4800 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
4801 i.suffix = SHORT_MNEM_SUFFIX;
4802 else
4803 /* Fall through. */
4804 case BYTE_MNEM_SUFFIX:
4805 case QWORD_MNEM_SUFFIX:
4806 i.suffix = mnem_p[-1];
4807 mnem_p[-1] = '\0';
4808 current_templates = (const templates *) hash_find (op_hash,
4809 mnemonic);
4810 break;
4811 case SHORT_MNEM_SUFFIX:
4812 case LONG_MNEM_SUFFIX:
4813 if (!intel_syntax)
4814 {
4815 i.suffix = mnem_p[-1];
4816 mnem_p[-1] = '\0';
4817 current_templates = (const templates *) hash_find (op_hash,
4818 mnemonic);
4819 }
4820 break;
4821
4822 /* Intel Syntax. */
4823 case 'd':
4824 if (intel_syntax)
4825 {
4826 if (intel_float_operand (mnemonic) == 1)
4827 i.suffix = SHORT_MNEM_SUFFIX;
4828 else
4829 i.suffix = LONG_MNEM_SUFFIX;
4830 mnem_p[-1] = '\0';
4831 current_templates = (const templates *) hash_find (op_hash,
4832 mnemonic);
4833 }
4834 break;
4835 }
4836 }
4837
4838 if (!current_templates)
4839 {
4840 as_bad (_("no such instruction: `%s'"), token_start);
4841 return NULL;
4842 }
4843 }
4844
4845 if (current_templates->start->opcode_modifier.jump == JUMP
4846 || current_templates->start->opcode_modifier.jump == JUMP_BYTE)
4847 {
4848 /* Check for a branch hint. We allow ",pt" and ",pn" for
4849 predict taken and predict not taken respectively.
4850 I'm not sure that branch hints actually do anything on loop
4851 and jcxz insns (JumpByte) for current Pentium4 chips. They
4852 may work in the future and it doesn't hurt to accept them
4853 now. */
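/* E.g. "jne,pt target" emits a DS (0x3e) segment prefix as the
   "predict taken" hint, and ",pn" a CS (0x2e) prefix. */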
4854 if (l[0] == ',' && l[1] == 'p')
4855 {
4856 if (l[2] == 't')
4857 {
4858 if (!add_prefix (DS_PREFIX_OPCODE))
4859 return NULL;
4860 l += 3;
4861 }
4862 else if (l[2] == 'n')
4863 {
4864 if (!add_prefix (CS_PREFIX_OPCODE))
4865 return NULL;
4866 l += 3;
4867 }
4868 }
4869 }
4870 /* Any other comma loses. */
4871 if (*l == ',')
4872 {
4873 as_bad (_("invalid character %s in mnemonic"),
4874 output_invalid (*l));
4875 return NULL;
4876 }
4877
4878 /* Check if instruction is supported on specified architecture. */
4879 supported = 0;
4880 for (t = current_templates->start; t < current_templates->end; ++t)
4881 {
4882 supported |= cpu_flags_match (t);
4883 if (supported == CPU_FLAGS_PERFECT_MATCH)
4884 {
4885 if (!cpu_arch_flags.bitfield.cpui386 && (flag_code != CODE_16BIT))
4886 as_warn (_("use .code16 to ensure correct addressing mode"));
4887
4888 return l;
4889 }
4890 }
4891
4892 if (!(supported & CPU_FLAGS_64BIT_MATCH))
4893 as_bad (flag_code == CODE_64BIT
4894 ? _("`%s' is not supported in 64-bit mode")
4895 : _("`%s' is only supported in 64-bit mode"),
4896 current_templates->start->name);
4897 else
4898 as_bad (_("`%s' is not supported on `%s%s'"),
4899 current_templates->start->name,
4900 cpu_arch_name ? cpu_arch_name : default_arch,
4901 cpu_sub_arch_name ? cpu_sub_arch_name : "");
4902
4903 return NULL;
4904 }
4905
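/* Parse the comma-separated operands following the mnemonic in L,
   handing each one to i386_intel_operand or i386_att_operand and
   accumulating the results in the global "i".  Return a pointer past
   the operands, or NULL on error. */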
4906 static char *
4907 parse_operands (char *l, const char *mnemonic)
4908 {
4909 char *token_start;
4910
4911 /* 1 if operand is pending after ','. */
4912 unsigned int expecting_operand = 0;
4913
4914 /* Non-zero if operand parens not balanced. */
4915 unsigned int paren_not_balanced;
4916
4917 while (*l != END_OF_INSN)
4918 {
4919 /* Skip optional white space before operand. */
4920 if (is_space_char (*l))
4921 ++l;
4922 if (!is_operand_char (*l) && *l != END_OF_INSN && *l != '"')
4923 {
4924 as_bad (_("invalid character %s before operand %d"),
4925 output_invalid (*l),
4926 i.operands + 1);
4927 return NULL;
4928 }
4929 token_start = l; /* After white space. */
4930 paren_not_balanced = 0;
4931 while (paren_not_balanced || *l != ',')
4932 {
4933 if (*l == END_OF_INSN)
4934 {
4935 if (paren_not_balanced)
4936 {
4937 if (!intel_syntax)
4938 as_bad (_("unbalanced parenthesis in operand %d."),
4939 i.operands + 1);
4940 else
4941 as_bad (_("unbalanced brackets in operand %d."),
4942 i.operands + 1);
4943 return NULL;
4944 }
4945 else
4946 break; /* we are done */
4947 }
4948 else if (!is_operand_char (*l) && !is_space_char (*l) && *l != '"')
4949 {
4950 as_bad (_("invalid character %s in operand %d"),
4951 output_invalid (*l),
4952 i.operands + 1);
4953 return NULL;
4954 }
4955 if (!intel_syntax)
4956 {
4957 if (*l == '(')
4958 ++paren_not_balanced;
4959 if (*l == ')')
4960 --paren_not_balanced;
4961 }
4962 else
4963 {
4964 if (*l == '[')
4965 ++paren_not_balanced;
4966 if (*l == ']')
4967 --paren_not_balanced;
4968 }
4969 l++;
4970 }
4971 if (l != token_start)
4972 { /* Yes, we've read in another operand. */
4973 unsigned int operand_ok;
4974 this_operand = i.operands++;
4975 if (i.operands > MAX_OPERANDS)
4976 {
4977 as_bad (_("spurious operands; (%d operands/instruction max)"),
4978 MAX_OPERANDS);
4979 return NULL;
4980 }
4981 i.types[this_operand].bitfield.unspecified = 1;
4982 /* Now parse operand adding info to 'i' as we go along. */
4983 END_STRING_AND_SAVE (l);
4984
4985 if (i.mem_operands > 1)
4986 {
4987 as_bad (_("too many memory references for `%s'"),
4988 mnemonic);
4989 return 0;
4990 }
4991
4992 if (intel_syntax)
4993 operand_ok =
4994 i386_intel_operand (token_start,
4995 intel_float_operand (mnemonic));
4996 else
4997 operand_ok = i386_att_operand (token_start);
4998
4999 RESTORE_END_STRING (l);
5000 if (!operand_ok)
5001 return NULL;
5002 }
5003 else
5004 {
5005 if (expecting_operand)
5006 {
5007 expecting_operand_after_comma:
5008 as_bad (_("expecting operand after ','; got nothing"));
5009 return NULL;
5010 }
5011 if (*l == ',')
5012 {
5013 as_bad (_("expecting operand before ','; got nothing"));
5014 return NULL;
5015 }
5016 }
5017
5018 /* Now *l must be either ',' or END_OF_INSN. */
5019 if (*l == ',')
5020 {
5021 if (*++l == END_OF_INSN)
5022 {
5023 /* A trailing comma with nothing after it is an error. */
5024 goto expecting_operand_after_comma;
5025 }
5026 expecting_operand = 1;
5027 }
5028 }
5029 return l;
5030 }
5031
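/* Exchange everything recorded in "i" about operands XCHG1 and XCHG2. */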
5032 static void
5033 swap_2_operands (int xchg1, int xchg2)
5034 {
5035 union i386_op temp_op;
5036 i386_operand_type temp_type;
5037 unsigned int temp_flags;
5038 enum bfd_reloc_code_real temp_reloc;
5039
5040 temp_type = i.types[xchg2];
5041 i.types[xchg2] = i.types[xchg1];
5042 i.types[xchg1] = temp_type;
5043
5044 temp_flags = i.flags[xchg2];
5045 i.flags[xchg2] = i.flags[xchg1];
5046 i.flags[xchg1] = temp_flags;
5047
5048 temp_op = i.op[xchg2];
5049 i.op[xchg2] = i.op[xchg1];
5050 i.op[xchg1] = temp_op;
5051
5052 temp_reloc = i.reloc[xchg2];
5053 i.reloc[xchg2] = i.reloc[xchg1];
5054 i.reloc[xchg1] = temp_reloc;
5055
5056 if (i.mask)
5057 {
5058 if (i.mask->operand == xchg1)
5059 i.mask->operand = xchg2;
5060 else if (i.mask->operand == xchg2)
5061 i.mask->operand = xchg1;
5062 }
5063 if (i.broadcast)
5064 {
5065 if (i.broadcast->operand == xchg1)
5066 i.broadcast->operand = xchg2;
5067 else if (i.broadcast->operand == xchg2)
5068 i.broadcast->operand = xchg1;
5069 }
5070 if (i.rounding)
5071 {
5072 if (i.rounding->operand == xchg1)
5073 i.rounding->operand = xchg2;
5074 else if (i.rounding->operand == xchg2)
5075 i.rounding->operand = xchg1;
5076 }
5077 }
5078
5079 static void
5080 swap_operands (void)
5081 {
5082 switch (i.operands)
5083 {
5084 case 5:
5085 case 4:
5086 swap_2_operands (1, i.operands - 2);
5087 /* Fall through. */
5088 case 3:
5089 case 2:
5090 swap_2_operands (0, i.operands - 1);
5091 break;
5092 default:
5093 abort ();
5094 }
5095
5096 if (i.mem_operands == 2)
5097 {
5098 const seg_entry *temp_seg;
5099 temp_seg = i.seg[0];
5100 i.seg[0] = i.seg[1];
5101 i.seg[1] = temp_seg;
5102 }
5103 }
5104
5105 /* Try to ensure constant immediates are represented in the smallest
5106 encoding possible. */
5107 static void
5108 optimize_imm (void)
5109 {
5110 char guess_suffix = 0;
5111 int op;
5112
5113 if (i.suffix)
5114 guess_suffix = i.suffix;
5115 else if (i.reg_operands)
5116 {
5117 /* Figure out a suffix from the last register operand specified.
5118 We can't do this properly yet, i.e. excluding special register
5119 instances, but the following works for instructions with
5120 immediates. In any case, we can't set i.suffix yet. */
5121 for (op = i.operands; --op >= 0;)
5122 if (i.types[op].bitfield.class != Reg)
5123 continue;
5124 else if (i.types[op].bitfield.byte)
5125 {
5126 guess_suffix = BYTE_MNEM_SUFFIX;
5127 break;
5128 }
5129 else if (i.types[op].bitfield.word)
5130 {
5131 guess_suffix = WORD_MNEM_SUFFIX;
5132 break;
5133 }
5134 else if (i.types[op].bitfield.dword)
5135 {
5136 guess_suffix = LONG_MNEM_SUFFIX;
5137 break;
5138 }
5139 else if (i.types[op].bitfield.qword)
5140 {
5141 guess_suffix = QWORD_MNEM_SUFFIX;
5142 break;
5143 }
5144 }
5145 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5146 guess_suffix = WORD_MNEM_SUFFIX;
5147
5148 for (op = i.operands; --op >= 0;)
5149 if (operand_type_check (i.types[op], imm))
5150 {
5151 switch (i.op[op].imms->X_op)
5152 {
5153 case O_constant:
5154 /* If a suffix is given, this operand may be shortened. */
5155 switch (guess_suffix)
5156 {
5157 case LONG_MNEM_SUFFIX:
5158 i.types[op].bitfield.imm32 = 1;
5159 i.types[op].bitfield.imm64 = 1;
5160 break;
5161 case WORD_MNEM_SUFFIX:
5162 i.types[op].bitfield.imm16 = 1;
5163 i.types[op].bitfield.imm32 = 1;
5164 i.types[op].bitfield.imm32s = 1;
5165 i.types[op].bitfield.imm64 = 1;
5166 break;
5167 case BYTE_MNEM_SUFFIX:
5168 i.types[op].bitfield.imm8 = 1;
5169 i.types[op].bitfield.imm8s = 1;
5170 i.types[op].bitfield.imm16 = 1;
5171 i.types[op].bitfield.imm32 = 1;
5172 i.types[op].bitfield.imm32s = 1;
5173 i.types[op].bitfield.imm64 = 1;
5174 break;
5175 }
5176
5177 /* If this operand is at most 16 bits, convert it
5178 to a signed 16 bit number before trying to see
5179 whether it will fit in an even smaller size.
5180 This allows a 16-bit operand such as $0xffe0 to
5181 be recognised as within Imm8S range. */
5182 if ((i.types[op].bitfield.imm16)
5183 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
5184 {
5185 i.op[op].imms->X_add_number =
5186 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
5187 }
5188 #ifdef BFD64
5189 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5190 if ((i.types[op].bitfield.imm32)
5191 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
5192 == 0))
5193 {
5194 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
5195 ^ ((offsetT) 1 << 31))
5196 - ((offsetT) 1 << 31));
5197 }
5198 #endif
5199 i.types[op]
5200 = operand_type_or (i.types[op],
5201 smallest_imm_type (i.op[op].imms->X_add_number));
5202
5203 /* We must avoid matching Imm32 templates when only a 64-bit
5204 immediate is available. */
5205 if (guess_suffix == QWORD_MNEM_SUFFIX)
5206 i.types[op].bitfield.imm32 = 0;
5207 break;
5208
5209 case O_absent:
5210 case O_register:
5211 abort ();
5212
5213 /* Symbols and expressions. */
5214 default:
5215 /* Convert symbolic operand to proper sizes for matching, but don't
5216 prevent matching a set of insns that only supports sizes other
5217 than those matching the insn suffix. */
5218 {
5219 i386_operand_type mask, allowed;
5220 const insn_template *t;
5221
5222 operand_type_set (&mask, 0);
5223 operand_type_set (&allowed, 0);
5224
5225 for (t = current_templates->start;
5226 t < current_templates->end;
5227 ++t)
5228 {
5229 allowed = operand_type_or (allowed, t->operand_types[op]);
5230 allowed = operand_type_and (allowed, anyimm);
5231 }
5232 switch (guess_suffix)
5233 {
5234 case QWORD_MNEM_SUFFIX:
5235 mask.bitfield.imm64 = 1;
5236 mask.bitfield.imm32s = 1;
5237 break;
5238 case LONG_MNEM_SUFFIX:
5239 mask.bitfield.imm32 = 1;
5240 break;
5241 case WORD_MNEM_SUFFIX:
5242 mask.bitfield.imm16 = 1;
5243 break;
5244 case BYTE_MNEM_SUFFIX:
5245 mask.bitfield.imm8 = 1;
5246 break;
5247 default:
5248 break;
5249 }
5250 allowed = operand_type_and (mask, allowed);
5251 if (!operand_type_all_zero (&allowed))
5252 i.types[op] = operand_type_and (i.types[op], mask);
5253 }
5254 break;
5255 }
5256 }
5257 }
5258
5259 /* Try to use the smallest displacement type too. */
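/* E.g. the constant displacement in "movl 4(%ebp), %eax" fits in
   Disp8, so the wider displacement types can be turned off. */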
5260 static void
5261 optimize_disp (void)
5262 {
5263 int op;
5264
5265 for (op = i.operands; --op >= 0;)
5266 if (operand_type_check (i.types[op], disp))
5267 {
5268 if (i.op[op].disps->X_op == O_constant)
5269 {
5270 offsetT op_disp = i.op[op].disps->X_add_number;
5271
5272 if (i.types[op].bitfield.disp16
5273 && (op_disp & ~(offsetT) 0xffff) == 0)
5274 {
5275 /* If this operand is at most 16 bits, convert
5276 to a signed 16 bit number and don't use 64bit
5277 displacement. */
5278 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
5279 i.types[op].bitfield.disp64 = 0;
5280 }
5281 #ifdef BFD64
5282 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5283 if (i.types[op].bitfield.disp32
5284 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
5285 {
5286 /* If this operand is at most 32 bits, convert
5287 to a signed 32 bit number and don't use 64bit
5288 displacement. */
5289 op_disp &= (((offsetT) 2 << 31) - 1);
5290 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
5291 i.types[op].bitfield.disp64 = 0;
5292 }
5293 #endif
5294 if (!op_disp && i.types[op].bitfield.baseindex)
5295 {
5296 i.types[op].bitfield.disp8 = 0;
5297 i.types[op].bitfield.disp16 = 0;
5298 i.types[op].bitfield.disp32 = 0;
5299 i.types[op].bitfield.disp32s = 0;
5300 i.types[op].bitfield.disp64 = 0;
5301 i.op[op].disps = 0;
5302 i.disp_operands--;
5303 }
5304 else if (flag_code == CODE_64BIT)
5305 {
5306 if (fits_in_signed_long (op_disp))
5307 {
5308 i.types[op].bitfield.disp64 = 0;
5309 i.types[op].bitfield.disp32s = 1;
5310 }
5311 if (i.prefix[ADDR_PREFIX]
5312 && fits_in_unsigned_long (op_disp))
5313 i.types[op].bitfield.disp32 = 1;
5314 }
5315 if ((i.types[op].bitfield.disp32
5316 || i.types[op].bitfield.disp32s
5317 || i.types[op].bitfield.disp16)
5318 && fits_in_disp8 (op_disp))
5319 i.types[op].bitfield.disp8 = 1;
5320 }
5321 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5322 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
5323 {
5324 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
5325 i.op[op].disps, 0, i.reloc[op]);
5326 i.types[op].bitfield.disp8 = 0;
5327 i.types[op].bitfield.disp16 = 0;
5328 i.types[op].bitfield.disp32 = 0;
5329 i.types[op].bitfield.disp32s = 0;
5330 i.types[op].bitfield.disp64 = 0;
5331 }
5332 else
5333 /* We only support a 64-bit displacement on constants. */
5334 i.types[op].bitfield.disp64 = 0;
5335 }
5336 }
5337
5338 /* Return 1 if there is a match in broadcast bytes between operand
5339 GIVEN and instruction template T. */
5340
5341 static INLINE int
5342 match_broadcast_size (const insn_template *t, unsigned int given)
5343 {
5344 return ((t->opcode_modifier.broadcast == BYTE_BROADCAST
5345 && i.types[given].bitfield.byte)
5346 || (t->opcode_modifier.broadcast == WORD_BROADCAST
5347 && i.types[given].bitfield.word)
5348 || (t->opcode_modifier.broadcast == DWORD_BROADCAST
5349 && i.types[given].bitfield.dword)
5350 || (t->opcode_modifier.broadcast == QWORD_BROADCAST
5351 && i.types[given].bitfield.qword));
5352 }
5353
5354 /* Check if operands are valid for the instruction. */
5355
5356 static int
5357 check_VecOperands (const insn_template *t)
5358 {
5359 unsigned int op;
5360 i386_cpu_flags cpu;
5361
5362 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5363 any one operand are implicitly requiring AVX512VL support if the actual
5364 operand size is YMMword or XMMword. Since this function runs after
5365 template matching, there's no need to check for YMMword/XMMword in
5366 the template. */
5367 cpu = cpu_flags_and (t->cpu_flags, avx512);
5368 if (!cpu_flags_all_zero (&cpu)
5369 && !t->cpu_flags.bitfield.cpuavx512vl
5370 && !cpu_arch_flags.bitfield.cpuavx512vl)
5371 {
5372 for (op = 0; op < t->operands; ++op)
5373 {
5374 if (t->operand_types[op].bitfield.zmmword
5375 && (i.types[op].bitfield.ymmword
5376 || i.types[op].bitfield.xmmword))
5377 {
5378 i.error = unsupported;
5379 return 1;
5380 }
5381 }
5382 }
5383
5384 /* Without VSIB byte, we can't have a vector register for index. */
5385 if (!t->opcode_modifier.vecsib
5386 && i.index_reg
5387 && (i.index_reg->reg_type.bitfield.xmmword
5388 || i.index_reg->reg_type.bitfield.ymmword
5389 || i.index_reg->reg_type.bitfield.zmmword))
5390 {
5391 i.error = unsupported_vector_index_register;
5392 return 1;
5393 }
5394
5395 /* Check if default mask is allowed. */
5396 if (t->opcode_modifier.nodefmask
5397 && (!i.mask || i.mask->mask->reg_num == 0))
5398 {
5399 i.error = no_default_mask;
5400 return 1;
5401 }
5402
5403 /* For VSIB byte, we need a vector register for index, and all vector
5404 registers must be distinct. */
5405 if (t->opcode_modifier.vecsib)
5406 {
5407 if (!i.index_reg
5408 || !((t->opcode_modifier.vecsib == VecSIB128
5409 && i.index_reg->reg_type.bitfield.xmmword)
5410 || (t->opcode_modifier.vecsib == VecSIB256
5411 && i.index_reg->reg_type.bitfield.ymmword)
5412 || (t->opcode_modifier.vecsib == VecSIB512
5413 && i.index_reg->reg_type.bitfield.zmmword)))
5414 {
5415 i.error = invalid_vsib_address;
5416 return 1;
5417 }
5418
5419 gas_assert (i.reg_operands == 2 || i.mask);
5420 if (i.reg_operands == 2 && !i.mask)
5421 {
5422 gas_assert (i.types[0].bitfield.class == RegSIMD);
5423 gas_assert (i.types[0].bitfield.xmmword
5424 || i.types[0].bitfield.ymmword);
5425 gas_assert (i.types[2].bitfield.class == RegSIMD);
5426 gas_assert (i.types[2].bitfield.xmmword
5427 || i.types[2].bitfield.ymmword);
5428 if (operand_check == check_none)
5429 return 0;
5430 if (register_number (i.op[0].regs)
5431 != register_number (i.index_reg)
5432 && register_number (i.op[2].regs)
5433 != register_number (i.index_reg)
5434 && register_number (i.op[0].regs)
5435 != register_number (i.op[2].regs))
5436 return 0;
5437 if (operand_check == check_error)
5438 {
5439 i.error = invalid_vector_register_set;
5440 return 1;
5441 }
5442 as_warn (_("mask, index, and destination registers should be distinct"));
5443 }
5444 else if (i.reg_operands == 1 && i.mask)
5445 {
5446 if (i.types[1].bitfield.class == RegSIMD
5447 && (i.types[1].bitfield.xmmword
5448 || i.types[1].bitfield.ymmword
5449 || i.types[1].bitfield.zmmword)
5450 && (register_number (i.op[1].regs)
5451 == register_number (i.index_reg)))
5452 {
5453 if (operand_check == check_error)
5454 {
5455 i.error = invalid_vector_register_set;
5456 return 1;
5457 }
5458 if (operand_check != check_none)
5459 as_warn (_("index and destination registers should be distinct"));
5460 }
5461 }
5462 }
5463
5464 /* Check if broadcast is supported by the instruction and is applied
5465 to the memory operand. */
5466 if (i.broadcast)
5467 {
5468 i386_operand_type type, overlap;
5469
5470 /* Check if specified broadcast is supported in this instruction,
5471 and its broadcast bytes match the memory operand. */
5472 op = i.broadcast->operand;
5473 if (!t->opcode_modifier.broadcast
5474 || !(i.flags[op] & Operand_Mem)
5475 || (!i.types[op].bitfield.unspecified
5476 && !match_broadcast_size (t, op)))
5477 {
5478 bad_broadcast:
5479 i.error = unsupported_broadcast;
5480 return 1;
5481 }
5482
5483 i.broadcast->bytes = ((1 << (t->opcode_modifier.broadcast - 1))
5484 * i.broadcast->type);
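/* E.g. a dword element with "{1to8}" gives 4 * 8 == 32 bytes, i.e.
   a YMM-wide broadcast (one worked case; the element size comes from
   the template and the 1toN factor from the parsed operand). */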
5485 operand_type_set (&type, 0);
5486 switch (i.broadcast->bytes)
5487 {
5488 case 2:
5489 type.bitfield.word = 1;
5490 break;
5491 case 4:
5492 type.bitfield.dword = 1;
5493 break;
5494 case 8:
5495 type.bitfield.qword = 1;
5496 break;
5497 case 16:
5498 type.bitfield.xmmword = 1;
5499 break;
5500 case 32:
5501 type.bitfield.ymmword = 1;
5502 break;
5503 case 64:
5504 type.bitfield.zmmword = 1;
5505 break;
5506 default:
5507 goto bad_broadcast;
5508 }
5509
5510 overlap = operand_type_and (type, t->operand_types[op]);
5511 if (operand_type_all_zero (&overlap))
5512 goto bad_broadcast;
5513
5514 if (t->opcode_modifier.checkregsize)
5515 {
5516 unsigned int j;
5517
5518 type.bitfield.baseindex = 1;
5519 for (j = 0; j < i.operands; ++j)
5520 {
5521 if (j != op
5522 && !operand_type_register_match(i.types[j],
5523 t->operand_types[j],
5524 type,
5525 t->operand_types[op]))
5526 goto bad_broadcast;
5527 }
5528 }
5529 }
5530 /* If the instruction supports broadcast, check that a memory operand
5531 of one-element size isn't specified without broadcast. */
5532 else if (t->opcode_modifier.broadcast && i.mem_operands)
5533 {
5534 /* Find memory operand. */
5535 for (op = 0; op < i.operands; op++)
5536 if (i.flags[op] & Operand_Mem)
5537 break;
5538 gas_assert (op < i.operands);
5539 /* Check size of the memory operand. */
5540 if (match_broadcast_size (t, op))
5541 {
5542 i.error = broadcast_needed;
5543 return 1;
5544 }
5545 }
5546 else
5547 op = MAX_OPERANDS - 1; /* Avoid uninitialized variable warning. */
5548
5549 /* Check if requested masking is supported. */
5550 if (i.mask)
5551 {
5552 switch (t->opcode_modifier.masking)
5553 {
5554 case BOTH_MASKING:
5555 break;
5556 case MERGING_MASKING:
5557 if (i.mask->zeroing)
5558 {
5559 case 0:
5560 i.error = unsupported_masking;
5561 return 1;
5562 }
5563 break;
5564 case DYNAMIC_MASKING:
5565 /* Memory destinations allow only merging masking. */
5566 if (i.mask->zeroing && i.mem_operands)
5567 {
5568 /* Find memory operand. */
5569 for (op = 0; op < i.operands; op++)
5570 if (i.flags[op] & Operand_Mem)
5571 break;
5572 gas_assert (op < i.operands);
5573 if (op == i.operands - 1)
5574 {
5575 i.error = unsupported_masking;
5576 return 1;
5577 }
5578 }
5579 break;
5580 default:
5581 abort ();
5582 }
5583 }
5584
5585 /* Check if masking is applied to dest operand. */
5586 if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
5587 {
5588 i.error = mask_not_on_destination;
5589 return 1;
5590 }
5591
5592 /* Check RC/SAE. */
5593 if (i.rounding)
5594 {
5595 if (!t->opcode_modifier.sae
5596 || (i.rounding->type != saeonly && !t->opcode_modifier.staticrounding))
5597 {
5598 i.error = unsupported_rc_sae;
5599 return 1;
5600 }
5601 /* If the instruction has several immediate operands and one of
5602 them is rounding, the rounding operand should be the last
5603 immediate operand. */
5604 if (i.imm_operands > 1
5605 && i.rounding->operand != (int) (i.imm_operands - 1))
5606 {
5607 i.error = rc_sae_operand_not_last_imm;
5608 return 1;
5609 }
5610 }
5611
5612 /* Check vector Disp8 operand. */
5613 if (t->opcode_modifier.disp8memshift
5614 && i.disp_encoding != disp_encoding_32bit)
5615 {
5616 if (i.broadcast)
5617 i.memshift = t->opcode_modifier.broadcast - 1;
5618 else if (t->opcode_modifier.disp8memshift != DISP8_SHIFT_VL)
5619 i.memshift = t->opcode_modifier.disp8memshift;
5620 else
5621 {
5622 const i386_operand_type *type = NULL;
5623
5624 i.memshift = 0;
5625 for (op = 0; op < i.operands; op++)
5626 if (i.flags[op] & Operand_Mem)
5627 {
5628 if (t->opcode_modifier.evex == EVEXLIG)
5629 i.memshift = 2 + (i.suffix == QWORD_MNEM_SUFFIX);
5630 else if (t->operand_types[op].bitfield.xmmword
5631 + t->operand_types[op].bitfield.ymmword
5632 + t->operand_types[op].bitfield.zmmword <= 1)
5633 type = &t->operand_types[op];
5634 else if (!i.types[op].bitfield.unspecified)
5635 type = &i.types[op];
5636 }
5637 else if (i.types[op].bitfield.class == RegSIMD
5638 && t->opcode_modifier.evex != EVEXLIG)
5639 {
5640 if (i.types[op].bitfield.zmmword)
5641 i.memshift = 6;
5642 else if (i.types[op].bitfield.ymmword && i.memshift < 5)
5643 i.memshift = 5;
5644 else if (i.types[op].bitfield.xmmword && i.memshift < 4)
5645 i.memshift = 4;
5646 }
5647
5648 if (type)
5649 {
5650 if (type->bitfield.zmmword)
5651 i.memshift = 6;
5652 else if (type->bitfield.ymmword)
5653 i.memshift = 5;
5654 else if (type->bitfield.xmmword)
5655 i.memshift = 4;
5656 }
5657
5658 /* For the check in fits_in_disp8(). */
5659 if (i.memshift == 0)
5660 i.memshift = -1;
5661 }
5662
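/* With i.memshift set, fits_in_disp8 () tests the compressed Disp8*N
   form: e.g. with a 64-byte (ZMM) memory operand i.memshift is 6, so
   a displacement of 0x80 can be emitted as the scaled disp8 0x02. */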
5663 for (op = 0; op < i.operands; op++)
5664 if (operand_type_check (i.types[op], disp)
5665 && i.op[op].disps->X_op == O_constant)
5666 {
5667 if (fits_in_disp8 (i.op[op].disps->X_add_number))
5668 {
5669 i.types[op].bitfield.disp8 = 1;
5670 return 0;
5671 }
5672 i.types[op].bitfield.disp8 = 0;
5673 }
5674 }
5675
5676 i.memshift = 0;
5677
5678 return 0;
5679 }
5680
5681 /* Check if operands are valid for the instruction. Update VEX
5682 operand types. */
5683
5684 static int
5685 VEX_check_operands (const insn_template *t)
5686 {
5687 if (i.vec_encoding == vex_encoding_evex)
5688 {
5689 /* This instruction must be encoded with EVEX prefix. */
5690 if (!is_evex_encoding (t))
5691 {
5692 i.error = unsupported;
5693 return 1;
5694 }
5695 return 0;
5696 }
5697
5698 if (!t->opcode_modifier.vex)
5699 {
5700 /* This instruction template doesn't have VEX prefix. */
5701 if (i.vec_encoding != vex_encoding_default)
5702 {
5703 i.error = unsupported;
5704 return 1;
5705 }
5706 return 0;
5707 }
5708
5709 /* Check the special Imm4 cases; must be the first operand. */
5710 if (t->cpu_flags.bitfield.cpuxop && t->operands == 5)
5711 {
5712 if (i.op[0].imms->X_op != O_constant
5713 || !fits_in_imm4 (i.op[0].imms->X_add_number))
5714 {
5715 i.error = bad_imm4;
5716 return 1;
5717 }
5718
5719 /* Turn off Imm<N> so that update_imm won't complain. */
5720 operand_type_set (&i.types[0], 0);
5721 }
5722
5723 return 0;
5724 }
5725
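/* Pick the template in current_templates that matches the parsed
   operands and the mnemonic suffix MNEM_SUFFIX.  Return it (the choice
   is also recorded in i.tm) or NULL with i.error describing the
   failure. */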
5726 static const insn_template *
5727 match_template (char mnem_suffix)
5728 {
5729 /* Points to template once we've found it. */
5730 const insn_template *t;
5731 i386_operand_type overlap0, overlap1, overlap2, overlap3;
5732 i386_operand_type overlap4;
5733 unsigned int found_reverse_match;
5734 i386_opcode_modifier suffix_check;
5735 i386_operand_type operand_types [MAX_OPERANDS];
5736 int addr_prefix_disp;
5737 unsigned int j, size_match, check_register;
5738 enum i386_error specific_error = 0;
5739
5740 #if MAX_OPERANDS != 5
5741 # error "MAX_OPERANDS must be 5."
5742 #endif
5743
5744 found_reverse_match = 0;
5745 addr_prefix_disp = -1;
5746
5747 /* Prepare for mnemonic suffix check. */
5748 memset (&suffix_check, 0, sizeof (suffix_check));
5749 switch (mnem_suffix)
5750 {
5751 case BYTE_MNEM_SUFFIX:
5752 suffix_check.no_bsuf = 1;
5753 break;
5754 case WORD_MNEM_SUFFIX:
5755 suffix_check.no_wsuf = 1;
5756 break;
5757 case SHORT_MNEM_SUFFIX:
5758 suffix_check.no_ssuf = 1;
5759 break;
5760 case LONG_MNEM_SUFFIX:
5761 suffix_check.no_lsuf = 1;
5762 break;
5763 case QWORD_MNEM_SUFFIX:
5764 suffix_check.no_qsuf = 1;
5765 break;
5766 default:
5767 /* NB: In Intel syntax, normally we can check for memory operand
5768 size when there is no mnemonic suffix. But jmp and call have
5769 2 different encodings with Dword memory operand size, one with
5770 No_ldSuf and the other without. i.suffix is set to
5771 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
5772 if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
5773 suffix_check.no_ldsuf = 1;
5774 }
5775
5776 /* Must have right number of operands. */
5777 i.error = number_of_operands_mismatch;
5778
5779 for (t = current_templates->start; t < current_templates->end; t++)
5780 {
5781 addr_prefix_disp = -1;
5782 found_reverse_match = 0;
5783
5784 if (i.operands != t->operands)
5785 continue;
5786
5787 /* Check processor support. */
5788 i.error = unsupported;
5789 if (cpu_flags_match (t) != CPU_FLAGS_PERFECT_MATCH)
5790 continue;
5791
5792 /* Check AT&T mnemonic. */
5793 i.error = unsupported_with_intel_mnemonic;
5794 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
5795 continue;
5796
5797 /* Check AT&T/Intel syntax. */
5798 i.error = unsupported_syntax;
5799 if ((intel_syntax && t->opcode_modifier.attsyntax)
5800 || (!intel_syntax && t->opcode_modifier.intelsyntax))
5801 continue;
5802
5803 /* Check Intel64/AMD64 ISA. */
5804 switch (isa64)
5805 {
5806 default:
5807 /* Default: Don't accept Intel64. */
5808 if (t->opcode_modifier.isa64 == INTEL64)
5809 continue;
5810 break;
5811 case amd64:
5812 /* -mamd64: Don't accept Intel64 or Intel64-only insns. */
5813 if (t->opcode_modifier.isa64 >= INTEL64)
5814 continue;
5815 break;
5816 case intel64:
5817 /* -mintel64: Don't accept AMD64-only insns in 64-bit mode. */
5818 if (t->opcode_modifier.isa64 == AMD64 && flag_code == CODE_64BIT)
5819 continue;
5820 break;
5821 }
5822
5823 /* Check the suffix. */
5824 i.error = invalid_instruction_suffix;
5825 if ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
5826 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
5827 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
5828 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
5829 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
5830 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf))
5831 continue;
5832
5833 size_match = operand_size_match (t);
5834 if (!size_match)
5835 continue;
5836
5837 /* This is intentionally not
5838
5839 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
5840
5841 as the case of a missing * on the operand is accepted (perhaps with
5842 a warning, issued further down). */
5843 if (i.jumpabsolute && t->opcode_modifier.jump != JUMP_ABSOLUTE)
5844 {
5845 i.error = operand_type_mismatch;
5846 continue;
5847 }
5848
5849 for (j = 0; j < MAX_OPERANDS; j++)
5850 operand_types[j] = t->operand_types[j];
5851
5852 /* In general, don't allow 64-bit operands in 32-bit mode. */
5853 if (i.suffix == QWORD_MNEM_SUFFIX
5854 && flag_code != CODE_64BIT
5855 && (intel_syntax
5856 ? (!t->opcode_modifier.ignoresize
5857 && !t->opcode_modifier.broadcast
5858 && !intel_float_operand (t->name))
5859 : intel_float_operand (t->name) != 2)
5860 && ((operand_types[0].bitfield.class != RegMMX
5861 && operand_types[0].bitfield.class != RegSIMD)
5862 || (operand_types[t->operands > 1].bitfield.class != RegMMX
5863 && operand_types[t->operands > 1].bitfield.class != RegSIMD))
5864 && (t->base_opcode != 0x0fc7
5865 || t->extension_opcode != 1 /* cmpxchg8b */))
5866 continue;
5867
5868 /* In general, don't allow 32-bit operands on pre-386. */
5869 else if (i.suffix == LONG_MNEM_SUFFIX
5870 && !cpu_arch_flags.bitfield.cpui386
5871 && (intel_syntax
5872 ? (!t->opcode_modifier.ignoresize
5873 && !intel_float_operand (t->name))
5874 : intel_float_operand (t->name) != 2)
5875 && ((operand_types[0].bitfield.class != RegMMX
5876 && operand_types[0].bitfield.class != RegSIMD)
5877 || (operand_types[t->operands > 1].bitfield.class != RegMMX
5878 && operand_types[t->operands > 1].bitfield.class
5879 != RegSIMD)))
5880 continue;
5881
5882 /* Do not verify operands when there are none. */
5883 else
5884 {
5885 if (!t->operands)
5886 /* We've found a match; break out of loop. */
5887 break;
5888 }
5889
5890 if (!t->opcode_modifier.jump
5891 || t->opcode_modifier.jump == JUMP_ABSOLUTE)
5892 {
5893 /* There should be only one Disp operand. */
5894 for (j = 0; j < MAX_OPERANDS; j++)
5895 if (operand_type_check (operand_types[j], disp))
5896 break;
5897 if (j < MAX_OPERANDS)
5898 {
5899 bfd_boolean override = (i.prefix[ADDR_PREFIX] != 0);
5900
5901 addr_prefix_disp = j;
5902
5903 /* Address size prefix will turn Disp64/Disp32S/Disp32/Disp16
5904 operand into Disp32/Disp32/Disp16/Disp32 operand. */
5905 switch (flag_code)
5906 {
5907 case CODE_16BIT:
5908 override = !override;
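/* 16-bit code defaults to Disp16 and the address size prefix selects
Disp32, the inverse of 32-bit code, hence the inversion before
falling through. */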
5909 /* Fall through. */
5910 case CODE_32BIT:
5911 if (operand_types[j].bitfield.disp32
5912 && operand_types[j].bitfield.disp16)
5913 {
5914 operand_types[j].bitfield.disp16 = override;
5915 operand_types[j].bitfield.disp32 = !override;
5916 }
5917 operand_types[j].bitfield.disp32s = 0;
5918 operand_types[j].bitfield.disp64 = 0;
5919 break;
5920
5921 case CODE_64BIT:
5922 if (operand_types[j].bitfield.disp32s
5923 || operand_types[j].bitfield.disp64)
5924 {
5925 operand_types[j].bitfield.disp64 &= !override;
5926 operand_types[j].bitfield.disp32s &= !override;
5927 operand_types[j].bitfield.disp32 = override;
5928 }
5929 operand_types[j].bitfield.disp16 = 0;
5930 break;
5931 }
5932 }
5933 }
5934
5935 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
5936 if (i.reloc[0] == BFD_RELOC_386_GOT32 && t->base_opcode == 0xa0)
5937 continue;
5938
5939 /* check_register is a bitmask; bit N is set when the size of register operand N still needs checking, with a broadcasted operand exempt. */
5940 if (t->opcode_modifier.checkregsize)
5941 {
5942 check_register = (1 << t->operands) - 1;
5943 if (i.broadcast)
5944 check_register &= ~(1 << i.broadcast->operand);
5945 }
5946 else
5947 check_register = 0;
5948
5949 overlap0 = operand_type_and (i.types[0], operand_types[0]);
5950 switch (t->operands)
5951 {
5952 case 1:
5953 if (!operand_type_match (overlap0, i.types[0]))
5954 continue;
5955 break;
5956 case 2:
5957 /* xchg %eax, %eax is a special case. It is an alias for nop
5958 only in 32bit mode and we can use opcode 0x90. In 64bit
5959 mode, we can't use 0x90 for xchg %eax, %eax since it should
5960 zero-extend %eax to %rax. */
5961 if (flag_code == CODE_64BIT
5962 && t->base_opcode == 0x90
5963 && i.types[0].bitfield.instance == Accum
5964 && i.types[0].bitfield.dword
5965 && i.types[1].bitfield.instance == Accum
5966 && i.types[1].bitfield.dword)
5967 continue;
5968 /* xrelease mov %eax, <disp> is another special case. It must not
5969 match the accumulator-only encoding of mov. */
5970 if (flag_code != CODE_64BIT
5971 && i.hle_prefix
5972 && t->base_opcode == 0xa0
5973 && i.types[0].bitfield.instance == Accum
5974 && (i.flags[1] & Operand_Mem))
5975 continue;
5976 /* Fall through. */
5977
5978 case 3:
5979 if (!(size_match & MATCH_STRAIGHT))
5980 goto check_reverse;
5981 /* Reverse direction of operands if swapping is possible in the first
5982 place (operands need to be symmetric) and
5983 - the load form is requested, and the template is a store form,
5984 - the store form is requested, and the template is a load form,
5985 - the non-default (swapped) form is requested. */
5986 overlap1 = operand_type_and (operand_types[0], operand_types[1]);
5987 if (t->opcode_modifier.d && i.reg_operands == i.operands
5988 && !operand_type_all_zero (&overlap1))
5989 switch (i.dir_encoding)
5990 {
5991 case dir_encoding_load:
5992 if (operand_type_check (operand_types[i.operands - 1], anymem)
5993 || t->opcode_modifier.regmem)
5994 goto check_reverse;
5995 break;
5996
5997 case dir_encoding_store:
5998 if (!operand_type_check (operand_types[i.operands - 1], anymem)
5999 && !t->opcode_modifier.regmem)
6000 goto check_reverse;
6001 break;
6002
6003 case dir_encoding_swap:
6004 goto check_reverse;
6005
6006 case dir_encoding_default:
6007 break;
6008 }
6009 /* If we want the store form, we skip the current load template. */
6010 if ((i.dir_encoding == dir_encoding_store
6011 || i.dir_encoding == dir_encoding_swap)
6012 && i.mem_operands == 0
6013 && t->opcode_modifier.load)
6014 continue;
6015 /* Fall through. */
6016 case 4:
6017 case 5:
6018 overlap1 = operand_type_and (i.types[1], operand_types[1]);
6019 if (!operand_type_match (overlap0, i.types[0])
6020 || !operand_type_match (overlap1, i.types[1])
6021 || ((check_register & 3) == 3
6022 && !operand_type_register_match (i.types[0],
6023 operand_types[0],
6024 i.types[1],
6025 operand_types[1])))
6026 {
6027 /* Check if other direction is valid ... */
6028 if (!t->opcode_modifier.d)
6029 continue;
6030
6031 check_reverse:
6032 if (!(size_match & MATCH_REVERSE))
6033 continue;
6034 /* Try reversing direction of operands. */
6035 overlap0 = operand_type_and (i.types[0], operand_types[i.operands - 1]);
6036 overlap1 = operand_type_and (i.types[i.operands - 1], operand_types[0]);
6037 if (!operand_type_match (overlap0, i.types[0])
6038 || !operand_type_match (overlap1, i.types[i.operands - 1])
6039 || (check_register
6040 && !operand_type_register_match (i.types[0],
6041 operand_types[i.operands - 1],
6042 i.types[i.operands - 1],
6043 operand_types[0])))
6044 {
6045 /* Does not match either direction. */
6046 continue;
6047 }
6048 /* found_reverse_match holds which of D or FloatR
6049 we've found. */
6050 if (!t->opcode_modifier.d)
6051 found_reverse_match = 0;
6052 else if (operand_types[0].bitfield.tbyte)
6053 found_reverse_match = Opcode_FloatD;
6054 else if (operand_types[0].bitfield.xmmword
6055 || operand_types[i.operands - 1].bitfield.xmmword
6056 || operand_types[0].bitfield.class == RegMMX
6057 || operand_types[i.operands - 1].bitfield.class == RegMMX
6058 || is_any_vex_encoding (t))
6059 found_reverse_match = (t->base_opcode & 0xee) != 0x6e
6060 ? Opcode_SIMD_FloatD : Opcode_SIMD_IntD;
6061 else
6062 found_reverse_match = Opcode_D;
6063 if (t->opcode_modifier.floatr)
6064 found_reverse_match |= Opcode_FloatR;
6065 }
6066 else
6067 {
6068 /* Found a forward 2 operand match here. */
6069 switch (t->operands)
6070 {
6071 case 5:
6072 overlap4 = operand_type_and (i.types[4],
6073 operand_types[4]);
6074 /* Fall through. */
6075 case 4:
6076 overlap3 = operand_type_and (i.types[3],
6077 operand_types[3]);
6078 /* Fall through. */
6079 case 3:
6080 overlap2 = operand_type_and (i.types[2],
6081 operand_types[2]);
6082 break;
6083 }
6084
6085 switch (t->operands)
6086 {
6087 case 5:
6088 if (!operand_type_match (overlap4, i.types[4])
6089 || !operand_type_register_match (i.types[3],
6090 operand_types[3],
6091 i.types[4],
6092 operand_types[4]))
6093 continue;
6094 /* Fall through. */
6095 case 4:
6096 if (!operand_type_match (overlap3, i.types[3])
6097 || ((check_register & 0xa) == 0xa
6098 && !operand_type_register_match (i.types[1],
6099 operand_types[1],
6100 i.types[3],
6101 operand_types[3]))
6102 || ((check_register & 0xc) == 0xc
6103 && !operand_type_register_match (i.types[2],
6104 operand_types[2],
6105 i.types[3],
6106 operand_types[3])))
6107 continue;
6108 /* Fall through. */
6109 case 3:
6110 /* Here we make use of the fact that there are no
6111 reverse match 3 operand instructions. */
6112 if (!operand_type_match (overlap2, i.types[2])
6113 || ((check_register & 5) == 5
6114 && !operand_type_register_match (i.types[0],
6115 operand_types[0],
6116 i.types[2],
6117 operand_types[2]))
6118 || ((check_register & 6) == 6
6119 && !operand_type_register_match (i.types[1],
6120 operand_types[1],
6121 i.types[2],
6122 operand_types[2])))
6123 continue;
6124 break;
6125 }
6126 }
6127 /* Found either forward/reverse 2, 3 or 4 operand match here:
6128 slip through to break. */
6129 }
6130
6131 /* Check if vector and VEX operands are valid. */
6132 if (check_VecOperands (t) || VEX_check_operands (t))
6133 {
6134 specific_error = i.error;
6135 continue;
6136 }
6137
6138 /* We've found a match; break out of loop. */
6139 break;
6140 }
6141
6142 if (t == current_templates->end)
6143 {
6144 /* We found no match. */
6145 const char *err_msg;
6146 switch (specific_error ? specific_error : i.error)
6147 {
6148 default:
6149 abort ();
6150 case operand_size_mismatch:
6151 err_msg = _("operand size mismatch");
6152 break;
6153 case operand_type_mismatch:
6154 err_msg = _("operand type mismatch");
6155 break;
6156 case register_type_mismatch:
6157 err_msg = _("register type mismatch");
6158 break;
6159 case number_of_operands_mismatch:
6160 err_msg = _("number of operands mismatch");
6161 break;
6162 case invalid_instruction_suffix:
6163 err_msg = _("invalid instruction suffix");
6164 break;
6165 case bad_imm4:
6166 err_msg = _("constant doesn't fit in 4 bits");
6167 break;
6168 case unsupported_with_intel_mnemonic:
6169 err_msg = _("unsupported with Intel mnemonic");
6170 break;
6171 case unsupported_syntax:
6172 err_msg = _("unsupported syntax");
6173 break;
6174 case unsupported:
6175 as_bad (_("unsupported instruction `%s'"),
6176 current_templates->start->name);
6177 return NULL;
6178 case invalid_vsib_address:
6179 err_msg = _("invalid VSIB address");
6180 break;
6181 case invalid_vector_register_set:
6182 err_msg = _("mask, index, and destination registers must be distinct");
6183 break;
6184 case unsupported_vector_index_register:
6185 err_msg = _("unsupported vector index register");
6186 break;
6187 case unsupported_broadcast:
6188 err_msg = _("unsupported broadcast");
6189 break;
6190 case broadcast_needed:
6191 err_msg = _("broadcast is needed for an operand of this type");
6192 break;
6193 case unsupported_masking:
6194 err_msg = _("unsupported masking");
6195 break;
6196 case mask_not_on_destination:
6197 err_msg = _("mask not on destination operand");
6198 break;
6199 case no_default_mask:
6200 err_msg = _("default mask isn't allowed");
6201 break;
6202 case unsupported_rc_sae:
6203 err_msg = _("unsupported static rounding/sae");
6204 break;
6205 case rc_sae_operand_not_last_imm:
6206 if (intel_syntax)
6207 err_msg = _("RC/SAE operand must precede immediate operands");
6208 else
6209 err_msg = _("RC/SAE operand must follow immediate operands");
6210 break;
6211 case invalid_register_operand:
6212 err_msg = _("invalid register operand");
6213 break;
6214 }
6215 as_bad (_("%s for `%s'"), err_msg,
6216 current_templates->start->name);
6217 return NULL;
6218 }
6219
6220 if (!quiet_warnings)
6221 {
6222 if (!intel_syntax
6223 && (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE)))
6224 as_warn (_("indirect %s without `*'"), t->name);
6225
6226 if (t->opcode_modifier.isprefix
6227 && t->opcode_modifier.ignoresize)
6228 {
6229 /* Warn them that a data or address size prefix doesn't
6230 affect assembly of the next line of code. */
6231 as_warn (_("stand-alone `%s' prefix"), t->name);
6232 }
6233 }
6234
6235 /* Copy the template we found. */
6236 i.tm = *t;
6237
6238 if (addr_prefix_disp != -1)
6239 i.tm.operand_types[addr_prefix_disp]
6240 = operand_types[addr_prefix_disp];
6241
6242 if (found_reverse_match)
6243 {
6244 /* If we found a reverse match we must alter the opcode direction
6245 bit and clear/flip the regmem modifier one. found_reverse_match
6246 holds bits to change (different for int & float insns). */
6247
6248 i.tm.base_opcode ^= found_reverse_match;
6249
6250 i.tm.operand_types[0] = operand_types[i.operands - 1];
6251 i.tm.operand_types[i.operands - 1] = operand_types[0];
6252
6253 /* Certain SIMD insns have their load forms specified in the opcode
6254 table, and hence we need to _set_ RegMem instead of clearing it.
6255 We need to avoid setting the bit though on insns like KMOVW. */
6256 i.tm.opcode_modifier.regmem
6257 = i.tm.opcode_modifier.modrm && i.tm.opcode_modifier.d
6258 && i.tm.operands > 2U - i.tm.opcode_modifier.sse2avx
6259 && !i.tm.opcode_modifier.regmem;
6260 }
6261
6262 return t;
6263 }
6264
6265 static int
6266 check_string (void)
6267 {
6268 unsigned int es_op = i.tm.opcode_modifier.isstring - IS_STRING_ES_OP0;
6269 unsigned int op = i.tm.operand_types[0].bitfield.baseindex ? es_op : 0;
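/* es_op is the operand required to use %es: 0 for IS_STRING_ES_OP0
templates and 1 for IS_STRING_ES_OP1 ones. */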
6270
6271 if (i.seg[op] != NULL && i.seg[op] != &es)
6272 {
6273 as_bad (_("`%s' operand %u must use `%ses' segment"),
6274 i.tm.name,
6275 intel_syntax ? i.tm.operands - es_op : es_op + 1,
6276 register_prefix);
6277 return 0;
6278 }
6279
6280 /* There's only ever one segment override allowed per instruction.
6281 This instruction possibly has a legal segment override on the
6282 second operand, so copy the segment to where non-string
6283 instructions store it, allowing common code. */
6284 i.seg[op] = i.seg[1];
6285
6286 return 1;
6287 }
6288
6289 static int
6290 process_suffix (void)
6291 {
6292 /* If matched instruction specifies an explicit instruction mnemonic
6293 suffix, use it. */
6294 if (i.tm.opcode_modifier.size == SIZE16)
6295 i.suffix = WORD_MNEM_SUFFIX;
6296 else if (i.tm.opcode_modifier.size == SIZE32)
6297 i.suffix = LONG_MNEM_SUFFIX;
6298 else if (i.tm.opcode_modifier.size == SIZE64)
6299 i.suffix = QWORD_MNEM_SUFFIX;
6300 else if (i.reg_operands
6301 && (i.operands > 1 || i.types[0].bitfield.class == Reg))
6302 {
6303 unsigned int numop = i.operands;
6304
6305 /* movsx/movzx want only their source operand considered here, for the
6306 ambiguity checking below. The suffix will be replaced afterwards
6307 to represent the destination (register). */
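/* NB: base opcode 0xfbe is movsx with a byte source and 0xfb6 the
corresponding movzx; OR-ing in 8 folds both into a single test.
0x63 together with Cpu64 is movsxd. */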
6308 if (((i.tm.base_opcode | 8) == 0xfbe && i.tm.opcode_modifier.w)
6309 || (i.tm.base_opcode == 0x63 && i.tm.cpu_flags.bitfield.cpu64))
6310 --i.operands;
6311
6312 /* If there's no instruction mnemonic suffix we try to invent one
6313 based on GPR operands. */
6314 if (!i.suffix)
6315 {
6316 /* We take i.suffix from the last register operand specified.
6317 The destination register type is more significant than the
6318 source register type. crc32 in SSE4.2 prefers the source
6319 register type. */
6320 unsigned int op = i.tm.base_opcode != 0xf20f38f0 ? i.operands : 1;
6321
6322 while (op--)
6323 if (i.tm.operand_types[op].bitfield.instance == InstanceNone
6324 || i.tm.operand_types[op].bitfield.instance == Accum)
6325 {
6326 if (i.types[op].bitfield.class != Reg)
6327 continue;
6328 if (i.types[op].bitfield.byte)
6329 i.suffix = BYTE_MNEM_SUFFIX;
6330 else if (i.types[op].bitfield.word)
6331 i.suffix = WORD_MNEM_SUFFIX;
6332 else if (i.types[op].bitfield.dword)
6333 i.suffix = LONG_MNEM_SUFFIX;
6334 else if (i.types[op].bitfield.qword)
6335 i.suffix = QWORD_MNEM_SUFFIX;
6336 else
6337 continue;
6338 break;
6339 }
6340
6341 /* As an exception, movsx/movzx silently default to a byte source
6342 in AT&T mode. */
6343 if ((i.tm.base_opcode | 8) == 0xfbe && i.tm.opcode_modifier.w
6344 && !i.suffix && !intel_syntax)
6345 i.suffix = BYTE_MNEM_SUFFIX;
6346 }
6347 else if (i.suffix == BYTE_MNEM_SUFFIX)
6348 {
6349 if (intel_syntax
6350 && i.tm.opcode_modifier.ignoresize
6351 && i.tm.opcode_modifier.no_bsuf)
6352 i.suffix = 0;
6353 else if (!check_byte_reg ())
6354 return 0;
6355 }
6356 else if (i.suffix == LONG_MNEM_SUFFIX)
6357 {
6358 if (intel_syntax
6359 && i.tm.opcode_modifier.ignoresize
6360 && i.tm.opcode_modifier.no_lsuf
6361 && !i.tm.opcode_modifier.todword
6362 && !i.tm.opcode_modifier.toqword)
6363 i.suffix = 0;
6364 else if (!check_long_reg ())
6365 return 0;
6366 }
6367 else if (i.suffix == QWORD_MNEM_SUFFIX)
6368 {
6369 if (intel_syntax
6370 && i.tm.opcode_modifier.ignoresize
6371 && i.tm.opcode_modifier.no_qsuf
6372 && !i.tm.opcode_modifier.todword
6373 && !i.tm.opcode_modifier.toqword)
6374 i.suffix = 0;
6375 else if (!check_qword_reg ())
6376 return 0;
6377 }
6378 else if (i.suffix == WORD_MNEM_SUFFIX)
6379 {
6380 if (intel_syntax
6381 && i.tm.opcode_modifier.ignoresize
6382 && i.tm.opcode_modifier.no_wsuf)
6383 i.suffix = 0;
6384 else if (!check_word_reg ())
6385 return 0;
6386 }
6387 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
6388 /* Do nothing if the instruction is going to ignore the prefix. */
6389 ;
6390 else
6391 abort ();
6392
6393 /* Undo the movsx/movzx change done above. */
6394 i.operands = numop;
6395 }
6396 else if (i.tm.opcode_modifier.defaultsize && !i.suffix)
6397 {
6398 i.suffix = stackop_size;
6399 if (stackop_size == LONG_MNEM_SUFFIX)
6400 {
6401 /* stackop_size is set to LONG_MNEM_SUFFIX for the
6402 .code16gcc directive to support 16-bit mode with
6403 32-bit address. For IRET without a suffix, generate
6404 16-bit IRET (opcode 0xcf) to return from an interrupt
6405 handler. */
6406 if (i.tm.base_opcode == 0xcf)
6407 {
6408 i.suffix = WORD_MNEM_SUFFIX;
6409 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
6410 }
6411 /* Warn about changed behavior for segment register push/pop. */
6412 else if ((i.tm.base_opcode | 1) == 0x07)
6413 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
6414 i.tm.name);
6415 }
6416 }
6417 else if (!i.suffix
6418 && (i.tm.opcode_modifier.jump == JUMP_ABSOLUTE
6419 || i.tm.opcode_modifier.jump == JUMP_BYTE
6420 || i.tm.opcode_modifier.jump == JUMP_INTERSEGMENT
6421 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
6422 && i.tm.extension_opcode <= 3)))
6423 {
6424 switch (flag_code)
6425 {
6426 case CODE_64BIT:
6427 if (!i.tm.opcode_modifier.no_qsuf)
6428 {
6429 i.suffix = QWORD_MNEM_SUFFIX;
6430 break;
6431 }
6432 /* Fall through. */
6433 case CODE_32BIT:
6434 if (!i.tm.opcode_modifier.no_lsuf)
6435 i.suffix = LONG_MNEM_SUFFIX;
6436 break;
6437 case CODE_16BIT:
6438 if (!i.tm.opcode_modifier.no_wsuf)
6439 i.suffix = WORD_MNEM_SUFFIX;
6440 break;
6441 }
6442 }
6443
6444 if (!i.suffix
6445 && (!i.tm.opcode_modifier.defaultsize
6446 /* Also cover lret/retf/iret in 64-bit mode. */
6447 || (flag_code == CODE_64BIT
6448 && !i.tm.opcode_modifier.no_lsuf
6449 && !i.tm.opcode_modifier.no_qsuf))
6450 && !i.tm.opcode_modifier.ignoresize
6451 /* Accept FLDENV et al without suffix. */
6452 && (i.tm.opcode_modifier.no_ssuf || i.tm.opcode_modifier.floatmf))
6453 {
6454 unsigned int suffixes, evex = 0;
6455
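/* Collect the permitted suffixes as a bitmask: bits 0..5 stand for
b/w/l/ld/s/q; bits 6..8 are set further down for XMMWORD/YMMWORD/
ZMMWORD memory operand sizes in Intel syntax. */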
6456 suffixes = !i.tm.opcode_modifier.no_bsuf;
6457 if (!i.tm.opcode_modifier.no_wsuf)
6458 suffixes |= 1 << 1;
6459 if (!i.tm.opcode_modifier.no_lsuf)
6460 suffixes |= 1 << 2;
6461 if (!i.tm.opcode_modifier.no_ldsuf)
6462 suffixes |= 1 << 3;
6463 if (!i.tm.opcode_modifier.no_ssuf)
6464 suffixes |= 1 << 4;
6465 if (flag_code == CODE_64BIT && !i.tm.opcode_modifier.no_qsuf)
6466 suffixes |= 1 << 5;
6467
6468 /* For [XYZ]MMWORD operands inspect operand sizes. While generally
6469 also suitable for AT&T syntax mode, it was requested that this be
6470 restricted to just Intel syntax. */
6471 if (intel_syntax && is_any_vex_encoding (&i.tm) && !i.broadcast)
6472 {
6473 unsigned int op;
6474
6475 for (op = 0; op < i.tm.operands; ++op)
6476 {
6477 if (is_evex_encoding (&i.tm)
6478 && !cpu_arch_flags.bitfield.cpuavx512vl)
6479 {
6480 if (i.tm.operand_types[op].bitfield.ymmword)
6481 i.tm.operand_types[op].bitfield.xmmword = 0;
6482 if (i.tm.operand_types[op].bitfield.zmmword)
6483 i.tm.operand_types[op].bitfield.ymmword = 0;
6484 if (!i.tm.opcode_modifier.evex
6485 || i.tm.opcode_modifier.evex == EVEXDYN)
6486 i.tm.opcode_modifier.evex = EVEX512;
6487 }
6488
6489 if (i.tm.operand_types[op].bitfield.xmmword
6490 + i.tm.operand_types[op].bitfield.ymmword
6491 + i.tm.operand_types[op].bitfield.zmmword < 2)
6492 continue;
6493
6494 /* Any properly sized operand disambiguates the insn. */
6495 if (i.types[op].bitfield.xmmword
6496 || i.types[op].bitfield.ymmword
6497 || i.types[op].bitfield.zmmword)
6498 {
6499 suffixes &= ~(7 << 6);
6500 evex = 0;
6501 break;
6502 }
6503
6504 if ((i.flags[op] & Operand_Mem)
6505 && i.tm.operand_types[op].bitfield.unspecified)
6506 {
6507 if (i.tm.operand_types[op].bitfield.xmmword)
6508 suffixes |= 1 << 6;
6509 if (i.tm.operand_types[op].bitfield.ymmword)
6510 suffixes |= 1 << 7;
6511 if (i.tm.operand_types[op].bitfield.zmmword)
6512 suffixes |= 1 << 8;
6513 if (is_evex_encoding (&i.tm))
6514 evex = EVEX512;
6515 }
6516 }
6517 }
6518
6519 /* Are multiple suffixes / operand sizes allowed? */
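/* x & (x - 1) clears the lowest set bit, so the test below is
nonzero exactly when more than one suffix bit is set. */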
6520 if (suffixes & (suffixes - 1))
6521 {
6522 if (intel_syntax
6523 && (!i.tm.opcode_modifier.defaultsize
6524 || operand_check == check_error))
6525 {
6526 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
6527 return 0;
6528 }
6529 if (operand_check == check_error)
6530 {
6531 as_bad (_("no instruction mnemonic suffix given and "
6532 "no register operands; can't size `%s'"), i.tm.name);
6533 return 0;
6534 }
6535 if (operand_check == check_warning)
6536 as_warn (_("%s; using default for `%s'"),
6537 intel_syntax
6538 ? _("ambiguous operand size")
6539 : _("no instruction mnemonic suffix given and "
6540 "no register operands"),
6541 i.tm.name);
6542
6543 if (i.tm.opcode_modifier.floatmf)
6544 i.suffix = SHORT_MNEM_SUFFIX;
6545 else if ((i.tm.base_opcode | 8) == 0xfbe
6546 || (i.tm.base_opcode == 0x63
6547 && i.tm.cpu_flags.bitfield.cpu64))
6548 /* handled below */;
6549 else if (evex)
6550 i.tm.opcode_modifier.evex = evex;
6551 else if (flag_code == CODE_16BIT)
6552 i.suffix = WORD_MNEM_SUFFIX;
6553 else if (!i.tm.opcode_modifier.no_lsuf)
6554 i.suffix = LONG_MNEM_SUFFIX;
6555 else
6556 i.suffix = QWORD_MNEM_SUFFIX;
6557 }
6558 }
6559
6560 if ((i.tm.base_opcode | 8) == 0xfbe
6561 || (i.tm.base_opcode == 0x63 && i.tm.cpu_flags.bitfield.cpu64))
6562 {
6563 /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
6564 In AT&T syntax, if there is no suffix (warned about above), the default
6565 will be byte extension. */
6566 if (i.tm.opcode_modifier.w && i.suffix && i.suffix != BYTE_MNEM_SUFFIX)
6567 i.tm.base_opcode |= 1;
6568
6569 /* For further processing, the suffix should represent the destination
6570 (register). This is already the case when one was used with
6571 mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
6572 no suffix to begin with. */
6573 if (i.tm.opcode_modifier.w || i.tm.base_opcode == 0x63 || !i.suffix)
6574 {
6575 if (i.types[1].bitfield.word)
6576 i.suffix = WORD_MNEM_SUFFIX;
6577 else if (i.types[1].bitfield.qword)
6578 i.suffix = QWORD_MNEM_SUFFIX;
6579 else
6580 i.suffix = LONG_MNEM_SUFFIX;
6581
6582 i.tm.opcode_modifier.w = 0;
6583 }
6584 }
6585
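/* A template without ModRM and with exactly one register among its
first two operands uses the short form, i.e. the register number is
encoded in the low 3 bits of the opcode itself. */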
6586 if (!i.tm.opcode_modifier.modrm && i.reg_operands && i.tm.operands < 3)
6587 i.short_form = (i.tm.operand_types[0].bitfield.class == Reg)
6588 != (i.tm.operand_types[1].bitfield.class == Reg);
6589
6590 /* Change the opcode based on the operand size given by i.suffix. */
6591 switch (i.suffix)
6592 {
6593 /* Size floating point instruction. */
6594 case LONG_MNEM_SUFFIX:
6595 if (i.tm.opcode_modifier.floatmf)
6596 {
6597 i.tm.base_opcode ^= 4;
6598 break;
6599 }
6600 /* fall through */
6601 case WORD_MNEM_SUFFIX:
6602 case QWORD_MNEM_SUFFIX:
6603 /* It's not a byte, select word/dword operation. */
6604 if (i.tm.opcode_modifier.w)
6605 {
6606 if (i.short_form)
6607 i.tm.base_opcode |= 8;
6608 else
6609 i.tm.base_opcode |= 1;
6610 }
6611 /* fall through */
6612 case SHORT_MNEM_SUFFIX:
6613 /* Now select between word & dword operations via the operand
6614 size prefix, except for instructions that will ignore this
6615 prefix anyway. */
6616 if (i.reg_operands > 0
6617 && i.types[0].bitfield.class == Reg
6618 && i.tm.opcode_modifier.addrprefixopreg
6619 && (i.tm.operand_types[0].bitfield.instance == Accum
6620 || i.operands == 1))
6621 {
6622 /* The address size override prefix changes the size of the
6623 first operand. */
6624 if ((flag_code == CODE_32BIT
6625 && i.op[0].regs->reg_type.bitfield.word)
6626 || (flag_code != CODE_32BIT
6627 && i.op[0].regs->reg_type.bitfield.dword))
6628 if (!add_prefix (ADDR_PREFIX_OPCODE))
6629 return 0;
6630 }
6631 else if (i.suffix != QWORD_MNEM_SUFFIX
6632 && !i.tm.opcode_modifier.ignoresize
6633 && !i.tm.opcode_modifier.floatmf
6634 && !is_any_vex_encoding (&i.tm)
6635 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
6636 || (flag_code == CODE_64BIT
6637 && i.tm.opcode_modifier.jump == JUMP_BYTE)))
6638 {
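/* The suffix disagrees with the current mode: an `l' suffix in
16-bit code, or a `w'/`s' suffix in 32-/64-bit code. jcxz and
loop instead need an address size prefix in 64-bit code. */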
6639 unsigned int prefix = DATA_PREFIX_OPCODE;
6640
6641 if (i.tm.opcode_modifier.jump == JUMP_BYTE) /* jcxz, loop */
6642 prefix = ADDR_PREFIX_OPCODE;
6643
6644 if (!add_prefix (prefix))
6645 return 0;
6646 }
6647
6648 /* Set mode64 for an operand. */
6649 if (i.suffix == QWORD_MNEM_SUFFIX
6650 && flag_code == CODE_64BIT
6651 && !i.tm.opcode_modifier.norex64
6652 /* Special case for xchg %rax,%rax. It is NOP and doesn't
6653 need rex64. */
6654 && ! (i.operands == 2
6655 && i.tm.base_opcode == 0x90
6656 && i.tm.extension_opcode == None
6657 && i.types[0].bitfield.instance == Accum
6658 && i.types[0].bitfield.qword
6659 && i.types[1].bitfield.instance == Accum
6660 && i.types[1].bitfield.qword))
6661 i.rex |= REX_W;
6662
6663 break;
6664 }
6665
6666 if (i.reg_operands != 0
6667 && i.operands > 1
6668 && i.tm.opcode_modifier.addrprefixopreg
6669 && i.tm.operand_types[0].bitfield.instance != Accum)
6670 {
6671 /* Check invalid register operand when the address size override
6672 prefix changes the size of register operands. */
6673 unsigned int op;
6674 enum { need_word, need_dword, need_qword } need;
6675
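/* With an address size prefix the register operand must be
word-sized in 32-bit code and dword-sized in 16- and 64-bit code;
without one it must match the mode's native size. */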
6676 if (flag_code == CODE_32BIT)
6677 need = i.prefix[ADDR_PREFIX] ? need_word : need_dword;
6678 else
6679 {
6680 if (i.prefix[ADDR_PREFIX])
6681 need = need_dword;
6682 else
6683 need = flag_code == CODE_64BIT ? need_qword : need_word;
6684 }
6685
6686 for (op = 0; op < i.operands; op++)
6687 if (i.types[op].bitfield.class == Reg
6688 && ((need == need_word
6689 && !i.op[op].regs->reg_type.bitfield.word)
6690 || (need == need_dword
6691 && !i.op[op].regs->reg_type.bitfield.dword)
6692 || (need == need_qword
6693 && !i.op[op].regs->reg_type.bitfield.qword)))
6694 {
6695 as_bad (_("invalid register operand size for `%s'"),
6696 i.tm.name);
6697 return 0;
6698 }
6699 }
6700
6701 return 1;
6702 }
6703
6704 static int
6705 check_byte_reg (void)
6706 {
6707 int op;
6708
6709 for (op = i.operands; --op >= 0;)
6710 {
6711 /* Skip non-register operands. */
6712 if (i.types[op].bitfield.class != Reg)
6713 continue;
6714
6715 /* If this is an eight bit register, it's OK. If it's the 16 or
6716 32 bit version of an eight bit register, we will just use the
6717 low portion, and that's OK too. */
6718 if (i.types[op].bitfield.byte)
6719 continue;
6720
6721 /* I/O port address operands are OK too. */
6722 if (i.tm.operand_types[op].bitfield.instance == RegD
6723 && i.tm.operand_types[op].bitfield.word)
6724 continue;
6725
6726 /* crc32 only wants its source operand checked here. */
6727 if (i.tm.base_opcode == 0xf20f38f0 && op)
6728 continue;
6729
6730 /* Any other register is bad. */
6731 if (i.types[op].bitfield.class == Reg
6732 || i.types[op].bitfield.class == RegMMX
6733 || i.types[op].bitfield.class == RegSIMD
6734 || i.types[op].bitfield.class == SReg
6735 || i.types[op].bitfield.class == RegCR
6736 || i.types[op].bitfield.class == RegDR
6737 || i.types[op].bitfield.class == RegTR)
6738 {
6739 as_bad (_("`%s%s' not allowed with `%s%c'"),
6740 register_prefix,
6741 i.op[op].regs->reg_name,
6742 i.tm.name,
6743 i.suffix);
6744 return 0;
6745 }
6746 }
6747 return 1;
6748 }
6749
6750 static int
6751 check_long_reg (void)
6752 {
6753 int op;
6754
6755 for (op = i.operands; --op >= 0;)
6756 /* Skip non-register operands. */
6757 if (i.types[op].bitfield.class != Reg)
6758 continue;
6759 /* Reject eight bit registers, except where the template requires
6760 them. (e.g. movzb) */
6761 else if (i.types[op].bitfield.byte
6762 && (i.tm.operand_types[op].bitfield.class == Reg
6763 || i.tm.operand_types[op].bitfield.instance == Accum)
6764 && (i.tm.operand_types[op].bitfield.word
6765 || i.tm.operand_types[op].bitfield.dword))
6766 {
6767 as_bad (_("`%s%s' not allowed with `%s%c'"),
6768 register_prefix,
6769 i.op[op].regs->reg_name,
6770 i.tm.name,
6771 i.suffix);
6772 return 0;
6773 }
6774 /* Error if the e prefix on a general reg is missing. */
6775 else if (i.types[op].bitfield.word
6776 && (i.tm.operand_types[op].bitfield.class == Reg
6777 || i.tm.operand_types[op].bitfield.instance == Accum)
6778 && i.tm.operand_types[op].bitfield.dword)
6779 {
6780 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6781 register_prefix, i.op[op].regs->reg_name,
6782 i.suffix);
6783 return 0;
6784 }
6785 /* Error if the r prefix on a general reg is present. */
6786 else if (i.types[op].bitfield.qword
6787 && (i.tm.operand_types[op].bitfield.class == Reg
6788 || i.tm.operand_types[op].bitfield.instance == Accum)
6789 && i.tm.operand_types[op].bitfield.dword)
6790 {
6791 if (intel_syntax
6792 && i.tm.opcode_modifier.toqword
6793 && i.types[0].bitfield.class != RegSIMD)
6794 {
6795 /* Convert to QWORD. We want REX byte. */
6796 i.suffix = QWORD_MNEM_SUFFIX;
6797 }
6798 else
6799 {
6800 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6801 register_prefix, i.op[op].regs->reg_name,
6802 i.suffix);
6803 return 0;
6804 }
6805 }
6806 return 1;
6807 }
6808
6809 static int
6810 check_qword_reg (void)
6811 {
6812 int op;
6813
6814 for (op = i.operands; --op >= 0; )
6815 /* Skip non-register operands. */
6816 if (i.types[op].bitfield.class != Reg)
6817 continue;
6818 /* Reject eight bit registers, except where the template requires
6819 them. (e.g. movzb) */
6820 else if (i.types[op].bitfield.byte
6821 && (i.tm.operand_types[op].bitfield.class == Reg
6822 || i.tm.operand_types[op].bitfield.instance == Accum)
6823 && (i.tm.operand_types[op].bitfield.word
6824 || i.tm.operand_types[op].bitfield.dword))
6825 {
6826 as_bad (_("`%s%s' not allowed with `%s%c'"),
6827 register_prefix,
6828 i.op[op].regs->reg_name,
6829 i.tm.name,
6830 i.suffix);
6831 return 0;
6832 }
6833 /* Error if the r prefix on a general reg is missing. */
6834 else if ((i.types[op].bitfield.word
6835 || i.types[op].bitfield.dword)
6836 && (i.tm.operand_types[op].bitfield.class == Reg
6837 || i.tm.operand_types[op].bitfield.instance == Accum)
6838 && i.tm.operand_types[op].bitfield.qword)
6839 {
6840 /* Prohibit these changes in the 64bit mode, since the
6841 lowering is more complicated. */
6842 if (intel_syntax
6843 && i.tm.opcode_modifier.todword
6844 && i.types[0].bitfield.class != RegSIMD)
6845 {
6846 /* Convert to DWORD. We don't want REX byte. */
6847 i.suffix = LONG_MNEM_SUFFIX;
6848 }
6849 else
6850 {
6851 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6852 register_prefix, i.op[op].regs->reg_name,
6853 i.suffix);
6854 return 0;
6855 }
6856 }
6857 return 1;
6858 }
6859
6860 static int
6861 check_word_reg (void)
6862 {
6863 int op;
6864 for (op = i.operands; --op >= 0;)
6865 /* Skip non-register operands. */
6866 if (i.types[op].bitfield.class != Reg)
6867 continue;
6868 /* Reject eight bit registers, except where the template requires
6869 them. (e.g. movzb) */
6870 else if (i.types[op].bitfield.byte
6871 && (i.tm.operand_types[op].bitfield.class == Reg
6872 || i.tm.operand_types[op].bitfield.instance == Accum)
6873 && (i.tm.operand_types[op].bitfield.word
6874 || i.tm.operand_types[op].bitfield.dword))
6875 {
6876 as_bad (_("`%s%s' not allowed with `%s%c'"),
6877 register_prefix,
6878 i.op[op].regs->reg_name,
6879 i.tm.name,
6880 i.suffix);
6881 return 0;
6882 }
6883 /* Error if the e or r prefix on a general reg is present. */
6884 else if ((i.types[op].bitfield.dword
6885 || i.types[op].bitfield.qword)
6886 && (i.tm.operand_types[op].bitfield.class == Reg
6887 || i.tm.operand_types[op].bitfield.instance == Accum)
6888 && i.tm.operand_types[op].bitfield.word)
6889 {
6890 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6891 register_prefix, i.op[op].regs->reg_name,
6892 i.suffix);
6893 return 0;
6894 }
6895 return 1;
6896 }
6897
6898 static int
6899 update_imm (unsigned int j)
6900 {
6901 i386_operand_type overlap = i.types[j];
6902 if ((overlap.bitfield.imm8
6903 || overlap.bitfield.imm8s
6904 || overlap.bitfield.imm16
6905 || overlap.bitfield.imm32
6906 || overlap.bitfield.imm32s
6907 || overlap.bitfield.imm64)
6908 && !operand_type_equal (&overlap, &imm8)
6909 && !operand_type_equal (&overlap, &imm8s)
6910 && !operand_type_equal (&overlap, &imm16)
6911 && !operand_type_equal (&overlap, &imm32)
6912 && !operand_type_equal (&overlap, &imm32s)
6913 && !operand_type_equal (&overlap, &imm64))
6914 {
6915 if (i.suffix)
6916 {
6917 i386_operand_type temp;
6918
6919 operand_type_set (&temp, 0);
6920 if (i.suffix == BYTE_MNEM_SUFFIX)
6921 {
6922 temp.bitfield.imm8 = overlap.bitfield.imm8;
6923 temp.bitfield.imm8s = overlap.bitfield.imm8s;
6924 }
6925 else if (i.suffix == WORD_MNEM_SUFFIX)
6926 temp.bitfield.imm16 = overlap.bitfield.imm16;
6927 else if (i.suffix == QWORD_MNEM_SUFFIX)
6928 {
6929 temp.bitfield.imm64 = overlap.bitfield.imm64;
6930 temp.bitfield.imm32s = overlap.bitfield.imm32s;
6931 }
6932 else
6933 temp.bitfield.imm32 = overlap.bitfield.imm32;
6934 overlap = temp;
6935 }
6936 else if (operand_type_equal (&overlap, &imm16_32_32s)
6937 || operand_type_equal (&overlap, &imm16_32)
6938 || operand_type_equal (&overlap, &imm16_32s))
6939 {
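/* The effective immediate width is 16 bits exactly when 16-bit
code carries no data size prefix or 32-bit code carries one. */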
6940 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
6941 overlap = imm16;
6942 else
6943 overlap = imm32s;
6944 }
6945 if (!operand_type_equal (&overlap, &imm8)
6946 && !operand_type_equal (&overlap, &imm8s)
6947 && !operand_type_equal (&overlap, &imm16)
6948 && !operand_type_equal (&overlap, &imm32)
6949 && !operand_type_equal (&overlap, &imm32s)
6950 && !operand_type_equal (&overlap, &imm64))
6951 {
6952 as_bad (_("no instruction mnemonic suffix given; "
6953 "can't determine immediate size"));
6954 return 0;
6955 }
6956 }
6957 i.types[j] = overlap;
6958
6959 return 1;
6960 }
6961
6962 static int
6963 finalize_imm (void)
6964 {
6965 unsigned int j, n;
6966
6967 /* Update the first 2 immediate operands. */
6968 n = i.operands > 2 ? 2 : i.operands;
6969 if (n)
6970 {
6971 for (j = 0; j < n; j++)
6972 if (update_imm (j) == 0)
6973 return 0;
6974
6975 /* The 3rd operand can't be an immediate operand. */
6976 gas_assert (operand_type_check (i.types[2], imm) == 0);
6977 }
6978
6979 return 1;
6980 }
6981
6982 static int
6983 process_operands (void)
6984 {
6985 /* Default segment register this instruction will use for memory
6986 accesses. 0 means unknown. This is only for optimizing out
6987 unnecessary segment overrides. */
6988 const seg_entry *default_seg = 0;
6989
6990 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
6991 {
6992 unsigned int dupl = i.operands;
6993 unsigned int dest = dupl - 1;
6994 unsigned int j;
6995
6996 /* The destination must be an xmm register. */
6997 gas_assert (i.reg_operands
6998 && MAX_OPERANDS > dupl
6999 && operand_type_equal (&i.types[dest], &regxmm));
7000
7001 if (i.tm.operand_types[0].bitfield.instance == Accum
7002 && i.tm.operand_types[0].bitfield.xmmword)
7003 {
7004 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
7005 {
7006 /* Keep xmm0 for instructions with VEX prefix and 3
7007 sources. */
7008 i.tm.operand_types[0].bitfield.instance = InstanceNone;
7009 i.tm.operand_types[0].bitfield.class = RegSIMD;
7010 goto duplicate;
7011 }
7012 else
7013 {
7014 /* We remove the first xmm0 and keep the number of
7015 operands unchanged, which in fact duplicates the
7016 destination. */
7017 for (j = 1; j < i.operands; j++)
7018 {
7019 i.op[j - 1] = i.op[j];
7020 i.types[j - 1] = i.types[j];
7021 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
7022 i.flags[j - 1] = i.flags[j];
7023 }
7024 }
7025 }
7026 else if (i.tm.opcode_modifier.implicit1stxmm0)
7027 {
7028 gas_assert ((MAX_OPERANDS - 1) > dupl
7029 && (i.tm.opcode_modifier.vexsources
7030 == VEX3SOURCES));
7031
7032 /* Add the implicit xmm0 for instructions with VEX prefix
7033 and 3 sources. */
7034 for (j = i.operands; j > 0; j--)
7035 {
7036 i.op[j] = i.op[j - 1];
7037 i.types[j] = i.types[j - 1];
7038 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
7039 i.flags[j] = i.flags[j - 1];
7040 }
7041 i.op[0].regs
7042 = (const reg_entry *) hash_find (reg_hash, "xmm0");
7043 i.types[0] = regxmm;
7044 i.tm.operand_types[0] = regxmm;
7045
7046 i.operands += 2;
7047 i.reg_operands += 2;
7048 i.tm.operands += 2;
7049
7050 dupl++;
7051 dest++;
7052 i.op[dupl] = i.op[dest];
7053 i.types[dupl] = i.types[dest];
7054 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
7055 i.flags[dupl] = i.flags[dest];
7056 }
7057 else
7058 {
7059 duplicate:
7060 i.operands++;
7061 i.reg_operands++;
7062 i.tm.operands++;
7063
7064 i.op[dupl] = i.op[dest];
7065 i.types[dupl] = i.types[dest];
7066 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
7067 i.flags[dupl] = i.flags[dest];
7068 }
7069
7070 if (i.tm.opcode_modifier.immext)
7071 process_immext ();
7072 }
7073 else if (i.tm.operand_types[0].bitfield.instance == Accum
7074 && i.tm.operand_types[0].bitfield.xmmword)
7075 {
7076 unsigned int j;
7077
7078 for (j = 1; j < i.operands; j++)
7079 {
7080 i.op[j - 1] = i.op[j];
7081 i.types[j - 1] = i.types[j];
7082
7083 /* We need to adjust fields in i.tm since they are used by
7084 build_modrm_byte. */
7085 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
7086
7087 i.flags[j - 1] = i.flags[j];
7088 }
7089
7090 i.operands--;
7091 i.reg_operands--;
7092 i.tm.operands--;
7093 }
7094 else if (i.tm.opcode_modifier.implicitquadgroup)
7095 {
7096 unsigned int regnum, first_reg_in_group, last_reg_in_group;
7097
7098 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7099 gas_assert (i.operands >= 2 && i.types[1].bitfield.class == RegSIMD);
7100 regnum = register_number (i.op[1].regs);
7101 first_reg_in_group = regnum & ~3;
7102 last_reg_in_group = first_reg_in_group + 3;
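/* regnum & ~3 rounds down to the first register of the aligned
group of four that the instruction implicitly consumes. */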
7103 if (regnum != first_reg_in_group)
7104 as_warn (_("source register `%s%s' implicitly denotes"
7105 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7106 register_prefix, i.op[1].regs->reg_name,
7107 register_prefix, i.op[1].regs->reg_name, first_reg_in_group,
7108 register_prefix, i.op[1].regs->reg_name, last_reg_in_group,
7109 i.tm.name);
7110 }
7111 else if (i.tm.opcode_modifier.regkludge)
7112 {
7113 /* The imul $imm, %reg instruction is converted into
7114 imul $imm, %reg, %reg, and the clr %reg instruction
7115 is converted into xor %reg, %reg. */
7116
7117 unsigned int first_reg_op;
7118
7119 if (operand_type_check (i.types[0], reg))
7120 first_reg_op = 0;
7121 else
7122 first_reg_op = 1;
7123 /* Pretend we saw the extra register operand. */
7124 gas_assert (i.reg_operands == 1
7125 && i.op[first_reg_op + 1].regs == 0);
7126 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
7127 i.types[first_reg_op + 1] = i.types[first_reg_op];
7128 i.operands++;
7129 i.reg_operands++;
7130 }
7131
7132 if (i.tm.opcode_modifier.modrm)
7133 {
7134 /* The opcode is completed (modulo i.tm.extension_opcode which
7135 must be put into the modrm byte). Now, we make the modrm and
7136 index base bytes based on all the info we've collected. */
7137
7138 default_seg = build_modrm_byte ();
7139 }
7140 else if (i.types[0].bitfield.class == SReg)
7141 {
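/* Reject pop %cs, and in 64-bit mode any push/pop of %es, %cs,
%ss or %ds (only %fs and %gs can be pushed or popped there). */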
7142 if (flag_code != CODE_64BIT
7143 ? i.tm.base_opcode == POP_SEG_SHORT
7144 && i.op[0].regs->reg_num == 1
7145 : (i.tm.base_opcode | 1) == POP_SEG386_SHORT
7146 && i.op[0].regs->reg_num < 4)
7147 {
7148 as_bad (_("you can't `%s %s%s'"),
7149 i.tm.name, register_prefix, i.op[0].regs->reg_name);
7150 return 0;
7151 }
7152 if (i.op[0].regs->reg_num > 3 && i.tm.opcode_length == 1)
7153 {
7154 i.tm.base_opcode ^= POP_SEG_SHORT ^ POP_SEG386_SHORT;
7155 i.tm.opcode_length = 2;
7156 }
7157 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
7158 }
7159 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
7160 {
7161 default_seg = &ds;
7162 }
7163 else if (i.tm.opcode_modifier.isstring)
7164 {
7165 /* For the string instructions that allow a segment override
7166 on one of their operands, the default segment is ds. */
7167 default_seg = &ds;
7168 }
7169 else if (i.short_form)
7170 {
7171 /* The register or float register operand is in operand
7172 0 or 1. */
7173 unsigned int op = i.tm.operand_types[0].bitfield.class != Reg;
7174
7175 /* Register goes in low 3 bits of opcode. */
7176 i.tm.base_opcode |= i.op[op].regs->reg_num;
7177 if ((i.op[op].regs->reg_flags & RegRex) != 0)
7178 i.rex |= REX_B;
7179 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
7180 {
7181 /* Warn about some common errors, but press on regardless.
7182 The first case can be generated by gcc (<= 2.8.1). */
7183 if (i.operands == 2)
7184 {
7185 /* Reversed arguments on faddp, fsubp, etc. */
7186 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
7187 register_prefix, i.op[!intel_syntax].regs->reg_name,
7188 register_prefix, i.op[intel_syntax].regs->reg_name);
7189 }
7190 else
7191 {
7192 /* Extraneous `l' suffix on fp insn. */
7193 as_warn (_("translating to `%s %s%s'"), i.tm.name,
7194 register_prefix, i.op[0].regs->reg_name);
7195 }
7196 }
7197 }
7198
7199 if ((i.seg[0] || i.prefix[SEG_PREFIX])
7200 && i.tm.base_opcode == 0x8d /* lea */
7201 && !is_any_vex_encoding (&i.tm))
7202 {
7203 if (!quiet_warnings)
7204 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
7205 if (optimize)
7206 {
7207 i.seg[0] = NULL;
7208 i.prefix[SEG_PREFIX] = 0;
7209 }
7210 }
7211
7212 /* If a segment was explicitly specified, and the specified segment
7213 is neither the default nor the one already recorded from a prefix,
7214 use an opcode prefix to select it. If we never figured out what
7215 the default segment is, then default_seg will be zero at this
7216 point, and the specified segment prefix will always be used. */
7217 if (i.seg[0]
7218 && i.seg[0] != default_seg
7219 && i.seg[0]->seg_prefix != i.prefix[SEG_PREFIX])
7220 {
7221 if (!add_prefix (i.seg[0]->seg_prefix))
7222 return 0;
7223 }
7224 return 1;
7225 }
7226
7227 static const seg_entry *
7228 build_modrm_byte (void)
7229 {
7230 const seg_entry *default_seg = 0;
7231 unsigned int source, dest;
7232 int vex_3_sources;
7233
7234 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
7235 if (vex_3_sources)
7236 {
7237 unsigned int nds, reg_slot;
7238 expressionS *exp;
7239
7240 dest = i.operands - 1;
7241 nds = dest - 1;
7242
7243 /* There are 2 kinds of instructions:
7244 1. 5 operands: 4 register operands or 3 register operands
7245 plus 1 memory operand plus one Imm4 operand, VexXDS, and
7246 VexW0 or VexW1. The destination must be either XMM, YMM or
7247 ZMM register.
7248 2. 4 operands: 4 register operands or 3 register operands
7249 plus 1 memory operand, with VexXDS. */
7250 gas_assert ((i.reg_operands == 4
7251 || (i.reg_operands == 3 && i.mem_operands == 1))
7252 && i.tm.opcode_modifier.vexvvvv == VEXXDS
7253 && i.tm.opcode_modifier.vexw
7254 && i.tm.operand_types[dest].bitfield.class == RegSIMD);
7255
7256 /* If VexW1 is set, the first non-immediate operand is the source and
7257 the second non-immediate one is encoded in the immediate operand. */
7258 if (i.tm.opcode_modifier.vexw == VEXW1)
7259 {
7260 source = i.imm_operands;
7261 reg_slot = i.imm_operands + 1;
7262 }
7263 else
7264 {
7265 source = i.imm_operands + 1;
7266 reg_slot = i.imm_operands;
7267 }
7268
7269 if (i.imm_operands == 0)
7270 {
7271 /* When there is no immediate operand, generate an 8bit
7272 immediate operand to encode the first operand. */
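/* The register number goes into the high four bits of the imm8
(hence the << 4 below), which is also why RegVRex registers,
i.e. the upper 16 SIMD registers, cannot be encoded here. */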
7273 exp = &im_expressions[i.imm_operands++];
7274 i.op[i.operands].imms = exp;
7275 i.types[i.operands] = imm8;
7276 i.operands++;
7277
7278 gas_assert (i.tm.operand_types[reg_slot].bitfield.class == RegSIMD);
7279 exp->X_op = O_constant;
7280 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
7281 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
7282 }
7283 else
7284 {
7285 gas_assert (i.imm_operands == 1);
7286 gas_assert (fits_in_imm4 (i.op[0].imms->X_add_number));
7287 gas_assert (!i.tm.opcode_modifier.immext);
7288
7289 /* Turn on Imm8 again so that output_imm will generate it. */
7290 i.types[0].bitfield.imm8 = 1;
7291
7292 gas_assert (i.tm.operand_types[reg_slot].bitfield.class == RegSIMD);
7293 i.op[0].imms->X_add_number
7294 |= register_number (i.op[reg_slot].regs) << 4;
7295 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
7296 }
7297
7298 gas_assert (i.tm.operand_types[nds].bitfield.class == RegSIMD);
7299 i.vex.register_specifier = i.op[nds].regs;
7300 }
7301 else
7302 source = dest = 0;
7303
7304 /* i.reg_operands MUST be the number of real register operands;
7305 implicit registers do not count. If there are 3 register
7306 operands, it must be an instruction with VexNDS. For an
7307 instruction with VexNDD, the destination register is encoded
7308 in the VEX prefix. If there are 4 register operands, it must be
7309 an instruction with a VEX prefix and 3 sources. */
7310 if (i.mem_operands == 0
7311 && ((i.reg_operands == 2
7312 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
7313 || (i.reg_operands == 3
7314 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
7315 || (i.reg_operands == 4 && vex_3_sources)))
7316 {
7317 switch (i.operands)
7318 {
7319 case 2:
7320 source = 0;
7321 break;
7322 case 3:
7323 /* When there are 3 operands, one of them may be immediate,
7324 which may be the first or the last operand. Otherwise,
7325 the first operand must be the shift count register (cl) or it
7326 is an instruction with VexNDS. */
7327 gas_assert (i.imm_operands == 1
7328 || (i.imm_operands == 0
7329 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
7330 || (i.types[0].bitfield.instance == RegC
7331 && i.types[0].bitfield.byte))));
7332 if (operand_type_check (i.types[0], imm)
7333 || (i.types[0].bitfield.instance == RegC
7334 && i.types[0].bitfield.byte))
7335 source = 1;
7336 else
7337 source = 0;
7338 break;
7339 case 4:
7340 /* When there are 4 operands, the first two must be 8bit
7341 immediate operands. The source operand will be the 3rd
7342 one.
7343
7344 For instructions with VexNDS, if the first operand is
7345 an imm8, the source operand is the 2nd one. If the last
7346 operand is imm8, the source operand is the first one. */
7347 gas_assert ((i.imm_operands == 2
7348 && i.types[0].bitfield.imm8
7349 && i.types[1].bitfield.imm8)
7350 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
7351 && i.imm_operands == 1
7352 && (i.types[0].bitfield.imm8
7353 || i.types[i.operands - 1].bitfield.imm8
7354 || i.rounding)));
7355 if (i.imm_operands == 2)
7356 source = 2;
7357 else
7358 {
7359 if (i.types[0].bitfield.imm8)
7360 source = 1;
7361 else
7362 source = 0;
7363 }
7364 break;
7365 case 5:
7366 if (is_evex_encoding (&i.tm))
7367 {
7368 /* For EVEX instructions, when there are 5 operands, the
7369 first one must be an immediate operand. If the second one
7370 is an immediate operand, the source operand is the 3rd
7371 one. If the last one is an immediate operand, the source
7372 operand is the 2nd one. */
7373 gas_assert (i.imm_operands == 2
7374 && i.tm.opcode_modifier.sae
7375 && operand_type_check (i.types[0], imm));
7376 if (operand_type_check (i.types[1], imm))
7377 source = 2;
7378 else if (operand_type_check (i.types[4], imm))
7379 source = 1;
7380 else
7381 abort ();
7382 }
7383 break;
7384 default:
7385 abort ();
7386 }
7387
7388 if (!vex_3_sources)
7389 {
7390 dest = source + 1;
7391
7392 /* RC/SAE operand could be between DEST and SRC. That happens
7393 when one operand is a GPR and the other one is an XMM/YMM/ZMM
7394 register. */
7395 if (i.rounding && i.rounding->operand == (int) dest)
7396 dest++;
7397
7398 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
7399 {
7400 /* For instructions with VexNDS, the register-only source
7401 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
7402 register. It is encoded in VEX prefix. */
7403
7404 i386_operand_type op;
7405 unsigned int vvvv;
7406
7407 /* Check register-only source operand when two source
7408 operands are swapped. */
7409 if (!i.tm.operand_types[source].bitfield.baseindex
7410 && i.tm.operand_types[dest].bitfield.baseindex)
7411 {
7412 vvvv = source;
7413 source = dest;
7414 }
7415 else
7416 vvvv = dest;
7417
7418 op = i.tm.operand_types[vvvv];
7419 if ((dest + 1) >= i.operands
7420 || ((op.bitfield.class != Reg
7421 || (!op.bitfield.dword && !op.bitfield.qword))
7422 && op.bitfield.class != RegSIMD
7423 && !operand_type_equal (&op, &regmask)))
7424 abort ();
7425 i.vex.register_specifier = i.op[vvvv].regs;
7426 dest++;
7427 }
7428 }
7429
7430 i.rm.mode = 3;
7431 /* One of the register operands will be encoded in the i.rm.reg
7432 field, the other in the combined i.rm.mode and i.rm.regmem
7433 fields. If no form of this instruction supports a memory
7434 destination operand, then we assume the source operand may
7435 sometimes be a memory operand and so we need to store the
7436 destination in the i.rm.reg field. */
7437 if (!i.tm.opcode_modifier.regmem
7438 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
7439 {
7440 i.rm.reg = i.op[dest].regs->reg_num;
7441 i.rm.regmem = i.op[source].regs->reg_num;
7442 if (i.op[dest].regs->reg_type.bitfield.class == RegMMX
7443 || i.op[source].regs->reg_type.bitfield.class == RegMMX)
7444 i.has_regmmx = TRUE;
7445 else if (i.op[dest].regs->reg_type.bitfield.class == RegSIMD
7446 || i.op[source].regs->reg_type.bitfield.class == RegSIMD)
7447 {
7448 if (i.types[dest].bitfield.zmmword
7449 || i.types[source].bitfield.zmmword)
7450 i.has_regzmm = TRUE;
7451 else if (i.types[dest].bitfield.ymmword
7452 || i.types[source].bitfield.ymmword)
7453 i.has_regymm = TRUE;
7454 else
7455 i.has_regxmm = TRUE;
7456 }
7457 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
7458 i.rex |= REX_R;
7459 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
7460 i.vrex |= REX_R;
7461 if ((i.op[source].regs->reg_flags & RegRex) != 0)
7462 i.rex |= REX_B;
7463 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
7464 i.vrex |= REX_B;
7465 }
7466 else
7467 {
7468 i.rm.reg = i.op[source].regs->reg_num;
7469 i.rm.regmem = i.op[dest].regs->reg_num;
7470 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
7471 i.rex |= REX_B;
7472 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
7473 i.vrex |= REX_B;
7474 if ((i.op[source].regs->reg_flags & RegRex) != 0)
7475 i.rex |= REX_R;
7476 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
7477 i.vrex |= REX_R;
7478 }
7479 if (flag_code != CODE_64BIT && (i.rex & REX_R))
7480 {
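/* REX.R cannot be expressed outside of 64-bit mode; for control
registers the AMD-documented alternative is a LOCK prefix, which
is how %cr8 and above are reached. */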
7481 if (i.types[!i.tm.opcode_modifier.regmem].bitfield.class != RegCR)
7482 abort ();
7483 i.rex &= ~REX_R;
7484 add_prefix (LOCK_PREFIX_OPCODE);
7485 }
7486 }
7487 else
7488 { /* If it's not 2 reg operands... */
7489 unsigned int mem;
7490
7491 if (i.mem_operands)
7492 {
7493 unsigned int fake_zero_displacement = 0;
7494 unsigned int op;
7495
7496 for (op = 0; op < i.operands; op++)
7497 if (i.flags[op] & Operand_Mem)
7498 break;
7499 gas_assert (op < i.operands);
7500
7501 if (i.tm.opcode_modifier.vecsib)
7502 {
7503 if (i.index_reg->reg_num == RegIZ)
7504 abort ();
7505
7506 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
7507 if (!i.base_reg)
7508 {
7509 i.sib.base = NO_BASE_REGISTER;
7510 i.sib.scale = i.log2_scale_factor;
7511 i.types[op].bitfield.disp8 = 0;
7512 i.types[op].bitfield.disp16 = 0;
7513 i.types[op].bitfield.disp64 = 0;
7514 if (flag_code != CODE_64BIT || i.prefix[ADDR_PREFIX])
7515 {
7516 /* Must be 32 bit */
7517 i.types[op].bitfield.disp32 = 1;
7518 i.types[op].bitfield.disp32s = 0;
7519 }
7520 else
7521 {
7522 i.types[op].bitfield.disp32 = 0;
7523 i.types[op].bitfield.disp32s = 1;
7524 }
7525 }
7526 i.sib.index = i.index_reg->reg_num;
7527 if ((i.index_reg->reg_flags & RegRex) != 0)
7528 i.rex |= REX_X;
7529 if ((i.index_reg->reg_flags & RegVRex) != 0)
7530 i.vrex |= REX_X;
7531 }
7532
7533 default_seg = &ds;
7534
7535 if (i.base_reg == 0)
7536 {
7537 i.rm.mode = 0;
7538 if (!i.disp_operands)
7539 fake_zero_displacement = 1;
7540 if (i.index_reg == 0)
7541 {
7542 i386_operand_type newdisp;
7543
7544 gas_assert (!i.tm.opcode_modifier.vecsib);
7545 /* Operand is just <disp> */
7546 if (flag_code == CODE_64BIT)
7547 {
7548 /* 64bit mode replaces 32bit absolute
7549 addressing with RIP relative addressing;
7550 absolute addressing is encoded by one of the
7551 redundant SIB forms. */
7552 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
7553 i.sib.base = NO_BASE_REGISTER;
7554 i.sib.index = NO_INDEX_REGISTER;
7555 newdisp = (!i.prefix[ADDR_PREFIX] ? disp32s : disp32);
7556 }
7557 else if ((flag_code == CODE_16BIT)
7558 ^ (i.prefix[ADDR_PREFIX] != 0))
7559 {
7560 i.rm.regmem = NO_BASE_REGISTER_16;
7561 newdisp = disp16;
7562 }
7563 else
7564 {
7565 i.rm.regmem = NO_BASE_REGISTER;
7566 newdisp = disp32;
7567 }
7568 i.types[op] = operand_type_and_not (i.types[op], anydisp);
7569 i.types[op] = operand_type_or (i.types[op], newdisp);
7570 }
7571 else if (!i.tm.opcode_modifier.vecsib)
7572 {
7573 /* !i.base_reg && i.index_reg */
7574 if (i.index_reg->reg_num == RegIZ)
7575 i.sib.index = NO_INDEX_REGISTER;
7576 else
7577 i.sib.index = i.index_reg->reg_num;
7578 i.sib.base = NO_BASE_REGISTER;
7579 i.sib.scale = i.log2_scale_factor;
7580 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
7581 i.types[op].bitfield.disp8 = 0;
7582 i.types[op].bitfield.disp16 = 0;
7583 i.types[op].bitfield.disp64 = 0;
7584 if (flag_code != CODE_64BIT || i.prefix[ADDR_PREFIX])
7585 {
7586 /* Must be 32 bit */
7587 i.types[op].bitfield.disp32 = 1;
7588 i.types[op].bitfield.disp32s = 0;
7589 }
7590 else
7591 {
7592 i.types[op].bitfield.disp32 = 0;
7593 i.types[op].bitfield.disp32s = 1;
7594 }
7595 if ((i.index_reg->reg_flags & RegRex) != 0)
7596 i.rex |= REX_X;
7597 }
7598 }
7599 /* RIP addressing for 64bit mode. */
7600 else if (i.base_reg->reg_num == RegIP)
7601 {
7602 gas_assert (!i.tm.opcode_modifier.vecsib);
7603 i.rm.regmem = NO_BASE_REGISTER;
7604 i.types[op].bitfield.disp8 = 0;
7605 i.types[op].bitfield.disp16 = 0;
7606 i.types[op].bitfield.disp32 = 0;
7607 i.types[op].bitfield.disp32s = 1;
7608 i.types[op].bitfield.disp64 = 0;
7609 i.flags[op] |= Operand_PCrel;
7610 if (! i.disp_operands)
7611 fake_zero_displacement = 1;
7612 }
7613 else if (i.base_reg->reg_type.bitfield.word)
7614 {
7615 gas_assert (!i.tm.opcode_modifier.vecsib);
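/* 16-bit r/m encodings: 0 (%bx,%si), 1 (%bx,%di), 2 (%bp,%si),
3 (%bp,%di), 4 (%si), 5 (%di), 6 (%bp) (or disp16 when mod == 0),
7 (%bx). */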
7616 switch (i.base_reg->reg_num)
7617 {
7618 case 3: /* (%bx) */
7619 if (i.index_reg == 0)
7620 i.rm.regmem = 7;
7621 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
7622 i.rm.regmem = i.index_reg->reg_num - 6;
7623 break;
7624 case 5: /* (%bp) */
7625 default_seg = &ss;
7626 if (i.index_reg == 0)
7627 {
7628 i.rm.regmem = 6;
7629 if (operand_type_check (i.types[op], disp) == 0)
7630 {
7631 /* fake (%bp) into 0(%bp) */
7632 i.types[op].bitfield.disp8 = 1;
7633 fake_zero_displacement = 1;
7634 }
7635 }
7636 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
7637 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
7638 break;
7639 default: /* (%si) -> 4 or (%di) -> 5 */
7640 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
7641 }
7642 i.rm.mode = mode_from_disp_size (i.types[op]);
7643 }
7644 else /* i.base_reg and 32/64 bit mode */
7645 {
7646 if (flag_code == CODE_64BIT
7647 && operand_type_check (i.types[op], disp))
7648 {
7649 i.types[op].bitfield.disp16 = 0;
7650 i.types[op].bitfield.disp64 = 0;
7651 if (i.prefix[ADDR_PREFIX] == 0)
7652 {
7653 i.types[op].bitfield.disp32 = 0;
7654 i.types[op].bitfield.disp32s = 1;
7655 }
7656 else
7657 {
7658 i.types[op].bitfield.disp32 = 1;
7659 i.types[op].bitfield.disp32s = 0;
7660 }
7661 }
7662
7663 if (!i.tm.opcode_modifier.vecsib)
7664 i.rm.regmem = i.base_reg->reg_num;
7665 if ((i.base_reg->reg_flags & RegRex) != 0)
7666 i.rex |= REX_B;
7667 i.sib.base = i.base_reg->reg_num;
7668 /* x86-64 ignores REX prefix bit here to avoid decoder
7669 complications. */
7670 if (!(i.base_reg->reg_flags & RegRex)
7671 && (i.base_reg->reg_num == EBP_REG_NUM
7672 || i.base_reg->reg_num == ESP_REG_NUM))
7673 default_seg = &ss;
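/* A base register number of 5 (%ebp, %rbp or %r13) cannot be
encoded with mod == 0, that form meaning disp32 without a base,
so fake a zero disp8. */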
7674 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
7675 {
7676 fake_zero_displacement = 1;
7677 i.types[op].bitfield.disp8 = 1;
7678 }
7679 i.sib.scale = i.log2_scale_factor;
7680 if (i.index_reg == 0)
7681 {
7682 gas_assert (!i.tm.opcode_modifier.vecsib);
7683 /* <disp>(%esp) becomes two byte modrm with no index
7684 register. We've already stored the code for esp
7685 in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
7686 Any base register besides %esp will not use the
7687 extra modrm byte. */
7688 i.sib.index = NO_INDEX_REGISTER;
7689 }
7690 else if (!i.tm.opcode_modifier.vecsib)
7691 {
7692 if (i.index_reg->reg_num == RegIZ)
7693 i.sib.index = NO_INDEX_REGISTER;
7694 else
7695 i.sib.index = i.index_reg->reg_num;
7696 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
7697 if ((i.index_reg->reg_flags & RegRex) != 0)
7698 i.rex |= REX_X;
7699 }
7700
7701 if (i.disp_operands
7702 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
7703 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
7704 i.rm.mode = 0;
7705 else
7706 {
7707 if (!fake_zero_displacement
7708 && !i.disp_operands
7709 && i.disp_encoding)
7710 {
7711 fake_zero_displacement = 1;
7712 if (i.disp_encoding == disp_encoding_8bit)
7713 i.types[op].bitfield.disp8 = 1;
7714 else
7715 i.types[op].bitfield.disp32 = 1;
7716 }
7717 i.rm.mode = mode_from_disp_size (i.types[op]);
7718 }
7719 }
7720
7721 if (fake_zero_displacement)
7722 {
7723 /* Fakes a zero displacement assuming that i.types[op]
7724 holds the correct displacement size. */
7725 expressionS *exp;
7726
7727 gas_assert (i.op[op].disps == 0);
7728 exp = &disp_expressions[i.disp_operands++];
7729 i.op[op].disps = exp;
7730 exp->X_op = O_constant;
7731 exp->X_add_number = 0;
7732 exp->X_add_symbol = (symbolS *) 0;
7733 exp->X_op_symbol = (symbolS *) 0;
7734 }
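/* E.g. `movl (%ebp), %eax' can't use mod=00, since rm=101 there means a
bare disp32; it is therefore emitted as 8b 45 00, mod=01 with the fake
zero disp8 synthesized above. (%r13) in 64-bit mode is handled the same
way, as the reg_num == 5 check above ignores RegRex. */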
7735
7736 mem = op;
7737 }
7738 else
7739 mem = ~0;
7740
7741 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
7742 {
7743 if (operand_type_check (i.types[0], imm))
7744 i.vex.register_specifier = NULL;
7745 else
7746 {
7747 /* VEX.vvvv encodes one of the sources when the first
7748 operand is not an immediate. */
7749 if (i.tm.opcode_modifier.vexw == VEXW0)
7750 i.vex.register_specifier = i.op[0].regs;
7751 else
7752 i.vex.register_specifier = i.op[1].regs;
7753 }
7754
7755 /* Destination is an XMM register encoded in the ModRM.reg
7756 and VEX.R bit. */
7757 i.rm.reg = i.op[2].regs->reg_num;
7758 if ((i.op[2].regs->reg_flags & RegRex) != 0)
7759 i.rex |= REX_R;
7760
7761 /* ModRM.rm and VEX.B encode the other source. */
7762 if (!i.mem_operands)
7763 {
7764 i.rm.mode = 3;
7765
7766 if (i.tm.opcode_modifier.vexw == VEXW0)
7767 i.rm.regmem = i.op[1].regs->reg_num;
7768 else
7769 i.rm.regmem = i.op[0].regs->reg_num;
7770
7771 if ((i.op[1].regs->reg_flags & RegRex) != 0)
7772 i.rex |= REX_B;
7773 }
7774 }
7775 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
7776 {
7777 i.vex.register_specifier = i.op[2].regs;
7778 if (!i.mem_operands)
7779 {
7780 i.rm.mode = 3;
7781 i.rm.regmem = i.op[1].regs->reg_num;
7782 if ((i.op[1].regs->reg_flags & RegRex) != 0)
7783 i.rex |= REX_B;
7784 }
7785 }
7786 /* Fill in i.rm.reg or i.rm.regmem field with register operand
7787 (if any) based on i.tm.extension_opcode. Again, we must be
7788 careful to make sure that segment/control/debug/test/MMX
7789 registers are coded into the i.rm.reg field. */
7790 else if (i.reg_operands)
7791 {
7792 unsigned int op;
7793 unsigned int vex_reg = ~0;
7794
7795 for (op = 0; op < i.operands; op++)
7796 {
7797 if (i.types[op].bitfield.class == Reg
7798 || i.types[op].bitfield.class == RegBND
7799 || i.types[op].bitfield.class == RegMask
7800 || i.types[op].bitfield.class == SReg
7801 || i.types[op].bitfield.class == RegCR
7802 || i.types[op].bitfield.class == RegDR
7803 || i.types[op].bitfield.class == RegTR)
7804 break;
7805 if (i.types[op].bitfield.class == RegSIMD)
7806 {
7807 if (i.types[op].bitfield.zmmword)
7808 i.has_regzmm = TRUE;
7809 else if (i.types[op].bitfield.ymmword)
7810 i.has_regymm = TRUE;
7811 else
7812 i.has_regxmm = TRUE;
7813 break;
7814 }
7815 if (i.types[op].bitfield.class == RegMMX)
7816 {
7817 i.has_regmmx = TRUE;
7818 break;
7819 }
7820 }
7821
7822 if (vex_3_sources)
7823 op = dest;
7824 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
7825 {
7826 /* For instructions with VexNDS, the register-only
7827 source operand is encoded in VEX prefix. */
7828 gas_assert (mem != (unsigned int) ~0);
7829
7830 if (op > mem)
7831 {
7832 vex_reg = op++;
7833 gas_assert (op < i.operands);
7834 }
7835 else
7836 {
7837 /* Check register-only source operand when two source
7838 operands are swapped. */
7839 if (!i.tm.operand_types[op].bitfield.baseindex
7840 && i.tm.operand_types[op + 1].bitfield.baseindex)
7841 {
7842 vex_reg = op;
7843 op += 2;
7844 gas_assert (mem == (vex_reg + 1)
7845 && op < i.operands);
7846 }
7847 else
7848 {
7849 vex_reg = op + 1;
7850 gas_assert (vex_reg < i.operands);
7851 }
7852 }
7853 }
7854 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
7855 {
7856 /* For instructions with VexNDD, the register destination
7857 is encoded in VEX prefix. */
7858 if (i.mem_operands == 0)
7859 {
7860 /* There is no memory operand. */
7861 gas_assert ((op + 2) == i.operands);
7862 vex_reg = op + 1;
7863 }
7864 else
7865 {
7866 /* There are only 2 non-immediate operands. */
7867 gas_assert (op < i.imm_operands + 2
7868 && i.operands == i.imm_operands + 2);
7869 vex_reg = i.imm_operands + 1;
7870 }
7871 }
7872 else
7873 gas_assert (op < i.operands);
7874
7875 if (vex_reg != (unsigned int) ~0)
7876 {
7877 i386_operand_type *type = &i.tm.operand_types[vex_reg];
7878
7879 if ((type->bitfield.class != Reg
7880 || (!type->bitfield.dword && !type->bitfield.qword))
7881 && type->bitfield.class != RegSIMD
7882 && !operand_type_equal (type, &regmask))
7883 abort ();
7884
7885 i.vex.register_specifier = i.op[vex_reg].regs;
7886 }
7887
7888 /* Don't set OP operand twice. */
7889 if (vex_reg != op)
7890 {
7891 /* If there is an extension opcode to put here, the
7892 register number must be put into the regmem field. */
7893 if (i.tm.extension_opcode != None)
7894 {
7895 i.rm.regmem = i.op[op].regs->reg_num;
7896 if ((i.op[op].regs->reg_flags & RegRex) != 0)
7897 i.rex |= REX_B;
7898 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
7899 i.vrex |= REX_B;
7900 }
7901 else
7902 {
7903 i.rm.reg = i.op[op].regs->reg_num;
7904 if ((i.op[op].regs->reg_flags & RegRex) != 0)
7905 i.rex |= REX_R;
7906 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
7907 i.vrex |= REX_R;
7908 }
7909 }
7910
7911 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
7912 must set it to 3 to indicate this is a register operand
7913 in the regmem field. */
7914 if (!i.mem_operands)
7915 i.rm.mode = 3;
7916 }
7917
7918 /* Fill in i.rm.reg field with extension opcode (if any). */
7919 if (i.tm.extension_opcode != None)
7920 i.rm.reg = i.tm.extension_opcode;
7921 }
7922 return default_seg;
7923 }
7924
7925 static unsigned int
7926 flip_code16 (unsigned int code16)
7927 {
7928 gas_assert (i.tm.operands == 1);
7929
7930 return !(i.prefix[REX_PREFIX] & REX_W)
7931 && (code16 ? i.tm.operand_types[0].bitfield.disp32
7932 || i.tm.operand_types[0].bitfield.disp32s
7933 : i.tm.operand_types[0].bitfield.disp16)
7934 ? CODE16 : 0;
7935 }
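/* E.g. in 32-bit mode `data16 jmp target' takes a 16-bit displacement,
so the data prefix toggles CODE16 on; in 16-bit mode the same prefix
toggles it off, giving a 32-bit displacement. A REX.W prefix overrides
the data prefix, hence the check above. */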
7936
7937 static void
7938 output_branch (void)
7939 {
7940 char *p;
7941 int size;
7942 int code16;
7943 int prefix;
7944 relax_substateT subtype;
7945 symbolS *sym;
7946 offsetT off;
7947
7948 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
7949 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
7950
7951 prefix = 0;
7952 if (i.prefix[DATA_PREFIX] != 0)
7953 {
7954 prefix = 1;
7955 i.prefixes -= 1;
7956 code16 ^= flip_code16(code16);
7957 }
7958 /* Pentium4 branch hints. */
7959 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
7960 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
7961 {
7962 prefix++;
7963 i.prefixes--;
7964 }
7965 if (i.prefix[REX_PREFIX] != 0)
7966 {
7967 prefix++;
7968 i.prefixes--;
7969 }
7970
7971 /* BND prefixed jump. */
7972 if (i.prefix[BND_PREFIX] != 0)
7973 {
7974 prefix++;
7975 i.prefixes--;
7976 }
7977
7978 if (i.prefixes != 0)
7979 as_warn (_("skipping prefixes on `%s'"), i.tm.name);
7980
7981 /* It's always a symbol; end the frag & set up for relax.
7982 Make sure there is enough room in this frag for the largest
7983 instruction we may generate in md_convert_frag. This is 2
7984 bytes for the opcode and room for the prefix and largest
7985 displacement. */
7986 frag_grow (prefix + 2 + 4);
7987 /* Prefix and 1 opcode byte go in fr_fix. */
7988 p = frag_more (prefix + 1);
7989 if (i.prefix[DATA_PREFIX] != 0)
7990 *p++ = DATA_PREFIX_OPCODE;
7991 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
7992 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
7993 *p++ = i.prefix[SEG_PREFIX];
7994 if (i.prefix[BND_PREFIX] != 0)
7995 *p++ = BND_PREFIX_OPCODE;
7996 if (i.prefix[REX_PREFIX] != 0)
7997 *p++ = i.prefix[REX_PREFIX];
7998 *p = i.tm.base_opcode;
7999
8000 if ((unsigned char) *p == JUMP_PC_RELATIVE)
8001 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
8002 else if (cpu_arch_flags.bitfield.cpui386)
8003 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
8004 else
8005 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
8006 subtype |= code16;
8007
8008 sym = i.op[0].disps->X_add_symbol;
8009 off = i.op[0].disps->X_add_number;
8010
8011 if (i.op[0].disps->X_op != O_constant
8012 && i.op[0].disps->X_op != O_symbol)
8013 {
8014 /* Handle complex expressions. */
8015 sym = make_expr_symbol (i.op[0].disps);
8016 off = 0;
8017 }
8018
8019 /* 1 possible extra opcode + 4 byte displacement go in var part.
8020 Pass reloc in fr_var. */
8021 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
8022 }
8023
8024 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8025 /* Return TRUE iff PLT32 relocation should be used for branching to
8026 symbol S. */
8027
8028 static bfd_boolean
8029 need_plt32_p (symbolS *s)
8030 {
8031 /* PLT32 relocation is ELF only. */
8032 if (!IS_ELF)
8033 return FALSE;
8034
8035 #ifdef TE_SOLARIS
8036 /* Don't emit PLT32 relocation on Solaris: neither native linker nor
8037 krtld support it. */
8038 return FALSE;
8039 #endif
8040
8041 /* Since there is no need to prepare for a PLT branch on x86-64, we
8042 can generate R_X86_64_PLT32 instead of R_X86_64_PC32; the PLT32
8043 relocation then serves as a marker for 32-bit PC-relative branches. */
8044 if (!object_64bit)
8045 return FALSE;
8046
8047 /* Weak or undefined symbols need a PLT32 relocation. */
8048 if (S_IS_WEAK (s) || !S_IS_DEFINED (s))
8049 return TRUE;
8050
8051 /* A non-global symbol doesn't need a PLT32 relocation. */
8052 if (! S_IS_EXTERNAL (s))
8053 return FALSE;
8054
8055 /* Other global symbols need a PLT32 relocation. NB: Symbols with
8056 non-default visibility are treated as normal global symbols so
8057 that the PLT32 relocation can be used as a marker for 32-bit
8058 PC-relative branches. This is useful for linker relaxation. */
8059 return TRUE;
8060 }
8061 #endif
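/* E.g. `call foo' against a global or undefined foo gets
R_X86_64_PLT32, while a call to a purely local symbol keeps the plain
R_X86_64_PC32 produced via reloc (). */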
8062
8063 static void
8064 output_jump (void)
8065 {
8066 char *p;
8067 int size;
8068 fixS *fixP;
8069 bfd_reloc_code_real_type jump_reloc = i.reloc[0];
8070
8071 if (i.tm.opcode_modifier.jump == JUMP_BYTE)
8072 {
8073 /* This is a loop or jecxz type instruction. */
8074 size = 1;
8075 if (i.prefix[ADDR_PREFIX] != 0)
8076 {
8077 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
8078 i.prefixes -= 1;
8079 }
8080 /* Pentium4 branch hints. */
8081 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
8082 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
8083 {
8084 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
8085 i.prefixes--;
8086 }
8087 }
8088 else
8089 {
8090 int code16;
8091
8092 code16 = 0;
8093 if (flag_code == CODE_16BIT)
8094 code16 = CODE16;
8095
8096 if (i.prefix[DATA_PREFIX] != 0)
8097 {
8098 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
8099 i.prefixes -= 1;
8100 code16 ^= flip_code16(code16);
8101 }
8102
8103 size = 4;
8104 if (code16)
8105 size = 2;
8106 }
8107
8108 /* BND prefixed jump. */
8109 if (i.prefix[BND_PREFIX] != 0)
8110 {
8111 FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
8112 i.prefixes -= 1;
8113 }
8114
8115 if (i.prefix[REX_PREFIX] != 0)
8116 {
8117 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
8118 i.prefixes -= 1;
8119 }
8120
8121 if (i.prefixes != 0)
8122 as_warn (_("skipping prefixes on `%s'"), i.tm.name);
8123
8124 p = frag_more (i.tm.opcode_length + size);
8125 switch (i.tm.opcode_length)
8126 {
8127 case 2:
8128 *p++ = i.tm.base_opcode >> 8;
8129 /* Fall through. */
8130 case 1:
8131 *p++ = i.tm.base_opcode;
8132 break;
8133 default:
8134 abort ();
8135 }
8136
8137 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8138 if (size == 4
8139 && jump_reloc == NO_RELOC
8140 && need_plt32_p (i.op[0].disps->X_add_symbol))
8141 jump_reloc = BFD_RELOC_X86_64_PLT32;
8142 #endif
8143
8144 jump_reloc = reloc (size, 1, 1, jump_reloc);
8145
8146 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
8147 i.op[0].disps, 1, jump_reloc);
8148
8149 /* All jumps handled here are signed, but don't use a signed limit
8150 check for 32 and 16 bit jumps as we want to allow wrap around at
8151 4G and 64k respectively. */
8152 if (size == 1)
8153 fixP->fx_signed = 1;
8154 }
8155
8156 static void
8157 output_interseg_jump (void)
8158 {
8159 char *p;
8160 int size;
8161 int prefix;
8162 int code16;
8163
8164 code16 = 0;
8165 if (flag_code == CODE_16BIT)
8166 code16 = CODE16;
8167
8168 prefix = 0;
8169 if (i.prefix[DATA_PREFIX] != 0)
8170 {
8171 prefix = 1;
8172 i.prefixes -= 1;
8173 code16 ^= CODE16;
8174 }
8175
8176 gas_assert (!i.prefix[REX_PREFIX]);
8177
8178 size = 4;
8179 if (code16)
8180 size = 2;
8181
8182 if (i.prefixes != 0)
8183 as_warn (_("skipping prefixes on `%s'"), i.tm.name);
8184
8185 /* 1-byte opcode; 2-byte segment selector; 2- or 4-byte offset. */
8186 p = frag_more (prefix + 1 + 2 + size);
8187
8188 if (i.prefix[DATA_PREFIX] != 0)
8189 *p++ = DATA_PREFIX_OPCODE;
8190
8191 if (i.prefix[REX_PREFIX] != 0)
8192 *p++ = i.prefix[REX_PREFIX];
8193
8194 *p++ = i.tm.base_opcode;
8195 if (i.op[1].imms->X_op == O_constant)
8196 {
8197 offsetT n = i.op[1].imms->X_add_number;
8198
8199 if (size == 2
8200 && !fits_in_unsigned_word (n)
8201 && !fits_in_signed_word (n))
8202 {
8203 as_bad (_("16-bit jump out of range"));
8204 return;
8205 }
8206 md_number_to_chars (p, n, size);
8207 }
8208 else
8209 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
8210 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
8211 if (i.op[0].imms->X_op != O_constant)
8212 as_bad (_("can't handle non-absolute segment in `%s'"),
8213 i.tm.name);
8214 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
8215 }
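/* A worked example: in 32-bit code `ljmp $0x10, $0x2000' comes out as
ea 00 20 00 00 10 00 -- the EA opcode, the 4-byte offset 0x2000, then
the 2-byte segment selector 0x10. */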
8216
8217 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8218 void
8219 x86_cleanup (void)
8220 {
8221 char *p;
8222 asection *seg = now_seg;
8223 subsegT subseg = now_subseg;
8224 asection *sec;
8225 unsigned int alignment, align_size_1;
8226 unsigned int isa_1_descsz, feature_2_descsz, descsz;
8227 unsigned int isa_1_descsz_raw, feature_2_descsz_raw;
8228 unsigned int padding;
8229
8230 if (!IS_ELF || !x86_used_note)
8231 return;
8232
8233 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_X86;
8234
8235 /* The .note.gnu.property section layout:
8236
8237 Field Length Contents
8238 ---- ---- ----
8239 n_namsz 4 4
8240 n_descsz 4 The note descriptor size
8241 n_type 4 NT_GNU_PROPERTY_TYPE_0
8242 n_name 4 "GNU"
8243 n_desc n_descsz The program property array
8244 .... .... ....
8245 */
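/* As a worked example for ELFCLASS64: each property entry below is
4 + 4 + 4 = 12 bytes, padded to 8-byte alignment, i.e. 16 bytes, so
with the two entries emitted here n_descsz is 32. For ELFCLASS32 the
4-byte alignment needs no padding and n_descsz is 24. */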
8246
8247 /* Create the .note.gnu.property section. */
8248 sec = subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME, 0);
8249 bfd_set_section_flags (sec,
8250 (SEC_ALLOC
8251 | SEC_LOAD
8252 | SEC_DATA
8253 | SEC_HAS_CONTENTS
8254 | SEC_READONLY));
8255
8256 if (get_elf_backend_data (stdoutput)->s->elfclass == ELFCLASS64)
8257 {
8258 align_size_1 = 7;
8259 alignment = 3;
8260 }
8261 else
8262 {
8263 align_size_1 = 3;
8264 alignment = 2;
8265 }
8266
8267 bfd_set_section_alignment (sec, alignment);
8268 elf_section_type (sec) = SHT_NOTE;
8269
8270 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
8271 + 4-byte data */
8272 isa_1_descsz_raw = 4 + 4 + 4;
8273 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
8274 isa_1_descsz = (isa_1_descsz_raw + align_size_1) & ~align_size_1;
8275
8276 feature_2_descsz_raw = isa_1_descsz;
8277 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
8278 + 4-byte data */
8279 feature_2_descsz_raw += 4 + 4 + 4;
8280 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
8281 feature_2_descsz = ((feature_2_descsz_raw + align_size_1)
8282 & ~align_size_1);
8283
8284 descsz = feature_2_descsz;
8285 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
8286 p = frag_more (4 + 4 + 4 + 4 + descsz);
8287
8288 /* Write n_namsz. */
8289 md_number_to_chars (p, (valueT) 4, 4);
8290
8291 /* Write n_descsz. */
8292 md_number_to_chars (p + 4, (valueT) descsz, 4);
8293
8294 /* Write n_type. */
8295 md_number_to_chars (p + 4 * 2, (valueT) NT_GNU_PROPERTY_TYPE_0, 4);
8296
8297 /* Write n_name. */
8298 memcpy (p + 4 * 3, "GNU", 4);
8299
8300 /* Write 4-byte type. */
8301 md_number_to_chars (p + 4 * 4,
8302 (valueT) GNU_PROPERTY_X86_ISA_1_USED, 4);
8303
8304 /* Write 4-byte data size. */
8305 md_number_to_chars (p + 4 * 5, (valueT) 4, 4);
8306
8307 /* Write 4-byte data. */
8308 md_number_to_chars (p + 4 * 6, (valueT) x86_isa_1_used, 4);
8309
8310 /* Zero out paddings. */
8311 padding = isa_1_descsz - isa_1_descsz_raw;
8312 if (padding)
8313 memset (p + 4 * 7, 0, padding);
8314
8315 /* Write 4-byte type. */
8316 md_number_to_chars (p + isa_1_descsz + 4 * 4,
8317 (valueT) GNU_PROPERTY_X86_FEATURE_2_USED, 4);
8318
8319 /* Write 4-byte data size. */
8320 md_number_to_chars (p + isa_1_descsz + 4 * 5, (valueT) 4, 4);
8321
8322 /* Write 4-byte data. */
8323 md_number_to_chars (p + isa_1_descsz + 4 * 6,
8324 (valueT) x86_feature_2_used, 4);
8325
8326 /* Zero out paddings. */
8327 padding = feature_2_descsz - feature_2_descsz_raw;
8328 if (padding)
8329 memset (p + isa_1_descsz + 4 * 7, 0, padding);
8330
8331 /* We probably can't restore the current segment, for there likely
8332 isn't one yet... */
8333 if (seg && subseg)
8334 subseg_set (seg, subseg);
8335 }
8336 #endif
8337
8338 static unsigned int
8339 encoding_length (const fragS *start_frag, offsetT start_off,
8340 const char *frag_now_ptr)
8341 {
8342 unsigned int len = 0;
8343
8344 if (start_frag != frag_now)
8345 {
8346 const fragS *fr = start_frag;
8347
8348 do {
8349 len += fr->fr_fix;
8350 fr = fr->fr_next;
8351 } while (fr && fr != frag_now);
8352 }
8353
8354 return len - start_off + (frag_now_ptr - frag_now->fr_literal);
8355 }
8356
8357 /* Return 1 for test, and, cmp, add, sub, inc and dec, which may
8358 be macro-fused with conditional jumps. */
8359
8360 static int
8361 maybe_fused_with_jcc_p (void)
8362 {
8363 /* No RIP address. */
8364 if (i.base_reg && i.base_reg->reg_num == RegIP)
8365 return 0;
8366
8367 /* No VEX/EVEX encoding. */
8368 if (is_any_vex_encoding (&i.tm))
8369 return 0;
8370
8371 /* and, add, sub with destination register. */
8372 if ((i.tm.base_opcode >= 0x20 && i.tm.base_opcode <= 0x25)
8373 || i.tm.base_opcode <= 5
8374 || (i.tm.base_opcode >= 0x28 && i.tm.base_opcode <= 0x2d)
8375 || ((i.tm.base_opcode | 3) == 0x83
8376 && ((i.tm.extension_opcode | 1) == 0x5
8377 || i.tm.extension_opcode == 0x0)))
8378 return (i.types[1].bitfield.class == Reg
8379 || i.types[1].bitfield.instance == Accum);
8380
8381 /* test, cmp with any register. */
8382 if ((i.tm.base_opcode | 1) == 0x85
8383 || (i.tm.base_opcode | 1) == 0xa9
8384 || ((i.tm.base_opcode | 1) == 0xf7
8385 && i.tm.extension_opcode == 0)
8386 || (i.tm.base_opcode >= 0x38 && i.tm.base_opcode <= 0x3d)
8387 || ((i.tm.base_opcode | 3) == 0x83
8388 && (i.tm.extension_opcode == 0x7)))
8389 return (i.types[0].bitfield.class == Reg
8390 || i.types[0].bitfield.instance == Accum
8391 || i.types[1].bitfield.class == Reg
8392 || i.types[1].bitfield.instance == Accum);
8393
8394 /* inc, dec with any register. */
8395 if ((i.tm.cpu_flags.bitfield.cpuno64
8396 && (i.tm.base_opcode | 0xf) == 0x4f)
8397 || ((i.tm.base_opcode | 1) == 0xff
8398 && i.tm.extension_opcode <= 0x1))
8399 return (i.types[0].bitfield.class == Reg
8400 || i.types[0].bitfield.instance == Accum);
8401
8402 return 0;
8403 }
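/* E.g. `cmp %rax, %rbx' followed by `jne' may macro-fuse on recent
Intel CPUs, so the pair is kept from being split by the
-malign-branch-boundary padding; a cmp against a RIP-relative memory
operand never fuses and is rejected above. */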
8404
8405 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
8406
8407 static int
8408 add_fused_jcc_padding_frag_p (void)
8409 {
8410 /* NB: This doesn't work with COND_JUMP86 without i386. */
8411 if (!align_branch_power
8412 || now_seg == absolute_section
8413 || !cpu_arch_flags.bitfield.cpui386
8414 || !(align_branch & align_branch_fused_bit))
8415 return 0;
8416
8417 if (maybe_fused_with_jcc_p ())
8418 {
8419 if (last_insn.kind == last_insn_other
8420 || last_insn.seg != now_seg)
8421 return 1;
8422 if (flag_debug)
8423 as_warn_where (last_insn.file, last_insn.line,
8424 _("`%s` skips -malign-branch-boundary on `%s`"),
8425 last_insn.name, i.tm.name);
8426 }
8427
8428 return 0;
8429 }
8430
8431 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
8432
8433 static int
8434 add_branch_prefix_frag_p (void)
8435 {
8436 /* NB: This doesn't work with COND_JUMP86 without i386. Don't add a
8437 prefix to PadLock instructions since they include prefixes in the opcode. */
8438 if (!align_branch_power
8439 || !align_branch_prefix_size
8440 || now_seg == absolute_section
8441 || i.tm.cpu_flags.bitfield.cpupadlock
8442 || !cpu_arch_flags.bitfield.cpui386)
8443 return 0;
8444
8445 /* Don't add a prefix if the insn is itself a prefix or has no
8446 operands, in case the segment prefix is special. */
8447 if (!i.operands || i.tm.opcode_modifier.isprefix)
8448 return 0;
8449
8450 if (last_insn.kind == last_insn_other
8451 || last_insn.seg != now_seg)
8452 return 1;
8453
8454 if (flag_debug)
8455 as_warn_where (last_insn.file, last_insn.line,
8456 _("`%s` skips -malign-branch-boundary on `%s`"),
8457 last_insn.name, i.tm.name);
8458
8459 return 0;
8460 }
8461
8462 /* Return 1 if a BRANCH_PADDING frag should be generated. */
8463
8464 static int
8465 add_branch_padding_frag_p (enum align_branch_kind *branch_p)
8466 {
8467 int add_padding;
8468
8469 /* NB: This doesn't work with COND_JUMP86 without i386. */
8470 if (!align_branch_power
8471 || now_seg == absolute_section
8472 || !cpu_arch_flags.bitfield.cpui386)
8473 return 0;
8474
8475 add_padding = 0;
8476
8477 /* Check for jcc and direct jmp. */
8478 if (i.tm.opcode_modifier.jump == JUMP)
8479 {
8480 if (i.tm.base_opcode == JUMP_PC_RELATIVE)
8481 {
8482 *branch_p = align_branch_jmp;
8483 add_padding = align_branch & align_branch_jmp_bit;
8484 }
8485 else
8486 {
8487 *branch_p = align_branch_jcc;
8488 if ((align_branch & align_branch_jcc_bit))
8489 add_padding = 1;
8490 }
8491 }
8492 else if (is_any_vex_encoding (&i.tm))
8493 return 0;
8494 else if ((i.tm.base_opcode | 1) == 0xc3)
8495 {
8496 /* Near ret. */
8497 *branch_p = align_branch_ret;
8498 if ((align_branch & align_branch_ret_bit))
8499 add_padding = 1;
8500 }
8501 else
8502 {
8503 /* Check for indirect jmp, direct and indirect calls. */
8504 if (i.tm.base_opcode == 0xe8)
8505 {
8506 /* Direct call. */
8507 *branch_p = align_branch_call;
8508 if ((align_branch & align_branch_call_bit))
8509 add_padding = 1;
8510 }
8511 else if (i.tm.base_opcode == 0xff
8512 && (i.tm.extension_opcode == 2
8513 || i.tm.extension_opcode == 4))
8514 {
8515 /* Indirect call and jmp. */
8516 *branch_p = align_branch_indirect;
8517 if ((align_branch & align_branch_indirect_bit))
8518 add_padding = 1;
8519 }
8520
8521 if (add_padding
8522 && i.disp_operands
8523 && tls_get_addr
8524 && (i.op[0].disps->X_op == O_symbol
8525 || (i.op[0].disps->X_op == O_subtract
8526 && i.op[0].disps->X_op_symbol == GOT_symbol)))
8527 {
8528 symbolS *s = i.op[0].disps->X_add_symbol;
8529 /* No padding for calls to a global or undefined tls_get_addr. */
8530 if ((S_IS_EXTERNAL (s) || !S_IS_DEFINED (s))
8531 && strcmp (S_GET_NAME (s), tls_get_addr) == 0)
8532 return 0;
8533 }
8534 }
8535
8536 if (add_padding
8537 && last_insn.kind != last_insn_other
8538 && last_insn.seg == now_seg)
8539 {
8540 if (flag_debug)
8541 as_warn_where (last_insn.file, last_insn.line,
8542 _("`%s` skips -malign-branch-boundary on `%s`"),
8543 last_insn.name, i.tm.name);
8544 return 0;
8545 }
8546
8547 return add_padding;
8548 }
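/* E.g. with -malign-branch-boundary=32 -malign-branch=jcc, a conditional
jump that would cross a 32-byte boundary gets a BRANCH_PADDING frag and
is pushed past the boundary (typically with NOPs) during relaxation. */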
8549
8550 static void
8551 output_insn (void)
8552 {
8553 fragS *insn_start_frag;
8554 offsetT insn_start_off;
8555 fragS *fragP = NULL;
8556 enum align_branch_kind branch = align_branch_none;
8557
8558 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8559 if (IS_ELF && x86_used_note)
8560 {
8561 if (i.tm.cpu_flags.bitfield.cpucmov)
8562 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_CMOV;
8563 if (i.tm.cpu_flags.bitfield.cpusse)
8564 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE;
8565 if (i.tm.cpu_flags.bitfield.cpusse2)
8566 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE2;
8567 if (i.tm.cpu_flags.bitfield.cpusse3)
8568 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE3;
8569 if (i.tm.cpu_flags.bitfield.cpussse3)
8570 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSSE3;
8571 if (i.tm.cpu_flags.bitfield.cpusse4_1)
8572 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE4_1;
8573 if (i.tm.cpu_flags.bitfield.cpusse4_2)
8574 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE4_2;
8575 if (i.tm.cpu_flags.bitfield.cpuavx)
8576 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX;
8577 if (i.tm.cpu_flags.bitfield.cpuavx2)
8578 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX2;
8579 if (i.tm.cpu_flags.bitfield.cpufma)
8580 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_FMA;
8581 if (i.tm.cpu_flags.bitfield.cpuavx512f)
8582 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512F;
8583 if (i.tm.cpu_flags.bitfield.cpuavx512cd)
8584 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512CD;
8585 if (i.tm.cpu_flags.bitfield.cpuavx512er)
8586 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512ER;
8587 if (i.tm.cpu_flags.bitfield.cpuavx512pf)
8588 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512PF;
8589 if (i.tm.cpu_flags.bitfield.cpuavx512vl)
8590 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512VL;
8591 if (i.tm.cpu_flags.bitfield.cpuavx512dq)
8592 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512DQ;
8593 if (i.tm.cpu_flags.bitfield.cpuavx512bw)
8594 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512BW;
8595 if (i.tm.cpu_flags.bitfield.cpuavx512_4fmaps)
8596 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_4FMAPS;
8597 if (i.tm.cpu_flags.bitfield.cpuavx512_4vnniw)
8598 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_4VNNIW;
8599 if (i.tm.cpu_flags.bitfield.cpuavx512_bitalg)
8600 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_BITALG;
8601 if (i.tm.cpu_flags.bitfield.cpuavx512ifma)
8602 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_IFMA;
8603 if (i.tm.cpu_flags.bitfield.cpuavx512vbmi)
8604 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI;
8605 if (i.tm.cpu_flags.bitfield.cpuavx512_vbmi2)
8606 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI2;
8607 if (i.tm.cpu_flags.bitfield.cpuavx512_vnni)
8608 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VNNI;
8609 if (i.tm.cpu_flags.bitfield.cpuavx512_bf16)
8610 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_BF16;
8611
8612 if (i.tm.cpu_flags.bitfield.cpu8087
8613 || i.tm.cpu_flags.bitfield.cpu287
8614 || i.tm.cpu_flags.bitfield.cpu387
8615 || i.tm.cpu_flags.bitfield.cpu687
8616 || i.tm.cpu_flags.bitfield.cpufisttp)
8617 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_X87;
8618 if (i.has_regmmx
8619 || i.tm.base_opcode == 0xf77 /* emms */
8620 || i.tm.base_opcode == 0xf0e /* femms */)
8621 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_MMX;
8622 if (i.has_regxmm)
8623 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XMM;
8624 if (i.has_regymm)
8625 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_YMM;
8626 if (i.has_regzmm)
8627 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_ZMM;
8628 if (i.tm.cpu_flags.bitfield.cpufxsr)
8629 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_FXSR;
8630 if (i.tm.cpu_flags.bitfield.cpuxsave)
8631 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVE;
8632 if (i.tm.cpu_flags.bitfield.cpuxsaveopt)
8633 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT;
8634 if (i.tm.cpu_flags.bitfield.cpuxsavec)
8635 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVEC;
8636 }
8637 #endif
8638
8639 /* Tie dwarf2 debug info to the address at the start of the insn.
8640 We can't do this after the insn has been output as the current
8641 frag may have been closed off. eg. by frag_var. */
8642 dwarf2_emit_insn (0);
8643
8644 insn_start_frag = frag_now;
8645 insn_start_off = frag_now_fix ();
8646
8647 if (add_branch_padding_frag_p (&branch))
8648 {
8649 char *p;
8650 /* Branch can be 8 bytes. Leave some room for prefixes. */
8651 unsigned int max_branch_padding_size = 14;
8652
8653 /* Align section to boundary. */
8654 record_alignment (now_seg, align_branch_power);
8655
8656 /* Make room for padding. */
8657 frag_grow (max_branch_padding_size);
8658
8659 /* Start of the padding. */
8660 p = frag_more (0);
8661
8662 fragP = frag_now;
8663
8664 frag_var (rs_machine_dependent, max_branch_padding_size, 0,
8665 ENCODE_RELAX_STATE (BRANCH_PADDING, 0),
8666 NULL, 0, p);
8667
8668 fragP->tc_frag_data.branch_type = branch;
8669 fragP->tc_frag_data.max_bytes = max_branch_padding_size;
8670 }
8671
8672 /* Output jumps. */
8673 if (i.tm.opcode_modifier.jump == JUMP)
8674 output_branch ();
8675 else if (i.tm.opcode_modifier.jump == JUMP_BYTE
8676 || i.tm.opcode_modifier.jump == JUMP_DWORD)
8677 output_jump ();
8678 else if (i.tm.opcode_modifier.jump == JUMP_INTERSEGMENT)
8679 output_interseg_jump ();
8680 else
8681 {
8682 /* Output normal instructions here. */
8683 char *p;
8684 unsigned char *q;
8685 unsigned int j;
8686 unsigned int prefix;
8687
8688 if (avoid_fence
8689 && (i.tm.base_opcode == 0xfaee8
8690 || i.tm.base_opcode == 0xfaef0
8691 || i.tm.base_opcode == 0xfaef8))
8692 {
8693 /* Encode lfence, mfence, and sfence as
8694 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
8695 offsetT val = 0x240483f0ULL;
8696 p = frag_more (5);
8697 md_number_to_chars (p, val, 5);
8698 return;
8699 }
8700
8701 /* Some processors fail on the LOCK prefix. This option makes the
8702 assembler ignore the LOCK prefix and serves as a workaround. */
8703 if (omit_lock_prefix)
8704 {
8705 if (i.tm.base_opcode == LOCK_PREFIX_OPCODE)
8706 return;
8707 i.prefix[LOCK_PREFIX] = 0;
8708 }
8709
8710 if (branch)
8711 /* Skip if this is a branch. */
8712 ;
8713 else if (add_fused_jcc_padding_frag_p ())
8714 {
8715 /* Make room for padding. */
8716 frag_grow (MAX_FUSED_JCC_PADDING_SIZE);
8717 p = frag_more (0);
8718
8719 fragP = frag_now;
8720
8721 frag_var (rs_machine_dependent, MAX_FUSED_JCC_PADDING_SIZE, 0,
8722 ENCODE_RELAX_STATE (FUSED_JCC_PADDING, 0),
8723 NULL, 0, p);
8724
8725 fragP->tc_frag_data.branch_type = align_branch_fused;
8726 fragP->tc_frag_data.max_bytes = MAX_FUSED_JCC_PADDING_SIZE;
8727 }
8728 else if (add_branch_prefix_frag_p ())
8729 {
8730 unsigned int max_prefix_size = align_branch_prefix_size;
8731
8732 /* Make room for padding. */
8733 frag_grow (max_prefix_size);
8734 p = frag_more (0);
8735
8736 fragP = frag_now;
8737
8738 frag_var (rs_machine_dependent, max_prefix_size, 0,
8739 ENCODE_RELAX_STATE (BRANCH_PREFIX, 0),
8740 NULL, 0, p);
8741
8742 fragP->tc_frag_data.max_bytes = max_prefix_size;
8743 }
8744
8745 /* Since the VEX/EVEX prefix contains the implicit prefix, we
8746 don't need the explicit prefix. */
8747 if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
8748 {
8749 switch (i.tm.opcode_length)
8750 {
8751 case 3:
8752 if (i.tm.base_opcode & 0xff000000)
8753 {
8754 prefix = (i.tm.base_opcode >> 24) & 0xff;
8755 if (!i.tm.cpu_flags.bitfield.cpupadlock
8756 || prefix != REPE_PREFIX_OPCODE
8757 || (i.prefix[REP_PREFIX] != REPE_PREFIX_OPCODE))
8758 add_prefix (prefix);
8759 }
8760 break;
8761 case 2:
8762 if ((i.tm.base_opcode & 0xff0000) != 0)
8763 {
8764 prefix = (i.tm.base_opcode >> 16) & 0xff;
8765 add_prefix (prefix);
8766 }
8767 break;
8768 case 1:
8769 break;
8770 case 0:
8771 /* Check for pseudo prefixes. */
8772 as_bad_where (insn_start_frag->fr_file,
8773 insn_start_frag->fr_line,
8774 _("pseudo prefix without instruction"));
8775 return;
8776 default:
8777 abort ();
8778 }
8779
8780 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8781 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
8782 R_X86_64_GOTTPOFF relocation so that linker can safely
8783 perform IE->LE optimization. A dummy REX_OPCODE prefix
8784 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
8785 relocation for GDesc -> IE/LE optimization. */
8786 if (x86_elf_abi == X86_64_X32_ABI
8787 && i.operands == 2
8788 && (i.reloc[0] == BFD_RELOC_X86_64_GOTTPOFF
8789 || i.reloc[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC)
8790 && i.prefix[REX_PREFIX] == 0)
8791 add_prefix (REX_OPCODE);
8792 #endif
8793
8794 /* The prefix bytes. */
8795 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
8796 if (*q)
8797 FRAG_APPEND_1_CHAR (*q);
8798 }
8799 else
8800 {
8801 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
8802 if (*q)
8803 switch (j)
8804 {
8805 case REX_PREFIX:
8806 /* REX byte is encoded in VEX prefix. */
8807 break;
8808 case SEG_PREFIX:
8809 case ADDR_PREFIX:
8810 FRAG_APPEND_1_CHAR (*q);
8811 break;
8812 default:
8813 /* There should be no other prefixes for instructions
8814 with VEX prefix. */
8815 abort ();
8816 }
8817
8818 /* For EVEX instructions i.vrex should become 0 after
8819 build_evex_prefix. For VEX instructions upper 16 registers
8820 aren't available, so VREX should be 0. */
8821 if (i.vrex)
8822 abort ();
8823 /* Now the VEX prefix. */
8824 p = frag_more (i.vex.length);
8825 for (j = 0; j < i.vex.length; j++)
8826 p[j] = i.vex.bytes[j];
8827 }
8828
8829 /* Now the opcode; be careful about word order here! */
8830 if (i.tm.opcode_length == 1)
8831 {
8832 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
8833 }
8834 else
8835 {
8836 switch (i.tm.opcode_length)
8837 {
8838 case 4:
8839 p = frag_more (4);
8840 *p++ = (i.tm.base_opcode >> 24) & 0xff;
8841 *p++ = (i.tm.base_opcode >> 16) & 0xff;
8842 break;
8843 case 3:
8844 p = frag_more (3);
8845 *p++ = (i.tm.base_opcode >> 16) & 0xff;
8846 break;
8847 case 2:
8848 p = frag_more (2);
8849 break;
8850 default:
8851 abort ();
8852 break;
8853 }
8854
8855 /* Put out high byte first: can't use md_number_to_chars! */
8856 *p++ = (i.tm.base_opcode >> 8) & 0xff;
8857 *p = i.tm.base_opcode & 0xff;
8858 }
8859
8860 /* Now the modrm byte and sib byte (if present). */
8861 if (i.tm.opcode_modifier.modrm)
8862 {
8863 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
8864 | i.rm.reg << 3
8865 | i.rm.mode << 6));
8866 /* If i.rm.regmem == ESP (4)
8867 && i.rm.mode != (Register mode)
8868 && not 16 bit
8869 ==> need second modrm byte. */
8870 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
8871 && i.rm.mode != 3
8872 && !(i.base_reg && i.base_reg->reg_type.bitfield.word))
8873 FRAG_APPEND_1_CHAR ((i.sib.base << 0
8874 | i.sib.index << 3
8875 | i.sib.scale << 6));
8876 }
8877
8878 if (i.disp_operands)
8879 output_disp (insn_start_frag, insn_start_off);
8880
8881 if (i.imm_operands)
8882 output_imm (insn_start_frag, insn_start_off);
8883
8884 /*
8885 * In the absolute section frag_now_fix () returns plain
8886 * abs_section_offset, and abs_section_offset is not updated as data
8887 * gets added to the frag; that would break the logic below.
8888 */
8889 if (now_seg != absolute_section)
8890 {
8891 j = encoding_length (insn_start_frag, insn_start_off, frag_more (0));
8892 if (j > 15)
8893 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
8894 j);
8895 else if (fragP)
8896 {
8897 /* NB: Don't add prefix with GOTPC relocation since
8898 output_disp() above depends on the fixed encoding
8899 length. Can't add prefix with TLS relocation since
8900 it breaks TLS linker optimization. */
8901 unsigned int max = i.has_gotpc_tls_reloc ? 0 : 15 - j;
8902 /* Prefix count on the current instruction. */
8903 unsigned int count = i.vex.length;
8904 unsigned int k;
8905 for (k = 0; k < ARRAY_SIZE (i.prefix); k++)
8906 /* REX byte is encoded in VEX/EVEX prefix. */
8907 if (i.prefix[k] && (k != REX_PREFIX || !i.vex.length))
8908 count++;
8909
8910 /* Count prefixes for extended opcode maps. */
8911 if (!i.vex.length)
8912 switch (i.tm.opcode_length)
8913 {
8914 case 3:
8915 if (((i.tm.base_opcode >> 16) & 0xff) == 0xf)
8916 {
8917 count++;
8918 switch ((i.tm.base_opcode >> 8) & 0xff)
8919 {
8920 case 0x38:
8921 case 0x3a:
8922 count++;
8923 break;
8924 default:
8925 break;
8926 }
8927 }
8928 break;
8929 case 2:
8930 if (((i.tm.base_opcode >> 8) & 0xff) == 0xf)
8931 count++;
8932 break;
8933 case 1:
8934 break;
8935 default:
8936 abort ();
8937 }
8938
8939 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype)
8940 == BRANCH_PREFIX)
8941 {
8942 /* Set the maximum prefix size in BRANCH_PREFIX
8943 frag. */
8944 if (fragP->tc_frag_data.max_bytes > max)
8945 fragP->tc_frag_data.max_bytes = max;
8946 if (fragP->tc_frag_data.max_bytes > count)
8947 fragP->tc_frag_data.max_bytes -= count;
8948 else
8949 fragP->tc_frag_data.max_bytes = 0;
8950 }
8951 else
8952 {
8953 /* Remember the maximum prefix size in FUSED_JCC_PADDING
8954 frag. */
8955 unsigned int max_prefix_size;
8956 if (align_branch_prefix_size > max)
8957 max_prefix_size = max;
8958 else
8959 max_prefix_size = align_branch_prefix_size;
8960 if (max_prefix_size > count)
8961 fragP->tc_frag_data.max_prefix_length
8962 = max_prefix_size - count;
8963 }
8964
8965 /* Use existing segment prefix if possible. Use CS
8966 segment prefix in 64-bit mode. In 32-bit mode, use SS
8967 segment prefix with ESP/EBP base register and use DS
8968 segment prefix without ESP/EBP base register. */
8969 if (i.prefix[SEG_PREFIX])
8970 fragP->tc_frag_data.default_prefix = i.prefix[SEG_PREFIX];
8971 else if (flag_code == CODE_64BIT)
8972 fragP->tc_frag_data.default_prefix = CS_PREFIX_OPCODE;
8973 else if (i.base_reg
8974 && (i.base_reg->reg_num == 4
8975 || i.base_reg->reg_num == 5))
8976 fragP->tc_frag_data.default_prefix = SS_PREFIX_OPCODE;
8977 else
8978 fragP->tc_frag_data.default_prefix = DS_PREFIX_OPCODE;
8979 }
8980 }
8981 }
8982
8983 /* NB: This doesn't work with COND_JUMP86 without i386. */
8984 if (align_branch_power
8985 && now_seg != absolute_section
8986 && cpu_arch_flags.bitfield.cpui386)
8987 {
8988 /* Terminate each frag so that we can add prefix and check for
8989 fused jcc. */
8990 frag_wane (frag_now);
8991 frag_new (0);
8992 }
8993
8994 #ifdef DEBUG386
8995 if (flag_debug)
8996 {
8997 pi ("" /*line*/, &i);
8998 }
8999 #endif /* DEBUG386 */
9000 }
9001
9002 /* Return the size of the displacement operand N. */
9003
9004 static int
9005 disp_size (unsigned int n)
9006 {
9007 int size = 4;
9008
9009 if (i.types[n].bitfield.disp64)
9010 size = 8;
9011 else if (i.types[n].bitfield.disp8)
9012 size = 1;
9013 else if (i.types[n].bitfield.disp16)
9014 size = 2;
9015 return size;
9016 }
9017
9018 /* Return the size of the immediate operand N. */
9019
9020 static int
9021 imm_size (unsigned int n)
9022 {
9023 int size = 4;
9024 if (i.types[n].bitfield.imm64)
9025 size = 8;
9026 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
9027 size = 1;
9028 else if (i.types[n].bitfield.imm16)
9029 size = 2;
9030 return size;
9031 }
9032
9033 static void
9034 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
9035 {
9036 char *p;
9037 unsigned int n;
9038
9039 for (n = 0; n < i.operands; n++)
9040 {
9041 if (operand_type_check (i.types[n], disp))
9042 {
9043 if (i.op[n].disps->X_op == O_constant)
9044 {
9045 int size = disp_size (n);
9046 offsetT val = i.op[n].disps->X_add_number;
9047
9048 val = offset_in_range (val >> (size == 1 ? i.memshift : 0),
9049 size);
9050 p = frag_more (size);
9051 md_number_to_chars (p, val, size);
9052 }
9053 else
9054 {
9055 enum bfd_reloc_code_real reloc_type;
9056 int size = disp_size (n);
9057 int sign = i.types[n].bitfield.disp32s;
9058 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
9059 fixS *fixP;
9060
9061 /* We can't have an 8-bit displacement here. */
9062 gas_assert (!i.types[n].bitfield.disp8);
9063
9064 /* The PC-relative address is computed relative
9065 to the instruction boundary, so in case an immediate
9066 field follows, we need to adjust the value. */
9067 if (pcrel && i.imm_operands)
9068 {
9069 unsigned int n1;
9070 int sz = 0;
9071
9072 for (n1 = 0; n1 < i.operands; n1++)
9073 if (operand_type_check (i.types[n1], imm))
9074 {
9075 /* Only one immediate is allowed for PC
9076 relative address. */
9077 gas_assert (sz == 0);
9078 sz = imm_size (n1);
9079 i.op[n].disps->X_add_number -= sz;
9080 }
9081 /* We should find the immediate. */
9082 gas_assert (sz != 0);
9083 }
9084
9085 p = frag_more (size);
9086 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
9087 if (GOT_symbol
9088 && GOT_symbol == i.op[n].disps->X_add_symbol
9089 && (((reloc_type == BFD_RELOC_32
9090 || reloc_type == BFD_RELOC_X86_64_32S
9091 || (reloc_type == BFD_RELOC_64
9092 && object_64bit))
9093 && (i.op[n].disps->X_op == O_symbol
9094 || (i.op[n].disps->X_op == O_add
9095 && ((symbol_get_value_expression
9096 (i.op[n].disps->X_op_symbol)->X_op)
9097 == O_subtract))))
9098 || reloc_type == BFD_RELOC_32_PCREL))
9099 {
9100 if (!object_64bit)
9101 {
9102 reloc_type = BFD_RELOC_386_GOTPC;
9103 i.has_gotpc_tls_reloc = TRUE;
9104 i.op[n].imms->X_add_number +=
9105 encoding_length (insn_start_frag, insn_start_off, p);
9106 }
9107 else if (reloc_type == BFD_RELOC_64)
9108 reloc_type = BFD_RELOC_X86_64_GOTPC64;
9109 else
9110 /* Don't do the adjustment for x86-64, as there
9111 the pcrel addressing is relative to the _next_
9112 insn, and that is taken care of in other code. */
9113 reloc_type = BFD_RELOC_X86_64_GOTPC32;
9114 }
9115 else if (align_branch_power)
9116 {
9117 switch (reloc_type)
9118 {
9119 case BFD_RELOC_386_TLS_GD:
9120 case BFD_RELOC_386_TLS_LDM:
9121 case BFD_RELOC_386_TLS_IE:
9122 case BFD_RELOC_386_TLS_IE_32:
9123 case BFD_RELOC_386_TLS_GOTIE:
9124 case BFD_RELOC_386_TLS_GOTDESC:
9125 case BFD_RELOC_386_TLS_DESC_CALL:
9126 case BFD_RELOC_X86_64_TLSGD:
9127 case BFD_RELOC_X86_64_TLSLD:
9128 case BFD_RELOC_X86_64_GOTTPOFF:
9129 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9130 case BFD_RELOC_X86_64_TLSDESC_CALL:
9131 i.has_gotpc_tls_reloc = TRUE;
9132 default:
9133 break;
9134 }
9135 }
9136 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal,
9137 size, i.op[n].disps, pcrel,
9138 reloc_type);
9139 /* Check for "call/jmp *mem", "mov mem, %reg",
9140 "test %reg, mem" and "binop mem, %reg" where binop
9141 is one of adc, add, and, cmp, or, sbb, sub, xor
9142 instructions without data prefix. Always generate
9143 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
9144 if (i.prefix[DATA_PREFIX] == 0
9145 && (generate_relax_relocations
9146 || (!object_64bit
9147 && i.rm.mode == 0
9148 && i.rm.regmem == 5))
9149 && (i.rm.mode == 2
9150 || (i.rm.mode == 0 && i.rm.regmem == 5))
9151 && !is_any_vex_encoding(&i.tm)
9152 && ((i.operands == 1
9153 && i.tm.base_opcode == 0xff
9154 && (i.rm.reg == 2 || i.rm.reg == 4))
9155 || (i.operands == 2
9156 && (i.tm.base_opcode == 0x8b
9157 || i.tm.base_opcode == 0x85
9158 || (i.tm.base_opcode & ~0x38) == 0x03))))
9159 {
9160 if (object_64bit)
9161 {
9162 fixP->fx_tcbit = i.rex != 0;
9163 if (i.base_reg
9164 && (i.base_reg->reg_num == RegIP))
9165 fixP->fx_tcbit2 = 1;
9166 }
9167 else
9168 fixP->fx_tcbit2 = 1;
9169 }
9170 }
9171 }
9172 }
9173 }
9174
9175 static void
9176 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
9177 {
9178 char *p;
9179 unsigned int n;
9180
9181 for (n = 0; n < i.operands; n++)
9182 {
9183 /* Skip the SAE/RC immediate operand in EVEX; it is already handled. */
9184 if (i.rounding && (int) n == i.rounding->operand)
9185 continue;
9186
9187 if (operand_type_check (i.types[n], imm))
9188 {
9189 if (i.op[n].imms->X_op == O_constant)
9190 {
9191 int size = imm_size (n);
9192 offsetT val;
9193
9194 val = offset_in_range (i.op[n].imms->X_add_number,
9195 size);
9196 p = frag_more (size);
9197 md_number_to_chars (p, val, size);
9198 }
9199 else
9200 {
9201 /* Not absolute_section.
9202 We need a 32-bit fixup (8-bit non-absolute
9203 immediates aren't supported). Try to support
9204 other sizes ... */
9205 enum bfd_reloc_code_real reloc_type;
9206 int size = imm_size (n);
9207 int sign;
9208
9209 if (i.types[n].bitfield.imm32s
9210 && (i.suffix == QWORD_MNEM_SUFFIX
9211 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
9212 sign = 1;
9213 else
9214 sign = 0;
9215
9216 p = frag_more (size);
9217 reloc_type = reloc (size, 0, sign, i.reloc[n]);
9218
9219 /* This is tough to explain. We end up with this one if we
9220 * have operands that look like
9221 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
9222 * obtain the absolute address of the GOT, and it is strongly
9223 * preferable from a performance point of view to avoid using
9224 * a runtime relocation for this. The actual sequence of
9225 * instructions often looks something like:
9226 *
9227 * call .L66
9228 * .L66:
9229 * popl %ebx
9230 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
9231 *
9232 * The call and pop essentially return the absolute address
9233 * of the label .L66 and store it in %ebx. The linker itself
9234 * will ultimately change the first operand of the addl so
9235 * that %ebx points to the GOT, but to keep things simple, the
9236 * .o file must have this operand set so that it generates not
9237 * the absolute address of .L66, but the absolute address of
9238 * itself. This allows the linker to simply treat a GOTPC
9239 * relocation as asking for a pcrel offset to the GOT to be
9240 * added in, and the addend of the relocation is stored in the
9241 * operand field for the instruction itself.
9242 *
9243 * Our job here is to fix the operand so that it would add
9244 * the correct offset so that %ebx would point to itself. The
9245 * thing that is tricky is that .-.L66 will point to the
9246 * beginning of the instruction, so we need to further modify
9247 * the operand so that it will point to itself. There are
9248 * other cases where you have something like:
9249 *
9250 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
9251 *
9252 * and here no correction would be required. Internally in
9253 * the assembler we treat operands of this form as not being
9254 * pcrel since the '.' is explicitly mentioned, and I wonder
9255 * whether it would simplify matters to do it this way. Who
9256 * knows. In earlier versions of the PIC patches, the
9257 * pcrel_adjust field was used to store the correction, but
9258 * since the expression is not pcrel, I felt it would be
9259 * confusing to do it this way. */
9260
9261 if ((reloc_type == BFD_RELOC_32
9262 || reloc_type == BFD_RELOC_X86_64_32S
9263 || reloc_type == BFD_RELOC_64)
9264 && GOT_symbol
9265 && GOT_symbol == i.op[n].imms->X_add_symbol
9266 && (i.op[n].imms->X_op == O_symbol
9267 || (i.op[n].imms->X_op == O_add
9268 && ((symbol_get_value_expression
9269 (i.op[n].imms->X_op_symbol)->X_op)
9270 == O_subtract))))
9271 {
9272 if (!object_64bit)
9273 reloc_type = BFD_RELOC_386_GOTPC;
9274 else if (size == 4)
9275 reloc_type = BFD_RELOC_X86_64_GOTPC32;
9276 else if (size == 8)
9277 reloc_type = BFD_RELOC_X86_64_GOTPC64;
9278 i.has_gotpc_tls_reloc = TRUE;
9279 i.op[n].imms->X_add_number +=
9280 encoding_length (insn_start_frag, insn_start_off, p);
9281 }
9282 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
9283 i.op[n].imms, 0, reloc_type);
9284 }
9285 }
9286 }
9287 }
9288 \f
9289 /* x86_cons_fix_new is called via the expression parsing code when a
9290 reloc is needed. We use this hook to get the correct .got reloc. */
9291 static int cons_sign = -1;
9292
9293 void
9294 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
9295 expressionS *exp, bfd_reloc_code_real_type r)
9296 {
9297 r = reloc (len, 0, cons_sign, r);
9298
9299 #ifdef TE_PE
9300 if (exp->X_op == O_secrel)
9301 {
9302 exp->X_op = O_symbol;
9303 r = BFD_RELOC_32_SECREL;
9304 }
9305 #endif
9306
9307 fix_new_exp (frag, off, len, exp, 0, r);
9308 }
9309
9310 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
9311 purpose of the `.dc.a' internal pseudo-op. */
9312
9313 int
9314 x86_address_bytes (void)
9315 {
9316 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
9317 return 4;
9318 return stdoutput->arch_info->bits_per_address / 8;
9319 }
9320
9321 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
9322 || defined (LEX_AT)
9323 # define lex_got(reloc, adjust, types) NULL
9324 #else
9325 /* Parse operands of the form
9326 <symbol>@GOTOFF+<nnn>
9327 and similar .plt or .got references.
9328
9329 If we find one, set up the correct relocation in RELOC and copy the
9330 input string, minus the `@GOTOFF' into a malloc'd buffer for
9331 parsing by the calling routine. Return this buffer, and if ADJUST
9332 is non-null set it to the length of the string we removed from the
9333 input line. Otherwise return NULL. */
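/* For instance, given `foo@GOTOFF+4' in 32-bit mode this returns the
buffer "foo +4" with *REL set to BFD_RELOC_386_GOTOFF and *ADJUST set
to 6, the length of the stripped "GOTOFF" token. */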
9334 static char *
9335 lex_got (enum bfd_reloc_code_real *rel,
9336 int *adjust,
9337 i386_operand_type *types)
9338 {
9339 /* Some of the relocations depend on the size of the field that is to
9340 be relocated. But in our callers i386_immediate and i386_displacement
9341 we don't yet know the operand size (this will be set by insn
9342 matching). Hence we record the word32 relocation here,
9343 and adjust the reloc according to the real size in reloc(). */
9344 static const struct {
9345 const char *str;
9346 int len;
9347 const enum bfd_reloc_code_real rel[2];
9348 const i386_operand_type types64;
9349 } gotrel[] = {
9350 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9351 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32,
9352 BFD_RELOC_SIZE32 },
9353 OPERAND_TYPE_IMM32_64 },
9354 #endif
9355 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
9356 BFD_RELOC_X86_64_PLTOFF64 },
9357 OPERAND_TYPE_IMM64 },
9358 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
9359 BFD_RELOC_X86_64_PLT32 },
9360 OPERAND_TYPE_IMM32_32S_DISP32 },
9361 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
9362 BFD_RELOC_X86_64_GOTPLT64 },
9363 OPERAND_TYPE_IMM64_DISP64 },
9364 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
9365 BFD_RELOC_X86_64_GOTOFF64 },
9366 OPERAND_TYPE_IMM64_DISP64 },
9367 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
9368 BFD_RELOC_X86_64_GOTPCREL },
9369 OPERAND_TYPE_IMM32_32S_DISP32 },
9370 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
9371 BFD_RELOC_X86_64_TLSGD },
9372 OPERAND_TYPE_IMM32_32S_DISP32 },
9373 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
9374 _dummy_first_bfd_reloc_code_real },
9375 OPERAND_TYPE_NONE },
9376 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
9377 BFD_RELOC_X86_64_TLSLD },
9378 OPERAND_TYPE_IMM32_32S_DISP32 },
9379 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
9380 BFD_RELOC_X86_64_GOTTPOFF },
9381 OPERAND_TYPE_IMM32_32S_DISP32 },
9382 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
9383 BFD_RELOC_X86_64_TPOFF32 },
9384 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
9385 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
9386 _dummy_first_bfd_reloc_code_real },
9387 OPERAND_TYPE_NONE },
9388 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
9389 BFD_RELOC_X86_64_DTPOFF32 },
9390 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
9391 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
9392 _dummy_first_bfd_reloc_code_real },
9393 OPERAND_TYPE_NONE },
9394 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
9395 _dummy_first_bfd_reloc_code_real },
9396 OPERAND_TYPE_NONE },
9397 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
9398 BFD_RELOC_X86_64_GOT32 },
9399 OPERAND_TYPE_IMM32_32S_64_DISP32 },
9400 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
9401 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
9402 OPERAND_TYPE_IMM32_32S_DISP32 },
9403 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
9404 BFD_RELOC_X86_64_TLSDESC_CALL },
9405 OPERAND_TYPE_IMM32_32S_DISP32 },
9406 };
9407 char *cp;
9408 unsigned int j;
9409
9410 #if defined (OBJ_MAYBE_ELF)
9411 if (!IS_ELF)
9412 return NULL;
9413 #endif
9414
9415 for (cp = input_line_pointer; *cp != '@'; cp++)
9416 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
9417 return NULL;
9418
9419 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
9420 {
9421 int len = gotrel[j].len;
9422 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
9423 {
9424 if (gotrel[j].rel[object_64bit] != 0)
9425 {
9426 int first, second;
9427 char *tmpbuf, *past_reloc;
9428
9429 *rel = gotrel[j].rel[object_64bit];
9430
9431 if (types)
9432 {
9433 if (flag_code != CODE_64BIT)
9434 {
9435 types->bitfield.imm32 = 1;
9436 types->bitfield.disp32 = 1;
9437 }
9438 else
9439 *types = gotrel[j].types64;
9440 }
9441
9442 if (j != 0 && GOT_symbol == NULL)
9443 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
9444
9445 /* The length of the first part of our input line. */
9446 first = cp - input_line_pointer;
9447
9448 /* The second part goes from after the reloc token until
9449 (and including) an end_of_line char or comma. */
9450 past_reloc = cp + 1 + len;
9451 cp = past_reloc;
9452 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
9453 ++cp;
9454 second = cp + 1 - past_reloc;
9455
9456 /* Allocate and copy string. The trailing NUL shouldn't
9457 be necessary, but be safe. */
9458 tmpbuf = XNEWVEC (char, first + second + 2);
9459 memcpy (tmpbuf, input_line_pointer, first);
9460 if (second != 0 && *past_reloc != ' ')
9461 /* Replace the relocation token with ' ', so that
9462 errors like foo@GOTOFF1 will be detected. */
9463 tmpbuf[first++] = ' ';
9464 else
9465 /* Increment length by 1 if the relocation token is
9466 removed. */
9467 len++;
9468 if (adjust)
9469 *adjust = len;
9470 memcpy (tmpbuf + first, past_reloc, second);
9471 tmpbuf[first + second] = '\0';
9472 return tmpbuf;
9473 }
9474
9475 as_bad (_("@%s reloc is not supported with %d-bit output format"),
9476 gotrel[j].str, 1 << (5 + object_64bit));
9477 return NULL;
9478 }
9479 }
9480
9481 /* Might be a symbol version string. Don't as_bad here. */
9482 return NULL;
9483 }
9484 #endif
9485
9486 #ifdef TE_PE
9487 #ifdef lex_got
9488 #undef lex_got
9489 #endif
9490 /* Parse operands of the form
9491 <symbol>@SECREL32+<nnn>
9492
9493 If we find one, set up the correct relocation in RELOC and copy the
9494 input string, minus the `@SECREL32' into a malloc'd buffer for
9495 parsing by the calling routine. Return this buffer, and if ADJUST
9496 is non-null set it to the length of the string we removed from the
9497 input line. Otherwise return NULL.
9498
9499 This function is copied from the ELF version above, adjusted for PE targets. */
9500
9501 static char *
9502 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
9503 int *adjust ATTRIBUTE_UNUSED,
9504 i386_operand_type *types)
9505 {
9506 static const struct
9507 {
9508 const char *str;
9509 int len;
9510 const enum bfd_reloc_code_real rel[2];
9511 const i386_operand_type types64;
9512 }
9513 gotrel[] =
9514 {
9515 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
9516 BFD_RELOC_32_SECREL },
9517 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
9518 };
9519
9520 char *cp;
9521 unsigned j;
9522
9523 for (cp = input_line_pointer; *cp != '@'; cp++)
9524 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
9525 return NULL;
9526
9527 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
9528 {
9529 int len = gotrel[j].len;
9530
9531 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
9532 {
9533 if (gotrel[j].rel[object_64bit] != 0)
9534 {
9535 int first, second;
9536 char *tmpbuf, *past_reloc;
9537
9538 *rel = gotrel[j].rel[object_64bit];
9539 if (adjust)
9540 *adjust = len;
9541
9542 if (types)
9543 {
9544 if (flag_code != CODE_64BIT)
9545 {
9546 types->bitfield.imm32 = 1;
9547 types->bitfield.disp32 = 1;
9548 }
9549 else
9550 *types = gotrel[j].types64;
9551 }
9552
9553 /* The length of the first part of our input line. */
9554 first = cp - input_line_pointer;
9555
9556 /* The second part goes from after the reloc token until
9557 (and including) an end_of_line char or comma. */
9558 past_reloc = cp + 1 + len;
9559 cp = past_reloc;
9560 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
9561 ++cp;
9562 second = cp + 1 - past_reloc;
9563
9564 /* Allocate and copy string. The trailing NUL shouldn't
9565 be necessary, but be safe. */
9566 tmpbuf = XNEWVEC (char, first + second + 2);
9567 memcpy (tmpbuf, input_line_pointer, first);
9568 if (second != 0 && *past_reloc != ' ')
9569 /* Replace the relocation token with ' ', so that
9570 errors like foo@SECREL321 will be detected. */
9571 tmpbuf[first++] = ' ';
9572 memcpy (tmpbuf + first, past_reloc, second);
9573 tmpbuf[first + second] = '\0';
9574 return tmpbuf;
9575 }
9576
9577 as_bad (_("@%s reloc is not supported with %d-bit output format"),
9578 gotrel[j].str, 1 << (5 + object_64bit));
9579 return NULL;
9580 }
9581 }
9582
9583 /* Might be a symbol version string. Don't as_bad here. */
9584 return NULL;
9585 }
9586
9587 #endif /* TE_PE */
9588
9589 bfd_reloc_code_real_type
9590 x86_cons (expressionS *exp, int size)
9591 {
9592 bfd_reloc_code_real_type got_reloc = NO_RELOC;
9593
9594 intel_syntax = -intel_syntax;
9595
9596 exp->X_md = 0;
9597 if (size == 4 || (object_64bit && size == 8))
9598 {
9599 /* Handle @GOTOFF and the like in an expression. */
9600 char *save;
9601 char *gotfree_input_line;
9602 int adjust = 0;
9603
9604 save = input_line_pointer;
9605 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
9606 if (gotfree_input_line)
9607 input_line_pointer = gotfree_input_line;
9608
9609 expression (exp);
9610
9611 if (gotfree_input_line)
9612 {
9613 /* expression () has merrily parsed up to the end of line,
9614 or a comma - in the wrong buffer. Transfer how far
9615 input_line_pointer has moved to the right buffer. */
9616 input_line_pointer = (save
9617 + (input_line_pointer - gotfree_input_line)
9618 + adjust);
9619 free (gotfree_input_line);
9620 if (exp->X_op == O_constant
9621 || exp->X_op == O_absent
9622 || exp->X_op == O_illegal
9623 || exp->X_op == O_register
9624 || exp->X_op == O_big)
9625 {
9626 char c = *input_line_pointer;
9627 *input_line_pointer = 0;
9628 as_bad (_("missing or invalid expression `%s'"), save);
9629 *input_line_pointer = c;
9630 }
9631 else if ((got_reloc == BFD_RELOC_386_PLT32
9632 || got_reloc == BFD_RELOC_X86_64_PLT32)
9633 && exp->X_op != O_symbol)
9634 {
9635 char c = *input_line_pointer;
9636 *input_line_pointer = 0;
9637 as_bad (_("invalid PLT expression `%s'"), save);
9638 *input_line_pointer = c;
9639 }
9640 }
9641 }
9642 else
9643 expression (exp);
9644
9645 intel_syntax = -intel_syntax;
9646
9647 if (intel_syntax)
9648 i386_intel_simplify (exp);
9649
9650 return got_reloc;
9651 }
9652
9653 static void
9654 signed_cons (int size)
9655 {
9656 if (flag_code == CODE_64BIT)
9657 cons_sign = 1;
9658 cons (size);
9659 cons_sign = -1;
9660 }
9661
9662 #ifdef TE_PE
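/* Implement the .secrel32 directive.  A usage sketch with
   hypothetical symbols: `.secrel32 start, end' emits one 32-bit
   section-relative value per comma-separated expression.  */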
9663 static void
9664 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
9665 {
9666 expressionS exp;
9667
9668 do
9669 {
9670 expression (&exp);
9671 if (exp.X_op == O_symbol)
9672 exp.X_op = O_secrel;
9673
9674 emit_expr (&exp, 4);
9675 }
9676 while (*input_line_pointer++ == ',');
9677
9678 input_line_pointer--;
9679 demand_empty_rest_of_line ();
9680 }
9681 #endif
9682
9683 /* Handle Vector operations. */
9684
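/* A sketch of the forms parsed below, with hypothetical operands:
   `{1to16}' requests an embedded broadcast, `{%k1}' selects a write
   mask, and `{z}' zeroing-masking, e.g. in AT&T syntax
     vaddps (%rax){1to16}, %zmm1, %zmm2{%k1}{z}  */
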
9685 static char *
9686 check_VecOperations (char *op_string, char *op_end)
9687 {
9688 const reg_entry *mask;
9689 const char *saved;
9690 char *end_op;
9691
9692 while (*op_string
9693 && (op_end == NULL || op_string < op_end))
9694 {
9695 saved = op_string;
9696 if (*op_string == '{')
9697 {
9698 op_string++;
9699
9700 /* Check broadcasts. */
9701 if (strncmp (op_string, "1to", 3) == 0)
9702 {
9703 int bcst_type;
9704
9705 if (i.broadcast)
9706 goto duplicated_vec_op;
9707
9708 op_string += 3;
9709 if (*op_string == '8')
9710 bcst_type = 8;
9711 else if (*op_string == '4')
9712 bcst_type = 4;
9713 else if (*op_string == '2')
9714 bcst_type = 2;
9715 else if (*op_string == '1'
9716 && *(op_string+1) == '6')
9717 {
9718 bcst_type = 16;
9719 op_string++;
9720 }
9721 else
9722 {
9723 as_bad (_("Unsupported broadcast: `%s'"), saved);
9724 return NULL;
9725 }
9726 op_string++;
9727
9728 broadcast_op.type = bcst_type;
9729 broadcast_op.operand = this_operand;
9730 broadcast_op.bytes = 0;
9731 i.broadcast = &broadcast_op;
9732 }
9733 /* Check masking operation. */
9734 else if ((mask = parse_register (op_string, &end_op)) != NULL)
9735 {
9736 /* k0 can't be used for write mask. */
9737 if (mask->reg_type.bitfield.class != RegMask || !mask->reg_num)
9738 {
9739 as_bad (_("`%s%s' can't be used for write mask"),
9740 register_prefix, mask->reg_name);
9741 return NULL;
9742 }
9743
9744 if (!i.mask)
9745 {
9746 mask_op.mask = mask;
9747 mask_op.zeroing = 0;
9748 mask_op.operand = this_operand;
9749 i.mask = &mask_op;
9750 }
9751 else
9752 {
9753 if (i.mask->mask)
9754 goto duplicated_vec_op;
9755
9756 i.mask->mask = mask;
9757
9758 /* Only "{z}" is allowed here. No need to check
9759 zeroing mask explicitly. */
9760 if (i.mask->operand != this_operand)
9761 {
9762 as_bad (_("invalid write mask `%s'"), saved);
9763 return NULL;
9764 }
9765 }
9766
9767 op_string = end_op;
9768 }
9769 /* Check zeroing-flag for masking operation. */
9770 else if (*op_string == 'z')
9771 {
9772 if (!i.mask)
9773 {
9774 mask_op.mask = NULL;
9775 mask_op.zeroing = 1;
9776 mask_op.operand = this_operand;
9777 i.mask = &mask_op;
9778 }
9779 else
9780 {
9781 if (i.mask->zeroing)
9782 {
9783 duplicated_vec_op:
9784 as_bad (_("duplicated `%s'"), saved);
9785 return NULL;
9786 }
9787
9788 i.mask->zeroing = 1;
9789
9790 /* Only "{%k}" is allowed here. No need to check mask
9791 register explicitly. */
9792 if (i.mask->operand != this_operand)
9793 {
9794 as_bad (_("invalid zeroing-masking `%s'"),
9795 saved);
9796 return NULL;
9797 }
9798 }
9799
9800 op_string++;
9801 }
9802 else
9803 goto unknown_vec_op;
9804
9805 if (*op_string != '}')
9806 {
9807 as_bad (_("missing `}' in `%s'"), saved);
9808 return NULL;
9809 }
9810 op_string++;
9811
9812 /* Strip whitespace since the addition of pseudo prefixes
9813 changed how the scrubber treats '{'. */
9814 if (is_space_char (*op_string))
9815 ++op_string;
9816
9817 continue;
9818 }
9819 unknown_vec_op:
9820 /* We don't know this one. */
9821 as_bad (_("unknown vector operation: `%s'"), saved);
9822 return NULL;
9823 }
9824
9825 if (i.mask && i.mask->zeroing && !i.mask->mask)
9826 {
9827 as_bad (_("zeroing-masking only allowed with write mask"));
9828 return NULL;
9829 }
9830
9831 return op_string;
9832 }
9833
9834 static int
9835 i386_immediate (char *imm_start)
9836 {
9837 char *save_input_line_pointer;
9838 char *gotfree_input_line;
9839 segT exp_seg = 0;
9840 expressionS *exp;
9841 i386_operand_type types;
9842
9843 operand_type_set (&types, ~0);
9844
9845 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
9846 {
9847 as_bad (_("at most %d immediate operands are allowed"),
9848 MAX_IMMEDIATE_OPERANDS);
9849 return 0;
9850 }
9851
9852 exp = &im_expressions[i.imm_operands++];
9853 i.op[this_operand].imms = exp;
9854
9855 if (is_space_char (*imm_start))
9856 ++imm_start;
9857
9858 save_input_line_pointer = input_line_pointer;
9859 input_line_pointer = imm_start;
9860
9861 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
9862 if (gotfree_input_line)
9863 input_line_pointer = gotfree_input_line;
9864
9865 exp_seg = expression (exp);
9866
9867 SKIP_WHITESPACE ();
9868
9869 /* Handle vector operations. */
9870 if (*input_line_pointer == '{')
9871 {
9872 input_line_pointer = check_VecOperations (input_line_pointer,
9873 NULL);
9874 if (input_line_pointer == NULL)
9875 return 0;
9876 }
9877
9878 if (*input_line_pointer)
9879 as_bad (_("junk `%s' after expression"), input_line_pointer);
9880
9881 input_line_pointer = save_input_line_pointer;
9882 if (gotfree_input_line)
9883 {
9884 free (gotfree_input_line);
9885
9886 if (exp->X_op == O_constant || exp->X_op == O_register)
9887 exp->X_op = O_illegal;
9888 }
9889
9890 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
9891 }
9892
9893 static int
9894 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
9895 i386_operand_type types, const char *imm_start)
9896 {
9897 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
9898 {
9899 if (imm_start)
9900 as_bad (_("missing or invalid immediate expression `%s'"),
9901 imm_start);
9902 return 0;
9903 }
9904 else if (exp->X_op == O_constant)
9905 {
9906 /* Size it properly later. */
9907 i.types[this_operand].bitfield.imm64 = 1;
9908 /* If not 64bit, sign extend val. */
9909 if (flag_code != CODE_64BIT
9910 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
9911 exp->X_add_number
9912 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
9913 }
9914 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9915 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
9916 && exp_seg != absolute_section
9917 && exp_seg != text_section
9918 && exp_seg != data_section
9919 && exp_seg != bss_section
9920 && exp_seg != undefined_section
9921 && !bfd_is_com_section (exp_seg))
9922 {
9923 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
9924 return 0;
9925 }
9926 #endif
9927 else if (!intel_syntax && exp_seg == reg_section)
9928 {
9929 if (imm_start)
9930 as_bad (_("illegal immediate register operand %s"), imm_start);
9931 return 0;
9932 }
9933 else
9934 {
9935 /* This is an address. The size of the address will be
9936 determined later, depending on destination register,
9937 suffix, or the default for the section. */
9938 i.types[this_operand].bitfield.imm8 = 1;
9939 i.types[this_operand].bitfield.imm16 = 1;
9940 i.types[this_operand].bitfield.imm32 = 1;
9941 i.types[this_operand].bitfield.imm32s = 1;
9942 i.types[this_operand].bitfield.imm64 = 1;
9943 i.types[this_operand] = operand_type_and (i.types[this_operand],
9944 types);
9945 }
9946
9947 return 1;
9948 }
9949
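/* For example, in the AT&T operand `(%ebx,%esi,4)' SCALE points at
   the "4", and i.log2_scale_factor ends up as 2; scales of 1, 2, 4
   and 8 map to log2 values 0, 1, 2 and 3 respectively.  */
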
9950 static char *
9951 i386_scale (char *scale)
9952 {
9953 offsetT val;
9954 char *save = input_line_pointer;
9955
9956 input_line_pointer = scale;
9957 val = get_absolute_expression ();
9958
9959 switch (val)
9960 {
9961 case 1:
9962 i.log2_scale_factor = 0;
9963 break;
9964 case 2:
9965 i.log2_scale_factor = 1;
9966 break;
9967 case 4:
9968 i.log2_scale_factor = 2;
9969 break;
9970 case 8:
9971 i.log2_scale_factor = 3;
9972 break;
9973 default:
9974 {
9975 char sep = *input_line_pointer;
9976
9977 *input_line_pointer = '\0';
9978 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
9979 scale);
9980 *input_line_pointer = sep;
9981 input_line_pointer = save;
9982 return NULL;
9983 }
9984 }
9985 if (i.log2_scale_factor != 0 && i.index_reg == 0)
9986 {
9987 as_warn (_("scale factor of %d without an index register"),
9988 1 << i.log2_scale_factor);
9989 i.log2_scale_factor = 0;
9990 }
9991 scale = input_line_pointer;
9992 input_line_pointer = save;
9993 return scale;
9994 }
9995
9996 static int
9997 i386_displacement (char *disp_start, char *disp_end)
9998 {
9999 expressionS *exp;
10000 segT exp_seg = 0;
10001 char *save_input_line_pointer;
10002 char *gotfree_input_line;
10003 int override;
10004 i386_operand_type bigdisp, types = anydisp;
10005 int ret;
10006
10007 if (i.disp_operands == MAX_MEMORY_OPERANDS)
10008 {
10009 as_bad (_("at most %d displacement operands are allowed"),
10010 MAX_MEMORY_OPERANDS);
10011 return 0;
10012 }
10013
10014 operand_type_set (&bigdisp, 0);
10015 if (i.jumpabsolute
10016 || i.types[this_operand].bitfield.baseindex
10017 || (current_templates->start->opcode_modifier.jump != JUMP
10018 && current_templates->start->opcode_modifier.jump != JUMP_DWORD))
10019 {
10020 i386_addressing_mode ();
10021 override = (i.prefix[ADDR_PREFIX] != 0);
10022 if (flag_code == CODE_64BIT)
10023 {
10024 if (!override)
10025 {
10026 bigdisp.bitfield.disp32s = 1;
10027 bigdisp.bitfield.disp64 = 1;
10028 }
10029 else
10030 bigdisp.bitfield.disp32 = 1;
10031 }
10032 else if ((flag_code == CODE_16BIT) ^ override)
10033 bigdisp.bitfield.disp16 = 1;
10034 else
10035 bigdisp.bitfield.disp32 = 1;
10036 }
10037 else
10038 {
10039 /* For PC-relative branches, the width of the displacement may be
10040 dependent upon data size, but is never dependent upon address size.
10041 Also make sure to not unintentionally match against a non-PC-relative
10042 branch template. */
10043 static templates aux_templates;
10044 const insn_template *t = current_templates->start;
10045 bfd_boolean has_intel64 = FALSE;
10046
10047 aux_templates.start = t;
10048 while (++t < current_templates->end)
10049 {
10050 if (t->opcode_modifier.jump
10051 != current_templates->start->opcode_modifier.jump)
10052 break;
10053 	  if (t->opcode_modifier.isa64 >= INTEL64)
10054 has_intel64 = TRUE;
10055 }
10056 if (t < current_templates->end)
10057 {
10058 aux_templates.end = t;
10059 current_templates = &aux_templates;
10060 }
10061
10062 override = (i.prefix[DATA_PREFIX] != 0);
10063 if (flag_code == CODE_64BIT)
10064 {
10065 if ((override || i.suffix == WORD_MNEM_SUFFIX)
10066 && (!intel64 || !has_intel64))
10067 bigdisp.bitfield.disp16 = 1;
10068 else
10069 bigdisp.bitfield.disp32s = 1;
10070 }
10071 else
10072 {
10073 if (!override)
10074 override = (i.suffix == (flag_code != CODE_16BIT
10075 ? WORD_MNEM_SUFFIX
10076 : LONG_MNEM_SUFFIX));
10077 bigdisp.bitfield.disp32 = 1;
10078 if ((flag_code == CODE_16BIT) ^ override)
10079 {
10080 bigdisp.bitfield.disp32 = 0;
10081 bigdisp.bitfield.disp16 = 1;
10082 }
10083 }
10084 }
10085 i.types[this_operand] = operand_type_or (i.types[this_operand],
10086 bigdisp);
10087
10088 exp = &disp_expressions[i.disp_operands];
10089 i.op[this_operand].disps = exp;
10090 i.disp_operands++;
10091 save_input_line_pointer = input_line_pointer;
10092 input_line_pointer = disp_start;
10093 END_STRING_AND_SAVE (disp_end);
10094
10095 #ifndef GCC_ASM_O_HACK
10096 #define GCC_ASM_O_HACK 0
10097 #endif
10098 #if GCC_ASM_O_HACK
10099 END_STRING_AND_SAVE (disp_end + 1);
10100   if (i.types[this_operand].bitfield.baseindex
10101       && disp_end[-1] == '+')
10102 {
10103 /* This hack is to avoid a warning when using the "o"
10104 constraint within gcc asm statements.
10105 For instance:
10106
10107 #define _set_tssldt_desc(n,addr,limit,type) \
10108 __asm__ __volatile__ ( \
10109 "movw %w2,%0\n\t" \
10110 "movw %w1,2+%0\n\t" \
10111 "rorl $16,%1\n\t" \
10112 "movb %b1,4+%0\n\t" \
10113 "movb %4,5+%0\n\t" \
10114 "movb $0,6+%0\n\t" \
10115 "movb %h1,7+%0\n\t" \
10116 "rorl $16,%1" \
10117 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
10118
10119 This works great except that the output assembler ends
10120 up looking a bit weird if it turns out that there is
10121 no offset. You end up producing code that looks like:
10122
10123 #APP
10124 movw $235,(%eax)
10125 movw %dx,2+(%eax)
10126 rorl $16,%edx
10127 movb %dl,4+(%eax)
10128 movb $137,5+(%eax)
10129 movb $0,6+(%eax)
10130 movb %dh,7+(%eax)
10131 rorl $16,%edx
10132 #NO_APP
10133
10134 So here we provide the missing zero. */
10135
10136       *disp_end = '0';
10137 }
10138 #endif
10139 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
10140 if (gotfree_input_line)
10141 input_line_pointer = gotfree_input_line;
10142
10143 exp_seg = expression (exp);
10144
10145 SKIP_WHITESPACE ();
10146 if (*input_line_pointer)
10147 as_bad (_("junk `%s' after expression"), input_line_pointer);
10148 #if GCC_ASM_O_HACK
10149 RESTORE_END_STRING (disp_end + 1);
10150 #endif
10151 input_line_pointer = save_input_line_pointer;
10152 if (gotfree_input_line)
10153 {
10154 free (gotfree_input_line);
10155
10156 if (exp->X_op == O_constant || exp->X_op == O_register)
10157 exp->X_op = O_illegal;
10158 }
10159
10160 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
10161
10162 RESTORE_END_STRING (disp_end);
10163
10164 return ret;
10165 }
10166
10167 static int
10168 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
10169 i386_operand_type types, const char *disp_start)
10170 {
10171 i386_operand_type bigdisp;
10172 int ret = 1;
10173
10174 /* We do this to make sure that the section symbol is in
10175 the symbol table. We will ultimately change the relocation
10176 to be relative to the beginning of the section. */
10177 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
10178 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
10179 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
10180 {
10181 if (exp->X_op != O_symbol)
10182 goto inv_disp;
10183
10184 if (S_IS_LOCAL (exp->X_add_symbol)
10185 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
10186 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
10187 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
10188 exp->X_op = O_subtract;
10189 exp->X_op_symbol = GOT_symbol;
10190 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
10191 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
10192 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
10193 i.reloc[this_operand] = BFD_RELOC_64;
10194 else
10195 i.reloc[this_operand] = BFD_RELOC_32;
10196 }
10197
10198 else if (exp->X_op == O_absent
10199 || exp->X_op == O_illegal
10200 || exp->X_op == O_big)
10201 {
10202 inv_disp:
10203 as_bad (_("missing or invalid displacement expression `%s'"),
10204 disp_start);
10205 ret = 0;
10206 }
10207
10208 else if (flag_code == CODE_64BIT
10209 && !i.prefix[ADDR_PREFIX]
10210 && exp->X_op == O_constant)
10211 {
10212       /* Since the displacement is sign-extended to 64 bits, don't allow
10213 	 disp32, and turn off disp32s if the value is out of range.  */
10214 i.types[this_operand].bitfield.disp32 = 0;
10215 if (!fits_in_signed_long (exp->X_add_number))
10216 {
10217 i.types[this_operand].bitfield.disp32s = 0;
10218 if (i.types[this_operand].bitfield.baseindex)
10219 {
10220 	      as_bad (_("0x%lx out of range of signed 32bit displacement"),
10221 (long) exp->X_add_number);
10222 ret = 0;
10223 }
10224 }
10225 }
10226
10227 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10228 else if (exp->X_op != O_constant
10229 && OUTPUT_FLAVOR == bfd_target_aout_flavour
10230 && exp_seg != absolute_section
10231 && exp_seg != text_section
10232 && exp_seg != data_section
10233 && exp_seg != bss_section
10234 && exp_seg != undefined_section
10235 && !bfd_is_com_section (exp_seg))
10236 {
10237 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
10238 ret = 0;
10239 }
10240 #endif
10241
10242 if (current_templates->start->opcode_modifier.jump == JUMP_BYTE
10243 /* Constants get taken care of by optimize_disp(). */
10244 && exp->X_op != O_constant)
10245 i.types[this_operand].bitfield.disp8 = 1;
10246
10247 /* Check if this is a displacement only operand. */
10248 bigdisp = i.types[this_operand];
10249 bigdisp.bitfield.disp8 = 0;
10250 bigdisp.bitfield.disp16 = 0;
10251 bigdisp.bitfield.disp32 = 0;
10252 bigdisp.bitfield.disp32s = 0;
10253 bigdisp.bitfield.disp64 = 0;
10254 if (operand_type_all_zero (&bigdisp))
10255 i.types[this_operand] = operand_type_and (i.types[this_operand],
10256 types);
10257
10258 return ret;
10259 }
10260
10261 /* Return the active addressing mode, taking address override and
10262 registers forming the address into consideration. Update the
10263 address override prefix if necessary. */
10264
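/* For instance (a hypothetical case): `mov (%bx), %ax' assembled in
   32-bit mode forms its address from 16-bit registers, so the 0x67
   address-size prefix is inferred below and any Disp16/Disp32 type
   is flipped accordingly.  */
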
10265 static enum flag_code
10266 i386_addressing_mode (void)
10267 {
10268 enum flag_code addr_mode;
10269
10270 if (i.prefix[ADDR_PREFIX])
10271 addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
10272 else
10273 {
10274 addr_mode = flag_code;
10275
10276 #if INFER_ADDR_PREFIX
10277 if (i.mem_operands == 0)
10278 {
10279 /* Infer address prefix from the first memory operand. */
10280 const reg_entry *addr_reg = i.base_reg;
10281
10282 if (addr_reg == NULL)
10283 addr_reg = i.index_reg;
10284
10285 if (addr_reg)
10286 {
10287 if (addr_reg->reg_type.bitfield.dword)
10288 addr_mode = CODE_32BIT;
10289 else if (flag_code != CODE_64BIT
10290 && addr_reg->reg_type.bitfield.word)
10291 addr_mode = CODE_16BIT;
10292
10293 if (addr_mode != flag_code)
10294 {
10295 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
10296 i.prefixes += 1;
10297 /* Change the size of any displacement too. At most one
10298 of Disp16 or Disp32 is set.
10299 FIXME. There doesn't seem to be any real need for
10300 separate Disp16 and Disp32 flags. The same goes for
10301 Imm16 and Imm32. Removing them would probably clean
10302 up the code quite a lot. */
10303 if (flag_code != CODE_64BIT
10304 && (i.types[this_operand].bitfield.disp16
10305 || i.types[this_operand].bitfield.disp32))
10306 i.types[this_operand]
10307 = operand_type_xor (i.types[this_operand], disp16_32);
10308 }
10309 }
10310 }
10311 #endif
10312 }
10313
10314 return addr_mode;
10315 }
10316
10317 /* Make sure the memory operand we've been dealt is valid.
10318 Return 1 on success, 0 on a failure. */
10319
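/* For example, in 32-bit mode the string insn `movs' only accepts
   %esi/%edi based addresses, while `xlat' wants (%ebx); anything
   else draws one of the diagnostics below.  */
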
10320 static int
10321 i386_index_check (const char *operand_string)
10322 {
10323 const char *kind = "base/index";
10324 enum flag_code addr_mode = i386_addressing_mode ();
10325
10326 if (current_templates->start->opcode_modifier.isstring
10327 && !current_templates->start->cpu_flags.bitfield.cpupadlock
10328 && (current_templates->end[-1].opcode_modifier.isstring
10329 || i.mem_operands))
10330 {
10331 /* Memory operands of string insns are special in that they only allow
10332 a single register (rDI, rSI, or rBX) as their memory address. */
10333 const reg_entry *expected_reg;
10334 static const char *di_si[][2] =
10335 {
10336 { "esi", "edi" },
10337 { "si", "di" },
10338 { "rsi", "rdi" }
10339 };
10340 static const char *bx[] = { "ebx", "bx", "rbx" };
10341
10342 kind = "string address";
10343
10344 if (current_templates->start->opcode_modifier.repprefixok)
10345 {
10346 int es_op = current_templates->end[-1].opcode_modifier.isstring
10347 - IS_STRING_ES_OP0;
10348 int op = 0;
10349
10350 if (!current_templates->end[-1].operand_types[0].bitfield.baseindex
10351 || ((!i.mem_operands != !intel_syntax)
10352 && current_templates->end[-1].operand_types[1]
10353 .bitfield.baseindex))
10354 op = 1;
10355 expected_reg = hash_find (reg_hash, di_si[addr_mode][op == es_op]);
10356 }
10357 else
10358 expected_reg = hash_find (reg_hash, bx[addr_mode]);
10359
10360 if (i.base_reg != expected_reg
10361 || i.index_reg
10362 || operand_type_check (i.types[this_operand], disp))
10363 {
10364 /* The second memory operand must have the same size as
10365 the first one. */
10366 if (i.mem_operands
10367 && i.base_reg
10368 && !((addr_mode == CODE_64BIT
10369 && i.base_reg->reg_type.bitfield.qword)
10370 || (addr_mode == CODE_32BIT
10371 ? i.base_reg->reg_type.bitfield.dword
10372 : i.base_reg->reg_type.bitfield.word)))
10373 goto bad_address;
10374
10375 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
10376 operand_string,
10377 intel_syntax ? '[' : '(',
10378 register_prefix,
10379 expected_reg->reg_name,
10380 intel_syntax ? ']' : ')');
10381 return 1;
10382 }
10383 else
10384 return 1;
10385
10386 bad_address:
10387 as_bad (_("`%s' is not a valid %s expression"),
10388 operand_string, kind);
10389 return 0;
10390 }
10391 else
10392 {
10393 if (addr_mode != CODE_16BIT)
10394 {
10395 /* 32-bit/64-bit checks. */
10396 if ((i.base_reg
10397 && ((addr_mode == CODE_64BIT
10398 ? !i.base_reg->reg_type.bitfield.qword
10399 : !i.base_reg->reg_type.bitfield.dword)
10400 || (i.index_reg && i.base_reg->reg_num == RegIP)
10401 || i.base_reg->reg_num == RegIZ))
10402 || (i.index_reg
10403 && !i.index_reg->reg_type.bitfield.xmmword
10404 && !i.index_reg->reg_type.bitfield.ymmword
10405 && !i.index_reg->reg_type.bitfield.zmmword
10406 && ((addr_mode == CODE_64BIT
10407 ? !i.index_reg->reg_type.bitfield.qword
10408 : !i.index_reg->reg_type.bitfield.dword)
10409 || !i.index_reg->reg_type.bitfield.baseindex)))
10410 goto bad_address;
10411
10412 /* bndmk, bndldx, and bndstx have special restrictions. */
10413 if (current_templates->start->base_opcode == 0xf30f1b
10414 || (current_templates->start->base_opcode & ~1) == 0x0f1a)
10415 {
10416 /* They cannot use RIP-relative addressing. */
10417 if (i.base_reg && i.base_reg->reg_num == RegIP)
10418 {
10419 as_bad (_("`%s' cannot be used here"), operand_string);
10420 return 0;
10421 }
10422
10423 /* bndldx and bndstx ignore their scale factor. */
10424 if (current_templates->start->base_opcode != 0xf30f1b
10425 && i.log2_scale_factor)
10426 as_warn (_("register scaling is being ignored here"));
10427 }
10428 }
10429 else
10430 {
10431 /* 16-bit checks. */
10432 if ((i.base_reg
10433 && (!i.base_reg->reg_type.bitfield.word
10434 || !i.base_reg->reg_type.bitfield.baseindex))
10435 || (i.index_reg
10436 && (!i.index_reg->reg_type.bitfield.word
10437 || !i.index_reg->reg_type.bitfield.baseindex
10438 || !(i.base_reg
10439 && i.base_reg->reg_num < 6
10440 && i.index_reg->reg_num >= 6
10441 && i.log2_scale_factor == 0))))
10442 goto bad_address;
10443 }
10444 }
10445 return 1;
10446 }
10447
10448 /* Handle vector immediates. */
10449
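/* A sketch of the accepted forms (operands hypothetical):
   `vaddps {rn-sae}, %zmm1, %zmm2, %zmm3' carries the rounding
   control `{rn-sae}' as a pseudo-immediate, and `{sae}' alone just
   suppresses exceptions; the names are matched against
   RC_NamesTable.  */
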
10450 static int
10451 RC_SAE_immediate (const char *imm_start)
10452 {
10453 unsigned int match_found, j;
10454 const char *pstr = imm_start;
10455 expressionS *exp;
10456
10457 if (*pstr != '{')
10458 return 0;
10459
10460 pstr++;
10461 match_found = 0;
10462 for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
10463 {
10464 if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
10465 {
10466 if (!i.rounding)
10467 {
10468 rc_op.type = RC_NamesTable[j].type;
10469 rc_op.operand = this_operand;
10470 i.rounding = &rc_op;
10471 }
10472 else
10473 {
10474 as_bad (_("duplicated `%s'"), imm_start);
10475 return 0;
10476 }
10477 pstr += RC_NamesTable[j].len;
10478 match_found = 1;
10479 break;
10480 }
10481 }
10482 if (!match_found)
10483 return 0;
10484
10485 if (*pstr++ != '}')
10486 {
10487 as_bad (_("Missing '}': '%s'"), imm_start);
10488 return 0;
10489 }
10490   /* RC/SAE immediate string should contain nothing more.  */
10491 if (*pstr != 0)
10492 {
10493 as_bad (_("Junk after '}': '%s'"), imm_start);
10494 return 0;
10495 }
10496
10497 exp = &im_expressions[i.imm_operands++];
10498 i.op[this_operand].imms = exp;
10499
10500 exp->X_op = O_constant;
10501 exp->X_add_number = 0;
10502 exp->X_add_symbol = (symbolS *) 0;
10503 exp->X_op_symbol = (symbolS *) 0;
10504
10505 i.types[this_operand].bitfield.imm8 = 1;
10506 return 1;
10507 }
10508
10509 /* Only string instructions can have a second memory operand, so
10510 reduce current_templates to just those if it contains any. */
10511 static int
10512 maybe_adjust_templates (void)
10513 {
10514 const insn_template *t;
10515
10516 gas_assert (i.mem_operands == 1);
10517
10518 for (t = current_templates->start; t < current_templates->end; ++t)
10519 if (t->opcode_modifier.isstring)
10520 break;
10521
10522 if (t < current_templates->end)
10523 {
10524 static templates aux_templates;
10525 bfd_boolean recheck;
10526
10527 aux_templates.start = t;
10528 for (; t < current_templates->end; ++t)
10529 if (!t->opcode_modifier.isstring)
10530 break;
10531 aux_templates.end = t;
10532
10533 /* Determine whether to re-check the first memory operand. */
10534 recheck = (aux_templates.start != current_templates->start
10535 || t != current_templates->end);
10536
10537 current_templates = &aux_templates;
10538
10539 if (recheck)
10540 {
10541 i.mem_operands = 0;
10542 if (i.memop1_string != NULL
10543 && i386_index_check (i.memop1_string) == 0)
10544 return 0;
10545 i.mem_operands = 1;
10546 }
10547 }
10548
10549 return 1;
10550 }
10551
10552 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
10553 on error. */
10554
10555 static int
10556 i386_att_operand (char *operand_string)
10557 {
10558 const reg_entry *r;
10559 char *end_op;
10560 char *op_string = operand_string;
10561
10562 if (is_space_char (*op_string))
10563 ++op_string;
10564
10565 /* We check for an absolute prefix (differentiating,
10566      for example, 'jmp pc_relative_label' from 'jmp *absolute_label').  */
10567 if (*op_string == ABSOLUTE_PREFIX)
10568 {
10569 ++op_string;
10570 if (is_space_char (*op_string))
10571 ++op_string;
10572 i.jumpabsolute = TRUE;
10573 }
10574
10575 /* Check if operand is a register. */
10576 if ((r = parse_register (op_string, &end_op)) != NULL)
10577 {
10578 i386_operand_type temp;
10579
10580 /* Check for a segment override by searching for ':' after a
10581 segment register. */
10582 op_string = end_op;
10583 if (is_space_char (*op_string))
10584 ++op_string;
10585 if (*op_string == ':' && r->reg_type.bitfield.class == SReg)
10586 {
10587 switch (r->reg_num)
10588 {
10589 case 0:
10590 i.seg[i.mem_operands] = &es;
10591 break;
10592 case 1:
10593 i.seg[i.mem_operands] = &cs;
10594 break;
10595 case 2:
10596 i.seg[i.mem_operands] = &ss;
10597 break;
10598 case 3:
10599 i.seg[i.mem_operands] = &ds;
10600 break;
10601 case 4:
10602 i.seg[i.mem_operands] = &fs;
10603 break;
10604 case 5:
10605 i.seg[i.mem_operands] = &gs;
10606 break;
10607 }
10608
10609 /* Skip the ':' and whitespace. */
10610 ++op_string;
10611 if (is_space_char (*op_string))
10612 ++op_string;
10613
10614 if (!is_digit_char (*op_string)
10615 && !is_identifier_char (*op_string)
10616 && *op_string != '('
10617 && *op_string != ABSOLUTE_PREFIX)
10618 {
10619 as_bad (_("bad memory operand `%s'"), op_string);
10620 return 0;
10621 }
10622 /* Handle case of %es:*foo. */
10623 if (*op_string == ABSOLUTE_PREFIX)
10624 {
10625 ++op_string;
10626 if (is_space_char (*op_string))
10627 ++op_string;
10628 i.jumpabsolute = TRUE;
10629 }
10630 goto do_memory_reference;
10631 }
10632
10633 /* Handle vector operations. */
10634 if (*op_string == '{')
10635 {
10636 op_string = check_VecOperations (op_string, NULL);
10637 if (op_string == NULL)
10638 return 0;
10639 }
10640
10641 if (*op_string)
10642 {
10643 as_bad (_("junk `%s' after register"), op_string);
10644 return 0;
10645 }
10646 temp = r->reg_type;
10647 temp.bitfield.baseindex = 0;
10648 i.types[this_operand] = operand_type_or (i.types[this_operand],
10649 temp);
10650 i.types[this_operand].bitfield.unspecified = 0;
10651 i.op[this_operand].regs = r;
10652 i.reg_operands++;
10653 }
10654 else if (*op_string == REGISTER_PREFIX)
10655 {
10656 as_bad (_("bad register name `%s'"), op_string);
10657 return 0;
10658 }
10659 else if (*op_string == IMMEDIATE_PREFIX)
10660 {
10661 ++op_string;
10662 if (i.jumpabsolute)
10663 {
10664 as_bad (_("immediate operand illegal with absolute jump"));
10665 return 0;
10666 }
10667 if (!i386_immediate (op_string))
10668 return 0;
10669 }
10670 else if (RC_SAE_immediate (operand_string))
10671 {
10672       /* If it is an RC or SAE immediate, do nothing.  */
10673 ;
10674 }
10675 else if (is_digit_char (*op_string)
10676 || is_identifier_char (*op_string)
10677 || *op_string == '"'
10678 || *op_string == '(')
10679 {
10680 /* This is a memory reference of some sort. */
10681 char *base_string;
10682
10683 /* Start and end of displacement string expression (if found). */
10684 char *displacement_string_start;
10685 char *displacement_string_end;
10686 char *vop_start;
10687
10688 do_memory_reference:
10689 if (i.mem_operands == 1 && !maybe_adjust_templates ())
10690 return 0;
10691 if ((i.mem_operands == 1
10692 && !current_templates->start->opcode_modifier.isstring)
10693 || i.mem_operands == 2)
10694 {
10695 as_bad (_("too many memory references for `%s'"),
10696 current_templates->start->name);
10697 return 0;
10698 }
10699
10700 /* Check for base index form. We detect the base index form by
10701 looking for an ')' at the end of the operand, searching
10702 for the '(' matching it, and finding a REGISTER_PREFIX or ','
10703 after the '('. */
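      /* For example, in `-4(%ebp,%esi,2)' the displacement part is
	 "-4", and base_string is walked back to the ')' that closes
	 the base/index/scale group.  */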
10704 base_string = op_string + strlen (op_string);
10705
10706 /* Handle vector operations. */
10707 vop_start = strchr (op_string, '{');
10708 if (vop_start && vop_start < base_string)
10709 {
10710 if (check_VecOperations (vop_start, base_string) == NULL)
10711 return 0;
10712 base_string = vop_start;
10713 }
10714
10715 --base_string;
10716 if (is_space_char (*base_string))
10717 --base_string;
10718
10719       /* If we only have a displacement, set up for it to be parsed later.  */
10720 displacement_string_start = op_string;
10721 displacement_string_end = base_string + 1;
10722
10723 if (*base_string == ')')
10724 {
10725 char *temp_string;
10726 unsigned int parens_balanced = 1;
10727 	  /* We've already checked that the numbers of left & right ()'s are
10728 equal, so this loop will not be infinite. */
10729 do
10730 {
10731 base_string--;
10732 if (*base_string == ')')
10733 parens_balanced++;
10734 if (*base_string == '(')
10735 parens_balanced--;
10736 }
10737 while (parens_balanced);
10738
10739 temp_string = base_string;
10740
10741 /* Skip past '(' and whitespace. */
10742 ++base_string;
10743 if (is_space_char (*base_string))
10744 ++base_string;
10745
10746 if (*base_string == ','
10747 || ((i.base_reg = parse_register (base_string, &end_op))
10748 != NULL))
10749 {
10750 displacement_string_end = temp_string;
10751
10752 i.types[this_operand].bitfield.baseindex = 1;
10753
10754 if (i.base_reg)
10755 {
10756 base_string = end_op;
10757 if (is_space_char (*base_string))
10758 ++base_string;
10759 }
10760
10761 /* There may be an index reg or scale factor here. */
10762 if (*base_string == ',')
10763 {
10764 ++base_string;
10765 if (is_space_char (*base_string))
10766 ++base_string;
10767
10768 if ((i.index_reg = parse_register (base_string, &end_op))
10769 != NULL)
10770 {
10771 base_string = end_op;
10772 if (is_space_char (*base_string))
10773 ++base_string;
10774 if (*base_string == ',')
10775 {
10776 ++base_string;
10777 if (is_space_char (*base_string))
10778 ++base_string;
10779 }
10780 else if (*base_string != ')')
10781 {
10782 as_bad (_("expecting `,' or `)' "
10783 "after index register in `%s'"),
10784 operand_string);
10785 return 0;
10786 }
10787 }
10788 else if (*base_string == REGISTER_PREFIX)
10789 {
10790 end_op = strchr (base_string, ',');
10791 if (end_op)
10792 *end_op = '\0';
10793 as_bad (_("bad register name `%s'"), base_string);
10794 return 0;
10795 }
10796
10797 /* Check for scale factor. */
10798 if (*base_string != ')')
10799 {
10800 char *end_scale = i386_scale (base_string);
10801
10802 if (!end_scale)
10803 return 0;
10804
10805 base_string = end_scale;
10806 if (is_space_char (*base_string))
10807 ++base_string;
10808 if (*base_string != ')')
10809 {
10810 as_bad (_("expecting `)' "
10811 "after scale factor in `%s'"),
10812 operand_string);
10813 return 0;
10814 }
10815 }
10816 else if (!i.index_reg)
10817 {
10818 as_bad (_("expecting index register or scale factor "
10819 "after `,'; got '%c'"),
10820 *base_string);
10821 return 0;
10822 }
10823 }
10824 else if (*base_string != ')')
10825 {
10826 as_bad (_("expecting `,' or `)' "
10827 "after base register in `%s'"),
10828 operand_string);
10829 return 0;
10830 }
10831 }
10832 else if (*base_string == REGISTER_PREFIX)
10833 {
10834 end_op = strchr (base_string, ',');
10835 if (end_op)
10836 *end_op = '\0';
10837 as_bad (_("bad register name `%s'"), base_string);
10838 return 0;
10839 }
10840 }
10841
10842 /* If there's an expression beginning the operand, parse it,
10843 assuming displacement_string_start and
10844 displacement_string_end are meaningful. */
10845 if (displacement_string_start != displacement_string_end)
10846 {
10847 if (!i386_displacement (displacement_string_start,
10848 displacement_string_end))
10849 return 0;
10850 }
10851
10852 /* Special case for (%dx) while doing input/output op. */
10853 if (i.base_reg
10854 && i.base_reg->reg_type.bitfield.instance == RegD
10855 && i.base_reg->reg_type.bitfield.word
10856 && i.index_reg == 0
10857 && i.log2_scale_factor == 0
10858 && i.seg[i.mem_operands] == 0
10859 && !operand_type_check (i.types[this_operand], disp))
10860 {
10861 i.types[this_operand] = i.base_reg->reg_type;
10862 return 1;
10863 }
10864
10865 if (i386_index_check (operand_string) == 0)
10866 return 0;
10867 i.flags[this_operand] |= Operand_Mem;
10868 if (i.mem_operands == 0)
10869 i.memop1_string = xstrdup (operand_string);
10870 i.mem_operands++;
10871 }
10872 else
10873 {
10874 /* It's not a memory operand; argh! */
10875 as_bad (_("invalid char %s beginning operand %d `%s'"),
10876 output_invalid (*op_string),
10877 this_operand + 1,
10878 op_string);
10879 return 0;
10880 }
10881 return 1; /* Normal return. */
10882 }
10883 \f
10884 /* Calculate the maximum variable size (i.e., excluding fr_fix)
10885 that an rs_machine_dependent frag may reach. */
10886
10887 unsigned int
10888 i386_frag_max_var (fragS *frag)
10889 {
10890 /* The only relaxable frags are for jumps.
10891 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
10892 gas_assert (frag->fr_type == rs_machine_dependent);
10893 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
10894 }
10895
10896 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10897 static int
10898 elf_symbol_resolved_in_segment_p (symbolS *fr_symbol, offsetT fr_var)
10899 {
10900 /* STT_GNU_IFUNC symbol must go through PLT. */
10901 if ((symbol_get_bfdsym (fr_symbol)->flags
10902 & BSF_GNU_INDIRECT_FUNCTION) != 0)
10903 return 0;
10904
10905 if (!S_IS_EXTERNAL (fr_symbol))
10906 /* Symbol may be weak or local. */
10907 return !S_IS_WEAK (fr_symbol);
10908
10909 /* Global symbols with non-default visibility can't be preempted. */
10910 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol)) != STV_DEFAULT)
10911 return 1;
10912
10913 if (fr_var != NO_RELOC)
10914 switch ((enum bfd_reloc_code_real) fr_var)
10915 {
10916 case BFD_RELOC_386_PLT32:
10917 case BFD_RELOC_X86_64_PLT32:
10918 /* Symbol with PLT relocation may be preempted. */
10919 return 0;
10920 default:
10921 abort ();
10922 }
10923
10924 /* Global symbols with default visibility in a shared library may be
10925 preempted by another definition. */
10926 return !shared;
10927 }
10928 #endif
10929
10930 /* Return the next non-empty frag. */
10931
10932 static fragS *
10933 i386_next_non_empty_frag (fragS *fragP)
10934 {
10935 /* There may be a frag with a ".fill 0" when there is no room in
10936 the current frag for frag_grow in output_insn. */
10937 for (fragP = fragP->fr_next;
10938 (fragP != NULL
10939 && fragP->fr_type == rs_fill
10940 && fragP->fr_fix == 0);
10941 fragP = fragP->fr_next)
10942 ;
10943 return fragP;
10944 }
10945
10946 /* Return the next jcc frag after BRANCH_PADDING. */
10947
10948 static fragS *
10949 i386_next_jcc_frag (fragS *fragP)
10950 {
10951 if (!fragP)
10952 return NULL;
10953
10954 if (fragP->fr_type == rs_machine_dependent
10955 && (TYPE_FROM_RELAX_STATE (fragP->fr_subtype)
10956 == BRANCH_PADDING))
10957 {
10958 fragP = i386_next_non_empty_frag (fragP);
10959 if (fragP->fr_type != rs_machine_dependent)
10960 return NULL;
10961 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == COND_JUMP)
10962 return fragP;
10963 }
10964
10965 return NULL;
10966 }
10967
10968 /* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags. */
10969
10970 static void
10971 i386_classify_machine_dependent_frag (fragS *fragP)
10972 {
10973 fragS *cmp_fragP;
10974 fragS *pad_fragP;
10975 fragS *branch_fragP;
10976 fragS *next_fragP;
10977 unsigned int max_prefix_length;
10978
10979 if (fragP->tc_frag_data.classified)
10980 return;
10981
10982 /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING. Convert
10983 FUSED_JCC_PADDING and merge BRANCH_PADDING. */
10984 for (next_fragP = fragP;
10985 next_fragP != NULL;
10986 next_fragP = next_fragP->fr_next)
10987 {
10988 next_fragP->tc_frag_data.classified = 1;
10989 if (next_fragP->fr_type == rs_machine_dependent)
10990 switch (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype))
10991 {
10992 case BRANCH_PADDING:
10993 /* The BRANCH_PADDING frag must be followed by a branch
10994 frag. */
10995 branch_fragP = i386_next_non_empty_frag (next_fragP);
10996 next_fragP->tc_frag_data.u.branch_fragP = branch_fragP;
10997 break;
10998 case FUSED_JCC_PADDING:
10999 /* Check if this is a fused jcc:
11000 FUSED_JCC_PADDING
11001 CMP like instruction
11002 BRANCH_PADDING
11003 COND_JUMP
11004 */
11005 cmp_fragP = i386_next_non_empty_frag (next_fragP);
11006 pad_fragP = i386_next_non_empty_frag (cmp_fragP);
11007 branch_fragP = i386_next_jcc_frag (pad_fragP);
11008 if (branch_fragP)
11009 {
11010 /* The BRANCH_PADDING frag is merged with the
11011 FUSED_JCC_PADDING frag. */
11012 next_fragP->tc_frag_data.u.branch_fragP = branch_fragP;
11013 /* CMP like instruction size. */
11014 next_fragP->tc_frag_data.cmp_size = cmp_fragP->fr_fix;
11015 frag_wane (pad_fragP);
11016 /* Skip to branch_fragP. */
11017 next_fragP = branch_fragP;
11018 }
11019 else if (next_fragP->tc_frag_data.max_prefix_length)
11020 {
11021 /* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
11022 a fused jcc. */
11023 next_fragP->fr_subtype
11024 = ENCODE_RELAX_STATE (BRANCH_PREFIX, 0);
11025 next_fragP->tc_frag_data.max_bytes
11026 = next_fragP->tc_frag_data.max_prefix_length;
11027 /* This will be updated in the BRANCH_PREFIX scan. */
11028 next_fragP->tc_frag_data.max_prefix_length = 0;
11029 }
11030 else
11031 frag_wane (next_fragP);
11032 break;
11033 }
11034 }
11035
11036 /* Stop if there is no BRANCH_PREFIX. */
11037 if (!align_branch_prefix_size)
11038 return;
11039
11040 /* Scan for BRANCH_PREFIX. */
11041 for (; fragP != NULL; fragP = fragP->fr_next)
11042 {
11043 if (fragP->fr_type != rs_machine_dependent
11044 || (TYPE_FROM_RELAX_STATE (fragP->fr_subtype)
11045 != BRANCH_PREFIX))
11046 continue;
11047
11048 /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
11049 COND_JUMP_PREFIX. */
11050 max_prefix_length = 0;
11051 for (next_fragP = fragP;
11052 next_fragP != NULL;
11053 next_fragP = next_fragP->fr_next)
11054 {
11055 if (next_fragP->fr_type == rs_fill)
11056 /* Skip rs_fill frags. */
11057 continue;
11058 else if (next_fragP->fr_type != rs_machine_dependent)
11059 /* Stop for all other frags. */
11060 break;
11061
11062 /* rs_machine_dependent frags. */
11063 if (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11064 == BRANCH_PREFIX)
11065 {
11066 /* Count BRANCH_PREFIX frags. */
11067 if (max_prefix_length >= MAX_FUSED_JCC_PADDING_SIZE)
11068 {
11069 max_prefix_length = MAX_FUSED_JCC_PADDING_SIZE;
11070 frag_wane (next_fragP);
11071 }
11072 else
11073 max_prefix_length
11074 += next_fragP->tc_frag_data.max_bytes;
11075 }
11076 else if ((TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11077 == BRANCH_PADDING)
11078 || (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11079 == FUSED_JCC_PADDING))
11080 {
11081 /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING. */
11082 fragP->tc_frag_data.u.padding_fragP = next_fragP;
11083 break;
11084 }
11085 else
11086 /* Stop for other rs_machine_dependent frags. */
11087 break;
11088 }
11089
11090 fragP->tc_frag_data.max_prefix_length = max_prefix_length;
11091
11092 /* Skip to the next frag. */
11093 fragP = next_fragP;
11094 }
11095 }
11096
11097 /* Compute padding size for
11098
11099 FUSED_JCC_PADDING
11100 CMP like instruction
11101 BRANCH_PADDING
11102 COND_JUMP/UNCOND_JUMP
11103
11104 or
11105
11106 BRANCH_PADDING
11107 COND_JUMP/UNCOND_JUMP
11108 */
11109
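/* A worked example with hypothetical numbers: with
   align_branch_power of 5 (32-byte boundary), a fused cmp+jcc of
   total size 5 starting at offset 30 within the boundary gives
   offset + size == 35 >= 32, so padding_size = 32 - 30 = 2 bytes
   are inserted to push the branch past the boundary.  */
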
11110 static int
11111 i386_branch_padding_size (fragS *fragP, offsetT address)
11112 {
11113 unsigned int offset, size, padding_size;
11114 fragS *branch_fragP = fragP->tc_frag_data.u.branch_fragP;
11115
11116 /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag. */
11117 if (!address)
11118 address = fragP->fr_address;
11119 address += fragP->fr_fix;
11120
11121   /* CMP like instruction size.  */
11122 size = fragP->tc_frag_data.cmp_size;
11123
11124 /* The base size of the branch frag. */
11125 size += branch_fragP->fr_fix;
11126
11127 /* Add opcode and displacement bytes for the rs_machine_dependent
11128 branch frag. */
11129 if (branch_fragP->fr_type == rs_machine_dependent)
11130 size += md_relax_table[branch_fragP->fr_subtype].rlx_length;
11131
11132 /* Check if branch is within boundary and doesn't end at the last
11133 byte. */
11134 offset = address & ((1U << align_branch_power) - 1);
11135 if ((offset + size) >= (1U << align_branch_power))
11136 /* Padding needed to avoid crossing boundary. */
11137 padding_size = (1U << align_branch_power) - offset;
11138 else
11139 /* No padding needed. */
11140 padding_size = 0;
11141
11142   /* The return value may be saved in tc_frag_data.length, which is
11143      an unsigned byte.  */
11144 if (!fits_in_unsigned_byte (padding_size))
11145 abort ();
11146
11147 return padding_size;
11148 }
11149
11150 /* i386_generic_table_relax_frag()
11151
11152 Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
11153 grow/shrink padding to align branch frags. Hand others to
11154 relax_frag(). */
11155
11156 long
11157 i386_generic_table_relax_frag (segT segment, fragS *fragP, long stretch)
11158 {
11159 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PADDING
11160 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == FUSED_JCC_PADDING)
11161 {
11162 long padding_size = i386_branch_padding_size (fragP, 0);
11163 long grow = padding_size - fragP->tc_frag_data.length;
11164
11165 /* When the BRANCH_PREFIX frag is used, the computed address
11166 must match the actual address and there should be no padding. */
11167 if (fragP->tc_frag_data.padding_address
11168 && (fragP->tc_frag_data.padding_address != fragP->fr_address
11169 || padding_size))
11170 abort ();
11171
11172 /* Update the padding size. */
11173 if (grow)
11174 fragP->tc_frag_data.length = padding_size;
11175
11176 return grow;
11177 }
11178 else if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PREFIX)
11179 {
11180 fragS *padding_fragP, *next_fragP;
11181 long padding_size, left_size, last_size;
11182
11183 padding_fragP = fragP->tc_frag_data.u.padding_fragP;
11184 if (!padding_fragP)
11185 /* Use the padding set by the leading BRANCH_PREFIX frag. */
11186 return (fragP->tc_frag_data.length
11187 - fragP->tc_frag_data.last_length);
11188
11189       /* Compute the relative address of the padding frag the very first
11190 	 time through, when the BRANCH_PREFIX frag sizes are still zero.  */
11191 if (!fragP->tc_frag_data.padding_address)
11192 fragP->tc_frag_data.padding_address
11193 = padding_fragP->fr_address - (fragP->fr_address - stretch);
11194
11195       /* First update the last length from the previous iteration.  */
11196 left_size = fragP->tc_frag_data.prefix_length;
11197 for (next_fragP = fragP;
11198 next_fragP != padding_fragP;
11199 next_fragP = next_fragP->fr_next)
11200 if (next_fragP->fr_type == rs_machine_dependent
11201 && (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11202 == BRANCH_PREFIX))
11203 {
11204 if (left_size)
11205 {
11206 int max = next_fragP->tc_frag_data.max_bytes;
11207 if (max)
11208 {
11209 int size;
11210 if (max > left_size)
11211 size = left_size;
11212 else
11213 size = max;
11214 left_size -= size;
11215 next_fragP->tc_frag_data.last_length = size;
11216 }
11217 }
11218 else
11219 next_fragP->tc_frag_data.last_length = 0;
11220 }
11221
11222 /* Check the padding size for the padding frag. */
11223 padding_size = i386_branch_padding_size
11224 (padding_fragP, (fragP->fr_address
11225 + fragP->tc_frag_data.padding_address));
11226
11227 last_size = fragP->tc_frag_data.prefix_length;
11228       /* Check if there is any change from the last iteration.  */
11229 if (padding_size == last_size)
11230 {
11231 /* Update the expected address of the padding frag. */
11232 padding_fragP->tc_frag_data.padding_address
11233 = (fragP->fr_address + padding_size
11234 + fragP->tc_frag_data.padding_address);
11235 return 0;
11236 }
11237
11238 if (padding_size > fragP->tc_frag_data.max_prefix_length)
11239 {
11240 	  /* No padding if there is insufficient room.  Clear the
11241 expected address of the padding frag. */
11242 padding_fragP->tc_frag_data.padding_address = 0;
11243 padding_size = 0;
11244 }
11245 else
11246 /* Store the expected address of the padding frag. */
11247 padding_fragP->tc_frag_data.padding_address
11248 = (fragP->fr_address + padding_size
11249 + fragP->tc_frag_data.padding_address);
11250
11251 fragP->tc_frag_data.prefix_length = padding_size;
11252
11253       /* Update the length for the current iteration.  */
11254 left_size = padding_size;
11255 for (next_fragP = fragP;
11256 next_fragP != padding_fragP;
11257 next_fragP = next_fragP->fr_next)
11258 if (next_fragP->fr_type == rs_machine_dependent
11259 && (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11260 == BRANCH_PREFIX))
11261 {
11262 if (left_size)
11263 {
11264 int max = next_fragP->tc_frag_data.max_bytes;
11265 if (max)
11266 {
11267 int size;
11268 if (max > left_size)
11269 size = left_size;
11270 else
11271 size = max;
11272 left_size -= size;
11273 next_fragP->tc_frag_data.length = size;
11274 }
11275 }
11276 else
11277 next_fragP->tc_frag_data.length = 0;
11278 }
11279
11280 return (fragP->tc_frag_data.length
11281 - fragP->tc_frag_data.last_length);
11282 }
11283 return relax_frag (segment, fragP, stretch);
11284 }
11285
11286 /* md_estimate_size_before_relax()
11287
11288 Called just before relax() for rs_machine_dependent frags. The x86
11289 assembler uses these frags to handle variable size jump
11290 instructions.
11291
11292 Any symbol that is now undefined will not become defined.
11293 Return the correct fr_subtype in the frag.
11294 Return the initial "guess for variable size of frag" to caller.
11295 The guess is actually the growth beyond the fixed part. Whatever
11296 we do to grow the fixed or variable part contributes to our
11297 returned value. */
11298
11299 int
11300 md_estimate_size_before_relax (fragS *fragP, segT segment)
11301 {
11302 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PADDING
11303 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PREFIX
11304 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == FUSED_JCC_PADDING)
11305 {
11306 i386_classify_machine_dependent_frag (fragP);
11307 return fragP->tc_frag_data.length;
11308 }
11309
11310 /* We've already got fragP->fr_subtype right; all we have to do is
11311 check for un-relaxable symbols. On an ELF system, we can't relax
11312 an externally visible symbol, because it may be overridden by a
11313 shared library. */
11314 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
11315 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11316 || (IS_ELF
11317 && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol,
11318 fragP->fr_var))
11319 #endif
11320 #if defined (OBJ_COFF) && defined (TE_PE)
11321 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
11322 && S_IS_WEAK (fragP->fr_symbol))
11323 #endif
11324 )
11325 {
11326 /* Symbol is undefined in this segment, or we need to keep a
11327 reloc so that weak symbols can be overridden. */
11328 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
11329 enum bfd_reloc_code_real reloc_type;
11330 unsigned char *opcode;
11331 int old_fr_fix;
11332
11333 if (fragP->fr_var != NO_RELOC)
11334 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
11335 else if (size == 2)
11336 reloc_type = BFD_RELOC_16_PCREL;
11337 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11338 else if (need_plt32_p (fragP->fr_symbol))
11339 reloc_type = BFD_RELOC_X86_64_PLT32;
11340 #endif
11341 else
11342 reloc_type = BFD_RELOC_32_PCREL;
11343
11344 old_fr_fix = fragP->fr_fix;
11345 opcode = (unsigned char *) fragP->fr_opcode;
11346
11347 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
11348 {
11349 case UNCOND_JUMP:
11350 /* Make jmp (0xeb) a (d)word displacement jump. */
11351 opcode[0] = 0xe9;
11352 fragP->fr_fix += size;
11353 fix_new (fragP, old_fr_fix, size,
11354 fragP->fr_symbol,
11355 fragP->fr_offset, 1,
11356 reloc_type);
11357 break;
11358
11359 case COND_JUMP86:
11360 if (size == 2
11361 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
11362 {
11363 /* Negate the condition, and branch past an
11364 unconditional jump. */
11365 opcode[0] ^= 1;
11366 opcode[1] = 3;
11367 /* Insert an unconditional jump. */
11368 opcode[2] = 0xe9;
11369 /* We added two extra opcode bytes, and have a two byte
11370 offset. */
11371 fragP->fr_fix += 2 + 2;
11372 fix_new (fragP, old_fr_fix + 2, 2,
11373 fragP->fr_symbol,
11374 fragP->fr_offset, 1,
11375 reloc_type);
11376 break;
11377 }
11378 /* Fall through. */
11379
11380 case COND_JUMP:
11381 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
11382 {
11383 fixS *fixP;
11384
11385 fragP->fr_fix += 1;
11386 fixP = fix_new (fragP, old_fr_fix, 1,
11387 fragP->fr_symbol,
11388 fragP->fr_offset, 1,
11389 BFD_RELOC_8_PCREL);
11390 fixP->fx_signed = 1;
11391 break;
11392 }
11393
11394 /* This changes the byte-displacement jump 0x7N
11395 to the (d)word-displacement jump 0x0f,0x8N. */
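	  /* For example, `je' (0x74) becomes 0x0f 0x84.  */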
11396 opcode[1] = opcode[0] + 0x10;
11397 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
11398 /* We've added an opcode byte. */
11399 fragP->fr_fix += 1 + size;
11400 fix_new (fragP, old_fr_fix + 1, size,
11401 fragP->fr_symbol,
11402 fragP->fr_offset, 1,
11403 reloc_type);
11404 break;
11405
11406 default:
11407 BAD_CASE (fragP->fr_subtype);
11408 break;
11409 }
11410 frag_wane (fragP);
11411 return fragP->fr_fix - old_fr_fix;
11412 }
11413
11414 /* Guess size depending on current relax state. Initially the relax
11415 state will correspond to a short jump and we return 1, because
11416 the variable part of the frag (the branch offset) is one byte
11417 long. However, we can relax a section more than once and in that
11418 case we must either set fr_subtype back to the unrelaxed state,
11419 or return the value for the appropriate branch. */
11420 return md_relax_table[fragP->fr_subtype].rlx_length;
11421 }
11422
11423 /* Called after relax() is finished.
11424
11425 In: Address of frag.
11426 fr_type == rs_machine_dependent.
11427 fr_subtype is what the address relaxed to.
11428
11429 Out: Any fixSs and constants are set up.
11430 Caller will turn frag into a ".space 0". */
11431
11432 void
11433 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
11434 fragS *fragP)
11435 {
11436 unsigned char *opcode;
11437 unsigned char *where_to_put_displacement = NULL;
11438 offsetT target_address;
11439 offsetT opcode_address;
11440 unsigned int extension = 0;
11441 offsetT displacement_from_opcode_start;
11442
11443 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PADDING
11444 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == FUSED_JCC_PADDING
11445 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PREFIX)
11446 {
11447 /* Generate nop padding. */
11448 unsigned int size = fragP->tc_frag_data.length;
11449 if (size)
11450 {
11451 if (size > fragP->tc_frag_data.max_bytes)
11452 abort ();
11453
11454 if (flag_debug)
11455 {
11456 const char *msg;
11457 const char *branch = "branch";
11458 const char *prefix = "";
11459 fragS *padding_fragP;
11460 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype)
11461 == BRANCH_PREFIX)
11462 {
11463 padding_fragP = fragP->tc_frag_data.u.padding_fragP;
11464 switch (fragP->tc_frag_data.default_prefix)
11465 {
11466 default:
11467 abort ();
11468 break;
11469 case CS_PREFIX_OPCODE:
11470 prefix = " cs";
11471 break;
11472 case DS_PREFIX_OPCODE:
11473 prefix = " ds";
11474 break;
11475 case ES_PREFIX_OPCODE:
11476 prefix = " es";
11477 break;
11478 case FS_PREFIX_OPCODE:
11479 prefix = " fs";
11480 break;
11481 case GS_PREFIX_OPCODE:
11482 prefix = " gs";
11483 break;
11484 case SS_PREFIX_OPCODE:
11485 prefix = " ss";
11486 break;
11487 }
11488 if (padding_fragP)
11489 msg = _("%s:%u: add %d%s at 0x%llx to align "
11490 "%s within %d-byte boundary\n");
11491 else
11492 msg = _("%s:%u: add additional %d%s at 0x%llx to "
11493 "align %s within %d-byte boundary\n");
11494 }
11495 else
11496 {
11497 padding_fragP = fragP;
11498 msg = _("%s:%u: add %d%s-byte nop at 0x%llx to align "
11499 "%s within %d-byte boundary\n");
11500 }
11501
11502 if (padding_fragP)
11503 switch (padding_fragP->tc_frag_data.branch_type)
11504 {
11505 case align_branch_jcc:
11506 branch = "jcc";
11507 break;
11508 case align_branch_fused:
11509 branch = "fused jcc";
11510 break;
11511 case align_branch_jmp:
11512 branch = "jmp";
11513 break;
11514 case align_branch_call:
11515 branch = "call";
11516 break;
11517 case align_branch_indirect:
11518 		  branch = "indirect branch";
11519 break;
11520 case align_branch_ret:
11521 branch = "ret";
11522 break;
11523 default:
11524 break;
11525 }
11526
11527 fprintf (stdout, msg,
11528 fragP->fr_file, fragP->fr_line, size, prefix,
11529 (long long) fragP->fr_address, branch,
11530 1 << align_branch_power);
11531 }
11532 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PREFIX)
11533 memset (fragP->fr_opcode,
11534 fragP->tc_frag_data.default_prefix, size);
11535 else
11536 i386_generate_nops (fragP, (char *) fragP->fr_opcode,
11537 size, 0);
11538 fragP->fr_fix += size;
11539 }
11540 return;
11541 }
11542
11543 opcode = (unsigned char *) fragP->fr_opcode;
11544
11545 /* Address we want to reach in file space. */
11546 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
11547
11548 /* Address opcode resides at in file space. */
11549 opcode_address = fragP->fr_address + fragP->fr_fix;
11550
11551 /* Displacement from opcode start to fill into instruction. */
11552 displacement_from_opcode_start = target_address - opcode_address;
11553
11554 if ((fragP->fr_subtype & BIG) == 0)
11555 {
11556 /* Don't have to change opcode. */
11557 extension = 1; /* 1 opcode + 1 displacement */
11558 where_to_put_displacement = &opcode[1];
11559 }
11560 else
11561 {
11562 if (no_cond_jump_promotion
11563 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
11564 as_warn_where (fragP->fr_file, fragP->fr_line,
11565 _("long jump required"));
11566
11567 switch (fragP->fr_subtype)
11568 {
11569 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
11570 extension = 4; /* 1 opcode + 4 displacement */
11571 opcode[0] = 0xe9;
11572 where_to_put_displacement = &opcode[1];
11573 break;
11574
11575 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
11576 extension = 2; /* 1 opcode + 2 displacement */
11577 opcode[0] = 0xe9;
11578 where_to_put_displacement = &opcode[1];
11579 break;
11580
11581 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
11582 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
11583 extension = 5; /* 2 opcode + 4 displacement */
11584 opcode[1] = opcode[0] + 0x10;
11585 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
11586 where_to_put_displacement = &opcode[2];
11587 break;
11588
11589 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
11590 extension = 3; /* 2 opcode + 2 displacement */
11591 opcode[1] = opcode[0] + 0x10;
11592 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
11593 where_to_put_displacement = &opcode[2];
11594 break;
11595
11596 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
11597 extension = 4;
11598 opcode[0] ^= 1;
11599 opcode[1] = 3;
11600 opcode[2] = 0xe9;
11601 where_to_put_displacement = &opcode[3];
11602 break;
11603
11604 default:
11605 BAD_CASE (fragP->fr_subtype);
11606 break;
11607 }
11608 }
11609
11610 /* If the size is less than four, we are sure that the operand
11611 fits, but if it is 4, then the displacement could be larger
11612 than +/- 2GB. */
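/* The test below biases the displacement by 2**31 so that a single
   unsigned comparison rejects any value outside the signed 32-bit
   range.  */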
11613 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
11614 && object_64bit
11615 && ((addressT) (displacement_from_opcode_start - extension
11616 + ((addressT) 1 << 31))
11617 > (((addressT) 2 << 31) - 1)))
11618 {
11619 as_bad_where (fragP->fr_file, fragP->fr_line,
11620 _("jump target out of range"));
11621 /* Make us emit 0. */
11622 displacement_from_opcode_start = extension;
11623 }
11624 /* Now put displacement after opcode. */
11625 md_number_to_chars ((char *) where_to_put_displacement,
11626 (valueT) (displacement_from_opcode_start - extension),
11627 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
11628 fragP->fr_fix += extension;
11629 }
11630 \f
11631 /* Apply a fixup (fixP) to segment data, once it has been determined
11632 by our caller that we have all the info we need to fix it up.
11633
11634 Parameter valP is the pointer to the value of the bits.
11635
11636 On the 386, immediates, displacements, and data pointers are all in
11637 the same (little-endian) format, so we don't need to care about which
11638 we are handling. */
11639
11640 void
11641 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
11642 {
11643 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
11644 valueT value = *valP;
11645
11646 #if !defined (TE_Mach)
11647 if (fixP->fx_pcrel)
11648 {
11649 switch (fixP->fx_r_type)
11650 {
11651 default:
11652 break;
11653
11654 case BFD_RELOC_64:
11655 fixP->fx_r_type = BFD_RELOC_64_PCREL;
11656 break;
11657 case BFD_RELOC_32:
11658 case BFD_RELOC_X86_64_32S:
11659 fixP->fx_r_type = BFD_RELOC_32_PCREL;
11660 break;
11661 case BFD_RELOC_16:
11662 fixP->fx_r_type = BFD_RELOC_16_PCREL;
11663 break;
11664 case BFD_RELOC_8:
11665 fixP->fx_r_type = BFD_RELOC_8_PCREL;
11666 break;
11667 }
11668 }
11669
11670 if (fixP->fx_addsy != NULL
11671 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
11672 || fixP->fx_r_type == BFD_RELOC_64_PCREL
11673 || fixP->fx_r_type == BFD_RELOC_16_PCREL
11674 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
11675 && !use_rela_relocations)
11676 {
11677 /* This is a hack. There should be a better way to handle this.
11678 This covers for the fact that bfd_install_relocation will
11679 subtract the current location (for partial_inplace, PC relative
11680 relocations); see more below. */
11681 #ifndef OBJ_AOUT
11682 if (IS_ELF
11683 #ifdef TE_PE
11684 || OUTPUT_FLAVOR == bfd_target_coff_flavour
11685 #endif
11686 )
11687 value += fixP->fx_where + fixP->fx_frag->fr_address;
11688 #endif
11689 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11690 if (IS_ELF)
11691 {
11692 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
11693
11694 if ((sym_seg == seg
11695 || (symbol_section_p (fixP->fx_addsy)
11696 && sym_seg != absolute_section))
11697 && !generic_force_reloc (fixP))
11698 {
11699 /* Yes, we add the values in twice. This is because
11700 bfd_install_relocation subtracts them out again. I think
11701 bfd_install_relocation is broken, but I don't dare change
11702 it. FIXME. */
11703 value += fixP->fx_where + fixP->fx_frag->fr_address;
11704 }
11705 }
11706 #endif
11707 #if defined (OBJ_COFF) && defined (TE_PE)
11708 /* For some reason, the PE format does not store a
11709 section address offset for a PC relative symbol. */
11710 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
11711 || S_IS_WEAK (fixP->fx_addsy))
11712 value += md_pcrel_from (fixP);
11713 #endif
11714 }
11715 #if defined (OBJ_COFF) && defined (TE_PE)
11716 if (fixP->fx_addsy != NULL
11717 && S_IS_WEAK (fixP->fx_addsy)
11718 /* PR 16858: Do not modify weak function references. */
11719 && ! fixP->fx_pcrel)
11720 {
11721 #if !defined (TE_PEP)
11722 /* On x86 PE, weak function symbols are neither PC-relative,
11723 nor do they set S_IS_FUNCTION. So the only reliable way
11724 to detect them is to check the flags of their containing
11725 section. */
11726 if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
11727 && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
11728 ;
11729 else
11730 #endif
11731 value -= S_GET_VALUE (fixP->fx_addsy);
11732 }
11733 #endif
11734
11735 /* Fix a few things - the dynamic linker expects certain values here,
11736 and we must not disappoint it. */
11737 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11738 if (IS_ELF && fixP->fx_addsy)
11739 switch (fixP->fx_r_type)
11740 {
11741 case BFD_RELOC_386_PLT32:
11742 case BFD_RELOC_X86_64_PLT32:
11743 /* Make the jump instruction point to the address of the operand.
11744 At runtime we merely add the offset to the actual PLT entry.
11745 NB: Subtract the offset size only for jump instructions. */
11746 if (fixP->fx_pcrel)
11747 value = -4;
11748 break;
11749
11750 case BFD_RELOC_386_TLS_GD:
11751 case BFD_RELOC_386_TLS_LDM:
11752 case BFD_RELOC_386_TLS_IE_32:
11753 case BFD_RELOC_386_TLS_IE:
11754 case BFD_RELOC_386_TLS_GOTIE:
11755 case BFD_RELOC_386_TLS_GOTDESC:
11756 case BFD_RELOC_X86_64_TLSGD:
11757 case BFD_RELOC_X86_64_TLSLD:
11758 case BFD_RELOC_X86_64_GOTTPOFF:
11759 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
11760 value = 0; /* Fully resolved at runtime. No addend. */
11761 /* Fallthrough */
11762 case BFD_RELOC_386_TLS_LE:
11763 case BFD_RELOC_386_TLS_LDO_32:
11764 case BFD_RELOC_386_TLS_LE_32:
11765 case BFD_RELOC_X86_64_DTPOFF32:
11766 case BFD_RELOC_X86_64_DTPOFF64:
11767 case BFD_RELOC_X86_64_TPOFF32:
11768 case BFD_RELOC_X86_64_TPOFF64:
11769 S_SET_THREAD_LOCAL (fixP->fx_addsy);
11770 break;
11771
11772 case BFD_RELOC_386_TLS_DESC_CALL:
11773 case BFD_RELOC_X86_64_TLSDESC_CALL:
11774 value = 0; /* Fully resolved at runtime. No addend. */
11775 S_SET_THREAD_LOCAL (fixP->fx_addsy);
11776 fixP->fx_done = 0;
11777 return;
11778
11779 case BFD_RELOC_VTABLE_INHERIT:
11780 case BFD_RELOC_VTABLE_ENTRY:
11781 fixP->fx_done = 0;
11782 return;
11783
11784 default:
11785 break;
11786 }
11787 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
11788 *valP = value;
11789 #endif /* !defined (TE_Mach) */
11790
11791 /* Are we finished with this relocation now? */
11792 if (fixP->fx_addsy == NULL)
11793 fixP->fx_done = 1;
11794 #if defined (OBJ_COFF) && defined (TE_PE)
11795 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
11796 {
11797 fixP->fx_done = 0;
11798 /* Remember value for tc_gen_reloc. */
11799 fixP->fx_addnumber = value;
11800 /* Clear out the frag for now. */
11801 value = 0;
11802 }
11803 #endif
11804 else if (use_rela_relocations)
11805 {
11806 fixP->fx_no_overflow = 1;
11807 /* Remember value for tc_gen_reloc. */
11808 fixP->fx_addnumber = value;
11809 value = 0;
11810 }
11811
11812 md_number_to_chars (p, value, fixP->fx_size);
11813 }
11814 \f
11815 const char *
11816 md_atof (int type, char *litP, int *sizeP)
11817 {
11818 /* This outputs the LITTLENUMs in REVERSE order,
11819 as required by the little-endian 386. */
11820 return ieee_md_atof (type, litP, sizeP, FALSE);
11821 }
11822 \f
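/* Buffer for output_invalid: large enough for either "'c'" or
   "(0xXX)" plus the trailing NUL.  */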
11823 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
11824
11825 static char *
11826 output_invalid (int c)
11827 {
11828 if (ISPRINT (c))
11829 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
11830 "'%c'", c);
11831 else
11832 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
11833 "(0x%x)", (unsigned char) c);
11834 return output_invalid_buf;
11835 }
11836
11837 /* REG_STRING starts *before* REGISTER_PREFIX. */
11838
11839 static const reg_entry *
11840 parse_real_register (char *reg_string, char **end_op)
11841 {
11842 char *s = reg_string;
11843 char *p;
11844 char reg_name_given[MAX_REG_NAME_SIZE + 1];
11845 const reg_entry *r;
11846
11847 /* Skip possible REGISTER_PREFIX and possible whitespace. */
11848 if (*s == REGISTER_PREFIX)
11849 ++s;
11850
11851 if (is_space_char (*s))
11852 ++s;
11853
11854 p = reg_name_given;
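/* Copy the name through register_chars[], which maps any character
   that cannot appear in a register name to '\0' and thus stops the
   copy at the first invalid character.  */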
11855 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
11856 {
11857 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
11858 return (const reg_entry *) NULL;
11859 s++;
11860 }
11861
11862 /* For naked regs, make sure that we are not dealing with an identifier.
11863 This prevents confusing an identifier like `eax_var' with register
11864 `eax'. */
11865 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
11866 return (const reg_entry *) NULL;
11867
11868 *end_op = s;
11869
11870 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
11871
11872 /* Handle floating point regs, allowing spaces in the (i) part. */
11873 if (r == i386_regtab /* %st is first entry of table */)
11874 {
11875 if (!cpu_arch_flags.bitfield.cpu8087
11876 && !cpu_arch_flags.bitfield.cpu287
11877 && !cpu_arch_flags.bitfield.cpu387)
11878 return (const reg_entry *) NULL;
11879
11880 if (is_space_char (*s))
11881 ++s;
11882 if (*s == '(')
11883 {
11884 ++s;
11885 if (is_space_char (*s))
11886 ++s;
11887 if (*s >= '0' && *s <= '7')
11888 {
11889 int fpr = *s - '0';
11890 ++s;
11891 if (is_space_char (*s))
11892 ++s;
11893 if (*s == ')')
11894 {
11895 *end_op = s + 1;
11896 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
11897 know (r);
11898 return r + fpr;
11899 }
11900 }
11901 /* We have "%st(" then garbage. */
11902 return (const reg_entry *) NULL;
11903 }
11904 }
11905
11906 if (r == NULL || allow_pseudo_reg)
11907 return r;
11908
11909 if (operand_type_all_zero (&r->reg_type))
11910 return (const reg_entry *) NULL;
11911
11912 if ((r->reg_type.bitfield.dword
11913 || (r->reg_type.bitfield.class == SReg && r->reg_num > 3)
11914 || r->reg_type.bitfield.class == RegCR
11915 || r->reg_type.bitfield.class == RegDR
11916 || r->reg_type.bitfield.class == RegTR)
11917 && !cpu_arch_flags.bitfield.cpui386)
11918 return (const reg_entry *) NULL;
11919
11920 if (r->reg_type.bitfield.class == RegMMX && !cpu_arch_flags.bitfield.cpummx)
11921 return (const reg_entry *) NULL;
11922
11923 if (!cpu_arch_flags.bitfield.cpuavx512f)
11924 {
11925 if (r->reg_type.bitfield.zmmword
11926 || r->reg_type.bitfield.class == RegMask)
11927 return (const reg_entry *) NULL;
11928
11929 if (!cpu_arch_flags.bitfield.cpuavx)
11930 {
11931 if (r->reg_type.bitfield.ymmword)
11932 return (const reg_entry *) NULL;
11933
11934 if (!cpu_arch_flags.bitfield.cpusse && r->reg_type.bitfield.xmmword)
11935 return (const reg_entry *) NULL;
11936 }
11937 }
11938
11939 if (r->reg_type.bitfield.class == RegBND && !cpu_arch_flags.bitfield.cpumpx)
11940 return (const reg_entry *) NULL;
11941
11942 /* Don't allow the fake index register unless allow_index_reg is non-zero. */
11943 if (!allow_index_reg && r->reg_num == RegIZ)
11944 return (const reg_entry *) NULL;
11945
11946 /* Upper 16 vector registers are only available with VREX in 64bit
11947 mode, and require EVEX encoding. */
11948 if (r->reg_flags & RegVRex)
11949 {
11950 if (!cpu_arch_flags.bitfield.cpuavx512f
11951 || flag_code != CODE_64BIT)
11952 return (const reg_entry *) NULL;
11953
11954 i.vec_encoding = vex_encoding_evex;
11955 }
11956
11957 if (((r->reg_flags & (RegRex64 | RegRex)) || r->reg_type.bitfield.qword)
11958 && (!cpu_arch_flags.bitfield.cpulm || r->reg_type.bitfield.class != RegCR)
11959 && flag_code != CODE_64BIT)
11960 return (const reg_entry *) NULL;
11961
11962 if (r->reg_type.bitfield.class == SReg && r->reg_num == RegFlat
11963 && !intel_syntax)
11964 return (const reg_entry *) NULL;
11965
11966 return r;
11967 }
11968
11969 /* REG_STRING starts *before* REGISTER_PREFIX. */
11970
11971 static const reg_entry *
11972 parse_register (char *reg_string, char **end_op)
11973 {
11974 const reg_entry *r;
11975
11976 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
11977 r = parse_real_register (reg_string, end_op);
11978 else
11979 r = NULL;
11980 if (!r)
11981 {
11982 char *save = input_line_pointer;
11983 char c;
11984 symbolS *symbolP;
11985
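/* Not a known register: fall back on the symbol table, since a
   symbol equated to a register lives in reg_section.
   get_symbol_name () parses at input_line_pointer, so temporarily
   point it at the candidate string; both pointers are restored
   below.  */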
11986 input_line_pointer = reg_string;
11987 c = get_symbol_name (&reg_string);
11988 symbolP = symbol_find (reg_string);
11989 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
11990 {
11991 const expressionS *e = symbol_get_value_expression (symbolP);
11992
11993 know (e->X_op == O_register);
11994 know (e->X_add_number >= 0
11995 && (valueT) e->X_add_number < i386_regtab_size);
11996 r = i386_regtab + e->X_add_number;
11997 if ((r->reg_flags & RegVRex))
11998 i.vec_encoding = vex_encoding_evex;
11999 *end_op = input_line_pointer;
12000 }
12001 *input_line_pointer = c;
12002 input_line_pointer = save;
12003 }
12004 return r;
12005 }
12006
12007 int
12008 i386_parse_name (char *name, expressionS *e, char *nextcharP)
12009 {
12010 const reg_entry *r;
12011 char *end = input_line_pointer;
12012
12013 *end = *nextcharP;
12014 r = parse_register (name, &input_line_pointer);
12015 if (r && end <= input_line_pointer)
12016 {
12017 *nextcharP = *input_line_pointer;
12018 *input_line_pointer = 0;
12019 e->X_op = O_register;
12020 e->X_add_number = r - i386_regtab;
12021 return 1;
12022 }
12023 input_line_pointer = end;
12024 *end = 0;
12025 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
12026 }
12027
12028 void
12029 md_operand (expressionS *e)
12030 {
12031 char *end;
12032 const reg_entry *r;
12033
12034 switch (*input_line_pointer)
12035 {
12036 case REGISTER_PREFIX:
12037 r = parse_real_register (input_line_pointer, &end);
12038 if (r)
12039 {
12040 e->X_op = O_register;
12041 e->X_add_number = r - i386_regtab;
12042 input_line_pointer = end;
12043 }
12044 break;
12045
12046 case '[':
12047 gas_assert (intel_syntax);
12048 end = input_line_pointer++;
12049 expression (e);
12050 if (*input_line_pointer == ']')
12051 {
12052 ++input_line_pointer;
12053 e->X_op_symbol = make_expr_symbol (e);
12054 e->X_add_symbol = NULL;
12055 e->X_add_number = 0;
12056 e->X_op = O_index;
12057 }
12058 else
12059 {
12060 e->X_op = O_absent;
12061 input_line_pointer = end;
12062 }
12063 break;
12064 }
12065 }
12066
12067 \f
12068 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12069 const char *md_shortopts = "kVQ:sqnO::";
12070 #else
12071 const char *md_shortopts = "qnO::";
12072 #endif
12073
12074 #define OPTION_32 (OPTION_MD_BASE + 0)
12075 #define OPTION_64 (OPTION_MD_BASE + 1)
12076 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
12077 #define OPTION_MARCH (OPTION_MD_BASE + 3)
12078 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
12079 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
12080 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
12081 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
12082 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
12083 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
12084 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
12085 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
12086 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
12087 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
12088 #define OPTION_X32 (OPTION_MD_BASE + 14)
12089 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
12090 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
12091 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
12092 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
12093 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
12094 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
12095 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
12096 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
12097 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
12098 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
12099 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
12100 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
12101 #define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
12102 #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
12103 #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
12104 #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
12105
12106 struct option md_longopts[] =
12107 {
12108 {"32", no_argument, NULL, OPTION_32},
12109 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12110 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12111 {"64", no_argument, NULL, OPTION_64},
12112 #endif
12113 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12114 {"x32", no_argument, NULL, OPTION_X32},
12115 {"mshared", no_argument, NULL, OPTION_MSHARED},
12116 {"mx86-used-note", required_argument, NULL, OPTION_X86_USED_NOTE},
12117 #endif
12118 {"divide", no_argument, NULL, OPTION_DIVIDE},
12119 {"march", required_argument, NULL, OPTION_MARCH},
12120 {"mtune", required_argument, NULL, OPTION_MTUNE},
12121 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
12122 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
12123 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
12124 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
12125 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
12126 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
12127 {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
12128 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
12129 {"mvexwig", required_argument, NULL, OPTION_MVEXWIG},
12130 {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
12131 {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
12132 {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
12133 # if defined (TE_PE) || defined (TE_PEP)
12134 {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
12135 #endif
12136 {"momit-lock-prefix", required_argument, NULL, OPTION_MOMIT_LOCK_PREFIX},
12137 {"mfence-as-lock-add", required_argument, NULL, OPTION_MFENCE_AS_LOCK_ADD},
12138 {"mrelax-relocations", required_argument, NULL, OPTION_MRELAX_RELOCATIONS},
12139 {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG},
12140 {"malign-branch-boundary", required_argument, NULL, OPTION_MALIGN_BRANCH_BOUNDARY},
12141 {"malign-branch-prefix-size", required_argument, NULL, OPTION_MALIGN_BRANCH_PREFIX_SIZE},
12142 {"malign-branch", required_argument, NULL, OPTION_MALIGN_BRANCH},
12143 {"mbranches-within-32B-boundaries", no_argument, NULL, OPTION_MBRANCHES_WITH_32B_BOUNDARIES},
12144 {"mamd64", no_argument, NULL, OPTION_MAMD64},
12145 {"mintel64", no_argument, NULL, OPTION_MINTEL64},
12146 {NULL, no_argument, NULL, 0}
12147 };
12148 size_t md_longopts_size = sizeof (md_longopts);
12149
12150 int
12151 md_parse_option (int c, const char *arg)
12152 {
12153 unsigned int j;
12154 char *arch, *next, *saved, *type;
12155
12156 switch (c)
12157 {
12158 case 'n':
12159 optimize_align_code = 0;
12160 break;
12161
12162 case 'q':
12163 quiet_warnings = 1;
12164 break;
12165
12166 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12167 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
12168 should be emitted or not. FIXME: Not implemented. */
12169 case 'Q':
12170 if ((arg[0] != 'y' && arg[0] != 'n') || arg[1])
12171 return 0;
12172 break;
12173
12174 /* -V: SVR4 argument to print version ID. */
12175 case 'V':
12176 print_version_id ();
12177 break;
12178
12179 /* -k: Ignore for FreeBSD compatibility. */
12180 case 'k':
12181 break;
12182
12183 case 's':
12184 /* -s: On i386 Solaris, this tells the native assembler to use
12185 .stab instead of .stab.excl. We always use .stab anyhow. */
12186 break;
12187
12188 case OPTION_MSHARED:
12189 shared = 1;
12190 break;
12191
12192 case OPTION_X86_USED_NOTE:
12193 if (strcasecmp (arg, "yes") == 0)
12194 x86_used_note = 1;
12195 else if (strcasecmp (arg, "no") == 0)
12196 x86_used_note = 0;
12197 else
12198 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg);
12199 break;
12200
12201
12202 #endif
12203 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12204 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12205 case OPTION_64:
12206 {
12207 const char **list, **l;
12208
12209 list = bfd_target_list ();
12210 for (l = list; *l != NULL; l++)
12211 if (CONST_STRNEQ (*l, "elf64-x86-64")
12212 || strcmp (*l, "coff-x86-64") == 0
12213 || strcmp (*l, "pe-x86-64") == 0
12214 || strcmp (*l, "pei-x86-64") == 0
12215 || strcmp (*l, "mach-o-x86-64") == 0)
12216 {
12217 default_arch = "x86_64";
12218 break;
12219 }
12220 if (*l == NULL)
12221 as_fatal (_("no compiled in support for x86_64"));
12222 free (list);
12223 }
12224 break;
12225 #endif
12226
12227 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12228 case OPTION_X32:
12229 if (IS_ELF)
12230 {
12231 const char **list, **l;
12232
12233 list = bfd_target_list ();
12234 for (l = list; *l != NULL; l++)
12235 if (CONST_STRNEQ (*l, "elf32-x86-64"))
12236 {
12237 default_arch = "x86_64:32";
12238 break;
12239 }
12240 if (*l == NULL)
12241 as_fatal (_("no compiled in support for 32bit x86_64"));
12242 free (list);
12243 }
12244 else
12245 as_fatal (_("32bit x86_64 is only supported for ELF"));
12246 break;
12247 #endif
12248
12249 case OPTION_32:
12250 default_arch = "i386";
12251 break;
12252
12253 case OPTION_DIVIDE:
12254 #ifdef SVR4_COMMENT_CHARS
12255 {
12256 char *n, *t;
12257 const char *s;
12258
12259 n = XNEWVEC (char, strlen (i386_comment_chars) + 1);
12260 t = n;
12261 for (s = i386_comment_chars; *s != '\0'; s++)
12262 if (*s != '/')
12263 *t++ = *s;
12264 *t = '\0';
12265 i386_comment_chars = n;
12266 }
12267 #endif
12268 break;
12269
12270 case OPTION_MARCH:
12271 saved = xstrdup (arg);
12272 arch = saved;
12273 /* Allow -march=+nosse. */
12274 if (*arch == '+')
12275 arch++;
12276 do
12277 {
12278 if (*arch == '.')
12279 as_fatal (_("invalid -march= option: `%s'"), arg);
12280 next = strchr (arch, '+');
12281 if (next)
12282 *next++ = '\0';
12283 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
12284 {
12285 if (strcmp (arch, cpu_arch [j].name) == 0)
12286 {
12287 /* Processor. */
12288 if (! cpu_arch[j].flags.bitfield.cpui386)
12289 continue;
12290
12291 cpu_arch_name = cpu_arch[j].name;
12292 cpu_sub_arch_name = NULL;
12293 cpu_arch_flags = cpu_arch[j].flags;
12294 cpu_arch_isa = cpu_arch[j].type;
12295 cpu_arch_isa_flags = cpu_arch[j].flags;
12296 if (!cpu_arch_tune_set)
12297 {
12298 cpu_arch_tune = cpu_arch_isa;
12299 cpu_arch_tune_flags = cpu_arch_isa_flags;
12300 }
12301 break;
12302 }
12303 else if (*cpu_arch [j].name == '.'
12304 && strcmp (arch, cpu_arch [j].name + 1) == 0)
12305 {
12306 /* ISA extension. */
12307 i386_cpu_flags flags;
12308
12309 flags = cpu_flags_or (cpu_arch_flags,
12310 cpu_arch[j].flags);
12311
12312 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
12313 {
12314 if (cpu_sub_arch_name)
12315 {
12316 char *name = cpu_sub_arch_name;
12317 cpu_sub_arch_name = concat (name,
12318 cpu_arch[j].name,
12319 (const char *) NULL);
12320 free (name);
12321 }
12322 else
12323 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
12324 cpu_arch_flags = flags;
12325 cpu_arch_isa_flags = flags;
12326 }
12327 else
12328 cpu_arch_isa_flags
12329 = cpu_flags_or (cpu_arch_isa_flags,
12330 cpu_arch[j].flags);
12331 break;
12332 }
12333 }
12334
12335 if (j >= ARRAY_SIZE (cpu_arch))
12336 {
12337 /* Disable an ISA extension. */
12338 for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
12339 if (strcmp (arch, cpu_noarch [j].name) == 0)
12340 {
12341 i386_cpu_flags flags;
12342
12343 flags = cpu_flags_and_not (cpu_arch_flags,
12344 cpu_noarch[j].flags);
12345 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
12346 {
12347 if (cpu_sub_arch_name)
12348 {
12349 char *name = cpu_sub_arch_name;
12350 cpu_sub_arch_name = concat (name, arch,
12351 (const char *) NULL);
12352 free (name);
12353 }
12354 else
12355 cpu_sub_arch_name = xstrdup (arch);
12356 cpu_arch_flags = flags;
12357 cpu_arch_isa_flags = flags;
12358 }
12359 break;
12360 }
12361
12362 if (j >= ARRAY_SIZE (cpu_noarch))
12363 j = ARRAY_SIZE (cpu_arch);
12364 }
12365
12366 if (j >= ARRAY_SIZE (cpu_arch))
12367 as_fatal (_("invalid -march= option: `%s'"), arg);
12368
12369 arch = next;
12370 }
12371 while (next != NULL);
12372 free (saved);
12373 break;
12374
12375 case OPTION_MTUNE:
12376 if (*arg == '.')
12377 as_fatal (_("invalid -mtune= option: `%s'"), arg);
12378 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
12379 {
12380 if (strcmp (arg, cpu_arch [j].name) == 0)
12381 {
12382 cpu_arch_tune_set = 1;
12383 cpu_arch_tune = cpu_arch [j].type;
12384 cpu_arch_tune_flags = cpu_arch[j].flags;
12385 break;
12386 }
12387 }
12388 if (j >= ARRAY_SIZE (cpu_arch))
12389 as_fatal (_("invalid -mtune= option: `%s'"), arg);
12390 break;
12391
12392 case OPTION_MMNEMONIC:
12393 if (strcasecmp (arg, "att") == 0)
12394 intel_mnemonic = 0;
12395 else if (strcasecmp (arg, "intel") == 0)
12396 intel_mnemonic = 1;
12397 else
12398 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
12399 break;
12400
12401 case OPTION_MSYNTAX:
12402 if (strcasecmp (arg, "att") == 0)
12403 intel_syntax = 0;
12404 else if (strcasecmp (arg, "intel") == 0)
12405 intel_syntax = 1;
12406 else
12407 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
12408 break;
12409
12410 case OPTION_MINDEX_REG:
12411 allow_index_reg = 1;
12412 break;
12413
12414 case OPTION_MNAKED_REG:
12415 allow_naked_reg = 1;
12416 break;
12417
12418 case OPTION_MSSE2AVX:
12419 sse2avx = 1;
12420 break;
12421
12422 case OPTION_MSSE_CHECK:
12423 if (strcasecmp (arg, "error") == 0)
12424 sse_check = check_error;
12425 else if (strcasecmp (arg, "warning") == 0)
12426 sse_check = check_warning;
12427 else if (strcasecmp (arg, "none") == 0)
12428 sse_check = check_none;
12429 else
12430 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
12431 break;
12432
12433 case OPTION_MOPERAND_CHECK:
12434 if (strcasecmp (arg, "error") == 0)
12435 operand_check = check_error;
12436 else if (strcasecmp (arg, "warning") == 0)
12437 operand_check = check_warning;
12438 else if (strcasecmp (arg, "none") == 0)
12439 operand_check = check_none;
12440 else
12441 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
12442 break;
12443
12444 case OPTION_MAVXSCALAR:
12445 if (strcasecmp (arg, "128") == 0)
12446 avxscalar = vex128;
12447 else if (strcasecmp (arg, "256") == 0)
12448 avxscalar = vex256;
12449 else
12450 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
12451 break;
12452
12453 case OPTION_MVEXWIG:
12454 if (strcmp (arg, "0") == 0)
12455 vexwig = vexw0;
12456 else if (strcmp (arg, "1") == 0)
12457 vexwig = vexw1;
12458 else
12459 as_fatal (_("invalid -mvexwig= option: `%s'"), arg);
12460 break;
12461
12462 case OPTION_MADD_BND_PREFIX:
12463 add_bnd_prefix = 1;
12464 break;
12465
12466 case OPTION_MEVEXLIG:
12467 if (strcmp (arg, "128") == 0)
12468 evexlig = evexl128;
12469 else if (strcmp (arg, "256") == 0)
12470 evexlig = evexl256;
12471 else if (strcmp (arg, "512") == 0)
12472 evexlig = evexl512;
12473 else
12474 as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
12475 break;
12476
12477 case OPTION_MEVEXRCIG:
12478 if (strcmp (arg, "rne") == 0)
12479 evexrcig = rne;
12480 else if (strcmp (arg, "rd") == 0)
12481 evexrcig = rd;
12482 else if (strcmp (arg, "ru") == 0)
12483 evexrcig = ru;
12484 else if (strcmp (arg, "rz") == 0)
12485 evexrcig = rz;
12486 else
12487 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg);
12488 break;
12489
12490 case OPTION_MEVEXWIG:
12491 if (strcmp (arg, "0") == 0)
12492 evexwig = evexw0;
12493 else if (strcmp (arg, "1") == 0)
12494 evexwig = evexw1;
12495 else
12496 as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
12497 break;
12498
12499 # if defined (TE_PE) || defined (TE_PEP)
12500 case OPTION_MBIG_OBJ:
12501 use_big_obj = 1;
12502 break;
12503 #endif
12504
12505 case OPTION_MOMIT_LOCK_PREFIX:
12506 if (strcasecmp (arg, "yes") == 0)
12507 omit_lock_prefix = 1;
12508 else if (strcasecmp (arg, "no") == 0)
12509 omit_lock_prefix = 0;
12510 else
12511 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg);
12512 break;
12513
12514 case OPTION_MFENCE_AS_LOCK_ADD:
12515 if (strcasecmp (arg, "yes") == 0)
12516 avoid_fence = 1;
12517 else if (strcasecmp (arg, "no") == 0)
12518 avoid_fence = 0;
12519 else
12520 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg);
12521 break;
12522
12523 case OPTION_MRELAX_RELOCATIONS:
12524 if (strcasecmp (arg, "yes") == 0)
12525 generate_relax_relocations = 1;
12526 else if (strcasecmp (arg, "no") == 0)
12527 generate_relax_relocations = 0;
12528 else
12529 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg);
12530 break;
12531
12532 case OPTION_MALIGN_BRANCH_BOUNDARY:
12533 {
12534 char *end;
12535 long int align = strtoul (arg, &end, 0);
12536 if (*end == '\0')
12537 {
12538 if (align == 0)
12539 {
12540 align_branch_power = 0;
12541 break;
12542 }
12543 else if (align >= 16)
12544 {
12545 int align_power;
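/* Require ALIGN to be a power of two: shift out its trailing zero
   bits and insist that exactly 1 remains.  */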
12546 for (align_power = 0;
12547 (align & 1) == 0;
12548 align >>= 1, align_power++)
12549 continue;
12550 /* Limit alignment power to 31. */
12551 if (align == 1 && align_power < 32)
12552 {
12553 align_branch_power = align_power;
12554 break;
12555 }
12556 }
12557 }
12558 as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg);
12559 }
12560 break;
12561
12562 case OPTION_MALIGN_BRANCH_PREFIX_SIZE:
12563 {
12564 char *end;
12565 int align = strtoul (arg, &end, 0);
12566 /* Some processors only support 5 prefixes. */
12567 if (*end == '\0' && align >= 0 && align < 6)
12568 {
12569 align_branch_prefix_size = align;
12570 break;
12571 }
12572 as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),
12573 arg);
12574 }
12575 break;
12576
12577 case OPTION_MALIGN_BRANCH:
12578 align_branch = 0;
12579 saved = xstrdup (arg);
12580 type = saved;
12581 do
12582 {
12583 next = strchr (type, '+');
12584 if (next)
12585 *next++ = '\0';
12586 if (strcasecmp (type, "jcc") == 0)
12587 align_branch |= align_branch_jcc_bit;
12588 else if (strcasecmp (type, "fused") == 0)
12589 align_branch |= align_branch_fused_bit;
12590 else if (strcasecmp (type, "jmp") == 0)
12591 align_branch |= align_branch_jmp_bit;
12592 else if (strcasecmp (type, "call") == 0)
12593 align_branch |= align_branch_call_bit;
12594 else if (strcasecmp (type, "ret") == 0)
12595 align_branch |= align_branch_ret_bit;
12596 else if (strcasecmp (type, "indirect") == 0)
12597 align_branch |= align_branch_indirect_bit;
12598 else
12599 as_fatal (_("invalid -malign-branch= option: `%s'"), arg);
12600 type = next;
12601 }
12602 while (next != NULL);
12603 free (saved);
12604 break;
12605
12606 case OPTION_MBRANCHES_WITH_32B_BOUNDARIES:
12607 align_branch_power = 5;
12608 align_branch_prefix_size = 5;
12609 align_branch = (align_branch_jcc_bit
12610 | align_branch_fused_bit
12611 | align_branch_jmp_bit);
12612 break;
12613
12614 case OPTION_MAMD64:
12615 isa64 = amd64;
12616 break;
12617
12618 case OPTION_MINTEL64:
12619 isa64 = intel64;
12620 break;
12621
12622 case 'O':
12623 if (arg == NULL)
12624 {
12625 optimize = 1;
12626 /* Turn off -Os. */
12627 optimize_for_space = 0;
12628 }
12629 else if (*arg == 's')
12630 {
12631 optimize_for_space = 1;
12632 /* Turn on all encoding optimizations. */
12633 optimize = INT_MAX;
12634 }
12635 else
12636 {
12637 optimize = atoi (arg);
12638 /* Turn off -Os. */
12639 optimize_for_space = 0;
12640 }
12641 break;
12642
12643 default:
12644 return 0;
12645 }
12646 return 1;
12647 }
12648
12649 #define MESSAGE_TEMPLATE \
12650 " "
12651
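/* Helper for show_arch: append NAME (LEN bytes) to the line being
   built at P, flushing the line to STREAM and starting a new one
   whenever the MESSAGE_TEMPLATE width would be exceeded.  Updates
   *LEFT_P with the space remaining and returns the new end of the
   message.  */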
12652 static char *
12653 output_message (FILE *stream, char *p, char *message, char *start,
12654 int *left_p, const char *name, int len)
12655 {
12656 int size = sizeof (MESSAGE_TEMPLATE);
12657 int left = *left_p;
12658
12659 /* Reserve 2 bytes for ", " or ",\0". */
12660 left -= len + 2;
12661
12662 /* Check if there is any room. */
12663 if (left >= 0)
12664 {
12665 if (p != start)
12666 {
12667 *p++ = ',';
12668 *p++ = ' ';
12669 }
12670 p = mempcpy (p, name, len);
12671 }
12672 else
12673 {
12674 /* Output the current message now and start a new one. */
12675 *p++ = ',';
12676 *p = '\0';
12677 fprintf (stream, "%s\n", message);
12678 p = start;
12679 left = size - (start - message) - len - 2;
12680
12681 gas_assert (left >= 0);
12682
12683 p = mempcpy (p, name, len);
12684 }
12685
12686 *left_p = left;
12687 return p;
12688 }
12689
12690 static void
12691 show_arch (FILE *stream, int ext, int check)
12692 {
12693 static char message[] = MESSAGE_TEMPLATE;
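/* Leave the first 27 columns of each line blank so the name list
   lines up under the option text printed by md_show_usage.  */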
12694 char *start = message + 27;
12695 char *p;
12696 int size = sizeof (MESSAGE_TEMPLATE);
12697 int left;
12698 const char *name;
12699 int len;
12700 unsigned int j;
12701
12702 p = start;
12703 left = size - (start - message);
12704 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
12705 {
12706 /* Should it be skipped? */
12707 if (cpu_arch [j].skip)
12708 continue;
12709
12710 name = cpu_arch [j].name;
12711 len = cpu_arch [j].len;
12712 if (*name == '.')
12713 {
12714 /* It is an extension. Skip if we aren't asked to show it. */
12715 if (ext)
12716 {
12717 name++;
12718 len--;
12719 }
12720 else
12721 continue;
12722 }
12723 else if (ext)
12724 {
12725 /* It is a processor. Skip if we only show extensions. */
12726 continue;
12727 }
12728 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
12729 {
12730 /* It is an impossible processor - skip. */
12731 continue;
12732 }
12733
12734 p = output_message (stream, p, message, start, &left, name, len);
12735 }
12736
12737 /* Display disabled extensions. */
12738 if (ext)
12739 for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
12740 {
12741 name = cpu_noarch [j].name;
12742 len = cpu_noarch [j].len;
12743 p = output_message (stream, p, message, start, &left, name,
12744 len);
12745 }
12746
12747 *p = '\0';
12748 fprintf (stream, "%s\n", message);
12749 }
12750
12751 void
12752 md_show_usage (FILE *stream)
12753 {
12754 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12755 fprintf (stream, _("\
12756 -Qy, -Qn ignored\n\
12757 -V print assembler version number\n\
12758 -k ignored\n"));
12759 #endif
12760 fprintf (stream, _("\
12761 -n Do not optimize code alignment\n\
12762 -q quieten some warnings\n"));
12763 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12764 fprintf (stream, _("\
12765 -s ignored\n"));
12766 #endif
12767 #if defined BFD64 && (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12768 || defined (TE_PE) || defined (TE_PEP))
12769 fprintf (stream, _("\
12770 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
12771 #endif
12772 #ifdef SVR4_COMMENT_CHARS
12773 fprintf (stream, _("\
12774 --divide do not treat `/' as a comment character\n"));
12775 #else
12776 fprintf (stream, _("\
12777 --divide ignored\n"));
12778 #endif
12779 fprintf (stream, _("\
12780 -march=CPU[,+EXTENSION...]\n\
12781 generate code for CPU and EXTENSION, CPU is one of:\n"));
12782 show_arch (stream, 0, 1);
12783 fprintf (stream, _("\
12784 EXTENSION is combination of:\n"));
12785 show_arch (stream, 1, 0);
12786 fprintf (stream, _("\
12787 -mtune=CPU optimize for CPU, CPU is one of:\n"));
12788 show_arch (stream, 0, 0);
12789 fprintf (stream, _("\
12790 -msse2avx encode SSE instructions with VEX prefix\n"));
12791 fprintf (stream, _("\
12792 -msse-check=[none|error|warning] (default: warning)\n\
12793 check SSE instructions\n"));
12794 fprintf (stream, _("\
12795 -moperand-check=[none|error|warning] (default: warning)\n\
12796 check operand combinations for validity\n"));
12797 fprintf (stream, _("\
12798 -mavxscalar=[128|256] (default: 128)\n\
12799 encode scalar AVX instructions with specific vector\n\
12800 length\n"));
12801 fprintf (stream, _("\
12802 -mvexwig=[0|1] (default: 0)\n\
12803 encode VEX instructions with specific VEX.W value\n\
12804 for VEX.W bit ignored instructions\n"));
12805 fprintf (stream, _("\
12806 -mevexlig=[128|256|512] (default: 128)\n\
12807 encode scalar EVEX instructions with specific vector\n\
12808 length\n"));
12809 fprintf (stream, _("\
12810 -mevexwig=[0|1] (default: 0)\n\
12811 encode EVEX instructions with specific EVEX.W value\n\
12812 for EVEX.W bit ignored instructions\n"));
12813 fprintf (stream, _("\
12814 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
12815 encode EVEX instructions with specific EVEX.RC value\n\
12816 for SAE-only ignored instructions\n"));
12817 fprintf (stream, _("\
12818 -mmnemonic=[att|intel] "));
12819 if (SYSV386_COMPAT)
12820 fprintf (stream, _("(default: att)\n"));
12821 else
12822 fprintf (stream, _("(default: intel)\n"));
12823 fprintf (stream, _("\
12824 use AT&T/Intel mnemonic\n"));
12825 fprintf (stream, _("\
12826 -msyntax=[att|intel] (default: att)\n\
12827 use AT&T/Intel syntax\n"));
12828 fprintf (stream, _("\
12829 -mindex-reg support pseudo index registers\n"));
12830 fprintf (stream, _("\
12831 -mnaked-reg don't require `%%' prefix for registers\n"));
12832 fprintf (stream, _("\
12833 -madd-bnd-prefix add BND prefix for all valid branches\n"));
12834 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12835 fprintf (stream, _("\
12836 -mshared disable branch optimization for shared code\n"));
12837 fprintf (stream, _("\
12838 -mx86-used-note=[no|yes] "));
12839 if (DEFAULT_X86_USED_NOTE)
12840 fprintf (stream, _("(default: yes)\n"));
12841 else
12842 fprintf (stream, _("(default: no)\n"));
12843 fprintf (stream, _("\
12844 generate x86 used ISA and feature properties\n"));
12845 #endif
12846 #if defined (TE_PE) || defined (TE_PEP)
12847 fprintf (stream, _("\
12848 -mbig-obj generate big object files\n"));
12849 #endif
12850 fprintf (stream, _("\
12851 -momit-lock-prefix=[no|yes] (default: no)\n\
12852 strip all lock prefixes\n"));
12853 fprintf (stream, _("\
12854 -mfence-as-lock-add=[no|yes] (default: no)\n\
12855 encode lfence, mfence and sfence as\n\
12856 lock addl $0x0, (%%{re}sp)\n"));
12857 fprintf (stream, _("\
12858 -mrelax-relocations=[no|yes] "));
12859 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS)
12860 fprintf (stream, _("(default: yes)\n"));
12861 else
12862 fprintf (stream, _("(default: no)\n"));
12863 fprintf (stream, _("\
12864 generate relax relocations\n"));
12865 fprintf (stream, _("\
12866 -malign-branch-boundary=NUM (default: 0)\n\
12867 align branches within NUM byte boundary\n"));
12868 fprintf (stream, _("\
12869 -malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
12870 TYPE is combination of jcc, fused, jmp, call, ret,\n\
12871 indirect\n\
12872 specify types of branches to align\n"));
12873 fprintf (stream, _("\
12874 -malign-branch-prefix-size=NUM (default: 5)\n\
12875 align branches with NUM prefixes per instruction\n"));
12876 fprintf (stream, _("\
12877 -mbranches-within-32B-boundaries\n\
12878 align branches within 32 byte boundary\n"));
12879 fprintf (stream, _("\
12880 -mamd64 accept only AMD64 ISA [default]\n"));
12881 fprintf (stream, _("\
12882 -mintel64 accept only Intel64 ISA\n"));
12883 }
12884
12885 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
12886 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12887 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12888
12889 /* Pick the target format to use. */
12890
12891 const char *
12892 i386_target_format (void)
12893 {
12894 if (!strncmp (default_arch, "x86_64", 6))
12895 {
12896 update_code_flag (CODE_64BIT, 1);
12897 if (default_arch[6] == '\0')
12898 x86_elf_abi = X86_64_ABI;
12899 else
12900 x86_elf_abi = X86_64_X32_ABI;
12901 }
12902 else if (!strcmp (default_arch, "i386"))
12903 update_code_flag (CODE_32BIT, 1);
12904 else if (!strcmp (default_arch, "iamcu"))
12905 {
12906 update_code_flag (CODE_32BIT, 1);
12907 if (cpu_arch_isa == PROCESSOR_UNKNOWN)
12908 {
12909 static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
12910 cpu_arch_name = "iamcu";
12911 cpu_sub_arch_name = NULL;
12912 cpu_arch_flags = iamcu_flags;
12913 cpu_arch_isa = PROCESSOR_IAMCU;
12914 cpu_arch_isa_flags = iamcu_flags;
12915 if (!cpu_arch_tune_set)
12916 {
12917 cpu_arch_tune = cpu_arch_isa;
12918 cpu_arch_tune_flags = cpu_arch_isa_flags;
12919 }
12920 }
12921 else if (cpu_arch_isa != PROCESSOR_IAMCU)
12922 as_fatal (_("Intel MCU doesn't support `%s' architecture"),
12923 cpu_arch_name);
12924 }
12925 else
12926 as_fatal (_("unknown architecture"));
12927
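/* No ISA or tuning flags were set: fall back on the leading
   cpu_arch entries, assumed here to be the generic 32-bit (index 0)
   and 64-bit (index 1) defaults.  */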
12928 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
12929 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
12930 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
12931 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
12932
12933 switch (OUTPUT_FLAVOR)
12934 {
12935 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
12936 case bfd_target_aout_flavour:
12937 return AOUT_TARGET_FORMAT;
12938 #endif
12939 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
12940 # if defined (TE_PE) || defined (TE_PEP)
12941 case bfd_target_coff_flavour:
12942 if (flag_code == CODE_64BIT)
12943 return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
12944 else
12945 return "pe-i386";
12946 # elif defined (TE_GO32)
12947 case bfd_target_coff_flavour:
12948 return "coff-go32";
12949 # else
12950 case bfd_target_coff_flavour:
12951 return "coff-i386";
12952 # endif
12953 #endif
12954 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
12955 case bfd_target_elf_flavour:
12956 {
12957 const char *format;
12958
12959 switch (x86_elf_abi)
12960 {
12961 default:
12962 format = ELF_TARGET_FORMAT;
12963 #ifndef TE_SOLARIS
12964 tls_get_addr = "___tls_get_addr";
12965 #endif
12966 break;
12967 case X86_64_ABI:
12968 use_rela_relocations = 1;
12969 object_64bit = 1;
12970 #ifndef TE_SOLARIS
12971 tls_get_addr = "__tls_get_addr";
12972 #endif
12973 format = ELF_TARGET_FORMAT64;
12974 break;
12975 case X86_64_X32_ABI:
12976 use_rela_relocations = 1;
12977 object_64bit = 1;
12978 #ifndef TE_SOLARIS
12979 tls_get_addr = "__tls_get_addr";
12980 #endif
12981 disallow_64bit_reloc = 1;
12982 format = ELF_TARGET_FORMAT32;
12983 break;
12984 }
12985 if (cpu_arch_isa == PROCESSOR_L1OM)
12986 {
12987 if (x86_elf_abi != X86_64_ABI)
12988 as_fatal (_("Intel L1OM is 64bit only"));
12989 return ELF_TARGET_L1OM_FORMAT;
12990 }
12991 else if (cpu_arch_isa == PROCESSOR_K1OM)
12992 {
12993 if (x86_elf_abi != X86_64_ABI)
12994 as_fatal (_("Intel K1OM is 64bit only"));
12995 return ELF_TARGET_K1OM_FORMAT;
12996 }
12997 else if (cpu_arch_isa == PROCESSOR_IAMCU)
12998 {
12999 if (x86_elf_abi != I386_ABI)
13000 as_fatal (_("Intel MCU is 32bit only"));
13001 return ELF_TARGET_IAMCU_FORMAT;
13002 }
13003 else
13004 return format;
13005 }
13006 #endif
13007 #if defined (OBJ_MACH_O)
13008 case bfd_target_mach_o_flavour:
13009 if (flag_code == CODE_64BIT)
13010 {
13011 use_rela_relocations = 1;
13012 object_64bit = 1;
13013 return "mach-o-x86-64";
13014 }
13015 else
13016 return "mach-o-i386";
13017 #endif
13018 default:
13019 abort ();
13020 return NULL;
13021 }
13022 }
13023
13024 #endif /* OBJ_MAYBE_ more than one */
13025 \f
13026 symbolS *
13027 md_undefined_symbol (char *name)
13028 {
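/* Compare the first three characters by hand so the full strcmp is
   only paid for names that can actually match
   GLOBAL_OFFSET_TABLE_NAME.  */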
13029 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
13030 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
13031 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
13032 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
13033 {
13034 if (!GOT_symbol)
13035 {
13036 if (symbol_find (name))
13037 as_bad (_("GOT already in symbol table"));
13038 GOT_symbol = symbol_new (name, undefined_section,
13039 (valueT) 0, &zero_address_frag);
13040 }
13041 return GOT_symbol;
13042 }
13043 return 0;
13044 }
13045
13046 /* Round up a section size to the appropriate boundary. */
13047
13048 valueT
13049 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
13050 {
13051 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
13052 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
13053 {
13054 /* For a.out, force the section size to be aligned. If we don't do
13055 this, BFD will align it for us, but it will not write out the
13056 final bytes of the section. This may be a bug in BFD, but it is
13057 easier to fix it here since that is how the other a.out targets
13058 work. */
13059 int align;
13060
13061 align = bfd_section_alignment (segment);
13062 size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
13063 }
13064 #endif
13065
13066 return size;
13067 }
13068
13069 /* On the i386, PC-relative offsets are relative to the start of the
13070 next instruction. That is, the address of the offset, plus its
13071 size, since the offset is always the last part of the insn. */
13072
13073 long
13074 md_pcrel_from (fixS *fixP)
13075 {
13076 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
13077 }
13078
13079 #ifndef I386COFF
13080
13081 static void
13082 s_bss (int ignore ATTRIBUTE_UNUSED)
13083 {
13084 int temp;
13085
13086 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13087 if (IS_ELF)
13088 obj_elf_section_change_hook ();
13089 #endif
13090 temp = get_absolute_expression ();
13091 subseg_set (bss_section, (subsegT) temp);
13092 demand_empty_rest_of_line ();
13093 }
13094
13095 #endif
13096
13097 /* Remember constant directives in code sections; branch alignment must not pad right after them. */
13098
13099 void
13100 i386_cons_align (int ignore ATTRIBUTE_UNUSED)
13101 {
13102 if (last_insn.kind != last_insn_directive
13103 && (bfd_section_flags (now_seg) & SEC_CODE))
13104 {
13105 last_insn.seg = now_seg;
13106 last_insn.kind = last_insn_directive;
13107 last_insn.name = "constant directive";
13108 last_insn.file = as_where (&last_insn.line);
13109 }
13110 }
13111
13112 void
13113 i386_validate_fix (fixS *fixp)
13114 {
13115 if (fixp->fx_subsy)
13116 {
13117 if (fixp->fx_subsy == GOT_symbol)
13118 {
13119 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
13120 {
13121 if (!object_64bit)
13122 abort ();
13123 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
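/* fx_tcbit2 marks a GOT load emitted as a relaxation candidate;
   fx_tcbit records whether the instruction carried a REX prefix,
   selecting the REX_GOTPCRELX variant.  */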
13124 if (fixp->fx_tcbit2)
13125 fixp->fx_r_type = (fixp->fx_tcbit
13126 ? BFD_RELOC_X86_64_REX_GOTPCRELX
13127 : BFD_RELOC_X86_64_GOTPCRELX);
13128 else
13129 #endif
13130 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
13131 }
13132 else
13133 {
13134 if (!object_64bit)
13135 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
13136 else
13137 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
13138 }
13139 fixp->fx_subsy = 0;
13140 }
13141 }
13142 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13143 else if (!object_64bit)
13144 {
13145 if (fixp->fx_r_type == BFD_RELOC_386_GOT32
13146 && fixp->fx_tcbit2)
13147 fixp->fx_r_type = BFD_RELOC_386_GOT32X;
13148 }
13149 #endif
13150 }
13151
13152 arelent *
13153 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
13154 {
13155 arelent *rel;
13156 bfd_reloc_code_real_type code;
13157
13158 switch (fixp->fx_r_type)
13159 {
13160 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13161 case BFD_RELOC_SIZE32:
13162 case BFD_RELOC_SIZE64:
13163 if (S_IS_DEFINED (fixp->fx_addsy)
13164 && !S_IS_EXTERNAL (fixp->fx_addsy))
13165 {
13166 /* Resolve size relocation against local symbol to size of
13167 the symbol plus addend. */
13168 valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
13169 if (fixp->fx_r_type == BFD_RELOC_SIZE32
13170 && !fits_in_unsigned_long (value))
13171 as_bad_where (fixp->fx_file, fixp->fx_line,
13172 _("symbol size computation overflow"));
13173 fixp->fx_addsy = NULL;
13174 fixp->fx_subsy = NULL;
13175 md_apply_fix (fixp, (valueT *) &value, NULL);
13176 return NULL;
13177 }
13178 #endif
13179 /* Fall through. */
13180
13181 case BFD_RELOC_X86_64_PLT32:
13182 case BFD_RELOC_X86_64_GOT32:
13183 case BFD_RELOC_X86_64_GOTPCREL:
13184 case BFD_RELOC_X86_64_GOTPCRELX:
13185 case BFD_RELOC_X86_64_REX_GOTPCRELX:
13186 case BFD_RELOC_386_PLT32:
13187 case BFD_RELOC_386_GOT32:
13188 case BFD_RELOC_386_GOT32X:
13189 case BFD_RELOC_386_GOTOFF:
13190 case BFD_RELOC_386_GOTPC:
13191 case BFD_RELOC_386_TLS_GD:
13192 case BFD_RELOC_386_TLS_LDM:
13193 case BFD_RELOC_386_TLS_LDO_32:
13194 case BFD_RELOC_386_TLS_IE_32:
13195 case BFD_RELOC_386_TLS_IE:
13196 case BFD_RELOC_386_TLS_GOTIE:
13197 case BFD_RELOC_386_TLS_LE_32:
13198 case BFD_RELOC_386_TLS_LE:
13199 case BFD_RELOC_386_TLS_GOTDESC:
13200 case BFD_RELOC_386_TLS_DESC_CALL:
13201 case BFD_RELOC_X86_64_TLSGD:
13202 case BFD_RELOC_X86_64_TLSLD:
13203 case BFD_RELOC_X86_64_DTPOFF32:
13204 case BFD_RELOC_X86_64_DTPOFF64:
13205 case BFD_RELOC_X86_64_GOTTPOFF:
13206 case BFD_RELOC_X86_64_TPOFF32:
13207 case BFD_RELOC_X86_64_TPOFF64:
13208 case BFD_RELOC_X86_64_GOTOFF64:
13209 case BFD_RELOC_X86_64_GOTPC32:
13210 case BFD_RELOC_X86_64_GOT64:
13211 case BFD_RELOC_X86_64_GOTPCREL64:
13212 case BFD_RELOC_X86_64_GOTPC64:
13213 case BFD_RELOC_X86_64_GOTPLT64:
13214 case BFD_RELOC_X86_64_PLTOFF64:
13215 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
13216 case BFD_RELOC_X86_64_TLSDESC_CALL:
13217 case BFD_RELOC_RVA:
13218 case BFD_RELOC_VTABLE_ENTRY:
13219 case BFD_RELOC_VTABLE_INHERIT:
13220 #ifdef TE_PE
13221 case BFD_RELOC_32_SECREL:
13222 #endif
13223 code = fixp->fx_r_type;
13224 break;
13225 case BFD_RELOC_X86_64_32S:
13226 if (!fixp->fx_pcrel)
13227 {
13228 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
13229 code = fixp->fx_r_type;
13230 break;
13231 }
13232 /* Fall through. */
13233 default:
13234 if (fixp->fx_pcrel)
13235 {
13236 switch (fixp->fx_size)
13237 {
13238 default:
13239 as_bad_where (fixp->fx_file, fixp->fx_line,
13240 _("can not do %d byte pc-relative relocation"),
13241 fixp->fx_size);
13242 code = BFD_RELOC_32_PCREL;
13243 break;
13244 case 1: code = BFD_RELOC_8_PCREL; break;
13245 case 2: code = BFD_RELOC_16_PCREL; break;
13246 case 4: code = BFD_RELOC_32_PCREL; break;
13247 #ifdef BFD64
13248 case 8: code = BFD_RELOC_64_PCREL; break;
13249 #endif
13250 }
13251 }
13252 else
13253 {
13254 switch (fixp->fx_size)
13255 {
13256 default:
13257 as_bad_where (fixp->fx_file, fixp->fx_line,
13258 _("can not do %d byte relocation"),
13259 fixp->fx_size);
13260 code = BFD_RELOC_32;
13261 break;
13262 case 1: code = BFD_RELOC_8; break;
13263 case 2: code = BFD_RELOC_16; break;
13264 case 4: code = BFD_RELOC_32; break;
13265 #ifdef BFD64
13266 case 8: code = BFD_RELOC_64; break;
13267 #endif
13268 }
13269 }
13270 break;
13271 }
13272
13273 if ((code == BFD_RELOC_32
13274 || code == BFD_RELOC_32_PCREL
13275 || code == BFD_RELOC_X86_64_32S)
13276 && GOT_symbol
13277 && fixp->fx_addsy == GOT_symbol)
13278 {
13279 if (!object_64bit)
13280 code = BFD_RELOC_386_GOTPC;
13281 else
13282 code = BFD_RELOC_X86_64_GOTPC32;
13283 }
13284 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
13285 && GOT_symbol
13286 && fixp->fx_addsy == GOT_symbol)
13287 {
13288 code = BFD_RELOC_X86_64_GOTPC64;
13289 }
13290
13291 rel = XNEW (arelent);
13292 rel->sym_ptr_ptr = XNEW (asymbol *);
13293 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
13294
13295 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
13296
13297 if (!use_rela_relocations)
13298 {
13299 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
13300 vtable entry to be used in the relocation's section offset. */
13301 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
13302 rel->address = fixp->fx_offset;
13303 #if defined (OBJ_COFF) && defined (TE_PE)
13304 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
13305 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
13306 else
13307 #endif
13308 rel->addend = 0;
13309 }
13310 /* Use rela relocations in 64-bit mode. */
13311 else
13312 {
13313 if (disallow_64bit_reloc)
13314 switch (code)
13315 {
13316 case BFD_RELOC_X86_64_DTPOFF64:
13317 case BFD_RELOC_X86_64_TPOFF64:
13318 case BFD_RELOC_64_PCREL:
13319 case BFD_RELOC_X86_64_GOTOFF64:
13320 case BFD_RELOC_X86_64_GOT64:
13321 case BFD_RELOC_X86_64_GOTPCREL64:
13322 case BFD_RELOC_X86_64_GOTPC64:
13323 case BFD_RELOC_X86_64_GOTPLT64:
13324 case BFD_RELOC_X86_64_PLTOFF64:
13325 as_bad_where (fixp->fx_file, fixp->fx_line,
13326 _("cannot represent relocation type %s in x32 mode"),
13327 bfd_get_reloc_code_name (code));
13328 break;
13329 default:
13330 break;
13331 }
13332
13333 if (!fixp->fx_pcrel)
13334 rel->addend = fixp->fx_offset;
13335 else
13336 switch (code)
13337 {
13338 case BFD_RELOC_X86_64_PLT32:
13339 case BFD_RELOC_X86_64_GOT32:
13340 case BFD_RELOC_X86_64_GOTPCREL:
13341 case BFD_RELOC_X86_64_GOTPCRELX:
13342 case BFD_RELOC_X86_64_REX_GOTPCRELX:
13343 case BFD_RELOC_X86_64_TLSGD:
13344 case BFD_RELOC_X86_64_TLSLD:
13345 case BFD_RELOC_X86_64_GOTTPOFF:
13346 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
13347 case BFD_RELOC_X86_64_TLSDESC_CALL:
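/* These PC-relative relocations resolve at the end of the field
   being relocated, so the field size is backed out of the addend
   here.  */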
13348 rel->addend = fixp->fx_offset - fixp->fx_size;
13349 break;
13350 default:
13351 rel->addend = (section->vma
13352 - fixp->fx_size
13353 + fixp->fx_addnumber
13354 + md_pcrel_from (fixp));
13355 break;
13356 }
13357 }
13358
13359 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
13360 if (rel->howto == NULL)
13361 {
13362 as_bad_where (fixp->fx_file, fixp->fx_line,
13363 _("cannot represent relocation type %s"),
13364 bfd_get_reloc_code_name (code));
13365 /* Set howto to a garbage value so that we can keep going. */
13366 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
13367 gas_assert (rel->howto != NULL);
13368 }
13369
13370 return rel;
13371 }
13372
13373 #include "tc-i386-intel.c"
13374
13375 void
13376 tc_x86_parse_to_dw2regnum (expressionS *exp)
13377 {
13378 int saved_naked_reg;
13379 char saved_register_dot;
13380
13381 saved_naked_reg = allow_naked_reg;
13382 allow_naked_reg = 1;
13383 saved_register_dot = register_chars['.'];
13384 register_chars['.'] = '.';
13385 allow_pseudo_reg = 1;
13386 expression_and_evaluate (exp);
13387 allow_pseudo_reg = 0;
13388 register_chars['.'] = saved_register_dot;
13389 allow_naked_reg = saved_naked_reg;
13390
13391 if (exp->X_op == O_register && exp->X_add_number >= 0)
13392 {
13393 if ((addressT) exp->X_add_number < i386_regtab_size)
13394 {
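/* dw2_regnum[] holds one DWARF register mapping for non-64-bit code
   and one for 64-bit code; flag_code >> 1 selects between them.  */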
13395 exp->X_op = O_constant;
13396 exp->X_add_number = i386_regtab[exp->X_add_number]
13397 .dw2_regnum[flag_code >> 1];
13398 }
13399 else
13400 exp->X_op = O_illegal;
13401 }
13402 }
13403
13404 void
13405 tc_x86_frame_initial_instructions (void)
13406 {
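/* Cache of the stack pointer's DWARF register number, indexed by
   flag_code >> 1: entry 0 (%esp) for non-64-bit code, entry 1
   (%rsp) for 64-bit code.  */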
13407 static unsigned int sp_regno[2];
13408
13409 if (!sp_regno[flag_code >> 1])
13410 {
13411 char *saved_input = input_line_pointer;
13412 char sp[][4] = {"esp", "rsp"};
13413 expressionS exp;
13414
13415 input_line_pointer = sp[flag_code >> 1];
13416 tc_x86_parse_to_dw2regnum (&exp);
13417 gas_assert (exp.X_op == O_constant);
13418 sp_regno[flag_code >> 1] = exp.X_add_number;
13419 input_line_pointer = saved_input;
13420 }
13421
13422 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
13423 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
13424 }
13425
13426 int
13427 x86_dwarf2_addr_size (void)
13428 {
13429 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
13430 if (x86_elf_abi == X86_64_X32_ABI)
13431 return 4;
13432 #endif
13433 return bfd_arch_bits_per_address (stdoutput) / 8;
13434 }
13435
13436 int
13437 i386_elf_section_type (const char *str, size_t len)
13438 {
13439 if (flag_code == CODE_64BIT
13440 && len == sizeof ("unwind") - 1
13441 && strncmp (str, "unwind", 6) == 0)
13442 return SHT_X86_64_UNWIND;
13443
13444 return -1;
13445 }
13446
13447 #ifdef TE_SOLARIS
13448 void
13449 i386_solaris_fix_up_eh_frame (segT sec)
13450 {
13451 if (flag_code == CODE_64BIT)
13452 elf_section_type (sec) = SHT_X86_64_UNWIND;
13453 }
13454 #endif
13455
13456 #ifdef TE_PE
13457 void
13458 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
13459 {
13460 expressionS exp;
13461
13462 exp.X_op = O_secrel;
13463 exp.X_add_symbol = symbol;
13464 exp.X_add_number = 0;
13465 emit_expr (&exp, size);
13466 }
13467 #endif
13468
13469 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13470 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
13471
13472 bfd_vma
13473 x86_64_section_letter (int letter, const char **ptr_msg)
13474 {
13475 if (flag_code == CODE_64BIT)
13476 {
13477 if (letter == 'l')
13478 return SHF_X86_64_LARGE;
13479
13480 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
13481 }
13482 else
13483 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
13484 return -1;
13485 }
13486
13487 bfd_vma
13488 x86_64_section_word (char *str, size_t len)
13489 {
13490 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
13491 return SHF_X86_64_LARGE;
13492
13493 return -1;
13494 }
13495
13496 static void
13497 handle_large_common (int small ATTRIBUTE_UNUSED)
13498 {
13499 if (flag_code != CODE_64BIT)
13500 {
13501 s_comm_internal (0, elf_common_parse);
13502 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
13503 }
13504 else
13505 {
13506 static segT lbss_section;
13507 asection *saved_com_section_ptr = elf_com_section_ptr;
13508 asection *saved_bss_section = bss_section;
13509
13510 if (lbss_section == NULL)
13511 {
13512 flagword applicable;
13513 segT seg = now_seg;
13514 subsegT subseg = now_subseg;
13515
13516 /* The .lbss section is for local .largecomm symbols. */
13517 lbss_section = subseg_new (".lbss", 0);
13518 applicable = bfd_applicable_section_flags (stdoutput);
13519 bfd_set_section_flags (lbss_section, applicable & SEC_ALLOC);
13520 seg_info (lbss_section)->bss = 1;
13521
13522 subseg_set (seg, subseg);
13523 }
13524
13525 elf_com_section_ptr = &_bfd_elf_large_com_section;
13526 bss_section = lbss_section;
13527
13528 s_comm_internal (0, elf_common_parse);
13529
13530 elf_com_section_ptr = saved_com_section_ptr;
13531 bss_section = saved_bss_section;
13532 }
13533 }
13534 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */