x86: Rename match_reg_size to match_operand_size
[deliverable/binutils-gdb.git] / gas / config / tc-i386.c
1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2018 Free Software Foundation, Inc.
3
4 This file is part of GAS, the GNU Assembler.
5
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
19 02110-1301, USA. */
20
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
27
28 #include "as.h"
29 #include "safe-ctype.h"
30 #include "subsegs.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
35
36 #ifndef REGISTER_WARNINGS
37 #define REGISTER_WARNINGS 1
38 #endif
39
40 #ifndef INFER_ADDR_PREFIX
41 #define INFER_ADDR_PREFIX 1
42 #endif
43
44 #ifndef DEFAULT_ARCH
45 #define DEFAULT_ARCH "i386"
46 #endif
47
48 #ifndef INLINE
49 #if __GNUC__ >= 2
50 #define INLINE __inline__
51 #else
52 #define INLINE
53 #endif
54 #endif
55
56 /* Prefixes will be emitted in the order defined below.
57 WAIT_PREFIX must be the first prefix since FWAIT really is an
58 instruction, and so must come before any prefixes.
59 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
60 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
61 #define WAIT_PREFIX 0
62 #define SEG_PREFIX 1
63 #define ADDR_PREFIX 2
64 #define DATA_PREFIX 3
65 #define REP_PREFIX 4
66 #define HLE_PREFIX REP_PREFIX
67 #define BND_PREFIX REP_PREFIX
68 #define LOCK_PREFIX 5
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
71
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
76
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 /* Intel Syntax. Use a non-ascii letter since it never appears
85 in instructions. */
86 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
87
88 #define END_OF_INSN '\0'
89
90 /*
91 'templates' is for grouping together 'template' structures for opcodes
92 of the same name. This is only used for storing the insns in the grand
93 ole hash table of insns.
94 The templates themselves start at START and range up to (but not including)
95 END.
96 */
97 typedef struct
98 {
99 const insn_template *start; /* First template with this mnemonic. */
100 const insn_template *end; /* One past the last such template. */
101 }
102 templates;
103
104 /* 386 operand encoding bytes: see 386 book for details of this. */
105 typedef struct
106 {
107 unsigned int regmem; /* codes register or memory operand */
108 unsigned int reg; /* codes register operand (or extended opcode) */
109 unsigned int mode; /* how to interpret regmem & reg */
110 }
111 modrm_byte;
112
113 /* x86-64 extension prefix. */
114 typedef int rex_byte;
115
116 /* 386 opcode byte to code indirect addressing. */
117 typedef struct
118 {
119 unsigned base; /* base register field */
120 unsigned index; /* index register field */
121 unsigned scale; /* scale factor field */
122 }
123 sib_byte;
124
125 /* x86 arch names, types and features */
126 typedef struct
127 {
128 const char *name; /* arch name */
129 unsigned int len; /* arch string length */
130 enum processor_type type; /* arch type */
131 i386_cpu_flags flags; /* cpu feature flags */
132 unsigned int skip; /* show_arch should skip this. */
133 }
134 arch_entry;
135
136 /* Used to turn off indicated flags. */
137 typedef struct
138 {
139 const char *name; /* arch name */
140 unsigned int len; /* arch string length */
141 i386_cpu_flags flags; /* cpu feature flags */
142 }
143 noarch_entry;
144
145 static void update_code_flag (int, int);
146 static void set_code_flag (int);
147 static void set_16bit_gcc_code_flag (int);
148 static void set_intel_syntax (int);
149 static void set_intel_mnemonic (int);
150 static void set_allow_index_reg (int);
151 static void set_check (int);
152 static void set_cpu_arch (int);
153 #ifdef TE_PE
154 static void pe_directive_secrel (int);
155 #endif
156 static void signed_cons (int);
157 static char *output_invalid (int c);
158 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
159 const char *);
160 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
161 const char *);
162 static int i386_att_operand (char *);
163 static int i386_intel_operand (char *, int);
164 static int i386_intel_simplify (expressionS *);
165 static int i386_intel_parse_name (const char *, expressionS *);
166 static const reg_entry *parse_register (char *, char **);
167 static char *parse_insn (char *, char *);
168 static char *parse_operands (char *, const char *);
169 static void swap_operands (void);
170 static void swap_2_operands (int, int);
171 static void optimize_imm (void);
172 static void optimize_disp (void);
173 static const insn_template *match_template (char);
174 static int check_string (void);
175 static int process_suffix (void);
176 static int check_byte_reg (void);
177 static int check_long_reg (void);
178 static int check_qword_reg (void);
179 static int check_word_reg (void);
180 static int finalize_imm (void);
181 static int process_operands (void);
182 static const seg_entry *build_modrm_byte (void);
183 static void output_insn (void);
184 static void output_imm (fragS *, offsetT);
185 static void output_disp (fragS *, offsetT);
186 #ifndef I386COFF
187 static void s_bss (int);
188 #endif
189 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
190 static void handle_large_common (int small ATTRIBUTE_UNUSED);
191 #endif
192
193 static const char *default_arch = DEFAULT_ARCH;
194
195 /* This struct describes rounding control and SAE in the instruction. */
196 struct RC_Operation
197 {
198 enum rc_type
199 {
200 rne = 0, /* "rn-sae" */
201 rd, /* "rd-sae" */
202 ru, /* "ru-sae" */
203 rz, /* "rz-sae" */
204 saeonly /* "sae" */
205 } type;
206 int operand; /* The operand this rounding/SAE is attached to. */
207 };
208
209 static struct RC_Operation rc_op;
210
211 /* The struct describes masking, applied to OPERAND in the instruction.
212 MASK is a pointer to the corresponding mask register. ZEROING tells
213 whether merging or zeroing mask is used. */
214 struct Mask_Operation
215 {
216 const reg_entry *mask;
217 unsigned int zeroing;
218 /* The operand where this operation is associated. */
219 int operand;
220 };
221
222 static struct Mask_Operation mask_op;
223
224 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
225 broadcast factor. */
226 struct Broadcast_Operation
227 {
228 /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}. */
229 int type;
230
231 /* Index of broadcasted operand. */
232 int operand;
233 };
234
235 static struct Broadcast_Operation broadcast_op;
236
237 /* VEX prefix. */
238 typedef struct
239 {
240 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
241 unsigned char bytes[4];
242 unsigned int length;
243 /* Destination or source register specifier. */
244 const reg_entry *register_specifier;
245 } vex_prefix;
246
247 /* 'md_assemble ()' gathers together information and puts it into a
248 i386_insn. */
249
250 union i386_op
251 {
252 expressionS *disps;
253 expressionS *imms;
254 const reg_entry *regs;
255 };
256
257 enum i386_error
258 {
259 operand_size_mismatch,
260 operand_type_mismatch,
261 register_type_mismatch,
262 number_of_operands_mismatch,
263 invalid_instruction_suffix,
264 bad_imm4,
265 unsupported_with_intel_mnemonic,
266 unsupported_syntax,
267 unsupported,
268 invalid_vsib_address,
269 invalid_vector_register_set,
270 unsupported_vector_index_register,
271 unsupported_broadcast,
272 broadcast_not_on_src_operand,
273 broadcast_needed,
274 unsupported_masking,
275 mask_not_on_destination,
276 no_default_mask,
277 unsupported_rc_sae,
278 rc_sae_operand_not_last_imm,
279 invalid_register_operand,
280 };
281
282 struct _i386_insn
283 {
284 /* TM holds the template for the insn we're currently assembling. */
285 insn_template tm;
286
287 /* SUFFIX holds the instruction size suffix for byte, word, dword
288 or qword, if given. */
289 char suffix;
290
291 /* OPERANDS gives the number of given operands. */
292 unsigned int operands;
293
294 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
295 of given register, displacement, memory operands and immediate
296 operands. */
297 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
298
299 /* TYPES [i] is the type (see above #defines) which tells us how to
300 use OP[i] for the corresponding operand. */
301 i386_operand_type types[MAX_OPERANDS];
302
303 /* Displacement expression, immediate expression, or register for each
304 operand. */
305 union i386_op op[MAX_OPERANDS];
306
307 /* Flags for operands. */
308 unsigned int flags[MAX_OPERANDS];
309 #define Operand_PCrel 1
310
311 /* Relocation type for operand */
312 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
313
314 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
315 the base index byte below. */
316 const reg_entry *base_reg;
317 const reg_entry *index_reg;
318 unsigned int log2_scale_factor;
319
320 /* SEG gives the seg_entries of this insn. They are zero unless
321 explicit segment overrides are given. */
322 const seg_entry *seg[2];
323
324 /* Copied first memory operand string, for re-checking. */
325 char *memop1_string;
326
327 /* PREFIX holds all the given prefix opcodes (usually null).
328 PREFIXES is the number of prefix opcodes. */
329 unsigned int prefixes;
330 unsigned char prefix[MAX_PREFIXES];
331
332 /* RM and SIB are the modrm byte and the sib byte where the
333 addressing modes of this insn are encoded. */
334 modrm_byte rm;
335 rex_byte rex;
336 rex_byte vrex;
337 sib_byte sib;
338 vex_prefix vex;
339
340 /* Masking attributes. */
341 struct Mask_Operation *mask;
342
343 /* Rounding control and SAE attributes. */
344 struct RC_Operation *rounding;
345
346 /* Broadcasting attributes. */
347 struct Broadcast_Operation *broadcast;
348
349 /* Compressed disp8*N attribute. */
350 unsigned int memshift;
351
352 /* Prefer load or store in encoding. */
353 enum
354 {
355 dir_encoding_default = 0,
356 dir_encoding_load,
357 dir_encoding_store
358 } dir_encoding;
359
360 /* Prefer 8bit or 32bit displacement in encoding. */
361 enum
362 {
363 disp_encoding_default = 0,
364 disp_encoding_8bit,
365 disp_encoding_32bit
366 } disp_encoding;
367
368 /* Prefer the REX byte in encoding. */
369 bfd_boolean rex_encoding;
370
371 /* Disable instruction size optimization. */
372 bfd_boolean no_optimize;
373
374 /* How to encode vector instructions. */
375 enum
376 {
377 vex_encoding_default = 0,
378 vex_encoding_vex2,
379 vex_encoding_vex3,
380 vex_encoding_evex
381 } vec_encoding;
382
383 /* REP prefix. */
384 const char *rep_prefix;
385
386 /* HLE prefix. */
387 const char *hle_prefix;
388
389 /* Have BND prefix. */
390 const char *bnd_prefix;
391
392 /* Have NOTRACK prefix. */
393 const char *notrack_prefix;
394
395 /* Error message. */
396 enum i386_error error;
397 };
398
399 typedef struct _i386_insn i386_insn;
400
401 /* Link RC type with corresponding string, that'll be looked for in
402 asm. */
403 struct RC_name
404 {
405 enum rc_type type;
406 const char *name;
407 unsigned int len;
408 };
409
410 static const struct RC_name RC_NamesTable[] =
411 {
412 { rne, STRING_COMMA_LEN ("rn-sae") },
413 { rd, STRING_COMMA_LEN ("rd-sae") },
414 { ru, STRING_COMMA_LEN ("ru-sae") },
415 { rz, STRING_COMMA_LEN ("rz-sae") },
416 { saeonly, STRING_COMMA_LEN ("sae") },
417 };
418
419 /* List of chars besides those in app.c:symbol_chars that can start an
420 operand. Used to prevent the scrubber eating vital white-space. */
421 const char extra_symbol_chars[] = "*%-([{}"
422 #ifdef LEX_AT
423 "@"
424 #endif
425 #ifdef LEX_QM
426 "?"
427 #endif
428 ;
429
430 #if (defined (TE_I386AIX) \
431 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
432 && !defined (TE_GNU) \
433 && !defined (TE_LINUX) \
434 && !defined (TE_NACL) \
435 && !defined (TE_FreeBSD) \
436 && !defined (TE_DragonFly) \
437 && !defined (TE_NetBSD)))
438 /* This array holds the chars that always start a comment. If the
439 pre-processor is disabled, these aren't very useful. The option
440 --divide will remove '/' from this list. */
441 const char *i386_comment_chars = "#/";
442 #define SVR4_COMMENT_CHARS 1
443 #define PREFIX_SEPARATOR '\\'
444
445 #else
446 const char *i386_comment_chars = "#";
447 #define PREFIX_SEPARATOR '/'
448 #endif
449
450 /* This array holds the chars that only start a comment at the beginning of
451 a line. If the line seems to have the form '# 123 filename'
452 .line and .file directives will appear in the pre-processed output.
453 Note that input_file.c hand checks for '#' at the beginning of the
454 first line of the input file. This is because the compiler outputs
455 #NO_APP at the beginning of its output.
456 Also note that comments started like this one will always work if
457 '/' isn't otherwise defined. */
458 const char line_comment_chars[] = "#/";
459
460 const char line_separator_chars[] = ";";
461
462 /* Chars that can be used to separate mant from exp in floating point
463 nums. */
464 const char EXP_CHARS[] = "eE";
465
466 /* Chars that mean this number is a floating point constant
467 As in 0f12.456
468 or 0d1.2345e12. */
469 const char FLT_CHARS[] = "fFdDxX";
470
471 /* Tables for lexical analysis. */
472 static char mnemonic_chars[256];
473 static char register_chars[256];
474 static char operand_chars[256];
475 static char identifier_chars[256];
476 static char digit_chars[256];
477
478 /* Lexical macros. */
479 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
480 #define is_operand_char(x) (operand_chars[(unsigned char) x])
481 #define is_register_char(x) (register_chars[(unsigned char) x])
482 #define is_space_char(x) ((x) == ' ')
483 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
484 #define is_digit_char(x) (digit_chars[(unsigned char) x])
485
486 /* All non-digit non-letter characters that may occur in an operand. */
487 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
488
489 /* md_assemble() always leaves the strings it's passed unaltered. To
490 effect this we maintain a stack of saved characters that we've smashed
491 with '\0's (indicating end of strings for various sub-fields of the
492 assembler instruction). */
493 static char save_stack[32];
494 static char *save_stack_p;
495 #define END_STRING_AND_SAVE(s) \
496 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
497 #define RESTORE_END_STRING(s) \
498 do { *(s) = *--save_stack_p; } while (0)
499
500 /* The instruction we're assembling. */
501 static i386_insn i;
502
503 /* Possible templates for current insn. */
504 static const templates *current_templates;
505
506 /* Per instruction expressionS buffers: max displacements & immediates. */
507 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
508 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
509
510 /* Current operand we are working on. */
511 static int this_operand = -1;
512
513 /* We support four different modes. FLAG_CODE variable is used to distinguish
514 these. */
515
516 enum flag_code {
517 CODE_32BIT,
518 CODE_16BIT,
519 CODE_64BIT };
520
521 static enum flag_code flag_code;
522 static unsigned int object_64bit;
523 static unsigned int disallow_64bit_reloc;
524 static int use_rela_relocations = 0;
525
526 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
527 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
528 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
529
530 /* The ELF ABI to use. */
531 enum x86_elf_abi
532 {
533 I386_ABI,
534 X86_64_ABI,
535 X86_64_X32_ABI
536 };
537
538 static enum x86_elf_abi x86_elf_abi = I386_ABI;
539 #endif
540
541 #if defined (TE_PE) || defined (TE_PEP)
542 /* Use big object file format. */
543 static int use_big_obj = 0;
544 #endif
545
546 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
547 /* 1 if generating code for a shared library. */
548 static int shared = 0;
549 #endif
550
551 /* 1 for intel syntax,
552 0 if att syntax. */
553 static int intel_syntax = 0;
554
555 /* 1 for Intel64 ISA,
556 0 if AMD64 ISA. */
557 static int intel64;
558
559 /* 1 for intel mnemonic,
560 0 if att mnemonic. */
561 static int intel_mnemonic = !SYSV386_COMPAT;
562
563 /* 1 if pseudo registers are permitted. */
564 static int allow_pseudo_reg = 0;
565
566 /* 1 if register prefix % not required. */
567 static int allow_naked_reg = 0;
568
569 /* 1 if the assembler should add BND prefix for all control-transferring
570 instructions supporting it, even if this prefix wasn't specified
571 explicitly. */
572 static int add_bnd_prefix = 0;
573
574 /* 1 if pseudo index register, eiz/riz, is allowed. */
575 static int allow_index_reg = 0;
576
577 /* 1 if the assembler should ignore LOCK prefix, even if it was
578 specified explicitly. */
579 static int omit_lock_prefix = 0;
580
581 /* 1 if the assembler should encode lfence, mfence, and sfence as
582 "lock addl $0, (%{re}sp)". */
583 static int avoid_fence = 0;
584
585 /* 1 if the assembler should generate relax relocations. */
586
587 static int generate_relax_relocations
588 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS;
589
590 static enum check_kind
591 {
592 check_none = 0,
593 check_warning,
594 check_error
595 }
596 sse_check, operand_check = check_warning;
597
598 /* Optimization:
599 1. Clear the REX_W bit with register operand if possible.
600 2. Above plus use 128bit vector instruction to clear the full vector
601 register.
602 */
603 static int optimize = 0;
604
605 /* Optimization:
606 1. Clear the REX_W bit with register operand if possible.
607 2. Above plus use 128bit vector instruction to clear the full vector
608 register.
609 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
610 "testb $imm7,%r8".
611 */
612 static int optimize_for_space = 0;
613
614 /* Register prefix used for error message. */
615 static const char *register_prefix = "%";
616
617 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
618 leave, push, and pop instructions so that gcc has the same stack
619 frame as in 32 bit mode. */
620 static char stackop_size = '\0';
621
622 /* Non-zero to optimize code alignment. */
623 int optimize_align_code = 1;
624
625 /* Non-zero to quieten some warnings. */
626 static int quiet_warnings = 0;
627
628 /* CPU name. */
629 static const char *cpu_arch_name = NULL;
630 static char *cpu_sub_arch_name = NULL;
631
632 /* CPU feature flags. */
633 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
634
635 /* If we have selected a cpu we are generating instructions for. */
636 static int cpu_arch_tune_set = 0;
637
638 /* Cpu we are generating instructions for. */
639 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
640
641 /* CPU feature flags of cpu we are generating instructions for. */
642 static i386_cpu_flags cpu_arch_tune_flags;
643
644 /* CPU instruction set architecture used. */
645 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
646
647 /* CPU feature flags of instruction set architecture used. */
648 i386_cpu_flags cpu_arch_isa_flags;
649
650 /* If set, conditional jumps are not automatically promoted to handle
651 larger than a byte offset. */
652 static unsigned int no_cond_jump_promotion = 0;
653
654 /* Encode SSE instructions with VEX prefix. */
655 static unsigned int sse2avx;
656
657 /* Encode scalar AVX instructions with specific vector length. */
658 static enum
659 {
660 vex128 = 0,
661 vex256
662 } avxscalar;
663
664 /* Encode scalar EVEX LIG instructions with specific vector length. */
665 static enum
666 {
667 evexl128 = 0,
668 evexl256,
669 evexl512
670 } evexlig;
671
672 /* Encode EVEX WIG instructions with specific evex.w. */
673 static enum
674 {
675 evexw0 = 0,
676 evexw1
677 } evexwig;
678
679 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
680 static enum rc_type evexrcig = rne;
681
682 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
683 static symbolS *GOT_symbol;
684
685 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
686 unsigned int x86_dwarf2_return_column;
687
688 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
689 int x86_cie_data_alignment;
690
691 /* Interface to relax_segment.
692 There are 3 major relax states for 386 jump insns because the
693 different types of jumps add different sizes to frags when we're
694 figuring out what sort of jump to choose to reach a given label. */
695
696 /* Types. */
697 #define UNCOND_JUMP 0
698 #define COND_JUMP 1
699 #define COND_JUMP86 2
700
701 /* Sizes. */
702 #define CODE16 1
703 #define SMALL 0
704 #define SMALL16 (SMALL | CODE16)
705 #define BIG 2
706 #define BIG16 (BIG | CODE16)
707
708 #ifndef INLINE
709 #ifdef __GNUC__
710 #define INLINE __inline__
711 #else
712 #define INLINE
713 #endif
714 #endif
715
716 #define ENCODE_RELAX_STATE(type, size) \
717 ((relax_substateT) (((type) << 2) | (size)))
718 #define TYPE_FROM_RELAX_STATE(s) \
719 ((s) >> 2)
720 #define DISP_SIZE_FROM_RELAX_STATE(s) \
721 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
722
723 /* This table is used by relax_frag to promote short jumps to long
724 ones where necessary. SMALL (short) jumps may be promoted to BIG
725 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
726 don't allow a short jump in a 32 bit code segment to be promoted to
727 a 16 bit offset jump because it's slower (requires data size
728 prefix), and doesn't work, unless the destination is in the bottom
729 64k of the code segment (The top 16 bits of eip are zeroed). */
730
731 const relax_typeS md_relax_table[] =
732 {
733 /* The fields are:
734 1) most positive reach of this state,
735 2) most negative reach of this state,
736 3) how many bytes this mode will have in the variable part of the frag
737 4) which index into the table to try if we can't fit into this one. */
738
739 /* UNCOND_JUMP states. */
740 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
741 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
742 /* dword jmp adds 4 bytes to frag:
743 0 extra opcode bytes, 4 displacement bytes. */
744 {0, 0, 4, 0},
745 /* word jmp adds 2 bytes to frag:
746 0 extra opcode bytes, 2 displacement bytes. */
747 {0, 0, 2, 0},
748
749 /* COND_JUMP states. */
750 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
751 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
752 /* dword conditionals add 5 bytes to frag:
753 1 extra opcode byte, 4 displacement bytes. */
754 {0, 0, 5, 0},
755 /* word conditionals add 3 bytes to frag:
756 1 extra opcode byte, 2 displacement bytes. */
757 {0, 0, 3, 0},
758
759 /* COND_JUMP86 states. */
760 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
761 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
762 /* dword conditionals add 5 bytes to frag:
763 1 extra opcode byte, 4 displacement bytes. */
764 {0, 0, 5, 0},
765 /* word conditionals add 4 bytes to frag:
766 1 displacement byte and a 3 byte long branch insn. */
767 {0, 0, 4, 0}
768 };
769
770 static const arch_entry cpu_arch[] =
771 {
772 /* Do not replace the first two entries - i386_target_format()
773 relies on them being there in this order. */
774 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
775 CPU_GENERIC32_FLAGS, 0 },
776 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
777 CPU_GENERIC64_FLAGS, 0 },
778 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
779 CPU_NONE_FLAGS, 0 },
780 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
781 CPU_I186_FLAGS, 0 },
782 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
783 CPU_I286_FLAGS, 0 },
784 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
785 CPU_I386_FLAGS, 0 },
786 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
787 CPU_I486_FLAGS, 0 },
788 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
789 CPU_I586_FLAGS, 0 },
790 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
791 CPU_I686_FLAGS, 0 },
792 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
793 CPU_I586_FLAGS, 0 },
794 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
795 CPU_PENTIUMPRO_FLAGS, 0 },
796 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
797 CPU_P2_FLAGS, 0 },
798 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
799 CPU_P3_FLAGS, 0 },
800 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
801 CPU_P4_FLAGS, 0 },
802 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
803 CPU_CORE_FLAGS, 0 },
804 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
805 CPU_NOCONA_FLAGS, 0 },
806 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
807 CPU_CORE_FLAGS, 1 },
808 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
809 CPU_CORE_FLAGS, 0 },
810 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
811 CPU_CORE2_FLAGS, 1 },
812 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
813 CPU_CORE2_FLAGS, 0 },
814 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
815 CPU_COREI7_FLAGS, 0 },
816 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
817 CPU_L1OM_FLAGS, 0 },
818 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
819 CPU_K1OM_FLAGS, 0 },
820 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU,
821 CPU_IAMCU_FLAGS, 0 },
822 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
823 CPU_K6_FLAGS, 0 },
824 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
825 CPU_K6_2_FLAGS, 0 },
826 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
827 CPU_ATHLON_FLAGS, 0 },
828 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
829 CPU_K8_FLAGS, 1 },
830 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
831 CPU_K8_FLAGS, 0 },
832 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
833 CPU_K8_FLAGS, 0 },
834 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
835 CPU_AMDFAM10_FLAGS, 0 },
836 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
837 CPU_BDVER1_FLAGS, 0 },
838 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
839 CPU_BDVER2_FLAGS, 0 },
840 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
841 CPU_BDVER3_FLAGS, 0 },
842 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD,
843 CPU_BDVER4_FLAGS, 0 },
844 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER,
845 CPU_ZNVER1_FLAGS, 0 },
846 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER,
847 CPU_ZNVER2_FLAGS, 0 },
848 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
849 CPU_BTVER1_FLAGS, 0 },
850 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
851 CPU_BTVER2_FLAGS, 0 },
852 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
853 CPU_8087_FLAGS, 0 },
854 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
855 CPU_287_FLAGS, 0 },
856 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
857 CPU_387_FLAGS, 0 },
858 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN,
859 CPU_687_FLAGS, 0 },
860 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
861 CPU_MMX_FLAGS, 0 },
862 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
863 CPU_SSE_FLAGS, 0 },
864 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
865 CPU_SSE2_FLAGS, 0 },
866 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
867 CPU_SSE3_FLAGS, 0 },
868 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
869 CPU_SSSE3_FLAGS, 0 },
870 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
871 CPU_SSE4_1_FLAGS, 0 },
872 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
873 CPU_SSE4_2_FLAGS, 0 },
874 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
875 CPU_SSE4_2_FLAGS, 0 },
876 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
877 CPU_AVX_FLAGS, 0 },
878 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
879 CPU_AVX2_FLAGS, 0 },
880 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
881 CPU_AVX512F_FLAGS, 0 },
882 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
883 CPU_AVX512CD_FLAGS, 0 },
884 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
885 CPU_AVX512ER_FLAGS, 0 },
886 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
887 CPU_AVX512PF_FLAGS, 0 },
888 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN,
889 CPU_AVX512DQ_FLAGS, 0 },
890 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN,
891 CPU_AVX512BW_FLAGS, 0 },
892 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN,
893 CPU_AVX512VL_FLAGS, 0 },
894 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
895 CPU_VMX_FLAGS, 0 },
896 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
897 CPU_VMFUNC_FLAGS, 0 },
898 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
899 CPU_SMX_FLAGS, 0 },
900 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
901 CPU_XSAVE_FLAGS, 0 },
902 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
903 CPU_XSAVEOPT_FLAGS, 0 },
904 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN,
905 CPU_XSAVEC_FLAGS, 0 },
906 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN,
907 CPU_XSAVES_FLAGS, 0 },
908 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
909 CPU_AES_FLAGS, 0 },
910 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
911 CPU_PCLMUL_FLAGS, 0 },
912 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
913 CPU_PCLMUL_FLAGS, 1 },
914 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
915 CPU_FSGSBASE_FLAGS, 0 },
916 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
917 CPU_RDRND_FLAGS, 0 },
918 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
919 CPU_F16C_FLAGS, 0 },
920 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
921 CPU_BMI2_FLAGS, 0 },
922 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
923 CPU_FMA_FLAGS, 0 },
924 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
925 CPU_FMA4_FLAGS, 0 },
926 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
927 CPU_XOP_FLAGS, 0 },
928 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
929 CPU_LWP_FLAGS, 0 },
930 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
931 CPU_MOVBE_FLAGS, 0 },
932 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
933 CPU_CX16_FLAGS, 0 },
934 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
935 CPU_EPT_FLAGS, 0 },
936 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
937 CPU_LZCNT_FLAGS, 0 },
938 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
939 CPU_HLE_FLAGS, 0 },
940 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
941 CPU_RTM_FLAGS, 0 },
942 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
943 CPU_INVPCID_FLAGS, 0 },
944 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
945 CPU_CLFLUSH_FLAGS, 0 },
946 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
947 CPU_NOP_FLAGS, 0 },
948 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
949 CPU_SYSCALL_FLAGS, 0 },
950 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
951 CPU_RDTSCP_FLAGS, 0 },
952 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
953 CPU_3DNOW_FLAGS, 0 },
954 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
955 CPU_3DNOWA_FLAGS, 0 },
956 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
957 CPU_PADLOCK_FLAGS, 0 },
958 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
959 CPU_SVME_FLAGS, 1 },
960 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
961 CPU_SVME_FLAGS, 0 },
962 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
963 CPU_SSE4A_FLAGS, 0 },
964 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
965 CPU_ABM_FLAGS, 0 },
966 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
967 CPU_BMI_FLAGS, 0 },
968 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
969 CPU_TBM_FLAGS, 0 },
970 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
971 CPU_ADX_FLAGS, 0 },
972 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
973 CPU_RDSEED_FLAGS, 0 },
974 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
975 CPU_PRFCHW_FLAGS, 0 },
976 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
977 CPU_SMAP_FLAGS, 0 },
978 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
979 CPU_MPX_FLAGS, 0 },
980 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
981 CPU_SHA_FLAGS, 0 },
982 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN,
983 CPU_CLFLUSHOPT_FLAGS, 0 },
984 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN,
985 CPU_PREFETCHWT1_FLAGS, 0 },
986 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN,
987 CPU_SE1_FLAGS, 0 },
988 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN,
989 CPU_CLWB_FLAGS, 0 },
990 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN,
991 CPU_AVX512IFMA_FLAGS, 0 },
992 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN,
993 CPU_AVX512VBMI_FLAGS, 0 },
994 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN,
995 CPU_AVX512_4FMAPS_FLAGS, 0 },
996 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN,
997 CPU_AVX512_4VNNIW_FLAGS, 0 },
998 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN,
999 CPU_AVX512_VPOPCNTDQ_FLAGS, 0 },
1000 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN,
1001 CPU_AVX512_VBMI2_FLAGS, 0 },
1002 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN,
1003 CPU_AVX512_VNNI_FLAGS, 0 },
1004 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN,
1005 CPU_AVX512_BITALG_FLAGS, 0 },
1006 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN,
1007 CPU_CLZERO_FLAGS, 0 },
1008 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN,
1009 CPU_MWAITX_FLAGS, 0 },
1010 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN,
1011 CPU_OSPKE_FLAGS, 0 },
1012 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN,
1013 CPU_RDPID_FLAGS, 0 },
1014 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN,
1015 CPU_PTWRITE_FLAGS, 0 },
1016 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN,
1017 CPU_IBT_FLAGS, 0 },
1018 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN,
1019 CPU_SHSTK_FLAGS, 0 },
1020 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN,
1021 CPU_GFNI_FLAGS, 0 },
1022 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN,
1023 CPU_VAES_FLAGS, 0 },
1024 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN,
1025 CPU_VPCLMULQDQ_FLAGS, 0 },
1026 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN,
1027 CPU_WBNOINVD_FLAGS, 0 },
1028 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN,
1029 CPU_PCONFIG_FLAGS, 0 },
1030 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN,
1031 CPU_WAITPKG_FLAGS, 0 },
1032 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN,
1033 CPU_CLDEMOTE_FLAGS, 0 },
1034 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN,
1035 CPU_MOVDIRI_FLAGS, 0 },
1036 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN,
1037 CPU_MOVDIR64B_FLAGS, 0 },
1038 };
1039
/* Feature-disabling extensions accepted by ".arch": each "noFOO" name
   maps to the CPU_ANY_FOO_FLAGS set of feature bits to clear.  */
static const noarch_entry cpu_noarch[] =
{
  { STRING_COMMA_LEN ("no87"),  CPU_ANY_X87_FLAGS },
  { STRING_COMMA_LEN ("no287"),  CPU_ANY_287_FLAGS },
  { STRING_COMMA_LEN ("no387"),  CPU_ANY_387_FLAGS },
  { STRING_COMMA_LEN ("no687"),  CPU_ANY_687_FLAGS },
  { STRING_COMMA_LEN ("nommx"),  CPU_ANY_MMX_FLAGS },
  { STRING_COMMA_LEN ("nosse"),  CPU_ANY_SSE_FLAGS },
  { STRING_COMMA_LEN ("nosse2"),  CPU_ANY_SSE2_FLAGS },
  { STRING_COMMA_LEN ("nosse3"),  CPU_ANY_SSE3_FLAGS },
  { STRING_COMMA_LEN ("nossse3"),  CPU_ANY_SSSE3_FLAGS },
  { STRING_COMMA_LEN ("nosse4.1"),  CPU_ANY_SSE4_1_FLAGS },
  { STRING_COMMA_LEN ("nosse4.2"),  CPU_ANY_SSE4_2_FLAGS },
  { STRING_COMMA_LEN ("nosse4"),  CPU_ANY_SSE4_1_FLAGS },
  { STRING_COMMA_LEN ("noavx"),  CPU_ANY_AVX_FLAGS },
  { STRING_COMMA_LEN ("noavx2"),  CPU_ANY_AVX2_FLAGS },
  { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS },
  { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS },
  { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS },
  { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS },
  { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS },
  { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS },
  { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS },
  { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS },
  { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS },
  { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS },
  { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS },
  { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS },
  { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS },
  { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS },
  { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS },
  { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS },
  { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS },
  { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS },
  { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS },
};
1076
1077 #ifdef I386COFF
1078 /* Like s_lcomm_internal in gas/read.c but the alignment string
1079 is allowed to be optional. */
1080
1081 static symbolS *
1082 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
1083 {
1084 addressT align = 0;
1085
1086 SKIP_WHITESPACE ();
1087
1088 if (needs_align
1089 && *input_line_pointer == ',')
1090 {
1091 align = parse_align (needs_align - 1);
1092
1093 if (align == (addressT) -1)
1094 return NULL;
1095 }
1096 else
1097 {
1098 if (size >= 8)
1099 align = 3;
1100 else if (size >= 4)
1101 align = 2;
1102 else if (size >= 2)
1103 align = 1;
1104 else
1105 align = 0;
1106 }
1107
1108 bss_alloc (symbolP, size, align);
1109 return symbolP;
1110 }
1111
/* Handler for the PE ".lcomm" pseudo-op.  NEEDS_ALIGN is non-zero when
   an alignment argument may follow the size.  */
static void
pe_lcomm (int needs_align)
{
  /* NOTE(review): the doubled value appears to select s_comm_internal's
     alignment-parsing mode -- confirm against gas/read.c.  */
  s_comm_internal (needs_align * 2, pe_lcomm_internal);
}
1117 #endif
1118
/* x86 pseudo-op table: maps directive names (without the leading dot)
   to their handler function and the integer argument passed to it.  */
const pseudo_typeS md_pseudo_table[] =
{
#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
  {"align", s_align_bytes, 0},
#else
  {"align", s_align_ptwo, 0},
#endif
  {"arch", set_cpu_arch, 0},
#ifndef I386COFF
  {"bss", s_bss, 0},
#else
  {"lcomm", pe_lcomm, 1},
#endif
  /* Float directives; the argument selects the float_cons format.  */
  {"ffloat", float_cons, 'f'},
  {"dfloat", float_cons, 'd'},
  {"tfloat", float_cons, 'x'},
  {"value", cons, 2},
  {"slong", signed_cons, 4},
  {"noopt", s_ignore, 0},
  {"optim", s_ignore, 0},
  {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
  {"code16", set_code_flag, CODE_16BIT},
  {"code32", set_code_flag, CODE_32BIT},
#ifdef BFD64
  {"code64", set_code_flag, CODE_64BIT},
#endif
  {"intel_syntax", set_intel_syntax, 1},
  {"att_syntax", set_intel_syntax, 0},
  {"intel_mnemonic", set_intel_mnemonic, 1},
  {"att_mnemonic", set_intel_mnemonic, 0},
  {"allow_index_reg", set_allow_index_reg, 1},
  {"disallow_index_reg", set_allow_index_reg, 0},
  {"sse_check", set_check, 0},
  {"operand_check", set_check, 1},
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"largecomm", handle_large_common, 0},
#else
  {"file", dwarf2_directive_file, 0},
  {"loc", dwarf2_directive_loc, 0},
  {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
#endif
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  {0, 0, 0}
};
1165
1166 /* For interface with expression (). */
1167 extern char *input_line_pointer;
1168
1169 /* Hash table for instruction mnemonic lookup. */
1170 static struct hash_control *op_hash;
1171
1172 /* Hash table for register lookup. */
1173 static struct hash_control *reg_hash;
1174 \f
/* Various efficient no-op patterns for aligning code labels.
   Note: Don't try to assemble the instructions in the comments.
   0L and 0w are not legal.  */
static const unsigned char f32_1[] =
  {0x90};					/* nop			*/
static const unsigned char f32_2[] =
  {0x66,0x90};					/* xchg %ax,%ax		*/
static const unsigned char f32_3[] =
  {0x8d,0x76,0x00};				/* leal 0(%esi),%esi	*/
static const unsigned char f32_4[] =
  {0x8d,0x74,0x26,0x00};			/* leal 0(%esi,1),%esi	*/
static const unsigned char f32_6[] =
  {0x8d,0xb6,0x00,0x00,0x00,0x00};		/* leal 0L(%esi),%esi	*/
static const unsigned char f32_7[] =
  {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};		/* leal 0L(%esi,1),%esi */
static const unsigned char f16_3[] =
  {0x8d,0x74,0x00};				/* lea 0(%si),%si	*/
static const unsigned char f16_4[] =
  {0x8d,0xb4,0x00,0x00};			/* lea 0W(%si),%si	*/
static const unsigned char jump_disp8[] =
  {0xeb};					/* jmp disp8	       */
static const unsigned char jump32_disp32[] =
  {0xe9};					/* jmp disp32	       */
static const unsigned char jump16_disp32[] =
  {0x66,0xe9};					/* jmp disp32	       */
/* 32-bit NOPs patterns.  Entry [N-1] is an N-byte pattern; a NULL entry
   means there is no pattern of that length.  */
static const unsigned char *const f32_patt[] = {
  f32_1, f32_2, f32_3, f32_4, NULL, f32_6, f32_7
};
/* 16-bit NOPs patterns.  */
static const unsigned char *const f16_patt[] = {
  f32_1, f32_2, f16_3, f16_4
};
/* nopl (%[re]ax) */
static const unsigned char alt_3[] =
  {0x0f,0x1f,0x00};
/* nopl 0(%[re]ax) */
static const unsigned char alt_4[] =
  {0x0f,0x1f,0x40,0x00};
/* nopl 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_5[] =
  {0x0f,0x1f,0x44,0x00,0x00};
/* nopw 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_6[] =
  {0x66,0x0f,0x1f,0x44,0x00,0x00};
/* nopl 0L(%[re]ax) */
static const unsigned char alt_7[] =
  {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
/* nopl 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_8[] =
  {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* nopw 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_9[] =
  {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* nopw %cs:0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_10[] =
  {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* data16 nopw %cs:0L(%eax,%eax,1) */
static const unsigned char alt_11[] =
  {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* 32-bit and 64-bit NOPs patterns, using the multi-byte 0F 1F NOP.  */
static const unsigned char *const alt_patt[] = {
  f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
  alt_9, alt_10, alt_11
};
1240
/* Generate COUNT bytes of NOPs at WHERE, drawing from PATT, where
   PATT[N-1] is an N-byte NOP pattern (or NULL if none of that length
   exists) and MAX_SINGLE_NOP_SIZE bounds the size of any single NOP.  */

static void
i386_output_nops (char *where, const unsigned char *const *patt,
		  int count, int max_single_nop_size)

{
  /* Emit the largest NOP repeatedly, then cover the remainder with one
     shorter NOP (plus possibly a one-byte NOP).  */
  int remainder;
  int pos = 0;
  const unsigned char *pattern = patt[max_single_nop_size - 1];

  /* Fall back to the next smaller pattern when the requested size has
     no entry in PATT.  */
  if (pattern == NULL)
    {
      max_single_nop_size--;
      pattern = patt[max_single_nop_size - 1];
    }

  remainder = count % max_single_nop_size;
  count -= remainder;

  while (pos < count)
    {
      memcpy (where + pos, pattern, max_single_nop_size);
      pos += max_single_nop_size;
    }

  if (remainder)
    {
      pattern = patt[remainder - 1];
      if (pattern != NULL)
	memcpy (where + pos, pattern, remainder);
      else
	{
	  /* No pattern of exactly REMAINDER bytes: use the next smaller
	     one followed by a single one-byte NOP.  */
	  remainder--;
	  pattern = patt[remainder - 1];
	  memcpy (where + pos, pattern, remainder);
	  where[pos + remainder] = *patt[0];
	}
    }
}
1283
1284 static INLINE int
1285 fits_in_imm7 (offsetT num)
1286 {
1287 return (num & 0x7f) == num;
1288 }
1289
1290 static INLINE int
1291 fits_in_imm31 (offsetT num)
1292 {
1293 return (num & 0x7fffffff) == num;
1294 }
1295
/* Generate COUNT bytes of NOPs to WHERE with the maximum size of a
   single NOP instruction LIMIT.  FRAGP supplies the frag type and the
   per-frag ISA/tune data that select the NOP pattern table.  */

void
i386_generate_nops (fragS *fragP, char *where, offsetT count, int limit)
{
  /* Pattern table selected below; stays NULL only until one of the
     branches assigns it.  */
  const unsigned char *const *patt = NULL;
  int max_single_nop_size;
  /* Maximum number of NOPs before switching to jump over NOPs.  */
  int max_number_of_nops;

  /* Only NOP-fill and code-alignment frags are padded here.  */
  switch (fragP->fr_type)
    {
    case rs_fill_nop:
    case rs_align_code:
      break;
    default:
      return;
    }

  /* We need to decide which NOP sequence to use for 32bit and
     64bit. When -mtune= is used:

     1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
     PROCESSOR_GENERIC32, f32_patt will be used.
     2. For the rest, alt_patt will be used.

     When -mtune= isn't used, alt_patt will be used if
     cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
     be used.

     When -march= or .arch is used, we can't use anything beyond
     cpu_arch_isa_flags.  */

  if (flag_code == CODE_16BIT)
    {
      patt = f16_patt;
      max_single_nop_size = sizeof (f16_patt) / sizeof (f16_patt[0]);
      /* Limit number of NOPs to 2 in 16-bit mode.  */
      max_number_of_nops = 2;
    }
  else
    {
      if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
	{
	  /* PROCESSOR_UNKNOWN means that all ISAs may be used.  */
	  switch (cpu_arch_tune)
	    {
	    case PROCESSOR_UNKNOWN:
	      /* We use cpu_arch_isa_flags to check if we SHOULD
		 optimize with nops.  */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	    case PROCESSOR_GENERIC64:
	    case PROCESSOR_K6:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_K8:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_BD:
	    case PROCESSOR_ZNVER:
	    case PROCESSOR_BT:
	      patt = alt_patt;
	      break;
	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_IAMCU:
	    case PROCESSOR_GENERIC32:
	      patt = f32_patt;
	      break;
	    }
	}
      else
	{
	  switch (fragP->tc_frag_data.tune)
	    {
	    case PROCESSOR_UNKNOWN:
	      /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
		 PROCESSOR_UNKNOWN.  */
	      abort ();
	      break;

	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_IAMCU:
	    case PROCESSOR_K6:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_K8:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_BD:
	    case PROCESSOR_ZNVER:
	    case PROCESSOR_BT:
	    case PROCESSOR_GENERIC32:
	      /* We use cpu_arch_isa_flags to check if we CAN optimize
		 with nops.  */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_GENERIC64:
	      patt = alt_patt;
	      break;
	    }
	}

      if (patt == f32_patt)
	{
	  max_single_nop_size = sizeof (f32_patt) / sizeof (f32_patt[0]);
	  /* Limit number of NOPs to 2 for older processors.  */
	  max_number_of_nops = 2;
	}
      else
	{
	  max_single_nop_size = sizeof (alt_patt) / sizeof (alt_patt[0]);
	  /* Limit number of NOPs to 7 for newer processors.  */
	  max_number_of_nops = 7;
	}
    }

  /* LIMIT of 0 means "use the largest NOP the pattern table offers".  */
  if (limit == 0)
    limit = max_single_nop_size;

  if (fragP->fr_type == rs_fill_nop)
    {
      /* Output NOPs for .nop directive.  */
      if (limit > max_single_nop_size)
	{
	  as_bad_where (fragP->fr_file, fragP->fr_line,
			_("invalid single nop size: %d "
			  "(expect within [0, %d])"),
			limit, max_single_nop_size);
	  return;
	}
    }
  else
    fragP->fr_var = count;

  if ((count / max_single_nop_size) > max_number_of_nops)
    {
      /* Generate jump over NOPs.  */
      offsetT disp = count - 2;
      if (fits_in_imm7 (disp))
	{
	  /* Use "jmp disp8" if possible.  */
	  count = disp;
	  where[0] = jump_disp8[0];
	  where[1] = count;
	  where += 2;
	}
      else
	{
	  unsigned int size_of_jump;

	  if (flag_code == CODE_16BIT)
	    {
	      where[0] = jump16_disp32[0];
	      where[1] = jump16_disp32[1];
	      size_of_jump = 2;
	    }
	  else
	    {
	      where[0] = jump32_disp32[0];
	      size_of_jump = 1;
	    }

	  /* COUNT becomes the number of padding bytes after the jump.  */
	  count -= size_of_jump + 4;
	  if (!fits_in_imm31 (count))
	    {
	      as_bad_where (fragP->fr_file, fragP->fr_line,
			    _("jump over nop padding out of range"));
	      return;
	    }

	  md_number_to_chars (where + size_of_jump, count, 4);
	  where += size_of_jump + 4;
	}
    }

  /* Generate multiple NOPs.  */
  i386_output_nops (where, patt, count, limit);
}
1503
1504 static INLINE int
1505 operand_type_all_zero (const union i386_operand_type *x)
1506 {
1507 switch (ARRAY_SIZE(x->array))
1508 {
1509 case 3:
1510 if (x->array[2])
1511 return 0;
1512 /* Fall through. */
1513 case 2:
1514 if (x->array[1])
1515 return 0;
1516 /* Fall through. */
1517 case 1:
1518 return !x->array[0];
1519 default:
1520 abort ();
1521 }
1522 }
1523
1524 static INLINE void
1525 operand_type_set (union i386_operand_type *x, unsigned int v)
1526 {
1527 switch (ARRAY_SIZE(x->array))
1528 {
1529 case 3:
1530 x->array[2] = v;
1531 /* Fall through. */
1532 case 2:
1533 x->array[1] = v;
1534 /* Fall through. */
1535 case 1:
1536 x->array[0] = v;
1537 /* Fall through. */
1538 break;
1539 default:
1540 abort ();
1541 }
1542 }
1543
1544 static INLINE int
1545 operand_type_equal (const union i386_operand_type *x,
1546 const union i386_operand_type *y)
1547 {
1548 switch (ARRAY_SIZE(x->array))
1549 {
1550 case 3:
1551 if (x->array[2] != y->array[2])
1552 return 0;
1553 /* Fall through. */
1554 case 2:
1555 if (x->array[1] != y->array[1])
1556 return 0;
1557 /* Fall through. */
1558 case 1:
1559 return x->array[0] == y->array[0];
1560 break;
1561 default:
1562 abort ();
1563 }
1564 }
1565
1566 static INLINE int
1567 cpu_flags_all_zero (const union i386_cpu_flags *x)
1568 {
1569 switch (ARRAY_SIZE(x->array))
1570 {
1571 case 4:
1572 if (x->array[3])
1573 return 0;
1574 /* Fall through. */
1575 case 3:
1576 if (x->array[2])
1577 return 0;
1578 /* Fall through. */
1579 case 2:
1580 if (x->array[1])
1581 return 0;
1582 /* Fall through. */
1583 case 1:
1584 return !x->array[0];
1585 default:
1586 abort ();
1587 }
1588 }
1589
1590 static INLINE int
1591 cpu_flags_equal (const union i386_cpu_flags *x,
1592 const union i386_cpu_flags *y)
1593 {
1594 switch (ARRAY_SIZE(x->array))
1595 {
1596 case 4:
1597 if (x->array[3] != y->array[3])
1598 return 0;
1599 /* Fall through. */
1600 case 3:
1601 if (x->array[2] != y->array[2])
1602 return 0;
1603 /* Fall through. */
1604 case 2:
1605 if (x->array[1] != y->array[1])
1606 return 0;
1607 /* Fall through. */
1608 case 1:
1609 return x->array[0] == y->array[0];
1610 break;
1611 default:
1612 abort ();
1613 }
1614 }
1615
1616 static INLINE int
1617 cpu_flags_check_cpu64 (i386_cpu_flags f)
1618 {
1619 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1620 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1621 }
1622
1623 static INLINE i386_cpu_flags
1624 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1625 {
1626 switch (ARRAY_SIZE (x.array))
1627 {
1628 case 4:
1629 x.array [3] &= y.array [3];
1630 /* Fall through. */
1631 case 3:
1632 x.array [2] &= y.array [2];
1633 /* Fall through. */
1634 case 2:
1635 x.array [1] &= y.array [1];
1636 /* Fall through. */
1637 case 1:
1638 x.array [0] &= y.array [0];
1639 break;
1640 default:
1641 abort ();
1642 }
1643 return x;
1644 }
1645
1646 static INLINE i386_cpu_flags
1647 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1648 {
1649 switch (ARRAY_SIZE (x.array))
1650 {
1651 case 4:
1652 x.array [3] |= y.array [3];
1653 /* Fall through. */
1654 case 3:
1655 x.array [2] |= y.array [2];
1656 /* Fall through. */
1657 case 2:
1658 x.array [1] |= y.array [1];
1659 /* Fall through. */
1660 case 1:
1661 x.array [0] |= y.array [0];
1662 break;
1663 default:
1664 abort ();
1665 }
1666 return x;
1667 }
1668
1669 static INLINE i386_cpu_flags
1670 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1671 {
1672 switch (ARRAY_SIZE (x.array))
1673 {
1674 case 4:
1675 x.array [3] &= ~y.array [3];
1676 /* Fall through. */
1677 case 3:
1678 x.array [2] &= ~y.array [2];
1679 /* Fall through. */
1680 case 2:
1681 x.array [1] &= ~y.array [1];
1682 /* Fall through. */
1683 case 1:
1684 x.array [0] &= ~y.array [0];
1685 break;
1686 default:
1687 abort ();
1688 }
1689 return x;
1690 }
1691
1692 #define CPU_FLAGS_ARCH_MATCH 0x1
1693 #define CPU_FLAGS_64BIT_MATCH 0x2
1694
1695 #define CPU_FLAGS_PERFECT_MATCH \
1696 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1697
/* Return CPU flags match bits for template T against the currently
   enabled architecture: CPU_FLAGS_64BIT_MATCH if T is usable in the
   current code size, plus CPU_FLAGS_ARCH_MATCH if the enabled feature
   flags cover T's requirements.  */

static int
cpu_flags_match (const insn_template *t)
{
  i386_cpu_flags x = t->cpu_flags;
  int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;

  /* The mode bits were consumed above; drop them before comparing
     feature flags.  */
  x.bitfield.cpu64 = 0;
  x.bitfield.cpuno64 = 0;

  if (cpu_flags_all_zero (&x))
    {
      /* This instruction is available on all archs.  */
      match |= CPU_FLAGS_ARCH_MATCH;
    }
  else
    {
      /* This instruction is available only on some archs.  */
      i386_cpu_flags cpu = cpu_arch_flags;

      /* AVX512VL is no standalone feature - match it and then strip it.  */
      if (x.bitfield.cpuavx512vl && !cpu.bitfield.cpuavx512vl)
	return match;
      x.bitfield.cpuavx512vl = 0;

      /* CPU now holds the template requirements that are enabled; any
	 overlap at all may be good enough, subject to the AVX/AVX512F
	 combined-feature checks below.  */
      cpu = cpu_flags_and (x, cpu);
      if (!cpu_flags_all_zero (&cpu))
	{
	  if (x.bitfield.cpuavx)
	    {
	      /* We need to check a few extra flags with AVX.  */
	      if (cpu.bitfield.cpuavx
		  && (!t->opcode_modifier.sse2avx || sse2avx)
		  && (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
		  && (!x.bitfield.cpugfni || cpu.bitfield.cpugfni)
		  && (!x.bitfield.cpupclmul || cpu.bitfield.cpupclmul))
		match |= CPU_FLAGS_ARCH_MATCH;
	    }
	  else if (x.bitfield.cpuavx512f)
	    {
	      /* We need to check a few extra flags with AVX512F.  */
	      if (cpu.bitfield.cpuavx512f
		  && (!x.bitfield.cpugfni || cpu.bitfield.cpugfni)
		  && (!x.bitfield.cpuvaes || cpu.bitfield.cpuvaes)
		  && (!x.bitfield.cpuvpclmulqdq || cpu.bitfield.cpuvpclmulqdq))
		match |= CPU_FLAGS_ARCH_MATCH;
	    }
	  else
	    match |= CPU_FLAGS_ARCH_MATCH;
	}
    }
  return match;
}
1752
1753 static INLINE i386_operand_type
1754 operand_type_and (i386_operand_type x, i386_operand_type y)
1755 {
1756 switch (ARRAY_SIZE (x.array))
1757 {
1758 case 3:
1759 x.array [2] &= y.array [2];
1760 /* Fall through. */
1761 case 2:
1762 x.array [1] &= y.array [1];
1763 /* Fall through. */
1764 case 1:
1765 x.array [0] &= y.array [0];
1766 break;
1767 default:
1768 abort ();
1769 }
1770 return x;
1771 }
1772
1773 static INLINE i386_operand_type
1774 operand_type_and_not (i386_operand_type x, i386_operand_type y)
1775 {
1776 switch (ARRAY_SIZE (x.array))
1777 {
1778 case 3:
1779 x.array [2] &= ~y.array [2];
1780 /* Fall through. */
1781 case 2:
1782 x.array [1] &= ~y.array [1];
1783 /* Fall through. */
1784 case 1:
1785 x.array [0] &= ~y.array [0];
1786 break;
1787 default:
1788 abort ();
1789 }
1790 return x;
1791 }
1792
1793 static INLINE i386_operand_type
1794 operand_type_or (i386_operand_type x, i386_operand_type y)
1795 {
1796 switch (ARRAY_SIZE (x.array))
1797 {
1798 case 3:
1799 x.array [2] |= y.array [2];
1800 /* Fall through. */
1801 case 2:
1802 x.array [1] |= y.array [1];
1803 /* Fall through. */
1804 case 1:
1805 x.array [0] |= y.array [0];
1806 break;
1807 default:
1808 abort ();
1809 }
1810 return x;
1811 }
1812
1813 static INLINE i386_operand_type
1814 operand_type_xor (i386_operand_type x, i386_operand_type y)
1815 {
1816 switch (ARRAY_SIZE (x.array))
1817 {
1818 case 3:
1819 x.array [2] ^= y.array [2];
1820 /* Fall through. */
1821 case 2:
1822 x.array [1] ^= y.array [1];
1823 /* Fall through. */
1824 case 1:
1825 x.array [0] ^= y.array [0];
1826 break;
1827 default:
1828 abort ();
1829 }
1830 return x;
1831 }
1832
/* Frequently used operand type masks, initialized from the generated
   OPERAND_TYPE_* aggregates (opcodes/i386-init.h).  */
static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
static const i386_operand_type anydisp
  = OPERAND_TYPE_ANYDISP;
static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1853
/* Operand classes recognized by operand_type_check.  */
enum operand_type
{
  reg,
  imm,
  disp,
  anymem	/* Any addressing form: displacement and/or base/index.  */
};
1861
1862 static INLINE int
1863 operand_type_check (i386_operand_type t, enum operand_type c)
1864 {
1865 switch (c)
1866 {
1867 case reg:
1868 return t.bitfield.reg;
1869
1870 case imm:
1871 return (t.bitfield.imm8
1872 || t.bitfield.imm8s
1873 || t.bitfield.imm16
1874 || t.bitfield.imm32
1875 || t.bitfield.imm32s
1876 || t.bitfield.imm64);
1877
1878 case disp:
1879 return (t.bitfield.disp8
1880 || t.bitfield.disp16
1881 || t.bitfield.disp32
1882 || t.bitfield.disp32s
1883 || t.bitfield.disp64);
1884
1885 case anymem:
1886 return (t.bitfield.disp8
1887 || t.bitfield.disp16
1888 || t.bitfield.disp32
1889 || t.bitfield.disp32s
1890 || t.bitfield.disp64
1891 || t.bitfield.baseindex);
1892
1893 default:
1894 abort ();
1895 }
1896
1897 return 0;
1898 }
1899
1900 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
1901 between operand GIVEN and opeand WANTED for instruction template T. */
1902
1903 static INLINE int
1904 match_operand_size (const insn_template *t, unsigned int wanted,
1905 unsigned int given)
1906 {
1907 return !((i.types[given].bitfield.byte
1908 && !t->operand_types[wanted].bitfield.byte)
1909 || (i.types[given].bitfield.word
1910 && !t->operand_types[wanted].bitfield.word)
1911 || (i.types[given].bitfield.dword
1912 && !t->operand_types[wanted].bitfield.dword)
1913 || (i.types[given].bitfield.qword
1914 && !t->operand_types[wanted].bitfield.qword)
1915 || (i.types[given].bitfield.tbyte
1916 && !t->operand_types[wanted].bitfield.tbyte));
1917 }
1918
1919 /* Return 1 if there is no conflict in SIMD register between operand GIVEN
1920 and opeand WANTED for instruction template T. */
1921
1922 static INLINE int
1923 match_simd_size (const insn_template *t, unsigned int wanted, unsigned int given)
1924 {
1925 return !((i.types[given].bitfield.xmmword
1926 && !t->operand_types[wanted].bitfield.xmmword)
1927 || (i.types[given].bitfield.ymmword
1928 && !t->operand_types[wanted].bitfield.ymmword)
1929 || (i.types[given].bitfield.zmmword
1930 && !t->operand_types[wanted].bitfield.zmmword));
1931 }
1932
/* Return 1 if there is no conflict in any size between operand GIVEN
   and operand WANTED for instruction template T.  */

static INLINE int
match_mem_size (const insn_template *t, unsigned int wanted, unsigned int given)
{
  return (match_operand_size (t, wanted, given)
	  /* An unspecified-size memory operand (no broadcast) only
	     matches a template operand that also allows unspecified.  */
	  && !((i.types[given].bitfield.unspecified
		&& !i.broadcast
		&& !t->operand_types[wanted].bitfield.unspecified)
	       || (i.types[given].bitfield.fword
		   && !t->operand_types[wanted].bitfield.fword)
	       /* For scalar opcode templates to allow register and memory
		  operands at the same time, some special casing is needed
		  here.  Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
		  down-conversion vpmov*.  */
	       || ((t->operand_types[wanted].bitfield.regsimd
		    && !t->opcode_modifier.broadcast
		    && (t->operand_types[wanted].bitfield.byte
			|| t->operand_types[wanted].bitfield.word
			|| t->operand_types[wanted].bitfield.dword
			|| t->operand_types[wanted].bitfield.qword))
		   ? (i.types[given].bitfield.xmmword
		      || i.types[given].bitfield.ymmword
		      || i.types[given].bitfield.zmmword)
		   : !match_simd_size(t, wanted, given))));
}
1960
1961 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
1962 operands for instruction template T, and it has MATCH_REVERSE set if there
1963 is no size conflict on any operands for the template with operands reversed
1964 (and the template allows for reversing in the first place). */
1965
1966 #define MATCH_STRAIGHT 1
1967 #define MATCH_REVERSE 2
1968
/* Check all operands of template T against the parsed instruction's
   operands.  Returns MATCH_STRAIGHT, possibly ORed with MATCH_REVERSE,
   or 0 (setting i.error) on a size mismatch.  */
static INLINE unsigned int
operand_size_match (const insn_template *t)
{
  unsigned int j, match = MATCH_STRAIGHT;

  /* Don't check jump instructions.  */
  if (t->opcode_modifier.jump
      || t->opcode_modifier.jumpbyte
      || t->opcode_modifier.jumpdword
      || t->opcode_modifier.jumpintersegment)
    return match;

  /* Check memory and accumulator operand size.  */
  for (j = 0; j < i.operands; j++)
    {
      /* Templates taking any size accept non-register operands as-is.  */
      if (!i.types[j].bitfield.reg && !i.types[j].bitfield.regsimd
	  && t->operand_types[j].bitfield.anysize)
	continue;

      if (t->operand_types[j].bitfield.reg
	  && !match_operand_size (t, j, j))
	{
	  match = 0;
	  break;
	}

      if (t->operand_types[j].bitfield.regsimd
	  && !match_simd_size (t, j, j))
	{
	  match = 0;
	  break;
	}

      /* Accumulator operands must pass both size checks.  */
      if (t->operand_types[j].bitfield.acc
	  && (!match_operand_size (t, j, j) || !match_simd_size (t, j, j)))
	{
	  match = 0;
	  break;
	}

      if (i.types[j].bitfield.mem && !match_mem_size (t, j, j))
	{
	  match = 0;
	  break;
	}
    }

  if (!t->opcode_modifier.d)
    {
      /* Shared exit: also reached via goto when the reverse check below
	 fails.  */
mismatch:
      if (!match)
	i.error = operand_size_mismatch;
      return match;
    }

  /* Check reverse.  */
  gas_assert (i.operands == 2);

  for (j = 0; j < 2; j++)
    {
      if ((t->operand_types[j].bitfield.reg
	   || t->operand_types[j].bitfield.acc)
	  && !match_operand_size (t, j, !j))
	goto mismatch;

      if (i.types[!j].bitfield.mem
	  && !match_mem_size (t, j, !j))
	goto mismatch;
    }

  return match | MATCH_REVERSE;
}
2041
2042 static INLINE int
2043 operand_type_match (i386_operand_type overlap,
2044 i386_operand_type given)
2045 {
2046 i386_operand_type temp = overlap;
2047
2048 temp.bitfield.jumpabsolute = 0;
2049 temp.bitfield.unspecified = 0;
2050 temp.bitfield.byte = 0;
2051 temp.bitfield.word = 0;
2052 temp.bitfield.dword = 0;
2053 temp.bitfield.fword = 0;
2054 temp.bitfield.qword = 0;
2055 temp.bitfield.tbyte = 0;
2056 temp.bitfield.xmmword = 0;
2057 temp.bitfield.ymmword = 0;
2058 temp.bitfield.zmmword = 0;
2059 if (operand_type_all_zero (&temp))
2060 goto mismatch;
2061
2062 if (given.bitfield.baseindex == overlap.bitfield.baseindex
2063 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
2064 return 1;
2065
2066 mismatch:
2067 i.error = operand_type_mismatch;
2068 return 0;
2069 }
2070
/* If given types g0 and g1 are registers they must be of the same type
   unless the expected operand type register overlap is null.
   Memory operand size of certain SIMD instructions is also being checked
   here.  */

static INLINE int
operand_type_register_match (i386_operand_type g0,
			     i386_operand_type t0,
			     i386_operand_type g1,
			     i386_operand_type t1)
{
  /* Skip the check unless g0 is a register, or a sized memory operand
     matched against a SIMD register template.  */
  if (!g0.bitfield.reg
      && !g0.bitfield.regsimd
      && (!operand_type_check (g0, anymem)
	  || g0.bitfield.unspecified
	  || !t0.bitfield.regsimd))
    return 1;

  /* Same condition for the second operand.  */
  if (!g1.bitfield.reg
      && !g1.bitfield.regsimd
      && (!operand_type_check (g1, anymem)
	  || g1.bitfield.unspecified
	  || !t1.bitfield.regsimd))
    return 1;

  /* Identical size bits on both operands always match.  */
  if (g0.bitfield.byte == g1.bitfield.byte
      && g0.bitfield.word == g1.bitfield.word
      && g0.bitfield.dword == g1.bitfield.dword
      && g0.bitfield.qword == g1.bitfield.qword
      && g0.bitfield.xmmword == g1.bitfield.xmmword
      && g0.bitfield.ymmword == g1.bitfield.ymmword
      && g0.bitfield.zmmword == g1.bitfield.zmmword)
    return 1;

  /* Differing sizes are still fine when the templates share no common
     size -- i.e. the expected register overlap is null.  */
  if (!(t0.bitfield.byte & t1.bitfield.byte)
      && !(t0.bitfield.word & t1.bitfield.word)
      && !(t0.bitfield.dword & t1.bitfield.dword)
      && !(t0.bitfield.qword & t1.bitfield.qword)
      && !(t0.bitfield.xmmword & t1.bitfield.xmmword)
      && !(t0.bitfield.ymmword & t1.bitfield.ymmword)
      && !(t0.bitfield.zmmword & t1.bitfield.zmmword))
    return 1;

  i.error = register_type_mismatch;

  return 0;
}
2118
2119 static INLINE unsigned int
2120 register_number (const reg_entry *r)
2121 {
2122 unsigned int nr = r->reg_num;
2123
2124 if (r->reg_flags & RegRex)
2125 nr += 8;
2126
2127 if (r->reg_flags & RegVRex)
2128 nr += 16;
2129
2130 return nr;
2131 }
2132
2133 static INLINE unsigned int
2134 mode_from_disp_size (i386_operand_type t)
2135 {
2136 if (t.bitfield.disp8)
2137 return 1;
2138 else if (t.bitfield.disp16
2139 || t.bitfield.disp32
2140 || t.bitfield.disp32s)
2141 return 2;
2142 else
2143 return 0;
2144 }
2145
2146 static INLINE int
2147 fits_in_signed_byte (addressT num)
2148 {
2149 return num + 0x80 <= 0xff;
2150 }
2151
2152 static INLINE int
2153 fits_in_unsigned_byte (addressT num)
2154 {
2155 return num <= 0xff;
2156 }
2157
2158 static INLINE int
2159 fits_in_unsigned_word (addressT num)
2160 {
2161 return num <= 0xffff;
2162 }
2163
2164 static INLINE int
2165 fits_in_signed_word (addressT num)
2166 {
2167 return num + 0x8000 <= 0xffff;
2168 }
2169
2170 static INLINE int
2171 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED)
2172 {
2173 #ifndef BFD64
2174 return 1;
2175 #else
2176 return num + 0x80000000 <= 0xffffffff;
2177 #endif
2178 } /* fits_in_signed_long() */
2179
2180 static INLINE int
2181 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED)
2182 {
2183 #ifndef BFD64
2184 return 1;
2185 #else
2186 return num <= 0xffffffff;
2187 #endif
2188 } /* fits_in_unsigned_long() */
2189
2190 static INLINE int
2191 fits_in_disp8 (offsetT num)
2192 {
2193 int shift = i.memshift;
2194 unsigned int mask;
2195
2196 if (shift == -1)
2197 abort ();
2198
2199 mask = (1 << shift) - 1;
2200
2201 /* Return 0 if NUM isn't properly aligned. */
2202 if ((num & mask))
2203 return 0;
2204
2205 /* Check if NUM will fit in 8bit after shift. */
2206 return fits_in_signed_byte (num >> shift);
2207 }
2208
2209 static INLINE int
2210 fits_in_imm4 (offsetT num)
2211 {
2212 return (num & 0xf) == num;
2213 }
2214
/* Return the set of immediate operand types (Imm1/Imm8/Imm8S/Imm16/
   Imm32/Imm32S/Imm64) that the constant NUM may be encoded as.  Each
   narrower fit also enables all wider encodings; Imm64 is always
   permitted.  */

static i386_operand_type
smallest_imm_type (offsetT num)
{
  i386_operand_type t;

  operand_type_set (&t, 0);
  /* Any immediate can be encoded in 64 bits.  */
  t.bitfield.imm64 = 1;

  if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
    {
      /* This code is disabled on the 486 because all the Imm1 forms
	 in the opcode table are slower on the i486.  They're the
	 versions with the implicitly specified single-position
	 displacement, which has another syntax if you really want to
	 use that form.  */
      t.bitfield.imm1 = 1;
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_byte (num))
    {
      /* [-0x80, 0x7f]: both signed and unsigned byte forms work.  */
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_byte (num))
    {
      /* [0x80, 0xff]: unsigned byte only (no Imm8S).  */
      t.bitfield.imm8 = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
    {
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_long (num))
    {
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_long (num))
    /* [0x80000000, 0xffffffff]: zero-extending 32-bit form only.  */
    t.bitfield.imm32 = 1;

  return t;
}
2268
/* Truncate VAL to SIZE bytes (1, 2, 4 or 8), warning when significant
   bits are lost.  In non-64-bit (or address-size-overridden) code on
   a 64-bit BFD, a value with only the low 32 bits set is first
   sign-extended so that e.g. 0xffffffff is treated as -1 and not
   flagged as out of range.  */

static offsetT
offset_in_range (offsetT val, int size)
{
  addressT mask;

  switch (size)
    {
    case 1: mask = ((addressT) 1 << 8) - 1; break;
    case 2: mask = ((addressT) 1 << 16) - 1; break;
    /* "2 << 31" rather than "1 << 32" so the shift count stays below
       the type width even when addressT is only 32 bits.  */
    case 4: mask = ((addressT) 2 << 31) - 1; break;
#ifdef BFD64
    case 8: mask = ((addressT) 2 << 63) - 1; break;
#endif
    default: abort ();
    }

#ifdef BFD64
  /* If BFD64, sign extend val for 32bit address mode.  */
  if (flag_code != CODE_64BIT
      || i.prefix[ADDR_PREFIX])
    if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
      val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
#endif

  /* Warn unless the discarded high bits are all zero or all one
     (i.e. plain sign extension).  */
  if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
    {
      char buf1[40], buf2[40];

      sprint_value (buf1, val);
      sprint_value (buf2, val & mask);
      as_warn (_("%s shortened to %s"), buf1, buf2);
    }
  return val & mask;
}
2303
/* Classification of the outcome of add_prefix ().  Note that
   PREFIX_EXIST is deliberately zero so it tests false.  */
enum PREFIX_GROUP
{
  PREFIX_EXIST = 0,	/* Prefix of the same class already present.  */
  PREFIX_LOCK,		/* LOCK prefix added.  */
  PREFIX_REP,		/* REP/REPNE prefix added.  */
  PREFIX_DS,		/* DS segment override added.  */
  PREFIX_OTHER		/* Any other prefix added.  */
};
2312
2313 /* Returns
2314 a. PREFIX_EXIST if attempting to add a prefix where one from the
2315 same class already exists.
2316 b. PREFIX_LOCK if lock prefix is added.
2317 c. PREFIX_REP if rep/repne prefix is added.
2318 d. PREFIX_DS if ds prefix is added.
2319 e. PREFIX_OTHER if other prefix is added.
2320 */
2321
/* Record PREFIX in the current instruction's prefix array, returning
   the PREFIX_GROUP classification described above.  PREFIX_EXIST (0)
   is returned, and an error issued, when a prefix of the same class
   was already recorded.  */

static enum PREFIX_GROUP
add_prefix (unsigned int prefix)
{
  enum PREFIX_GROUP ret = PREFIX_OTHER;
  unsigned int q;

  /* REX prefixes (0x40..0x4f) are only prefixes in 64-bit mode; the
     individual W/R/X/B bits may be OR'ed in separately, so only a
     repeated bit counts as a duplicate.  */
  if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
      && flag_code == CODE_64BIT)
    {
      if ((i.prefix[REX_PREFIX] & prefix & REX_W)
	  || (i.prefix[REX_PREFIX] & prefix & REX_R)
	  || (i.prefix[REX_PREFIX] & prefix & REX_X)
	  || (i.prefix[REX_PREFIX] & prefix & REX_B))
	ret = PREFIX_EXIST;
      q = REX_PREFIX;
    }
  else
    {
      /* Map the prefix byte to its slot Q in i.prefix[] and classify
	 the return value.  */
      switch (prefix)
	{
	default:
	  abort ();

	case DS_PREFIX_OPCODE:
	  ret = PREFIX_DS;
	  /* Fall through.  */
	case CS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	  q = SEG_PREFIX;
	  break;

	case REPNE_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	  q = REP_PREFIX;
	  ret = PREFIX_REP;
	  break;

	case LOCK_PREFIX_OPCODE:
	  q = LOCK_PREFIX;
	  ret = PREFIX_LOCK;
	  break;

	case FWAIT_OPCODE:
	  q = WAIT_PREFIX;
	  break;

	case ADDR_PREFIX_OPCODE:
	  q = ADDR_PREFIX;
	  break;

	case DATA_PREFIX_OPCODE:
	  q = DATA_PREFIX;
	  break;
	}
      /* Non-REX slots hold at most one prefix.  */
      if (i.prefix[q] != 0)
	ret = PREFIX_EXIST;
    }

  /* PREFIX_EXIST is 0, so this branch is taken only on success.  */
  if (ret)
    {
      if (!i.prefix[q])
	++i.prefixes;
      i.prefix[q] |= prefix;
    }
  else
    as_bad (_("same type of prefix used twice"));

  return ret;
}
2394
/* Switch the assembler to 16/32/64-bit code generation (VALUE is an
   enum flag_code), updating the cpu64/cpuno64 architecture flags.  If
   the selected architecture cannot support the requested mode, issue
   a fatal error when CHECK is non-zero and a plain error otherwise.  */

static void
update_code_flag (int value, int check)
{
  /* Local function pointer so the diagnostic below can be either
     as_fatal or as_bad depending on CHECK.  */
  PRINTF_LIKE ((*as_error));

  flag_code = (enum flag_code) value;
  if (flag_code == CODE_64BIT)
    {
      cpu_arch_flags.bitfield.cpu64 = 1;
      cpu_arch_flags.bitfield.cpuno64 = 0;
    }
  else
    {
      cpu_arch_flags.bitfield.cpu64 = 0;
      cpu_arch_flags.bitfield.cpuno64 = 1;
    }
  if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("64bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("32bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  /* Reset any operand size override set by a previous mode.  */
  stackop_size = '\0';
}
2431
/* Directive handler for .code16 / .code32 / .code64: switch code size
   without treating an unsupported combination as fatal.  */

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2437
/* Directive handler for .code16gcc: 16-bit code that was compiled for
   32-bit stack operation, so stack operations default to the 'l'
   suffix.  Only CODE_16BIT is valid here.  */

static void
set_16bit_gcc_code_flag (int new_code_flag)
{
  flag_code = (enum flag_code) new_code_flag;
  if (flag_code != CODE_16BIT)
    abort ();
  cpu_arch_flags.bitfield.cpu64 = 0;
  cpu_arch_flags.bitfield.cpuno64 = 1;
  stackop_size = LONG_MNEM_SUFFIX;
}
2448
/* Directive handler for .intel_syntax / .att_syntax (SYNTAX_FLAG is 1
   for Intel, 0 for AT&T).  An optional "prefix"/"noprefix" argument
   selects whether registers need a '%' prefix.  */

static void
set_intel_syntax (int syntax_flag)
{
  /* Find out if register prefixing is specified.  */
  int ask_naked_reg = 0;

  SKIP_WHITESPACE ();
  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string;
      int e = get_symbol_name (&string);

      if (strcmp (string, "prefix") == 0)
	ask_naked_reg = 1;
      else if (strcmp (string, "noprefix") == 0)
	ask_naked_reg = -1;
      else
	as_bad (_("bad argument to syntax directive."));
      (void) restore_line_pointer (e);
    }
  demand_empty_rest_of_line ();

  intel_syntax = syntax_flag;

  if (ask_naked_reg == 0)
    /* No explicit argument: naked registers are the Intel default,
       unless the target reserves a leading symbol character.  */
    allow_naked_reg = (intel_syntax
		       && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
  else
    allow_naked_reg = (ask_naked_reg < 0);

  expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);

  /* Adjust the lexer tables to the chosen syntax.  */
  identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
  identifier_chars['$'] = intel_syntax ? '$' : 0;
  register_prefix = allow_naked_reg ? "" : "%";
}
2485
/* Directive handler for .intel_mnemonic / .att_mnemonic.  */

static void
set_intel_mnemonic (int mnemonic_flag)
{
  intel_mnemonic = mnemonic_flag;
}
2491
/* Directive handler for .allow_index_reg / .disallow_index_reg.  */

static void
set_allow_index_reg (int flag)
{
  allow_index_reg = flag;
}
2497
2498 static void
2499 set_check (int what)
2500 {
2501 enum check_kind *kind;
2502 const char *str;
2503
2504 if (what)
2505 {
2506 kind = &operand_check;
2507 str = "operand";
2508 }
2509 else
2510 {
2511 kind = &sse_check;
2512 str = "sse";
2513 }
2514
2515 SKIP_WHITESPACE ();
2516
2517 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2518 {
2519 char *string;
2520 int e = get_symbol_name (&string);
2521
2522 if (strcmp (string, "none") == 0)
2523 *kind = check_none;
2524 else if (strcmp (string, "warning") == 0)
2525 *kind = check_warning;
2526 else if (strcmp (string, "error") == 0)
2527 *kind = check_error;
2528 else
2529 as_bad (_("bad argument to %s_check directive."), str);
2530 (void) restore_line_pointer (e);
2531 }
2532 else
2533 as_bad (_("missing argument for %s_check directive"), str);
2534
2535 demand_empty_rest_of_line ();
2536 }
2537
/* Diagnose use of the sub-architecture extension NAME (with cpu flags
   NEW_FLAG) when it is incompatible with the ELF machine being
   targeted.  Each guard below returns early either when we are NOT
   targeting the given machine, or when NEW_FLAG carries the matching
   enable bit; only a mismatch falls through to the error.  */

static void
check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
			   i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  /* Cached across calls; computed once from the command line or the
     configured default.  */
  static const char *arch;

  /* Intel LIOM is only supported on ELF.  */
  if (!IS_ELF)
    return;

  if (!arch)
    {
      /* Use cpu_arch_name if it is set in md_parse_option.  Otherwise
	 use default_arch.  */
      arch = cpu_arch_name;
      if (!arch)
	arch = default_arch;
    }

  /* If we are targeting Intel MCU, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_IAMCU
      || new_flag.bitfield.cpuiamcu)
    return;

  /* If we are targeting Intel L1OM, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
      || new_flag.bitfield.cpul1om)
    return;

  /* If we are targeting Intel K1OM, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
      || new_flag.bitfield.cpuk1om)
    return;

  as_bad (_("`%s' is not supported on `%s'"), name, arch);
#endif
}
2576
2577 static void
2578 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2579 {
2580 SKIP_WHITESPACE ();
2581
2582 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2583 {
2584 char *string;
2585 int e = get_symbol_name (&string);
2586 unsigned int j;
2587 i386_cpu_flags flags;
2588
2589 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2590 {
2591 if (strcmp (string, cpu_arch[j].name) == 0)
2592 {
2593 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2594
2595 if (*string != '.')
2596 {
2597 cpu_arch_name = cpu_arch[j].name;
2598 cpu_sub_arch_name = NULL;
2599 cpu_arch_flags = cpu_arch[j].flags;
2600 if (flag_code == CODE_64BIT)
2601 {
2602 cpu_arch_flags.bitfield.cpu64 = 1;
2603 cpu_arch_flags.bitfield.cpuno64 = 0;
2604 }
2605 else
2606 {
2607 cpu_arch_flags.bitfield.cpu64 = 0;
2608 cpu_arch_flags.bitfield.cpuno64 = 1;
2609 }
2610 cpu_arch_isa = cpu_arch[j].type;
2611 cpu_arch_isa_flags = cpu_arch[j].flags;
2612 if (!cpu_arch_tune_set)
2613 {
2614 cpu_arch_tune = cpu_arch_isa;
2615 cpu_arch_tune_flags = cpu_arch_isa_flags;
2616 }
2617 break;
2618 }
2619
2620 flags = cpu_flags_or (cpu_arch_flags,
2621 cpu_arch[j].flags);
2622
2623 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2624 {
2625 if (cpu_sub_arch_name)
2626 {
2627 char *name = cpu_sub_arch_name;
2628 cpu_sub_arch_name = concat (name,
2629 cpu_arch[j].name,
2630 (const char *) NULL);
2631 free (name);
2632 }
2633 else
2634 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2635 cpu_arch_flags = flags;
2636 cpu_arch_isa_flags = flags;
2637 }
2638 else
2639 cpu_arch_isa_flags
2640 = cpu_flags_or (cpu_arch_isa_flags,
2641 cpu_arch[j].flags);
2642 (void) restore_line_pointer (e);
2643 demand_empty_rest_of_line ();
2644 return;
2645 }
2646 }
2647
2648 if (*string == '.' && j >= ARRAY_SIZE (cpu_arch))
2649 {
2650 /* Disable an ISA extension. */
2651 for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
2652 if (strcmp (string + 1, cpu_noarch [j].name) == 0)
2653 {
2654 flags = cpu_flags_and_not (cpu_arch_flags,
2655 cpu_noarch[j].flags);
2656 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2657 {
2658 if (cpu_sub_arch_name)
2659 {
2660 char *name = cpu_sub_arch_name;
2661 cpu_sub_arch_name = concat (name, string,
2662 (const char *) NULL);
2663 free (name);
2664 }
2665 else
2666 cpu_sub_arch_name = xstrdup (string);
2667 cpu_arch_flags = flags;
2668 cpu_arch_isa_flags = flags;
2669 }
2670 (void) restore_line_pointer (e);
2671 demand_empty_rest_of_line ();
2672 return;
2673 }
2674
2675 j = ARRAY_SIZE (cpu_arch);
2676 }
2677
2678 if (j >= ARRAY_SIZE (cpu_arch))
2679 as_bad (_("no such architecture: `%s'"), string);
2680
2681 *input_line_pointer = e;
2682 }
2683 else
2684 as_bad (_("missing cpu architecture"));
2685
2686 no_cond_jump_promotion = 0;
2687 if (*input_line_pointer == ','
2688 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2689 {
2690 char *string;
2691 char e;
2692
2693 ++input_line_pointer;
2694 e = get_symbol_name (&string);
2695
2696 if (strcmp (string, "nojumps") == 0)
2697 no_cond_jump_promotion = 1;
2698 else if (strcmp (string, "jumps") == 0)
2699 ;
2700 else
2701 as_bad (_("no such architecture modifier: `%s'"), string);
2702
2703 (void) restore_line_pointer (e);
2704 }
2705
2706 demand_empty_rest_of_line ();
2707 }
2708
2709 enum bfd_architecture
2710 i386_arch (void)
2711 {
2712 if (cpu_arch_isa == PROCESSOR_L1OM)
2713 {
2714 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2715 || flag_code != CODE_64BIT)
2716 as_fatal (_("Intel L1OM is 64bit ELF only"));
2717 return bfd_arch_l1om;
2718 }
2719 else if (cpu_arch_isa == PROCESSOR_K1OM)
2720 {
2721 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2722 || flag_code != CODE_64BIT)
2723 as_fatal (_("Intel K1OM is 64bit ELF only"));
2724 return bfd_arch_k1om;
2725 }
2726 else if (cpu_arch_isa == PROCESSOR_IAMCU)
2727 {
2728 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2729 || flag_code == CODE_64BIT)
2730 as_fatal (_("Intel MCU is 32bit ELF only"));
2731 return bfd_arch_iamcu;
2732 }
2733 else
2734 return bfd_arch_i386;
2735 }
2736
/* Return the BFD machine number implied by the configured default
   architecture string ("x86_64", "x86_64_32" via the x32 suffix,
   "i386" or "iamcu") combined with the selected ISA.  */

unsigned long
i386_mach (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      if (cpu_arch_isa == PROCESSOR_L1OM)
	{
	  /* L1OM requires plain "x86_64" (no x32 suffix) and ELF.  */
	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	      || default_arch[6] != '\0')
	    as_fatal (_("Intel L1OM is 64bit ELF only"));
	  return bfd_mach_l1om;
	}
      else if (cpu_arch_isa == PROCESSOR_K1OM)
	{
	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	      || default_arch[6] != '\0')
	    as_fatal (_("Intel K1OM is 64bit ELF only"));
	  return bfd_mach_k1om;
	}
      else if (default_arch[6] == '\0')
	return bfd_mach_x86_64;
      else
	/* "x86_64" with a suffix: the x32 ILP32 target.  */
	return bfd_mach_x64_32;
    }
  else if (!strcmp (default_arch, "i386")
	   || !strcmp (default_arch, "iamcu"))
    {
      if (cpu_arch_isa == PROCESSOR_IAMCU)
	{
	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
	    as_fatal (_("Intel MCU is 32bit ELF only"));
	  return bfd_mach_i386_iamcu;
	}
      else
	return bfd_mach_i386_i386;
    }
  else
    as_fatal (_("unknown architecture"));
}
2776 \f
/* One-time target initialization: build the opcode and register hash
   tables, set up the lexical classification tables, and configure
   DWARF CFI parameters for the selected code size.  */

void
md_begin (void)
{
  const char *hash_err;

  /* Support pseudo prefixes like {disp32}.  */
  lex_type ['{'] = LEX_BEGIN_NAME;

  /* Initialize op_hash hash table.  */
  op_hash = hash_new ();

  {
    const insn_template *optab;
    templates *core_optab;

    /* Setup for loop.  */
    optab = i386_optab;
    core_optab = XNEW (templates);
    core_optab->start = optab;

    /* i386_optab is sorted so all templates for one mnemonic are
       adjacent; group each run under a single hash entry.  */
    while (1)
      {
	++optab;
	if (optab->name == NULL
	    || strcmp (optab->name, (optab - 1)->name) != 0)
	  {
	    /* different name --> ship out current template list;
	       add to hash table; & begin anew.  */
	    core_optab->end = optab;
	    hash_err = hash_insert (op_hash,
				    (optab - 1)->name,
				    (void *) core_optab);
	    if (hash_err)
	      {
		as_fatal (_("can't hash %s: %s"),
			  (optab - 1)->name,
			  hash_err);
	      }
	    if (optab->name == NULL)
	      break;
	    core_optab = XNEW (templates);
	    core_optab->start = optab;
	  }
      }
  }

  /* Initialize reg_hash hash table.  */
  reg_hash = hash_new ();
  {
    const reg_entry *regtab;
    unsigned int regtab_size = i386_regtab_size;

    for (regtab = i386_regtab; regtab_size--; regtab++)
      {
	hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
	if (hash_err)
	  as_fatal (_("can't hash %s: %s"),
		    regtab->reg_name,
		    hash_err);
      }
  }

  /* Fill in lexical tables:  mnemonic_chars, operand_chars.  */
  {
    int c;
    char *p;

    for (c = 0; c < 256; c++)
      {
	if (ISDIGIT (c))
	  {
	    digit_chars[c] = c;
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISLOWER (c))
	  {
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISUPPER (c))
	  {
	    /* Mnemonics and registers are matched case-insensitively
	       by folding upper case to lower here.  */
	    mnemonic_chars[c] = TOLOWER (c);
	    register_chars[c] = mnemonic_chars[c];
	    operand_chars[c] = c;
	  }
	else if (c == '{' || c == '}')
	  {
	    /* Braces introduce pseudo prefixes and AVX512 masking.  */
	    mnemonic_chars[c] = c;
	    operand_chars[c] = c;
	  }

	if (ISALPHA (c) || ISDIGIT (c))
	  identifier_chars[c] = c;
	else if (c >= 128)
	  {
	    identifier_chars[c] = c;
	    operand_chars[c] = c;
	  }
      }

#ifdef LEX_AT
    identifier_chars['@'] = '@';
#endif
#ifdef LEX_QM
    identifier_chars['?'] = '?';
    operand_chars['?'] = '?';
#endif
    digit_chars['-'] = '-';
    mnemonic_chars['_'] = '_';
    mnemonic_chars['-'] = '-';
    mnemonic_chars['.'] = '.';
    identifier_chars['_'] = '_';
    identifier_chars['.'] = '.';

    for (p = operand_special_chars; *p != '\0'; p++)
      operand_chars[(unsigned char) *p] = *p;
  }

  /* Configure DWARF CFI defaults for the code size.  */
  if (flag_code == CODE_64BIT)
    {
#if defined (OBJ_COFF) && defined (TE_PE)
      x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
				  ? 32 : 16);
#else
      x86_dwarf2_return_column = 16;
#endif
      x86_cie_data_alignment = -8;
    }
  else
    {
      x86_dwarf2_return_column = 8;
      x86_cie_data_alignment = -4;
    }
}
2914
/* Dump hash table statistics for the opcode and register tables to
   FILE (gas --statistics support).  */

void
i386_print_statistics (FILE *file)
{
  hash_print_statistics (file, "i386 opcode", op_hash);
  hash_print_statistics (file, "i386 register", reg_hash);
}
2921 \f
2922 #ifdef DEBUG386
2923
2924 /* Debugging routines for md_assemble. */
2925 static void pte (insn_template *);
2926 static void pt (i386_operand_type);
2927 static void pe (expressionS *);
2928 static void ps (symbolS *);
2929
/* Debug dump of instruction X to stdout, prefixed by LINE: template,
   addressing, ModRM/SIB bytes, REX bits and each operand.  */

static void
pi (char *line, i386_insn *x)
{
  unsigned int j;

  fprintf (stdout, "%s: template ", line);
  pte (&x->tm);
  fprintf (stdout, "  address: base %s  index %s  scale %x\n",
	   x->base_reg ? x->base_reg->reg_name : "none",
	   x->index_reg ? x->index_reg->reg_name : "none",
	   x->log2_scale_factor);
  fprintf (stdout, "  modrm:  mode %x  reg %x  reg/mem %x\n",
	   x->rm.mode, x->rm.reg, x->rm.regmem);
  fprintf (stdout, "  sib:  base %x  index %x  scale %x\n",
	   x->sib.base, x->sib.index, x->sib.scale);
  fprintf (stdout, "  rex: 64bit %x  extX %x  extY %x  extZ %x\n",
	   (x->rex & REX_W) != 0,
	   (x->rex & REX_R) != 0,
	   (x->rex & REX_X) != 0,
	   (x->rex & REX_B) != 0);
  for (j = 0; j < x->operands; j++)
    {
      fprintf (stdout, "    #%d:  ", j + 1);
      pt (x->types[j]);
      fprintf (stdout, "\n");
      /* Register-class operands print the register name ...  */
      if (x->types[j].bitfield.reg
	  || x->types[j].bitfield.regmmx
	  || x->types[j].bitfield.regsimd
	  || x->types[j].bitfield.sreg2
	  || x->types[j].bitfield.sreg3
	  || x->types[j].bitfield.control
	  || x->types[j].bitfield.debug
	  || x->types[j].bitfield.test)
	fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
      /* ... immediates and displacements print the expression.  */
      if (operand_type_check (x->types[j], imm))
	pe (x->op[j].imms);
      if (operand_type_check (x->types[j], disp))
	pe (x->op[j].disps);
    }
}
2970
/* Debug dump of opcode template T: operand count, opcode bytes,
   D/W modifier flags and each operand type.  */

static void
pte (insn_template *t)
{
  unsigned int j;
  fprintf (stdout, " %d operands ", t->operands);
  fprintf (stdout, "opcode %x ", t->base_opcode);
  if (t->extension_opcode != None)
    fprintf (stdout, "ext %x ", t->extension_opcode);
  if (t->opcode_modifier.d)
    fprintf (stdout, "D");
  if (t->opcode_modifier.w)
    fprintf (stdout, "W");
  fprintf (stdout, "\n");
  for (j = 0; j < t->operands; j++)
    {
      fprintf (stdout, "    #%d type ", j + 1);
      pt (t->operand_types[j]);
      fprintf (stdout, "\n");
    }
}
2991
/* Debug dump of expression E: operator, constant part and any
   attached symbols.  */

static void
pe (expressionS *e)
{
  fprintf (stdout, "    operation     %d\n", e->X_op);
  /* NOTE(review): the (long) casts truncate X_add_number on hosts
     where long is narrower than offsetT — acceptable for a debug
     dump.  */
  fprintf (stdout, "    add_number    %ld (%lx)\n",
	   (long) e->X_add_number, (long) e->X_add_number);
  if (e->X_add_symbol)
    {
      fprintf (stdout, "    add_symbol    ");
      ps (e->X_add_symbol);
      fprintf (stdout, "\n");
    }
  if (e->X_op_symbol)
    {
      fprintf (stdout, "    op_symbol    ");
      ps (e->X_op_symbol);
      fprintf (stdout, "\n");
    }
}
3011
/* Debug dump of symbol S: name, external flag and segment.  */

static void
ps (symbolS *s)
{
  fprintf (stdout, "%s type %s%s",
	   S_GET_NAME (s),
	   S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
	   segment_name (S_GET_SEGMENT (s)));
}
3020
3021 static struct type_name
3022 {
3023 i386_operand_type mask;
3024 const char *name;
3025 }
3026 const type_names[] =
3027 {
3028 { OPERAND_TYPE_REG8, "r8" },
3029 { OPERAND_TYPE_REG16, "r16" },
3030 { OPERAND_TYPE_REG32, "r32" },
3031 { OPERAND_TYPE_REG64, "r64" },
3032 { OPERAND_TYPE_IMM8, "i8" },
3033 { OPERAND_TYPE_IMM8, "i8s" },
3034 { OPERAND_TYPE_IMM16, "i16" },
3035 { OPERAND_TYPE_IMM32, "i32" },
3036 { OPERAND_TYPE_IMM32S, "i32s" },
3037 { OPERAND_TYPE_IMM64, "i64" },
3038 { OPERAND_TYPE_IMM1, "i1" },
3039 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
3040 { OPERAND_TYPE_DISP8, "d8" },
3041 { OPERAND_TYPE_DISP16, "d16" },
3042 { OPERAND_TYPE_DISP32, "d32" },
3043 { OPERAND_TYPE_DISP32S, "d32s" },
3044 { OPERAND_TYPE_DISP64, "d64" },
3045 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
3046 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
3047 { OPERAND_TYPE_CONTROL, "control reg" },
3048 { OPERAND_TYPE_TEST, "test reg" },
3049 { OPERAND_TYPE_DEBUG, "debug reg" },
3050 { OPERAND_TYPE_FLOATREG, "FReg" },
3051 { OPERAND_TYPE_FLOATACC, "FAcc" },
3052 { OPERAND_TYPE_SREG2, "SReg2" },
3053 { OPERAND_TYPE_SREG3, "SReg3" },
3054 { OPERAND_TYPE_ACC, "Acc" },
3055 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
3056 { OPERAND_TYPE_REGMMX, "rMMX" },
3057 { OPERAND_TYPE_REGXMM, "rXMM" },
3058 { OPERAND_TYPE_REGYMM, "rYMM" },
3059 { OPERAND_TYPE_REGZMM, "rZMM" },
3060 { OPERAND_TYPE_REGMASK, "Mask reg" },
3061 { OPERAND_TYPE_ESSEG, "es" },
3062 };
3063
3064 static void
3065 pt (i386_operand_type t)
3066 {
3067 unsigned int j;
3068 i386_operand_type a;
3069
3070 for (j = 0; j < ARRAY_SIZE (type_names); j++)
3071 {
3072 a = operand_type_and (t, type_names[j].mask);
3073 if (!operand_type_all_zero (&a))
3074 fprintf (stdout, "%s, ", type_names[j].name);
3075 }
3076 fflush (stdout);
3077 }
3078
3079 #endif /* DEBUG386 */
3080 \f
/* Return the BFD relocation code for a fixup of SIZE bytes, pc-
   relative when PCREL, with signedness SIGN (>0 signed, 0 unsigned,
   <0 don't care).  OTHER, when not NO_RELOC, is an explicit reloc
   requested by the source (e.g. @got) and is validated — and possibly
   widened for 8-byte fields — instead of being derived from SIZE.
   Returns NO_RELOC after issuing an error on any mismatch.  */

static bfd_reloc_code_real_type
reloc (unsigned int size,
       int pcrel,
       int sign,
       bfd_reloc_code_real_type other)
{
  if (other != NO_RELOC)
    {
      reloc_howto_type *rel;

      /* Widen 32-bit relocs to their 64-bit counterparts for 8-byte
	 fields.  The first three cases return immediately since the
	 64-bit form needs no further validation here.  */
      if (size == 8)
	switch (other)
	  {
	  case BFD_RELOC_X86_64_GOT32:
	    return BFD_RELOC_X86_64_GOT64;
	    break;
	  case BFD_RELOC_X86_64_GOTPLT64:
	    return BFD_RELOC_X86_64_GOTPLT64;
	    break;
	  case BFD_RELOC_X86_64_PLTOFF64:
	    return BFD_RELOC_X86_64_PLTOFF64;
	    break;
	  case BFD_RELOC_X86_64_GOTPC32:
	    other = BFD_RELOC_X86_64_GOTPC64;
	    break;
	  case BFD_RELOC_X86_64_GOTPCREL:
	    other = BFD_RELOC_X86_64_GOTPCREL64;
	    break;
	  case BFD_RELOC_X86_64_TPOFF32:
	    other = BFD_RELOC_X86_64_TPOFF64;
	    break;
	  case BFD_RELOC_X86_64_DTPOFF32:
	    other = BFD_RELOC_X86_64_DTPOFF64;
	    break;
	  default:
	    break;
	  }

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (other == BFD_RELOC_SIZE32)
	{
	  if (size == 8)
	    other = BFD_RELOC_SIZE64;
	  if (pcrel)
	    {
	      as_bad (_("there are no pc-relative size relocations"));
	      return NO_RELOC;
	    }
	}
#endif

      /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless.  */
      if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
	sign = -1;

      /* Validate the requested reloc against the field: size, pc-
	 relativity and signedness must all be compatible.  */
      rel = bfd_reloc_type_lookup (stdoutput, other);
      if (!rel)
	as_bad (_("unknown relocation (%u)"), other);
      else if (size != bfd_get_reloc_size (rel))
	as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
		bfd_get_reloc_size (rel),
		size);
      else if (pcrel && !rel->pc_relative)
	as_bad (_("non-pc-relative relocation for pc-relative field"));
      else if ((rel->complain_on_overflow == complain_overflow_signed
		&& !sign)
	       || (rel->complain_on_overflow == complain_overflow_unsigned
		   && sign > 0))
	as_bad (_("relocated field and relocation type differ in signedness"));
      else
	return other;
      return NO_RELOC;
    }

  /* No explicit reloc: derive one from SIZE/PCREL/SIGN.  */
  if (pcrel)
    {
      if (!sign)
	as_bad (_("there are no unsigned pc-relative relocations"));
      switch (size)
	{
	case 1: return BFD_RELOC_8_PCREL;
	case 2: return BFD_RELOC_16_PCREL;
	case 4: return BFD_RELOC_32_PCREL;
	case 8: return BFD_RELOC_64_PCREL;
	}
      as_bad (_("cannot do %u byte pc-relative relocation"), size);
    }
  else
    {
      if (sign > 0)
	switch (size)
	  {
	  case 4: return BFD_RELOC_X86_64_32S;
	  }
      else
	switch (size)
	  {
	  case 1: return BFD_RELOC_8;
	  case 2: return BFD_RELOC_16;
	  case 4: return BFD_RELOC_32;
	  case 8: return BFD_RELOC_64;
	  }
      as_bad (_("cannot do %s %u byte relocation"),
	      sign > 0 ? "signed" : "unsigned", size);
    }

  return NO_RELOC;
}
3189
3190 /* Here we decide which fixups can be adjusted to make them relative to
3191 the beginning of the section instead of the symbol. Basically we need
3192 to make sure that the dynamic relocations are done correctly, so in
3193 some cases we force the original symbol to be used. */
3194
3195 int
3196 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
3197 {
3198 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3199 if (!IS_ELF)
3200 return 1;
3201
3202 /* Don't adjust pc-relative references to merge sections in 64-bit
3203 mode. */
3204 if (use_rela_relocations
3205 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
3206 && fixP->fx_pcrel)
3207 return 0;
3208
3209 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3210 and changed later by validate_fix. */
3211 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
3212 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
3213 return 0;
3214
3215 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3216 for size relocations. */
3217 if (fixP->fx_r_type == BFD_RELOC_SIZE32
3218 || fixP->fx_r_type == BFD_RELOC_SIZE64
3219 || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
3220 || fixP->fx_r_type == BFD_RELOC_386_PLT32
3221 || fixP->fx_r_type == BFD_RELOC_386_GOT32
3222 || fixP->fx_r_type == BFD_RELOC_386_GOT32X
3223 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
3224 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
3225 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
3226 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
3227 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
3228 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
3229 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
3230 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
3231 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
3232 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
3233 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
3234 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
3235 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
3236 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCRELX
3237 || fixP->fx_r_type == BFD_RELOC_X86_64_REX_GOTPCRELX
3238 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
3239 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
3240 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
3241 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
3242 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
3243 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
3244 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
3245 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
3246 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
3247 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
3248 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
3249 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
3250 return 0;
3251 #endif
3252 return 1;
3253 }
3254
/* Classify MNEMONIC for Intel-syntax memory operand size defaulting:
   0 = not a floating point op (including fxsave/fxrstor),
   1 = ordinary floating point op,
   2 = integer variant (fi...),
   3 = control/state op (fldcw, fldenv, fn*, frstor, fsave, fst[cdes]...).  */

static int
intel_float_operand (const char *mnemonic)
{
  /* Note that the value returned is meaningful only for opcodes with (memory)
     operands, hence the code here is free to improperly handle opcodes that
     have no operands (for better performance and smaller code). */

  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  switch (mnemonic[1])
    {
    /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
       the fs segment override prefix not currently handled because no
       call path can make opcodes without operands get here */
    case 'i':
      return 2 /* integer op */;
    case 'l':
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3; /* fldcw/fldenv */
      break;
    case 'n':
      if (mnemonic[2] != 'o' /* fnop */)
	return 3; /* non-waiting control op */
      break;
    case 'r':
      if (mnemonic[2] == 's')
	return 3; /* frstor/frstpm */
      break;
    case 's':
      if (mnemonic[2] == 'a')
	return 3; /* fsave */
      if (mnemonic[2] == 't')
	{
	  switch (mnemonic[3])
	    {
	    case 'c': /* fstcw */
	    case 'd': /* fstdw */
	    case 'e': /* fstenv */
	    case 's': /* fsts[gw] */
	      return 3;
	    }
	}
      break;
    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0; /* fxsave/fxrstor are not really math ops */
      break;
    }

  /* Any other f-prefixed mnemonic is an ordinary FP operation.  */
  return 1;
}
3307
/* Build the VEX prefix (2-byte C5 or 3-byte C4/8F form) for the current
   instruction in `i', from template T.  Fills in i.vex.length and
   i.vex.bytes[].  May swap the first and last operands (and switch to
   the following template) to make the shorter 2-byte form usable.  */

static void
build_vex_prefix (const insn_template *t)
{
  unsigned int register_specifier;	/* vvvv field, 1's complement.  */
  unsigned int implied_prefix;		/* pp field: 66/F3/F2 encoding.  */
  unsigned int vector_length;		/* L bit: 0 = 128-bit, 1 = 256-bit.  */

  /* Check register specifier.  The vvvv field holds the inverted
     register number; 0xf means "no vvvv register".  */
  if (i.vex.register_specifier)
    {
      register_specifier =
	~register_number (i.vex.register_specifier) & 0xf;
      gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
    }
  else
    register_specifier = 0xf;

  /* Use 2-byte VEX prefix by swapping destination and source
     operand.  The 2-byte form cannot encode REX.B, so for a
     register-only load-form 0F-map insn whose only REX bit is B we
     swap operands (turning B into R) and use the store-form template
     that follows this one.  */
  if (i.vec_encoding != vex_encoding_vex3
      && i.dir_encoding == dir_encoding_default
      && i.operands == i.reg_operands
      && i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.load
      && i.rex == REX_B)
    {
      unsigned int xchg = i.operands - 1;
      union i386_op temp_op;
      i386_operand_type temp_type;

      /* Exchange first and last operand (type and value).  */
      temp_type = i.types[xchg];
      i.types[xchg] = i.types[0];
      i.types[0] = temp_type;
      temp_op = i.op[xchg];
      i.op[xchg] = i.op[0];
      i.op[0] = temp_op;

      gas_assert (i.rm.mode == 3);

      /* The swap moves the REX-extended register into the reg field.  */
      i.rex = REX_R;
      xchg = i.rm.regmem;
      i.rm.regmem = i.rm.reg;
      i.rm.reg = xchg;

      /* Use the next insn.  */
      i.tm = t[1];
    }

  /* Determine the L (vector length) bit.  */
  if (i.tm.opcode_modifier.vex == VEXScalar)
    vector_length = avxscalar;		/* Command-line controlled for scalar insns.  */
  else if (i.tm.opcode_modifier.vex == VEX256)
    vector_length = 1;
  else
    {
      unsigned int op;

      /* Size-agnostic template: L follows the actual (ymm) operand.  */
      vector_length = 0;
      for (op = 0; op < t->operands; ++op)
	if (t->operand_types[op].bitfield.xmmword
	    && t->operand_types[op].bitfield.ymmword
	    && i.types[op].bitfield.ymmword)
	  {
	    vector_length = 1;
	    break;
	  }
    }

  /* Derive the pp field from the prefix byte embedded in the opcode.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* Use 2-byte VEX prefix if possible.  Requires the 0F opcode map,
     W ignorable or 0, and none of REX.W/X/B set.  */
  if (i.vec_encoding != vex_encoding_vex3
      && i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.vexw != VEXW1
      && (i.rex & (REX_W | REX_X | REX_B)) == 0)
    {
      /* 2-byte VEX prefix.  */
      unsigned int r;

      i.vex.length = 2;
      i.vex.bytes[0] = 0xc5;

      /* Check the REX.R bit; it is stored inverted in VEX.  */
      r = (i.rex & REX_R) ? 0 : 1;
      i.vex.bytes[1] = (r << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
  else
    {
      /* 3-byte VEX prefix.  */
      unsigned int m, w;

      i.vex.length = 3;

      /* Select the escape byte and mmmmm (opcode map) field.  */
      switch (i.tm.opcode_modifier.vexopcode)
	{
	case VEX0F:
	  m = 0x1;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F38:
	  m = 0x2;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F3A:
	  m = 0x3;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case XOP08:
	  m = 0x8;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP09:
	  m = 0x9;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP0A:
	  m = 0xa;
	  i.vex.bytes[0] = 0x8f;
	  break;
	default:
	  abort ();
	}

      /* The high 3 bits of the second VEX byte are 1's complement
	 of RXB bits from REX.  */
      i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

      /* Check the REX.W bit; VEXW1 templates force W = 1.  */
      w = (i.rex & REX_W) ? 1 : 0;
      if (i.tm.opcode_modifier.vexw == VEXW1)
	w = 1;

      i.vex.bytes[2] = (w << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
}
3466
3467 static INLINE bfd_boolean
3468 is_evex_encoding (const insn_template *t)
3469 {
3470 return t->opcode_modifier.evex || t->opcode_modifier.disp8memshift
3471 || t->opcode_modifier.broadcast || t->opcode_modifier.masking
3472 || t->opcode_modifier.staticrounding || t->opcode_modifier.sae;
3473 }
3474
/* Build the 4-byte EVEX prefix (0x62 P0 P1 P2) for the current
   instruction in `i'.  Fills in i.vex.length/bytes and consumes the
   pending i.vrex bits (upper-16 register extensions).  */

static void
build_evex_prefix (void)
{
  unsigned int register_specifier;	/* vvvv field, 1's complement.  */
  unsigned int implied_prefix;		/* pp field: 66/F3/F2 encoding.  */
  unsigned int m, w;			/* mm (opcode map) and W bit.  */
  rex_byte vrex_used = 0;		/* vrex bits consumed below.  */

  /* Check register specifier.  */
  if (i.vex.register_specifier)
    {
      gas_assert ((i.vrex & REX_X) == 0);

      register_specifier = i.vex.register_specifier->reg_num;
      if ((i.vex.register_specifier->reg_flags & RegRex))
	register_specifier += 8;
      /* The upper 16 registers are encoded in the fourth byte of the
	 EVEX prefix.  */
      if (!(i.vex.register_specifier->reg_flags & RegVRex))
	i.vex.bytes[3] = 0x8;
      register_specifier = ~register_specifier & 0xf;
    }
  else
    {
      register_specifier = 0xf;

      /* Encode upper 16 vector index register in the fourth byte of
	 the EVEX prefix.  */
      if (!(i.vrex & REX_X))
	i.vex.bytes[3] = 0x8;
      else
	vrex_used |= REX_X;
    }

  /* Derive the pp field from the prefix byte embedded in the opcode.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* 4 byte EVEX prefix.  */
  i.vex.length = 4;
  i.vex.bytes[0] = 0x62;

  /* mmmm bits.  */
  switch (i.tm.opcode_modifier.vexopcode)
    {
    case VEX0F:
      m = 1;
      break;
    case VEX0F38:
      m = 2;
      break;
    case VEX0F3A:
      m = 3;
      break;
    default:
      abort ();
      break;
    }

  /* The high 3 bits of the second EVEX byte are 1's complement of RXB
     bits from REX.  */
  i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

  /* The fifth bit of the second EVEX byte is 1's complement of the
     REX_R bit in VREX (the R' extension).  */
  if (!(i.vrex & REX_R))
    i.vex.bytes[1] |= 0x10;
  else
    vrex_used |= REX_R;

  if ((i.reg_operands + i.imm_operands) == i.operands)
    {
      /* When all operands are registers, the REX_X bit in REX is not
	 used.  We reuse it to encode the upper 16 registers, which is
	 indicated by the REX_B bit in VREX.  The REX_X bit is encoded
	 as 1's complement.  */
      if ((i.vrex & REX_B))
	{
	  vrex_used |= REX_B;
	  i.vex.bytes[1] &= ~0x40;
	}
    }

  /* EVEX instructions shouldn't need the REX prefix; everything in
     i.vrex must have been folded into the EVEX bytes above.  */
  i.vrex &= ~vrex_used;
  gas_assert (i.vrex == 0);

  /* Check the REX.W bit.  VEXW1 templates force W = 1.  */
  w = (i.rex & REX_W) ? 1 : 0;
  if (i.tm.opcode_modifier.vexw)
    {
      if (i.tm.opcode_modifier.vexw == VEXW1)
	w = 1;
    }
  /* If w is not set it means we are dealing with WIG instruction;
     evexwig is the command-line-selected default for W-ignored insns.  */
  else if (!w)
    {
      if (evexwig == evexw1)
	w = 1;
    }

  /* Encode the U bit.  */
  implied_prefix |= 0x4;

  /* The third byte of the EVEX prefix.  */
  i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);

  /* The fourth byte of the EVEX prefix.  */
  /* The zeroing-masking bit.  */
  if (i.mask && i.mask->zeroing)
    i.vex.bytes[3] |= 0x80;

  /* Don't always set the broadcast bit if there is no RC.  */
  if (!i.rounding)
    {
      /* Encode the vector length.  */
      unsigned int vec_length;

      if (!i.tm.opcode_modifier.evex
	  || i.tm.opcode_modifier.evex == EVEXDYN)
	{
	  unsigned int op;

	  /* Length not fixed by the template: derive it from the first
	     operand whose template allows multiple vector sizes, or
	     from the broadcast element size.  */
	  vec_length = 0;
	  for (op = 0; op < i.tm.operands; ++op)
	    if (i.tm.operand_types[op].bitfield.xmmword
		+ i.tm.operand_types[op].bitfield.ymmword
		+ i.tm.operand_types[op].bitfield.zmmword > 1)
	      {
		if (i.types[op].bitfield.zmmword)
		  i.tm.opcode_modifier.evex = EVEX512;
		else if (i.types[op].bitfield.ymmword)
		  i.tm.opcode_modifier.evex = EVEX256;
		else if (i.types[op].bitfield.xmmword)
		  i.tm.opcode_modifier.evex = EVEX128;
		else if (i.broadcast && (int) op == i.broadcast->operand)
		  {
		    /* Total broadcast width = element size (4 or 8
		       bytes) times the broadcast factor, in bits.  */
		    switch ((i.tm.operand_types[op].bitfield.dword ? 4 : 8)
			    * i.broadcast->type)
		      {
		      case 64:
			i.tm.opcode_modifier.evex = EVEX512;
			break;
		      case 32:
			i.tm.opcode_modifier.evex = EVEX256;
			break;
		      case 16:
			i.tm.opcode_modifier.evex = EVEX128;
			break;
		      default:
			continue;
		      }
		  }
		/* NOTE(review): the `break' below is unreachable — the
		   preceding `continue' always runs first, so the loop
		   scans all operands even after .evex is set.  Looks
		   like it was meant to be `else continue; break;';
		   confirm against upstream before changing.  */
		continue;
		break;
	      }
	}

      /* Map the (now fixed) EVEX length to the LL' bits (byte 3,
	 bits 5-6).  */
      switch (i.tm.opcode_modifier.evex)
	{
	case EVEXLIG: /* LL' is ignored */
	  vec_length = evexlig << 5;
	  break;
	case EVEX128:
	  vec_length = 0 << 5;
	  break;
	case EVEX256:
	  vec_length = 1 << 5;
	  break;
	case EVEX512:
	  vec_length = 2 << 5;
	  break;
	default:
	  abort ();
	  break;
	}
      i.vex.bytes[3] |= vec_length;
      /* Encode the broadcast bit.  */
      if (i.broadcast)
	i.vex.bytes[3] |= 0x10;
    }
  else
    {
      /* Rounding control: b bit plus RC in the LL' position; for
	 SAE-only, use the command-line default rounding (evexrcig).  */
      if (i.rounding->type != saeonly)
	i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
      else
	i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
    }

  /* Encode the opmask register number (aaa field).  */
  if (i.mask && i.mask->mask)
    i.vex.bytes[3] |= i.mask->mask->reg_num;
}
3683
/* Handle templates with an immediate-extension opcode: validate and drop
   any fixed register operands (MONITOR/MWAIT, SVME, MONITORX/MWAITX),
   then fake an imm8 operand holding tm.extension_opcode so it gets
   emitted where the 8-bit immediate would go.  */

static void
process_immext (void)
{
  expressionS *exp;

  if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
      && i.operands > 0)
    {
      /* MONITOR/MWAIT as well as SVME instructions have fixed operands
	 with an opcode suffix which is coded in the same place as an
	 8-bit immediate field would be.
	 Here we check those operands and remove them afterwards.  */
      unsigned int x;

      /* Operand N must be the register whose number is N (e.g.
	 %eax/%ecx/%edx for monitor).  */
      for (x = 0; x < i.operands; x++)
	if (register_number (i.op[x].regs) != x)
	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
		  register_prefix, i.op[x].regs->reg_name, x + 1,
		  i.tm.name);

      i.operands = 0;
    }

  if (i.tm.cpu_flags.bitfield.cpumwaitx && i.operands > 0)
    {
      /* MONITORX/MWAITX instructions have fixed operands with an opcode
	 suffix which is coded in the same place as an 8-bit immediate
	 field would be.
	 Here we check those operands and remove them afterwards.  */
      unsigned int x;

      if (i.operands != 3)
	abort();

      for (x = 0; x < 2; x++)
	if (register_number (i.op[x].regs) != x)
	  goto bad_register_operand;

      /* Check for third operand for mwaitx/monitorx insn.  */
      /* mwaitx (extension opcode 0xfb) takes %ebx as its third operand,
	 monitorx takes %edx — hence the +1 adjustment for 0xfb.  */
      if (register_number (i.op[x].regs)
	  != (x + (i.tm.extension_opcode == 0xfb)))
	{
bad_register_operand:
	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
		  register_prefix, i.op[x].regs->reg_name, x+1,
		  i.tm.name);
	}

      i.operands = 0;
    }

  /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
     which is coded in the same place as an 8-bit immediate field
     would be.  Here we fake an 8-bit immediate operand from the
     opcode suffix stored in tm.extension_opcode.

     AVX instructions also use this encoding, for some of
     3 argument instructions.  */

  gas_assert (i.imm_operands <= 1
	      && (i.operands <= 2
		  || ((i.tm.opcode_modifier.vex
		       || i.tm.opcode_modifier.vexopcode
		       || is_evex_encoding (&i.tm))
		      && i.operands <= 4)));

  /* Append the synthetic imm8 operand and clear the extension opcode
     so it isn't emitted a second time.  */
  exp = &im_expressions[i.imm_operands++];
  i.op[i.operands].imms = exp;
  i.types[i.operands] = imm8;
  i.operands++;
  exp->X_op = O_constant;
  exp->X_add_number = i.tm.extension_opcode;
  i.tm.extension_opcode = None;
}
3758
3759
3760 static int
3761 check_hle (void)
3762 {
3763 switch (i.tm.opcode_modifier.hleprefixok)
3764 {
3765 default:
3766 abort ();
3767 case HLEPrefixNone:
3768 as_bad (_("invalid instruction `%s' after `%s'"),
3769 i.tm.name, i.hle_prefix);
3770 return 0;
3771 case HLEPrefixLock:
3772 if (i.prefix[LOCK_PREFIX])
3773 return 1;
3774 as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
3775 return 0;
3776 case HLEPrefixAny:
3777 return 1;
3778 case HLEPrefixRelease:
3779 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3780 {
3781 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3782 i.tm.name);
3783 return 0;
3784 }
3785 if (i.mem_operands == 0
3786 || !operand_type_check (i.types[i.operands - 1], anymem))
3787 {
3788 as_bad (_("memory destination needed for instruction `%s'"
3789 " after `xrelease'"), i.tm.name);
3790 return 0;
3791 }
3792 return 1;
3793 }
3794 }
3795
/* Try the shortest encoding by shortening operand size.  Rewrites the
   matched template/operands in `i' in place.  -Os enables the imm7
   test-to-byte shrink; plain -O enables 64->32-bit shrinks; -O2 adds
   EVEX/VEX 512/256->128-bit vector shrinks.  */

static void
optimize_encoding (void)
{
  int j;

  if (optimize_for_space
      && i.reg_operands == 1
      && i.imm_operands == 1
      && !i.types[1].bitfield.byte
      && i.op[0].imms->X_op == O_constant
      && fits_in_imm7 (i.op[0].imms->X_add_number)
      /* 0xa8 = test $imm, %acc; 0xf6 /0 = test $imm, r/m.  */
      && ((i.tm.base_opcode == 0xa8
	   && i.tm.extension_opcode == None)
	  || (i.tm.base_opcode == 0xf6
	      && i.tm.extension_opcode == 0x0)))
    {
      /* Optimize: -Os:
	   test $imm7, %r64/%r32/%r16  -> test $imm7, %r8
       */
      unsigned int base_regnum = i.op[1].regs->reg_num;
      /* Outside 64-bit mode only %al..%bl (regnum < 4) have byte forms
	 we can use directly.  */
      if (flag_code == CODE_64BIT || base_regnum < 4)
	{
	  i.types[1].bitfield.byte = 1;
	  /* Ignore the suffix.  */
	  i.suffix = 0;
	  if (base_regnum >= 4
	      && !(i.op[1].regs->reg_flags & RegRex))
	    {
	      /* Handle SP, BP, SI and DI registers.  */
	      /* Step back within the register table to the spl/bpl/
		 sil/dil entry matching the original size — assumes
		 i386_regtab layout; the offset depends on the original
		 operand size.  */
	      if (i.types[1].bitfield.word)
		j = 16;
	      else if (i.types[1].bitfield.dword)
		j = 32;
	      else
		j = 48;
	      i.op[1].regs -= j;
	    }
	}
    }
  else if (flag_code == CODE_64BIT
	   && ((i.types[1].bitfield.qword
		&& i.reg_operands == 1
		&& i.imm_operands == 1
		&& i.op[0].imms->X_op == O_constant
		/* 0xb0 = mov $imm, %reg (short form).  */
		&& ((i.tm.base_opcode == 0xb0
		     && i.tm.extension_opcode == None
		     && fits_in_unsigned_long (i.op[0].imms->X_add_number))
		    || (fits_in_imm31 (i.op[0].imms->X_add_number)
			/* 0x24 = and $imm, %acc; 0xa8 = test $imm, %acc;
			   0x80 /4 = and $imm, r/m; 0xf6 /0 = test;
			   0xc6 /0 = mov $imm, r/m.  */
			&& (((i.tm.base_opcode == 0x24
			      || i.tm.base_opcode == 0xa8)
			     && i.tm.extension_opcode == None)
			    || (i.tm.base_opcode == 0x80
				&& i.tm.extension_opcode == 0x4)
			    || ((i.tm.base_opcode == 0xf6
				 || i.tm.base_opcode == 0xc6)
				&& i.tm.extension_opcode == 0x0)))))
	       || (i.types[0].bitfield.qword
		   && ((i.reg_operands == 2
			&& i.op[0].regs == i.op[1].regs
			/* 0x30 = xor %reg, %reg; 0x28 = sub %reg, %reg.  */
			&& ((i.tm.base_opcode == 0x30
			     || i.tm.base_opcode == 0x28)
			    && i.tm.extension_opcode == None))
		       || (i.reg_operands == 1
			   && i.operands == 1
			   && i.tm.base_opcode == 0x30
			   && i.tm.extension_opcode == None)))))
    {
      /* Optimize: -O:
	   andq $imm31, %r64   -> andl $imm31, %r32
	   testq $imm31, %r64  -> testl $imm31, %r32
	   xorq %r64, %r64     -> xorl %r32, %r32
	   subq %r64, %r64     -> subl %r32, %r32
	   movq $imm31, %r64   -> movl $imm31, %r32
	   movq $imm32, %r64   -> movl $imm32, %r32
       */
      /* Dropping REX.W makes the operation 32-bit; the result is the
	 same because 32-bit ops zero-extend into the full 64-bit reg.  */
      i.tm.opcode_modifier.norex64 = 1;
      if (i.tm.base_opcode == 0xb0 || i.tm.base_opcode == 0xc6)
	{
	  /* Handle
	       movq $imm31, %r64   -> movl $imm31, %r32
	       movq $imm32, %r64   -> movl $imm32, %r32
	   */
	  i.tm.operand_types[0].bitfield.imm32 = 1;
	  i.tm.operand_types[0].bitfield.imm32s = 0;
	  i.tm.operand_types[0].bitfield.imm64 = 0;
	  i.types[0].bitfield.imm32 = 1;
	  i.types[0].bitfield.imm32s = 0;
	  i.types[0].bitfield.imm64 = 0;
	  i.types[1].bitfield.dword = 1;
	  i.types[1].bitfield.qword = 0;
	  if (i.tm.base_opcode == 0xc6)
	    {
	      /* Handle
		   movq $imm31, %r64   -> movl $imm31, %r32
	       */
	      /* Switch from the ModRM form (C6 /0) to the shorter
		 B0+reg short form.  */
	      i.tm.base_opcode = 0xb0;
	      i.tm.extension_opcode = None;
	      i.tm.opcode_modifier.shortform = 1;
	      i.tm.opcode_modifier.modrm = 0;
	    }
	}
    }
  else if (optimize > 1
	   && i.reg_operands == 3
	   && i.op[0].regs == i.op[1].regs
	   && !i.types[2].bitfield.xmmword
	   && (i.tm.opcode_modifier.vex
	       || ((!i.mask || i.mask->zeroing)
		   && !i.rounding
		   && is_evex_encoding (&i.tm)
		   && (i.vec_encoding != vex_encoding_evex
		       || i.tm.cpu_flags.bitfield.cpuavx512vl
		       || (i.tm.operand_types[2].bitfield.zmmword
			   && i.types[2].bitfield.ymmword)
		       || cpu_arch_isa_flags.bitfield.cpuavx512vl)))
	   /* Opcodes of vandn/vxor/vpsub variants (including the 0x66
	      prefix byte folded into the high opcode byte).  */
	   && ((i.tm.base_opcode == 0x55
		|| i.tm.base_opcode == 0x6655
		|| i.tm.base_opcode == 0x66df
		|| i.tm.base_opcode == 0x57
		|| i.tm.base_opcode == 0x6657
		|| i.tm.base_opcode == 0x66ef
		|| i.tm.base_opcode == 0x66f8
		|| i.tm.base_opcode == 0x66f9
		|| i.tm.base_opcode == 0x66fa
		|| i.tm.base_opcode == 0x66fb)
	       && i.tm.extension_opcode == None))
    {
      /* Optimize: -O2:
	   VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
	   vpsubq and vpsubw:
	     EVEX VOP %zmmM, %zmmM, %zmmN
	       -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
	       -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
	     EVEX VOP %ymmM, %ymmM, %ymmN
	       -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
	       -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
	     VEX VOP %ymmM, %ymmM, %ymmN
	       -> VEX VOP %xmmM, %xmmM, %xmmN
	   VOP, one of vpandn and vpxor:
	     VEX VOP %ymmM, %ymmM, %ymmN
	       -> VEX VOP %xmmM, %xmmM, %xmmN
	   VOP, one of vpandnd and vpandnq:
	     EVEX VOP %zmmM, %zmmM, %zmmN
	       -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
	       -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
	     EVEX VOP %ymmM, %ymmM, %ymmN
	       -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
	       -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
	   VOP, one of vpxord and vpxorq:
	     EVEX VOP %zmmM, %zmmM, %zmmN
	       -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
	       -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
	     EVEX VOP %ymmM, %ymmM, %ymmN
	       -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
	       -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
       */
      if (is_evex_encoding (&i.tm))
	{
	  /* Keep EVEX if the user demanded it; otherwise demote to
	     the shorter VEX encoding at 128-bit length.  */
	  if (i.vec_encoding == vex_encoding_evex)
	    i.tm.opcode_modifier.evex = EVEX128;
	  else
	    {
	      i.tm.opcode_modifier.vex = VEX128;
	      i.tm.opcode_modifier.vexw = VEXW0;
	      i.tm.opcode_modifier.evex = 0;
	    }
	}
      else
	i.tm.opcode_modifier.vex = VEX128;

      /* Shrink all three operands to xmm for the VEX form.  */
      if (i.tm.opcode_modifier.vex)
	for (j = 0; j < 3; j++)
	  {
	    i.types[j].bitfield.xmmword = 1;
	    i.types[j].bitfield.ymmword = 0;
	  }
    }
}
3976
/* This is the guts of the machine-dependent assembler.  LINE points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  Pipeline: reset global state ->
   parse mnemonic and operands -> match a template -> prefix/suffix
   checks -> optional encoding optimization -> VEX/EVEX prefix build ->
   REX handling -> output_insn.  */

void
md_assemble (char *line)
{
  unsigned int j;
  char mnemonic[MAX_MNEM_SIZE], mnem_suffix;
  const insn_template *t;

  /* Initialize globals.  All per-insn state lives in `i' and the
     expression pools; clear it from the previous instruction.  */
  memset (&i, '\0', sizeof (i));
  for (j = 0; j < MAX_OPERANDS; j++)
    i.reloc[j] = NO_RELOC;
  memset (disp_expressions, '\0', sizeof (disp_expressions));
  memset (im_expressions, '\0', sizeof (im_expressions));
  save_stack_p = save_stack;

  /* First parse an instruction mnemonic & call i386_operand for the operands.
     We assume that the scrubber has arranged it so that line[0] is the valid
     start of a (possibly prefixed) mnemonic.  */

  line = parse_insn (line, mnemonic);
  if (line == NULL)
    return;
  /* Remember the suffix that came from the mnemonic itself; operand
     parsing may change i.suffix.  */
  mnem_suffix = i.suffix;

  line = parse_operands (line, mnemonic);
  this_operand = -1;
  xfree (i.memop1_string);
  i.memop1_string = NULL;
  if (line == NULL)
    return;

  /* Now we've parsed the mnemonic into a set of templates, and have the
     operands at hand.  */

  /* All intel opcodes have reversed operands except for "bound" and
     "enter".  We also don't reverse intersegment "jmp" and "call"
     instructions with 2 immediate operands so that the immediate segment
     precedes the offset, as it does when in AT&T mode.  */
  if (intel_syntax
      && i.operands > 1
      && (strcmp (mnemonic, "bound") != 0)
      && (strcmp (mnemonic, "invlpga") != 0)
      && !(operand_type_check (i.types[0], imm)
	   && operand_type_check (i.types[1], imm)))
    swap_operands ();

  /* The order of the immediates should be reversed
     for 2 immediates extrq and insertq instructions */
  if (i.imm_operands == 2
      && (strcmp (mnemonic, "extrq") == 0
	  || strcmp (mnemonic, "insertq") == 0))
      swap_2_operands (0, 1);

  if (i.imm_operands)
    optimize_imm ();

  /* Don't optimize displacement for movabs since it only takes 64bit
     displacement.  */
  if (i.disp_operands
      && i.disp_encoding != disp_encoding_32bit
      && (flag_code != CODE_64BIT
	  || strcmp (mnemonic, "movabs") != 0))
    optimize_disp ();

  /* Next, we find a template that matches the given insn,
     making sure the overlap of the given operands types is consistent
     with the template operand types.  */

  if (!(t = match_template (mnem_suffix)))
    return;

  /* Warn (or error, per -msse-check=) about legacy SSE insns when the
     user asked to be told about non-AVX vector usage.  */
  if (sse_check != check_none
      && !i.tm.opcode_modifier.noavx
      && !i.tm.cpu_flags.bitfield.cpuavx
      && (i.tm.cpu_flags.bitfield.cpusse
	  || i.tm.cpu_flags.bitfield.cpusse2
	  || i.tm.cpu_flags.bitfield.cpusse3
	  || i.tm.cpu_flags.bitfield.cpussse3
	  || i.tm.cpu_flags.bitfield.cpusse4_1
	  || i.tm.cpu_flags.bitfield.cpusse4_2
	  || i.tm.cpu_flags.bitfield.cpupclmul
	  || i.tm.cpu_flags.bitfield.cpuaes
	  || i.tm.cpu_flags.bitfield.cpugfni))
    {
      (sse_check == check_warning
       ? as_warn
       : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
    }

  /* Zap movzx and movsx suffix.  The suffix has been set from
     "word ptr" or "byte ptr" on the source operand in Intel syntax
     or extracted from mnemonic in AT&T syntax.  But we'll use
     the destination register to choose the suffix for encoding.  */
  /* 0x0fb6/b7 = movzx, 0x0fbe/bf = movsx; & ~9 folds all four.  */
  if ((i.tm.base_opcode & ~9) == 0x0fb6)
    {
      /* In Intel syntax, there must be a suffix.  In AT&T syntax, if
	 there is no suffix, the default will be byte extension.  */
      if (i.reg_operands != 2
	  && !i.suffix
	  && intel_syntax)
	as_bad (_("ambiguous operand size for `%s'"), i.tm.name);

      i.suffix = 0;
    }

  if (i.tm.opcode_modifier.fwait)
    if (!add_prefix (FWAIT_OPCODE))
      return;

  /* Check if REP prefix is OK.  */
  if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
    {
      as_bad (_("invalid instruction `%s' after `%s'"),
	      i.tm.name, i.rep_prefix);
      return;
    }

  /* Check for lock without a lockable instruction.  Destination operand
     must be memory unless it is xchg (0x86).  */
  if (i.prefix[LOCK_PREFIX]
      && (!i.tm.opcode_modifier.islockable
	  || i.mem_operands == 0
	  || (i.tm.base_opcode != 0x86
	      && !operand_type_check (i.types[i.operands - 1], anymem))))
    {
      as_bad (_("expecting lockable instruction after `lock'"));
      return;
    }

  /* Check if HLE prefix is OK.  */
  if (i.hle_prefix && !check_hle ())
    return;

  /* Check BND prefix.  */
  if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
    as_bad (_("expecting valid branch instruction after `bnd'"));

  /* Check NOTRACK prefix.  */
  if (i.notrack_prefix && !i.tm.opcode_modifier.notrackprefixok)
    as_bad (_("expecting indirect branch instruction after `notrack'"));

  /* MPX insns reject address-size overrides that would shrink the
     address below the mode's natural width.  */
  if (i.tm.cpu_flags.bitfield.cpumpx)
    {
      if (flag_code == CODE_64BIT && i.prefix[ADDR_PREFIX])
	as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
      else if (flag_code != CODE_16BIT
	       ? i.prefix[ADDR_PREFIX]
	       : i.mem_operands && !i.prefix[ADDR_PREFIX])
	as_bad (_("16-bit address isn't allowed in MPX instructions"));
    }

  /* Insert BND prefix.  */
  if (add_bnd_prefix && i.tm.opcode_modifier.bndprefixok)
    {
      if (!i.prefix[BND_PREFIX])
	add_prefix (BND_PREFIX_OPCODE);
      else if (i.prefix[BND_PREFIX] != BND_PREFIX_OPCODE)
	{
	  as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
	  i.prefix[BND_PREFIX] = BND_PREFIX_OPCODE;
	}
    }

  /* Check string instruction segment overrides.  */
  if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
    {
      if (!check_string ())
	return;
      i.disp_operands = 0;
    }

  /* Shrink the encoding where -O permits and the template allows it.  */
  if (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize)
    optimize_encoding ();

  if (!process_suffix ())
    return;

  /* Update operand types.  */
  for (j = 0; j < i.operands; j++)
    i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);

  /* Make still unresolved immediate matches conform to size of immediate
     given in i.suffix.  */
  if (!finalize_imm ())
    return;

  if (i.types[0].bitfield.imm1)
    i.imm_operands = 0;	/* kludge for shift insns.  */

  /* We only need to check those implicit registers for instructions
     with 3 operands or less.  */
  if (i.operands <= 3)
    for (j = 0; j < i.operands; j++)
      if (i.types[j].bitfield.inoutportreg
	  || i.types[j].bitfield.shiftcount
	  || (i.types[j].bitfield.acc && !i.types[j].bitfield.xmmword))
	i.reg_operands--;

  /* ImmExt should be processed after SSE2AVX.  */
  if (!i.tm.opcode_modifier.sse2avx
      && i.tm.opcode_modifier.immext)
    process_immext ();

  /* For insns with operands there are more diddles to do to the opcode.  */
  if (i.operands)
    {
      if (!process_operands ())
	return;
    }
  else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
    {
      /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc.  */
      as_warn (_("translating to `%sp'"), i.tm.name);
    }

  /* Build the VEX or EVEX prefix; neither exists in 16-bit mode.  */
  if (i.tm.opcode_modifier.vex || i.tm.opcode_modifier.vexopcode
      || is_evex_encoding (&i.tm))
    {
      if (flag_code == CODE_16BIT)
	{
	  as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
		  i.tm.name);
	  return;
	}

      if (i.tm.opcode_modifier.vex)
	build_vex_prefix (t);
      else
	build_evex_prefix ();
    }

  /* Handle conversion of 'int $3' --> special int3 insn.  XOP or FMA4
     instructions may define INT_OPCODE as well, so avoid this corner
     case for those instructions that use MODRM.  */
  if (i.tm.base_opcode == INT_OPCODE
      && !i.tm.opcode_modifier.modrm
      && i.op[0].imms->X_add_number == 3)
    {
      i.tm.base_opcode = INT3_OPCODE;
      i.imm_operands = 0;
    }

  if ((i.tm.opcode_modifier.jump
       || i.tm.opcode_modifier.jumpbyte
       || i.tm.opcode_modifier.jumpdword)
      && i.op[0].disps->X_op == O_constant)
    {
      /* Convert "jmp constant" (and "call constant") to a jump (call) to
	 the absolute address given by the constant.  Since ix86 jumps and
	 calls are pc relative, we need to generate a reloc.  */
      i.op[0].disps->X_add_symbol = &abs_symbol;
      i.op[0].disps->X_op = O_symbol;
    }

  if (i.tm.opcode_modifier.rex64)
    i.rex |= REX_W;

  /* For 8 bit registers we need an empty rex prefix.  Also if the
     instruction already has a prefix, we need to convert old
     registers to new ones.  */

  if ((i.types[0].bitfield.reg && i.types[0].bitfield.byte
       && (i.op[0].regs->reg_flags & RegRex64) != 0)
      || (i.types[1].bitfield.reg && i.types[1].bitfield.byte
	  && (i.op[1].regs->reg_flags & RegRex64) != 0)
      || (((i.types[0].bitfield.reg && i.types[0].bitfield.byte)
	   || (i.types[1].bitfield.reg && i.types[1].bitfield.byte))
	  && i.rex != 0))
    {
      int x;

      i.rex |= REX_OPCODE;
      for (x = 0; x < 2; x++)
	{
	  /* Look for 8 bit operand that uses old registers.  */
	  if (i.types[x].bitfield.reg && i.types[x].bitfield.byte
	      && (i.op[x].regs->reg_flags & RegRex64) == 0)
	    {
	      /* In case it is "hi" register, give up.  */
	      /* ah/ch/dh/bh (reg_num > 3 without RegRex64) cannot be
		 encoded with a REX prefix.  */
	      if (i.op[x].regs->reg_num > 3)
		as_bad (_("can't encode register '%s%s' in an "
			  "instruction requiring REX prefix."),
			register_prefix, i.op[x].regs->reg_name);

	      /* Otherwise it is equivalent to the extended register.
		 Since the encoding doesn't change this is merely
		 cosmetic cleanup for debug output.  */

	      i.op[x].regs = i.op[x].regs + 8;
	    }
	}
    }

  if (i.rex == 0 && i.rex_encoding)
    {
      /* Check if we can add a REX_OPCODE byte.  Look for 8 bit operand
	 that uses legacy register.  If it is "hi" register, don't add
	 the REX_OPCODE byte.  */
      int x;
      for (x = 0; x < 2; x++)
	if (i.types[x].bitfield.reg
	    && i.types[x].bitfield.byte
	    && (i.op[x].regs->reg_flags & RegRex64) == 0
	    && i.op[x].regs->reg_num > 3)
	  {
	    i.rex_encoding = FALSE;
	    break;
	  }

      if (i.rex_encoding)
	i.rex = REX_OPCODE;
    }

  if (i.rex != 0)
    add_prefix (REX_OPCODE | i.rex);

  /* We are ready to output the insn.  */
  output_insn ();
}
4300
4301 static char *
4302 parse_insn (char *line, char *mnemonic)
4303 {
4304 char *l = line;
4305 char *token_start = l;
4306 char *mnem_p;
4307 int supported;
4308 const insn_template *t;
4309 char *dot_p = NULL;
4310
4311 while (1)
4312 {
4313 mnem_p = mnemonic;
4314 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
4315 {
4316 if (*mnem_p == '.')
4317 dot_p = mnem_p;
4318 mnem_p++;
4319 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
4320 {
4321 as_bad (_("no such instruction: `%s'"), token_start);
4322 return NULL;
4323 }
4324 l++;
4325 }
4326 if (!is_space_char (*l)
4327 && *l != END_OF_INSN
4328 && (intel_syntax
4329 || (*l != PREFIX_SEPARATOR
4330 && *l != ',')))
4331 {
4332 as_bad (_("invalid character %s in mnemonic"),
4333 output_invalid (*l));
4334 return NULL;
4335 }
4336 if (token_start == l)
4337 {
4338 if (!intel_syntax && *l == PREFIX_SEPARATOR)
4339 as_bad (_("expecting prefix; got nothing"));
4340 else
4341 as_bad (_("expecting mnemonic; got nothing"));
4342 return NULL;
4343 }
4344
4345 /* Look up instruction (or prefix) via hash table. */
4346 current_templates = (const templates *) hash_find (op_hash, mnemonic);
4347
4348 if (*l != END_OF_INSN
4349 && (!is_space_char (*l) || l[1] != END_OF_INSN)
4350 && current_templates
4351 && current_templates->start->opcode_modifier.isprefix)
4352 {
4353 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
4354 {
4355 as_bad ((flag_code != CODE_64BIT
4356 ? _("`%s' is only supported in 64-bit mode")
4357 : _("`%s' is not supported in 64-bit mode")),
4358 current_templates->start->name);
4359 return NULL;
4360 }
4361 /* If we are in 16-bit mode, do not allow addr16 or data16.
4362 Similarly, in 32-bit mode, do not allow addr32 or data32. */
4363 if ((current_templates->start->opcode_modifier.size16
4364 || current_templates->start->opcode_modifier.size32)
4365 && flag_code != CODE_64BIT
4366 && (current_templates->start->opcode_modifier.size32
4367 ^ (flag_code == CODE_16BIT)))
4368 {
4369 as_bad (_("redundant %s prefix"),
4370 current_templates->start->name);
4371 return NULL;
4372 }
4373 if (current_templates->start->opcode_length == 0)
4374 {
4375 /* Handle pseudo prefixes. */
4376 switch (current_templates->start->base_opcode)
4377 {
4378 case 0x0:
4379 /* {disp8} */
4380 i.disp_encoding = disp_encoding_8bit;
4381 break;
4382 case 0x1:
4383 /* {disp32} */
4384 i.disp_encoding = disp_encoding_32bit;
4385 break;
4386 case 0x2:
4387 /* {load} */
4388 i.dir_encoding = dir_encoding_load;
4389 break;
4390 case 0x3:
4391 /* {store} */
4392 i.dir_encoding = dir_encoding_store;
4393 break;
4394 case 0x4:
4395 /* {vex2} */
4396 i.vec_encoding = vex_encoding_vex2;
4397 break;
4398 case 0x5:
4399 /* {vex3} */
4400 i.vec_encoding = vex_encoding_vex3;
4401 break;
4402 case 0x6:
4403 /* {evex} */
4404 i.vec_encoding = vex_encoding_evex;
4405 break;
4406 case 0x7:
4407 /* {rex} */
4408 i.rex_encoding = TRUE;
4409 break;
4410 case 0x8:
4411 /* {nooptimize} */
4412 i.no_optimize = TRUE;
4413 break;
4414 default:
4415 abort ();
4416 }
4417 }
4418 else
4419 {
4420 /* Add prefix, checking for repeated prefixes. */
4421 switch (add_prefix (current_templates->start->base_opcode))
4422 {
4423 case PREFIX_EXIST:
4424 return NULL;
4425 case PREFIX_DS:
4426 if (current_templates->start->cpu_flags.bitfield.cpuibt)
4427 i.notrack_prefix = current_templates->start->name;
4428 break;
4429 case PREFIX_REP:
4430 if (current_templates->start->cpu_flags.bitfield.cpuhle)
4431 i.hle_prefix = current_templates->start->name;
4432 else if (current_templates->start->cpu_flags.bitfield.cpumpx)
4433 i.bnd_prefix = current_templates->start->name;
4434 else
4435 i.rep_prefix = current_templates->start->name;
4436 break;
4437 default:
4438 break;
4439 }
4440 }
4441 /* Skip past PREFIX_SEPARATOR and reset token_start. */
4442 token_start = ++l;
4443 }
4444 else
4445 break;
4446 }
4447
4448 if (!current_templates)
4449 {
4450 /* Check if we should swap operand or force 32bit displacement in
4451 encoding. */
4452 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
4453 i.dir_encoding = dir_encoding_store;
4454 else if (mnem_p - 3 == dot_p
4455 && dot_p[1] == 'd'
4456 && dot_p[2] == '8')
4457 i.disp_encoding = disp_encoding_8bit;
4458 else if (mnem_p - 4 == dot_p
4459 && dot_p[1] == 'd'
4460 && dot_p[2] == '3'
4461 && dot_p[3] == '2')
4462 i.disp_encoding = disp_encoding_32bit;
4463 else
4464 goto check_suffix;
4465 mnem_p = dot_p;
4466 *dot_p = '\0';
4467 current_templates = (const templates *) hash_find (op_hash, mnemonic);
4468 }
4469
4470 if (!current_templates)
4471 {
4472 check_suffix:
4473 /* See if we can get a match by trimming off a suffix. */
4474 switch (mnem_p[-1])
4475 {
4476 case WORD_MNEM_SUFFIX:
4477 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
4478 i.suffix = SHORT_MNEM_SUFFIX;
4479 else
4480 /* Fall through. */
4481 case BYTE_MNEM_SUFFIX:
4482 case QWORD_MNEM_SUFFIX:
4483 i.suffix = mnem_p[-1];
4484 mnem_p[-1] = '\0';
4485 current_templates = (const templates *) hash_find (op_hash,
4486 mnemonic);
4487 break;
4488 case SHORT_MNEM_SUFFIX:
4489 case LONG_MNEM_SUFFIX:
4490 if (!intel_syntax)
4491 {
4492 i.suffix = mnem_p[-1];
4493 mnem_p[-1] = '\0';
4494 current_templates = (const templates *) hash_find (op_hash,
4495 mnemonic);
4496 }
4497 break;
4498
4499 /* Intel Syntax. */
4500 case 'd':
4501 if (intel_syntax)
4502 {
4503 if (intel_float_operand (mnemonic) == 1)
4504 i.suffix = SHORT_MNEM_SUFFIX;
4505 else
4506 i.suffix = LONG_MNEM_SUFFIX;
4507 mnem_p[-1] = '\0';
4508 current_templates = (const templates *) hash_find (op_hash,
4509 mnemonic);
4510 }
4511 break;
4512 }
4513 if (!current_templates)
4514 {
4515 as_bad (_("no such instruction: `%s'"), token_start);
4516 return NULL;
4517 }
4518 }
4519
4520 if (current_templates->start->opcode_modifier.jump
4521 || current_templates->start->opcode_modifier.jumpbyte)
4522 {
4523 /* Check for a branch hint. We allow ",pt" and ",pn" for
4524 predict taken and predict not taken respectively.
4525 I'm not sure that branch hints actually do anything on loop
4526 and jcxz insns (JumpByte) for current Pentium4 chips. They
4527 may work in the future and it doesn't hurt to accept them
4528 now. */
4529 if (l[0] == ',' && l[1] == 'p')
4530 {
4531 if (l[2] == 't')
4532 {
4533 if (!add_prefix (DS_PREFIX_OPCODE))
4534 return NULL;
4535 l += 3;
4536 }
4537 else if (l[2] == 'n')
4538 {
4539 if (!add_prefix (CS_PREFIX_OPCODE))
4540 return NULL;
4541 l += 3;
4542 }
4543 }
4544 }
4545 /* Any other comma loses. */
4546 if (*l == ',')
4547 {
4548 as_bad (_("invalid character %s in mnemonic"),
4549 output_invalid (*l));
4550 return NULL;
4551 }
4552
4553 /* Check if instruction is supported on specified architecture. */
4554 supported = 0;
4555 for (t = current_templates->start; t < current_templates->end; ++t)
4556 {
4557 supported |= cpu_flags_match (t);
4558 if (supported == CPU_FLAGS_PERFECT_MATCH)
4559 {
4560 if (!cpu_arch_flags.bitfield.cpui386 && (flag_code != CODE_16BIT))
4561 as_warn (_("use .code16 to ensure correct addressing mode"));
4562
4563 return l;
4564 }
4565 }
4566
4567 if (!(supported & CPU_FLAGS_64BIT_MATCH))
4568 as_bad (flag_code == CODE_64BIT
4569 ? _("`%s' is not supported in 64-bit mode")
4570 : _("`%s' is only supported in 64-bit mode"),
4571 current_templates->start->name);
4572 else
4573 as_bad (_("`%s' is not supported on `%s%s'"),
4574 current_templates->start->name,
4575 cpu_arch_name ? cpu_arch_name : default_arch,
4576 cpu_sub_arch_name ? cpu_sub_arch_name : "");
4577
4578 return NULL;
4579 }
4580
/* Parse the operand list L for the instruction whose mnemonic is
   MNEMONIC, recording each operand's information into the global
   instruction state `i' (via i386_intel_operand / i386_att_operand).
   Returns a pointer just past the parsed operands on success, or NULL
   after issuing a diagnostic with as_bad on any parse error.  */
static char *
parse_operands (char *l, const char *mnemonic)
{
  char *token_start;

  /* 1 if operand is pending after ','.  */
  unsigned int expecting_operand = 0;

  /* Non-zero if operand parens not balanced.
     AT&T syntax balances '(' / ')', Intel syntax '[' / ']'.  */
  unsigned int paren_not_balanced;

  while (*l != END_OF_INSN)
    {
      /* Skip optional white space before operand.  */
      if (is_space_char (*l))
	++l;
      if (!is_operand_char (*l) && *l != END_OF_INSN && *l != '"')
	{
	  as_bad (_("invalid character %s before operand %d"),
		  output_invalid (*l),
		  i.operands + 1);
	  return NULL;
	}
      token_start = l;	/* After white space.  */
      paren_not_balanced = 0;
      /* Scan forward to the ',' that terminates this operand; commas
	 inside unbalanced parens/brackets do not terminate it.  */
      while (paren_not_balanced || *l != ',')
	{
	  if (*l == END_OF_INSN)
	    {
	      if (paren_not_balanced)
		{
		  if (!intel_syntax)
		    as_bad (_("unbalanced parenthesis in operand %d."),
			    i.operands + 1);
		  else
		    as_bad (_("unbalanced brackets in operand %d."),
			    i.operands + 1);
		  return NULL;
		}
	      else
		break;	/* we are done */
	    }
	  else if (!is_operand_char (*l) && !is_space_char (*l) && *l != '"')
	    {
	      as_bad (_("invalid character %s in operand %d"),
		      output_invalid (*l),
		      i.operands + 1);
	      return NULL;
	    }
	  if (!intel_syntax)
	    {
	      if (*l == '(')
		++paren_not_balanced;
	      if (*l == ')')
		--paren_not_balanced;
	    }
	  else
	    {
	      if (*l == '[')
		++paren_not_balanced;
	      if (*l == ']')
		--paren_not_balanced;
	    }
	  l++;
	}
      if (l != token_start)
	{			/* Yes, we've read in another operand.  */
	  unsigned int operand_ok;
	  this_operand = i.operands++;
	  if (i.operands > MAX_OPERANDS)
	    {
	      as_bad (_("spurious operands; (%d operands/instruction max)"),
		      MAX_OPERANDS);
	      return NULL;
	    }
	  i.types[this_operand].bitfield.unspecified = 1;
	  /* Now parse operand adding info to 'i' as we go along.
	     Temporarily NUL-terminate the operand text so the
	     operand parsers see just this token.  */
	  END_STRING_AND_SAVE (l);

	  if (intel_syntax)
	    operand_ok =
	      i386_intel_operand (token_start,
				  intel_float_operand (mnemonic));
	  else
	    operand_ok = i386_att_operand (token_start);

	  RESTORE_END_STRING (l);
	  if (!operand_ok)
	    return NULL;
	}
      else
	{
	  /* Empty operand text: diagnose a dangling or leading comma.  */
	  if (expecting_operand)
	    {
	    expecting_operand_after_comma:
	      as_bad (_("expecting operand after ','; got nothing"));
	      return NULL;
	    }
	  if (*l == ',')
	    {
	      as_bad (_("expecting operand before ','; got nothing"));
	      return NULL;
	    }
	}

      /* Now *l must be either ',' or END_OF_INSN.  */
      if (*l == ',')
	{
	  if (*++l == END_OF_INSN)
	    {
	      /* Just skip it, if it's \n complain.  */
	      goto expecting_operand_after_comma;
	    }
	  expecting_operand = 1;
	}
    }
  return l;
}
4699
4700 static void
4701 swap_2_operands (int xchg1, int xchg2)
4702 {
4703 union i386_op temp_op;
4704 i386_operand_type temp_type;
4705 enum bfd_reloc_code_real temp_reloc;
4706
4707 temp_type = i.types[xchg2];
4708 i.types[xchg2] = i.types[xchg1];
4709 i.types[xchg1] = temp_type;
4710 temp_op = i.op[xchg2];
4711 i.op[xchg2] = i.op[xchg1];
4712 i.op[xchg1] = temp_op;
4713 temp_reloc = i.reloc[xchg2];
4714 i.reloc[xchg2] = i.reloc[xchg1];
4715 i.reloc[xchg1] = temp_reloc;
4716
4717 if (i.mask)
4718 {
4719 if (i.mask->operand == xchg1)
4720 i.mask->operand = xchg2;
4721 else if (i.mask->operand == xchg2)
4722 i.mask->operand = xchg1;
4723 }
4724 if (i.broadcast)
4725 {
4726 if (i.broadcast->operand == xchg1)
4727 i.broadcast->operand = xchg2;
4728 else if (i.broadcast->operand == xchg2)
4729 i.broadcast->operand = xchg1;
4730 }
4731 if (i.rounding)
4732 {
4733 if (i.rounding->operand == xchg1)
4734 i.rounding->operand = xchg2;
4735 else if (i.rounding->operand == xchg2)
4736 i.rounding->operand = xchg1;
4737 }
4738 }
4739
4740 static void
4741 swap_operands (void)
4742 {
4743 switch (i.operands)
4744 {
4745 case 5:
4746 case 4:
4747 swap_2_operands (1, i.operands - 2);
4748 /* Fall through. */
4749 case 3:
4750 case 2:
4751 swap_2_operands (0, i.operands - 1);
4752 break;
4753 default:
4754 abort ();
4755 }
4756
4757 if (i.mem_operands == 2)
4758 {
4759 const seg_entry *temp_seg;
4760 temp_seg = i.seg[0];
4761 i.seg[0] = i.seg[1];
4762 i.seg[1] = temp_seg;
4763 }
4764 }
4765
/* Try to ensure constant immediates are represented in the smallest
   opcode possible.  First guess an operand size (from the explicit
   suffix, a register operand, or the current code/data size), widen
   the immediate's allowed Imm* bits accordingly, then narrow them to
   the smallest type the constant actually fits in.  Symbolic
   immediates are instead masked against what the candidate templates
   can accept for the guessed size.  */
static void
optimize_imm (void)
{
  char guess_suffix = 0;
  int op;

  if (i.suffix)
    guess_suffix = i.suffix;
  else if (i.reg_operands)
    {
      /* Figure out a suffix from the last register operand specified.
	 We can't do this properly yet, ie. excluding InOutPortReg,
	 but the following works for instructions with immediates.
	 In any case, we can't set i.suffix yet.  */
      for (op = i.operands; --op >= 0;)
	if (i.types[op].bitfield.reg && i.types[op].bitfield.byte)
	  {
	    guess_suffix = BYTE_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg && i.types[op].bitfield.word)
	  {
	    guess_suffix = WORD_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg && i.types[op].bitfield.dword)
	  {
	    guess_suffix = LONG_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg && i.types[op].bitfield.qword)
	  {
	    guess_suffix = QWORD_MNEM_SUFFIX;
	    break;
	  }
    }
  else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
    /* No suffix and no register operand: a data-size prefix (or its
       absence in 16-bit mode) implies 16-bit operands.  */
    guess_suffix = WORD_MNEM_SUFFIX;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], imm))
      {
	switch (i.op[op].imms->X_op)
	  {
	  case O_constant:
	    /* If a suffix is given, this operand may be shortened.
	       Allow every immediate width the guessed size permits;
	       smallest_imm_type below adds the tightest fit.  */
	    switch (guess_suffix)
	      {
	      case LONG_MNEM_SUFFIX:
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case WORD_MNEM_SUFFIX:
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case BYTE_MNEM_SUFFIX:
		i.types[op].bitfield.imm8 = 1;
		i.types[op].bitfield.imm8s = 1;
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      }

	    /* If this operand is at most 16 bits, convert it
	       to a signed 16 bit number before trying to see
	       whether it will fit in an even smaller size.
	       This allows a 16-bit operand such as $0xffe0 to
	       be recognised as within Imm8S range.  */
	    if ((i.types[op].bitfield.imm16)
		&& (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
	      {
		i.op[op].imms->X_add_number =
		  (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
	      }
#ifdef BFD64
	    /* Store 32-bit immediate in 64-bit for 64-bit BFD.
	       Same sign-extension trick as above, at 32 bits.  */
	    if ((i.types[op].bitfield.imm32)
		&& ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
		    == 0))
	      {
		i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
						^ ((offsetT) 1 << 31))
					       - ((offsetT) 1 << 31));
	      }
#endif
	    i.types[op]
	      = operand_type_or (i.types[op],
				 smallest_imm_type (i.op[op].imms->X_add_number));

	    /* We must avoid matching of Imm32 templates when 64bit
	       only immediate is available.  */
	    if (guess_suffix == QWORD_MNEM_SUFFIX)
	      i.types[op].bitfield.imm32 = 0;
	    break;

	  case O_absent:
	  case O_register:
	    abort ();

	    /* Symbols and expressions.  */
	  default:
	    /* Convert symbolic operand to proper sizes for matching, but don't
	       prevent matching a set of insns that only supports sizes other
	       than those matching the insn suffix.  */
	    {
	      i386_operand_type mask, allowed;
	      const insn_template *t;

	      operand_type_set (&mask, 0);
	      operand_type_set (&allowed, 0);

	      /* Union of immediate types any candidate template accepts
		 for this operand slot.  */
	      for (t = current_templates->start;
		   t < current_templates->end;
		   ++t)
		allowed = operand_type_or (allowed,
					   t->operand_types[op]);
	      switch (guess_suffix)
		{
		case QWORD_MNEM_SUFFIX:
		  mask.bitfield.imm64 = 1;
		  mask.bitfield.imm32s = 1;
		  break;
		case LONG_MNEM_SUFFIX:
		  mask.bitfield.imm32 = 1;
		  break;
		case WORD_MNEM_SUFFIX:
		  mask.bitfield.imm16 = 1;
		  break;
		case BYTE_MNEM_SUFFIX:
		  mask.bitfield.imm8 = 1;
		  break;
		default:
		  break;
		}
	      /* Only narrow to the suffix-implied sizes if some template
		 actually accepts one of them.  */
	      allowed = operand_type_and (mask, allowed);
	      if (!operand_type_all_zero (&allowed))
		i.types[op] = operand_type_and (i.types[op], mask);
	    }
	    break;
	  }
      }
}
4915
/* Try to use the smallest displacement type too.  For constant
   displacements this sign-reduces the value and enables the smaller
   Disp* type bits (dropping a zero displacement entirely when a base
   or index register is present); for TLS-descriptor relocations it
   emits the fixup here and clears all Disp* bits; other symbolic
   displacements merely lose Disp64.  */
static void
optimize_disp (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], disp))
      {
	if (i.op[op].disps->X_op == O_constant)
	  {
	    offsetT op_disp = i.op[op].disps->X_add_number;

	    if (i.types[op].bitfield.disp16
		&& (op_disp & ~(offsetT) 0xffff) == 0)
	      {
		/* If this operand is at most 16 bits, convert
		   to a signed 16 bit number and don't use 64bit
		   displacement.  */
		op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
		i.types[op].bitfield.disp64 = 0;
	      }
#ifdef BFD64
	    /* Optimize 64-bit displacement to 32-bit for 64-bit BFD.  */
	    if (i.types[op].bitfield.disp32
		&& (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
	      {
		/* If this operand is at most 32 bits, convert
		   to a signed 32 bit number and don't use 64bit
		   displacement.  */
		op_disp &= (((offsetT) 2 << 31) - 1);
		op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
		i.types[op].bitfield.disp64 = 0;
	      }
#endif
	    if (!op_disp && i.types[op].bitfield.baseindex)
	      {
		/* Zero displacement with a base/index register: the
		   displacement can be omitted altogether.  */
		i.types[op].bitfield.disp8 = 0;
		i.types[op].bitfield.disp16 = 0;
		i.types[op].bitfield.disp32 = 0;
		i.types[op].bitfield.disp32s = 0;
		i.types[op].bitfield.disp64 = 0;
		i.op[op].disps = 0;
		i.disp_operands--;
	      }
	    else if (flag_code == CODE_64BIT)
	      {
		if (fits_in_signed_long (op_disp))
		  {
		    i.types[op].bitfield.disp64 = 0;
		    i.types[op].bitfield.disp32s = 1;
		  }
		/* With an address-size override a 64-bit address is
		   truncated, so an unsigned 32-bit value is usable.  */
		if (i.prefix[ADDR_PREFIX]
		    && fits_in_unsigned_long (op_disp))
		  i.types[op].bitfield.disp32 = 1;
	      }
	    if ((i.types[op].bitfield.disp32
		 || i.types[op].bitfield.disp32s
		 || i.types[op].bitfield.disp16)
		&& fits_in_disp8 (op_disp))
	      i.types[op].bitfield.disp8 = 1;
	  }
	else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
		 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
	  {
	    /* TLS-descriptor call: record the fixup now; no actual
	       displacement bytes belong to this operand.  */
	    fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
			 i.op[op].disps, 0, i.reloc[op]);
	    i.types[op].bitfield.disp8 = 0;
	    i.types[op].bitfield.disp16 = 0;
	    i.types[op].bitfield.disp32 = 0;
	    i.types[op].bitfield.disp32s = 0;
	    i.types[op].bitfield.disp64 = 0;
	  }
	else
	  /* We only support 64bit displacement on constants.  */
	  i.types[op].bitfield.disp64 = 0;
      }
}
4994
/* Check if operands are valid for the instruction (vector-specific
   checks: implicit AVX512VL, VSIB addressing, broadcast, masking,
   RC/SAE, and compressed Disp8).  Returns 1 and sets i.error on
   failure, 0 if template T is acceptable.  As a side effect, may set
   i.memshift and the Disp8 bits for EVEX compressed displacement.  */
static int
check_VecOperands (const insn_template *t)
{
  unsigned int op;
  i386_cpu_flags cpu;
  static const i386_cpu_flags avx512 = CPU_ANY_AVX512F_FLAGS;

  /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
     any one operand are implicity requiring AVX512VL support if the actual
     operand size is YMMword or XMMword.  Since this function runs after
     template matching, there's no need to check for YMMword/XMMword in
     the template.  */
  cpu = cpu_flags_and (t->cpu_flags, avx512);
  if (!cpu_flags_all_zero (&cpu)
      && !t->cpu_flags.bitfield.cpuavx512vl
      && !cpu_arch_flags.bitfield.cpuavx512vl)
    {
      for (op = 0; op < t->operands; ++op)
	{
	  if (t->operand_types[op].bitfield.zmmword
	      && (i.types[op].bitfield.ymmword
		  || i.types[op].bitfield.xmmword))
	    {
	      i.error = unsupported;
	      return 1;
	    }
	}
    }

  /* Without VSIB byte, we can't have a vector register for index.  */
  if (!t->opcode_modifier.vecsib
      && i.index_reg
      && (i.index_reg->reg_type.bitfield.xmmword
	  || i.index_reg->reg_type.bitfield.ymmword
	  || i.index_reg->reg_type.bitfield.zmmword))
    {
      i.error = unsupported_vector_index_register;
      return 1;
    }

  /* Check if default mask is allowed.  */
  if (t->opcode_modifier.nodefmask
      && (!i.mask || i.mask->mask->reg_num == 0))
    {
      i.error = no_default_mask;
      return 1;
    }

  /* For VSIB byte, we need a vector register for index, and all vector
     registers must be distinct.  */
  if (t->opcode_modifier.vecsib)
    {
      /* The index register width must match the VSIB kind the
	 template demands (128/256/512 bit).  */
      if (!i.index_reg
	  || !((t->opcode_modifier.vecsib == VecSIB128
		&& i.index_reg->reg_type.bitfield.xmmword)
	       || (t->opcode_modifier.vecsib == VecSIB256
		   && i.index_reg->reg_type.bitfield.ymmword)
	       || (t->opcode_modifier.vecsib == VecSIB512
		   && i.index_reg->reg_type.bitfield.zmmword)))
	{
	  i.error = invalid_vsib_address;
	  return 1;
	}

      gas_assert (i.reg_operands == 2 || i.mask);
      if (i.reg_operands == 2 && !i.mask)
	{
	  /* VEX gather form: mask register is operand 0, destination
	     is operand 2; all three vector registers must differ.  */
	  gas_assert (i.types[0].bitfield.regsimd);
	  gas_assert (i.types[0].bitfield.xmmword
		      || i.types[0].bitfield.ymmword);
	  gas_assert (i.types[2].bitfield.regsimd);
	  gas_assert (i.types[2].bitfield.xmmword
		      || i.types[2].bitfield.ymmword);
	  if (operand_check == check_none)
	    return 0;
	  if (register_number (i.op[0].regs)
	      != register_number (i.index_reg)
	      && register_number (i.op[2].regs)
	      != register_number (i.index_reg)
	      && register_number (i.op[0].regs)
	      != register_number (i.op[2].regs))
	    return 0;
	  if (operand_check == check_error)
	    {
	      i.error = invalid_vector_register_set;
	      return 1;
	    }
	  as_warn (_("mask, index, and destination registers should be distinct"));
	}
      else if (i.reg_operands == 1 && i.mask)
	{
	  /* EVEX gather/scatter form: destination (operand 1) must not
	     alias the index register.  */
	  if (i.types[1].bitfield.regsimd
	      && (i.types[1].bitfield.xmmword
		  || i.types[1].bitfield.ymmword
		  || i.types[1].bitfield.zmmword)
	      && (register_number (i.op[1].regs)
		  == register_number (i.index_reg)))
	    {
	      if (operand_check == check_error)
		{
		  i.error = invalid_vector_register_set;
		  return 1;
		}
	      if (operand_check != check_none)
		as_warn (_("index and destination registers should be distinct"));
	    }
	}
    }

  /* Check if broadcast is supported by the instruction and is applied
     to the memory operand.  */
  if (i.broadcast)
    {
      i386_operand_type type, overlap;

      /* Check if specified broadcast is supported in this instruction,
	 and it's applied to memory operand of DWORD or QWORD type.  */
      op = i.broadcast->operand;
      if (!t->opcode_modifier.broadcast
	  || !i.types[op].bitfield.mem
	  || (!i.types[op].bitfield.unspecified
	      && (t->operand_types[op].bitfield.dword
		  ? !i.types[op].bitfield.dword
		  : !i.types[op].bitfield.qword)))
	{
	bad_broadcast:
	  i.error = unsupported_broadcast;
	  return 1;
	}

      /* Compute the total broadcast width (element size times the
	 1toN factor) and require the template to accept it.  */
      operand_type_set (&type, 0);
      switch ((t->operand_types[op].bitfield.dword ? 4 : 8) * i.broadcast->type)
	{
	case 8:
	  type.bitfield.qword = 1;
	  break;
	case 16:
	  type.bitfield.xmmword = 1;
	  break;
	case 32:
	  type.bitfield.ymmword = 1;
	  break;
	case 64:
	  type.bitfield.zmmword = 1;
	  break;
	default:
	  goto bad_broadcast;
	}

      overlap = operand_type_and (type, t->operand_types[op]);
      if (operand_type_all_zero (&overlap))
	goto bad_broadcast;

      if (t->opcode_modifier.checkregsize)
	{
	  unsigned int j;

	  type.bitfield.baseindex = 1;
	  for (j = 0; j < i.operands; ++j)
	    {
	      if (j != op
		  && !operand_type_register_match(i.types[j],
						  t->operand_types[j],
						  type,
						  t->operand_types[op]))
		goto bad_broadcast;
	    }
	}
    }
  /* If broadcast is supported in this instruction, we need to check if
     operand of one-element size isn't specified without broadcast.  */
  else if (t->opcode_modifier.broadcast && i.mem_operands)
    {
      /* Find memory operand.  */
      for (op = 0; op < i.operands; op++)
	if (operand_type_check (i.types[op], anymem))
	  break;
      gas_assert (op < i.operands);
      /* Check size of the memory operand.  */
      if (t->operand_types[op].bitfield.dword
	  ? i.types[op].bitfield.dword
	  : i.types[op].bitfield.qword)
	{
	  i.error = broadcast_needed;
	  return 1;
	}
    }
  else
    op = MAX_OPERANDS - 1; /* Avoid uninitialized variable warning.  */

  /* Check if requested masking is supported.  */
  if (i.mask
      && (!t->opcode_modifier.masking
	  || (i.mask->zeroing
	      && t->opcode_modifier.masking == MERGING_MASKING)))
    {
      i.error = unsupported_masking;
      return 1;
    }

  /* Check if masking is applied to dest operand.  */
  if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
    {
      i.error = mask_not_on_destination;
      return 1;
    }

  /* Check RC/SAE.  */
  if (i.rounding)
    {
      if ((i.rounding->type != saeonly
	   && !t->opcode_modifier.staticrounding)
	  || (i.rounding->type == saeonly
	      && (t->opcode_modifier.staticrounding
		  || !t->opcode_modifier.sae)))
	{
	  i.error = unsupported_rc_sae;
	  return 1;
	}
      /* If the instruction has several immediate operands and one of
	 them is rounding, the rounding operand should be the last
	 immediate operand.  */
      if (i.imm_operands > 1
	  && i.rounding->operand != (int) (i.imm_operands - 1))
	{
	  i.error = rc_sae_operand_not_last_imm;
	  return 1;
	}
    }

  /* Check vector Disp8 operand.  i.memshift is the log2 of the
     compressed-Disp8 scaling factor.  */
  if (t->opcode_modifier.disp8memshift
      && i.disp_encoding != disp_encoding_32bit)
    {
      if (i.broadcast)
	i.memshift = t->operand_types[op].bitfield.dword ? 2 : 3;
      else if (t->opcode_modifier.disp8memshift != DISP8_SHIFT_VL)
	i.memshift = t->opcode_modifier.disp8memshift;
      else
	{
	  /* Shift varies with vector length: derive it from the
	     memory operand's (or a register operand's) width.  */
	  const i386_operand_type *type = NULL;

	  i.memshift = 0;
	  for (op = 0; op < i.operands; op++)
	    if (operand_type_check (i.types[op], anymem))
	      {
		if (t->operand_types[op].bitfield.xmmword
		    + t->operand_types[op].bitfield.ymmword
		    + t->operand_types[op].bitfield.zmmword <= 1)
		  type = &t->operand_types[op];
		else if (!i.types[op].bitfield.unspecified)
		  type = &i.types[op];
	      }
	    else if (i.types[op].bitfield.regsimd)
	      {
		if (i.types[op].bitfield.zmmword)
		  i.memshift = 6;
		else if (i.types[op].bitfield.ymmword && i.memshift < 5)
		  i.memshift = 5;
		else if (i.types[op].bitfield.xmmword && i.memshift < 4)
		  i.memshift = 4;
	      }

	  if (type)
	    {
	      if (type->bitfield.zmmword)
		i.memshift = 6;
	      else if (type->bitfield.ymmword)
		i.memshift = 5;
	      else if (type->bitfield.xmmword)
		i.memshift = 4;
	    }

	  /* For the check in fits_in_disp8().  */
	  if (i.memshift == 0)
	    i.memshift = -1;
	}

      for (op = 0; op < i.operands; op++)
	if (operand_type_check (i.types[op], disp)
	    && i.op[op].disps->X_op == O_constant)
	  {
	    if (fits_in_disp8 (i.op[op].disps->X_add_number))
	      {
		i.types[op].bitfield.disp8 = 1;
		return 0;
	      }
	    i.types[op].bitfield.disp8 = 0;
	  }
    }

  i.memshift = 0;

  return 0;
}
5292
5293 /* Check if operands are valid for the instruction. Update VEX
5294 operand types. */
5295
5296 static int
5297 VEX_check_operands (const insn_template *t)
5298 {
5299 if (i.vec_encoding == vex_encoding_evex)
5300 {
5301 /* This instruction must be encoded with EVEX prefix. */
5302 if (!is_evex_encoding (t))
5303 {
5304 i.error = unsupported;
5305 return 1;
5306 }
5307 return 0;
5308 }
5309
5310 if (!t->opcode_modifier.vex)
5311 {
5312 /* This instruction template doesn't have VEX prefix. */
5313 if (i.vec_encoding != vex_encoding_default)
5314 {
5315 i.error = unsupported;
5316 return 1;
5317 }
5318 return 0;
5319 }
5320
5321 /* Only check VEX_Imm4, which must be the first operand. */
5322 if (t->operand_types[0].bitfield.vec_imm4)
5323 {
5324 if (i.op[0].imms->X_op != O_constant
5325 || !fits_in_imm4 (i.op[0].imms->X_add_number))
5326 {
5327 i.error = bad_imm4;
5328 return 1;
5329 }
5330
5331 /* Turn off Imm8 so that update_imm won't complain. */
5332 i.types[0] = vec_imm4;
5333 }
5334
5335 return 0;
5336 }
5337
5338 static const insn_template *
5339 match_template (char mnem_suffix)
5340 {
5341 /* Points to template once we've found it. */
5342 const insn_template *t;
5343 i386_operand_type overlap0, overlap1, overlap2, overlap3;
5344 i386_operand_type overlap4;
5345 unsigned int found_reverse_match;
5346 i386_opcode_modifier suffix_check, mnemsuf_check;
5347 i386_operand_type operand_types [MAX_OPERANDS];
5348 int addr_prefix_disp;
5349 unsigned int j;
5350 unsigned int found_cpu_match, size_match;
5351 unsigned int check_register;
5352 enum i386_error specific_error = 0;
5353
5354 #if MAX_OPERANDS != 5
5355 # error "MAX_OPERANDS must be 5."
5356 #endif
5357
5358 found_reverse_match = 0;
5359 addr_prefix_disp = -1;
5360
5361 memset (&suffix_check, 0, sizeof (suffix_check));
5362 if (intel_syntax && i.broadcast)
5363 /* nothing */;
5364 else if (i.suffix == BYTE_MNEM_SUFFIX)
5365 suffix_check.no_bsuf = 1;
5366 else if (i.suffix == WORD_MNEM_SUFFIX)
5367 suffix_check.no_wsuf = 1;
5368 else if (i.suffix == SHORT_MNEM_SUFFIX)
5369 suffix_check.no_ssuf = 1;
5370 else if (i.suffix == LONG_MNEM_SUFFIX)
5371 suffix_check.no_lsuf = 1;
5372 else if (i.suffix == QWORD_MNEM_SUFFIX)
5373 suffix_check.no_qsuf = 1;
5374 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
5375 suffix_check.no_ldsuf = 1;
5376
5377 memset (&mnemsuf_check, 0, sizeof (mnemsuf_check));
5378 if (intel_syntax)
5379 {
5380 switch (mnem_suffix)
5381 {
5382 case BYTE_MNEM_SUFFIX: mnemsuf_check.no_bsuf = 1; break;
5383 case WORD_MNEM_SUFFIX: mnemsuf_check.no_wsuf = 1; break;
5384 case SHORT_MNEM_SUFFIX: mnemsuf_check.no_ssuf = 1; break;
5385 case LONG_MNEM_SUFFIX: mnemsuf_check.no_lsuf = 1; break;
5386 case QWORD_MNEM_SUFFIX: mnemsuf_check.no_qsuf = 1; break;
5387 }
5388 }
5389
5390 /* Must have right number of operands. */
5391 i.error = number_of_operands_mismatch;
5392
5393 for (t = current_templates->start; t < current_templates->end; t++)
5394 {
5395 addr_prefix_disp = -1;
5396
5397 if (i.operands != t->operands)
5398 continue;
5399
5400 /* Check processor support. */
5401 i.error = unsupported;
5402 found_cpu_match = (cpu_flags_match (t)
5403 == CPU_FLAGS_PERFECT_MATCH);
5404 if (!found_cpu_match)
5405 continue;
5406
5407 /* Check AT&T mnemonic. */
5408 i.error = unsupported_with_intel_mnemonic;
5409 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
5410 continue;
5411
5412 /* Check AT&T/Intel syntax and Intel64/AMD64 ISA. */
5413 i.error = unsupported_syntax;
5414 if ((intel_syntax && t->opcode_modifier.attsyntax)
5415 || (!intel_syntax && t->opcode_modifier.intelsyntax)
5416 || (intel64 && t->opcode_modifier.amd64)
5417 || (!intel64 && t->opcode_modifier.intel64))
5418 continue;
5419
5420 /* Check the suffix, except for some instructions in intel mode. */
5421 i.error = invalid_instruction_suffix;
5422 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
5423 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
5424 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
5425 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
5426 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
5427 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
5428 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
5429 continue;
5430 /* In Intel mode all mnemonic suffixes must be explicitly allowed. */
5431 if ((t->opcode_modifier.no_bsuf && mnemsuf_check.no_bsuf)
5432 || (t->opcode_modifier.no_wsuf && mnemsuf_check.no_wsuf)
5433 || (t->opcode_modifier.no_lsuf && mnemsuf_check.no_lsuf)
5434 || (t->opcode_modifier.no_ssuf && mnemsuf_check.no_ssuf)
5435 || (t->opcode_modifier.no_qsuf && mnemsuf_check.no_qsuf)
5436 || (t->opcode_modifier.no_ldsuf && mnemsuf_check.no_ldsuf))
5437 continue;
5438
5439 size_match = operand_size_match (t);
5440 if (!size_match)
5441 continue;
5442
5443 for (j = 0; j < MAX_OPERANDS; j++)
5444 operand_types[j] = t->operand_types[j];
5445
5446 /* In general, don't allow 64-bit operands in 32-bit mode. */
5447 if (i.suffix == QWORD_MNEM_SUFFIX
5448 && flag_code != CODE_64BIT
5449 && (intel_syntax
5450 ? (!t->opcode_modifier.ignoresize
5451 && !t->opcode_modifier.broadcast
5452 && !intel_float_operand (t->name))
5453 : intel_float_operand (t->name) != 2)
5454 && ((!operand_types[0].bitfield.regmmx
5455 && !operand_types[0].bitfield.regsimd)
5456 || (!operand_types[t->operands > 1].bitfield.regmmx
5457 && !operand_types[t->operands > 1].bitfield.regsimd))
5458 && (t->base_opcode != 0x0fc7
5459 || t->extension_opcode != 1 /* cmpxchg8b */))
5460 continue;
5461
5462 /* In general, don't allow 32-bit operands on pre-386. */
5463 else if (i.suffix == LONG_MNEM_SUFFIX
5464 && !cpu_arch_flags.bitfield.cpui386
5465 && (intel_syntax
5466 ? (!t->opcode_modifier.ignoresize
5467 && !intel_float_operand (t->name))
5468 : intel_float_operand (t->name) != 2)
5469 && ((!operand_types[0].bitfield.regmmx
5470 && !operand_types[0].bitfield.regsimd)
5471 || (!operand_types[t->operands > 1].bitfield.regmmx
5472 && !operand_types[t->operands > 1].bitfield.regsimd)))
5473 continue;
5474
5475 /* Do not verify operands when there are none. */
5476 else
5477 {
5478 if (!t->operands)
5479 /* We've found a match; break out of loop. */
5480 break;
5481 }
5482
5483 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
5484 into Disp32/Disp16/Disp32 operand. */
5485 if (i.prefix[ADDR_PREFIX] != 0)
5486 {
5487 /* There should be only one Disp operand. */
5488 switch (flag_code)
5489 {
5490 case CODE_16BIT:
5491 for (j = 0; j < MAX_OPERANDS; j++)
5492 {
5493 if (operand_types[j].bitfield.disp16)
5494 {
5495 addr_prefix_disp = j;
5496 operand_types[j].bitfield.disp32 = 1;
5497 operand_types[j].bitfield.disp16 = 0;
5498 break;
5499 }
5500 }
5501 break;
5502 case CODE_32BIT:
5503 for (j = 0; j < MAX_OPERANDS; j++)
5504 {
5505 if (operand_types[j].bitfield.disp32)
5506 {
5507 addr_prefix_disp = j;
5508 operand_types[j].bitfield.disp32 = 0;
5509 operand_types[j].bitfield.disp16 = 1;
5510 break;
5511 }
5512 }
5513 break;
5514 case CODE_64BIT:
5515 for (j = 0; j < MAX_OPERANDS; j++)
5516 {
5517 if (operand_types[j].bitfield.disp64)
5518 {
5519 addr_prefix_disp = j;
5520 operand_types[j].bitfield.disp64 = 0;
5521 operand_types[j].bitfield.disp32 = 1;
5522 break;
5523 }
5524 }
5525 break;
5526 }
5527 }
5528
5529 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
5530 if (i.reloc[0] == BFD_RELOC_386_GOT32 && t->base_opcode == 0xa0)
5531 continue;
5532
5533 /* We check register size if needed. */
5534 if (t->opcode_modifier.checkregsize)
5535 {
5536 check_register = (1 << t->operands) - 1;
5537 if (i.broadcast)
5538 check_register &= ~(1 << i.broadcast->operand);
5539 }
5540 else
5541 check_register = 0;
5542
5543 overlap0 = operand_type_and (i.types[0], operand_types[0]);
5544 switch (t->operands)
5545 {
5546 case 1:
5547 if (!operand_type_match (overlap0, i.types[0]))
5548 continue;
5549 break;
5550 case 2:
5551 /* xchg %eax, %eax is a special case. It is an alias for nop
5552 only in 32bit mode and we can use opcode 0x90. In 64bit
5553 mode, we can't use 0x90 for xchg %eax, %eax since it should
5554 zero-extend %eax to %rax. */
5555 if (flag_code == CODE_64BIT
5556 && t->base_opcode == 0x90
5557 && operand_type_equal (&i.types [0], &acc32)
5558 && operand_type_equal (&i.types [1], &acc32))
5559 continue;
5560 /* xrelease mov %eax, <disp> is another special case. It must not
5561 match the accumulator-only encoding of mov. */
5562 if (flag_code != CODE_64BIT
5563 && i.hle_prefix
5564 && t->base_opcode == 0xa0
5565 && i.types[0].bitfield.acc
5566 && operand_type_check (i.types[1], anymem))
5567 continue;
5568 if (!(size_match & MATCH_STRAIGHT))
5569 goto check_reverse;
5570 /* If we want store form, we reverse direction of operands. */
5571 if (i.dir_encoding == dir_encoding_store
5572 && t->opcode_modifier.d)
5573 goto check_reverse;
5574 /* Fall through. */
5575
5576 case 3:
5577 /* If we want store form, we skip the current load. */
5578 if (i.dir_encoding == dir_encoding_store
5579 && i.mem_operands == 0
5580 && t->opcode_modifier.load)
5581 continue;
5582 /* Fall through. */
5583 case 4:
5584 case 5:
5585 overlap1 = operand_type_and (i.types[1], operand_types[1]);
5586 if (!operand_type_match (overlap0, i.types[0])
5587 || !operand_type_match (overlap1, i.types[1])
5588 || ((check_register & 3) == 3
5589 && !operand_type_register_match (i.types[0],
5590 operand_types[0],
5591 i.types[1],
5592 operand_types[1])))
5593 {
5594 /* Check if other direction is valid ... */
5595 if (!t->opcode_modifier.d)
5596 continue;
5597
5598 check_reverse:
5599 if (!(size_match & MATCH_REVERSE))
5600 continue;
5601 /* Try reversing direction of operands. */
5602 overlap0 = operand_type_and (i.types[0], operand_types[1]);
5603 overlap1 = operand_type_and (i.types[1], operand_types[0]);
5604 if (!operand_type_match (overlap0, i.types[0])
5605 || !operand_type_match (overlap1, i.types[1])
5606 || (check_register
5607 && !operand_type_register_match (i.types[0],
5608 operand_types[1],
5609 i.types[1],
5610 operand_types[0])))
5611 {
5612 /* Does not match either direction. */
5613 continue;
5614 }
5615 /* found_reverse_match holds which of D or FloatR
5616 we've found. */
5617 if (!t->opcode_modifier.d)
5618 found_reverse_match = 0;
5619 else if (operand_types[0].bitfield.tbyte)
5620 found_reverse_match = Opcode_FloatD;
5621 else
5622 found_reverse_match = Opcode_D;
5623 if (t->opcode_modifier.floatr)
5624 found_reverse_match |= Opcode_FloatR;
5625 }
5626 else
5627 {
5628 /* Found a forward 2 operand match here. */
5629 switch (t->operands)
5630 {
5631 case 5:
5632 overlap4 = operand_type_and (i.types[4],
5633 operand_types[4]);
5634 /* Fall through. */
5635 case 4:
5636 overlap3 = operand_type_and (i.types[3],
5637 operand_types[3]);
5638 /* Fall through. */
5639 case 3:
5640 overlap2 = operand_type_and (i.types[2],
5641 operand_types[2]);
5642 break;
5643 }
5644
5645 switch (t->operands)
5646 {
5647 case 5:
5648 if (!operand_type_match (overlap4, i.types[4])
5649 || !operand_type_register_match (i.types[3],
5650 operand_types[3],
5651 i.types[4],
5652 operand_types[4]))
5653 continue;
5654 /* Fall through. */
5655 case 4:
5656 if (!operand_type_match (overlap3, i.types[3])
5657 || ((check_register & 0xa) == 0xa
5658 && !operand_type_register_match (i.types[1],
5659 operand_types[1],
5660 i.types[3],
5661 operand_types[3]))
5662 || ((check_register & 0xc) == 0xc
5663 && !operand_type_register_match (i.types[2],
5664 operand_types[2],
5665 i.types[3],
5666 operand_types[3])))
5667 continue;
5668 /* Fall through. */
5669 case 3:
5670 /* Here we make use of the fact that there are no
5671 reverse match 3 operand instructions. */
5672 if (!operand_type_match (overlap2, i.types[2])
5673 || ((check_register & 5) == 5
5674 && !operand_type_register_match (i.types[0],
5675 operand_types[0],
5676 i.types[2],
5677 operand_types[2]))
5678 || ((check_register & 6) == 6
5679 && !operand_type_register_match (i.types[1],
5680 operand_types[1],
5681 i.types[2],
5682 operand_types[2])))
5683 continue;
5684 break;
5685 }
5686 }
5687 /* Found either forward/reverse 2, 3 or 4 operand match here:
5688 slip through to break. */
5689 }
5690 if (!found_cpu_match)
5691 {
5692 found_reverse_match = 0;
5693 continue;
5694 }
5695
5696 /* Check if vector and VEX operands are valid. */
5697 if (check_VecOperands (t) || VEX_check_operands (t))
5698 {
5699 specific_error = i.error;
5700 continue;
5701 }
5702
5703 /* We've found a match; break out of loop. */
5704 break;
5705 }
5706
5707 if (t == current_templates->end)
5708 {
5709 /* We found no match. */
5710 const char *err_msg;
5711 switch (specific_error ? specific_error : i.error)
5712 {
5713 default:
5714 abort ();
5715 case operand_size_mismatch:
5716 err_msg = _("operand size mismatch");
5717 break;
5718 case operand_type_mismatch:
5719 err_msg = _("operand type mismatch");
5720 break;
5721 case register_type_mismatch:
5722 err_msg = _("register type mismatch");
5723 break;
5724 case number_of_operands_mismatch:
5725 err_msg = _("number of operands mismatch");
5726 break;
5727 case invalid_instruction_suffix:
5728 err_msg = _("invalid instruction suffix");
5729 break;
5730 case bad_imm4:
5731 err_msg = _("constant doesn't fit in 4 bits");
5732 break;
5733 case unsupported_with_intel_mnemonic:
5734 err_msg = _("unsupported with Intel mnemonic");
5735 break;
5736 case unsupported_syntax:
5737 err_msg = _("unsupported syntax");
5738 break;
5739 case unsupported:
5740 as_bad (_("unsupported instruction `%s'"),
5741 current_templates->start->name);
5742 return NULL;
5743 case invalid_vsib_address:
5744 err_msg = _("invalid VSIB address");
5745 break;
5746 case invalid_vector_register_set:
5747 err_msg = _("mask, index, and destination registers must be distinct");
5748 break;
5749 case unsupported_vector_index_register:
5750 err_msg = _("unsupported vector index register");
5751 break;
5752 case unsupported_broadcast:
5753 err_msg = _("unsupported broadcast");
5754 break;
5755 case broadcast_not_on_src_operand:
5756 err_msg = _("broadcast not on source memory operand");
5757 break;
5758 case broadcast_needed:
5759 err_msg = _("broadcast is needed for operand of such type");
5760 break;
5761 case unsupported_masking:
5762 err_msg = _("unsupported masking");
5763 break;
5764 case mask_not_on_destination:
5765 err_msg = _("mask not on destination operand");
5766 break;
5767 case no_default_mask:
5768 err_msg = _("default mask isn't allowed");
5769 break;
5770 case unsupported_rc_sae:
5771 err_msg = _("unsupported static rounding/sae");
5772 break;
5773 case rc_sae_operand_not_last_imm:
5774 if (intel_syntax)
5775 err_msg = _("RC/SAE operand must precede immediate operands");
5776 else
5777 err_msg = _("RC/SAE operand must follow immediate operands");
5778 break;
5779 case invalid_register_operand:
5780 err_msg = _("invalid register operand");
5781 break;
5782 }
5783 as_bad (_("%s for `%s'"), err_msg,
5784 current_templates->start->name);
5785 return NULL;
5786 }
5787
5788 if (!quiet_warnings)
5789 {
5790 if (!intel_syntax
5791 && (i.types[0].bitfield.jumpabsolute
5792 != operand_types[0].bitfield.jumpabsolute))
5793 {
5794 as_warn (_("indirect %s without `*'"), t->name);
5795 }
5796
5797 if (t->opcode_modifier.isprefix
5798 && t->opcode_modifier.ignoresize)
5799 {
5800 /* Warn them that a data or address size prefix doesn't
5801 affect assembly of the next line of code. */
5802 as_warn (_("stand-alone `%s' prefix"), t->name);
5803 }
5804 }
5805
5806 /* Copy the template we found. */
5807 i.tm = *t;
5808
5809 if (addr_prefix_disp != -1)
5810 i.tm.operand_types[addr_prefix_disp]
5811 = operand_types[addr_prefix_disp];
5812
5813 if (found_reverse_match)
5814 {
5815 /* If we found a reverse match we must alter the opcode
5816 direction bit. found_reverse_match holds bits to change
5817 (different for int & float insns). */
5818
5819 i.tm.base_opcode ^= found_reverse_match;
5820
5821 i.tm.operand_types[0] = operand_types[1];
5822 i.tm.operand_types[1] = operand_types[0];
5823 }
5824
5825 return t;
5826 }
5827
5828 static int
5829 check_string (void)
5830 {
5831 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
5832 if (i.tm.operand_types[mem_op].bitfield.esseg)
5833 {
5834 if (i.seg[0] != NULL && i.seg[0] != &es)
5835 {
5836 as_bad (_("`%s' operand %d must use `%ses' segment"),
5837 i.tm.name,
5838 mem_op + 1,
5839 register_prefix);
5840 return 0;
5841 }
5842 /* There's only ever one segment override allowed per instruction.
5843 This instruction possibly has a legal segment override on the
5844 second operand, so copy the segment to where non-string
5845 instructions store it, allowing common code. */
5846 i.seg[0] = i.seg[1];
5847 }
5848 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
5849 {
5850 if (i.seg[1] != NULL && i.seg[1] != &es)
5851 {
5852 as_bad (_("`%s' operand %d must use `%ses' segment"),
5853 i.tm.name,
5854 mem_op + 2,
5855 register_prefix);
5856 return 0;
5857 }
5858 }
5859 return 1;
5860 }
5861
/* Derive or validate the instruction mnemonic suffix (i.suffix) for
   the matched template, then apply its consequences: tweak the base
   opcode's size bits, emit operand/address size prefixes, set REX.W,
   and reject registers whose size doesn't fit.  Returns 1 on success,
   0 after issuing a diagnostic.  */
static int
process_suffix (void)
{
  /* If matched instruction specifies an explicit instruction mnemonic
     suffix, use it.  */
  if (i.tm.opcode_modifier.size16)
    i.suffix = WORD_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size32)
    i.suffix = LONG_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size64)
    i.suffix = QWORD_MNEM_SUFFIX;
  else if (i.reg_operands)
    {
      /* If there's no instruction mnemonic suffix we try to invent one
	 based on register operands.  */
      if (!i.suffix)
	{
	  /* We take i.suffix from the last register operand specified,
	     Destination register type is more significant than source
	     register type.  crc32 in SSE4.2 prefers source register
	     type.  */
	  if (i.tm.base_opcode == 0xf20f38f1)        /* crc32, non-byte form */
	    {
	      if (i.types[0].bitfield.reg && i.types[0].bitfield.word)
		i.suffix = WORD_MNEM_SUFFIX;
	      else if (i.types[0].bitfield.reg && i.types[0].bitfield.dword)
		i.suffix = LONG_MNEM_SUFFIX;
	      else if (i.types[0].bitfield.reg && i.types[0].bitfield.qword)
		i.suffix = QWORD_MNEM_SUFFIX;
	    }
	  else if (i.tm.base_opcode == 0xf20f38f0)   /* crc32, byte form */
	    {
	      if (i.types[0].bitfield.reg && i.types[0].bitfield.byte)
		i.suffix = BYTE_MNEM_SUFFIX;
	    }

	  if (!i.suffix)
	    {
	      int op;

	      if (i.tm.base_opcode == 0xf20f38f1
		  || i.tm.base_opcode == 0xf20f38f0)
		{
		  /* We have to know the operand size for crc32.  */
		  as_bad (_("ambiguous memory operand size for `%s`"),
			  i.tm.name);
		  return 0;
		}

	      /* Scan operands from last to first; the first sized
		 register found (skipping port/shift-count operands)
		 supplies the suffix.  */
	      for (op = i.operands; --op >= 0;)
		if (!i.tm.operand_types[op].bitfield.inoutportreg
		    && !i.tm.operand_types[op].bitfield.shiftcount)
		  {
		    if (!i.types[op].bitfield.reg)
		      continue;
		    if (i.types[op].bitfield.byte)
		      i.suffix = BYTE_MNEM_SUFFIX;
		    else if (i.types[op].bitfield.word)
		      i.suffix = WORD_MNEM_SUFFIX;
		    else if (i.types[op].bitfield.dword)
		      i.suffix = LONG_MNEM_SUFFIX;
		    else if (i.types[op].bitfield.qword)
		      i.suffix = QWORD_MNEM_SUFFIX;
		    else
		      continue;
		    break;
		  }
	    }
	}
      else if (i.suffix == BYTE_MNEM_SUFFIX)
	{
	  /* An explicit suffix is checked against the registers,
	     unless Intel syntax lets the template ignore it.  */
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_bsuf)
	    i.suffix = 0;
	  else if (!check_byte_reg ())
	    return 0;
	}
      else if (i.suffix == LONG_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_lsuf
	      && !i.tm.opcode_modifier.todword
	      && !i.tm.opcode_modifier.toqword)
	    i.suffix = 0;
	  else if (!check_long_reg ())
	    return 0;
	}
      else if (i.suffix == QWORD_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_qsuf
	      && !i.tm.opcode_modifier.todword
	      && !i.tm.opcode_modifier.toqword)
	    i.suffix = 0;
	  else if (!check_qword_reg ())
	    return 0;
	}
      else if (i.suffix == WORD_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_wsuf)
	    i.suffix = 0;
	  else if (!check_word_reg ())
	    return 0;
	}
      else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
	/* Do nothing if the instruction is going to ignore the prefix.  */
	;
      else
	abort ();
    }
  else if (i.tm.opcode_modifier.defaultsize
	   && !i.suffix
	   /* exclude fldenv/frstor/fsave/fstenv */
	   && i.tm.opcode_modifier.no_ssuf)
    {
      /* Stack operations default to the current stack operand size.  */
      i.suffix = stackop_size;
    }
  else if (intel_syntax
	   && !i.suffix
	   && (i.tm.operand_types[0].bitfield.jumpabsolute
	       || i.tm.opcode_modifier.jumpbyte
	       || i.tm.opcode_modifier.jumpintersegment
	       || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
		   && i.tm.extension_opcode <= 3)))
    {
      /* Intel-syntax branches take their size from the current mode,
	 constrained by which suffixes the template permits.  */
      switch (flag_code)
	{
	case CODE_64BIT:
	  if (!i.tm.opcode_modifier.no_qsuf)
	    {
	      i.suffix = QWORD_MNEM_SUFFIX;
	      break;
	    }
	  /* Fall through.  */
	case CODE_32BIT:
	  if (!i.tm.opcode_modifier.no_lsuf)
	    i.suffix = LONG_MNEM_SUFFIX;
	  break;
	case CODE_16BIT:
	  if (!i.tm.opcode_modifier.no_wsuf)
	    i.suffix = WORD_MNEM_SUFFIX;
	  break;
	}
    }

  if (!i.suffix)
    {
      if (!intel_syntax)
	{
	  if (i.tm.opcode_modifier.w)
	    {
	      as_bad (_("no instruction mnemonic suffix given and "
			"no register operands; can't size instruction"));
	      return 0;
	    }
	}
      else
	{
	  /* Collect a bitmask of the suffixes this template accepts;
	     more than one bit set means the size is ambiguous.  */
	  unsigned int suffixes;

	  suffixes = !i.tm.opcode_modifier.no_bsuf;
	  if (!i.tm.opcode_modifier.no_wsuf)
	    suffixes |= 1 << 1;
	  if (!i.tm.opcode_modifier.no_lsuf)
	    suffixes |= 1 << 2;
	  if (!i.tm.opcode_modifier.no_ldsuf)
	    suffixes |= 1 << 3;
	  if (!i.tm.opcode_modifier.no_ssuf)
	    suffixes |= 1 << 4;
	  if (flag_code == CODE_64BIT && !i.tm.opcode_modifier.no_qsuf)
	    suffixes |= 1 << 5;

	  /* There are more than suffix matches.  */
	  if (i.tm.opcode_modifier.w
	      || ((suffixes & (suffixes - 1))  /* more than one bit set */
		  && !i.tm.opcode_modifier.defaultsize
		  && !i.tm.opcode_modifier.ignoresize))
	    {
	      as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
	      return 0;
	    }
	}
    }

  /* Change the opcode based on the operand size given by i.suffix.  */
  switch (i.suffix)
    {
    /* Size floating point instruction.  */
    case LONG_MNEM_SUFFIX:
      if (i.tm.opcode_modifier.floatmf)
	{
	  i.tm.base_opcode ^= 4;
	  break;
	}
      /* fall through */
    case WORD_MNEM_SUFFIX:
    case QWORD_MNEM_SUFFIX:
      /* It's not a byte, select word/dword operation.  */
      if (i.tm.opcode_modifier.w)
	{
	  if (i.tm.opcode_modifier.shortform)
	    i.tm.base_opcode |= 8;
	  else
	    i.tm.base_opcode |= 1;
	}
      /* fall through */
    case SHORT_MNEM_SUFFIX:
      /* Now select between word & dword operations via the operand
	 size prefix, except for instructions that will ignore this
	 prefix anyway.  */
      if (i.reg_operands > 0
	  && i.types[0].bitfield.reg
	  && i.tm.opcode_modifier.addrprefixopreg
	  && (i.tm.opcode_modifier.immext
	      || i.operands == 1))
	{
	  /* The address size override prefix changes the size of the
	     first operand.  */
	  if ((flag_code == CODE_32BIT
	       && i.op[0].regs->reg_type.bitfield.word)
	      || (flag_code != CODE_32BIT
		  && i.op[0].regs->reg_type.bitfield.dword))
	    if (!add_prefix (ADDR_PREFIX_OPCODE))
	      return 0;
	}
      else if (i.suffix != QWORD_MNEM_SUFFIX
	       && !i.tm.opcode_modifier.ignoresize
	       && !i.tm.opcode_modifier.floatmf
	       && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
		   || (flag_code == CODE_64BIT
		       && i.tm.opcode_modifier.jumpbyte)))
	{
	  unsigned int prefix = DATA_PREFIX_OPCODE;

	  if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
	    prefix = ADDR_PREFIX_OPCODE;

	  if (!add_prefix (prefix))
	    return 0;
	}

      /* Set mode64 for an operand.  */
      if (i.suffix == QWORD_MNEM_SUFFIX
	  && flag_code == CODE_64BIT
	  && !i.tm.opcode_modifier.norex64
	  /* Special case for xchg %rax,%rax.  It is NOP and doesn't
	     need rex64.  */
	  && ! (i.operands == 2
		&& i.tm.base_opcode == 0x90
		&& i.tm.extension_opcode == None
		&& operand_type_equal (&i.types [0], &acc64)
		&& operand_type_equal (&i.types [1], &acc64)))
	i.rex |= REX_W;

      break;
    }

  if (i.reg_operands != 0
      && i.operands > 1
      && i.tm.opcode_modifier.addrprefixopreg
      && !i.tm.opcode_modifier.immext)
    {
      /* Check invalid register operand when the address size override
	 prefix changes the size of register operands.  */
      unsigned int op;
      enum { need_word, need_dword, need_qword } need;

      if (flag_code == CODE_32BIT)
	need = i.prefix[ADDR_PREFIX] ? need_word : need_dword;
      else
	{
	  if (i.prefix[ADDR_PREFIX])
	    need = need_dword;
	  else
	    need = flag_code == CODE_64BIT ? need_qword : need_word;
	}

      for (op = 0; op < i.operands; op++)
	if (i.types[op].bitfield.reg
	    && ((need == need_word
		 && !i.op[op].regs->reg_type.bitfield.word)
		|| (need == need_dword
		    && !i.op[op].regs->reg_type.bitfield.dword)
		|| (need == need_qword
		    && !i.op[op].regs->reg_type.bitfield.qword)))
	  {
	    as_bad (_("invalid register operand size for `%s'"),
		    i.tm.name);
	    return 0;
	  }
    }

  return 1;
}
6161
/* Check register operands against a `b' (byte) suffix.  Accepts byte
   registers; outside 64-bit mode, a wider register whose low byte is
   addressable (reg_num < 4) is accepted with a warning.  Anything else
   is rejected.  Returns 1 if OK, 0 after issuing a diagnostic.  */
static int
check_byte_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    {
      /* Skip non-register operands. */
      if (!i.types[op].bitfield.reg)
	continue;

      /* If this is an eight bit register, it's OK.  If it's the 16 or
	 32 bit version of an eight bit register, we will just use the
	 low portion, and that's OK too. */
      if (i.types[op].bitfield.byte)
	continue;

      /* I/O port address operands are OK too. */
      if (i.tm.operand_types[op].bitfield.inoutportreg)
	continue;

      /* crc32 doesn't generate this warning. */
      if (i.tm.base_opcode == 0xf20f38f0)
	continue;

      if ((i.types[op].bitfield.word
	   || i.types[op].bitfield.dword
	   || i.types[op].bitfield.qword)
	  && i.op[op].regs->reg_num < 4
	  /* Prohibit these changes in 64bit mode, since the lowering
	     would be more complicated. */
	  && flag_code != CODE_64BIT)
	{
#if REGISTER_WARNINGS
	  if (!quiet_warnings)
	    /* The REGNAM_* offset arithmetic maps e.g. %ax / %eax to
	       the corresponding byte register entry (%al) in the
	       register table.  */
	    as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		     register_prefix,
		     (i.op[op].regs + (i.types[op].bitfield.word
				       ? REGNAM_AL - REGNAM_AX
				       : REGNAM_AL - REGNAM_EAX))->reg_name,
		     register_prefix,
		     i.op[op].regs->reg_name,
		     i.suffix);
#endif
	  continue;
	}
      /* Any other register is bad. */
      if (i.types[op].bitfield.reg
	  || i.types[op].bitfield.regmmx
	  || i.types[op].bitfield.regsimd
	  || i.types[op].bitfield.sreg2
	  || i.types[op].bitfield.sreg3
	  || i.types[op].bitfield.control
	  || i.types[op].bitfield.debug
	  || i.types[op].bitfield.test)
	{
	  as_bad (_("`%s%s' not allowed with `%s%c'"),
		  register_prefix,
		  i.op[op].regs->reg_name,
		  i.tm.name,
		  i.suffix);
	  return 0;
	}
    }
  return 1;
}
6228
/* Check register operands against an `l' (dword) suffix.  Rejects byte
   registers where the template wants word/dword; warns (or errors in
   64-bit mode) about word registers where dword is wanted; for qword
   registers, Intel syntax with a ToQword template upgrades the suffix
   to `q', otherwise an error is given.  Returns 1 if OK, 0 after
   issuing a diagnostic.  */
static int
check_long_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    /* Skip non-register operands. */
    if (!i.types[op].bitfield.reg)
      continue;
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    else if (i.types[op].bitfield.byte
	     && (i.tm.operand_types[op].bitfield.reg
		 || i.tm.operand_types[op].bitfield.acc)
	     && (i.tm.operand_types[op].bitfield.word
		 || i.tm.operand_types[op].bitfield.dword))
      {
	as_bad (_("`%s%s' not allowed with `%s%c'"),
		register_prefix,
		i.op[op].regs->reg_name,
		i.tm.name,
		i.suffix);
	return 0;
      }
    /* Warn if the e prefix on a general reg is missing. */
    else if ((!quiet_warnings || flag_code == CODE_64BIT)
	     && i.types[op].bitfield.word
	     && (i.tm.operand_types[op].bitfield.reg
		 || i.tm.operand_types[op].bitfield.acc)
	     && i.tm.operand_types[op].bitfield.dword)
      {
	/* Prohibit these changes in the 64bit mode, since the
	   lowering is more complicated.  */
	if (flag_code == CODE_64BIT)
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
#if REGISTER_WARNINGS
	/* REGNAM_EAX - REGNAM_AX maps a word register to its dword
	   sibling in the register table.  */
	as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		 register_prefix,
		 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
		 register_prefix, i.op[op].regs->reg_name, i.suffix);
#endif
      }
    /* Warn if the r prefix on a general reg is present. */
    else if (i.types[op].bitfield.qword
	     && (i.tm.operand_types[op].bitfield.reg
		 || i.tm.operand_types[op].bitfield.acc)
	     && i.tm.operand_types[op].bitfield.dword)
      {
	if (intel_syntax
	    && i.tm.opcode_modifier.toqword
	    && !i.types[0].bitfield.regsimd)
	  {
	    /* Convert to QWORD.  We want REX byte. */
	    i.suffix = QWORD_MNEM_SUFFIX;
	  }
	else
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
      }
  return 1;
}
6299
/* Check register operands against a `q' (qword) suffix.  Rejects byte
   registers where the template wants word/dword; for word/dword
   registers where qword is wanted, Intel syntax with a ToDword
   template downgrades the suffix to `l', otherwise an error is
   given.  Returns 1 if OK, 0 after issuing a diagnostic.  */
static int
check_qword_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0; )
    /* Skip non-register operands. */
    if (!i.types[op].bitfield.reg)
      continue;
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    else if (i.types[op].bitfield.byte
	     && (i.tm.operand_types[op].bitfield.reg
		 || i.tm.operand_types[op].bitfield.acc)
	     && (i.tm.operand_types[op].bitfield.word
		 || i.tm.operand_types[op].bitfield.dword))
      {
	as_bad (_("`%s%s' not allowed with `%s%c'"),
		register_prefix,
		i.op[op].regs->reg_name,
		i.tm.name,
		i.suffix);
	return 0;
      }
    /* Warn if the r prefix on a general reg is missing. */
    else if ((i.types[op].bitfield.word
	      || i.types[op].bitfield.dword)
	     && (i.tm.operand_types[op].bitfield.reg
		 || i.tm.operand_types[op].bitfield.acc)
	     && i.tm.operand_types[op].bitfield.qword)
      {
	/* In Intel syntax a ToDword template lets us silently use the
	   dword form instead; otherwise this register size is an
	   error.  */
	if (intel_syntax
	    && i.tm.opcode_modifier.todword
	    && !i.types[0].bitfield.regsimd)
	  {
	    /* Convert to DWORD.  We don't want REX byte. */
	    i.suffix = LONG_MNEM_SUFFIX;
	  }
	else
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
      }
  return 1;
}
6350
6351 static int
6352 check_word_reg (void)
6353 {
6354 int op;
6355 for (op = i.operands; --op >= 0;)
6356 /* Skip non-register operands. */
6357 if (!i.types[op].bitfield.reg)
6358 continue;
6359 /* Reject eight bit registers, except where the template requires
6360 them. (eg. movzb) */
6361 else if (i.types[op].bitfield.byte
6362 && (i.tm.operand_types[op].bitfield.reg
6363 || i.tm.operand_types[op].bitfield.acc)
6364 && (i.tm.operand_types[op].bitfield.word
6365 || i.tm.operand_types[op].bitfield.dword))
6366 {
6367 as_bad (_("`%s%s' not allowed with `%s%c'"),
6368 register_prefix,
6369 i.op[op].regs->reg_name,
6370 i.tm.name,
6371 i.suffix);
6372 return 0;
6373 }
6374 /* Warn if the e or r prefix on a general reg is present. */
6375 else if ((!quiet_warnings || flag_code == CODE_64BIT)
6376 && (i.types[op].bitfield.dword
6377 || i.types[op].bitfield.qword)
6378 && (i.tm.operand_types[op].bitfield.reg
6379 || i.tm.operand_types[op].bitfield.acc)
6380 && i.tm.operand_types[op].bitfield.word)
6381 {
6382 /* Prohibit these changes in the 64bit mode, since the
6383 lowering is more complicated. */
6384 if (flag_code == CODE_64BIT)
6385 {
6386 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6387 register_prefix, i.op[op].regs->reg_name,
6388 i.suffix);
6389 return 0;
6390 }
6391 #if REGISTER_WARNINGS
6392 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6393 register_prefix,
6394 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
6395 register_prefix, i.op[op].regs->reg_name, i.suffix);
6396 #endif
6397 }
6398 return 1;
6399 }
6400
/* Narrow the immediate operand type in i.types[j] down to a single
   immediate size, using i.suffix when one is present and the data
   size prefix / mode otherwise.  Returns 1 on success, 0 after
   issuing a diagnostic when the size remains ambiguous.  */
static int
update_imm (unsigned int j)
{
  i386_operand_type overlap = i.types[j];
  /* Only act when the operand is an immediate whose type still allows
     more than one size.  */
  if ((overlap.bitfield.imm8
       || overlap.bitfield.imm8s
       || overlap.bitfield.imm16
       || overlap.bitfield.imm32
       || overlap.bitfield.imm32s
       || overlap.bitfield.imm64)
      && !operand_type_equal (&overlap, &imm8)
      && !operand_type_equal (&overlap, &imm8s)
      && !operand_type_equal (&overlap, &imm16)
      && !operand_type_equal (&overlap, &imm32)
      && !operand_type_equal (&overlap, &imm32s)
      && !operand_type_equal (&overlap, &imm64))
    {
      if (i.suffix)
	{
	  /* The mnemonic suffix picks the immediate size; keep only
	     the bits of the original type compatible with it.  */
	  i386_operand_type temp;

	  operand_type_set (&temp, 0);
	  if (i.suffix == BYTE_MNEM_SUFFIX)
	    {
	      temp.bitfield.imm8 = overlap.bitfield.imm8;
	      temp.bitfield.imm8s = overlap.bitfield.imm8s;
	    }
	  else if (i.suffix == WORD_MNEM_SUFFIX)
	    temp.bitfield.imm16 = overlap.bitfield.imm16;
	  else if (i.suffix == QWORD_MNEM_SUFFIX)
	    {
	      temp.bitfield.imm64 = overlap.bitfield.imm64;
	      temp.bitfield.imm32s = overlap.bitfield.imm32s;
	    }
	  else
	    temp.bitfield.imm32 = overlap.bitfield.imm32;
	  overlap = temp;
	}
      else if (operand_type_equal (&overlap, &imm16_32_32s)
	       || operand_type_equal (&overlap, &imm16_32)
	       || operand_type_equal (&overlap, &imm16_32s))
	{
	  /* No suffix: the effective operand size (mode xor data size
	     prefix) decides between word and dword immediates.  */
	  if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
	    overlap = imm16;
	  else
	    overlap = imm32s;
	}
      if (!operand_type_equal (&overlap, &imm8)
	  && !operand_type_equal (&overlap, &imm8s)
	  && !operand_type_equal (&overlap, &imm16)
	  && !operand_type_equal (&overlap, &imm32)
	  && !operand_type_equal (&overlap, &imm32s)
	  && !operand_type_equal (&overlap, &imm64))
	{
	  as_bad (_("no instruction mnemonic suffix given; "
		    "can't determine immediate size"));
	  return 0;
	}
    }
  i.types[j] = overlap;

  return 1;
}
6464
6465 static int
6466 finalize_imm (void)
6467 {
6468 unsigned int j, n;
6469
6470 /* Update the first 2 immediate operands. */
6471 n = i.operands > 2 ? 2 : i.operands;
6472 if (n)
6473 {
6474 for (j = 0; j < n; j++)
6475 if (update_imm (j) == 0)
6476 return 0;
6477
6478 /* The 3rd operand can't be immediate operand. */
6479 gas_assert (operand_type_check (i.types[2], imm) == 0);
6480 }
6481
6482 return 1;
6483 }
6484
6485 static int
6486 process_operands (void)
6487 {
6488 /* Default segment register this instruction will use for memory
6489 accesses. 0 means unknown. This is only for optimizing out
6490 unnecessary segment overrides. */
6491 const seg_entry *default_seg = 0;
6492
6493 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
6494 {
6495 unsigned int dupl = i.operands;
6496 unsigned int dest = dupl - 1;
6497 unsigned int j;
6498
6499 /* The destination must be an xmm register. */
6500 gas_assert (i.reg_operands
6501 && MAX_OPERANDS > dupl
6502 && operand_type_equal (&i.types[dest], &regxmm));
6503
6504 if (i.tm.operand_types[0].bitfield.acc
6505 && i.tm.operand_types[0].bitfield.xmmword)
6506 {
6507 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
6508 {
6509 /* Keep xmm0 for instructions with VEX prefix and 3
6510 sources. */
6511 i.tm.operand_types[0].bitfield.acc = 0;
6512 i.tm.operand_types[0].bitfield.regsimd = 1;
6513 goto duplicate;
6514 }
6515 else
6516 {
6517 /* We remove the first xmm0 and keep the number of
6518 operands unchanged, which in fact duplicates the
6519 destination. */
6520 for (j = 1; j < i.operands; j++)
6521 {
6522 i.op[j - 1] = i.op[j];
6523 i.types[j - 1] = i.types[j];
6524 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
6525 }
6526 }
6527 }
6528 else if (i.tm.opcode_modifier.implicit1stxmm0)
6529 {
6530 gas_assert ((MAX_OPERANDS - 1) > dupl
6531 && (i.tm.opcode_modifier.vexsources
6532 == VEX3SOURCES));
6533
6534 /* Add the implicit xmm0 for instructions with VEX prefix
6535 and 3 sources. */
6536 for (j = i.operands; j > 0; j--)
6537 {
6538 i.op[j] = i.op[j - 1];
6539 i.types[j] = i.types[j - 1];
6540 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
6541 }
6542 i.op[0].regs
6543 = (const reg_entry *) hash_find (reg_hash, "xmm0");
6544 i.types[0] = regxmm;
6545 i.tm.operand_types[0] = regxmm;
6546
6547 i.operands += 2;
6548 i.reg_operands += 2;
6549 i.tm.operands += 2;
6550
6551 dupl++;
6552 dest++;
6553 i.op[dupl] = i.op[dest];
6554 i.types[dupl] = i.types[dest];
6555 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
6556 }
6557 else
6558 {
6559 duplicate:
6560 i.operands++;
6561 i.reg_operands++;
6562 i.tm.operands++;
6563
6564 i.op[dupl] = i.op[dest];
6565 i.types[dupl] = i.types[dest];
6566 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
6567 }
6568
6569 if (i.tm.opcode_modifier.immext)
6570 process_immext ();
6571 }
6572 else if (i.tm.operand_types[0].bitfield.acc
6573 && i.tm.operand_types[0].bitfield.xmmword)
6574 {
6575 unsigned int j;
6576
6577 for (j = 1; j < i.operands; j++)
6578 {
6579 i.op[j - 1] = i.op[j];
6580 i.types[j - 1] = i.types[j];
6581
6582 /* We need to adjust fields in i.tm since they are used by
6583 build_modrm_byte. */
6584 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
6585 }
6586
6587 i.operands--;
6588 i.reg_operands--;
6589 i.tm.operands--;
6590 }
6591 else if (i.tm.opcode_modifier.implicitquadgroup)
6592 {
6593 unsigned int regnum, first_reg_in_group, last_reg_in_group;
6594
6595 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
6596 gas_assert (i.operands >= 2 && i.types[1].bitfield.regsimd);
6597 regnum = register_number (i.op[1].regs);
6598 first_reg_in_group = regnum & ~3;
6599 last_reg_in_group = first_reg_in_group + 3;
6600 if (regnum != first_reg_in_group)
6601 as_warn (_("source register `%s%s' implicitly denotes"
6602 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
6603 register_prefix, i.op[1].regs->reg_name,
6604 register_prefix, i.op[1].regs->reg_name, first_reg_in_group,
6605 register_prefix, i.op[1].regs->reg_name, last_reg_in_group,
6606 i.tm.name);
6607 }
6608 else if (i.tm.opcode_modifier.regkludge)
6609 {
6610 /* The imul $imm, %reg instruction is converted into
6611 imul $imm, %reg, %reg, and the clr %reg instruction
6612 is converted into xor %reg, %reg. */
6613
6614 unsigned int first_reg_op;
6615
6616 if (operand_type_check (i.types[0], reg))
6617 first_reg_op = 0;
6618 else
6619 first_reg_op = 1;
6620 /* Pretend we saw the extra register operand. */
6621 gas_assert (i.reg_operands == 1
6622 && i.op[first_reg_op + 1].regs == 0);
6623 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
6624 i.types[first_reg_op + 1] = i.types[first_reg_op];
6625 i.operands++;
6626 i.reg_operands++;
6627 }
6628
6629 if (i.tm.opcode_modifier.shortform)
6630 {
6631 if (i.types[0].bitfield.sreg2
6632 || i.types[0].bitfield.sreg3)
6633 {
6634 if (i.tm.base_opcode == POP_SEG_SHORT
6635 && i.op[0].regs->reg_num == 1)
6636 {
6637 as_bad (_("you can't `pop %scs'"), register_prefix);
6638 return 0;
6639 }
6640 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
6641 if ((i.op[0].regs->reg_flags & RegRex) != 0)
6642 i.rex |= REX_B;
6643 }
6644 else
6645 {
6646 /* The register or float register operand is in operand
6647 0 or 1. */
6648 unsigned int op;
6649
6650 if ((i.types[0].bitfield.reg && i.types[0].bitfield.tbyte)
6651 || operand_type_check (i.types[0], reg))
6652 op = 0;
6653 else
6654 op = 1;
6655 /* Register goes in low 3 bits of opcode. */
6656 i.tm.base_opcode |= i.op[op].regs->reg_num;
6657 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6658 i.rex |= REX_B;
6659 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
6660 {
6661 /* Warn about some common errors, but press on regardless.
6662 The first case can be generated by gcc (<= 2.8.1). */
6663 if (i.operands == 2)
6664 {
6665 /* Reversed arguments on faddp, fsubp, etc. */
6666 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
6667 register_prefix, i.op[!intel_syntax].regs->reg_name,
6668 register_prefix, i.op[intel_syntax].regs->reg_name);
6669 }
6670 else
6671 {
6672 /* Extraneous `l' suffix on fp insn. */
6673 as_warn (_("translating to `%s %s%s'"), i.tm.name,
6674 register_prefix, i.op[0].regs->reg_name);
6675 }
6676 }
6677 }
6678 }
6679 else if (i.tm.opcode_modifier.modrm)
6680 {
6681 /* The opcode is completed (modulo i.tm.extension_opcode which
6682 must be put into the modrm byte). Now, we make the modrm and
6683 index base bytes based on all the info we've collected. */
6684
6685 default_seg = build_modrm_byte ();
6686 }
6687 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
6688 {
6689 default_seg = &ds;
6690 }
6691 else if (i.tm.opcode_modifier.isstring)
6692 {
6693 /* For the string instructions that allow a segment override
6694 on one of their operands, the default segment is ds. */
6695 default_seg = &ds;
6696 }
6697
6698 if (i.tm.base_opcode == 0x8d /* lea */
6699 && i.seg[0]
6700 && !quiet_warnings)
6701 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
6702
6703 /* If a segment was explicitly specified, and the specified segment
6704 is not the default, use an opcode prefix to select it. If we
6705 never figured out what the default segment is, then default_seg
6706 will be zero at this point, and the specified segment prefix will
6707 always be used. */
6708 if ((i.seg[0]) && (i.seg[0] != default_seg))
6709 {
6710 if (!add_prefix (i.seg[0]->seg_prefix))
6711 return 0;
6712 }
6713 return 1;
6714 }
6715
/* Build the ModRM byte (and, where required, the SIB byte, the REX/VREX
   bits and the VEX.vvvv register specifier) for the instruction held in
   the global `i', based on the operand information collected by earlier
   passes.  Returns the default segment implied by the chosen addressing
   form (&ss for %bp/%esp style bases, &ds for other memory forms), or 0
   when no default segment was determined.  */
6716 static const seg_entry *
6717 build_modrm_byte (void)
6718 {
6719 const seg_entry *default_seg = 0;
6720 unsigned int source, dest;
6721 int vex_3_sources;
6722
6723 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
6724 if (vex_3_sources)
6725 {
6726 unsigned int nds, reg_slot;
6727 expressionS *exp;
6728
6729 dest = i.operands - 1;
6730 nds = dest - 1;
6731
6732 /* There are 2 kinds of instructions:
6733 1. 5 operands: 4 register operands or 3 register operands
6734 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
6735 VexW0 or VexW1. The destination must be either XMM, YMM or
6736 ZMM register.
6737 2. 4 operands: 4 register operands or 3 register operands
6738 plus 1 memory operand, with VexXDS. */
6739 gas_assert ((i.reg_operands == 4
6740 || (i.reg_operands == 3 && i.mem_operands == 1))
6741 && i.tm.opcode_modifier.vexvvvv == VEXXDS
6742 && i.tm.opcode_modifier.vexw
6743 && i.tm.operand_types[dest].bitfield.regsimd);
6744
6745 /* If VexW1 is set, the first non-immediate operand is the source and
6746 the second non-immediate one is encoded in the immediate operand. */
6747 if (i.tm.opcode_modifier.vexw == VEXW1)
6748 {
6749 source = i.imm_operands;
6750 reg_slot = i.imm_operands + 1;
6751 }
6752 else
6753 {
6754 source = i.imm_operands + 1;
6755 reg_slot = i.imm_operands;
6756 }
6757
6758 if (i.imm_operands == 0)
6759 {
6760 /* When there is no immediate operand, generate an 8bit
6761 immediate operand to encode the first operand. */
6762 exp = &im_expressions[i.imm_operands++];
6763 i.op[i.operands].imms = exp;
6764 i.types[i.operands] = imm8;
6765 i.operands++;
6766
6767 gas_assert (i.tm.operand_types[reg_slot].bitfield.regsimd);
6768 exp->X_op = O_constant;
/* Register number goes in imm8[7:4]; imm8[3:0] stays clear (RegVRex —
   upper-16 registers — is asserted absent just below).  */
6769 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
6770 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
6771 }
6772 else
6773 {
6774 unsigned int imm_slot;
6775
6776 gas_assert (i.imm_operands == 1 && i.types[0].bitfield.vec_imm4);
6777
6778 if (i.tm.opcode_modifier.immext)
6779 {
6780 /* When ImmExt is set, the immediate byte is the last
6781 operand. */
6782 imm_slot = i.operands - 1;
6783 source--;
6784 reg_slot--;
6785 }
6786 else
6787 {
6788 imm_slot = 0;
6789
6790 /* Turn on Imm8 so that output_imm will generate it. */
6791 i.types[imm_slot].bitfield.imm8 = 1;
6792 }
6793
6794 gas_assert (i.tm.operand_types[reg_slot].bitfield.regsimd);
6795 i.op[imm_slot].imms->X_add_number
6796 |= register_number (i.op[reg_slot].regs) << 4;
6797 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
6798 }
6799
6800 gas_assert (i.tm.operand_types[nds].bitfield.regsimd);
6801 i.vex.register_specifier = i.op[nds].regs;
6802 }
6803 else
6804 source = dest = 0;
6805
6806 /* i.reg_operands MUST be the number of real register operands;
6807 implicit registers do not count. If there are 3 register
6808 operands, it must be a instruction with VexNDS. For a
6809 instruction with VexNDD, the destination register is encoded
6810 in VEX prefix. If there are 4 register operands, it must be
6811 a instruction with VEX prefix and 3 sources. */
6812 if (i.mem_operands == 0
6813 && ((i.reg_operands == 2
6814 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
6815 || (i.reg_operands == 3
6816 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
6817 || (i.reg_operands == 4 && vex_3_sources)))
6818 {
/* Register-only forms: locate SOURCE (and hence DEST) by skipping any
   immediate / shift-count / RC-SAE operands that precede it.  */
6819 switch (i.operands)
6820 {
6821 case 2:
6822 source = 0;
6823 break;
6824 case 3:
6825 /* When there are 3 operands, one of them may be immediate,
6826 which may be the first or the last operand. Otherwise,
6827 the first operand must be shift count register (cl) or it
6828 is an instruction with VexNDS. */
6829 gas_assert (i.imm_operands == 1
6830 || (i.imm_operands == 0
6831 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
6832 || i.types[0].bitfield.shiftcount)));
6833 if (operand_type_check (i.types[0], imm)
6834 || i.types[0].bitfield.shiftcount)
6835 source = 1;
6836 else
6837 source = 0;
6838 break;
6839 case 4:
6840 /* When there are 4 operands, the first two must be 8bit
6841 immediate operands. The source operand will be the 3rd
6842 one.
6843
6844 For instructions with VexNDS, if the first operand
6845 an imm8, the source operand is the 2nd one. If the last
6846 operand is imm8, the source operand is the first one. */
6847 gas_assert ((i.imm_operands == 2
6848 && i.types[0].bitfield.imm8
6849 && i.types[1].bitfield.imm8)
6850 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
6851 && i.imm_operands == 1
6852 && (i.types[0].bitfield.imm8
6853 || i.types[i.operands - 1].bitfield.imm8
6854 || i.rounding)));
6855 if (i.imm_operands == 2)
6856 source = 2;
6857 else
6858 {
6859 if (i.types[0].bitfield.imm8)
6860 source = 1;
6861 else
6862 source = 0;
6863 }
6864 break;
6865 case 5:
6866 if (is_evex_encoding (&i.tm))
6867 {
6868 /* For EVEX instructions, when there are 5 operands, the
6869 first one must be immediate operand. If the second one
6870 is immediate operand, the source operand is the 3th
6871 one. If the last one is immediate operand, the source
6872 operand is the 2nd one. */
6873 gas_assert (i.imm_operands == 2
6874 && i.tm.opcode_modifier.sae
6875 && operand_type_check (i.types[0], imm));
6876 if (operand_type_check (i.types[1], imm))
6877 source = 2;
6878 else if (operand_type_check (i.types[4], imm))
6879 source = 1;
6880 else
6881 abort ();
6882 }
6883 break;
6884 default:
6885 abort ();
6886 }
6887
6888 if (!vex_3_sources)
6889 {
6890 dest = source + 1;
6891
6892 /* RC/SAE operand could be between DEST and SRC. That happens
6893 when one operand is GPR and the other one is XMM/YMM/ZMM
6894 register. */
6895 if (i.rounding && i.rounding->operand == (int) dest)
6896 dest++;
6897
6898 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6899 {
6900 /* For instructions with VexNDS, the register-only source
6901 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
6902 register. It is encoded in VEX prefix. We need to
6903 clear RegMem bit before calling operand_type_equal. */
6904
6905 i386_operand_type op;
6906 unsigned int vvvv;
6907
6908 /* Check register-only source operand when two source
6909 operands are swapped. */
/* NOTE(review): when the swap is taken, SOURCE is set to DEST but DEST
   itself is not re-derived before the encoding below — looks
   intentional for the templates that reach here, but confirm against
   the opcode table before touching this.  */
6910 if (!i.tm.operand_types[source].bitfield.baseindex
6911 && i.tm.operand_types[dest].bitfield.baseindex)
6912 {
6913 vvvv = source;
6914 source = dest;
6915 }
6916 else
6917 vvvv = dest;
6918
6919 op = i.tm.operand_types[vvvv];
6920 op.bitfield.regmem = 0;
6921 if ((dest + 1) >= i.operands
6922 || ((!op.bitfield.reg
6923 || (!op.bitfield.dword && !op.bitfield.qword))
6924 && !op.bitfield.regsimd
6925 && !operand_type_equal (&op, &regmask)))
6926 abort ();
6927 i.vex.register_specifier = i.op[vvvv].regs;
6928 dest++;
6929 }
6930 }
6931
6932 i.rm.mode = 3;
6933 /* One of the register operands will be encoded in the i.tm.reg
6934 field, the other in the combined i.tm.mode and i.tm.regmem
6935 fields. If no form of this instruction supports a memory
6936 destination operand, then we assume the source operand may
6937 sometimes be a memory operand and so we need to store the
6938 destination in the i.rm.reg field. */
6939 if (!i.tm.operand_types[dest].bitfield.regmem
6940 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
6941 {
6942 i.rm.reg = i.op[dest].regs->reg_num;
6943 i.rm.regmem = i.op[source].regs->reg_num;
6944 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6945 i.rex |= REX_R;
6946 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6947 i.vrex |= REX_R;
6948 if ((i.op[source].regs->reg_flags & RegRex) != 0)
6949 i.rex |= REX_B;
6950 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6951 i.vrex |= REX_B;
6952 }
6953 else
6954 {
6955 i.rm.reg = i.op[source].regs->reg_num;
6956 i.rm.regmem = i.op[dest].regs->reg_num;
6957 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6958 i.rex |= REX_B;
6959 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6960 i.vrex |= REX_B;
6961 if ((i.op[source].regs->reg_flags & RegRex) != 0)
6962 i.rex |= REX_R;
6963 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6964 i.vrex |= REX_R;
6965 }
/* Outside 64-bit mode there is no REX prefix; the only way REX_R can
   be set here is a control register beyond %cr7 (%cr8), which is
   encoded with a LOCK prefix instead — the assert below checks the
   operand really is a control register.  */
6966 if (flag_code != CODE_64BIT && (i.rex & REX_R))
6967 {
6968 if (!i.types[i.tm.operand_types[0].bitfield.regmem].bitfield.control)
6969 abort ();
6970 i.rex &= ~REX_R;
6971 add_prefix (LOCK_PREFIX_OPCODE);
6972 }
6973 }
6974 else
6975 { /* If it's not 2 reg operands... */
6976 unsigned int mem;
6977
6978 if (i.mem_operands)
6979 {
6980 unsigned int fake_zero_displacement = 0;
6981 unsigned int op;
6982
/* Find the (single) memory operand; OP indexes it below.  */
6983 for (op = 0; op < i.operands; op++)
6984 if (operand_type_check (i.types[op], anymem))
6985 break;
6986 gas_assert (op < i.operands);
6987
6988 if (i.tm.opcode_modifier.vecsib)
6989 {
/* VSIB addressing always needs a SIB byte, and the index must be a
   real vector register (never %eiz/%riz).  */
6990 if (i.index_reg->reg_num == RegEiz
6991 || i.index_reg->reg_num == RegRiz)
6992 abort ();
6993
6994 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6995 if (!i.base_reg)
6996 {
6997 i.sib.base = NO_BASE_REGISTER;
6998 i.sib.scale = i.log2_scale_factor;
6999 i.types[op].bitfield.disp8 = 0;
7000 i.types[op].bitfield.disp16 = 0;
7001 i.types[op].bitfield.disp64 = 0;
7002 if (flag_code != CODE_64BIT || i.prefix[ADDR_PREFIX])
7003 {
7004 /* Must be 32 bit */
7005 i.types[op].bitfield.disp32 = 1;
7006 i.types[op].bitfield.disp32s = 0;
7007 }
7008 else
7009 {
7010 i.types[op].bitfield.disp32 = 0;
7011 i.types[op].bitfield.disp32s = 1;
7012 }
7013 }
7014 i.sib.index = i.index_reg->reg_num;
7015 if ((i.index_reg->reg_flags & RegRex) != 0)
7016 i.rex |= REX_X;
7017 if ((i.index_reg->reg_flags & RegVRex) != 0)
7018 i.vrex |= REX_X;
7019 }
7020
7021 default_seg = &ds;
7022
7023 if (i.base_reg == 0)
7024 {
7025 i.rm.mode = 0;
7026 if (!i.disp_operands)
7027 fake_zero_displacement = 1;
7028 if (i.index_reg == 0)
7029 {
7030 i386_operand_type newdisp;
7031
7032 gas_assert (!i.tm.opcode_modifier.vecsib);
7033 /* Operand is just <disp> */
7034 if (flag_code == CODE_64BIT)
7035 {
7036 /* 64bit mode overwrites the 32bit absolute
7037 addressing by RIP relative addressing and
7038 absolute addressing is encoded by one of the
7039 redundant SIB forms. */
7040 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
7041 i.sib.base = NO_BASE_REGISTER;
7042 i.sib.index = NO_INDEX_REGISTER;
7043 newdisp = (!i.prefix[ADDR_PREFIX] ? disp32s : disp32);
7044 }
7045 else if ((flag_code == CODE_16BIT)
7046 ^ (i.prefix[ADDR_PREFIX] != 0))
7047 {
7048 i.rm.regmem = NO_BASE_REGISTER_16;
7049 newdisp = disp16;
7050 }
7051 else
7052 {
7053 i.rm.regmem = NO_BASE_REGISTER;
7054 newdisp = disp32;
7055 }
7056 i.types[op] = operand_type_and_not (i.types[op], anydisp);
7057 i.types[op] = operand_type_or (i.types[op], newdisp);
7058 }
7059 else if (!i.tm.opcode_modifier.vecsib)
7060 {
7061 /* !i.base_reg && i.index_reg */
7062 if (i.index_reg->reg_num == RegEiz
7063 || i.index_reg->reg_num == RegRiz)
7064 i.sib.index = NO_INDEX_REGISTER;
7065 else
7066 i.sib.index = i.index_reg->reg_num;
7067 i.sib.base = NO_BASE_REGISTER;
7068 i.sib.scale = i.log2_scale_factor;
7069 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
7070 i.types[op].bitfield.disp8 = 0;
7071 i.types[op].bitfield.disp16 = 0;
7072 i.types[op].bitfield.disp64 = 0;
7073 if (flag_code != CODE_64BIT || i.prefix[ADDR_PREFIX])
7074 {
7075 /* Must be 32 bit */
7076 i.types[op].bitfield.disp32 = 1;
7077 i.types[op].bitfield.disp32s = 0;
7078 }
7079 else
7080 {
7081 i.types[op].bitfield.disp32 = 0;
7082 i.types[op].bitfield.disp32s = 1;
7083 }
7084 if ((i.index_reg->reg_flags & RegRex) != 0)
7085 i.rex |= REX_X;
7086 }
7087 }
7088 /* RIP addressing for 64bit mode. */
7089 else if (i.base_reg->reg_num == RegRip ||
7090 i.base_reg->reg_num == RegEip)
7091 {
7092 gas_assert (!i.tm.opcode_modifier.vecsib);
7093 i.rm.regmem = NO_BASE_REGISTER;
7094 i.types[op].bitfield.disp8 = 0;
7095 i.types[op].bitfield.disp16 = 0;
7096 i.types[op].bitfield.disp32 = 0;
7097 i.types[op].bitfield.disp32s = 1;
7098 i.types[op].bitfield.disp64 = 0;
7099 i.flags[op] |= Operand_PCrel;
7100 if (! i.disp_operands)
7101 fake_zero_displacement = 1;
7102 }
7103 else if (i.base_reg->reg_type.bitfield.word)
7104 {
/* 16-bit addressing forms: the base/index combination selects the
   ModRM r/m value directly (no SIB byte in 16-bit mode).  */
7105 gas_assert (!i.tm.opcode_modifier.vecsib);
7106 switch (i.base_reg->reg_num)
7107 {
7108 case 3: /* (%bx) */
7109 if (i.index_reg == 0)
7110 i.rm.regmem = 7;
7111 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
7112 i.rm.regmem = i.index_reg->reg_num - 6;
7113 break;
7114 case 5: /* (%bp) */
7115 default_seg = &ss;
7116 if (i.index_reg == 0)
7117 {
7118 i.rm.regmem = 6;
7119 if (operand_type_check (i.types[op], disp) == 0)
7120 {
7121 /* fake (%bp) into 0(%bp) */
7122 i.types[op].bitfield.disp8 = 1;
7123 fake_zero_displacement = 1;
7124 }
7125 }
7126 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
7127 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
7128 break;
7129 default: /* (%si) -> 4 or (%di) -> 5 */
7130 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
7131 }
7132 i.rm.mode = mode_from_disp_size (i.types[op]);
7133 }
7134 else /* i.base_reg and 32/64 bit mode */
7135 {
7136 if (flag_code == CODE_64BIT
7137 && operand_type_check (i.types[op], disp))
7138 {
7139 i.types[op].bitfield.disp16 = 0;
7140 i.types[op].bitfield.disp64 = 0;
7141 if (i.prefix[ADDR_PREFIX] == 0)
7142 {
7143 i.types[op].bitfield.disp32 = 0;
7144 i.types[op].bitfield.disp32s = 1;
7145 }
7146 else
7147 {
7148 i.types[op].bitfield.disp32 = 1;
7149 i.types[op].bitfield.disp32s = 0;
7150 }
7151 }
7152
7153 if (!i.tm.opcode_modifier.vecsib)
7154 i.rm.regmem = i.base_reg->reg_num;
7155 if ((i.base_reg->reg_flags & RegRex) != 0)
7156 i.rex |= REX_B;
7157 i.sib.base = i.base_reg->reg_num;
7158 /* x86-64 ignores REX prefix bit here to avoid decoder
7159 complications. */
7160 if (!(i.base_reg->reg_flags & RegRex)
7161 && (i.base_reg->reg_num == EBP_REG_NUM
7162 || i.base_reg->reg_num == ESP_REG_NUM))
7163 default_seg = &ss;
/* Base register 5 (%ebp/%rbp family) has no mod=00 encoding; force a
   disp8 of zero so mode_from_disp_size picks mod=01.  */
7164 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
7165 {
7166 fake_zero_displacement = 1;
7167 i.types[op].bitfield.disp8 = 1;
7168 }
7169 i.sib.scale = i.log2_scale_factor;
7170 if (i.index_reg == 0)
7171 {
7172 gas_assert (!i.tm.opcode_modifier.vecsib);
7173 /* <disp>(%esp) becomes two byte modrm with no index
7174 register. We've already stored the code for esp
7175 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
7176 Any base register besides %esp will not use the
7177 extra modrm byte. */
7178 i.sib.index = NO_INDEX_REGISTER;
7179 }
7180 else if (!i.tm.opcode_modifier.vecsib)
7181 {
7182 if (i.index_reg->reg_num == RegEiz
7183 || i.index_reg->reg_num == RegRiz)
7184 i.sib.index = NO_INDEX_REGISTER;
7185 else
7186 i.sib.index = i.index_reg->reg_num;
7187 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
7188 if ((i.index_reg->reg_flags & RegRex) != 0)
7189 i.rex |= REX_X;
7190 }
7191
7192 if (i.disp_operands
7193 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
7194 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
7195 i.rm.mode = 0;
7196 else
7197 {
7198 if (!fake_zero_displacement
7199 && !i.disp_operands
7200 && i.disp_encoding)
7201 {
7202 fake_zero_displacement = 1;
7203 if (i.disp_encoding == disp_encoding_8bit)
7204 i.types[op].bitfield.disp8 = 1;
7205 else
7206 i.types[op].bitfield.disp32 = 1;
7207 }
7208 i.rm.mode = mode_from_disp_size (i.types[op]);
7209 }
7210 }
7211
7212 if (fake_zero_displacement)
7213 {
7214 /* Fakes a zero displacement assuming that i.types[op]
7215 holds the correct displacement size. */
7216 expressionS *exp;
7217
7218 gas_assert (i.op[op].disps == 0);
7219 exp = &disp_expressions[i.disp_operands++];
7220 i.op[op].disps = exp;
7221 exp->X_op = O_constant;
7222 exp->X_add_number = 0;
7223 exp->X_add_symbol = (symbolS *) 0;
7224 exp->X_op_symbol = (symbolS *) 0;
7225 }
7226
7227 mem = op;
7228 }
7229 else
7230 mem = ~0;
7231
/* XOP instructions with two sources: VEX.W selects which of the first
   two operands goes in VEX.vvvv and which in ModRM.rm.  */
7232 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
7233 {
7234 if (operand_type_check (i.types[0], imm))
7235 i.vex.register_specifier = NULL;
7236 else
7237 {
7238 /* VEX.vvvv encodes one of the sources when the first
7239 operand is not an immediate. */
7240 if (i.tm.opcode_modifier.vexw == VEXW0)
7241 i.vex.register_specifier = i.op[0].regs;
7242 else
7243 i.vex.register_specifier = i.op[1].regs;
7244 }
7245
7246 /* Destination is a XMM register encoded in the ModRM.reg
7247 and VEX.R bit. */
7248 i.rm.reg = i.op[2].regs->reg_num;
7249 if ((i.op[2].regs->reg_flags & RegRex) != 0)
7250 i.rex |= REX_R;
7251
7252 /* ModRM.rm and VEX.B encodes the other source. */
7253 if (!i.mem_operands)
7254 {
7255 i.rm.mode = 3;
7256
7257 if (i.tm.opcode_modifier.vexw == VEXW0)
7258 i.rm.regmem = i.op[1].regs->reg_num;
7259 else
7260 i.rm.regmem = i.op[0].regs->reg_num;
7261
7262 if ((i.op[1].regs->reg_flags & RegRex) != 0)
7263 i.rex |= REX_B;
7264 }
7265 }
7266 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
7267 {
7268 i.vex.register_specifier = i.op[2].regs;
7269 if (!i.mem_operands)
7270 {
7271 i.rm.mode = 3;
7272 i.rm.regmem = i.op[1].regs->reg_num;
7273 if ((i.op[1].regs->reg_flags & RegRex) != 0)
7274 i.rex |= REX_B;
7275 }
7276 }
7277 /* Fill in i.rm.reg or i.rm.regmem field with register operand
7278 (if any) based on i.tm.extension_opcode. Again, we must be
7279 careful to make sure that segment/control/debug/test/MMX
7280 registers are coded into the i.rm.reg field. */
7281 else if (i.reg_operands)
7282 {
7283 unsigned int op;
7284 unsigned int vex_reg = ~0;
7285
/* OP = index of the first register operand of any class.  */
7286 for (op = 0; op < i.operands; op++)
7287 if (i.types[op].bitfield.reg
7288 || i.types[op].bitfield.regmmx
7289 || i.types[op].bitfield.regsimd
7290 || i.types[op].bitfield.regbnd
7291 || i.types[op].bitfield.regmask
7292 || i.types[op].bitfield.sreg2
7293 || i.types[op].bitfield.sreg3
7294 || i.types[op].bitfield.control
7295 || i.types[op].bitfield.debug
7296 || i.types[op].bitfield.test)
7297 break;
7298
7299 if (vex_3_sources)
7300 op = dest;
7301 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
7302 {
7303 /* For instructions with VexNDS, the register-only
7304 source operand is encoded in VEX prefix. */
7305 gas_assert (mem != (unsigned int) ~0);
7306
7307 if (op > mem)
7308 {
7309 vex_reg = op++;
7310 gas_assert (op < i.operands);
7311 }
7312 else
7313 {
7314 /* Check register-only source operand when two source
7315 operands are swapped. */
7316 if (!i.tm.operand_types[op].bitfield.baseindex
7317 && i.tm.operand_types[op + 1].bitfield.baseindex)
7318 {
7319 vex_reg = op;
7320 op += 2;
7321 gas_assert (mem == (vex_reg + 1)
7322 && op < i.operands);
7323 }
7324 else
7325 {
7326 vex_reg = op + 1;
7327 gas_assert (vex_reg < i.operands);
7328 }
7329 }
7330 }
7331 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
7332 {
7333 /* For instructions with VexNDD, the register destination
7334 is encoded in VEX prefix. */
7335 if (i.mem_operands == 0)
7336 {
7337 /* There is no memory operand. */
7338 gas_assert ((op + 2) == i.operands);
7339 vex_reg = op + 1;
7340 }
7341 else
7342 {
7343 /* There are only 2 non-immediate operands. */
7344 gas_assert (op < i.imm_operands + 2
7345 && i.operands == i.imm_operands + 2);
7346 vex_reg = i.imm_operands + 1;
7347 }
7348 }
7349 else
7350 gas_assert (op < i.operands);
7351
7352 if (vex_reg != (unsigned int) ~0)
7353 {
/* Operand encoded in VEX.vvvv must be a 32/64-bit GPR, a SIMD
   register, or a mask register.  */
7354 i386_operand_type *type = &i.tm.operand_types[vex_reg];
7355
7356 if ((!type->bitfield.reg
7357 || (!type->bitfield.dword && !type->bitfield.qword))
7358 && !type->bitfield.regsimd
7359 && !operand_type_equal (type, &regmask))
7360 abort ();
7361
7362 i.vex.register_specifier = i.op[vex_reg].regs;
7363 }
7364
7365 /* Don't set OP operand twice. */
7366 if (vex_reg != op)
7367 {
7368 /* If there is an extension opcode to put here, the
7369 register number must be put into the regmem field. */
7370 if (i.tm.extension_opcode != None)
7371 {
7372 i.rm.regmem = i.op[op].regs->reg_num;
7373 if ((i.op[op].regs->reg_flags & RegRex) != 0)
7374 i.rex |= REX_B;
7375 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
7376 i.vrex |= REX_B;
7377 }
7378 else
7379 {
7380 i.rm.reg = i.op[op].regs->reg_num;
7381 if ((i.op[op].regs->reg_flags & RegRex) != 0)
7382 i.rex |= REX_R;
7383 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
7384 i.vrex |= REX_R;
7385 }
7386 }
7387
7388 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
7389 must set it to 3 to indicate this is a register operand
7390 in the regmem field. */
7391 if (!i.mem_operands)
7392 i.rm.mode = 3;
7393 }
7394
7395 /* Fill in i.rm.reg field with extension opcode (if any). */
7396 if (i.tm.extension_opcode != None)
7397 i.rm.reg = i.tm.extension_opcode;
7398 }
7399 return default_seg;
7400 }
7401
/* Emit a relaxable (possibly short) branch: the fixed part of the frag
   gets any prefixes plus one opcode byte, and the variable part is left
   to md_convert_frag/relaxation to pick the 1-byte vs. 2-byte opcode
   form and the final displacement size.  Uses the global `i'.  */
7402 static void
7403 output_branch (void)
7404 {
7405 char *p;
7406 int size;
7407 int code16;
7408 int prefix;
7409 relax_substateT subtype;
7410 symbolS *sym;
7411 offsetT off;
7412
7413 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
/* {disp32} forces the relaxer to start from the BIG (long) form.  */
7414 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
7415
/* Count the prefix bytes we will emit ourselves, consuming them from
   i.prefixes; a data-size prefix also toggles the 16/32-bit subtype.  */
7416 prefix = 0;
7417 if (i.prefix[DATA_PREFIX] != 0)
7418 {
7419 prefix = 1;
7420 i.prefixes -= 1;
7421 code16 ^= CODE16;
7422 }
7423 /* Pentium4 branch hints. */
7424 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
7425 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
7426 {
7427 prefix++;
7428 i.prefixes--;
7429 }
7430 if (i.prefix[REX_PREFIX] != 0)
7431 {
7432 prefix++;
7433 i.prefixes--;
7434 }
7435
7436 /* BND prefixed jump. */
7437 if (i.prefix[BND_PREFIX] != 0)
7438 {
7439 FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
7440 i.prefixes -= 1;
7441 }
7442
7443 if (i.prefixes != 0 && !intel_syntax)
7444 as_warn (_("skipping prefixes on this instruction"));
7445
7446 /* It's always a symbol; End frag & setup for relax.
7447 Make sure there is enough room in this frag for the largest
7448 instruction we may generate in md_convert_frag. This is 2
7449 bytes for the opcode and room for the prefix and largest
7450 displacement. */
7451 frag_grow (prefix + 2 + 4);
7452 /* Prefix and 1 opcode byte go in fr_fix. */
7453 p = frag_more (prefix + 1);
7454 if (i.prefix[DATA_PREFIX] != 0)
7455 *p++ = DATA_PREFIX_OPCODE;
7456 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
7457 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
7458 *p++ = i.prefix[SEG_PREFIX];
7459 if (i.prefix[REX_PREFIX] != 0)
7460 *p++ = i.prefix[REX_PREFIX];
7461 *p = i.tm.base_opcode;
7462
/* Classify for relaxation: unconditional jmp, or a conditional jump
   (COND_JUMP86 when the long-form Jcc of the i386 is unavailable).  */
7463 if ((unsigned char) *p == JUMP_PC_RELATIVE)
7464 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
7465 else if (cpu_arch_flags.bitfield.cpui386)
7466 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
7467 else
7468 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
7469 subtype |= code16;
7470
7471 sym = i.op[0].disps->X_add_symbol;
7472 off = i.op[0].disps->X_add_number;
7473
7474 if (i.op[0].disps->X_op != O_constant
7475 && i.op[0].disps->X_op != O_symbol)
7476 {
7477 /* Handle complex expressions. */
7478 sym = make_expr_symbol (i.op[0].disps);
7479 off = 0;
7480 }
7481
7482 /* 1 possible extra opcode + 4 byte displacement go in var part.
7483 Pass reloc in fr_var. */
7484 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
7485 }
7486
7487 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7488 /* Return TRUE iff PLT32 relocation should be used for branching to
7489 symbol S. */
7490
7491 static bfd_boolean
7492 need_plt32_p (symbolS *s)
7493 {
7494 /* PLT32 relocation is ELF only. */
7495 if (!IS_ELF)
7496 return FALSE;
7497
7498 /* Since there is no need to prepare for PLT branch on x86-64, we
7499 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
7500 be used as a marker for 32-bit PC-relative branches. */
7501 if (!object_64bit)
7502 return FALSE;
7503
7504 /* Weak or undefined symbol need PLT32 relocation. */
7505 if (S_IS_WEAK (s) || !S_IS_DEFINED (s))
7506 return TRUE;
7507
7508 /* Non-global symbol doesn't need PLT32 relocation. */
7509 if (! S_IS_EXTERNAL (s))
7510 return FALSE;
7511
7512 /* Other global symbols need PLT32 relocation. NB: Symbol with
7513 non-default visibilities are treated as normal global symbol
7514 so that PLT32 relocation can be used as a marker for 32-bit
7515 PC-relative branches. It is useful for linker relaxation. */
7516 return TRUE;
7517 }
7518 #endif
7519
/* Emit a non-relaxable PC-relative jump (jecxz/loop with an 8-bit
   displacement, or a fixed-size jump/jcc with a 16/32-bit one),
   including any prefixes, and create the fixup for the displacement.
   Uses the global `i'.  */
7520 static void
7521 output_jump (void)
7522 {
7523 char *p;
7524 int size;
7525 fixS *fixP;
7526 bfd_reloc_code_real_type jump_reloc = i.reloc[0];
7527
7528 if (i.tm.opcode_modifier.jumpbyte)
7529 {
7530 /* This is a loop or jecxz type instruction. */
7531 size = 1;
/* Address-size prefix changes which of %cx/%ecx/%rcx is tested.  */
7532 if (i.prefix[ADDR_PREFIX] != 0)
7533 {
7534 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
7535 i.prefixes -= 1;
7536 }
7537 /* Pentium4 branch hints. */
7538 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
7539 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
7540 {
7541 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
7542 i.prefixes--;
7543 }
7544 }
7545 else
7546 {
/* 16/32-bit displacement form: the effective operand size (mode,
   possibly toggled by a data-size prefix) selects 2 vs 4 bytes.  */
7547 int code16;
7548
7549 code16 = 0;
7550 if (flag_code == CODE_16BIT)
7551 code16 = CODE16;
7552
7553 if (i.prefix[DATA_PREFIX] != 0)
7554 {
7555 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
7556 i.prefixes -= 1;
7557 code16 ^= CODE16;
7558 }
7559
7560 size = 4;
7561 if (code16)
7562 size = 2;
7563 }
7564
7565 if (i.prefix[REX_PREFIX] != 0)
7566 {
7567 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
7568 i.prefixes -= 1;
7569 }
7570
7571 /* BND prefixed jump. */
7572 if (i.prefix[BND_PREFIX] != 0)
7573 {
7574 FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
7575 i.prefixes -= 1;
7576 }
7577
7578 if (i.prefixes != 0 && !intel_syntax)
7579 as_warn (_("skipping prefixes on this instruction"));
7580
7581 p = frag_more (i.tm.opcode_length + size);
7582 switch (i.tm.opcode_length)
7583 {
7584 case 2:
7585 *p++ = i.tm.base_opcode >> 8;
7586 /* Fall through. */
7587 case 1:
7588 *p++ = i.tm.base_opcode;
7589 break;
7590 default:
7591 abort ();
7592 }
7593
7594 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Prefer PLT32 over plain PC32 for 32-bit branches to symbols that
   may bind externally; see need_plt32_p.  */
7595 if (size == 4
7596 && jump_reloc == NO_RELOC
7597 && need_plt32_p (i.op[0].disps->X_add_symbol))
7598 jump_reloc = BFD_RELOC_X86_64_PLT32;
7599 #endif
7600
7601 jump_reloc = reloc (size, 1, 1, jump_reloc);
7602
7603 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
7604 i.op[0].disps, 1, jump_reloc);
7605
7606 /* All jumps handled here are signed, but don't use a signed limit
7607 check for 32 and 16 bit jumps as we want to allow wrap around at
7608 4G and 64k respectively. */
7609 if (size == 1)
7610 fixP->fx_signed = 1;
7611 }
7612
7613 static void
7614 output_interseg_jump (void)
7615 {
7616 char *p;
7617 int size;
7618 int prefix;
7619 int code16;
7620
7621 code16 = 0;
7622 if (flag_code == CODE_16BIT)
7623 code16 = CODE16;
7624
7625 prefix = 0;
7626 if (i.prefix[DATA_PREFIX] != 0)
7627 {
7628 prefix = 1;
7629 i.prefixes -= 1;
7630 code16 ^= CODE16;
7631 }
7632 if (i.prefix[REX_PREFIX] != 0)
7633 {
7634 prefix++;
7635 i.prefixes -= 1;
7636 }
7637
7638 size = 4;
7639 if (code16)
7640 size = 2;
7641
7642 if (i.prefixes != 0 && !intel_syntax)
7643 as_warn (_("skipping prefixes on this instruction"));
7644
7645 /* 1 opcode; 2 segment; offset */
7646 p = frag_more (prefix + 1 + 2 + size);
7647
7648 if (i.prefix[DATA_PREFIX] != 0)
7649 *p++ = DATA_PREFIX_OPCODE;
7650
7651 if (i.prefix[REX_PREFIX] != 0)
7652 *p++ = i.prefix[REX_PREFIX];
7653
7654 *p++ = i.tm.base_opcode;
7655 if (i.op[1].imms->X_op == O_constant)
7656 {
7657 offsetT n = i.op[1].imms->X_add_number;
7658
7659 if (size == 2
7660 && !fits_in_unsigned_word (n)
7661 && !fits_in_signed_word (n))
7662 {
7663 as_bad (_("16-bit jump out of range"));
7664 return;
7665 }
7666 md_number_to_chars (p, n, size);
7667 }
7668 else
7669 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
7670 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
7671 if (i.op[0].imms->X_op != O_constant)
7672 as_bad (_("can't handle non absolute segment in `%s'"),
7673 i.tm.name);
7674 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
7675 }
7676
7677 static void
7678 output_insn (void)
7679 {
7680 fragS *insn_start_frag;
7681 offsetT insn_start_off;
7682
7683 /* Tie dwarf2 debug info to the address at the start of the insn.
7684 We can't do this after the insn has been output as the current
7685 frag may have been closed off. eg. by frag_var. */
7686 dwarf2_emit_insn (0);
7687
7688 insn_start_frag = frag_now;
7689 insn_start_off = frag_now_fix ();
7690
7691 /* Output jumps. */
7692 if (i.tm.opcode_modifier.jump)
7693 output_branch ();
7694 else if (i.tm.opcode_modifier.jumpbyte
7695 || i.tm.opcode_modifier.jumpdword)
7696 output_jump ();
7697 else if (i.tm.opcode_modifier.jumpintersegment)
7698 output_interseg_jump ();
7699 else
7700 {
7701 /* Output normal instructions here. */
7702 char *p;
7703 unsigned char *q;
7704 unsigned int j;
7705 unsigned int prefix;
7706
7707 if (avoid_fence
7708 && i.tm.base_opcode == 0xfae
7709 && i.operands == 1
7710 && i.imm_operands == 1
7711 && (i.op[0].imms->X_add_number == 0xe8
7712 || i.op[0].imms->X_add_number == 0xf0
7713 || i.op[0].imms->X_add_number == 0xf8))
7714 {
7715 /* Encode lfence, mfence, and sfence as
7716 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
7717 offsetT val = 0x240483f0ULL;
7718 p = frag_more (5);
7719 md_number_to_chars (p, val, 5);
7720 return;
7721 }
7722
7723 /* Some processors fail on LOCK prefix. This options makes
7724 assembler ignore LOCK prefix and serves as a workaround. */
7725 if (omit_lock_prefix)
7726 {
7727 if (i.tm.base_opcode == LOCK_PREFIX_OPCODE)
7728 return;
7729 i.prefix[LOCK_PREFIX] = 0;
7730 }
7731
7732 /* Since the VEX/EVEX prefix contains the implicit prefix, we
7733 don't need the explicit prefix. */
7734 if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
7735 {
7736 switch (i.tm.opcode_length)
7737 {
7738 case 3:
7739 if (i.tm.base_opcode & 0xff000000)
7740 {
7741 prefix = (i.tm.base_opcode >> 24) & 0xff;
7742 add_prefix (prefix);
7743 }
7744 break;
7745 case 2:
7746 if ((i.tm.base_opcode & 0xff0000) != 0)
7747 {
7748 prefix = (i.tm.base_opcode >> 16) & 0xff;
7749 if (!i.tm.cpu_flags.bitfield.cpupadlock
7750 || prefix != REPE_PREFIX_OPCODE
7751 || (i.prefix[REP_PREFIX] != REPE_PREFIX_OPCODE))
7752 add_prefix (prefix);
7753 }
7754 break;
7755 case 1:
7756 break;
7757 case 0:
7758 /* Check for pseudo prefixes. */
7759 as_bad_where (insn_start_frag->fr_file,
7760 insn_start_frag->fr_line,
7761 _("pseudo prefix without instruction"));
7762 return;
7763 default:
7764 abort ();
7765 }
7766
7767 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7768 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
7769 R_X86_64_GOTTPOFF relocation so that linker can safely
7770 perform IE->LE optimization. */
7771 if (x86_elf_abi == X86_64_X32_ABI
7772 && i.operands == 2
7773 && i.reloc[0] == BFD_RELOC_X86_64_GOTTPOFF
7774 && i.prefix[REX_PREFIX] == 0)
7775 add_prefix (REX_OPCODE);
7776 #endif
7777
7778 /* The prefix bytes. */
7779 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
7780 if (*q)
7781 FRAG_APPEND_1_CHAR (*q);
7782 }
7783 else
7784 {
7785 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
7786 if (*q)
7787 switch (j)
7788 {
7789 case REX_PREFIX:
7790 /* REX byte is encoded in VEX prefix. */
7791 break;
7792 case SEG_PREFIX:
7793 case ADDR_PREFIX:
7794 FRAG_APPEND_1_CHAR (*q);
7795 break;
7796 default:
7797 /* There should be no other prefixes for instructions
7798 with VEX prefix. */
7799 abort ();
7800 }
7801
7802 /* For EVEX instructions i.vrex should become 0 after
7803 build_evex_prefix. For VEX instructions upper 16 registers
7804 aren't available, so VREX should be 0. */
7805 if (i.vrex)
7806 abort ();
7807 /* Now the VEX prefix. */
7808 p = frag_more (i.vex.length);
7809 for (j = 0; j < i.vex.length; j++)
7810 p[j] = i.vex.bytes[j];
7811 }
7812
7813 /* Now the opcode; be careful about word order here! */
7814 if (i.tm.opcode_length == 1)
7815 {
7816 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
7817 }
7818 else
7819 {
7820 switch (i.tm.opcode_length)
7821 {
7822 case 4:
7823 p = frag_more (4);
7824 *p++ = (i.tm.base_opcode >> 24) & 0xff;
7825 *p++ = (i.tm.base_opcode >> 16) & 0xff;
7826 break;
7827 case 3:
7828 p = frag_more (3);
7829 *p++ = (i.tm.base_opcode >> 16) & 0xff;
7830 break;
7831 case 2:
7832 p = frag_more (2);
7833 break;
7834 default:
7835 abort ();
7836 break;
7837 }
7838
7839 /* Put out high byte first: can't use md_number_to_chars! */
7840 *p++ = (i.tm.base_opcode >> 8) & 0xff;
7841 *p = i.tm.base_opcode & 0xff;
7842 }
7843
7844 /* Now the modrm byte and sib byte (if present). */
7845 if (i.tm.opcode_modifier.modrm)
7846 {
7847 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
7848 | i.rm.reg << 3
7849 | i.rm.mode << 6));
7850 /* If i.rm.regmem == ESP (4)
7851 && i.rm.mode != (Register mode)
7852 && not 16 bit
7853 ==> need second modrm byte. */
7854 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
7855 && i.rm.mode != 3
7856 && !(i.base_reg && i.base_reg->reg_type.bitfield.word))
7857 FRAG_APPEND_1_CHAR ((i.sib.base << 0
7858 | i.sib.index << 3
7859 | i.sib.scale << 6));
7860 }
7861
7862 if (i.disp_operands)
7863 output_disp (insn_start_frag, insn_start_off);
7864
7865 if (i.imm_operands)
7866 output_imm (insn_start_frag, insn_start_off);
7867 }
7868
7869 #ifdef DEBUG386
7870 if (flag_debug)
7871 {
7872 pi ("" /*line*/, &i);
7873 }
7874 #endif /* DEBUG386 */
7875 }
7876
7877 /* Return the size of the displacement operand N. */
7878
7879 static int
7880 disp_size (unsigned int n)
7881 {
7882 int size = 4;
7883
7884 if (i.types[n].bitfield.disp64)
7885 size = 8;
7886 else if (i.types[n].bitfield.disp8)
7887 size = 1;
7888 else if (i.types[n].bitfield.disp16)
7889 size = 2;
7890 return size;
7891 }
7892
7893 /* Return the size of the immediate operand N. */
7894
7895 static int
7896 imm_size (unsigned int n)
7897 {
7898 int size = 4;
7899 if (i.types[n].bitfield.imm64)
7900 size = 8;
7901 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
7902 size = 1;
7903 else if (i.types[n].bitfield.imm16)
7904 size = 2;
7905 return size;
7906 }
7907
/* Emit the displacement bytes, and any fixups/relocations they need,
   for every displacement operand of the current instruction.
   INSN_START_FRAG/INSN_START_OFF mark where the instruction began;
   they are needed to compute the _GLOBAL_OFFSET_TABLE_ pc-relative
   adjustment, since the insn may span frags by now.  */

static void
output_disp (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      if (operand_type_check (i.types[n], disp))
	{
	  if (i.op[n].disps->X_op == O_constant)
	    {
	      /* Constant displacement: emit directly.  The shift by
		 i.memshift compresses the value for EVEX disp8*N
		 encoding (memshift is 0 otherwise, a no-op).  */
	      int size = disp_size (n);
	      offsetT val = i.op[n].disps->X_add_number;

	      val = offset_in_range (val >> i.memshift, size);
	      p = frag_more (size);
	      md_number_to_chars (p, val, size);
	    }
	  else
	    {
	      /* Symbolic displacement: emit a fixup to be resolved
		 later (possibly as a relocation).  */
	      enum bfd_reloc_code_real reloc_type;
	      int size = disp_size (n);
	      int sign = i.types[n].bitfield.disp32s;
	      int pcrel = (i.flags[n] & Operand_PCrel) != 0;
	      fixS *fixP;

	      /* We can't have 8 bit displacement here.  */
	      gas_assert (!i.types[n].bitfield.disp8);

	      /* The PC relative address is computed relative
		 to the instruction boundary, so in case immediate
		 fields follows, we need to adjust the value.  */
	      if (pcrel && i.imm_operands)
		{
		  unsigned int n1;
		  int sz = 0;

		  for (n1 = 0; n1 < i.operands; n1++)
		    if (operand_type_check (i.types[n1], imm))
		      {
			/* Only one immediate is allowed for PC
			   relative address.  */
			gas_assert (sz == 0);
			sz = imm_size (n1);
			i.op[n].disps->X_add_number -= sz;
		      }
		  /* We should find the immediate.  */
		  gas_assert (sz != 0);
		}

	      p = frag_more (size);
	      reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
	      /* Special-case references to _GLOBAL_OFFSET_TABLE_:
		 they may need conversion to a GOTPC relocation and,
		 for 32-bit output, an addend covering the distance
		 from the insn start to this field.  */
	      if (GOT_symbol
		  && GOT_symbol == i.op[n].disps->X_add_symbol
		  && (((reloc_type == BFD_RELOC_32
			|| reloc_type == BFD_RELOC_X86_64_32S
			|| (reloc_type == BFD_RELOC_64
			    && object_64bit))
		       && (i.op[n].disps->X_op == O_symbol
			   || (i.op[n].disps->X_op == O_add
			       && ((symbol_get_value_expression
				    (i.op[n].disps->X_op_symbol)->X_op)
				   == O_subtract))))
		      || reloc_type == BFD_RELOC_32_PCREL))
		{
		  offsetT add;

		  /* ADD = distance from the start of the insn to the
		     displacement field, accumulated across any frags
		     opened since the insn began.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    {
		      reloc_type = BFD_RELOC_386_GOTPC;
		      /* NOTE: i.op[n].imms aliases i.op[n].disps here
			 (i.op[n] is a union of expressionS pointers),
			 so this adjusts the displacement expression.  */
		      i.op[n].imms->X_add_number += add;
		    }
		  else if (reloc_type == BFD_RELOC_64)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  else
		    /* Don't do the adjustment for x86-64, as there
		       the pcrel addressing is relative to the _next_
		       insn, and that is taken care of in other code.  */
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		}
	      fixP = fix_new_exp (frag_now, p - frag_now->fr_literal,
				  size, i.op[n].disps, pcrel,
				  reloc_type);
	      /* Check for "call/jmp *mem", "mov mem, %reg",
		 "test %reg, mem" and "binop mem, %reg" where binop
		 is one of adc, add, and, cmp, or, sbb, sub, xor
		 instructions.  Always generate R_386_GOT32X for
		 "sym*GOT" operand in 32-bit mode.  */
	      if ((generate_relax_relocations
		   || (!object_64bit
		       && i.rm.mode == 0
		       && i.rm.regmem == 5))
		  && (i.rm.mode == 2
		      || (i.rm.mode == 0 && i.rm.regmem == 5))
		  && ((i.operands == 1
		       && i.tm.base_opcode == 0xff
		       && (i.rm.reg == 2 || i.rm.reg == 4))
		      || (i.operands == 2
			  && (i.tm.base_opcode == 0x8b
			      || i.tm.base_opcode == 0x85
			      || (i.tm.base_opcode & 0xc7) == 0x03))))
		{
		  if (object_64bit)
		    {
		      /* fx_tcbit/fx_tcbit2 carry hints to the fixup
			 machinery for relax-reloc generation.  */
		      fixP->fx_tcbit = i.rex != 0;
		      if (i.base_reg
			  && (i.base_reg->reg_num == RegRip
			      || i.base_reg->reg_num == RegEip))
			fixP->fx_tcbit2 = 1;
		    }
		  else
		    fixP->fx_tcbit2 = 1;
		}
	    }
	}
    }
}
8039
/* Emit the immediate bytes, and any fixups/relocations they need,
   for every immediate operand of the current instruction.
   INSN_START_FRAG/INSN_START_OFF mark where the instruction began,
   for the _GLOBAL_OFFSET_TABLE_ addend computation (see the long
   comment below).  */

static void
output_imm (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      /* Skip SAE/RC Imm operand in EVEX.  They are already handled.  */
      if (i.rounding && (int) n == i.rounding->operand)
	continue;

      if (operand_type_check (i.types[n], imm))
	{
	  if (i.op[n].imms->X_op == O_constant)
	    {
	      /* Constant immediate: emit it directly.  */
	      int size = imm_size (n);
	      offsetT val;

	      val = offset_in_range (i.op[n].imms->X_add_number,
				     size);
	      p = frag_more (size);
	      md_number_to_chars (p, val, size);
	    }
	  else
	    {
	      /* Not absolute_section.
		 Need a 32-bit fixup (don't support 8bit
		 non-absolute imms).  Try to support other
		 sizes ...  */
	      enum bfd_reloc_code_real reloc_type;
	      int size = imm_size (n);
	      int sign;

	      /* A 32-bit immediate sign-extended to 64 bits wants a
		 signed relocation.  */
	      if (i.types[n].bitfield.imm32s
		  && (i.suffix == QWORD_MNEM_SUFFIX
		      || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
		sign = 1;
	      else
		sign = 0;

	      p = frag_more (size);
	      reloc_type = reloc (size, 0, sign, i.reloc[n]);

	      /*  This is tough to explain.  We end up with this one if we
	       * have operands that look like
	       * "_GLOBAL_OFFSET_TABLE_+[.-.L284]".  The goal here is to
	       * obtain the absolute address of the GOT, and it is strongly
	       * preferable from a performance point of view to avoid using
	       * a runtime relocation for this.  The actual sequence of
	       * instructions often look something like:
	       *
	       *	call	.L66
	       * .L66:
	       *	popl	%ebx
	       *	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
	       *
	       *	The call and pop essentially return the absolute address
	       * of the label .L66 and store it in %ebx.  The linker itself
	       * will ultimately change the first operand of the addl so
	       * that %ebx points to the GOT, but to keep things simple, the
	       * .o file must have this operand set so that it generates not
	       * the absolute address of .L66, but the absolute address of
	       * itself.  This allows the linker itself simply treat a GOTPC
	       * relocation as asking for a pcrel offset to the GOT to be
	       * added in, and the addend of the relocation is stored in the
	       * operand field for the instruction itself.
	       *
	       * Our job here is to fix the operand so that it would add
	       * the correct offset so that %ebx would point to itself.  The
	       * thing that is tricky is that .-.L66 will point to the
	       * beginning of the instruction, so we need to further modify
	       * the operand so that it will point to itself.  There are
	       * other cases where you have something like:
	       *
	       *	.long	$_GLOBAL_OFFSET_TABLE_+[.-.L66]
	       *
	       * and here no correction would be required.  Internally in
	       * the assembler we treat operands of this form as not being
	       * pcrel since the '.' is explicitly mentioned, and I wonder
	       * whether it would simplify matters to do it this way.  Who
	       * knows.  In earlier versions of the PIC patches, the
	       * pcrel_adjust field was used to store the correction, but
	       * since the expression is not pcrel, I felt it would be
	       * confusing to do it this way.  */

	      if ((reloc_type == BFD_RELOC_32
		   || reloc_type == BFD_RELOC_X86_64_32S
		   || reloc_type == BFD_RELOC_64)
		  && GOT_symbol
		  && GOT_symbol == i.op[n].imms->X_add_symbol
		  && (i.op[n].imms->X_op == O_symbol
		      || (i.op[n].imms->X_op == O_add
			  && ((symbol_get_value_expression
			       (i.op[n].imms->X_op_symbol)->X_op)
			      == O_subtract))))
		{
		  offsetT add;

		  /* ADD = distance from the start of the insn to this
		     immediate field, accumulated across frags.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    reloc_type = BFD_RELOC_386_GOTPC;
		  else if (size == 4)
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		  else if (size == 8)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  i.op[n].imms->X_add_number += add;
		}
	      fix_new_exp (frag_now, p - frag_now->fr_literal, size,
			   i.op[n].imms, 0, reloc_type);
	    }
	}
    }
}
8166 \f
/* x86_cons_fix_new is called via the expression parsing code when a
   reloc is needed.  We use this hook to get the correct .got reloc.
   cons_sign is the `sign' argument passed to reloc () there; it is -1
   by default and is temporarily set to 1 by signed_cons for the
   .sword/.slong style pseudo-ops.  */
static int cons_sign = -1;
8170
8171 void
8172 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
8173 expressionS *exp, bfd_reloc_code_real_type r)
8174 {
8175 r = reloc (len, 0, cons_sign, r);
8176
8177 #ifdef TE_PE
8178 if (exp->X_op == O_secrel)
8179 {
8180 exp->X_op = O_symbol;
8181 r = BFD_RELOC_32_SECREL;
8182 }
8183 #endif
8184
8185 fix_new_exp (frag, off, len, exp, 0, r);
8186 }
8187
8188 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
8189 purpose of the `.dc.a' internal pseudo-op. */
8190
8191 int
8192 x86_address_bytes (void)
8193 {
8194 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
8195 return 4;
8196 return stdoutput->arch_info->bits_per_address / 8;
8197 }
8198
8199 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
8200 || defined (LEX_AT)
8201 # define lex_got(reloc, adjust, types) NULL
8202 #else
8203 /* Parse operands of the form
8204 <symbol>@GOTOFF+<nnn>
8205 and similar .plt or .got references.
8206
8207 If we find one, set up the correct relocation in RELOC and copy the
8208 input string, minus the `@GOTOFF' into a malloc'd buffer for
8209 parsing by the calling routine. Return this buffer, and if ADJUST
8210 is non-null set it to the length of the string we removed from the
8211 input line. Otherwise return NULL. */
static char *
lex_got (enum bfd_reloc_code_real *rel,
	 int *adjust,
	 i386_operand_type *types)
{
  /* Some of the relocations depend on the size of what field is to
     be relocated.  But in our callers i386_immediate and i386_displacement
     we don't yet know the operand size (this will be set by insn
     matching).  Hence we record the word32 relocation here,
     and adjust the reloc according to the real size in reloc().  */
  static const struct {
    const char *str;
    int len;
    /* rel[0] is the 32-bit reloc, rel[1] the 64-bit one; a dummy
       entry means the suffix is unsupported for that object size.  */
    const enum bfd_reloc_code_real rel[2];
    /* Operand types the 64-bit reloc may attach to.  */
    const i386_operand_type types64;
  } gotrel[] = {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    { STRING_COMMA_LEN ("SIZE"),      { BFD_RELOC_SIZE32,
					BFD_RELOC_SIZE32 },
      OPERAND_TYPE_IMM32_64 },
#endif
    { STRING_COMMA_LEN ("PLTOFF"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_PLTOFF64 },
      OPERAND_TYPE_IMM64 },
    { STRING_COMMA_LEN ("PLT"),      { BFD_RELOC_386_PLT32,
				       BFD_RELOC_X86_64_PLT32 },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTPLT"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPLT64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTOFF"),   { BFD_RELOC_386_GOTOFF,
				       BFD_RELOC_X86_64_GOTOFF64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPCREL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSGD"),    { BFD_RELOC_386_TLS_GD,
				       BFD_RELOC_X86_64_TLSGD },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSLDM"),   { BFD_RELOC_386_TLS_LDM,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("TLSLD"),    { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_TLSLD },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
				       BFD_RELOC_X86_64_GOTTPOFF },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TPOFF"),    { BFD_RELOC_386_TLS_LE_32,
				       BFD_RELOC_X86_64_TPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("NTPOFF"),   { BFD_RELOC_386_TLS_LE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("DTPOFF"),   { BFD_RELOC_386_TLS_LDO_32,
				       BFD_RELOC_X86_64_DTPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("GOT"),      { BFD_RELOC_386_GOT32,
				       BFD_RELOC_X86_64_GOT32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32 },
    { STRING_COMMA_LEN ("TLSDESC"),  { BFD_RELOC_386_TLS_GOTDESC,
				       BFD_RELOC_X86_64_GOTPC32_TLSDESC },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSCALL"),  { BFD_RELOC_386_TLS_DESC_CALL,
				       BFD_RELOC_X86_64_TLSDESC_CALL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
  };
  char *cp;
  unsigned int j;

#if defined (OBJ_MAYBE_ELF)
  if (!IS_ELF)
    return NULL;
#endif

  /* Find the '@' that introduces the reloc suffix; bail out if the
     operand ends first.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;
      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      /* Every suffix but @SIZE (entry 0) implies the GOT.  */
	      if (j != 0 && GOT_symbol == NULL)
		GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = XNEWVEC (char, first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@GOTOFF1 will be detected.  */
		tmpbuf[first++] = ' ';
	      else
		/* Increment length by 1 if the relocation token is
		   removed.  */
		len++;
	      if (adjust)
		*adjust = len;
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
8362 #endif
8363
8364 #ifdef TE_PE
8365 #ifdef lex_got
8366 #undef lex_got
8367 #endif
8368 /* Parse operands of the form
8369 <symbol>@SECREL32+<nnn>
8370
8371 If we find one, set up the correct relocation in RELOC and copy the
8372 input string, minus the `@SECREL32' into a malloc'd buffer for
8373 parsing by the calling routine. Return this buffer, and if ADJUST
8374 is non-null set it to the length of the string we removed from the
8375 input line. Otherwise return NULL.
8376
8377 This function is copied from the ELF version above adjusted for PE targets. */
8378
static char *
lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
	 int *adjust ATTRIBUTE_UNUSED,
	 i386_operand_type *types)
{
  /* Table of reloc suffixes understood on PE; only @SECREL32 here.
     Layout matches the ELF lex_got table above.  */
  static const struct
  {
    const char *str;
    int len;
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  }
  gotrel[] =
  {
    { STRING_COMMA_LEN ("SECREL32"),    { BFD_RELOC_32_SECREL,
					  BFD_RELOC_32_SECREL },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
  };

  char *cp;
  unsigned j;

  /* Find the '@' that introduces the reloc suffix; bail out if the
     operand ends first.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;

      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];
	      if (adjust)
		*adjust = len;

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = XNEWVEC (char, first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@SECREL321 will be detected.  */
		tmpbuf[first++] = ' ';
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
8464
8465 #endif /* TE_PE */
8466
/* Parse an expression for a data directive (.long etc.), handling
   @GOTOFF-style reloc suffixes.  EXP receives the parsed expression;
   SIZE is the directive's data size in bytes.  Returns the relocation
   implied by any suffix found, or NO_RELOC.  */

bfd_reloc_code_real_type
x86_cons (expressionS *exp, int size)
{
  bfd_reloc_code_real_type got_reloc = NO_RELOC;

  /* NOTE(review): the sign of intel_syntax is flipped around the
     expression parse and restored below — it appears to act as a
     "parsing data, not an insn operand" flag for the Intel-syntax
     code; confirm against i386_intel_simplify/TC_PARSE_CONS uses.  */
  intel_syntax = -intel_syntax;

  exp->X_md = 0;
  if (size == 4 || (object_64bit && size == 8))
    {
      /* Handle @GOTOFF and the like in an expression.  */
      char *save;
      char *gotfree_input_line;
      int adjust = 0;

      save = input_line_pointer;
      /* lex_got strips the @SUFFIX and hands back a copy of the line
	 without it; parse from that copy if one was made.  */
      gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
      if (gotfree_input_line)
	input_line_pointer = gotfree_input_line;

      expression (exp);

      if (gotfree_input_line)
	{
	  /* expression () has merrily parsed up to the end of line,
	     or a comma - in the wrong buffer.  Transfer how far
	     input_line_pointer has moved to the right buffer.  */
	  input_line_pointer = (save
				+ (input_line_pointer - gotfree_input_line)
				+ adjust);
	  free (gotfree_input_line);
	  if (exp->X_op == O_constant
	      || exp->X_op == O_absent
	      || exp->X_op == O_illegal
	      || exp->X_op == O_register
	      || exp->X_op == O_big)
	    {
	      /* A reloc suffix only makes sense on a symbolic
		 expression; diagnose anything else.  */
	      char c = *input_line_pointer;
	      *input_line_pointer = 0;
	      as_bad (_("missing or invalid expression `%s'"), save);
	      *input_line_pointer = c;
	    }
	}
    }
  else
    expression (exp);

  intel_syntax = -intel_syntax;

  if (intel_syntax)
    i386_intel_simplify (exp);

  return got_reloc;
}
8521
8522 static void
8523 signed_cons (int size)
8524 {
8525 if (flag_code == CODE_64BIT)
8526 cons_sign = 1;
8527 cons (size);
8528 cons_sign = -1;
8529 }
8530
8531 #ifdef TE_PE
8532 static void
8533 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
8534 {
8535 expressionS exp;
8536
8537 do
8538 {
8539 expression (&exp);
8540 if (exp.X_op == O_symbol)
8541 exp.X_op = O_secrel;
8542
8543 emit_expr (&exp, 4);
8544 }
8545 while (*input_line_pointer++ == ',');
8546
8547 input_line_pointer--;
8548 demand_empty_rest_of_line ();
8549 }
8550 #endif
8551
8552 /* Handle Vector operations. */
8553
/* Parse AVX512 vector operation decorators ("{1to<n>}" broadcast,
   "{%k<n>}" write mask, "{z}" zeroing) starting at OP_STRING and not
   going past OP_END (NULL means "until NUL").  Records the results in
   i.broadcast / i.mask.  Returns the updated parse position, or NULL
   after issuing a diagnostic.  */

static char *
check_VecOperations (char *op_string, char *op_end)
{
  const reg_entry *mask;
  const char *saved;
  char *end_op;

  while (*op_string
	 && (op_end == NULL || op_string < op_end))
    {
      /* Remember the start of this decorator for error messages.  */
      saved = op_string;
      if (*op_string == '{')
	{
	  op_string++;

	  /* Check broadcasts.  */
	  if (strncmp (op_string, "1to", 3) == 0)
	    {
	      int bcst_type;

	      if (i.broadcast)
		goto duplicated_vec_op;

	      /* Accept 1to2, 1to4, 1to8 and 1to16.  */
	      op_string += 3;
	      if (*op_string == '8')
		bcst_type = 8;
	      else if (*op_string == '4')
		bcst_type = 4;
	      else if (*op_string == '2')
		bcst_type = 2;
	      else if (*op_string == '1'
		       && *(op_string+1) == '6')
		{
		  bcst_type = 16;
		  op_string++;
		}
	      else
		{
		  as_bad (_("Unsupported broadcast: `%s'"), saved);
		  return NULL;
		}
	      op_string++;

	      broadcast_op.type = bcst_type;
	      broadcast_op.operand = this_operand;
	      i.broadcast = &broadcast_op;
	    }
	  /* Check masking operation.  */
	  else if ((mask = parse_register (op_string, &end_op)) != NULL)
	    {
	      /* k0 can't be used for write mask.  */
	      if (!mask->reg_type.bitfield.regmask || mask->reg_num == 0)
		{
		  as_bad (_("`%s%s' can't be used for write mask"),
			  register_prefix, mask->reg_name);
		  return NULL;
		}

	      if (!i.mask)
		{
		  /* First mask decorator on this operand.  */
		  mask_op.mask = mask;
		  mask_op.zeroing = 0;
		  mask_op.operand = this_operand;
		  i.mask = &mask_op;
		}
	      else
		{
		  /* "{z}" was seen first; now fill in the mask reg.  */
		  if (i.mask->mask)
		    goto duplicated_vec_op;

		  i.mask->mask = mask;

		  /* Only "{z}" is allowed here.  No need to check
		     zeroing mask explicitly.  */
		  if (i.mask->operand != this_operand)
		    {
		      as_bad (_("invalid write mask `%s'"), saved);
		      return NULL;
		    }
		}

	      op_string = end_op;
	    }
	  /* Check zeroing-flag for masking operation.  */
	  else if (*op_string == 'z')
	    {
	      if (!i.mask)
		{
		  /* "{z}" seen before any "{%k<n>}".  */
		  mask_op.mask = NULL;
		  mask_op.zeroing = 1;
		  mask_op.operand = this_operand;
		  i.mask = &mask_op;
		}
	      else
		{
		  if (i.mask->zeroing)
		    {
		    duplicated_vec_op:
		      as_bad (_("duplicated `%s'"), saved);
		      return NULL;
		    }

		  i.mask->zeroing = 1;

		  /* Only "{%k}" is allowed here.  No need to check mask
		     register explicitly.  */
		  if (i.mask->operand != this_operand)
		    {
		      as_bad (_("invalid zeroing-masking `%s'"),
			      saved);
		      return NULL;
		    }
		}

	      op_string++;
	    }
	  else
	    goto unknown_vec_op;

	  if (*op_string != '}')
	    {
	      as_bad (_("missing `}' in `%s'"), saved);
	      return NULL;
	    }
	  op_string++;

	  /* Strip whitespace since the addition of pseudo prefixes
	     changed how the scrubber treats '{'.  */
	  if (is_space_char (*op_string))
	    ++op_string;

	  continue;
	}
    unknown_vec_op:
      /* We don't know this one.  */
      as_bad (_("unknown vector operation: `%s'"), saved);
      return NULL;
    }

  if (i.mask && i.mask->zeroing && !i.mask->mask)
    {
      as_bad (_("zeroing-masking only allowed with write mask"));
      return NULL;
    }

  return op_string;
}
8701
/* Parse the immediate operand starting at IMM_START for the current
   instruction operand (this_operand).  Records the expression in
   i.op[this_operand].imms and any reloc suffix in i.reloc.  Returns
   nonzero on success, 0 after a diagnostic.  */

static int
i386_immediate (char *imm_start)
{
  char *save_input_line_pointer;
  char *gotfree_input_line;
  segT exp_seg = 0;
  expressionS *exp;
  i386_operand_type types;

  /* Start with all operand type bits set; lex_got may narrow them.  */
  operand_type_set (&types, ~0);

  if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
    {
      as_bad (_("at most %d immediate operands are allowed"),
	      MAX_IMMEDIATE_OPERANDS);
      return 0;
    }

  exp = &im_expressions[i.imm_operands++];
  i.op[this_operand].imms = exp;

  if (is_space_char (*imm_start))
    ++imm_start;

  /* Point the expression parser at our operand text.  */
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = imm_start;

  /* Strip any @GOTOFF-style suffix; parse from the stripped copy.  */
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();

  /* Handle vector operations.  */
  if (*input_line_pointer == '{')
    {
      input_line_pointer = check_VecOperations (input_line_pointer,
						NULL);
      if (input_line_pointer == NULL)
	return 0;
    }

  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);

  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A reloc suffix makes a constant or register meaningless.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  return i386_finalize_immediate (exp_seg, exp, types, imm_start);
}
8760
/* Validate the parsed immediate expression EXP (from segment EXP_SEG)
   and set the operand type bits for this_operand.  TYPES restricts the
   types a reloc suffix allows; IMM_START is the original operand text
   for diagnostics (may be NULL to suppress them).  Returns nonzero on
   success, 0 after a diagnostic.  */

static int
i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
			 i386_operand_type types, const char *imm_start)
{
  if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
    {
      if (imm_start)
	as_bad (_("missing or invalid immediate expression `%s'"),
		imm_start);
      return 0;
    }
  else if (exp->X_op == O_constant)
    {
      /* Size it properly later.  */
      i.types[this_operand].bitfield.imm64 = 1;
      /* If not 64bit, sign extend val.  */
      if (flag_code != CODE_64BIT
	  && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
	exp->X_add_number
	  = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
    }
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
	   && exp_seg != absolute_section
	   && exp_seg != text_section
	   && exp_seg != data_section
	   && exp_seg != bss_section
	   && exp_seg != undefined_section
	   && !bfd_is_com_section (exp_seg))
    {
      as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
      return 0;
    }
#endif
  else if (!intel_syntax && exp_seg == reg_section)
    {
      if (imm_start)
	as_bad (_("illegal immediate register operand %s"), imm_start);
      return 0;
    }
  else
    {
      /* This is an address.  The size of the address will be
	 determined later, depending on destination register,
	 suffix, or the default for the section.  */
      i.types[this_operand].bitfield.imm8 = 1;
      i.types[this_operand].bitfield.imm16 = 1;
      i.types[this_operand].bitfield.imm32 = 1;
      i.types[this_operand].bitfield.imm32s = 1;
      i.types[this_operand].bitfield.imm64 = 1;
      /* Narrow by whatever a reloc suffix permitted (TYPES).  */
      i.types[this_operand] = operand_type_and (i.types[this_operand],
						types);
    }

  return 1;
}
8817
8818 static char *
8819 i386_scale (char *scale)
8820 {
8821 offsetT val;
8822 char *save = input_line_pointer;
8823
8824 input_line_pointer = scale;
8825 val = get_absolute_expression ();
8826
8827 switch (val)
8828 {
8829 case 1:
8830 i.log2_scale_factor = 0;
8831 break;
8832 case 2:
8833 i.log2_scale_factor = 1;
8834 break;
8835 case 4:
8836 i.log2_scale_factor = 2;
8837 break;
8838 case 8:
8839 i.log2_scale_factor = 3;
8840 break;
8841 default:
8842 {
8843 char sep = *input_line_pointer;
8844
8845 *input_line_pointer = '\0';
8846 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
8847 scale);
8848 *input_line_pointer = sep;
8849 input_line_pointer = save;
8850 return NULL;
8851 }
8852 }
8853 if (i.log2_scale_factor != 0 && i.index_reg == 0)
8854 {
8855 as_warn (_("scale factor of %d without an index register"),
8856 1 << i.log2_scale_factor);
8857 i.log2_scale_factor = 0;
8858 }
8859 scale = input_line_pointer;
8860 input_line_pointer = save;
8861 return scale;
8862 }
8863
/* Parse the displacement part of a memory operand or branch target,
   lying between DISP_START and DISP_END, into the current operand
   (this_operand).  Records the expression in i.op[].disps and ORs the
   set of acceptable displacement widths into i.types[].  Returns
   non-zero on success, 0 after emitting a diagnostic.  */

static int
i386_displacement (char *disp_start, char *disp_end)
{
  expressionS *exp;
  segT exp_seg = 0;
  char *save_input_line_pointer;
  char *gotfree_input_line;
  int override;
  i386_operand_type bigdisp, types = anydisp;
  int ret;

  if (i.disp_operands == MAX_MEMORY_OPERANDS)
    {
      as_bad (_("at most %d displacement operands are allowed"),
	      MAX_MEMORY_OPERANDS);
      return 0;
    }

  /* Work out which displacement widths are acceptable.  Indirect
     jumps and non-branch insns use the address size; PC-relative
     branches use the data size (see the else branch below).  */
  operand_type_set (&bigdisp, 0);
  if ((i.types[this_operand].bitfield.jumpabsolute)
      || (!current_templates->start->opcode_modifier.jump
	  && !current_templates->start->opcode_modifier.jumpdword))
    {
      bigdisp.bitfield.disp32 = 1;
      /* An address-size prefix flips the default width.  */
      override = (i.prefix[ADDR_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (!override)
	    {
	      bigdisp.bitfield.disp32s = 1;
	      bigdisp.bitfield.disp64 = 1;
	    }
	}
      else if ((flag_code == CODE_16BIT) ^ override)
	{
	  bigdisp.bitfield.disp32 = 0;
	  bigdisp.bitfield.disp16 = 1;
	}
    }
  else
    {
      /* For PC-relative branches, the width of the displacement
	 is dependent upon data size, not address size.  */
      override = (i.prefix[DATA_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (override || i.suffix == WORD_MNEM_SUFFIX)
	    bigdisp.bitfield.disp16 = 1;
	  else
	    {
	      bigdisp.bitfield.disp32 = 1;
	      bigdisp.bitfield.disp32s = 1;
	    }
	}
      else
	{
	  /* A non-default mnemonic suffix acts like a data-size
	     prefix here.  */
	  if (!override)
	    override = (i.suffix == (flag_code != CODE_16BIT
				     ? WORD_MNEM_SUFFIX
				     : LONG_MNEM_SUFFIX));
	  bigdisp.bitfield.disp32 = 1;
	  if ((flag_code == CODE_16BIT) ^ override)
	    {
	      bigdisp.bitfield.disp32 = 0;
	      bigdisp.bitfield.disp16 = 1;
	    }
	}
    }
  i.types[this_operand] = operand_type_or (i.types[this_operand],
					   bigdisp);

  exp = &disp_expressions[i.disp_operands];
  i.op[this_operand].disps = exp;
  i.disp_operands++;
  /* Point the expression parser at the displacement text, NUL
     terminating it temporarily.  */
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = disp_start;
  END_STRING_AND_SAVE (disp_end);

#ifndef GCC_ASM_O_HACK
#define GCC_ASM_O_HACK 0
#endif
#if GCC_ASM_O_HACK
  END_STRING_AND_SAVE (disp_end + 1);
  if (i.types[this_operand].bitfield.baseIndex
      && displacement_string_end[-1] == '+')
    {
      /* This hack is to avoid a warning when using the "o"
	 constraint within gcc asm statements.
	 For instance:

	 #define _set_tssldt_desc(n,addr,limit,type) \
	 __asm__ __volatile__ ( \
	 "movw %w2,%0\n\t" \
	 "movw %w1,2+%0\n\t" \
	 "rorl $16,%1\n\t" \
	 "movb %b1,4+%0\n\t" \
	 "movb %4,5+%0\n\t" \
	 "movb $0,6+%0\n\t" \
	 "movb %h1,7+%0\n\t" \
	 "rorl $16,%1" \
	 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))

	 This works great except that the output assembler ends
	 up looking a bit weird if it turns out that there is
	 no offset.  You end up producing code that looks like:

	 #APP
	 movw $235,(%eax)
	 movw %dx,2+(%eax)
	 rorl $16,%edx
	 movb %dl,4+(%eax)
	 movb $137,5+(%eax)
	 movb $0,6+(%eax)
	 movb %dh,7+(%eax)
	 rorl $16,%edx
	 #NO_APP

	 So here we provide the missing zero.  */

      *displacement_string_end = '0';
    }
#endif
  /* Strip any @GOT-style relocation operator; lex_got returns a copy
     of the line with the operator removed and narrows TYPES.  */
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();
  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);
#if GCC_ASM_O_HACK
  RESTORE_END_STRING (disp_end + 1);
#endif
  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A constant or register makes no sense with a relocation
	 operator; force a diagnostic in the finalize step.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);

  RESTORE_END_STRING (disp_end);

  return ret;
}
9013
9014 static int
9015 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
9016 i386_operand_type types, const char *disp_start)
9017 {
9018 i386_operand_type bigdisp;
9019 int ret = 1;
9020
9021 /* We do this to make sure that the section symbol is in
9022 the symbol table. We will ultimately change the relocation
9023 to be relative to the beginning of the section. */
9024 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
9025 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
9026 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
9027 {
9028 if (exp->X_op != O_symbol)
9029 goto inv_disp;
9030
9031 if (S_IS_LOCAL (exp->X_add_symbol)
9032 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
9033 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
9034 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
9035 exp->X_op = O_subtract;
9036 exp->X_op_symbol = GOT_symbol;
9037 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
9038 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
9039 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
9040 i.reloc[this_operand] = BFD_RELOC_64;
9041 else
9042 i.reloc[this_operand] = BFD_RELOC_32;
9043 }
9044
9045 else if (exp->X_op == O_absent
9046 || exp->X_op == O_illegal
9047 || exp->X_op == O_big)
9048 {
9049 inv_disp:
9050 as_bad (_("missing or invalid displacement expression `%s'"),
9051 disp_start);
9052 ret = 0;
9053 }
9054
9055 else if (flag_code == CODE_64BIT
9056 && !i.prefix[ADDR_PREFIX]
9057 && exp->X_op == O_constant)
9058 {
9059 /* Since displacement is signed extended to 64bit, don't allow
9060 disp32 and turn off disp32s if they are out of range. */
9061 i.types[this_operand].bitfield.disp32 = 0;
9062 if (!fits_in_signed_long (exp->X_add_number))
9063 {
9064 i.types[this_operand].bitfield.disp32s = 0;
9065 if (i.types[this_operand].bitfield.baseindex)
9066 {
9067 as_bad (_("0x%lx out range of signed 32bit displacement"),
9068 (long) exp->X_add_number);
9069 ret = 0;
9070 }
9071 }
9072 }
9073
9074 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9075 else if (exp->X_op != O_constant
9076 && OUTPUT_FLAVOR == bfd_target_aout_flavour
9077 && exp_seg != absolute_section
9078 && exp_seg != text_section
9079 && exp_seg != data_section
9080 && exp_seg != bss_section
9081 && exp_seg != undefined_section
9082 && !bfd_is_com_section (exp_seg))
9083 {
9084 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
9085 ret = 0;
9086 }
9087 #endif
9088
9089 /* Check if this is a displacement only operand. */
9090 bigdisp = i.types[this_operand];
9091 bigdisp.bitfield.disp8 = 0;
9092 bigdisp.bitfield.disp16 = 0;
9093 bigdisp.bitfield.disp32 = 0;
9094 bigdisp.bitfield.disp32s = 0;
9095 bigdisp.bitfield.disp64 = 0;
9096 if (operand_type_all_zero (&bigdisp))
9097 i.types[this_operand] = operand_type_and (i.types[this_operand],
9098 types);
9099
9100 return ret;
9101 }
9102
9103 /* Return the active addressing mode, taking address override and
9104 registers forming the address into consideration. Update the
9105 address override prefix if necessary. */
9106
static enum flag_code
i386_addressing_mode (void)
{
  enum flag_code addr_mode;

  /* An explicit address-size prefix flips between 16- and 32-bit
     addressing (in 64-bit mode the prefix selects 32-bit).  */
  if (i.prefix[ADDR_PREFIX])
    addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
  else
    {
      addr_mode = flag_code;

#if INFER_ADDR_PREFIX
      /* Only done for the first memory operand; a second one must use
	 the same address size.  */
      if (i.mem_operands == 0)
	{
	  /* Infer address prefix from the first memory operand.  */
	  const reg_entry *addr_reg = i.base_reg;

	  if (addr_reg == NULL)
	    addr_reg = i.index_reg;

	  if (addr_reg)
	    {
	      /* %eip / %eiz force 32-bit addressing, as does any
		 32-bit base/index register.  */
	      if (addr_reg->reg_num == RegEip
		  || addr_reg->reg_num == RegEiz
		  || addr_reg->reg_type.bitfield.dword)
		addr_mode = CODE_32BIT;
	      else if (flag_code != CODE_64BIT
		       && addr_reg->reg_type.bitfield.word)
		addr_mode = CODE_16BIT;

	      if (addr_mode != flag_code)
		{
		  /* Synthesize the address-size prefix the register
		     choice implies.  */
		  i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
		  i.prefixes += 1;
		  /* Change the size of any displacement too.  At most one
		     of Disp16 or Disp32 is set.
		     FIXME.  There doesn't seem to be any real need for
		     separate Disp16 and Disp32 flags.  The same goes for
		     Imm16 and Imm32.  Removing them would probably clean
		     up the code quite a lot.  */
		  if (flag_code != CODE_64BIT
		      && (i.types[this_operand].bitfield.disp16
			  || i.types[this_operand].bitfield.disp32))
		    i.types[this_operand]
		      = operand_type_xor (i.types[this_operand], disp16_32);
		}
	    }
	}
#endif
    }

  return addr_mode;
}
9160
9161 /* Make sure the memory operand we've been dealt is valid.
9162 Return 1 on success, 0 on a failure. */
9163
/* Make sure the memory operand described by i.base_reg / i.index_reg /
   i.log2_scale_factor is valid for the current templates and
   addressing mode.  OPERAND_STRING is the source text, used only for
   diagnostics.  Returns 1 on success, 0 on failure.  */

static int
i386_index_check (const char *operand_string)
{
  const char *kind = "base/index";
  enum flag_code addr_mode = i386_addressing_mode ();

  if (current_templates->start->opcode_modifier.isstring
      && !current_templates->start->opcode_modifier.immext
      && (current_templates->end[-1].opcode_modifier.isstring
	  || i.mem_operands))
    {
      /* Memory operands of string insns are special in that they only allow
	 a single register (rDI, rSI, or rBX) as their memory address.  */
      const reg_entry *expected_reg;
      /* Indexed by addr_mode (16/32/64-bit); the inner index selects
	 rDI for ES-segment operands, rSI otherwise.  */
      static const char *di_si[][2] =
	{
	  { "esi", "edi" },
	  { "si", "di" },
	  { "rsi", "rdi" }
	};
      static const char *bx[] = { "ebx", "bx", "rbx" };

      kind = "string address";

      if (current_templates->start->opcode_modifier.repprefixok)
	{
	  /* Pick the operand type that tells us which of rSI/rDI this
	     memory operand must be.  */
	  i386_operand_type type = current_templates->end[-1].operand_types[0];

	  if (!type.bitfield.baseindex
	      || ((!i.mem_operands != !intel_syntax)
		  && current_templates->end[-1].operand_types[1]
		     .bitfield.baseindex))
	    type = current_templates->end[-1].operand_types[1];
	  expected_reg = hash_find (reg_hash,
				    di_si[addr_mode][type.bitfield.esseg]);

	}
      else
	/* Non-REP string insn (xlat): the address register is rBX.  */
	expected_reg = hash_find (reg_hash, bx[addr_mode]);

      if (i.base_reg != expected_reg
	  || i.index_reg
	  || operand_type_check (i.types[this_operand], disp))
	{
	  /* The second memory operand must have the same size as
	     the first one.  */
	  if (i.mem_operands
	      && i.base_reg
	      && !((addr_mode == CODE_64BIT
		    && i.base_reg->reg_type.bitfield.qword)
		   || (addr_mode == CODE_32BIT
		       ? i.base_reg->reg_type.bitfield.dword
		       : i.base_reg->reg_type.bitfield.word)))
	    goto bad_address;

	  /* Wrong register, but right size: warn and carry on, since
	     the programmer may be relying on the documented implicit
	     operand behavior.  */
	  as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
		   operand_string,
		   intel_syntax ? '[' : '(',
		   register_prefix,
		   expected_reg->reg_name,
		   intel_syntax ? ']' : ')');
	  return 1;
	}
      else
	return 1;

    bad_address:
      as_bad (_("`%s' is not a valid %s expression"),
	      operand_string, kind);
      return 0;
    }
  else
    {
      if (addr_mode != CODE_16BIT)
	{
	  /* 32-bit/64-bit checks.  */
	  if ((i.base_reg
	       && (addr_mode == CODE_64BIT
		   ? !i.base_reg->reg_type.bitfield.qword
		   : !i.base_reg->reg_type.bitfield.dword)
	       && (i.index_reg
		   || (i.base_reg->reg_num
		       != (addr_mode == CODE_64BIT ? RegRip : RegEip))))
	      || (i.index_reg
		  && !i.index_reg->reg_type.bitfield.xmmword
		  && !i.index_reg->reg_type.bitfield.ymmword
		  && !i.index_reg->reg_type.bitfield.zmmword
		  && ((addr_mode == CODE_64BIT
		       ? !(i.index_reg->reg_type.bitfield.qword
			   || i.index_reg->reg_num == RegRiz)
		       : !(i.index_reg->reg_type.bitfield.dword
			   || i.index_reg->reg_num == RegEiz))
		      || !i.index_reg->reg_type.bitfield.baseindex)))
	    goto bad_address;

	  /* bndmk, bndldx, and bndstx have special restrictions. */
	  if (current_templates->start->base_opcode == 0xf30f1b
	      || (current_templates->start->base_opcode & ~1) == 0x0f1a)
	    {
	      /* They cannot use RIP-relative addressing. */
	      if (i.base_reg && i.base_reg->reg_num == RegRip)
		{
		  as_bad (_("`%s' cannot be used here"), operand_string);
		  return 0;
		}

	      /* bndldx and bndstx ignore their scale factor. */
	      if (current_templates->start->base_opcode != 0xf30f1b
		  && i.log2_scale_factor)
		as_warn (_("register scaling is being ignored here"));
	    }
	}
      else
	{
	  /* 16-bit checks.  Only BX/BP may be a base and SI/DI an
	     index (reg_num < 6 vs >= 6), with no scale factor.  */
	  if ((i.base_reg
	       && (!i.base_reg->reg_type.bitfield.word
		   || !i.base_reg->reg_type.bitfield.baseindex))
	      || (i.index_reg
		  && (!i.index_reg->reg_type.bitfield.word
		      || !i.index_reg->reg_type.bitfield.baseindex
		      || !(i.base_reg
			   && i.base_reg->reg_num < 6
			   && i.index_reg->reg_num >= 6
			   && i.log2_scale_factor == 0))))
	    goto bad_address;
	}
    }
  return 1;
}
9294
9295 /* Handle vector immediates. */
9296
9297 static int
9298 RC_SAE_immediate (const char *imm_start)
9299 {
9300 unsigned int match_found, j;
9301 const char *pstr = imm_start;
9302 expressionS *exp;
9303
9304 if (*pstr != '{')
9305 return 0;
9306
9307 pstr++;
9308 match_found = 0;
9309 for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
9310 {
9311 if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
9312 {
9313 if (!i.rounding)
9314 {
9315 rc_op.type = RC_NamesTable[j].type;
9316 rc_op.operand = this_operand;
9317 i.rounding = &rc_op;
9318 }
9319 else
9320 {
9321 as_bad (_("duplicated `%s'"), imm_start);
9322 return 0;
9323 }
9324 pstr += RC_NamesTable[j].len;
9325 match_found = 1;
9326 break;
9327 }
9328 }
9329 if (!match_found)
9330 return 0;
9331
9332 if (*pstr++ != '}')
9333 {
9334 as_bad (_("Missing '}': '%s'"), imm_start);
9335 return 0;
9336 }
9337 /* RC/SAE immediate string should contain nothing more. */;
9338 if (*pstr != 0)
9339 {
9340 as_bad (_("Junk after '}': '%s'"), imm_start);
9341 return 0;
9342 }
9343
9344 exp = &im_expressions[i.imm_operands++];
9345 i.op[this_operand].imms = exp;
9346
9347 exp->X_op = O_constant;
9348 exp->X_add_number = 0;
9349 exp->X_add_symbol = (symbolS *) 0;
9350 exp->X_op_symbol = (symbolS *) 0;
9351
9352 i.types[this_operand].bitfield.imm8 = 1;
9353 return 1;
9354 }
9355
9356 /* Only string instructions can have a second memory operand, so
9357 reduce current_templates to just those if it contains any. */
9358 static int
9359 maybe_adjust_templates (void)
9360 {
9361 const insn_template *t;
9362
9363 gas_assert (i.mem_operands == 1);
9364
9365 for (t = current_templates->start; t < current_templates->end; ++t)
9366 if (t->opcode_modifier.isstring)
9367 break;
9368
9369 if (t < current_templates->end)
9370 {
9371 static templates aux_templates;
9372 bfd_boolean recheck;
9373
9374 aux_templates.start = t;
9375 for (; t < current_templates->end; ++t)
9376 if (!t->opcode_modifier.isstring)
9377 break;
9378 aux_templates.end = t;
9379
9380 /* Determine whether to re-check the first memory operand. */
9381 recheck = (aux_templates.start != current_templates->start
9382 || t != current_templates->end);
9383
9384 current_templates = &aux_templates;
9385
9386 if (recheck)
9387 {
9388 i.mem_operands = 0;
9389 if (i.memop1_string != NULL
9390 && i386_index_check (i.memop1_string) == 0)
9391 return 0;
9392 i.mem_operands = 1;
9393 }
9394 }
9395
9396 return 1;
9397 }
9398
9399 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
9400 on error. */
9401
/* Parse OPERAND_STRING (AT&T syntax) into the current operand of the
   global i386_insn structure `i' (slot this_operand).  Handles
   registers, segment overrides, immediates, RC/SAE pseudo operands
   and memory references (displacement, base, index, scale).  Returns
   zero on error.  */

static int
i386_att_operand (char *operand_string)
{
  const reg_entry *r;
  char *end_op;
  char *op_string = operand_string;

  if (is_space_char (*op_string))
    ++op_string;

  /* We check for an absolute prefix (differentiating,
     for example, 'jmp pc_relative_label' from 'jmp *absolute_label'.  */
  if (*op_string == ABSOLUTE_PREFIX)
    {
      ++op_string;
      if (is_space_char (*op_string))
	++op_string;
      i.types[this_operand].bitfield.jumpabsolute = 1;
    }

  /* Check if operand is a register.  */
  if ((r = parse_register (op_string, &end_op)) != NULL)
    {
      i386_operand_type temp;

      /* Check for a segment override by searching for ':' after a
	 segment register.  */
      op_string = end_op;
      if (is_space_char (*op_string))
	++op_string;
      if (*op_string == ':'
	  && (r->reg_type.bitfield.sreg2
	      || r->reg_type.bitfield.sreg3))
	{
	  /* Record the override; reg_num 0..5 maps to %es..%gs.  */
	  switch (r->reg_num)
	    {
	    case 0:
	      i.seg[i.mem_operands] = &es;
	      break;
	    case 1:
	      i.seg[i.mem_operands] = &cs;
	      break;
	    case 2:
	      i.seg[i.mem_operands] = &ss;
	      break;
	    case 3:
	      i.seg[i.mem_operands] = &ds;
	      break;
	    case 4:
	      i.seg[i.mem_operands] = &fs;
	      break;
	    case 5:
	      i.seg[i.mem_operands] = &gs;
	      break;
	    }

	  /* Skip the ':' and whitespace.  */
	  ++op_string;
	  if (is_space_char (*op_string))
	    ++op_string;

	  if (!is_digit_char (*op_string)
	      && !is_identifier_char (*op_string)
	      && *op_string != '('
	      && *op_string != ABSOLUTE_PREFIX)
	    {
	      as_bad (_("bad memory operand `%s'"), op_string);
	      return 0;
	    }
	  /* Handle case of %es:*foo.  */
	  if (*op_string == ABSOLUTE_PREFIX)
	    {
	      ++op_string;
	      if (is_space_char (*op_string))
		++op_string;
	      i.types[this_operand].bitfield.jumpabsolute = 1;
	    }
	  goto do_memory_reference;
	}

      /* Handle vector operations.  */
      if (*op_string == '{')
	{
	  op_string = check_VecOperations (op_string, NULL);
	  if (op_string == NULL)
	    return 0;
	}

      if (*op_string)
	{
	  as_bad (_("junk `%s' after register"), op_string);
	  return 0;
	}
      /* A plain register operand: record its type (minus BaseIndex,
	 which only matters inside a memory reference).  */
      temp = r->reg_type;
      temp.bitfield.baseindex = 0;
      i.types[this_operand] = operand_type_or (i.types[this_operand],
					       temp);
      i.types[this_operand].bitfield.unspecified = 0;
      i.op[this_operand].regs = r;
      i.reg_operands++;
    }
  else if (*op_string == REGISTER_PREFIX)
    {
      as_bad (_("bad register name `%s'"), op_string);
      return 0;
    }
  else if (*op_string == IMMEDIATE_PREFIX)
    {
      ++op_string;
      if (i.types[this_operand].bitfield.jumpabsolute)
	{
	  as_bad (_("immediate operand illegal with absolute jump"));
	  return 0;
	}
      if (!i386_immediate (op_string))
	return 0;
    }
  else if (RC_SAE_immediate (operand_string))
    {
      /* If it is a RC or SAE immediate, do nothing.  */
      ;
    }
  else if (is_digit_char (*op_string)
	   || is_identifier_char (*op_string)
	   || *op_string == '"'
	   || *op_string == '(')
    {
      /* This is a memory reference of some sort.  */
      char *base_string;

      /* Start and end of displacement string expression (if found).  */
      char *displacement_string_start;
      char *displacement_string_end;
      char *vop_start;

    do_memory_reference:
      if (i.mem_operands == 1 && !maybe_adjust_templates ())
	return 0;
      /* Only string insns may take two memory operands.  */
      if ((i.mem_operands == 1
	   && !current_templates->start->opcode_modifier.isstring)
	  || i.mem_operands == 2)
	{
	  as_bad (_("too many memory references for `%s'"),
		  current_templates->start->name);
	  return 0;
	}

      /* Check for base index form.  We detect the base index form by
	 looking for an ')' at the end of the operand, searching
	 for the '(' matching it, and finding a REGISTER_PREFIX or ','
	 after the '('.  */
      base_string = op_string + strlen (op_string);

      /* Handle vector operations.  */
      vop_start = strchr (op_string, '{');
      if (vop_start && vop_start < base_string)
	{
	  if (check_VecOperations (vop_start, base_string) == NULL)
	    return 0;
	  base_string = vop_start;
	}

      --base_string;
      if (is_space_char (*base_string))
	--base_string;

      /* If we only have a displacement, set-up for it to be parsed later.  */
      displacement_string_start = op_string;
      displacement_string_end = base_string + 1;

      if (*base_string == ')')
	{
	  char *temp_string;
	  unsigned int parens_balanced = 1;
	  /* We've already checked that the number of left & right ()'s are
	     equal, so this loop will not be infinite.  */
	  do
	    {
	      base_string--;
	      if (*base_string == ')')
		parens_balanced++;
	      if (*base_string == '(')
		parens_balanced--;
	    }
	  while (parens_balanced);

	  temp_string = base_string;

	  /* Skip past '(' and whitespace.  */
	  ++base_string;
	  if (is_space_char (*base_string))
	    ++base_string;

	  if (*base_string == ','
	      || ((i.base_reg = parse_register (base_string, &end_op))
		  != NULL))
	    {
	      /* The displacement (if any) ends at the '(' we found.  */
	      displacement_string_end = temp_string;

	      i.types[this_operand].bitfield.baseindex = 1;

	      if (i.base_reg)
		{
		  base_string = end_op;
		  if (is_space_char (*base_string))
		    ++base_string;
		}

	      /* There may be an index reg or scale factor here.  */
	      if (*base_string == ',')
		{
		  ++base_string;
		  if (is_space_char (*base_string))
		    ++base_string;

		  if ((i.index_reg = parse_register (base_string, &end_op))
		      != NULL)
		    {
		      base_string = end_op;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string == ',')
			{
			  ++base_string;
			  if (is_space_char (*base_string))
			    ++base_string;
			}
		      else if (*base_string != ')')
			{
			  as_bad (_("expecting `,' or `)' "
				    "after index register in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (*base_string == REGISTER_PREFIX)
		    {
		      end_op = strchr (base_string, ',');
		      if (end_op)
			*end_op = '\0';
		      as_bad (_("bad register name `%s'"), base_string);
		      return 0;
		    }

		  /* Check for scale factor.  */
		  if (*base_string != ')')
		    {
		      char *end_scale = i386_scale (base_string);

		      if (!end_scale)
			return 0;

		      base_string = end_scale;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string != ')')
			{
			  as_bad (_("expecting `)' "
				    "after scale factor in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (!i.index_reg)
		    {
		      as_bad (_("expecting index register or scale factor "
				"after `,'; got '%c'"),
			      *base_string);
		      return 0;
		    }
		}
	      else if (*base_string != ')')
		{
		  as_bad (_("expecting `,' or `)' "
			    "after base register in `%s'"),
			  operand_string);
		  return 0;
		}
	    }
	  else if (*base_string == REGISTER_PREFIX)
	    {
	      end_op = strchr (base_string, ',');
	      if (end_op)
		*end_op = '\0';
	      as_bad (_("bad register name `%s'"), base_string);
	      return 0;
	    }
	}

      /* If there's an expression beginning the operand, parse it,
	 assuming displacement_string_start and
	 displacement_string_end are meaningful.  */
      if (displacement_string_start != displacement_string_end)
	{
	  if (!i386_displacement (displacement_string_start,
				  displacement_string_end))
	    return 0;
	}

      /* Special case for (%dx) while doing input/output op.  */
      if (i.base_reg
	  && i.base_reg->reg_type.bitfield.inoutportreg
	  && i.index_reg == 0
	  && i.log2_scale_factor == 0
	  && i.seg[i.mem_operands] == 0
	  && !operand_type_check (i.types[this_operand], disp))
	{
	  i.types[this_operand] = i.base_reg->reg_type;
	  return 1;
	}

      if (i386_index_check (operand_string) == 0)
	return 0;
      i.types[this_operand].bitfield.mem = 1;
      /* Remember the first memory operand's text so it can be
	 re-checked if the template set is later narrowed.  */
      if (i.mem_operands == 0)
	i.memop1_string = xstrdup (operand_string);
      i.mem_operands++;
    }
  else
    {
      /* It's not a memory operand; argh!  */
      as_bad (_("invalid char %s beginning operand %d `%s'"),
	      output_invalid (*op_string),
	      this_operand + 1,
	      op_string);
      return 0;
    }
  return 1;			/* Normal return.  */
}
9731 \f
9732 /* Calculate the maximum variable size (i.e., excluding fr_fix)
9733 that an rs_machine_dependent frag may reach. */
9734
9735 unsigned int
9736 i386_frag_max_var (fragS *frag)
9737 {
9738 /* The only relaxable frags are for jumps.
9739 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
9740 gas_assert (frag->fr_type == rs_machine_dependent);
9741 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
9742 }
9743
9744 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9745 static int
9746 elf_symbol_resolved_in_segment_p (symbolS *fr_symbol, offsetT fr_var)
9747 {
9748 /* STT_GNU_IFUNC symbol must go through PLT. */
9749 if ((symbol_get_bfdsym (fr_symbol)->flags
9750 & BSF_GNU_INDIRECT_FUNCTION) != 0)
9751 return 0;
9752
9753 if (!S_IS_EXTERNAL (fr_symbol))
9754 /* Symbol may be weak or local. */
9755 return !S_IS_WEAK (fr_symbol);
9756
9757 /* Global symbols with non-default visibility can't be preempted. */
9758 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol)) != STV_DEFAULT)
9759 return 1;
9760
9761 if (fr_var != NO_RELOC)
9762 switch ((enum bfd_reloc_code_real) fr_var)
9763 {
9764 case BFD_RELOC_386_PLT32:
9765 case BFD_RELOC_X86_64_PLT32:
9766 /* Symbol with PLT relocation may be preempted. */
9767 return 0;
9768 default:
9769 abort ();
9770 }
9771
9772 /* Global symbols with default visibility in a shared library may be
9773 preempted by another definition. */
9774 return !shared;
9775 }
9776 #endif
9777
9778 /* md_estimate_size_before_relax()
9779
9780 Called just before relax() for rs_machine_dependent frags. The x86
9781 assembler uses these frags to handle variable size jump
9782 instructions.
9783
9784 Any symbol that is now undefined will not become defined.
9785 Return the correct fr_subtype in the frag.
9786 Return the initial "guess for variable size of frag" to caller.
9787 The guess is actually the growth beyond the fixed part. Whatever
9788 we do to grow the fixed or variable part contributes to our
9789 returned value. */
9790
int
md_estimate_size_before_relax (fragS *fragP, segT segment)
{
  /* We've already got fragP->fr_subtype right;  all we have to do is
     check for un-relaxable symbols.  On an ELF system, we can't relax
     an externally visible symbol, because it may be overridden by a
     shared library.  */
  if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      || (IS_ELF
	  && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol,
						fragP->fr_var))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      || (OUTPUT_FLAVOR == bfd_target_coff_flavour
	  && S_IS_WEAK (fragP->fr_symbol))
#endif
      )
    {
      /* Symbol is undefined in this segment, or we need to keep a
	 reloc so that weak symbols can be overridden.  Convert the
	 frag to its largest form now and emit a fixup instead of
	 relaxing further.  */
      int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
      enum bfd_reloc_code_real reloc_type;
      unsigned char *opcode;
      int old_fr_fix;

      /* fr_var carries an explicit relocation override if one was
	 requested (e.g. branch@plt).  */
      if (fragP->fr_var != NO_RELOC)
	reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
      else if (size == 2)
	reloc_type = BFD_RELOC_16_PCREL;
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      else if (need_plt32_p (fragP->fr_symbol))
	reloc_type = BFD_RELOC_X86_64_PLT32;
#endif
      else
	reloc_type = BFD_RELOC_32_PCREL;

      old_fr_fix = fragP->fr_fix;
      opcode = (unsigned char *) fragP->fr_opcode;

      switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
	{
	case UNCOND_JUMP:
	  /* Make jmp (0xeb) a (d)word displacement jump.  */
	  opcode[0] = 0xe9;
	  fragP->fr_fix += size;
	  fix_new (fragP, old_fr_fix, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	case COND_JUMP86:
	  if (size == 2
	      && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
	    {
	      /* Negate the condition, and branch past an
		 unconditional jump.  */
	      opcode[0] ^= 1;
	      opcode[1] = 3;
	      /* Insert an unconditional jump.  */
	      opcode[2] = 0xe9;
	      /* We added two extra opcode bytes, and have a two byte
		 offset.  */
	      fragP->fr_fix += 2 + 2;
	      fix_new (fragP, old_fr_fix + 2, 2,
		       fragP->fr_symbol,
		       fragP->fr_offset, 1,
		       reloc_type);
	      break;
	    }
	  /* Fall through.  */

	case COND_JUMP:
	  if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
	    {
	      /* Promotion disabled: keep the byte-displacement form
		 and emit a signed 8-bit PC-relative fixup.  */
	      fixS *fixP;

	      fragP->fr_fix += 1;
	      fixP = fix_new (fragP, old_fr_fix, 1,
			      fragP->fr_symbol,
			      fragP->fr_offset, 1,
			      BFD_RELOC_8_PCREL);
	      fixP->fx_signed = 1;
	      break;
	    }

	  /* This changes the byte-displacement jump 0x7N
	     to the (d)word-displacement jump 0x0f,0x8N.  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  /* We've added an opcode byte.  */
	  fragP->fr_fix += 1 + size;
	  fix_new (fragP, old_fr_fix + 1, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
      /* The frag is fully fixed now; nothing left to relax.  */
      frag_wane (fragP);
      return fragP->fr_fix - old_fr_fix;
    }

  /* Guess size depending on current relax state.  Initially the relax
     state will correspond to a short jump and we return 1, because
     the variable part of the frag (the branch offset) is one byte
     long.  However, we can relax a section more than once and in that
     case we must either set fr_subtype back to the unrelaxed state,
     or return the value for the appropriate branch.  */
  return md_relax_table[fragP->fr_subtype].rlx_length;
}
9906
9907 /* Called after relax() is finished.
9908
9909 In: Address of frag.
9910 fr_type == rs_machine_dependent.
9911 fr_subtype is what the address relaxed to.
9912
9913 Out: Any fixSs and constants are set up.
9914 Caller will turn frag into a ".space 0". */
9915
void
md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
		 fragS *fragP)
{
  unsigned char *opcode;
  unsigned char *where_to_put_displacement = NULL;
  offsetT target_address;
  offsetT opcode_address;
  unsigned int extension = 0;
  offsetT displacement_from_opcode_start;

  opcode = (unsigned char *) fragP->fr_opcode;

  /* Address we want to reach in file space.  */
  target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;

  /* Address opcode resides at in file space.  */
  opcode_address = fragP->fr_address + fragP->fr_fix;

  /* Displacement from opcode start to fill into instruction.  */
  displacement_from_opcode_start = target_address - opcode_address;

  if ((fragP->fr_subtype & BIG) == 0)
    {
      /* Don't have to change opcode.  */
      extension = 1;		/* 1 opcode + 1 displacement  */
      where_to_put_displacement = &opcode[1];
    }
  else
    {
      if (no_cond_jump_promotion
	  && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
	as_warn_where (fragP->fr_file, fragP->fr_line,
		       _("long jump required"));

      /* Rewrite the opcode to the wide form the relaxed state calls
	 for; each case mirrors md_estimate_size_before_relax.  */
      switch (fragP->fr_subtype)
	{
	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
	  extension = 4;	/* 1 opcode + 4 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
	  extension = 2;	/* 1 opcode + 2 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG):
	case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
	  extension = 5;	/* 2 opcode + 4 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
	  extension = 3;	/* 2 opcode + 2 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
	  /* Negated conditional jump over an unconditional jmp with a
	     16-bit displacement (no 0x0f,0x8N form in 16-bit code on
	     an 8086).  */
	  extension = 4;
	  opcode[0] ^= 1;
	  opcode[1] = 3;
	  opcode[2] = 0xe9;
	  where_to_put_displacement = &opcode[3];
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
    }

  /* If the size is less than four we are sure that the operand fits,
     but if it's 4, then it could be that the displacement is larger
     than -/+ 2GB.  */
  if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
      && object_64bit
      && ((addressT) (displacement_from_opcode_start - extension
		      + ((addressT) 1 << 31))
	  > (((addressT) 2 << 31) - 1)))
    {
      as_bad_where (fragP->fr_file, fragP->fr_line,
		    _("jump target out of range"));
      /* Make us emit 0.  */
      displacement_from_opcode_start = extension;
    }
  /* Now put displacement after opcode.  */
  md_number_to_chars ((char *) where_to_put_displacement,
		      (valueT) (displacement_from_opcode_start - extension),
		      DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
  fragP->fr_fix += extension;
}
10014 \f
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.

   On the 386, immediates, displacements, and data pointers are all in
   the same (little-endian) format, so we don't need to care about which
   we are handling.  */

void
md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
  /* Location in the frag where the patched bytes live.  */
  char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
  valueT value = *valP;

#if !defined (TE_Mach)
  /* A PC-relative fixup still carrying an absolute reloc type gets
     converted to the corresponding *_PCREL reloc here.  */
  if (fixP->fx_pcrel)
    {
      switch (fixP->fx_r_type)
	{
	default:
	  break;

	case BFD_RELOC_64:
	  fixP->fx_r_type = BFD_RELOC_64_PCREL;
	  break;
	case BFD_RELOC_32:
	case BFD_RELOC_X86_64_32S:
	  fixP->fx_r_type = BFD_RELOC_32_PCREL;
	  break;
	case BFD_RELOC_16:
	  fixP->fx_r_type = BFD_RELOC_16_PCREL;
	  break;
	case BFD_RELOC_8:
	  fixP->fx_r_type = BFD_RELOC_8_PCREL;
	  break;
	}
    }

  if (fixP->fx_addsy != NULL
      && (fixP->fx_r_type == BFD_RELOC_32_PCREL
	  || fixP->fx_r_type == BFD_RELOC_64_PCREL
	  || fixP->fx_r_type == BFD_RELOC_16_PCREL
	  || fixP->fx_r_type == BFD_RELOC_8_PCREL)
      && !use_rela_relocations)
    {
      /* This is a hack.  There should be a better way to handle this.
	 This covers for the fact that bfd_install_relocation will
	 subtract the current location (for partial_inplace, PC relative
	 relocations); see more below.  */
#ifndef OBJ_AOUT
      if (IS_ELF
#ifdef TE_PE
	  || OUTPUT_FLAVOR == bfd_target_coff_flavour
#endif
	  )
	value += fixP->fx_where + fixP->fx_frag->fr_address;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (IS_ELF)
	{
	  segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);

	  if ((sym_seg == seg
	       || (symbol_section_p (fixP->fx_addsy)
		   && sym_seg != absolute_section))
	      && !generic_force_reloc (fixP))
	    {
	      /* Yes, we add the values in twice.  This is because
		 bfd_install_relocation subtracts them out again.  I think
		 bfd_install_relocation is broken, but I don't dare change
		 it.  FIXME.  */
	      value += fixP->fx_where + fixP->fx_frag->fr_address;
	    }
	}
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      /* For some reason, the PE format does not store a
	 section address offset for a PC relative symbol.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != seg
	  || S_IS_WEAK (fixP->fx_addsy))
	value += md_pcrel_from (fixP);
#endif
    }
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixP->fx_addsy != NULL
      && S_IS_WEAK (fixP->fx_addsy)
      /* PR 16858: Do not modify weak function references.  */
      && ! fixP->fx_pcrel)
    {
#if !defined (TE_PEP)
      /* For x86 PE weak function symbols are neither PC-relative
	 nor do they set S_IS_FUNCTION.  So the only reliable way
	 to detect them is to check the flags of their containing
	 section.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
	  && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
	;
      else
#endif
	value -= S_GET_VALUE (fixP->fx_addsy);
    }
#endif

  /* Fix a few things - the dynamic linker expects certain values here,
     and we must not disappoint it.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF && fixP->fx_addsy)
    switch (fixP->fx_r_type)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
	/* Make the jump instruction point to the address of the operand.  At
	   runtime we merely add the offset to the actual PLT entry.  */
	value = -4;
	break;

      case BFD_RELOC_386_TLS_GD:
      case BFD_RELOC_386_TLS_LDM:
      case BFD_RELOC_386_TLS_IE_32:
      case BFD_RELOC_386_TLS_IE:
      case BFD_RELOC_386_TLS_GOTIE:
      case BFD_RELOC_386_TLS_GOTDESC:
      case BFD_RELOC_X86_64_TLSGD:
      case BFD_RELOC_X86_64_TLSLD:
      case BFD_RELOC_X86_64_GOTTPOFF:
      case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	/* Fallthrough */
      case BFD_RELOC_386_TLS_LE:
      case BFD_RELOC_386_TLS_LDO_32:
      case BFD_RELOC_386_TLS_LE_32:
      case BFD_RELOC_X86_64_DTPOFF32:
      case BFD_RELOC_X86_64_DTPOFF64:
      case BFD_RELOC_X86_64_TPOFF32:
      case BFD_RELOC_X86_64_TPOFF64:
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	break;

      case BFD_RELOC_386_TLS_DESC_CALL:
      case BFD_RELOC_X86_64_TLSDESC_CALL:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	fixP->fx_done = 0;
	return;

      case BFD_RELOC_VTABLE_INHERIT:
      case BFD_RELOC_VTABLE_ENTRY:
	/* Always emitted as a reloc; nothing to patch in the frag.  */
	fixP->fx_done = 0;
	return;

      default:
	break;
      }
#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)  */
  *valP = value;
#endif /* !defined (TE_Mach)  */

  /* Are we finished with this relocation now?  */
  if (fixP->fx_addsy == NULL)
    fixP->fx_done = 1;
#if defined (OBJ_COFF) && defined (TE_PE)
  else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      fixP->fx_done = 0;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      /* Clear out the frag for now.  */
      value = 0;
    }
#endif
  else if (use_rela_relocations)
    {
      /* RELA targets keep the addend in the reloc, not the section
	 contents, so zero the patched bytes and stash the value.  */
      fixP->fx_no_overflow = 1;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      value = 0;
    }

  md_number_to_chars (p, value, fixP->fx_size);
}
10196 \f
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Convert a floating point literal of kind TYPE into LITTLENUMs at
     LITP, storing the emitted size via SIZEP.  The FALSE argument
     selects little-endian LITTLENUM order, matching the x86.  (The
     old comment claimed "bigendian 386" — the 386 is little-endian.)  */
  return ieee_md_atof (type, litP, sizeP, FALSE);
}
10204 \f
/* Scratch buffer for output_invalid: big enough for "'c'" or
   "(0xXX)" plus the terminating NUL.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render the offending character C for a diagnostic: printable
   characters appear as 'c', anything else as (0xXX).  Returns a
   pointer to a static buffer that the next call overwrites.  */

static char *
output_invalid (int c)
{
  if (!ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  return output_invalid_buf;
}
10218
/* REG_STRING starts *before* REGISTER_PREFIX.  Look up the register
   named there, returning its reg_entry or NULL if it is not a valid
   register for the active CPU/arch settings.  On success *END_OP is
   set to the first character after the register name.  */

static const reg_entry *
parse_real_register (char *reg_string, char **end_op)
{
  char *s = reg_string;
  char *p;
  char reg_name_given[MAX_REG_NAME_SIZE + 1];
  const reg_entry *r;

  /* Skip possible REGISTER_PREFIX and possible whitespace.  */
  if (*s == REGISTER_PREFIX)
    ++s;

  if (is_space_char (*s))
    ++s;

  /* Copy the candidate name, canonicalized through register_chars
     (which also folds case); bail out if it is too long.  */
  p = reg_name_given;
  while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
    {
      if (p >= reg_name_given + MAX_REG_NAME_SIZE)
	return (const reg_entry *) NULL;
      s++;
    }

  /* For naked regs, make sure that we are not dealing with an identifier.
     This prevents confusing an identifier like `eax_var' with register
     `eax'.  */
  if (allow_naked_reg && identifier_chars[(unsigned char) *s])
    return (const reg_entry *) NULL;

  *end_op = s;

  r = (const reg_entry *) hash_find (reg_hash, reg_name_given);

  /* Handle floating point regs, allowing spaces in the (i) part.  */
  if (r == i386_regtab /* %st is first entry of table  */)
    {
      if (!cpu_arch_flags.bitfield.cpu8087
	  && !cpu_arch_flags.bitfield.cpu287
	  && !cpu_arch_flags.bitfield.cpu387)
	return (const reg_entry *) NULL;

      if (is_space_char (*s))
	++s;
      if (*s == '(')
	{
	  /* Parse "%st(N)" with optional spaces around N.  */
	  ++s;
	  if (is_space_char (*s))
	    ++s;
	  if (*s >= '0' && *s <= '7')
	    {
	      int fpr = *s - '0';
	      ++s;
	      if (is_space_char (*s))
		++s;
	      if (*s == ')')
		{
		  *end_op = s + 1;
		  /* st(0)..st(7) are consecutive table entries, so
		     index off the st(0) entry.  */
		  r = (const reg_entry *) hash_find (reg_hash, "st(0)");
		  know (r);
		  return r + fpr;
		}
	    }
	  /* We have "%st(" then garbage.  */
	  return (const reg_entry *) NULL;
	}
    }

  if (r == NULL || allow_pseudo_reg)
    return r;

  if (operand_type_all_zero (&r->reg_type))
    return (const reg_entry *) NULL;

  /* 32-bit, segment, control, debug and test registers need a 386+.  */
  if ((r->reg_type.bitfield.dword
       || r->reg_type.bitfield.sreg3
       || r->reg_type.bitfield.control
       || r->reg_type.bitfield.debug
       || r->reg_type.bitfield.test)
      && !cpu_arch_flags.bitfield.cpui386)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
    return (const reg_entry *) NULL;

  /* Gate vector registers on the enabled SIMD ISA level.  */
  if (!cpu_arch_flags.bitfield.cpuavx512f)
    {
      if (r->reg_type.bitfield.zmmword || r->reg_type.bitfield.regmask)
	return (const reg_entry *) NULL;

      if (!cpu_arch_flags.bitfield.cpuavx)
	{
	  if (r->reg_type.bitfield.ymmword)
	    return (const reg_entry *) NULL;

	  if (!cpu_arch_flags.bitfield.cpusse && r->reg_type.bitfield.xmmword)
	    return (const reg_entry *) NULL;
	}
    }

  if (r->reg_type.bitfield.regbnd && !cpu_arch_flags.bitfield.cpumpx)
    return (const reg_entry *) NULL;

  /* Don't allow fake index register unless allow_index_reg isn't 0.  */
  if (!allow_index_reg
      && (r->reg_num == RegEiz || r->reg_num == RegRiz))
    return (const reg_entry *) NULL;

  /* Upper 16 vector registers are only available with VREX in 64bit
     mode, and require EVEX encoding.  */
  if (r->reg_flags & RegVRex)
    {
      if (!cpu_arch_flags.bitfield.cpuvrex
	  || flag_code != CODE_64BIT)
	return (const reg_entry *) NULL;

      i.vec_encoding = vex_encoding_evex;
    }

  /* REX-only and 64-bit-wide registers need 64-bit mode (with the
     control-register exception for long-mode-capable CPUs).  */
  if (((r->reg_flags & (RegRex64 | RegRex)) || r->reg_type.bitfield.qword)
      && (!cpu_arch_flags.bitfield.cpulm || !r->reg_type.bitfield.control)
      && flag_code != CODE_64BIT)
    return (const reg_entry *) NULL;

  /* The "flat" pseudo segment register is Intel-syntax only.  */
  if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
    return (const reg_entry *) NULL;

  return r;
}
10349
/* REG_STRING starts *before* REGISTER_PREFIX.  Like
   parse_real_register, but additionally recognizes symbols that were
   equated to registers (their value expression lives in reg_section).  */

static const reg_entry *
parse_register (char *reg_string, char **end_op)
{
  const reg_entry *r;

  if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
    r = parse_real_register (reg_string, end_op);
  else
    r = NULL;
  if (!r)
    {
      /* Not a literal register name: try a symbol equated to a
	 register.  Temporarily redirect input_line_pointer so the
	 symbol-name scanner operates on REG_STRING.  */
      char *save = input_line_pointer;
      char c;
      symbolS *symbolP;

      input_line_pointer = reg_string;
      c = get_symbol_name (&reg_string);
      symbolP = symbol_find (reg_string);
      if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
	{
	  const expressionS *e = symbol_get_value_expression (symbolP);

	  know (e->X_op == O_register);
	  know (e->X_add_number >= 0
		&& (valueT) e->X_add_number < i386_regtab_size);
	  r = i386_regtab + e->X_add_number;
	  /* An equate naming an upper-16 vector register forces EVEX.  */
	  if ((r->reg_flags & RegVRex))
	    i.vec_encoding = vex_encoding_evex;
	  *end_op = input_line_pointer;
	}
      /* Restore the scanner state exactly as we found it.  */
      *input_line_pointer = c;
      input_line_pointer = save;
    }
  return r;
}
10387
/* md hook: try to parse NAME as a register (or, in Intel syntax, an
   Intel-specific name) and fill in expression E.  NEXTCHARP holds the
   character that originally followed NAME and was replaced by NUL;
   it is restored/updated so the caller's scan state stays consistent.
   Returns 1 if NAME was consumed, 0 otherwise.  */

int
i386_parse_name (char *name, expressionS *e, char *nextcharP)
{
  const reg_entry *r;
  char *end = input_line_pointer;

  /* Re-attach the terminator so the register parser sees the full
     token stream.  */
  *end = *nextcharP;
  r = parse_register (name, &input_line_pointer);
  if (r && end <= input_line_pointer)
    {
      *nextcharP = *input_line_pointer;
      *input_line_pointer = 0;
      e->X_op = O_register;
      e->X_add_number = r - i386_regtab;
      return 1;
    }
  /* Not a register: undo our edits before delegating.  */
  input_line_pointer = end;
  *end = 0;
  return intel_syntax ? i386_intel_parse_name (name, e) : 0;
}
10408
/* md hook invoked when the generic expression parser cannot make sense
   of the input: handle a %-prefixed register, or an Intel-syntax
   bracketed index expression, filling in E accordingly.  Leaves E
   untouched (or O_absent) on failure.  */

void
md_operand (expressionS *e)
{
  char *end;
  const reg_entry *r;

  switch (*input_line_pointer)
    {
    case REGISTER_PREFIX:
      r = parse_real_register (input_line_pointer, &end);
      if (r)
	{
	  e->X_op = O_register;
	  e->X_add_number = r - i386_regtab;
	  input_line_pointer = end;
	}
      break;

    case '[':
      gas_assert (intel_syntax);
      /* Remember where '[' was in case the expression is malformed.  */
      end = input_line_pointer++;
      expression (e);
      if (*input_line_pointer == ']')
	{
	  ++input_line_pointer;
	  /* Wrap the parsed expression in an O_index node.  */
	  e->X_op_symbol = make_expr_symbol (e);
	  e->X_add_symbol = NULL;
	  e->X_add_number = 0;
	  e->X_op = O_index;
	}
      else
	{
	  /* No closing bracket: rewind and report nothing parsed.  */
	  e->X_op = O_absent;
	  input_line_pointer = end;
	}
      break;
    }
}
10447
10448 \f
/* Short options accepted by this target; the ELF set adds the SVR4
   compatibility flags -k, -V, -Q and Solaris -s.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
const char *md_shortopts = "kVQ:sqnO::";
#else
const char *md_shortopts = "qnO::";
#endif

/* Codes for the x86-specific long options, offset from OPTION_MD_BASE.  */
#define OPTION_32 (OPTION_MD_BASE + 0)
#define OPTION_64 (OPTION_MD_BASE + 1)
#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
#define OPTION_MARCH (OPTION_MD_BASE + 3)
#define OPTION_MTUNE (OPTION_MD_BASE + 4)
#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
#define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
#define OPTION_X32 (OPTION_MD_BASE + 14)
#define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
#define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
#define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
#define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
#define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
#define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
#define OPTION_MSHARED (OPTION_MD_BASE + 21)
#define OPTION_MAMD64 (OPTION_MD_BASE + 22)
#define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
#define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)

/* Long option table handed to getopt_long; dispatched in
   md_parse_option via the codes above.  */
struct option md_longopts[] =
{
  {"32", no_argument, NULL, OPTION_32},
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
  {"64", no_argument, NULL, OPTION_64},
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"x32", no_argument, NULL, OPTION_X32},
  {"mshared", no_argument, NULL, OPTION_MSHARED},
#endif
  {"divide", no_argument, NULL, OPTION_DIVIDE},
  {"march", required_argument, NULL, OPTION_MARCH},
  {"mtune", required_argument, NULL, OPTION_MTUNE},
  {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
  {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
  {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
  {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
  {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
  {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
  {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
  {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
  {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
  {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
  {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
# if defined (TE_PE) || defined (TE_PEP)
  {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
#endif
  {"momit-lock-prefix", required_argument, NULL, OPTION_MOMIT_LOCK_PREFIX},
  {"mfence-as-lock-add", required_argument, NULL, OPTION_MFENCE_AS_LOCK_ADD},
  {"mrelax-relocations", required_argument, NULL, OPTION_MRELAX_RELOCATIONS},
  {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG},
  {"mamd64", no_argument, NULL, OPTION_MAMD64},
  {"mintel64", no_argument, NULL, OPTION_MINTEL64},
  {NULL, no_argument, NULL, 0}
};
size_t md_longopts_size = sizeof (md_longopts);
10518
/* Handle one target-specific command line option.  C is the option
   character or OPTION_* code, ARG its argument (may be NULL).
   Returns 1 when the option is consumed, 0 when it is not recognized
   here.  Fatal-errors out on malformed arguments.  */

int
md_parse_option (int c, const char *arg)
{
  unsigned int j;
  char *arch, *next, *saved;

  switch (c)
    {
    case 'n':
      optimize_align_code = 0;
      break;

    case 'q':
      quiet_warnings = 1;
      break;

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
	 should be emitted or not.  FIXME: Not implemented.  */
    case 'Q':
      break;

      /* -V: SVR4 argument to print version ID.  */
    case 'V':
      print_version_id ();
      break;

      /* -k: Ignore for FreeBSD compatibility.  */
    case 'k':
      break;

    case 's':
      /* -s: On i386 Solaris, this tells the native assembler to use
	 .stab instead of .stab.excl.  We always use .stab anyhow.  */
      break;

    case OPTION_MSHARED:
      shared = 1;
      break;
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
    case OPTION_64:
      {
	/* Only honor --64 when a 64-bit capable target was compiled in.  */
	const char **list, **l;

	list = bfd_target_list ();
	for (l = list; *l != NULL; l++)
	  if (CONST_STRNEQ (*l, "elf64-x86-64")
	      || strcmp (*l, "coff-x86-64") == 0
	      || strcmp (*l, "pe-x86-64") == 0
	      || strcmp (*l, "pei-x86-64") == 0
	      || strcmp (*l, "mach-o-x86-64") == 0)
	    {
	      default_arch = "x86_64";
	      break;
	    }
	if (*l == NULL)
	  as_fatal (_("no compiled in support for x86_64"));
	free (list);
      }
      break;
#endif

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case OPTION_X32:
      if (IS_ELF)
	{
	  /* The x32 ILP32 ABI needs the elf32-x86-64 target.  */
	  const char **list, **l;

	  list = bfd_target_list ();
	  for (l = list; *l != NULL; l++)
	    if (CONST_STRNEQ (*l, "elf32-x86-64"))
	      {
		default_arch = "x86_64:32";
		break;
	      }
	  if (*l == NULL)
	    as_fatal (_("no compiled in support for 32bit x86_64"));
	  free (list);
	}
      else
	as_fatal (_("32bit x86_64 is only supported for ELF"));
      break;
#endif

    case OPTION_32:
      default_arch = "i386";
      break;

    case OPTION_DIVIDE:
#ifdef SVR4_COMMENT_CHARS
      {
	/* Strip '/' from the comment-character set so it can be used
	   as a division operator.  */
	char *n, *t;
	const char *s;

	n = XNEWVEC (char, strlen (i386_comment_chars) + 1);
	t = n;
	for (s = i386_comment_chars; *s != '\0'; s++)
	  if (*s != '/')
	    *t++ = *s;
	*t = '\0';
	i386_comment_chars = n;
      }
#endif
      break;

    case OPTION_MARCH:
      /* ARG is CPU[+EXT...|+noEXT...]; process each '+'-separated
	 piece in turn against cpu_arch / cpu_noarch.  */
      saved = xstrdup (arg);
      arch = saved;
      /* Allow -march=+nosse.  */
      if (*arch == '+')
	arch++;
      do
	{
	  if (*arch == '.')
	    as_fatal (_("invalid -march= option: `%s'"), arg);
	  next = strchr (arch, '+');
	  if (next)
	    *next++ = '\0';
	  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	    {
	      if (strcmp (arch, cpu_arch [j].name) == 0)
		{
		  /* Processor.  */
		  if (! cpu_arch[j].flags.bitfield.cpui386)
		    continue;

		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}
	      else if (*cpu_arch [j].name == '.'
		       && strcmp (arch, cpu_arch [j].name + 1) == 0)
		{
		  /* ISA extension.  */
		  i386_cpu_flags flags;

		  flags = cpu_flags_or (cpu_arch_flags,
					cpu_arch[j].flags);

		  if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		    {
		      if (cpu_sub_arch_name)
			{
			  char *name = cpu_sub_arch_name;
			  cpu_sub_arch_name = concat (name,
						      cpu_arch[j].name,
						      (const char *) NULL);
			  free (name);
			}
		      else
			cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		      cpu_arch_flags = flags;
		      cpu_arch_isa_flags = flags;
		    }
		  else
		    cpu_arch_isa_flags
		      = cpu_flags_or (cpu_arch_isa_flags,
				      cpu_arch[j].flags);
		  break;
		}
	    }

	  if (j >= ARRAY_SIZE (cpu_arch))
	    {
	      /* Disable an ISA extension.  */
	      for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
		if (strcmp (arch, cpu_noarch [j].name) == 0)
		  {
		    i386_cpu_flags flags;

		    flags = cpu_flags_and_not (cpu_arch_flags,
					       cpu_noarch[j].flags);
		    if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		      {
			if (cpu_sub_arch_name)
			  {
			    char *name = cpu_sub_arch_name;
			    /* NOTE(review): unlike the enable path above,
			       the previous sub-arch string NAME is freed
			       without being concatenated — only ARCH is
			       recorded.  Verify dropping earlier
			       extensions from the recorded name is
			       intended.  */
			    cpu_sub_arch_name = concat (arch,
							(const char *) NULL);
			    free (name);
			  }
			else
			  cpu_sub_arch_name = xstrdup (arch);
			cpu_arch_flags = flags;
			cpu_arch_isa_flags = flags;
		      }
		    break;
		  }

	      if (j >= ARRAY_SIZE (cpu_noarch))
		j = ARRAY_SIZE (cpu_arch);
	    }

	  if (j >= ARRAY_SIZE (cpu_arch))
	    as_fatal (_("invalid -march= option: `%s'"), arg);

	  arch = next;
	}
      while (next != NULL);
      free (saved);
      break;

    case OPTION_MTUNE:
      if (*arg == '.')
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (arg, cpu_arch [j].name) == 0)
	    {
	      cpu_arch_tune_set = 1;
	      cpu_arch_tune = cpu_arch [j].type;
	      cpu_arch_tune_flags = cpu_arch[j].flags;
	      break;
	    }
	}
      if (j >= ARRAY_SIZE (cpu_arch))
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      break;

    case OPTION_MMNEMONIC:
      if (strcasecmp (arg, "att") == 0)
	intel_mnemonic = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_mnemonic = 1;
      else
	as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
      break;

    case OPTION_MSYNTAX:
      if (strcasecmp (arg, "att") == 0)
	intel_syntax = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_syntax = 1;
      else
	as_fatal (_("invalid -msyntax= option: `%s'"), arg);
      break;

    case OPTION_MINDEX_REG:
      allow_index_reg = 1;
      break;

    case OPTION_MNAKED_REG:
      allow_naked_reg = 1;
      break;

    case OPTION_MSSE2AVX:
      sse2avx = 1;
      break;

    case OPTION_MSSE_CHECK:
      if (strcasecmp (arg, "error") == 0)
	sse_check = check_error;
      else if (strcasecmp (arg, "warning") == 0)
	sse_check = check_warning;
      else if (strcasecmp (arg, "none") == 0)
	sse_check = check_none;
      else
	as_fatal (_("invalid -msse-check= option: `%s'"), arg);
      break;

    case OPTION_MOPERAND_CHECK:
      if (strcasecmp (arg, "error") == 0)
	operand_check = check_error;
      else if (strcasecmp (arg, "warning") == 0)
	operand_check = check_warning;
      else if (strcasecmp (arg, "none") == 0)
	operand_check = check_none;
      else
	as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
      break;

    case OPTION_MAVXSCALAR:
      if (strcasecmp (arg, "128") == 0)
	avxscalar = vex128;
      else if (strcasecmp (arg, "256") == 0)
	avxscalar = vex256;
      else
	as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
      break;

    case OPTION_MADD_BND_PREFIX:
      add_bnd_prefix = 1;
      break;

    case OPTION_MEVEXLIG:
      if (strcmp (arg, "128") == 0)
	evexlig = evexl128;
      else if (strcmp (arg, "256") == 0)
	evexlig = evexl256;
      else if (strcmp (arg, "512") == 0)
	evexlig = evexl512;
      else
	as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
      break;

    case OPTION_MEVEXRCIG:
      if (strcmp (arg, "rne") == 0)
	evexrcig = rne;
      else if (strcmp (arg, "rd") == 0)
	evexrcig = rd;
      else if (strcmp (arg, "ru") == 0)
	evexrcig = ru;
      else if (strcmp (arg, "rz") == 0)
	evexrcig = rz;
      else
	as_fatal (_("invalid -mevexrcig= option: `%s'"), arg);
      break;

    case OPTION_MEVEXWIG:
      if (strcmp (arg, "0") == 0)
	evexwig = evexw0;
      else if (strcmp (arg, "1") == 0)
	evexwig = evexw1;
      else
	as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
      break;

# if defined (TE_PE) || defined (TE_PEP)
    case OPTION_MBIG_OBJ:
      use_big_obj = 1;
      break;
#endif

    case OPTION_MOMIT_LOCK_PREFIX:
      if (strcasecmp (arg, "yes") == 0)
	omit_lock_prefix = 1;
      else if (strcasecmp (arg, "no") == 0)
	omit_lock_prefix = 0;
      else
	as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg);
      break;

    case OPTION_MFENCE_AS_LOCK_ADD:
      if (strcasecmp (arg, "yes") == 0)
	avoid_fence = 1;
      else if (strcasecmp (arg, "no") == 0)
	avoid_fence = 0;
      else
	as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg);
      break;

    case OPTION_MRELAX_RELOCATIONS:
      if (strcasecmp (arg, "yes") == 0)
	generate_relax_relocations = 1;
      else if (strcasecmp (arg, "no") == 0)
	generate_relax_relocations = 0;
      else
	as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg);
      break;

    case OPTION_MAMD64:
      intel64 = 0;
      break;

    case OPTION_MINTEL64:
      intel64 = 1;
      break;

    case 'O':
      /* -O enables encoding optimization; -Os optimizes for size;
	 -On sets an explicit level (no validation of ARG here — a
	 non-numeric ARG yields level 0 via atoi).  */
      if (arg == NULL)
	{
	  optimize = 1;
	  /* Turn off -Os.  */
	  optimize_for_space = 0;
	}
      else if (*arg == 's')
	{
	  optimize_for_space = 1;
	  /* Turn on all encoding optimizations.  */
	  optimize = -1;
	}
      else
	{
	  optimize = atoi (arg);
	  /* Turn off -Os.  */
	  optimize_for_space = 0;
	}
      break;

    default:
      return 0;
    }
  return 1;
}
10913
10914 #define MESSAGE_TEMPLATE \
10915 " "
10916
10917 static char *
10918 output_message (FILE *stream, char *p, char *message, char *start,
10919 int *left_p, const char *name, int len)
10920 {
10921 int size = sizeof (MESSAGE_TEMPLATE);
10922 int left = *left_p;
10923
10924 /* Reserve 2 spaces for ", " or ",\0" */
10925 left -= len + 2;
10926
10927 /* Check if there is any room. */
10928 if (left >= 0)
10929 {
10930 if (p != start)
10931 {
10932 *p++ = ',';
10933 *p++ = ' ';
10934 }
10935 p = mempcpy (p, name, len);
10936 }
10937 else
10938 {
10939 /* Output the current message now and start a new one. */
10940 *p++ = ',';
10941 *p = '\0';
10942 fprintf (stream, "%s\n", message);
10943 p = start;
10944 left = size - (start - message) - len - 2;
10945
10946 gas_assert (left >= 0);
10947
10948 p = mempcpy (p, name, len);
10949 }
10950
10951 *left_p = left;
10952 return p;
10953 }
10954
10955 static void
10956 show_arch (FILE *stream, int ext, int check)
10957 {
10958 static char message[] = MESSAGE_TEMPLATE;
10959 char *start = message + 27;
10960 char *p;
10961 int size = sizeof (MESSAGE_TEMPLATE);
10962 int left;
10963 const char *name;
10964 int len;
10965 unsigned int j;
10966
10967 p = start;
10968 left = size - (start - message);
10969 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
10970 {
10971 /* Should it be skipped? */
10972 if (cpu_arch [j].skip)
10973 continue;
10974
10975 name = cpu_arch [j].name;
10976 len = cpu_arch [j].len;
10977 if (*name == '.')
10978 {
10979 /* It is an extension. Skip if we aren't asked to show it. */
10980 if (ext)
10981 {
10982 name++;
10983 len--;
10984 }
10985 else
10986 continue;
10987 }
10988 else if (ext)
10989 {
10990 /* It is an processor. Skip if we show only extension. */
10991 continue;
10992 }
10993 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
10994 {
10995 /* It is an impossible processor - skip. */
10996 continue;
10997 }
10998
10999 p = output_message (stream, p, message, start, &left, name, len);
11000 }
11001
11002 /* Display disabled extensions. */
11003 if (ext)
11004 for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
11005 {
11006 name = cpu_noarch [j].name;
11007 len = cpu_noarch [j].len;
11008 p = output_message (stream, p, message, start, &left, name,
11009 len);
11010 }
11011
11012 *p = '\0';
11013 fprintf (stream, "%s\n", message);
11014 }
11015
/* Print the target-specific part of the --help output to STREAM.
   NOTE(review): the column alignment of the option descriptions below
   was reconstructed (the extraction collapsed runs of spaces) — verify
   against the upstream file before committing formatting changes.  */

void
md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -Q                      ignored\n\
  -V                      print assembler version number\n\
  -k                      ignored\n"));
#endif
  fprintf (stream, _("\
  -n                      Do not optimize code alignment\n\
  -q                      quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s                      ignored\n"));
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide                do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide                ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
                          generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
                          EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU              optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx               encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning]\n\
                          check SSE instructions\n"));
  fprintf (stream, _("\
  -moperand-check=[none|error|warning]\n\
                          check operand combinations for validity\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexwig=[0|1]         encode EVEX instructions with specific EVEX.W value\n\
                           for EVEX.W bit ignored instructions\n"));
  fprintf (stream, _("\
  -mevexrcig=[rne|rd|ru|rz]\n\
                          encode EVEX instructions with specific EVEX.RC value\n\
                           for SAE-only ignored instructions\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg             support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg             don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -madd-bnd-prefix        add BND prefix for all valid branches\n"));
  fprintf (stream, _("\
  -mshared                disable branch optimization for shared code\n"));
# if defined (TE_PE) || defined (TE_PEP)
  fprintf (stream, _("\
  -mbig-obj               generate big object files\n"));
#endif
  fprintf (stream, _("\
  -momit-lock-prefix=[no|yes]\n\
                          strip all lock prefixes\n"));
  fprintf (stream, _("\
  -mfence-as-lock-add=[no|yes]\n\
                          encode lfence, mfence and sfence as\n\
                           lock addl $0x0, (%%{re}sp)\n"));
  fprintf (stream, _("\
  -mrelax-relocations=[no|yes]\n\
                          generate relax relocations\n"));
  fprintf (stream, _("\
  -mamd64                 accept only AMD64 ISA\n"));
  fprintf (stream, _("\
  -mintel64               accept only Intel64 ISA\n"));
}
11106
11107 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
11108 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
11109 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
11110
11111 /* Pick the target format to use. */
11112
const char *
i386_target_format (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      /* A bare "x86_64" selects the LP64 ABI; any suffix after it
	 selects the x32 (ILP32) variant.  */
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else if (!strcmp (default_arch, "iamcu"))
    {
      update_code_flag (CODE_32BIT, 1);
      if (cpu_arch_isa == PROCESSOR_UNKNOWN)
	{
	  /* No CPU architecture has been selected yet (presumably no
	     -march= was given): default both ISA and tuning to Intel
	     MCU.  */
	  static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
	  cpu_arch_name = "iamcu";
	  cpu_sub_arch_name = NULL;
	  cpu_arch_flags = iamcu_flags;
	  cpu_arch_isa = PROCESSOR_IAMCU;
	  cpu_arch_isa_flags = iamcu_flags;
	  if (!cpu_arch_tune_set)
	    {
	      cpu_arch_tune = cpu_arch_isa;
	      cpu_arch_tune_flags = cpu_arch_isa_flags;
	    }
	}
      else if (cpu_arch_isa != PROCESSOR_IAMCU)
	as_fatal (_("Intel MCU doesn't support `%s' architecture"),
		  cpu_arch_name);
    }
  else
    as_fatal (_("unknown architecture"));

  /* If nothing has filled in the ISA/tuning flag sets yet, fall back
     to the generic cpu_arch entry for the current code size.  */
  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
      else
	return "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    /* x32 is a 64-bit object format, but 64-bit-only relocation
	       types must be rejected (see tc_gen_reloc).  */
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	/* Intel L1OM/K1OM/MCU use dedicated ELF target formats instead
	   of the generic one chosen above.  */
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_IAMCU)
	  {
	    if (x86_elf_abi != I386_ABI)
	      as_fatal (_("Intel MCU is 32bit only"));
	    return ELF_TARGET_IAMCU_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}
11236
11237 #endif /* OBJ_MAYBE_ more than one */
11238 \f
11239 symbolS *
11240 md_undefined_symbol (char *name)
11241 {
11242 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
11243 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
11244 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
11245 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
11246 {
11247 if (!GOT_symbol)
11248 {
11249 if (symbol_find (name))
11250 as_bad (_("GOT already in symbol table"));
11251 GOT_symbol = symbol_new (name, undefined_section,
11252 (valueT) 0, &zero_address_frag);
11253 };
11254 return GOT_symbol;
11255 }
11256 return 0;
11257 }
11258
11259 /* Round up a section size to the appropriate boundary. */
11260
11261 valueT
11262 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
11263 {
11264 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
11265 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
11266 {
11267 /* For a.out, force the section size to be aligned. If we don't do
11268 this, BFD will align it for us, but it will not write out the
11269 final bytes of the section. This may be a bug in BFD, but it is
11270 easier to fix it here since that is how the other a.out targets
11271 work. */
11272 int align;
11273
11274 align = bfd_get_section_alignment (stdoutput, segment);
11275 size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
11276 }
11277 #endif
11278
11279 return size;
11280 }
11281
11282 /* On the i386, PC-relative offsets are relative to the start of the
11283 next instruction. That is, the address of the offset, plus its
11284 size, since the offset is always the last part of the insn. */
11285
11286 long
11287 md_pcrel_from (fixS *fixP)
11288 {
11289 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
11290 }
11291
11292 #ifndef I386COFF
11293
11294 static void
11295 s_bss (int ignore ATTRIBUTE_UNUSED)
11296 {
11297 int temp;
11298
11299 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11300 if (IS_ELF)
11301 obj_elf_section_change_hook ();
11302 #endif
11303 temp = get_absolute_expression ();
11304 subseg_set (bss_section, (subsegT) temp);
11305 demand_empty_rest_of_line ();
11306 }
11307
11308 #endif
11309
/* Massage a fixup before it is turned into an output relocation.
   Rewrites "sym - _GLOBAL_OFFSET_TABLE_" difference expressions into
   the proper GOT-relative relocation types, and upgrades 32-bit GOT32
   fixups to GOT32X where flagged.  */

void
i386_validate_fix (fixS *fixp)
{
  if (fixp->fx_subsy)
    {
      if (fixp->fx_subsy == GOT_symbol)
	{
	  if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
	    {
	      /* The pc-relative difference form only exists for
		 64-bit objects.  */
	      if (!object_64bit)
		abort ();
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
	      /* fx_tcbit2 is presumably set when the instruction is a
		 candidate for GOTPCREL relaxation (cf. the
		 -mrelax-relocations option); fx_tcbit selects the REX
		 form.  TODO(review): confirm where these bits are
		 set.  */
	      if (fixp->fx_tcbit2)
		fixp->fx_r_type = (fixp->fx_tcbit
				   ? BFD_RELOC_X86_64_REX_GOTPCRELX
				   : BFD_RELOC_X86_64_GOTPCRELX);
	      else
#endif
		fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
	    }
	  else
	    {
	      /* Plain "sym - GOT" becomes a GOT-relative offset.  */
	      if (!object_64bit)
		fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
	      else
		fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
	    }
	  /* The GOT symbol is folded into the relocation type; drop
	     the subtrahend.  */
	  fixp->fx_subsy = 0;
	}
    }
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  else if (!object_64bit)
    {
      if (fixp->fx_r_type == BFD_RELOC_386_GOT32
	  && fixp->fx_tcbit2)
	fixp->fx_r_type = BFD_RELOC_386_GOT32X;
    }
#endif
}
11349
/* Translate internal fixup FIXP (in SECTION) into a BFD relocation
   (arelent) for the object writer.  Returns NULL when the fixup was
   fully resolved here (SIZE relocations against defined local
   symbols) and no output relocation is needed.  */

arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  /* First map the fixup's relocation type to a BFD reloc code.  */
  switch (fixp->fx_r_type)
    {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case BFD_RELOC_SIZE32:
    case BFD_RELOC_SIZE64:
      if (S_IS_DEFINED (fixp->fx_addsy)
	  && !S_IS_EXTERNAL (fixp->fx_addsy))
	{
	  /* Resolve size relocation against local symbol to size of
	     the symbol plus addend.  */
	  valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
	  if (fixp->fx_r_type == BFD_RELOC_SIZE32
	      && !fits_in_unsigned_long (value))
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("symbol size computation overflow"));
	  /* Patch the value in directly; no relocation is emitted.  */
	  fixp->fx_addsy = NULL;
	  fixp->fx_subsy = NULL;
	  md_apply_fix (fixp, (valueT *) &value, NULL);
	  return NULL;
	}
#endif
      /* Fall through.  */

      /* All of the following pass through to the output unchanged.  */
    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_X86_64_GOTPCRELX:
    case BFD_RELOC_X86_64_REX_GOTPCRELX:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOT32X:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	{
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
	  break;
	}
      /* Fall through.  */
    default:
      /* Anything else is mapped purely by fixup size and whether it
	 is pc-relative.  */
      if (fixp->fx_pcrel)
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32_PCREL;
	      break;
	    case 1: code = BFD_RELOC_8_PCREL; break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4: code = BFD_RELOC_32_PCREL; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64_PCREL; break;
#endif
	    }
	}
      else
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32;
	      break;
	    case 1: code = BFD_RELOC_8; break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64; break;
#endif
	    }
	}
      break;
    }

  /* A direct reference to the GOT symbol itself becomes a GOTPC
     relocation.  */
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
	code = BFD_RELOC_386_GOTPC;
      else
	code = BFD_RELOC_X86_64_GOTPC32;
    }
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  rel = XNEW (arelent);
  rel->sym_ptr_ptr = XNEW (asymbol *);
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
      rel->addend = 0;
    }
  /* Use the rela in 64bit mode.  */
  else
    {
      /* x32 objects cannot carry 64-bit relocation types; diagnose
	 but keep going.  */
      if (disallow_64bit_reloc)
	switch (code)
	  {
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
	    break;
	  default:
	    break;
	  }

      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
      else
	switch (code)
	  {
	  /* For these pc-relative forms the addend is the offset
	     adjusted by the fixup size.  */
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_GOTPCRELX:
	  case BFD_RELOC_X86_64_REX_GOTPCRELX:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    break;
	  default:
	    rel->addend = (section->vma
			   - fixp->fx_size
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));
	    break;
	  }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}
11570
11571 #include "tc-i386-intel.c"
11572
11573 void
11574 tc_x86_parse_to_dw2regnum (expressionS *exp)
11575 {
11576 int saved_naked_reg;
11577 char saved_register_dot;
11578
11579 saved_naked_reg = allow_naked_reg;
11580 allow_naked_reg = 1;
11581 saved_register_dot = register_chars['.'];
11582 register_chars['.'] = '.';
11583 allow_pseudo_reg = 1;
11584 expression_and_evaluate (exp);
11585 allow_pseudo_reg = 0;
11586 register_chars['.'] = saved_register_dot;
11587 allow_naked_reg = saved_naked_reg;
11588
11589 if (exp->X_op == O_register && exp->X_add_number >= 0)
11590 {
11591 if ((addressT) exp->X_add_number < i386_regtab_size)
11592 {
11593 exp->X_op = O_constant;
11594 exp->X_add_number = i386_regtab[exp->X_add_number]
11595 .dw2_regnum[flag_code >> 1];
11596 }
11597 else
11598 exp->X_op = O_illegal;
11599 }
11600 }
11601
/* Emit the initial CFI instructions for a frame: CFA is the stack
   pointer plus the CIE data alignment, and the return address is
   saved relative to it.  */

void
tc_x86_frame_initial_instructions (void)
{
  /* DWARF2 number of the stack pointer, cached per code size
     ([0] for 32-bit "esp", [1] for 64-bit "rsp"; 0 means "not yet
     computed" — relies on neither register having DWARF number 0).  */
  static unsigned int sp_regno[2];

  if (!sp_regno[flag_code >> 1])
    {
      char *saved_input = input_line_pointer;
      char sp[][4] = {"esp", "rsp"};
      expressionS exp;

      /* Run the register name through the normal expression parser by
	 temporarily pointing input_line_pointer at it.  */
      input_line_pointer = sp[flag_code >> 1];
      tc_x86_parse_to_dw2regnum (&exp);
      gas_assert (exp.X_op == O_constant);
      sp_regno[flag_code >> 1] = exp.X_add_number;
      input_line_pointer = saved_input;
    }

  cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
  cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
}
11623
/* Size in bytes of a DWARF address for the current target.  */

int
x86_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* x32 uses 32-bit addresses even though the architecture is
     64-bit.  */
  if (x86_elf_abi == X86_64_X32_ABI)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
11633
11634 int
11635 i386_elf_section_type (const char *str, size_t len)
11636 {
11637 if (flag_code == CODE_64BIT
11638 && len == sizeof ("unwind") - 1
11639 && strncmp (str, "unwind", 6) == 0)
11640 return SHT_X86_64_UNWIND;
11641
11642 return -1;
11643 }
11644
11645 #ifdef TE_SOLARIS
/* On Solaris, give .eh_frame sections the x86-64 unwind section type
   in 64-bit mode.  */
void
i386_solaris_fix_up_eh_frame (segT sec)
{
  if (flag_code == CODE_64BIT)
    elf_section_type (sec) = SHT_X86_64_UNWIND;
}
11652 #endif
11653
11654 #ifdef TE_PE
11655 void
11656 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
11657 {
11658 expressionS exp;
11659
11660 exp.X_op = O_secrel;
11661 exp.X_add_symbol = symbol;
11662 exp.X_add_number = 0;
11663 emit_expr (&exp, size);
11664 }
11665 #endif
11666
11667 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11668 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
11669
11670 bfd_vma
11671 x86_64_section_letter (int letter, const char **ptr_msg)
11672 {
11673 if (flag_code == CODE_64BIT)
11674 {
11675 if (letter == 'l')
11676 return SHF_X86_64_LARGE;
11677
11678 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
11679 }
11680 else
11681 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
11682 return -1;
11683 }
11684
11685 bfd_vma
11686 x86_64_section_word (char *str, size_t len)
11687 {
11688 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
11689 return SHF_X86_64_LARGE;
11690
11691 return -1;
11692 }
11693
/* Handle the ".largecomm" pseudo-op.  In 32-bit mode it degrades to a
   plain ".comm" (with a warning); in 64-bit mode the common symbol is
   placed in the large-data common/.lbss sections instead.  */

static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      /* Temporarily redirect the common section and bss section so
	 that s_comm_internal places the symbol in the large-data
	 equivalents, then restore them.  */
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      if (lbss_section == NULL)
	{
	  flagword applicable;
	  segT seg = now_seg;
	  subsegT subseg = now_subseg;

	  /* The .lbss section is for local .largecomm symbols.  */
	  lbss_section = subseg_new (".lbss", 0);
	  applicable = bfd_applicable_section_flags (stdoutput);
	  bfd_set_section_flags (stdoutput, lbss_section,
				 applicable & SEC_ALLOC);
	  seg_info (lbss_section)->bss = 1;

	  /* subseg_new switched sections; switch back.  */
	  subseg_set (seg, subseg);
	}

      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
11733 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */