aff7f9b8f7c0e2c22a74fcc90c687afc299ab530
[deliverable/binutils-gdb.git] / gas / config / tc-i386.c
1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2017 Free Software Foundation, Inc.
3
4 This file is part of GAS, the GNU Assembler.
5
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
19 02110-1301, USA. */
20
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
27
28 #include "as.h"
29 #include "safe-ctype.h"
30 #include "subsegs.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
35
36 #ifndef REGISTER_WARNINGS
37 #define REGISTER_WARNINGS 1
38 #endif
39
40 #ifndef INFER_ADDR_PREFIX
41 #define INFER_ADDR_PREFIX 1
42 #endif
43
44 #ifndef DEFAULT_ARCH
45 #define DEFAULT_ARCH "i386"
46 #endif
47
48 #ifndef INLINE
49 #if __GNUC__ >= 2
50 #define INLINE __inline__
51 #else
52 #define INLINE
53 #endif
54 #endif
55
56 /* Prefixes will be emitted in the order defined below.
 57    WAIT_PREFIX must be the first prefix since FWAIT is really an
58 instruction, and so must come before any prefixes.
59 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
60 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
61 #define WAIT_PREFIX 0
62 #define SEG_PREFIX 1
63 #define ADDR_PREFIX 2
64 #define DATA_PREFIX 3
65 #define REP_PREFIX 4
66 #define HLE_PREFIX REP_PREFIX
67 #define BND_PREFIX REP_PREFIX
68 #define LOCK_PREFIX 5
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
71
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
76
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 #define ZMMWORD_MNEM_SUFFIX 'z'
 87 /* Intel Syntax.  Use a non-ascii letter since it never appears
88 in instructions. */
89 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
90
91 #define END_OF_INSN '\0'
92
93 /*
94 'templates' is for grouping together 'template' structures for opcodes
95 of the same name. This is only used for storing the insns in the grand
96 ole hash table of insns.
97 The templates themselves start at START and range up to (but not including)
98 END.
99 */
100 typedef struct
101 {
102   const insn_template *start;	/* First template with this mnemonic.  */
103   const insn_template *end;	/* One past the last such template.  */
104 }
105 templates;
106
107 /* 386 operand encoding bytes: see 386 book for details of this. */
108 typedef struct
109 {
110 unsigned int regmem; /* codes register or memory operand */
111 unsigned int reg; /* codes register operand (or extended opcode) */
112 unsigned int mode; /* how to interpret regmem & reg */
113 }
114 modrm_byte;
115
116 /* x86-64 extension prefix. */
117 typedef int rex_byte;
118
119 /* 386 opcode byte to code indirect addressing. */
120 typedef struct
121 {
122 unsigned base;
123 unsigned index;
124 unsigned scale;
125 }
126 sib_byte;
127
128 /* x86 arch names, types and features */
129 typedef struct
130 {
131 const char *name; /* arch name */
132 unsigned int len; /* arch string length */
133 enum processor_type type; /* arch type */
134 i386_cpu_flags flags; /* cpu feature flags */
135 unsigned int skip; /* show_arch should skip this. */
136 }
137 arch_entry;
138
139 /* Used to turn off indicated flags. */
140 typedef struct
141 {
142 const char *name; /* arch name */
143 unsigned int len; /* arch string length */
144 i386_cpu_flags flags; /* cpu feature flags */
145 }
146 noarch_entry;
147
148 static void update_code_flag (int, int);
149 static void set_code_flag (int);
150 static void set_16bit_gcc_code_flag (int);
151 static void set_intel_syntax (int);
152 static void set_intel_mnemonic (int);
153 static void set_allow_index_reg (int);
154 static void set_check (int);
155 static void set_cpu_arch (int);
156 #ifdef TE_PE
157 static void pe_directive_secrel (int);
158 #endif
159 static void signed_cons (int);
160 static char *output_invalid (int c);
161 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
162 const char *);
163 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
164 const char *);
165 static int i386_att_operand (char *);
166 static int i386_intel_operand (char *, int);
167 static int i386_intel_simplify (expressionS *);
168 static int i386_intel_parse_name (const char *, expressionS *);
169 static const reg_entry *parse_register (char *, char **);
170 static char *parse_insn (char *, char *);
171 static char *parse_operands (char *, const char *);
172 static void swap_operands (void);
173 static void swap_2_operands (int, int);
174 static void optimize_imm (void);
175 static void optimize_disp (void);
176 static const insn_template *match_template (char);
177 static int check_string (void);
178 static int process_suffix (void);
179 static int check_byte_reg (void);
180 static int check_long_reg (void);
181 static int check_qword_reg (void);
182 static int check_word_reg (void);
183 static int finalize_imm (void);
184 static int process_operands (void);
185 static const seg_entry *build_modrm_byte (void);
186 static void output_insn (void);
187 static void output_imm (fragS *, offsetT);
188 static void output_disp (fragS *, offsetT);
189 #ifndef I386COFF
190 static void s_bss (int);
191 #endif
192 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
193 static void handle_large_common (int small ATTRIBUTE_UNUSED);
194 #endif
195
196 static const char *default_arch = DEFAULT_ARCH;
197
198 /* This struct describes rounding control and SAE in the instruction. */
199 struct RC_Operation
200 {
201   enum rc_type
202     {
203       rne = 0,
204       rd,
205       ru,
206       rz,
207       saeonly
208     } type;
209   /* The operand where this rounding/SAE operation is associated.  */
210   int operand;
211 };
212 
213 static struct RC_Operation rc_op;
213
214 /* The struct describes masking, applied to OPERAND in the instruction.
215 MASK is a pointer to the corresponding mask register. ZEROING tells
216 whether merging or zeroing mask is used. */
217 struct Mask_Operation
218 {
219 const reg_entry *mask;
220 unsigned int zeroing;
221 /* The operand where this operation is associated. */
222 int operand;
223 };
224
225 static struct Mask_Operation mask_op;
226
227 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
228 broadcast factor. */
229 struct Broadcast_Operation
230 {
231 /* Type of broadcast: no broadcast, {1to8}, or {1to16}. */
232 int type;
233
234 /* Index of broadcasted operand. */
235 int operand;
236 };
237
238 static struct Broadcast_Operation broadcast_op;
239
240 /* VEX prefix. */
241 typedef struct
242 {
243   /* VEX prefix is either 2 byte or 3 byte.  EVEX is 4 byte.  */
244   unsigned char bytes[4];
245   /* Number of prefix bytes used in BYTES (2, 3 or 4).  */
246   unsigned int length;
247   /* Destination or source register specifier.  */
248   const reg_entry *register_specifier;
249 } vex_prefix;
249
250 /* 'md_assemble ()' gathers together information and puts it into a
251 i386_insn. */
252
253 union i386_op
254   {
255     expressionS *disps;		/* Displacement expression.  */
256     expressionS *imms;		/* Immediate expression.  */
257     const reg_entry *regs;	/* Register operand.  */
258   };
259
260 enum i386_error
261 {
262 operand_size_mismatch,
263 operand_type_mismatch,
264 register_type_mismatch,
265 number_of_operands_mismatch,
266 invalid_instruction_suffix,
267 bad_imm4,
268 old_gcc_only,
269 unsupported_with_intel_mnemonic,
270 unsupported_syntax,
271 unsupported,
272 invalid_vsib_address,
273 invalid_vector_register_set,
274 unsupported_vector_index_register,
275 unsupported_broadcast,
276 broadcast_not_on_src_operand,
277 broadcast_needed,
278 unsupported_masking,
279 mask_not_on_destination,
280 no_default_mask,
281 unsupported_rc_sae,
282 rc_sae_operand_not_last_imm,
283 invalid_register_operand,
284 try_vector_disp8
285 };
286
287 struct _i386_insn
288 {
289     /* TM holds the template for the insn we're currently assembling.  */
290 insn_template tm;
291
292 /* SUFFIX holds the instruction size suffix for byte, word, dword
293 or qword, if given. */
294 char suffix;
295
296 /* OPERANDS gives the number of given operands. */
297 unsigned int operands;
298
299 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
300 of given register, displacement, memory operands and immediate
301 operands. */
302 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
303
304 /* TYPES [i] is the type (see above #defines) which tells us how to
305 use OP[i] for the corresponding operand. */
306 i386_operand_type types[MAX_OPERANDS];
307
308 /* Displacement expression, immediate expression, or register for each
309 operand. */
310 union i386_op op[MAX_OPERANDS];
311
312 /* Flags for operands. */
313 unsigned int flags[MAX_OPERANDS];
314 #define Operand_PCrel 1
315
316 /* Relocation type for operand */
317 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
318
319 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
320 the base index byte below. */
321 const reg_entry *base_reg;
322 const reg_entry *index_reg;
323 unsigned int log2_scale_factor;
324
325 /* SEG gives the seg_entries of this insn. They are zero unless
326 explicit segment overrides are given. */
327 const seg_entry *seg[2];
328
329 /* Copied first memory operand string, for re-checking. */
330 char *memop1_string;
331
332 /* PREFIX holds all the given prefix opcodes (usually null).
333 PREFIXES is the number of prefix opcodes. */
334 unsigned int prefixes;
335 unsigned char prefix[MAX_PREFIXES];
336
337 /* RM and SIB are the modrm byte and the sib byte where the
338 addressing modes of this insn are encoded. */
339 modrm_byte rm;
340 rex_byte rex;
341 rex_byte vrex;
342 sib_byte sib;
343 vex_prefix vex;
344
345 /* Masking attributes. */
346 struct Mask_Operation *mask;
347
348 /* Rounding control and SAE attributes. */
349 struct RC_Operation *rounding;
350
351 /* Broadcasting attributes. */
352 struct Broadcast_Operation *broadcast;
353
354 /* Compressed disp8*N attribute. */
355 unsigned int memshift;
356
357 /* Prefer load or store in encoding. */
358 enum
359 {
360 dir_encoding_default = 0,
361 dir_encoding_load,
362 dir_encoding_store
363 } dir_encoding;
364
365 /* Prefer 8bit or 32bit displacement in encoding. */
366 enum
367 {
368 disp_encoding_default = 0,
369 disp_encoding_8bit,
370 disp_encoding_32bit
371 } disp_encoding;
372
373 /* How to encode vector instructions. */
374 enum
375 {
376 vex_encoding_default = 0,
377 vex_encoding_vex2,
378 vex_encoding_vex3,
379 vex_encoding_evex
380 } vec_encoding;
381
382 /* REP prefix. */
383 const char *rep_prefix;
384
385 /* HLE prefix. */
386 const char *hle_prefix;
387
388 /* Have BND prefix. */
389 const char *bnd_prefix;
390
391 /* Have NOTRACK prefix. */
392 const char *notrack_prefix;
393
394 /* Error message. */
395 enum i386_error error;
396 };
397
398 typedef struct _i386_insn i386_insn;
399
400 /* Link RC type with corresponding string, that'll be looked for in
401 asm. */
402 struct RC_name
403 {
404 enum rc_type type;
405 const char *name;
406 unsigned int len;
407 };
408
409 static const struct RC_name RC_NamesTable[] =
410 {
411 { rne, STRING_COMMA_LEN ("rn-sae") },
412 { rd, STRING_COMMA_LEN ("rd-sae") },
413 { ru, STRING_COMMA_LEN ("ru-sae") },
414 { rz, STRING_COMMA_LEN ("rz-sae") },
415 { saeonly, STRING_COMMA_LEN ("sae") },
416 };
417
418 /* List of chars besides those in app.c:symbol_chars that can start an
419 operand. Used to prevent the scrubber eating vital white-space. */
420 const char extra_symbol_chars[] = "*%-([{}"
421 #ifdef LEX_AT
422 "@"
423 #endif
424 #ifdef LEX_QM
425 "?"
426 #endif
427 ;
428
429 #if (defined (TE_I386AIX) \
430 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
431 && !defined (TE_GNU) \
432 && !defined (TE_LINUX) \
433 && !defined (TE_NACL) \
434 && !defined (TE_NETWARE) \
435 && !defined (TE_FreeBSD) \
436 && !defined (TE_DragonFly) \
437 && !defined (TE_NetBSD)))
438 /* This array holds the chars that always start a comment. If the
439 pre-processor is disabled, these aren't very useful. The option
440 --divide will remove '/' from this list. */
441 const char *i386_comment_chars = "#/";
442 #define SVR4_COMMENT_CHARS 1
443 #define PREFIX_SEPARATOR '\\'
444
445 #else
446 const char *i386_comment_chars = "#";
447 #define PREFIX_SEPARATOR '/'
448 #endif
449
450 /* This array holds the chars that only start a comment at the beginning of
451 a line. If the line seems to have the form '# 123 filename'
452 .line and .file directives will appear in the pre-processed output.
453 Note that input_file.c hand checks for '#' at the beginning of the
454 first line of the input file. This is because the compiler outputs
455 #NO_APP at the beginning of its output.
456 Also note that comments started like this one will always work if
457 '/' isn't otherwise defined. */
458 const char line_comment_chars[] = "#/";
459
460 const char line_separator_chars[] = ";";
461
462 /* Chars that can be used to separate mant from exp in floating point
463 nums. */
464 const char EXP_CHARS[] = "eE";
465
466 /* Chars that mean this number is a floating point constant
467 As in 0f12.456
468 or 0d1.2345e12. */
469 const char FLT_CHARS[] = "fFdDxX";
470
471 /* Tables for lexical analysis. */
472 static char mnemonic_chars[256];
473 static char register_chars[256];
474 static char operand_chars[256];
475 static char identifier_chars[256];
476 static char digit_chars[256];
477
478 /* Lexical macros. */
479 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
480 #define is_operand_char(x) (operand_chars[(unsigned char) x])
481 #define is_register_char(x) (register_chars[(unsigned char) x])
482 #define is_space_char(x) ((x) == ' ')
483 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
484 #define is_digit_char(x) (digit_chars[(unsigned char) x])
485
486 /* All non-digit non-letter characters that may occur in an operand. */
487 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
488
489 /* md_assemble() always leaves the strings it's passed unaltered. To
490 effect this we maintain a stack of saved characters that we've smashed
491 with '\0's (indicating end of strings for various sub-fields of the
492 assembler instruction). */
493 static char save_stack[32];
494 static char *save_stack_p;
495 #define END_STRING_AND_SAVE(s) \
496 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
497 #define RESTORE_END_STRING(s) \
498 do { *(s) = *--save_stack_p; } while (0)
499
500 /* The instruction we're assembling. */
501 static i386_insn i;
502
503 /* Possible templates for current insn. */
504 static const templates *current_templates;
505
506 /* Per instruction expressionS buffers: max displacements & immediates. */
507 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
508 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
509
510 /* Current operand we are working on. */
511 static int this_operand = -1;
512
513 /* We support four different modes. FLAG_CODE variable is used to distinguish
514 these. */
515
516 enum flag_code {
517 CODE_32BIT,
518 CODE_16BIT,
519 CODE_64BIT };
520
521 static enum flag_code flag_code;
522 static unsigned int object_64bit;
523 static unsigned int disallow_64bit_reloc;
524 static int use_rela_relocations = 0;
525
526 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
527 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
528 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
529
530 /* The ELF ABI to use. */
531 enum x86_elf_abi
532 {
533 I386_ABI,
534 X86_64_ABI,
535 X86_64_X32_ABI
536 };
537
538 static enum x86_elf_abi x86_elf_abi = I386_ABI;
539 #endif
540
541 #if defined (TE_PE) || defined (TE_PEP)
542 /* Use big object file format. */
543 static int use_big_obj = 0;
544 #endif
545
546 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
547 /* 1 if generating code for a shared library. */
548 static int shared = 0;
549 #endif
550
551 /* 1 for intel syntax,
552 0 if att syntax. */
553 static int intel_syntax = 0;
554
555 /* 1 for Intel64 ISA,
556 0 if AMD64 ISA. */
557 static int intel64;
558
559 /* 1 for intel mnemonic,
560 0 if att mnemonic. */
561 static int intel_mnemonic = !SYSV386_COMPAT;
562
563 /* 1 if support old (<= 2.8.1) versions of gcc. */
564 static int old_gcc = OLDGCC_COMPAT;
565
566 /* 1 if pseudo registers are permitted. */
567 static int allow_pseudo_reg = 0;
568
569 /* 1 if register prefix % not required. */
570 static int allow_naked_reg = 0;
571
572 /* 1 if the assembler should add BND prefix for all control-transferring
573 instructions supporting it, even if this prefix wasn't specified
574 explicitly. */
575 static int add_bnd_prefix = 0;
576
577 /* 1 if pseudo index register, eiz/riz, is allowed.  */
578 static int allow_index_reg = 0;
579
580 /* 1 if the assembler should ignore LOCK prefix, even if it was
581 specified explicitly. */
582 static int omit_lock_prefix = 0;
583
584 /* 1 if the assembler should encode lfence, mfence, and sfence as
585 "lock addl $0, (%{re}sp)". */
586 static int avoid_fence = 0;
587
588 /* 1 if the assembler should generate relax relocations. */
589
590 static int generate_relax_relocations
591 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS;
592
593 static enum check_kind
594 {
595 check_none = 0,
596 check_warning,
597 check_error
598 }
599 sse_check, operand_check = check_warning;
600
601 /* Register prefix used for error message. */
602 static const char *register_prefix = "%";
603
604 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
605 leave, push, and pop instructions so that gcc has the same stack
606 frame as in 32 bit mode. */
607 static char stackop_size = '\0';
608
609 /* Non-zero to optimize code alignment. */
610 int optimize_align_code = 1;
611
612 /* Non-zero to quieten some warnings. */
613 static int quiet_warnings = 0;
614
615 /* CPU name. */
616 static const char *cpu_arch_name = NULL;
617 static char *cpu_sub_arch_name = NULL;
618
619 /* CPU feature flags. */
620 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
621
622 /* If we have selected a cpu we are generating instructions for. */
623 static int cpu_arch_tune_set = 0;
624
625 /* Cpu we are generating instructions for. */
626 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
627
628 /* CPU feature flags of cpu we are generating instructions for. */
629 static i386_cpu_flags cpu_arch_tune_flags;
630
631 /* CPU instruction set architecture used. */
632 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
633
634 /* CPU feature flags of instruction set architecture used. */
635 i386_cpu_flags cpu_arch_isa_flags;
636
637 /* If set, conditional jumps are not automatically promoted to handle
638 larger than a byte offset. */
639 static unsigned int no_cond_jump_promotion = 0;
640
641 /* Encode SSE instructions with VEX prefix. */
642 static unsigned int sse2avx;
643
644 /* Encode scalar AVX instructions with specific vector length. */
645 static enum
646 {
647 vex128 = 0,
648 vex256
649 } avxscalar;
650
651 /* Encode scalar EVEX LIG instructions with specific vector length. */
652 static enum
653 {
654 evexl128 = 0,
655 evexl256,
656 evexl512
657 } evexlig;
658
659 /* Encode EVEX WIG instructions with specific evex.w. */
660 static enum
661 {
662 evexw0 = 0,
663 evexw1
664 } evexwig;
665
666 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
667 static enum rc_type evexrcig = rne;
668
669 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
670 static symbolS *GOT_symbol;
671
672 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
673 unsigned int x86_dwarf2_return_column;
674
675 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
676 int x86_cie_data_alignment;
677
678 /* Interface to relax_segment.
679 There are 3 major relax states for 386 jump insns because the
680 different types of jumps add different sizes to frags when we're
681 figuring out what sort of jump to choose to reach a given label. */
682
683 /* Types. */
684 #define UNCOND_JUMP 0
685 #define COND_JUMP 1
686 #define COND_JUMP86 2
687
688 /* Sizes. */
689 #define CODE16 1
690 #define SMALL 0
691 #define SMALL16 (SMALL | CODE16)
692 #define BIG 2
693 #define BIG16 (BIG | CODE16)
694
695 #ifndef INLINE
696 #ifdef __GNUC__
697 #define INLINE __inline__
698 #else
699 #define INLINE
700 #endif
701 #endif
702
703 #define ENCODE_RELAX_STATE(type, size) \
704 ((relax_substateT) (((type) << 2) | (size)))
705 #define TYPE_FROM_RELAX_STATE(s) \
706 ((s) >> 2)
707 #define DISP_SIZE_FROM_RELAX_STATE(s) \
708 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
709
710 /* This table is used by relax_frag to promote short jumps to long
711 ones where necessary. SMALL (short) jumps may be promoted to BIG
712 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
713 don't allow a short jump in a 32 bit code segment to be promoted to
714 a 16 bit offset jump because it's slower (requires data size
715 prefix), and doesn't work, unless the destination is in the bottom
716 64k of the code segment (The top 16 bits of eip are zeroed). */
717
718 const relax_typeS md_relax_table[] =
719 {
720 /* The fields are:
721 1) most positive reach of this state,
722 2) most negative reach of this state,
723 3) how many bytes this mode will have in the variable part of the frag
724 4) which index into the table to try if we can't fit into this one. */
725
726 /* UNCOND_JUMP states. */
727 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
728 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
729 /* dword jmp adds 4 bytes to frag:
730 0 extra opcode bytes, 4 displacement bytes. */
731 {0, 0, 4, 0},
732   /* word jmp adds 2 bytes to frag:
733 0 extra opcode bytes, 2 displacement bytes. */
734 {0, 0, 2, 0},
735
736 /* COND_JUMP states. */
737 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
738 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
739   /* dword conditionals add 5 bytes to frag:
740 1 extra opcode byte, 4 displacement bytes. */
741 {0, 0, 5, 0},
742 /* word conditionals add 3 bytes to frag:
743 1 extra opcode byte, 2 displacement bytes. */
744 {0, 0, 3, 0},
745
746 /* COND_JUMP86 states. */
747 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
748 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
749   /* dword conditionals add 5 bytes to frag:
750 1 extra opcode byte, 4 displacement bytes. */
751 {0, 0, 5, 0},
752 /* word conditionals add 4 bytes to frag:
753 1 displacement byte and a 3 byte long branch insn. */
754 {0, 0, 4, 0}
755 };
756
757 static const arch_entry cpu_arch[] =
758 {
759 /* Do not replace the first two entries - i386_target_format()
760 relies on them being there in this order. */
761 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
762 CPU_GENERIC32_FLAGS, 0 },
763 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
764 CPU_GENERIC64_FLAGS, 0 },
765 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
766 CPU_NONE_FLAGS, 0 },
767 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
768 CPU_I186_FLAGS, 0 },
769 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
770 CPU_I286_FLAGS, 0 },
771 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
772 CPU_I386_FLAGS, 0 },
773 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
774 CPU_I486_FLAGS, 0 },
775 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
776 CPU_I586_FLAGS, 0 },
777 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
778 CPU_I686_FLAGS, 0 },
779 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
780 CPU_I586_FLAGS, 0 },
781 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
782 CPU_PENTIUMPRO_FLAGS, 0 },
783 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
784 CPU_P2_FLAGS, 0 },
785 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
786 CPU_P3_FLAGS, 0 },
787 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
788 CPU_P4_FLAGS, 0 },
789 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
790 CPU_CORE_FLAGS, 0 },
791 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
792 CPU_NOCONA_FLAGS, 0 },
793 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
794 CPU_CORE_FLAGS, 1 },
795 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
796 CPU_CORE_FLAGS, 0 },
797 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
798 CPU_CORE2_FLAGS, 1 },
799 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
800 CPU_CORE2_FLAGS, 0 },
801 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
802 CPU_COREI7_FLAGS, 0 },
803 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
804 CPU_L1OM_FLAGS, 0 },
805 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
806 CPU_K1OM_FLAGS, 0 },
807 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU,
808 CPU_IAMCU_FLAGS, 0 },
809 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
810 CPU_K6_FLAGS, 0 },
811 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
812 CPU_K6_2_FLAGS, 0 },
813 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
814 CPU_ATHLON_FLAGS, 0 },
815 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
816 CPU_K8_FLAGS, 1 },
817 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
818 CPU_K8_FLAGS, 0 },
819 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
820 CPU_K8_FLAGS, 0 },
821 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
822 CPU_AMDFAM10_FLAGS, 0 },
823 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
824 CPU_BDVER1_FLAGS, 0 },
825 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
826 CPU_BDVER2_FLAGS, 0 },
827 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
828 CPU_BDVER3_FLAGS, 0 },
829 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD,
830 CPU_BDVER4_FLAGS, 0 },
831 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER,
832 CPU_ZNVER1_FLAGS, 0 },
833 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
834 CPU_BTVER1_FLAGS, 0 },
835 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
836 CPU_BTVER2_FLAGS, 0 },
837 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
838 CPU_8087_FLAGS, 0 },
839 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
840 CPU_287_FLAGS, 0 },
841 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
842 CPU_387_FLAGS, 0 },
843 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN,
844 CPU_687_FLAGS, 0 },
845 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
846 CPU_MMX_FLAGS, 0 },
847 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
848 CPU_SSE_FLAGS, 0 },
849 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
850 CPU_SSE2_FLAGS, 0 },
851 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
852 CPU_SSE3_FLAGS, 0 },
853 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
854 CPU_SSSE3_FLAGS, 0 },
855 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
856 CPU_SSE4_1_FLAGS, 0 },
857 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
858 CPU_SSE4_2_FLAGS, 0 },
859 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
860 CPU_SSE4_2_FLAGS, 0 },
861 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
862 CPU_AVX_FLAGS, 0 },
863 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
864 CPU_AVX2_FLAGS, 0 },
865 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
866 CPU_AVX512F_FLAGS, 0 },
867 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
868 CPU_AVX512CD_FLAGS, 0 },
869 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
870 CPU_AVX512ER_FLAGS, 0 },
871 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
872 CPU_AVX512PF_FLAGS, 0 },
873 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN,
874 CPU_AVX512DQ_FLAGS, 0 },
875 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN,
876 CPU_AVX512BW_FLAGS, 0 },
877 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN,
878 CPU_AVX512VL_FLAGS, 0 },
879 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
880 CPU_VMX_FLAGS, 0 },
881 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
882 CPU_VMFUNC_FLAGS, 0 },
883 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
884 CPU_SMX_FLAGS, 0 },
885 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
886 CPU_XSAVE_FLAGS, 0 },
887 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
888 CPU_XSAVEOPT_FLAGS, 0 },
889 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN,
890 CPU_XSAVEC_FLAGS, 0 },
891 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN,
892 CPU_XSAVES_FLAGS, 0 },
893 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
894 CPU_AES_FLAGS, 0 },
895 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
896 CPU_PCLMUL_FLAGS, 0 },
897 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
898 CPU_PCLMUL_FLAGS, 1 },
899 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
900 CPU_FSGSBASE_FLAGS, 0 },
901 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
902 CPU_RDRND_FLAGS, 0 },
903 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
904 CPU_F16C_FLAGS, 0 },
905 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
906 CPU_BMI2_FLAGS, 0 },
907 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
908 CPU_FMA_FLAGS, 0 },
909 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
910 CPU_FMA4_FLAGS, 0 },
911 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
912 CPU_XOP_FLAGS, 0 },
913 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
914 CPU_LWP_FLAGS, 0 },
915 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
916 CPU_MOVBE_FLAGS, 0 },
917 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
918 CPU_CX16_FLAGS, 0 },
919 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
920 CPU_EPT_FLAGS, 0 },
921 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
922 CPU_LZCNT_FLAGS, 0 },
923 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
924 CPU_HLE_FLAGS, 0 },
925 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
926 CPU_RTM_FLAGS, 0 },
927 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
928 CPU_INVPCID_FLAGS, 0 },
929 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
930 CPU_CLFLUSH_FLAGS, 0 },
931 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
932 CPU_NOP_FLAGS, 0 },
933 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
934 CPU_SYSCALL_FLAGS, 0 },
935 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
936 CPU_RDTSCP_FLAGS, 0 },
937 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
938 CPU_3DNOW_FLAGS, 0 },
939 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
940 CPU_3DNOWA_FLAGS, 0 },
941 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
942 CPU_PADLOCK_FLAGS, 0 },
943 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
944 CPU_SVME_FLAGS, 1 },
945 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
946 CPU_SVME_FLAGS, 0 },
947 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
948 CPU_SSE4A_FLAGS, 0 },
949 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
950 CPU_ABM_FLAGS, 0 },
951 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
952 CPU_BMI_FLAGS, 0 },
953 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
954 CPU_TBM_FLAGS, 0 },
955 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
956 CPU_ADX_FLAGS, 0 },
957 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
958 CPU_RDSEED_FLAGS, 0 },
959 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
960 CPU_PRFCHW_FLAGS, 0 },
961 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
962 CPU_SMAP_FLAGS, 0 },
963 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
964 CPU_MPX_FLAGS, 0 },
965 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
966 CPU_SHA_FLAGS, 0 },
967 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN,
968 CPU_CLFLUSHOPT_FLAGS, 0 },
969 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN,
970 CPU_PREFETCHWT1_FLAGS, 0 },
971 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN,
972 CPU_SE1_FLAGS, 0 },
973 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN,
974 CPU_CLWB_FLAGS, 0 },
975 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN,
976 CPU_AVX512IFMA_FLAGS, 0 },
977 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN,
978 CPU_AVX512VBMI_FLAGS, 0 },
979 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN,
980 CPU_AVX512_4FMAPS_FLAGS, 0 },
981 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN,
982 CPU_AVX512_4VNNIW_FLAGS, 0 },
983 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN,
984 CPU_AVX512_VPOPCNTDQ_FLAGS, 0 },
985 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN,
986 CPU_AVX512_VBMI2_FLAGS, 0 },
987 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN,
988 CPU_CLZERO_FLAGS, 0 },
989 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN,
990 CPU_MWAITX_FLAGS, 0 },
991 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN,
992 CPU_OSPKE_FLAGS, 0 },
993 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN,
994 CPU_RDPID_FLAGS, 0 },
995 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN,
996 CPU_PTWRITE_FLAGS, 0 },
997 { STRING_COMMA_LEN (".cet"), PROCESSOR_UNKNOWN,
998 CPU_CET_FLAGS, 0 },
999 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN,
1000 CPU_GFNI_FLAGS, 0 },
1001 };
1002
/* Table of ".arch no<feature>" names and the set of CPU feature flags
   each one turns off (the CPU_ANY_*_FLAGS sets include features that
   depend on the one being disabled).  NOTE(review): presumably consumed
   by set_cpu_arch via the "arch" pseudo-op -- confirm in that handler.  */
static const noarch_entry cpu_noarch[] =
{
  { STRING_COMMA_LEN ("no87"),  CPU_ANY_X87_FLAGS },
  { STRING_COMMA_LEN ("no287"),  CPU_ANY_287_FLAGS },
  { STRING_COMMA_LEN ("no387"),  CPU_ANY_387_FLAGS },
  { STRING_COMMA_LEN ("no687"),  CPU_ANY_687_FLAGS },
  { STRING_COMMA_LEN ("nommx"),  CPU_ANY_MMX_FLAGS },
  { STRING_COMMA_LEN ("nosse"),  CPU_ANY_SSE_FLAGS },
  { STRING_COMMA_LEN ("nosse2"),  CPU_ANY_SSE2_FLAGS },
  { STRING_COMMA_LEN ("nosse3"),  CPU_ANY_SSE3_FLAGS },
  { STRING_COMMA_LEN ("nossse3"),  CPU_ANY_SSSE3_FLAGS },
  { STRING_COMMA_LEN ("nosse4.1"),  CPU_ANY_SSE4_1_FLAGS },
  { STRING_COMMA_LEN ("nosse4.2"),  CPU_ANY_SSE4_2_FLAGS },
  { STRING_COMMA_LEN ("nosse4"),  CPU_ANY_SSE4_1_FLAGS },
  { STRING_COMMA_LEN ("noavx"),  CPU_ANY_AVX_FLAGS },
  { STRING_COMMA_LEN ("noavx2"),  CPU_ANY_AVX2_FLAGS },
  { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS },
  { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS },
  { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS },
  { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS },
  { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS },
  { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS },
  { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS },
  { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS },
  { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS },
  { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS },
  { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS },
  { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS },
  { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS },
};
1033
1034 #ifdef I386COFF
1035 /* Like s_lcomm_internal in gas/read.c but the alignment string
1036 is allowed to be optional. */
1037
1038 static symbolS *
1039 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
1040 {
1041 addressT align = 0;
1042
1043 SKIP_WHITESPACE ();
1044
1045 if (needs_align
1046 && *input_line_pointer == ',')
1047 {
1048 align = parse_align (needs_align - 1);
1049
1050 if (align == (addressT) -1)
1051 return NULL;
1052 }
1053 else
1054 {
1055 if (size >= 8)
1056 align = 3;
1057 else if (size >= 4)
1058 align = 2;
1059 else if (size >= 2)
1060 align = 1;
1061 else
1062 align = 0;
1063 }
1064
1065 bss_alloc (symbolP, size, align);
1066 return symbolP;
1067 }
1068
/* Handler for the ".lcomm" pseudo-op on I386COFF, delegating argument
   parsing to pe_lcomm_internal above.  NOTE(review): the "* 2"
   presumably selects s_comm_internal's byte-count (rather than
   power-of-two) alignment mode -- confirm against gas/read.c.  */
static void
pe_lcomm (int needs_align)
{
  s_comm_internal (needs_align * 2, pe_lcomm_internal);
}
1074 #endif
1075
/* Target pseudo-op table: directive name, handler function, and the
   integer argument passed to the handler.  Entries here add
   x86-specific directives or select x86 behavior for generic ones.  */
const pseudo_typeS md_pseudo_table[] =
{
#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
  /* ".align N" takes a byte count here, a power of two otherwise.  */
  {"align", s_align_bytes, 0},
#else
  {"align", s_align_ptwo, 0},
#endif
  {"arch", set_cpu_arch, 0},
#ifndef I386COFF
  {"bss", s_bss, 0},
#else
  {"lcomm", pe_lcomm, 1},
#endif
  {"ffloat", float_cons, 'f'},
  {"dfloat", float_cons, 'd'},
  {"tfloat", float_cons, 'x'},
  {"value", cons, 2},
  {"slong", signed_cons, 4},
  {"noopt", s_ignore, 0},
  {"optim", s_ignore, 0},
  {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
  {"code16", set_code_flag, CODE_16BIT},
  {"code32", set_code_flag, CODE_32BIT},
  {"code64", set_code_flag, CODE_64BIT},
  {"intel_syntax", set_intel_syntax, 1},
  {"att_syntax", set_intel_syntax, 0},
  {"intel_mnemonic", set_intel_mnemonic, 1},
  {"att_mnemonic", set_intel_mnemonic, 0},
  {"allow_index_reg", set_allow_index_reg, 1},
  {"disallow_index_reg", set_allow_index_reg, 0},
  {"sse_check", set_check, 0},
  {"operand_check", set_check, 1},
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"largecomm", handle_large_common, 0},
#else
  {"file", (void (*) (int)) dwarf2_directive_file, 0},
  {"loc", dwarf2_directive_loc, 0},
  {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
#endif
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  {0, 0, 0}
};
1120
1121 /* For interface with expression (). */
1122 extern char *input_line_pointer;
1123
1124 /* Hash table for instruction mnemonic lookup. */
1125 static struct hash_control *op_hash;
1126
1127 /* Hash table for register lookup. */
1128 static struct hash_control *reg_hash;
1129 \f
/* Emit COUNT bytes of no-op padding at the end of FRAGP, picking the
   most efficient encoding pattern for the current code size and the
   processor selected by -march=/-mtune=/.arch.  COUNT values outside
   (0, MAX_MEM_FOR_RS_ALIGN_CODE] are ignored.  */
void
i386_align_code (fragS *fragP, int count)
{
  /* Various efficient no-op patterns for aligning code labels.
     Note: Don't try to assemble the instructions in the comments.
     0L and 0w are not legal.  */
  static const unsigned char f32_1[] =
    {0x90};					/* nop			*/
  static const unsigned char f32_2[] =
    {0x66,0x90};				/* xchg %ax,%ax */
  static const unsigned char f32_3[] =
    {0x8d,0x76,0x00};				/* leal 0(%esi),%esi	*/
  static const unsigned char f32_4[] =
    {0x8d,0x74,0x26,0x00};			/* leal 0(%esi,1),%esi	*/
  static const unsigned char f32_5[] =
    {0x90,					/* nop			*/
     0x8d,0x74,0x26,0x00};			/* leal 0(%esi,1),%esi	*/
  static const unsigned char f32_6[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00};		/* leal 0L(%esi),%esi	*/
  static const unsigned char f32_7[] =
    {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
  static const unsigned char f32_8[] =
    {0x90,					/* nop			*/
     0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
  static const unsigned char f32_9[] =
    {0x89,0xf6,					/* movl %esi,%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const unsigned char f32_10[] =
    {0x8d,0x76,0x00,				/* leal 0(%esi),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const unsigned char f32_11[] =
    {0x8d,0x74,0x26,0x00,			/* leal 0(%esi,1),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const unsigned char f32_12[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00,		/* leal 0L(%esi),%esi	*/
     0x8d,0xbf,0x00,0x00,0x00,0x00};		/* leal 0L(%edi),%edi	*/
  static const unsigned char f32_13[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00,		/* leal 0L(%esi),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const unsigned char f32_14[] =
    {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00,	/* leal 0L(%esi,1),%esi */
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const unsigned char f16_3[] =
    {0x8d,0x74,0x00};				/* lea 0(%esi),%esi	*/
  static const unsigned char f16_4[] =
    {0x8d,0xb4,0x00,0x00};			/* lea 0w(%si),%si	*/
  static const unsigned char f16_5[] =
    {0x90,					/* nop			*/
     0x8d,0xb4,0x00,0x00};			/* lea 0w(%si),%si	*/
  static const unsigned char f16_6[] =
    {0x89,0xf6,					/* mov %si,%si		*/
     0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di	*/
  static const unsigned char f16_7[] =
    {0x8d,0x74,0x00,				/* lea 0(%si),%si	*/
     0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di	*/
  static const unsigned char f16_8[] =
    {0x8d,0xb4,0x00,0x00,			/* lea 0w(%si),%si	*/
     0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di	*/
  static const unsigned char jump_31[] =
    {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90,	/* jmp .+31; lotsa nops	*/
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
  /* Pattern tables indexed by (padding length - 1).  */
  static const unsigned char *const f32_patt[] = {
    f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
    f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
  };
  static const unsigned char *const f16_patt[] = {
    f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
  };
  /* Multi-byte NOP (0F 1F /0) variants for CPUs that support them.  */
  /* nopl (%[re]ax) */
  static const unsigned char alt_3[] =
    {0x0f,0x1f,0x00};
  /* nopl 0(%[re]ax) */
  static const unsigned char alt_4[] =
    {0x0f,0x1f,0x40,0x00};
  /* nopl 0(%[re]ax,%[re]ax,1) */
  static const unsigned char alt_5[] =
    {0x0f,0x1f,0x44,0x00,0x00};
  /* nopw 0(%[re]ax,%[re]ax,1) */
  static const unsigned char alt_6[] =
    {0x66,0x0f,0x1f,0x44,0x00,0x00};
  /* nopl 0L(%[re]ax) */
  static const unsigned char alt_7[] =
    {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
  /* nopl 0L(%[re]ax,%[re]ax,1) */
  static const unsigned char alt_8[] =
    {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* nopw 0L(%[re]ax,%[re]ax,1) */
  static const unsigned char alt_9[] =
    {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const unsigned char alt_10[] =
    {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  static const unsigned char *const alt_patt[] = {
    f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
    alt_9, alt_10
  };

  /* Only align for at least a positive non-zero boundary. */
  if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
    return;

  /* We need to decide which NOP sequence to use for 32bit and
     64bit. When -mtune= is used:

     1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
     PROCESSOR_GENERIC32, f32_patt will be used.
     2. For the rest, alt_patt will be used.

     When -mtune= isn't used, alt_patt will be used if
     cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
     be used.

     When -march= or .arch is used, we can't use anything beyond
     cpu_arch_isa_flags.   */

  if (flag_code == CODE_16BIT)
    {
      /* 16-bit mode: small counts use f16_patt, larger ones jump
	 over a run of single-byte NOPs.  */
      if (count > 8)
	{
	  memcpy (fragP->fr_literal + fragP->fr_fix,
		  jump_31, count);
	  /* Adjust jump offset.  */
	  fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
	}
      else
	memcpy (fragP->fr_literal + fragP->fr_fix,
		f16_patt[count - 1], count);
    }
  else
    {
      const unsigned char *const *patt = NULL;

      if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
	{
	  /* PROCESSOR_UNKNOWN means that all ISAs may be used.  */
	  switch (cpu_arch_tune)
	    {
	    case PROCESSOR_UNKNOWN:
	      /* We use cpu_arch_isa_flags to check if we SHOULD
		 optimize with nops.  */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	    case PROCESSOR_GENERIC64:
	    case PROCESSOR_K6:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_K8:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_BD:
	    case PROCESSOR_ZNVER:
	    case PROCESSOR_BT:
	      patt = alt_patt;
	      break;
	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_IAMCU:
	    case PROCESSOR_GENERIC32:
	      patt = f32_patt;
	      break;
	    }
	}
      else
	{
	  switch (fragP->tc_frag_data.tune)
	    {
	    case PROCESSOR_UNKNOWN:
	      /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
		 PROCESSOR_UNKNOWN.  */
	      abort ();
	      break;

	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_IAMCU:
	    case PROCESSOR_K6:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_K8:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_BD:
	    case PROCESSOR_ZNVER:
	    case PROCESSOR_BT:
	    case PROCESSOR_GENERIC32:
	      /* We use cpu_arch_isa_flags to check if we CAN optimize
		 with nops.  */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_GENERIC64:
	      patt = alt_patt;
	      break;
	    }
	}

      if (patt == f32_patt)
	{
	  /* If the padding is less than 15 bytes, we use the normal
	     ones.  Otherwise, we use a jump instruction and adjust
	     its offset.   */
	  int limit;

	  /* For 64bit, the limit is 3 bytes.  */
	  if (flag_code == CODE_64BIT
	      && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
	    limit = 3;
	  else
	    limit = 15;
	  if (count < limit)
	    memcpy (fragP->fr_literal + fragP->fr_fix,
		    patt[count - 1], count);
	  else
	    {
	      memcpy (fragP->fr_literal + fragP->fr_fix,
		      jump_31, count);
	      /* Adjust jump offset.  */
	      fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
	    }
	}
      else
	{
	  /* Maximum length of an instruction is 10 byte.  If the
	     padding is greater than 10 bytes and we don't use jump,
	     we have to break it into smaller pieces.  */
	  int padding = count;
	  while (padding > 10)
	    {
	      padding -= 10;
	      memcpy (fragP->fr_literal + fragP->fr_fix + padding,
		      patt [9], 10);
	    }

	  if (padding)
	    memcpy (fragP->fr_literal + fragP->fr_fix,
		    patt [padding - 1], padding);
	}
    }
  fragP->fr_var = count;
}
1396
1397 static INLINE int
1398 operand_type_all_zero (const union i386_operand_type *x)
1399 {
1400 switch (ARRAY_SIZE(x->array))
1401 {
1402 case 3:
1403 if (x->array[2])
1404 return 0;
1405 /* Fall through. */
1406 case 2:
1407 if (x->array[1])
1408 return 0;
1409 /* Fall through. */
1410 case 1:
1411 return !x->array[0];
1412 default:
1413 abort ();
1414 }
1415 }
1416
/* Store V into every word of the operand-type bit array *X.  */
static INLINE void
operand_type_set (union i386_operand_type *x, unsigned int v)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      x->array[2] = v;
      /* Fall through.  */
    case 2:
      x->array[1] = v;
      /* Fall through.  */
    case 1:
      x->array[0] = v;
      break;
    default:
      abort ();
    }
}
1436
1437 static INLINE int
1438 operand_type_equal (const union i386_operand_type *x,
1439 const union i386_operand_type *y)
1440 {
1441 switch (ARRAY_SIZE(x->array))
1442 {
1443 case 3:
1444 if (x->array[2] != y->array[2])
1445 return 0;
1446 /* Fall through. */
1447 case 2:
1448 if (x->array[1] != y->array[1])
1449 return 0;
1450 /* Fall through. */
1451 case 1:
1452 return x->array[0] == y->array[0];
1453 break;
1454 default:
1455 abort ();
1456 }
1457 }
1458
1459 static INLINE int
1460 cpu_flags_all_zero (const union i386_cpu_flags *x)
1461 {
1462 switch (ARRAY_SIZE(x->array))
1463 {
1464 case 4:
1465 if (x->array[3])
1466 return 0;
1467 /* Fall through. */
1468 case 3:
1469 if (x->array[2])
1470 return 0;
1471 /* Fall through. */
1472 case 2:
1473 if (x->array[1])
1474 return 0;
1475 /* Fall through. */
1476 case 1:
1477 return !x->array[0];
1478 default:
1479 abort ();
1480 }
1481 }
1482
1483 static INLINE int
1484 cpu_flags_equal (const union i386_cpu_flags *x,
1485 const union i386_cpu_flags *y)
1486 {
1487 switch (ARRAY_SIZE(x->array))
1488 {
1489 case 4:
1490 if (x->array[3] != y->array[3])
1491 return 0;
1492 /* Fall through. */
1493 case 3:
1494 if (x->array[2] != y->array[2])
1495 return 0;
1496 /* Fall through. */
1497 case 2:
1498 if (x->array[1] != y->array[1])
1499 return 0;
1500 /* Fall through. */
1501 case 1:
1502 return x->array[0] == y->array[0];
1503 break;
1504 default:
1505 abort ();
1506 }
1507 }
1508
1509 static INLINE int
1510 cpu_flags_check_cpu64 (i386_cpu_flags f)
1511 {
1512 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1513 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1514 }
1515
1516 static INLINE i386_cpu_flags
1517 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1518 {
1519 switch (ARRAY_SIZE (x.array))
1520 {
1521 case 4:
1522 x.array [3] &= y.array [3];
1523 /* Fall through. */
1524 case 3:
1525 x.array [2] &= y.array [2];
1526 /* Fall through. */
1527 case 2:
1528 x.array [1] &= y.array [1];
1529 /* Fall through. */
1530 case 1:
1531 x.array [0] &= y.array [0];
1532 break;
1533 default:
1534 abort ();
1535 }
1536 return x;
1537 }
1538
1539 static INLINE i386_cpu_flags
1540 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1541 {
1542 switch (ARRAY_SIZE (x.array))
1543 {
1544 case 4:
1545 x.array [3] |= y.array [3];
1546 /* Fall through. */
1547 case 3:
1548 x.array [2] |= y.array [2];
1549 /* Fall through. */
1550 case 2:
1551 x.array [1] |= y.array [1];
1552 /* Fall through. */
1553 case 1:
1554 x.array [0] |= y.array [0];
1555 break;
1556 default:
1557 abort ();
1558 }
1559 return x;
1560 }
1561
1562 static INLINE i386_cpu_flags
1563 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1564 {
1565 switch (ARRAY_SIZE (x.array))
1566 {
1567 case 4:
1568 x.array [3] &= ~y.array [3];
1569 /* Fall through. */
1570 case 3:
1571 x.array [2] &= ~y.array [2];
1572 /* Fall through. */
1573 case 2:
1574 x.array [1] &= ~y.array [1];
1575 /* Fall through. */
1576 case 1:
1577 x.array [0] &= ~y.array [0];
1578 break;
1579 default:
1580 abort ();
1581 }
1582 return x;
1583 }
1584
1585 #define CPU_FLAGS_ARCH_MATCH 0x1
1586 #define CPU_FLAGS_64BIT_MATCH 0x2
1587 #define CPU_FLAGS_AES_MATCH 0x4
1588 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1589 #define CPU_FLAGS_AVX_MATCH 0x10
1590
1591 #define CPU_FLAGS_32BIT_MATCH \
1592 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1593 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1594 #define CPU_FLAGS_PERFECT_MATCH \
1595 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1596
/* Return CPU flags match bits (a mask of CPU_FLAGS_*_MATCH) telling
   how well template T's CPU requirements are satisfied by the active
   architecture flags: 64-bit compatibility, base-architecture match,
   and the AES/PCLMUL/AVX sub-checks used for SSE2AVX handling.  */

static int
cpu_flags_match (const insn_template *t)
{
  i386_cpu_flags x = t->cpu_flags;
  int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;

  /* The 64-bit markers have been consumed above; drop them so the
     remaining bits describe only ISA feature requirements.  */
  x.bitfield.cpu64 = 0;
  x.bitfield.cpuno64 = 0;

  if (cpu_flags_all_zero (&x))
    {
      /* This instruction is available on all archs.  */
      match |= CPU_FLAGS_32BIT_MATCH;
    }
  else
    {
      /* This instruction is available only on some archs.  */
      i386_cpu_flags cpu = cpu_arch_flags;

      cpu = cpu_flags_and (x, cpu);
      if (!cpu_flags_all_zero (&cpu))
	{
	  if (x.bitfield.cpuavx)
	    {
	      /* We only need to check AES/PCLMUL/SSE2AVX with AVX.  */
	      if (cpu.bitfield.cpuavx)
		{
		  /* Check SSE2AVX.  */
		  if (!t->opcode_modifier.sse2avx|| sse2avx)
		    {
		      match |= (CPU_FLAGS_ARCH_MATCH
				| CPU_FLAGS_AVX_MATCH);
		      /* Check AES.  */
		      if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
			match |= CPU_FLAGS_AES_MATCH;
		      /* Check PCLMUL.  */
		      if (!x.bitfield.cpupclmul
			  || cpu.bitfield.cpupclmul)
			match |= CPU_FLAGS_PCLMUL_MATCH;
		    }
		}
	      else
		match |= CPU_FLAGS_ARCH_MATCH;
	    }
	  else if (x.bitfield.cpuavx512vl)
	    {
	      /* Match AVX512VL.  */
	      if (cpu.bitfield.cpuavx512vl)
		{
		  /* Need another match.  */
		  cpu.bitfield.cpuavx512vl = 0;
		  if (!cpu_flags_all_zero (&cpu))
		    match |= CPU_FLAGS_32BIT_MATCH;
		  else
		    match |= CPU_FLAGS_ARCH_MATCH;
		}
	      else
		match |= CPU_FLAGS_ARCH_MATCH;
	    }
	  else
	    match |= CPU_FLAGS_32BIT_MATCH;
	}
    }
  return match;
}
1664
1665 static INLINE i386_operand_type
1666 operand_type_and (i386_operand_type x, i386_operand_type y)
1667 {
1668 switch (ARRAY_SIZE (x.array))
1669 {
1670 case 3:
1671 x.array [2] &= y.array [2];
1672 /* Fall through. */
1673 case 2:
1674 x.array [1] &= y.array [1];
1675 /* Fall through. */
1676 case 1:
1677 x.array [0] &= y.array [0];
1678 break;
1679 default:
1680 abort ();
1681 }
1682 return x;
1683 }
1684
1685 static INLINE i386_operand_type
1686 operand_type_or (i386_operand_type x, i386_operand_type y)
1687 {
1688 switch (ARRAY_SIZE (x.array))
1689 {
1690 case 3:
1691 x.array [2] |= y.array [2];
1692 /* Fall through. */
1693 case 2:
1694 x.array [1] |= y.array [1];
1695 /* Fall through. */
1696 case 1:
1697 x.array [0] |= y.array [0];
1698 break;
1699 default:
1700 abort ();
1701 }
1702 return x;
1703 }
1704
1705 static INLINE i386_operand_type
1706 operand_type_xor (i386_operand_type x, i386_operand_type y)
1707 {
1708 switch (ARRAY_SIZE (x.array))
1709 {
1710 case 3:
1711 x.array [2] ^= y.array [2];
1712 /* Fall through. */
1713 case 2:
1714 x.array [1] ^= y.array [1];
1715 /* Fall through. */
1716 case 1:
1717 x.array [0] ^= y.array [0];
1718 break;
1719 default:
1720 abort ();
1721 }
1722 return x;
1723 }
1724
/* Frequently used operand type masks, pre-expanded from the
   OPERAND_TYPE_* initializers (see opcodes/i386-init.h, included
   above).  */
static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
static const i386_operand_type control = OPERAND_TYPE_CONTROL;
static const i386_operand_type inoutportreg
  = OPERAND_TYPE_INOUTPORTREG;
static const i386_operand_type reg16_inoutportreg
  = OPERAND_TYPE_REG16_INOUTPORTREG;
static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
static const i386_operand_type anydisp
  = OPERAND_TYPE_ANYDISP;
static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
static const i386_operand_type regzmm = OPERAND_TYPE_REGZMM;
static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1752
/* Broad operand classes tested by operand_type_check.  */
enum operand_type
{
  reg,		/* any general register */
  imm,		/* any immediate */
  disp,		/* any displacement */
  anymem	/* any memory operand (displacement or base/index) */
};
1760
/* Return non-zero if operand type T has any bit of class C set.  */
static INLINE int
operand_type_check (i386_operand_type t, enum operand_type c)
{
  switch (c)
    {
    case reg:
      return (t.bitfield.reg8
	      || t.bitfield.reg16
	      || t.bitfield.reg32
	      || t.bitfield.reg64);

    case imm:
      return (t.bitfield.imm8
	      || t.bitfield.imm8s
	      || t.bitfield.imm16
	      || t.bitfield.imm32
	      || t.bitfield.imm32s
	      || t.bitfield.imm64);

    case disp:
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64);

    case anymem:
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64
	      || t.bitfield.baseindex);

    default:
      abort ();
    }

  /* Unreachable (the default case aborts); kept for compilers that
     cannot see that.  */
  return 0;
}
1801
1802 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1803 operand J for instruction template T. */
1804
1805 static INLINE int
1806 match_reg_size (const insn_template *t, unsigned int j)
1807 {
1808 return !((i.types[j].bitfield.byte
1809 && !t->operand_types[j].bitfield.byte)
1810 || (i.types[j].bitfield.word
1811 && !t->operand_types[j].bitfield.word)
1812 || (i.types[j].bitfield.dword
1813 && !t->operand_types[j].bitfield.dword)
1814 || (i.types[j].bitfield.qword
1815 && !t->operand_types[j].bitfield.qword));
1816 }
1817
1818 /* Return 1 if there is no conflict in any size on operand J for
1819 instruction template T. */
1820
1821 static INLINE int
1822 match_mem_size (const insn_template *t, unsigned int j)
1823 {
1824 return (match_reg_size (t, j)
1825 && !((i.types[j].bitfield.unspecified
1826 && !i.broadcast
1827 && !t->operand_types[j].bitfield.unspecified)
1828 || (i.types[j].bitfield.fword
1829 && !t->operand_types[j].bitfield.fword)
1830 || (i.types[j].bitfield.tbyte
1831 && !t->operand_types[j].bitfield.tbyte)
1832 || (i.types[j].bitfield.xmmword
1833 && !t->operand_types[j].bitfield.xmmword)
1834 || (i.types[j].bitfield.ymmword
1835 && !t->operand_types[j].bitfield.ymmword)
1836 || (i.types[j].bitfield.zmmword
1837 && !t->operand_types[j].bitfield.zmmword)));
1838 }
1839
/* Return 1 if there is no size conflict on any operands for
   instruction template T.  If the straight order mismatches but the
   template allows operand reversal (D/FloatD), try the reversed
   order before reporting operand_size_mismatch via i.error.  */

static INLINE int
operand_size_match (const insn_template *t)
{
  unsigned int j;
  int match = 1;

  /* Don't check jump instructions.  */
  if (t->opcode_modifier.jump
      || t->opcode_modifier.jumpbyte
      || t->opcode_modifier.jumpdword
      || t->opcode_modifier.jumpintersegment)
    return match;

  /* Check memory and accumulator operand size.  */
  for (j = 0; j < i.operands; j++)
    {
      if (t->operand_types[j].bitfield.anysize)
	continue;

      if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
	{
	  match = 0;
	  break;
	}

      if (i.types[j].bitfield.mem && !match_mem_size (t, j))
	{
	  match = 0;
	  break;
	}
    }

  if (match)
    return match;
  else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
    {
      /* Non-reversible template: the straight-order mismatch is final.  */
mismatch:
      i.error = operand_size_mismatch;
      return 0;
    }

  /* Check reverse.  */
  gas_assert (i.operands == 2);

  match = 1;
  for (j = 0; j < 2; j++)
    {
      /* Compare each given operand against the template's other slot.  */
      if (t->operand_types[j].bitfield.acc
	  && !match_reg_size (t, j ? 0 : 1))
	goto mismatch;

      if (i.types[j].bitfield.mem
	  && !match_mem_size (t, j ? 0 : 1))
	goto mismatch;
    }

  return match;
}
1901
/* Return 1 if the given operand type GIVEN is acceptable where the
   template overlap is OVERLAP: some non-size bit must survive in the
   overlap, and baseindex/jumpabsolute must agree between GIVEN and
   OVERLAP.  On failure, record operand_type_mismatch in i.error and
   return 0.  */
static INLINE int
operand_type_match (i386_operand_type overlap,
		    i386_operand_type given)
{
  i386_operand_type temp = overlap;

  /* Strip bits that carry no "kind" information before testing for
     an empty overlap.  */
  temp.bitfield.jumpabsolute = 0;
  temp.bitfield.unspecified = 0;
  temp.bitfield.byte = 0;
  temp.bitfield.word = 0;
  temp.bitfield.dword = 0;
  temp.bitfield.fword = 0;
  temp.bitfield.qword = 0;
  temp.bitfield.tbyte = 0;
  temp.bitfield.xmmword = 0;
  temp.bitfield.ymmword = 0;
  temp.bitfield.zmmword = 0;
  if (operand_type_all_zero (&temp))
    goto mismatch;

  if (given.bitfield.baseindex == overlap.bitfield.baseindex
      && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
    return 1;

mismatch:
  i.error = operand_type_mismatch;
  return 0;
}
1930
/* If given types g0 and g1 are registers they must be of the same type
   unless the expected operand type register overlap is null.
   Note that Acc in a template matches every size of reg.  On failure,
   record register_type_mismatch in i.error and return 0.  */

static INLINE int
operand_type_register_match (i386_operand_type m0,
			     i386_operand_type g0,
			     i386_operand_type t0,
			     i386_operand_type m1,
			     i386_operand_type g1,
			     i386_operand_type t1)
{
  /* Nothing to check unless both given operands are registers.  */
  if (!operand_type_check (g0, reg))
    return 1;

  if (!operand_type_check (g1, reg))
    return 1;

  if (g0.bitfield.reg8 == g1.bitfield.reg8
      && g0.bitfield.reg16 == g1.bitfield.reg16
      && g0.bitfield.reg32 == g1.bitfield.reg32
      && g0.bitfield.reg64 == g1.bitfield.reg64)
    return 1;

  /* Acc matches any register size; widen the template types before
     testing the overlap.  */
  if (m0.bitfield.acc)
    {
      t0.bitfield.reg8 = 1;
      t0.bitfield.reg16 = 1;
      t0.bitfield.reg32 = 1;
      t0.bitfield.reg64 = 1;
    }

  if (m1.bitfield.acc)
    {
      t1.bitfield.reg8 = 1;
      t1.bitfield.reg16 = 1;
      t1.bitfield.reg32 = 1;
      t1.bitfield.reg64 = 1;
    }

  /* A null size overlap between the template types means the
     mismatch is acceptable.  */
  if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
      && !(t0.bitfield.reg16 & t1.bitfield.reg16)
      && !(t0.bitfield.reg32 & t1.bitfield.reg32)
      && !(t0.bitfield.reg64 & t1.bitfield.reg64))
    return 1;

  i.error = register_type_mismatch;

  return 0;
}
1981
1982 static INLINE unsigned int
1983 register_number (const reg_entry *r)
1984 {
1985 unsigned int nr = r->reg_num;
1986
1987 if (r->reg_flags & RegRex)
1988 nr += 8;
1989
1990 if (r->reg_flags & RegVRex)
1991 nr += 16;
1992
1993 return nr;
1994 }
1995
1996 static INLINE unsigned int
1997 mode_from_disp_size (i386_operand_type t)
1998 {
1999 if (t.bitfield.disp8 || t.bitfield.vec_disp8)
2000 return 1;
2001 else if (t.bitfield.disp16
2002 || t.bitfield.disp32
2003 || t.bitfield.disp32s)
2004 return 2;
2005 else
2006 return 0;
2007 }
2008
2009 static INLINE int
2010 fits_in_signed_byte (addressT num)
2011 {
2012 return num + 0x80 <= 0xff;
2013 }
2014
2015 static INLINE int
2016 fits_in_unsigned_byte (addressT num)
2017 {
2018 return num <= 0xff;
2019 }
2020
2021 static INLINE int
2022 fits_in_unsigned_word (addressT num)
2023 {
2024 return num <= 0xffff;
2025 }
2026
2027 static INLINE int
2028 fits_in_signed_word (addressT num)
2029 {
2030 return num + 0x8000 <= 0xffff;
2031 }
2032
2033 static INLINE int
2034 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED)
2035 {
2036 #ifndef BFD64
2037 return 1;
2038 #else
2039 return num + 0x80000000 <= 0xffffffff;
2040 #endif
2041 } /* fits_in_signed_long() */
2042
2043 static INLINE int
2044 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED)
2045 {
2046 #ifndef BFD64
2047 return 1;
2048 #else
2049 return num <= 0xffffffff;
2050 #endif
2051 } /* fits_in_unsigned_long() */
2052
2053 static INLINE int
2054 fits_in_vec_disp8 (offsetT num)
2055 {
2056 int shift = i.memshift;
2057 unsigned int mask;
2058
2059 if (shift == -1)
2060 abort ();
2061
2062 mask = (1 << shift) - 1;
2063
2064 /* Return 0 if NUM isn't properly aligned. */
2065 if ((num & mask))
2066 return 0;
2067
2068 /* Check if NUM will fit in 8bit after shift. */
2069 return fits_in_signed_byte (num >> shift);
2070 }
2071
2072 static INLINE int
2073 fits_in_imm4 (offsetT num)
2074 {
2075 return (num & 0xf) == num;
2076 }
2077
/* Return the set of immediate operand types able to represent NUM.
   Imm64 is always included; progressively narrower types are added
   as NUM fits them.  Imm1 is only offered when not tuning for the
   i486 (see comment below).  */
static i386_operand_type
smallest_imm_type (offsetT num)
{
  i386_operand_type t;

  operand_type_set (&t, 0);
  t.bitfield.imm64 = 1;

  if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
    {
      /* This code is disabled on the 486 because all the Imm1 forms
	 in the opcode table are slower on the i486.  They're the
	 versions with the implicitly specified single-position
	 displacement, which has another syntax if you really want to
	 use that form.  */
      t.bitfield.imm1 = 1;
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
    {
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_long (num))
    {
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_long (num))
    t.bitfield.imm32 = 1;

  return t;
}
2131
/* Truncate VAL to SIZE bytes (1, 2, 4 or 8), warning when significant
   bits are discarded (i.e. when the dropped bits are neither all-zero
   nor a pure sign extension).  */
static offsetT
offset_in_range (offsetT val, int size)
{
  addressT mask;

  switch (size)
    {
    case 1: mask = ((addressT) 1 << 8) - 1; break;
    case 2: mask = ((addressT) 1 << 16) - 1; break;
    case 4: mask = ((addressT) 2 << 31) - 1; break;
#ifdef BFD64
    case 8: mask = ((addressT) 2 << 63) - 1; break;
#endif
    default: abort ();
    }

#ifdef BFD64
  /* If BFD64, sign extend val for 32bit address mode.  */
  if (flag_code != CODE_64BIT
      || i.prefix[ADDR_PREFIX])
    if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
      val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
#endif

  /* Warn unless the dropped high bits are all 0 or all 1 (plain
     sign extension).  */
  if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
    {
      char buf1[40], buf2[40];

      sprint_value (buf1, val);
      sprint_value (buf2, val & mask);
      as_warn (_("%s shortened to %s"), buf1, buf2);
    }
  return val & mask;
}
2166
/* Classification of instruction prefixes, returned by add_prefix.
   PREFIX_EXIST must be zero so callers can test it with `!ret'.  */
enum PREFIX_GROUP
{
  PREFIX_EXIST = 0,	/* A prefix of the same class was already present.  */
  PREFIX_LOCK,		/* LOCK prefix added.  */
  PREFIX_REP,		/* REP/REPNE prefix added.  */
  PREFIX_DS,		/* DS segment override added.  */
  PREFIX_OTHER		/* Any other prefix added.  */
};
2175
/* Returns
   a. PREFIX_EXIST if attempting to add a prefix where one from the
   same class already exists.
   b. PREFIX_LOCK if lock prefix is added.
   c. PREFIX_REP if rep/repne prefix is added.
   d. PREFIX_DS if ds prefix is added.
   e. PREFIX_OTHER if other prefix is added.
 */

static enum PREFIX_GROUP
add_prefix (unsigned int prefix)
{
  enum PREFIX_GROUP ret = PREFIX_OTHER;
  unsigned int q;

  /* REX prefixes are merged bit-wise into one slot; only a repeated
     REX.W or a second register-extension bit counts as a clash.  */
  if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
      && flag_code == CODE_64BIT)
    {
      if ((i.prefix[REX_PREFIX] & prefix & REX_W)
	  || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
	      && (prefix & (REX_R | REX_X | REX_B))))
	ret = PREFIX_EXIST;
      q = REX_PREFIX;
    }
  else
    {
      /* Map the prefix byte to its slot in i.prefix[].  */
      switch (prefix)
	{
	default:
	  abort ();

	case DS_PREFIX_OPCODE:
	  ret = PREFIX_DS;
	  /* Fall through.  */
	case CS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	  q = SEG_PREFIX;
	  break;

	case REPNE_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	  q = REP_PREFIX;
	  ret = PREFIX_REP;
	  break;

	case LOCK_PREFIX_OPCODE:
	  q = LOCK_PREFIX;
	  ret = PREFIX_LOCK;
	  break;

	case FWAIT_OPCODE:
	  q = WAIT_PREFIX;
	  break;

	case ADDR_PREFIX_OPCODE:
	  q = ADDR_PREFIX;
	  break;

	case DATA_PREFIX_OPCODE:
	  q = DATA_PREFIX;
	  break;
	}
      if (i.prefix[q] != 0)
	ret = PREFIX_EXIST;
    }

  /* Non-zero ret (i.e. not PREFIX_EXIST) means the prefix is new.  */
  if (ret)
    {
      if (!i.prefix[q])
	++i.prefixes;
      i.prefix[q] |= prefix;
    }
  else
    as_bad (_("same type of prefix used twice"));

  return ret;
}
2256
/* Switch the assembler to code size VALUE (CODE_16BIT / CODE_32BIT /
   CODE_64BIT) and keep the cpu64/cpuno64 bits of cpu_arch_flags in
   sync.  If the requested mode is not supported by the selected
   architecture, diagnose it: fatally when CHECK is non-zero, as a
   recoverable error otherwise.  */
static void
update_code_flag (int value, int check)
{
  PRINTF_LIKE ((*as_error));

  flag_code = (enum flag_code) value;
  if (flag_code == CODE_64BIT)
    {
      cpu_arch_flags.bitfield.cpu64 = 1;
      cpu_arch_flags.bitfield.cpuno64 = 0;
    }
  else
    {
      cpu_arch_flags.bitfield.cpu64 = 0;
      cpu_arch_flags.bitfield.cpuno64 = 1;
    }
  /* 64-bit mode requires the LM (long mode) feature bit.  */
  if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("64bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  /* 32-bit mode requires at least an i386.  */
  if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("32bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  stackop_size = '\0';
}
2293
/* Handler for the .code16/.code32/.code64 directives: change the code
   size, reporting unsupported combinations as non-fatal errors.  */
static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2299
/* Handler for the .code16gcc directive: 16-bit code that was compiled
   as 32-bit, so stack operations default to 32 bits (stackop_size).
   Only CODE_16BIT is valid here.  */
static void
set_16bit_gcc_code_flag (int new_code_flag)
{
  flag_code = (enum flag_code) new_code_flag;
  if (flag_code != CODE_16BIT)
    abort ();
  cpu_arch_flags.bitfield.cpu64 = 0;
  cpu_arch_flags.bitfield.cpuno64 = 1;
  stackop_size = LONG_MNEM_SUFFIX;
}
2310
2311 static void
2312 set_intel_syntax (int syntax_flag)
2313 {
2314 /* Find out if register prefixing is specified. */
2315 int ask_naked_reg = 0;
2316
2317 SKIP_WHITESPACE ();
2318 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2319 {
2320 char *string;
2321 int e = get_symbol_name (&string);
2322
2323 if (strcmp (string, "prefix") == 0)
2324 ask_naked_reg = 1;
2325 else if (strcmp (string, "noprefix") == 0)
2326 ask_naked_reg = -1;
2327 else
2328 as_bad (_("bad argument to syntax directive."));
2329 (void) restore_line_pointer (e);
2330 }
2331 demand_empty_rest_of_line ();
2332
2333 intel_syntax = syntax_flag;
2334
2335 if (ask_naked_reg == 0)
2336 allow_naked_reg = (intel_syntax
2337 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2338 else
2339 allow_naked_reg = (ask_naked_reg < 0);
2340
2341 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2342
2343 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2344 identifier_chars['$'] = intel_syntax ? '$' : 0;
2345 register_prefix = allow_naked_reg ? "" : "%";
2346 }
2347
/* Handler for the .intel_mnemonic/.att_mnemonic directives.  */
static void
set_intel_mnemonic (int mnemonic_flag)
{
  intel_mnemonic = mnemonic_flag;
}
2353
/* Handler for the .allow_index_reg/.disallow_index_reg directives.  */
static void
set_allow_index_reg (int flag)
{
  allow_index_reg = flag;
}
2359
2360 static void
2361 set_check (int what)
2362 {
2363 enum check_kind *kind;
2364 const char *str;
2365
2366 if (what)
2367 {
2368 kind = &operand_check;
2369 str = "operand";
2370 }
2371 else
2372 {
2373 kind = &sse_check;
2374 str = "sse";
2375 }
2376
2377 SKIP_WHITESPACE ();
2378
2379 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2380 {
2381 char *string;
2382 int e = get_symbol_name (&string);
2383
2384 if (strcmp (string, "none") == 0)
2385 *kind = check_none;
2386 else if (strcmp (string, "warning") == 0)
2387 *kind = check_warning;
2388 else if (strcmp (string, "error") == 0)
2389 *kind = check_error;
2390 else
2391 as_bad (_("bad argument to %s_check directive."), str);
2392 (void) restore_line_pointer (e);
2393 }
2394 else
2395 as_bad (_("missing argument for %s_check directive"), str);
2396
2397 demand_empty_rest_of_line ();
2398 }
2399
/* Diagnose use of the feature set NEW_FLAG (named NAME) when it is
   incompatible with the target machine: IAMCU, L1OM and K1OM object
   files may only contain their own instruction sets.  */
static void
check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
			   i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  static const char *arch;

  /* Intel MCU, L1OM and K1OM are only supported on ELF.  */
  if (!IS_ELF)
    return;

  if (!arch)
    {
      /* Use cpu_arch_name if it is set in md_parse_option.  Otherwise
	 use default_arch.  */
      arch = cpu_arch_name;
      if (!arch)
	arch = default_arch;
    }

  /* If we are targeting Intel MCU, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_IAMCU
      || new_flag.bitfield.cpuiamcu)
    return;

  /* If we are targeting Intel L1OM, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
      || new_flag.bitfield.cpul1om)
    return;

  /* If we are targeting Intel K1OM, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
      || new_flag.bitfield.cpuk1om)
    return;

  as_bad (_("`%s' is not supported on `%s'"), name, arch);
#endif
}
2438
/* Handler for the .arch directive: switch to a new base architecture
   (".arch i686"), or enable (".arch .sse4.1") / disable
   (".arch .nosse4.1") a single ISA extension.  An optional
   ",jumps"/",nojumps" modifier controls conditional-jump promotion.  */
static void
set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
{
  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string;
      int e = get_symbol_name (&string);
      unsigned int j;
      i386_cpu_flags flags;

      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (string, cpu_arch[j].name) == 0)
	    {
	      check_cpu_arch_compatible (string, cpu_arch[j].flags);

	      if (*string != '.')
		{
		  /* A full architecture name: reset flags, ISA and
		     (unless -mtune was given) the tuning target.  */
		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  if (flag_code == CODE_64BIT)
		    {
		      cpu_arch_flags.bitfield.cpu64 = 1;
		      cpu_arch_flags.bitfield.cpuno64 = 0;
		    }
		  else
		    {
		      cpu_arch_flags.bitfield.cpu64 = 0;
		      cpu_arch_flags.bitfield.cpuno64 = 1;
		    }
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}

	      /* A ".ext" entry: OR the extension's flags into the
		 current set.  */
	      flags = cpu_flags_or (cpu_arch_flags,
				    cpu_arch[j].flags);

	      if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		{
		  if (cpu_sub_arch_name)
		    {
		      char *name = cpu_sub_arch_name;
		      cpu_sub_arch_name = concat (name,
						  cpu_arch[j].name,
						  (const char *) NULL);
		      free (name);
		    }
		  else
		    cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		  cpu_arch_flags = flags;
		  cpu_arch_isa_flags = flags;
		}
	      (void) restore_line_pointer (e);
	      demand_empty_rest_of_line ();
	      return;
	    }
	}

      if (*string == '.' && j >= ARRAY_SIZE (cpu_arch))
	{
	  /* Disable an ISA extension.  */
	  for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
	    if (strcmp (string + 1, cpu_noarch [j].name) == 0)
	      {
		flags = cpu_flags_and_not (cpu_arch_flags,
					   cpu_noarch[j].flags);
		if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		  {
		    if (cpu_sub_arch_name)
		      {
			char *name = cpu_sub_arch_name;
			cpu_sub_arch_name = concat (name, string,
						    (const char *) NULL);
			free (name);
		      }
		    else
		      cpu_sub_arch_name = xstrdup (string);
		    cpu_arch_flags = flags;
		    cpu_arch_isa_flags = flags;
		  }
		(void) restore_line_pointer (e);
		demand_empty_rest_of_line ();
		return;
	      }

	  /* Not found among the disable names either.  */
	  j = ARRAY_SIZE (cpu_arch);
	}

      if (j >= ARRAY_SIZE (cpu_arch))
	as_bad (_("no such architecture: `%s'"), string);

      /* Put back the terminator clobbered by get_symbol_name.  */
      *input_line_pointer = e;
    }
  else
    as_bad (_("missing cpu architecture"));

  no_cond_jump_promotion = 0;
  if (*input_line_pointer == ','
      && !is_end_of_line[(unsigned char) input_line_pointer[1]])
    {
      char *string;
      char e;

      ++input_line_pointer;
      e = get_symbol_name (&string);

      if (strcmp (string, "nojumps") == 0)
	no_cond_jump_promotion = 1;
      else if (strcmp (string, "jumps") == 0)
	;
      else
	as_bad (_("no such architecture modifier: `%s'"), string);

      (void) restore_line_pointer (e);
    }

  demand_empty_rest_of_line ();
}
2566
2567 enum bfd_architecture
2568 i386_arch (void)
2569 {
2570 if (cpu_arch_isa == PROCESSOR_L1OM)
2571 {
2572 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2573 || flag_code != CODE_64BIT)
2574 as_fatal (_("Intel L1OM is 64bit ELF only"));
2575 return bfd_arch_l1om;
2576 }
2577 else if (cpu_arch_isa == PROCESSOR_K1OM)
2578 {
2579 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2580 || flag_code != CODE_64BIT)
2581 as_fatal (_("Intel K1OM is 64bit ELF only"));
2582 return bfd_arch_k1om;
2583 }
2584 else if (cpu_arch_isa == PROCESSOR_IAMCU)
2585 {
2586 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2587 || flag_code == CODE_64BIT)
2588 as_fatal (_("Intel MCU is 32bit ELF only"));
2589 return bfd_arch_iamcu;
2590 }
2591 else
2592 return bfd_arch_i386;
2593 }
2594
2595 unsigned long
2596 i386_mach (void)
2597 {
2598 if (!strncmp (default_arch, "x86_64", 6))
2599 {
2600 if (cpu_arch_isa == PROCESSOR_L1OM)
2601 {
2602 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2603 || default_arch[6] != '\0')
2604 as_fatal (_("Intel L1OM is 64bit ELF only"));
2605 return bfd_mach_l1om;
2606 }
2607 else if (cpu_arch_isa == PROCESSOR_K1OM)
2608 {
2609 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2610 || default_arch[6] != '\0')
2611 as_fatal (_("Intel K1OM is 64bit ELF only"));
2612 return bfd_mach_k1om;
2613 }
2614 else if (default_arch[6] == '\0')
2615 return bfd_mach_x86_64;
2616 else
2617 return bfd_mach_x64_32;
2618 }
2619 else if (!strcmp (default_arch, "i386")
2620 || !strcmp (default_arch, "iamcu"))
2621 {
2622 if (cpu_arch_isa == PROCESSOR_IAMCU)
2623 {
2624 if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
2625 as_fatal (_("Intel MCU is 32bit ELF only"));
2626 return bfd_mach_i386_iamcu;
2627 }
2628 else
2629 return bfd_mach_i386_i386;
2630 }
2631 else
2632 as_fatal (_("unknown architecture"));
2633 }
2634 \f
/* Called once at assembler startup.  Builds the opcode and register
   hash tables, fills in the lexical character classification tables,
   and sets the DWARF CFI parameters for the selected code size.  */
void
md_begin (void)
{
  const char *hash_err;

  /* Support pseudo prefixes like {disp32}.  */
  lex_type ['{'] = LEX_BEGIN_NAME;

  /* Initialize op_hash hash table.  */
  op_hash = hash_new ();

  {
    const insn_template *optab;
    templates *core_optab;

    /* Setup for loop.  */
    optab = i386_optab;
    core_optab = XNEW (templates);
    core_optab->start = optab;

    /* i386_optab is sorted by mnemonic; group consecutive templates
       with the same name into one hash entry.  */
    while (1)
      {
	++optab;
	if (optab->name == NULL
	    || strcmp (optab->name, (optab - 1)->name) != 0)
	  {
	    /* different name --> ship out current template list;
	       add to hash table; & begin anew.  */
	    core_optab->end = optab;
	    hash_err = hash_insert (op_hash,
				    (optab - 1)->name,
				    (void *) core_optab);
	    if (hash_err)
	      {
		as_fatal (_("can't hash %s: %s"),
			  (optab - 1)->name,
			  hash_err);
	      }
	    if (optab->name == NULL)
	      break;
	    core_optab = XNEW (templates);
	    core_optab->start = optab;
	  }
      }
  }

  /* Initialize reg_hash hash table.  */
  reg_hash = hash_new ();
  {
    const reg_entry *regtab;
    unsigned int regtab_size = i386_regtab_size;

    for (regtab = i386_regtab; regtab_size--; regtab++)
      {
	hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
	if (hash_err)
	  as_fatal (_("can't hash %s: %s"),
		    regtab->reg_name,
		    hash_err);
      }
  }

  /* Fill in lexical tables: mnemonic_chars, operand_chars.  */
  {
    int c;
    char *p;

    for (c = 0; c < 256; c++)
      {
	if (ISDIGIT (c))
	  {
	    digit_chars[c] = c;
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISLOWER (c))
	  {
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISUPPER (c))
	  {
	    /* Mnemonics and register names match case-insensitively,
	       so fold upper case to lower here.  */
	    mnemonic_chars[c] = TOLOWER (c);
	    register_chars[c] = mnemonic_chars[c];
	    operand_chars[c] = c;
	  }
	else if (c == '{' || c == '}')
	  {
	    mnemonic_chars[c] = c;
	    operand_chars[c] = c;
	  }

	if (ISALPHA (c) || ISDIGIT (c))
	  identifier_chars[c] = c;
	else if (c >= 128)
	  {
	    identifier_chars[c] = c;
	    operand_chars[c] = c;
	  }
      }

#ifdef LEX_AT
    identifier_chars['@'] = '@';
#endif
#ifdef LEX_QM
    identifier_chars['?'] = '?';
    operand_chars['?'] = '?';
#endif
    digit_chars['-'] = '-';
    mnemonic_chars['_'] = '_';
    mnemonic_chars['-'] = '-';
    mnemonic_chars['.'] = '.';
    identifier_chars['_'] = '_';
    identifier_chars['.'] = '.';

    for (p = operand_special_chars; *p != '\0'; p++)
      operand_chars[(unsigned char) *p] = *p;
  }

  if (flag_code == CODE_64BIT)
    {
#if defined (OBJ_COFF) && defined (TE_PE)
      x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
				  ? 32 : 16);
#else
      x86_dwarf2_return_column = 16;
#endif
      x86_cie_data_alignment = -8;
    }
  else
    {
      x86_dwarf2_return_column = 8;
      x86_cie_data_alignment = -4;
    }
}
2772
/* Print hash-table statistics for the opcode and register tables to
   FILE (invoked via gas's --statistics option).  */
void
i386_print_statistics (FILE *file)
{
  hash_print_statistics (file, "i386 opcode", op_hash);
  hash_print_statistics (file, "i386 register", reg_hash);
}
2779 \f
2780 #ifdef DEBUG386
2781
/* Debugging routines for md_assemble.  */
static void pte (insn_template *);
static void pt (i386_operand_type);
static void pe (expressionS *);
static void ps (symbolS *);

/* Dump the fully-parsed instruction X, labelled with LINE, to stdout:
   template, addressing registers, ModRM/SIB/REX fields and each
   operand's type and value.  */
static void
pi (char *line, i386_insn *x)
{
  unsigned int j;

  fprintf (stdout, "%s: template ", line);
  pte (&x->tm);
  fprintf (stdout, " address: base %s index %s scale %x\n",
	   x->base_reg ? x->base_reg->reg_name : "none",
	   x->index_reg ? x->index_reg->reg_name : "none",
	   x->log2_scale_factor);
  fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
	   x->rm.mode, x->rm.reg, x->rm.regmem);
  fprintf (stdout, " sib: base %x index %x scale %x\n",
	   x->sib.base, x->sib.index, x->sib.scale);
  fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
	   (x->rex & REX_W) != 0,
	   (x->rex & REX_R) != 0,
	   (x->rex & REX_X) != 0,
	   (x->rex & REX_B) != 0);
  for (j = 0; j < x->operands; j++)
    {
      fprintf (stdout, " #%d: ", j + 1);
      pt (x->types[j]);
      fprintf (stdout, "\n");
      /* Register-class operands print the register's name.  */
      if (x->types[j].bitfield.reg8
	  || x->types[j].bitfield.reg16
	  || x->types[j].bitfield.reg32
	  || x->types[j].bitfield.reg64
	  || x->types[j].bitfield.regmmx
	  || x->types[j].bitfield.regxmm
	  || x->types[j].bitfield.regymm
	  || x->types[j].bitfield.regzmm
	  || x->types[j].bitfield.sreg2
	  || x->types[j].bitfield.sreg3
	  || x->types[j].bitfield.control
	  || x->types[j].bitfield.debug
	  || x->types[j].bitfield.test)
	fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
      if (operand_type_check (x->types[j], imm))
	pe (x->op[j].imms);
      if (operand_type_check (x->types[j], disp))
	pe (x->op[j].disps);
    }
}
2833
/* Dump opcode template T (operand count, opcodes, D/W modifiers and
   the type of each operand) to stdout.  */
static void
pte (insn_template *t)
{
  unsigned int j;
  fprintf (stdout, " %d operands ", t->operands);
  fprintf (stdout, "opcode %x ", t->base_opcode);
  if (t->extension_opcode != None)
    fprintf (stdout, "ext %x ", t->extension_opcode);
  if (t->opcode_modifier.d)
    fprintf (stdout, "D");
  if (t->opcode_modifier.w)
    fprintf (stdout, "W");
  fprintf (stdout, "\n");
  for (j = 0; j < t->operands; j++)
    {
      fprintf (stdout, " #%d type ", j + 1);
      pt (t->operand_types[j]);
      fprintf (stdout, "\n");
    }
}
2854
/* Dump expression E (operation, constant part, and any attached
   symbols) to stdout.  */
static void
pe (expressionS *e)
{
  fprintf (stdout, " operation %d\n", e->X_op);
  fprintf (stdout, " add_number %ld (%lx)\n",
	   (long) e->X_add_number, (long) e->X_add_number);
  if (e->X_add_symbol)
    {
      fprintf (stdout, " add_symbol ");
      ps (e->X_add_symbol);
      fprintf (stdout, "\n");
    }
  if (e->X_op_symbol)
    {
      fprintf (stdout, " op_symbol ");
      ps (e->X_op_symbol);
      fprintf (stdout, "\n");
    }
}
2874
/* Dump symbol S (name, external flag and segment) to stdout.  */
static void
ps (symbolS *s)
{
  fprintf (stdout, "%s type %s%s",
	   S_GET_NAME (s),
	   S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
	   segment_name (S_GET_SEGMENT (s)));
}
2883
2884 static struct type_name
2885 {
2886 i386_operand_type mask;
2887 const char *name;
2888 }
2889 const type_names[] =
2890 {
2891 { OPERAND_TYPE_REG8, "r8" },
2892 { OPERAND_TYPE_REG16, "r16" },
2893 { OPERAND_TYPE_REG32, "r32" },
2894 { OPERAND_TYPE_REG64, "r64" },
2895 { OPERAND_TYPE_IMM8, "i8" },
2896 { OPERAND_TYPE_IMM8, "i8s" },
2897 { OPERAND_TYPE_IMM16, "i16" },
2898 { OPERAND_TYPE_IMM32, "i32" },
2899 { OPERAND_TYPE_IMM32S, "i32s" },
2900 { OPERAND_TYPE_IMM64, "i64" },
2901 { OPERAND_TYPE_IMM1, "i1" },
2902 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2903 { OPERAND_TYPE_DISP8, "d8" },
2904 { OPERAND_TYPE_DISP16, "d16" },
2905 { OPERAND_TYPE_DISP32, "d32" },
2906 { OPERAND_TYPE_DISP32S, "d32s" },
2907 { OPERAND_TYPE_DISP64, "d64" },
2908 { OPERAND_TYPE_VEC_DISP8, "Vector d8" },
2909 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2910 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2911 { OPERAND_TYPE_CONTROL, "control reg" },
2912 { OPERAND_TYPE_TEST, "test reg" },
2913 { OPERAND_TYPE_DEBUG, "debug reg" },
2914 { OPERAND_TYPE_FLOATREG, "FReg" },
2915 { OPERAND_TYPE_FLOATACC, "FAcc" },
2916 { OPERAND_TYPE_SREG2, "SReg2" },
2917 { OPERAND_TYPE_SREG3, "SReg3" },
2918 { OPERAND_TYPE_ACC, "Acc" },
2919 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2920 { OPERAND_TYPE_REGMMX, "rMMX" },
2921 { OPERAND_TYPE_REGXMM, "rXMM" },
2922 { OPERAND_TYPE_REGYMM, "rYMM" },
2923 { OPERAND_TYPE_REGZMM, "rZMM" },
2924 { OPERAND_TYPE_REGMASK, "Mask reg" },
2925 { OPERAND_TYPE_ESSEG, "es" },
2926 };
2927
2928 static void
2929 pt (i386_operand_type t)
2930 {
2931 unsigned int j;
2932 i386_operand_type a;
2933
2934 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2935 {
2936 a = operand_type_and (t, type_names[j].mask);
2937 if (!operand_type_all_zero (&a))
2938 fprintf (stdout, "%s, ", type_names[j].name);
2939 }
2940 fflush (stdout);
2941 }
2942
2943 #endif /* DEBUG386 */
2944 \f
2945 static bfd_reloc_code_real_type
2946 reloc (unsigned int size,
2947 int pcrel,
2948 int sign,
2949 bfd_reloc_code_real_type other)
2950 {
2951 if (other != NO_RELOC)
2952 {
2953 reloc_howto_type *rel;
2954
2955 if (size == 8)
2956 switch (other)
2957 {
2958 case BFD_RELOC_X86_64_GOT32:
2959 return BFD_RELOC_X86_64_GOT64;
2960 break;
2961 case BFD_RELOC_X86_64_GOTPLT64:
2962 return BFD_RELOC_X86_64_GOTPLT64;
2963 break;
2964 case BFD_RELOC_X86_64_PLTOFF64:
2965 return BFD_RELOC_X86_64_PLTOFF64;
2966 break;
2967 case BFD_RELOC_X86_64_GOTPC32:
2968 other = BFD_RELOC_X86_64_GOTPC64;
2969 break;
2970 case BFD_RELOC_X86_64_GOTPCREL:
2971 other = BFD_RELOC_X86_64_GOTPCREL64;
2972 break;
2973 case BFD_RELOC_X86_64_TPOFF32:
2974 other = BFD_RELOC_X86_64_TPOFF64;
2975 break;
2976 case BFD_RELOC_X86_64_DTPOFF32:
2977 other = BFD_RELOC_X86_64_DTPOFF64;
2978 break;
2979 default:
2980 break;
2981 }
2982
2983 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2984 if (other == BFD_RELOC_SIZE32)
2985 {
2986 if (size == 8)
2987 other = BFD_RELOC_SIZE64;
2988 if (pcrel)
2989 {
2990 as_bad (_("there are no pc-relative size relocations"));
2991 return NO_RELOC;
2992 }
2993 }
2994 #endif
2995
2996 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2997 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2998 sign = -1;
2999
3000 rel = bfd_reloc_type_lookup (stdoutput, other);
3001 if (!rel)
3002 as_bad (_("unknown relocation (%u)"), other);
3003 else if (size != bfd_get_reloc_size (rel))
3004 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3005 bfd_get_reloc_size (rel),
3006 size);
3007 else if (pcrel && !rel->pc_relative)
3008 as_bad (_("non-pc-relative relocation for pc-relative field"));
3009 else if ((rel->complain_on_overflow == complain_overflow_signed
3010 && !sign)
3011 || (rel->complain_on_overflow == complain_overflow_unsigned
3012 && sign > 0))
3013 as_bad (_("relocated field and relocation type differ in signedness"));
3014 else
3015 return other;
3016 return NO_RELOC;
3017 }
3018
3019 if (pcrel)
3020 {
3021 if (!sign)
3022 as_bad (_("there are no unsigned pc-relative relocations"));
3023 switch (size)
3024 {
3025 case 1: return BFD_RELOC_8_PCREL;
3026 case 2: return BFD_RELOC_16_PCREL;
3027 case 4: return BFD_RELOC_32_PCREL;
3028 case 8: return BFD_RELOC_64_PCREL;
3029 }
3030 as_bad (_("cannot do %u byte pc-relative relocation"), size);
3031 }
3032 else
3033 {
3034 if (sign > 0)
3035 switch (size)
3036 {
3037 case 4: return BFD_RELOC_X86_64_32S;
3038 }
3039 else
3040 switch (size)
3041 {
3042 case 1: return BFD_RELOC_8;
3043 case 2: return BFD_RELOC_16;
3044 case 4: return BFD_RELOC_32;
3045 case 8: return BFD_RELOC_64;
3046 }
3047 as_bad (_("cannot do %s %u byte relocation"),
3048 sign > 0 ? "signed" : "unsigned", size);
3049 }
3050
3051 return NO_RELOC;
3052 }
3053
/* Here we decide which fixups can be adjusted to make them relative to
   the beginning of the section instead of the symbol.  Basically we need
   to make sure that the dynamic relocations are done correctly, so in
   some cases we force the original symbol to be used.  Returns non-zero
   when adjusting FIXP is safe.  */

int
tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (!IS_ELF)
    return 1;

  /* Don't adjust pc-relative references to merge sections in 64-bit
     mode.  */
  if (use_rela_relocations
      && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
      && fixP->fx_pcrel)
    return 0;

  /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
     and changed later by validate_fix.  */
  if (GOT_symbol && fixP->fx_subsy == GOT_symbol
      && fixP->fx_r_type == BFD_RELOC_32_PCREL)
    return 0;

  /* Adjust_reloc_syms doesn't know about the GOT.  Need to keep symbol
     for size relocations.  Likewise for any GOT/PLT/TLS/vtable reloc,
     whose dynamic semantics depend on the original symbol.  */
  if (fixP->fx_r_type == BFD_RELOC_SIZE32
      || fixP->fx_r_type == BFD_RELOC_SIZE64
      || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_386_PLT32
      || fixP->fx_r_type == BFD_RELOC_386_GOT32
      || fixP->fx_r_type == BFD_RELOC_386_GOT32X
      || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
      || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
      || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
      || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
      || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
      || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
      || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCRELX
      || fixP->fx_r_type == BFD_RELOC_X86_64_REX_GOTPCRELX
      || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
      || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
      || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
      || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
      || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
      || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
      || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
      || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
      || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return 0;
#endif
  return 1;
}
3118
/* Classify the x87 mnemonic MNEMONIC for Intel-syntax memory operand
   handling.  Returns 0 for non-FPU mnemonics (and fxsave/fxrstor),
   1 for ordinary floating point operations, 2 for integer operations
   (fi*), and 3 for control/state operations (fldcw, fnstsw, fsave...).
   The value returned is meaningful only for opcodes with (memory)
   operands, so opcodes without operands may be classified loosely.  */
static int
intel_float_operand (const char *mnemonic)
{
  if (mnemonic[0] != 'f')
    return 0;			/* non-math */

  /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
     the fs segment override prefix not currently handled because no
     call path can make opcodes without operands get here.  */
  switch (mnemonic[1])
    {
    case 'i':
      return 2;			/* fi*: integer operand */

    case 'l':
      /* fldcw/fldenv are control ops; fld/fld1/fldpi etc. are not.  */
      return (mnemonic[2] == 'd'
	      && (mnemonic[3] == 'c' || mnemonic[3] == 'e')) ? 3 : 1;

    case 'n':
      /* Every fn* non-waiting control op except fnop.  */
      return mnemonic[2] != 'o' ? 3 : 1;

    case 'r':
      return mnemonic[2] == 's' ? 3 : 1;	/* frstor/frstpm */

    case 's':
      if (mnemonic[2] == 'a')
	return 3;		/* fsave */
      if (mnemonic[2] == 't')
	switch (mnemonic[3])
	  {
	  case 'c':		/* fstcw */
	  case 'd':		/* fstdw */
	  case 'e':		/* fstenv */
	  case 's':		/* fsts[gw] */
	    return 3;
	  }
      return 1;

    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0;		/* fxsave/fxrstor are not really math ops */
      return 1;
    }

  return 1;
}
3171
/* Build the VEX prefix for the instruction in `i', using template T
   for the possible 2-byte-form operand swap.  Fills in i.vex.  */

static void
build_vex_prefix (const insn_template *t)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int vector_length;

  /* Check register specifier (the vvvv field, stored inverted).  */
  if (i.vex.register_specifier)
    {
      register_specifier =
	~register_number (i.vex.register_specifier) & 0xf;
      gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
    }
  else
    register_specifier = 0xf;

  /* Use 2-byte VEX prefix by swapping destination and source
     operand.  */
  if (i.vec_encoding != vex_encoding_vex3
      && i.dir_encoding == dir_encoding_default
      && i.operands == i.reg_operands
      && i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.load
      && i.rex == REX_B)
    {
      unsigned int xchg = i.operands - 1;
      union i386_op temp_op;
      i386_operand_type temp_type;

      /* Swap first and last operand.  */
      temp_type = i.types[xchg];
      i.types[xchg] = i.types[0];
      i.types[0] = temp_type;
      temp_op = i.op[xchg];
      i.op[xchg] = i.op[0];
      i.op[0] = temp_op;

      gas_assert (i.rm.mode == 3);

      /* REX_B moves to REX_R, which the 2-byte form can encode.  */
      i.rex = REX_R;
      xchg = i.rm.regmem;
      i.rm.regmem = i.rm.reg;
      i.rm.reg = xchg;

      /* Use the next insn.  */
      i.tm = t[1];
    }

  if (i.tm.opcode_modifier.vex == VEXScalar)
    vector_length = avxscalar;
  else
    vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;

  /* The implied legacy prefix (none/66/F3/F2) is encoded in bits 8-15
     of the base opcode.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* Use 2-byte VEX prefix if possible.  */
  if (i.vec_encoding != vex_encoding_vex3
      && i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.vexw != VEXW1
      && (i.rex & (REX_W | REX_X | REX_B)) == 0)
    {
      /* 2-byte VEX prefix.  */
      unsigned int r;

      i.vex.length = 2;
      i.vex.bytes[0] = 0xc5;

      /* Check the REX.R bit (stored inverted in VEX).  */
      r = (i.rex & REX_R) ? 0 : 1;
      i.vex.bytes[1] = (r << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
  else
    {
      /* 3-byte VEX prefix.  */
      unsigned int m, w;

      i.vex.length = 3;

      switch (i.tm.opcode_modifier.vexopcode)
	{
	case VEX0F:
	  m = 0x1;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F38:
	  m = 0x2;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F3A:
	  m = 0x3;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case XOP08:
	  m = 0x8;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP09:
	  m = 0x9;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP0A:
	  m = 0xa;
	  i.vex.bytes[0] = 0x8f;
	  break;
	default:
	  abort ();
	}

      /* The high 3 bits of the second VEX byte are 1's complement
	 of RXB bits from REX.  */
      i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

      /* Check the REX.W bit.  */
      w = (i.rex & REX_W) ? 1 : 0;
      if (i.tm.opcode_modifier.vexw == VEXW1)
	w = 1;

      i.vex.bytes[2] = (w << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
}
3316
/* Build the EVEX prefix.  Encodes the 4-byte EVEX prefix (0x62 + three
   payload bytes) into i.vex from the matched template and the operand
   information previously gathered in the global `i'.  */

static void
build_evex_prefix (void)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int m, w;
  rex_byte vrex_used = 0;

  /* Check register specifier.  */
  if (i.vex.register_specifier)
    {
      gas_assert ((i.vrex & REX_X) == 0);

      register_specifier = i.vex.register_specifier->reg_num;
      if ((i.vex.register_specifier->reg_flags & RegRex))
	register_specifier += 8;
      /* The upper 16 registers are encoded in the fourth byte of the
	 EVEX prefix.  */
      if (!(i.vex.register_specifier->reg_flags & RegVRex))
	i.vex.bytes[3] = 0x8;
      /* vvvv field is stored in 1's complement form.  */
      register_specifier = ~register_specifier & 0xf;
    }
  else
    {
      /* No vvvv register: all-ones per the encoding rules.  */
      register_specifier = 0xf;

      /* Encode upper 16 vector index register in the fourth byte of
	 the EVEX prefix.  */
      if (!(i.vrex & REX_X))
	i.vex.bytes[3] = 0x8;
      else
	vrex_used |= REX_X;
    }

  /* The legacy SIMD prefix (none/66/F3/F2) is folded into the "pp"
     bits; recover it from the second opcode byte of the template.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* 4 byte EVEX prefix.  */
  i.vex.length = 4;
  i.vex.bytes[0] = 0x62;

  /* mmmm bits.  */
  switch (i.tm.opcode_modifier.vexopcode)
    {
    case VEX0F:
      m = 1;
      break;
    case VEX0F38:
      m = 2;
      break;
    case VEX0F3A:
      m = 3;
      break;
    default:
      abort ();
      break;
    }

  /* The high 3 bits of the second EVEX byte are 1's complement of RXB
     bits from REX.  */
  i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

  /* The fifth bit of the second EVEX byte is 1's complement of the
     REX_R bit in VREX.  */
  if (!(i.vrex & REX_R))
    i.vex.bytes[1] |= 0x10;
  else
    vrex_used |= REX_R;

  if ((i.reg_operands + i.imm_operands) == i.operands)
    {
      /* When all operands are registers, the REX_X bit in REX is not
	 used.  We reuse it to encode the upper 16 registers, which is
	 indicated by the REX_B bit in VREX.  The REX_X bit is encoded
	 as 1's complement.  */
      if ((i.vrex & REX_B))
	{
	  vrex_used |= REX_B;
	  i.vex.bytes[1] &= ~0x40;
	}
    }

  /* EVEX instructions shouldn't need the REX prefix.  */
  i.vrex &= ~vrex_used;
  gas_assert (i.vrex == 0);

  /* Check the REX.W bit.  */
  w = (i.rex & REX_W) ? 1 : 0;
  if (i.tm.opcode_modifier.vexw)
    {
      if (i.tm.opcode_modifier.vexw == VEXW1)
	w = 1;
    }
  /* If w is not set it means we are dealing with WIG instruction.  */
  else if (!w)
    {
      if (evexwig == evexw1)
	w = 1;
    }

  /* Encode the U bit.  */
  implied_prefix |= 0x4;

  /* The third byte of the EVEX prefix.  */
  i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);

  /* The fourth byte of the EVEX prefix.  */
  /* The zeroing-masking bit.  */
  if (i.mask && i.mask->zeroing)
    i.vex.bytes[3] |= 0x80;

  /* Don't always set the broadcast bit if there is no RC.  */
  if (!i.rounding)
    {
      /* Encode the vector length.  */
      unsigned int vec_length;

      switch (i.tm.opcode_modifier.evex)
	{
	case EVEXLIG: /* LL' is ignored */
	  vec_length = evexlig << 5;
	  break;
	case EVEX128:
	  vec_length = 0 << 5;
	  break;
	case EVEX256:
	  vec_length = 1 << 5;
	  break;
	case EVEX512:
	  vec_length = 2 << 5;
	  break;
	default:
	  abort ();
	  break;
	}
      i.vex.bytes[3] |= vec_length;
      /* Encode the broadcast bit.  */
      if (i.broadcast)
	i.vex.bytes[3] |= 0x10;
    }
  else
    {
      /* With rounding control, the b bit is set and LL' carries the
	 RC value (or the configured static value for SAE-only).  */
      if (i.rounding->type != saeonly)
	i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
      else
	i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
    }

  /* The aaa (opmask register) field.  */
  if (i.mask && i.mask->mask)
    i.vex.bytes[3] |= i.mask->mask->reg_num;
}
3485
/* Turn a template's opcode-extension "suffix" into a fake 8-bit
   immediate operand, after validating (and then discarding) any fixed
   register operands some instructions carry purely for documentation
   purposes (MONITOR/MWAIT, SVME, MONITORX/MWAITX).  */

static void
process_immext (void)
{
  expressionS *exp;

  if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
      && i.operands > 0)
    {
      /* MONITOR/MWAIT as well as SVME instructions have fixed operands
	 with an opcode suffix which is coded in the same place as an
	 8-bit immediate field would be.
	 Here we check those operands and remove them afterwards.  */
      unsigned int x;

      /* The fixed operands must be register number X in position X
	 (i.e. the exact registers the hardware implies).  */
      for (x = 0; x < i.operands; x++)
	if (register_number (i.op[x].regs) != x)
	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
		  register_prefix, i.op[x].regs->reg_name, x + 1,
		  i.tm.name);

      i.operands = 0;
    }

  if (i.tm.cpu_flags.bitfield.cpumwaitx && i.operands > 0)
    {
      /* MONITORX/MWAITX instructions have fixed operands with an opcode
	 suffix which is coded in the same place as an 8-bit immediate
	 field would be.
	 Here we check those operands and remove them afterwards.  */
      unsigned int x;

      if (i.operands != 3)
	abort();

      for (x = 0; x < 2; x++)
	if (register_number (i.op[x].regs) != x)
	  goto bad_register_operand;

      /* Check for third operand for mwaitx/monitorx insn.  */
      /* mwaitx is distinguished here by extension opcode 0xfb, which
	 shifts the expected register number for the third operand.  */
      if (register_number (i.op[x].regs)
	  != (x + (i.tm.extension_opcode == 0xfb)))
	{
	bad_register_operand:
	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
		  register_prefix, i.op[x].regs->reg_name, x+1,
		  i.tm.name);
	}

      i.operands = 0;
    }

  /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
     which is coded in the same place as an 8-bit immediate field
     would be.  Here we fake an 8-bit immediate operand from the
     opcode suffix stored in tm.extension_opcode.

     AVX instructions also use this encoding, for some of
     3 argument instructions.  */

  gas_assert (i.imm_operands <= 1
	      && (i.operands <= 2
		  || ((i.tm.opcode_modifier.vex
		       || i.tm.opcode_modifier.evex)
		      && i.operands <= 4)));

  /* Append the synthetic Imm8 operand carrying the extension value.  */
  exp = &im_expressions[i.imm_operands++];
  i.op[i.operands].imms = exp;
  i.types[i.operands] = imm8;
  i.operands++;
  exp->X_op = O_constant;
  exp->X_add_number = i.tm.extension_opcode;
  /* The extension has been consumed; clear it so that it isn't also
     encoded via /r later on.  */
  i.tm.extension_opcode = None;
}
3559
3560
3561 static int
3562 check_hle (void)
3563 {
3564 switch (i.tm.opcode_modifier.hleprefixok)
3565 {
3566 default:
3567 abort ();
3568 case HLEPrefixNone:
3569 as_bad (_("invalid instruction `%s' after `%s'"),
3570 i.tm.name, i.hle_prefix);
3571 return 0;
3572 case HLEPrefixLock:
3573 if (i.prefix[LOCK_PREFIX])
3574 return 1;
3575 as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
3576 return 0;
3577 case HLEPrefixAny:
3578 return 1;
3579 case HLEPrefixRelease:
3580 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3581 {
3582 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3583 i.tm.name);
3584 return 0;
3585 }
3586 if (i.mem_operands == 0
3587 || !operand_type_check (i.types[i.operands - 1], anymem))
3588 {
3589 as_bad (_("memory destination needed for instruction `%s'"
3590 " after `xrelease'"), i.tm.name);
3591 return 0;
3592 }
3593 return 1;
3594 }
3595 }
3596
/* This is the guts of the machine-dependent assembler.  LINE points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.

   Pipeline: reset per-insn state, parse mnemonic and operands,
   normalize operand order and sizes, match a template, validate
   prefixes, finalize operand types, build VEX/EVEX or REX prefixes
   as needed, and finally emit the encoded instruction.  */

void
md_assemble (char *line)
{
  unsigned int j;
  char mnemonic[MAX_MNEM_SIZE], mnem_suffix;
  const insn_template *t;

  /* Initialize globals.  */
  memset (&i, '\0', sizeof (i));
  for (j = 0; j < MAX_OPERANDS; j++)
    i.reloc[j] = NO_RELOC;
  memset (disp_expressions, '\0', sizeof (disp_expressions));
  memset (im_expressions, '\0', sizeof (im_expressions));
  save_stack_p = save_stack;

  /* First parse an instruction mnemonic & call i386_operand for the operands.
     We assume that the scrubber has arranged it so that line[0] is the valid
     start of a (possibly prefixed) mnemonic.  */

  line = parse_insn (line, mnemonic);
  if (line == NULL)
    return;
  mnem_suffix = i.suffix;

  line = parse_operands (line, mnemonic);
  this_operand = -1;
  xfree (i.memop1_string);
  i.memop1_string = NULL;
  if (line == NULL)
    return;

  /* Now we've parsed the mnemonic into a set of templates, and have the
     operands at hand.  */

  /* All intel opcodes have reversed operands except for "bound" and
     "enter".  We also don't reverse intersegment "jmp" and "call"
     instructions with 2 immediate operands so that the immediate segment
     precedes the offset, as it does when in AT&T mode. */
  if (intel_syntax
      && i.operands > 1
      && (strcmp (mnemonic, "bound") != 0)
      && (strcmp (mnemonic, "invlpga") != 0)
      && !(operand_type_check (i.types[0], imm)
	   && operand_type_check (i.types[1], imm)))
    swap_operands ();

  /* The order of the immediates should be reversed
     for 2 immediates extrq and insertq instructions */
  if (i.imm_operands == 2
      && (strcmp (mnemonic, "extrq") == 0
	  || strcmp (mnemonic, "insertq") == 0))
      swap_2_operands (0, 1);

  if (i.imm_operands)
    optimize_imm ();

  /* Don't optimize displacement for movabs since it only takes 64bit
     displacement.  */
  if (i.disp_operands
      && i.disp_encoding != disp_encoding_32bit
      && (flag_code != CODE_64BIT
	  || strcmp (mnemonic, "movabs") != 0))
    optimize_disp ();

  /* Next, we find a template that matches the given insn,
     making sure the overlap of the given operands types is consistent
     with the template operand types.  */

  if (!(t = match_template (mnem_suffix)))
    return;

  if (sse_check != check_none
      && !i.tm.opcode_modifier.noavx
      && (i.tm.cpu_flags.bitfield.cpusse
	  || i.tm.cpu_flags.bitfield.cpusse2
	  || i.tm.cpu_flags.bitfield.cpusse3
	  || i.tm.cpu_flags.bitfield.cpussse3
	  || i.tm.cpu_flags.bitfield.cpusse4_1
	  || i.tm.cpu_flags.bitfield.cpusse4_2))
    {
      (sse_check == check_warning
       ? as_warn
       : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
    }

  /* Zap movzx and movsx suffix.  The suffix has been set from
     "word ptr" or "byte ptr" on the source operand in Intel syntax
     or extracted from mnemonic in AT&T syntax.  But we'll use
     the destination register to choose the suffix for encoding.  */
  /* Masking with ~9 covers 0x0fb6/0x0fb7 (movzx) and 0x0fbe/0x0fbf
     (movsx).  */
  if ((i.tm.base_opcode & ~9) == 0x0fb6)
    {
      /* In Intel syntax, there must be a suffix.  In AT&T syntax, if
	 there is no suffix, the default will be byte extension.  */
      if (i.reg_operands != 2
	  && !i.suffix
	  && intel_syntax)
	as_bad (_("ambiguous operand size for `%s'"), i.tm.name);

      i.suffix = 0;
    }

  if (i.tm.opcode_modifier.fwait)
    if (!add_prefix (FWAIT_OPCODE))
      return;

  /* Check if REP prefix is OK.  */
  if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
    {
      as_bad (_("invalid instruction `%s' after `%s'"),
		i.tm.name, i.rep_prefix);
      return;
    }

  /* Check for lock without a lockable instruction.  Destination operand
     must be memory unless it is xchg (0x86).  */
  if (i.prefix[LOCK_PREFIX]
      && (!i.tm.opcode_modifier.islockable
	  || i.mem_operands == 0
	  || (i.tm.base_opcode != 0x86
	      && !operand_type_check (i.types[i.operands - 1], anymem))))
    {
      as_bad (_("expecting lockable instruction after `lock'"));
      return;
    }

  /* Check if HLE prefix is OK.  */
  if (i.hle_prefix && !check_hle ())
    return;

  /* Check BND prefix.  */
  if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
    as_bad (_("expecting valid branch instruction after `bnd'"));

  /* Check NOTRACK prefix.  */
  if (i.notrack_prefix && !i.tm.opcode_modifier.notrackprefixok)
    as_bad (_("expecting indirect branch instruction after `notrack'"));

  if (i.tm.cpu_flags.bitfield.cpumpx)
    {
      if (flag_code == CODE_64BIT && i.prefix[ADDR_PREFIX])
	as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
      else if (flag_code != CODE_16BIT
	       ? i.prefix[ADDR_PREFIX]
	       : i.mem_operands && !i.prefix[ADDR_PREFIX])
	as_bad (_("16-bit address isn't allowed in MPX instructions"));
    }

  /* Insert BND prefix.  */
  if (add_bnd_prefix
      && i.tm.opcode_modifier.bndprefixok
      && !i.prefix[BND_PREFIX])
    add_prefix (BND_PREFIX_OPCODE);

  /* Check string instruction segment overrides.  */
  if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
    {
      if (!check_string ())
	return;
      i.disp_operands = 0;
    }

  if (!process_suffix ())
    return;

  /* Update operand types.  */
  for (j = 0; j < i.operands; j++)
    i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);

  /* Make still unresolved immediate matches conform to size of immediate
     given in i.suffix.  */
  if (!finalize_imm ())
    return;

  if (i.types[0].bitfield.imm1)
    i.imm_operands = 0;	/* kludge for shift insns.  */

  /* We only need to check those implicit registers for instructions
     with 3 operands or less.  */
  if (i.operands <= 3)
    for (j = 0; j < i.operands; j++)
      if (i.types[j].bitfield.inoutportreg
	  || i.types[j].bitfield.shiftcount
	  || i.types[j].bitfield.acc
	  || i.types[j].bitfield.floatacc)
	i.reg_operands--;

  /* ImmExt should be processed after SSE2AVX.  */
  if (!i.tm.opcode_modifier.sse2avx
      && i.tm.opcode_modifier.immext)
    process_immext ();

  /* For insns with operands there are more diddles to do to the opcode.  */
  if (i.operands)
    {
      if (!process_operands ())
	return;
    }
  else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
    {
      /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc.  */
      as_warn (_("translating to `%sp'"), i.tm.name);
    }

  if (i.tm.opcode_modifier.vex || i.tm.opcode_modifier.evex)
    {
      if (flag_code == CODE_16BIT)
	{
	  as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
		  i.tm.name);
	  return;
	}

      if (i.tm.opcode_modifier.vex)
	build_vex_prefix (t);
      else
	build_evex_prefix ();
    }

  /* Handle conversion of 'int $3' --> special int3 insn.  XOP or FMA4
     instructions may define INT_OPCODE as well, so avoid this corner
     case for those instructions that use MODRM.  */
  if (i.tm.base_opcode == INT_OPCODE
      && !i.tm.opcode_modifier.modrm
      && i.op[0].imms->X_add_number == 3)
    {
      i.tm.base_opcode = INT3_OPCODE;
      i.imm_operands = 0;
    }

  if ((i.tm.opcode_modifier.jump
       || i.tm.opcode_modifier.jumpbyte
       || i.tm.opcode_modifier.jumpdword)
      && i.op[0].disps->X_op == O_constant)
    {
      /* Convert "jmp constant" (and "call constant") to a jump (call) to
	 the absolute address given by the constant.  Since ix86 jumps and
	 calls are pc relative, we need to generate a reloc.  */
      i.op[0].disps->X_add_symbol = &abs_symbol;
      i.op[0].disps->X_op = O_symbol;
    }

  if (i.tm.opcode_modifier.rex64)
    i.rex |= REX_W;

  /* For 8 bit registers we need an empty rex prefix.  Also if the
     instruction already has a prefix, we need to convert old
     registers to new ones.  */

  if ((i.types[0].bitfield.reg8
       && (i.op[0].regs->reg_flags & RegRex64) != 0)
      || (i.types[1].bitfield.reg8
	  && (i.op[1].regs->reg_flags & RegRex64) != 0)
      || ((i.types[0].bitfield.reg8
	   || i.types[1].bitfield.reg8)
	  && i.rex != 0))
    {
      int x;

      i.rex |= REX_OPCODE;
      for (x = 0; x < 2; x++)
	{
	  /* Look for 8 bit operand that uses old registers.  */
	  if (i.types[x].bitfield.reg8
	      && (i.op[x].regs->reg_flags & RegRex64) == 0)
	    {
	      /* In case it is "hi" register, give up.  */
	      if (i.op[x].regs->reg_num > 3)
		as_bad (_("can't encode register '%s%s' in an "
			  "instruction requiring REX prefix."),
			register_prefix, i.op[x].regs->reg_name);

	      /* Otherwise it is equivalent to the extended register.
		 Since the encoding doesn't change this is merely
		 cosmetic cleanup for debug output.  */

	      i.op[x].regs = i.op[x].regs + 8;
	    }
	}
    }

  if (i.rex != 0)
    add_prefix (REX_OPCODE | i.rex);

  /* We are ready to output the insn.  */
  output_insn ();
}
3887
/* Parse the (possibly prefix-laden) mnemonic at the start of LINE into
   MNEMONIC, consuming explicit and pseudo prefixes along the way, and
   set current_templates to the candidate templates for the opcode.
   Returns a pointer just past the mnemonic (and any branch hint), or
   NULL after issuing a diagnostic.  */

static char *
parse_insn (char *line, char *mnemonic)
{
  char *l = line;
  char *token_start = l;
  char *mnem_p;
  int supported;
  const insn_template *t;
  char *dot_p = NULL;

  /* Loop: each iteration consumes one token; prefixes keep us looping,
     the actual mnemonic breaks out.  */
  while (1)
    {
      mnem_p = mnemonic;
      /* Copy mnemonic characters, remembering the last '.' for the
	 pseudo-suffix check (.s/.d8/.d32) further down.  */
      while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
	{
	  if (*mnem_p == '.')
	    dot_p = mnem_p;
	  mnem_p++;
	  if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
	    {
	      as_bad (_("no such instruction: `%s'"), token_start);
	      return NULL;
	    }
	  l++;
	}
      if (!is_space_char (*l)
	  && *l != END_OF_INSN
	  && (intel_syntax
	      || (*l != PREFIX_SEPARATOR
		  && *l != ',')))
	{
	  as_bad (_("invalid character %s in mnemonic"),
		  output_invalid (*l));
	  return NULL;
	}
      if (token_start == l)
	{
	  if (!intel_syntax && *l == PREFIX_SEPARATOR)
	    as_bad (_("expecting prefix; got nothing"));
	  else
	    as_bad (_("expecting mnemonic; got nothing"));
	  return NULL;
	}

      /* Look up instruction (or prefix) via hash table.  */
      current_templates = (const templates *) hash_find (op_hash, mnemonic);

      /* A prefix only counts as such when something follows it on the
	 line; a bare prefix name is treated as the mnemonic itself.  */
      if (*l != END_OF_INSN
	  && (!is_space_char (*l) || l[1] != END_OF_INSN)
	  && current_templates
	  && current_templates->start->opcode_modifier.isprefix)
	{
	  if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
	    {
	      as_bad ((flag_code != CODE_64BIT
		       ? _("`%s' is only supported in 64-bit mode")
		       : _("`%s' is not supported in 64-bit mode")),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* If we are in 16-bit mode, do not allow addr16 or data16.
	     Similarly, in 32-bit mode, do not allow addr32 or data32.  */
	  if ((current_templates->start->opcode_modifier.size16
	       || current_templates->start->opcode_modifier.size32)
	      && flag_code != CODE_64BIT
	      && (current_templates->start->opcode_modifier.size32
		  ^ (flag_code == CODE_16BIT)))
	    {
	      as_bad (_("redundant %s prefix"),
		      current_templates->start->name);
	      return NULL;
	    }
	  if (current_templates->start->opcode_length == 0)
	    {
	      /* Handle pseudo prefixes.  */
	      switch (current_templates->start->base_opcode)
		{
		case 0x0:
		  /* {disp8} */
		  i.disp_encoding = disp_encoding_8bit;
		  break;
		case 0x1:
		  /* {disp32} */
		  i.disp_encoding = disp_encoding_32bit;
		  break;
		case 0x2:
		  /* {load} */
		  i.dir_encoding = dir_encoding_load;
		  break;
		case 0x3:
		  /* {store} */
		  i.dir_encoding = dir_encoding_store;
		  break;
		case 0x4:
		  /* {vex2} */
		  i.vec_encoding = vex_encoding_vex2;
		  break;
		case 0x5:
		  /* {vex3} */
		  i.vec_encoding = vex_encoding_vex3;
		  break;
		case 0x6:
		  /* {evex} */
		  i.vec_encoding = vex_encoding_evex;
		  break;
		default:
		  abort ();
		}
	    }
	  else
	    {
	      /* Add prefix, checking for repeated prefixes.  */
	      switch (add_prefix (current_templates->start->base_opcode))
		{
		case PREFIX_EXIST:
		  return NULL;
		case PREFIX_DS:
		  /* DS doubles as the notrack prefix for CET insns.  */
		  if (current_templates->start->cpu_flags.bitfield.cpucet)
		    i.notrack_prefix = current_templates->start->name;
		  break;
		case PREFIX_REP:
		  /* REP doubles as xacquire/xrelease (HLE) and as the
		     bnd prefix (MPX); record which one we saw.  */
		  if (current_templates->start->cpu_flags.bitfield.cpuhle)
		    i.hle_prefix = current_templates->start->name;
		  else if (current_templates->start->cpu_flags.bitfield.cpumpx)
		    i.bnd_prefix = current_templates->start->name;
		  else
		    i.rep_prefix = current_templates->start->name;
		  break;
		default:
		  break;
		}
	    }
	  /* Skip past PREFIX_SEPARATOR and reset token_start.  */
	  token_start = ++l;
	}
      else
	break;
    }

  if (!current_templates)
    {
      /* Check if we should swap operand or force 32bit displacement in
	 encoding.  */
      if (mnem_p - 2 == dot_p && dot_p[1] == 's')
	i.dir_encoding = dir_encoding_store;
      else if (mnem_p - 3 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '8')
	i.disp_encoding = disp_encoding_8bit;
      else if (mnem_p - 4 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '3'
	       && dot_p[3] == '2')
	i.disp_encoding = disp_encoding_32bit;
      else
	goto check_suffix;
      /* Strip the pseudo-suffix and retry the lookup.  */
      mnem_p = dot_p;
      *dot_p = '\0';
      current_templates = (const templates *) hash_find (op_hash, mnemonic);
    }

  if (!current_templates)
    {
check_suffix:
      /* See if we can get a match by trimming off a suffix.  */
      switch (mnem_p[-1])
	{
	case WORD_MNEM_SUFFIX:
	  if (intel_syntax && (intel_float_operand (mnemonic) & 2))
	    i.suffix = SHORT_MNEM_SUFFIX;
	  else
	    /* Fall through.  */
	case BYTE_MNEM_SUFFIX:
	case QWORD_MNEM_SUFFIX:
	  i.suffix = mnem_p[-1];
	  mnem_p[-1] = '\0';
	  current_templates = (const templates *) hash_find (op_hash,
							     mnemonic);
	  break;
	case SHORT_MNEM_SUFFIX:
	case LONG_MNEM_SUFFIX:
	  if (!intel_syntax)
	    {
	      i.suffix = mnem_p[-1];
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;

	  /* Intel Syntax.  */
	case 'd':
	  if (intel_syntax)
	    {
	      if (intel_float_operand (mnemonic) == 1)
		i.suffix = SHORT_MNEM_SUFFIX;
	      else
		i.suffix = LONG_MNEM_SUFFIX;
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;
	}
      if (!current_templates)
	{
	  as_bad (_("no such instruction: `%s'"), token_start);
	  return NULL;
	}
    }

  if (current_templates->start->opcode_modifier.jump
      || current_templates->start->opcode_modifier.jumpbyte)
    {
      /* Check for a branch hint.  We allow ",pt" and ",pn" for
	 predict taken and predict not taken respectively.
	 I'm not sure that branch hints actually do anything on loop
	 and jcxz insns (JumpByte) for current Pentium4 chips.  They
	 may work in the future and it doesn't hurt to accept them
	 now.  */
      if (l[0] == ',' && l[1] == 'p')
	{
	  if (l[2] == 't')
	    {
	      if (!add_prefix (DS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	  else if (l[2] == 'n')
	    {
	      if (!add_prefix (CS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	}
    }
  /* Any other comma loses.  */
  if (*l == ',')
    {
      as_bad (_("invalid character %s in mnemonic"),
	      output_invalid (*l));
      return NULL;
    }

  /* Check if instruction is supported on specified architecture.  */
  supported = 0;
  for (t = current_templates->start; t < current_templates->end; ++t)
    {
      supported |= cpu_flags_match (t);
      if (supported == CPU_FLAGS_PERFECT_MATCH)
	goto skip;
    }

  if (!(supported & CPU_FLAGS_64BIT_MATCH))
    {
      as_bad (flag_code == CODE_64BIT
	      ? _("`%s' is not supported in 64-bit mode")
	      : _("`%s' is only supported in 64-bit mode"),
	      current_templates->start->name);
      return NULL;
    }
  if (supported != CPU_FLAGS_PERFECT_MATCH)
    {
      as_bad (_("`%s' is not supported on `%s%s'"),
	      current_templates->start->name,
	      cpu_arch_name ? cpu_arch_name : default_arch,
	      cpu_sub_arch_name ? cpu_sub_arch_name : "");
      return NULL;
    }

skip:
  if (!cpu_arch_flags.bitfield.cpui386
      && (flag_code != CODE_16BIT))
    {
      as_warn (_("use .code16 to ensure correct addressing mode"));
    }

  return l;
}
4167
/* Parse the comma-separated operand list starting at L for MNEMONIC,
   handing each operand string to i386_intel_operand or
   i386_att_operand, which record the result in the global `i'.
   Returns a pointer past the last operand, or NULL on error.  */

static char *
parse_operands (char *l, const char *mnemonic)
{
  char *token_start;

  /* 1 if operand is pending after ','.  */
  unsigned int expecting_operand = 0;

  /* Non-zero if operand parens not balanced.  */
  unsigned int paren_not_balanced;

  while (*l != END_OF_INSN)
    {
      /* Skip optional white space before operand.  */
      if (is_space_char (*l))
	++l;
      if (!is_operand_char (*l) && *l != END_OF_INSN && *l != '"')
	{
	  as_bad (_("invalid character %s before operand %d"),
		  output_invalid (*l),
		  i.operands + 1);
	  return NULL;
	}
      token_start = l;	/* After white space.  */
      paren_not_balanced = 0;
      /* Scan forward to the ',' that ends this operand, tracking
	 paren/bracket nesting so commas inside memory operands
	 don't terminate the scan.  */
      while (paren_not_balanced || *l != ',')
	{
	  if (*l == END_OF_INSN)
	    {
	      if (paren_not_balanced)
		{
		  if (!intel_syntax)
		    as_bad (_("unbalanced parenthesis in operand %d."),
			    i.operands + 1);
		  else
		    as_bad (_("unbalanced brackets in operand %d."),
			    i.operands + 1);
		  return NULL;
		}
	      else
		break;	/* we are done */
	    }
	  else if (!is_operand_char (*l) && !is_space_char (*l) && *l != '"')
	    {
	      as_bad (_("invalid character %s in operand %d"),
		      output_invalid (*l),
		      i.operands + 1);
	      return NULL;
	    }
	  /* AT&T groups with parentheses, Intel with brackets.  */
	  if (!intel_syntax)
	    {
	      if (*l == '(')
		++paren_not_balanced;
	      if (*l == ')')
		--paren_not_balanced;
	    }
	  else
	    {
	      if (*l == '[')
		++paren_not_balanced;
	      if (*l == ']')
		--paren_not_balanced;
	    }
	  l++;
	}
      if (l != token_start)
	{			/* Yes, we've read in another operand.  */
	  unsigned int operand_ok;
	  this_operand = i.operands++;
	  if (i.operands > MAX_OPERANDS)
	    {
	      as_bad (_("spurious operands; (%d operands/instruction max)"),
		      MAX_OPERANDS);
	      return NULL;
	    }
	  i.types[this_operand].bitfield.unspecified = 1;
	  /* Now parse operand adding info to 'i' as we go along.  */
	  END_STRING_AND_SAVE (l);

	  if (intel_syntax)
	    operand_ok =
	      i386_intel_operand (token_start,
				  intel_float_operand (mnemonic));
	  else
	    operand_ok = i386_att_operand (token_start);

	  RESTORE_END_STRING (l);
	  if (!operand_ok)
	    return NULL;
	}
      else
	{
	  if (expecting_operand)
	    {
	    expecting_operand_after_comma:
	      as_bad (_("expecting operand after ','; got nothing"));
	      return NULL;
	    }
	  if (*l == ',')
	    {
	      as_bad (_("expecting operand before ','; got nothing"));
	      return NULL;
	    }
	}

      /* Now *l must be either ',' or END_OF_INSN.  */
      if (*l == ',')
	{
	  if (*++l == END_OF_INSN)
	    {
	      /* Just skip it, if it's \n complain.  */
	      goto expecting_operand_after_comma;
	    }
	  expecting_operand = 1;
	}
    }
  return l;
}
4286
4287 static void
4288 swap_2_operands (int xchg1, int xchg2)
4289 {
4290 union i386_op temp_op;
4291 i386_operand_type temp_type;
4292 enum bfd_reloc_code_real temp_reloc;
4293
4294 temp_type = i.types[xchg2];
4295 i.types[xchg2] = i.types[xchg1];
4296 i.types[xchg1] = temp_type;
4297 temp_op = i.op[xchg2];
4298 i.op[xchg2] = i.op[xchg1];
4299 i.op[xchg1] = temp_op;
4300 temp_reloc = i.reloc[xchg2];
4301 i.reloc[xchg2] = i.reloc[xchg1];
4302 i.reloc[xchg1] = temp_reloc;
4303
4304 if (i.mask)
4305 {
4306 if (i.mask->operand == xchg1)
4307 i.mask->operand = xchg2;
4308 else if (i.mask->operand == xchg2)
4309 i.mask->operand = xchg1;
4310 }
4311 if (i.broadcast)
4312 {
4313 if (i.broadcast->operand == xchg1)
4314 i.broadcast->operand = xchg2;
4315 else if (i.broadcast->operand == xchg2)
4316 i.broadcast->operand = xchg1;
4317 }
4318 if (i.rounding)
4319 {
4320 if (i.rounding->operand == xchg1)
4321 i.rounding->operand = xchg2;
4322 else if (i.rounding->operand == xchg2)
4323 i.rounding->operand = xchg1;
4324 }
4325 }
4326
4327 static void
4328 swap_operands (void)
4329 {
4330 switch (i.operands)
4331 {
4332 case 5:
4333 case 4:
4334 swap_2_operands (1, i.operands - 2);
4335 /* Fall through. */
4336 case 3:
4337 case 2:
4338 swap_2_operands (0, i.operands - 1);
4339 break;
4340 default:
4341 abort ();
4342 }
4343
4344 if (i.mem_operands == 2)
4345 {
4346 const seg_entry *temp_seg;
4347 temp_seg = i.seg[0];
4348 i.seg[0] = i.seg[1];
4349 i.seg[1] = temp_seg;
4350 }
4351 }
4352
/* Try to ensure constant immediates are represented in the smallest
   opcode possible.  Widens/narrows the Imm* type bits on each
   immediate operand, guided by an explicit or guessed operand-size
   suffix, so that template matching can pick the shortest encoding.  */
static void
optimize_imm (void)
{
  char guess_suffix = 0;
  int op;

  if (i.suffix)
    guess_suffix = i.suffix;
  else if (i.reg_operands)
    {
      /* Figure out a suffix from the last register operand specified.
	 We can't do this properly yet, ie. excluding InOutPortReg,
	 but the following works for instructions with immediates.
	 In any case, we can't set i.suffix yet.  */
      for (op = i.operands; --op >= 0;)
	if (i.types[op].bitfield.reg8)
	  {
	    guess_suffix = BYTE_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg16)
	  {
	    guess_suffix = WORD_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg32)
	  {
	    guess_suffix = LONG_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg64)
	  {
	    guess_suffix = QWORD_MNEM_SUFFIX;
	    break;
	  }
    }
  else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
    guess_suffix = WORD_MNEM_SUFFIX;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], imm))
      {
	switch (i.op[op].imms->X_op)
	  {
	  case O_constant:
	    /* If a suffix is given, this operand may be shortened.  */
	    switch (guess_suffix)
	      {
	      case LONG_MNEM_SUFFIX:
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case WORD_MNEM_SUFFIX:
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case BYTE_MNEM_SUFFIX:
		i.types[op].bitfield.imm8 = 1;
		i.types[op].bitfield.imm8s = 1;
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      }

	    /* If this operand is at most 16 bits, convert it
	       to a signed 16 bit number before trying to see
	       whether it will fit in an even smaller size.
	       This allows a 16-bit operand such as $0xffe0 to
	       be recognised as within Imm8S range.  */
	    if ((i.types[op].bitfield.imm16)
		&& (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
	      {
		i.op[op].imms->X_add_number =
		  (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
	      }
#ifdef BFD64
	    /* Store 32-bit immediate in 64-bit for 64-bit BFD.  */
	    if ((i.types[op].bitfield.imm32)
		&& ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
		    == 0))
	      {
		i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
						^ ((offsetT) 1 << 31))
					       - ((offsetT) 1 << 31));
	      }
#endif
	    i.types[op]
	      = operand_type_or (i.types[op],
				 smallest_imm_type (i.op[op].imms->X_add_number));

	    /* We must avoid matching of Imm32 templates when 64bit
	       only immediate is available.  */
	    if (guess_suffix == QWORD_MNEM_SUFFIX)
	      i.types[op].bitfield.imm32 = 0;
	    break;

	  case O_absent:
	  case O_register:
	    abort ();

	    /* Symbols and expressions.  */
	  default:
	    /* Convert symbolic operand to proper sizes for matching, but don't
	       prevent matching a set of insns that only supports sizes other
	       than those matching the insn suffix.  */
	    {
	      i386_operand_type mask, allowed;
	      const insn_template *t;

	      operand_type_set (&mask, 0);
	      operand_type_set (&allowed, 0);

	      /* Union of the Imm* types any candidate template accepts
		 for this operand position.  */
	      for (t = current_templates->start;
		   t < current_templates->end;
		   ++t)
		allowed = operand_type_or (allowed,
					   t->operand_types[op]);
	      switch (guess_suffix)
		{
		case QWORD_MNEM_SUFFIX:
		  mask.bitfield.imm64 = 1;
		  mask.bitfield.imm32s = 1;
		  break;
		case LONG_MNEM_SUFFIX:
		  mask.bitfield.imm32 = 1;
		  break;
		case WORD_MNEM_SUFFIX:
		  mask.bitfield.imm16 = 1;
		  break;
		case BYTE_MNEM_SUFFIX:
		  mask.bitfield.imm8 = 1;
		  break;
		default:
		  break;
		}
	      /* Only narrow to the suffix-implied sizes if some
		 template actually accepts one of them.  */
	      allowed = operand_type_and (mask, allowed);
	      if (!operand_type_all_zero (&allowed))
		i.types[op] = operand_type_and (i.types[op], mask);
	    }
	    break;
	  }
      }
}
4502
/* Try to use the smallest displacement type too.  Walks every operand
   carrying a displacement; constant displacements are narrowed to the
   smallest encodable size, TLS descriptor calls get their fixup emitted
   here, and anything else loses the (unsupported) 64-bit size bit.  */
static void
optimize_disp (void)
{
  int op;

  /* Iterate operands back to front.  */
  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], disp))
      {
	if (i.op[op].disps->X_op == O_constant)
	  {
	    offsetT op_disp = i.op[op].disps->X_add_number;

	    if (i.types[op].bitfield.disp16
		&& (op_disp & ~(offsetT) 0xffff) == 0)
	      {
		/* If this operand is at most 16 bits, convert
		   to a signed 16 bit number and don't use 64bit
		   displacement.  */
		op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
		i.types[op].bitfield.disp64 = 0;
	      }
#ifdef BFD64
	    /* Optimize 64-bit displacement to 32-bit for 64-bit BFD.  */
	    if (i.types[op].bitfield.disp32
		&& (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
	      {
		/* If this operand is at most 32 bits, convert
		   to a signed 32 bit number and don't use 64bit
		   displacement.  */
		op_disp &= (((offsetT) 2 << 31) - 1);
		op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
		i.types[op].bitfield.disp64 = 0;
	      }
#endif
	    if (!op_disp && i.types[op].bitfield.baseindex)
	      {
		/* Zero displacement with a base and/or index register:
		   the displacement can be dropped entirely.  */
		i.types[op].bitfield.disp8 = 0;
		i.types[op].bitfield.disp16 = 0;
		i.types[op].bitfield.disp32 = 0;
		i.types[op].bitfield.disp32s = 0;
		i.types[op].bitfield.disp64 = 0;
		i.op[op].disps = 0;
		i.disp_operands--;
	      }
	    else if (flag_code == CODE_64BIT)
	      {
		/* 64-bit mode: prefer a sign-extended 32-bit
		   displacement when the constant fits.  */
		if (fits_in_signed_long (op_disp))
		  {
		    i.types[op].bitfield.disp64 = 0;
		    i.types[op].bitfield.disp32s = 1;
		  }
		/* With an address-size prefix a zero-extended 32-bit
		   displacement is also usable.  */
		if (i.prefix[ADDR_PREFIX]
		    && fits_in_unsigned_long (op_disp))
		  i.types[op].bitfield.disp32 = 1;
	      }
	    /* Whenever a 16/32-bit form is possible, also offer the
	       8-bit form if the value fits.  */
	    if ((i.types[op].bitfield.disp32
		 || i.types[op].bitfield.disp32s
		 || i.types[op].bitfield.disp16)
		&& fits_in_signed_byte (op_disp))
	      i.types[op].bitfield.disp8 = 1;
	  }
	else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
		 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
	  {
	    /* TLS descriptor call: emit the (zero-size) fixup now and
	       suppress all displacement encodings for this operand.  */
	    fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
			 i.op[op].disps, 0, i.reloc[op]);
	    i.types[op].bitfield.disp8 = 0;
	    i.types[op].bitfield.disp16 = 0;
	    i.types[op].bitfield.disp32 = 0;
	    i.types[op].bitfield.disp32s = 0;
	    i.types[op].bitfield.disp64 = 0;
	  }
	else
	  /* We only support 64bit displacement on constants.  */
	  i.types[op].bitfield.disp64 = 0;
      }
}
4581
/* Check if operands are valid for the instruction.  Vector-specific
   checks: VSIB addressing, default masking, broadcast, zeroing/merging
   masks, RC/SAE rounding and EVEX compressed (shifted) Disp8.
   Returns 0 on success; on failure stores the reason in i.error and
   returns 1.  Side effect: sets i.memshift (or -1 when the template
   has no Disp8MemShift).  */

static int
check_VecOperands (const insn_template *t)
{
  unsigned int op;

  /* Without VSIB byte, we can't have a vector register for index.  */
  if (!t->opcode_modifier.vecsib
      && i.index_reg
      && (i.index_reg->reg_type.bitfield.regxmm
	  || i.index_reg->reg_type.bitfield.regymm
	  || i.index_reg->reg_type.bitfield.regzmm))
    {
      i.error = unsupported_vector_index_register;
      return 1;
    }

  /* Check if default mask is allowed.  NoDefMask templates require an
     explicit, non-k0 mask register.  */
  if (t->opcode_modifier.nodefmask
      && (!i.mask || i.mask->mask->reg_num == 0))
    {
      i.error = no_default_mask;
      return 1;
    }

  /* For VSIB byte, we need a vector register for index, and all vector
     registers must be distinct.  */
  if (t->opcode_modifier.vecsib)
    {
      /* Index register width must match the template's VSIB kind
	 (128/256/512 -> xmm/ymm/zmm).  */
      if (!i.index_reg
	  || !((t->opcode_modifier.vecsib == VecSIB128
		&& i.index_reg->reg_type.bitfield.regxmm)
	       || (t->opcode_modifier.vecsib == VecSIB256
		   && i.index_reg->reg_type.bitfield.regymm)
	       || (t->opcode_modifier.vecsib == VecSIB512
		   && i.index_reg->reg_type.bitfield.regzmm)))
	{
	  i.error = invalid_vsib_address;
	  return 1;
	}

      gas_assert (i.reg_operands == 2 || i.mask);
      if (i.reg_operands == 2 && !i.mask)
	{
	  /* Unmasked form (VEX gathers): mask, index and destination
	     registers must be pairwise distinct.  */
	  gas_assert (i.types[0].bitfield.regxmm
		      || i.types[0].bitfield.regymm);
	  gas_assert (i.types[2].bitfield.regxmm
		      || i.types[2].bitfield.regymm);
	  if (operand_check == check_none)
	    return 0;
	  if (register_number (i.op[0].regs)
	      != register_number (i.index_reg)
	      && register_number (i.op[2].regs)
	      != register_number (i.index_reg)
	      && register_number (i.op[0].regs)
	      != register_number (i.op[2].regs))
	    return 0;
	  if (operand_check == check_error)
	    {
	      i.error = invalid_vector_register_set;
	      return 1;
	    }
	  as_warn (_("mask, index, and destination registers should be distinct"));
	}
      else if (i.reg_operands == 1 && i.mask)
	{
	  /* Masked form (EVEX gathers/scatters): destination and index
	     must be distinct when the destination is ymm/zmm.  */
	  if ((i.types[1].bitfield.regymm
	       || i.types[1].bitfield.regzmm)
	      && (register_number (i.op[1].regs)
		  == register_number (i.index_reg)))
	    {
	      if (operand_check == check_error)
		{
		  i.error = invalid_vector_register_set;
		  return 1;
		}
	      if (operand_check != check_none)
		as_warn (_("index and destination registers should be distinct"));
	    }
	}
    }

  /* Check if broadcast is supported by the instruction and is applied
     to the memory operand.  */
  if (i.broadcast)
    {
      int broadcasted_opnd_size;

      /* Check if specified broadcast is supported in this instruction,
	 and it's applied to memory operand of DWORD or QWORD type,
	 depending on VecESize.  */
      if (i.broadcast->type != t->opcode_modifier.broadcast
	  || !i.types[i.broadcast->operand].bitfield.mem
	  || (t->opcode_modifier.vecesize == 0
	      && !i.types[i.broadcast->operand].bitfield.dword
	      && !i.types[i.broadcast->operand].bitfield.unspecified)
	  || (t->opcode_modifier.vecesize == 1
	      && !i.types[i.broadcast->operand].bitfield.qword
	      && !i.types[i.broadcast->operand].bitfield.unspecified))
	goto bad_broadcast;

      /* Element size in bits (VecESize: 0 -> 32, 1 -> 64), scaled by
	 the broadcast factor to give the effective operand size.  */
      broadcasted_opnd_size = t->opcode_modifier.vecesize ? 64 : 32;
      if (i.broadcast->type == BROADCAST_1TO16)
	broadcasted_opnd_size <<= 4; /* Broadcast 1to16.  */
      else if (i.broadcast->type == BROADCAST_1TO8)
	broadcasted_opnd_size <<= 3; /* Broadcast 1to8.  */
      else if (i.broadcast->type == BROADCAST_1TO4)
	broadcasted_opnd_size <<= 2; /* Broadcast 1to4.  */
      else if (i.broadcast->type == BROADCAST_1TO2)
	broadcasted_opnd_size <<= 1; /* Broadcast 1to2.  */
      else
	goto bad_broadcast;

      /* The effective size must match the template's vector width.  */
      if ((broadcasted_opnd_size == 256
	   && !t->operand_types[i.broadcast->operand].bitfield.ymmword)
	  || (broadcasted_opnd_size == 512
	      && !t->operand_types[i.broadcast->operand].bitfield.zmmword))
	{
	bad_broadcast:
	  i.error = unsupported_broadcast;
	  return 1;
	}
    }
  /* If broadcast is supported in this instruction, we need to check if
     operand of one-element size isn't specified without broadcast.  */
  else if (t->opcode_modifier.broadcast && i.mem_operands)
    {
      /* Find memory operand.  */
      for (op = 0; op < i.operands; op++)
	if (operand_type_check (i.types[op], anymem))
	  break;
      gas_assert (op < i.operands);
      /* Check size of the memory operand.  */
      if ((t->opcode_modifier.vecesize == 0
	   && i.types[op].bitfield.dword)
	  || (t->opcode_modifier.vecesize == 1
	      && i.types[op].bitfield.qword))
	{
	  i.error = broadcast_needed;
	  return 1;
	}
    }

  /* Check if requested masking is supported.  */
  if (i.mask
      && (!t->opcode_modifier.masking
	  || (i.mask->zeroing
	      && t->opcode_modifier.masking == MERGING_MASKING)))
    {
      i.error = unsupported_masking;
      return 1;
    }

  /* Check if masking is applied to dest operand.  */
  if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
    {
      i.error = mask_not_on_destination;
      return 1;
    }

  /* Check RC/SAE.  */
  if (i.rounding)
    {
      /* Static rounding implies SAE; a bare {sae} is only valid on
	 SAE-only templates without static rounding.  */
      if ((i.rounding->type != saeonly
	   && !t->opcode_modifier.staticrounding)
	  || (i.rounding->type == saeonly
	      && (t->opcode_modifier.staticrounding
		  || !t->opcode_modifier.sae)))
	{
	  i.error = unsupported_rc_sae;
	  return 1;
	}
      /* If the instruction has several immediate operands and one of
	 them is rounding, the rounding operand should be the last
	 immediate operand.  */
      if (i.imm_operands > 1
	  && i.rounding->operand != (int) (i.imm_operands - 1))
	{
	  i.error = rc_sae_operand_not_last_imm;
	  return 1;
	}
    }

  /* Check vector Disp8 operand (EVEX compressed displacement).  */
  if (t->opcode_modifier.disp8memshift)
    {
      /* Broadcast scales by element size, otherwise use the
	 template-supplied shift.  */
      if (i.broadcast)
	i.memshift = t->opcode_modifier.vecesize ? 3 : 2;
      else
	i.memshift = t->opcode_modifier.disp8memshift;

      for (op = 0; op < i.operands; op++)
	if (operand_type_check (i.types[op], disp)
	    && i.op[op].disps->X_op == O_constant)
	  {
	    offsetT value = i.op[op].disps->X_add_number;
	    int vec_disp8_ok
	      = (i.disp_encoding != disp_encoding_32bit
		 && fits_in_vec_disp8 (value));
	    if (t->operand_types [op].bitfield.vec_disp8)
	      {
		if (vec_disp8_ok)
		  i.types[op].bitfield.vec_disp8 = 1;
		else
		  {
		    /* Vector insn can only have Vec_Disp8/Disp32 in
		       32/64bit modes, and Vec_Disp8/Disp16 in 16bit
		       mode.  */
		    i.types[op].bitfield.disp8 = 0;
		    if (flag_code != CODE_16BIT)
		      i.types[op].bitfield.disp16 = 0;
		  }
	      }
	    else if (flag_code != CODE_16BIT)
	      {
		/* One form of this instruction supports vector Disp8.
		   Try vector Disp8 if we need to use Disp32.  */
		if (vec_disp8_ok && !fits_in_signed_byte (value))
		  {
		    i.error = try_vector_disp8;
		    return 1;
		  }
	      }
	  }
    }
  else
    i.memshift = -1;

  return 0;
}
4813
4814 /* Check if operands are valid for the instruction. Update VEX
4815 operand types. */
4816
4817 static int
4818 VEX_check_operands (const insn_template *t)
4819 {
4820 if (i.vec_encoding == vex_encoding_evex)
4821 {
4822 /* This instruction must be encoded with EVEX prefix. */
4823 if (!t->opcode_modifier.evex)
4824 {
4825 i.error = unsupported;
4826 return 1;
4827 }
4828 return 0;
4829 }
4830
4831 if (!t->opcode_modifier.vex)
4832 {
4833 /* This instruction template doesn't have VEX prefix. */
4834 if (i.vec_encoding != vex_encoding_default)
4835 {
4836 i.error = unsupported;
4837 return 1;
4838 }
4839 return 0;
4840 }
4841
4842 /* Only check VEX_Imm4, which must be the first operand. */
4843 if (t->operand_types[0].bitfield.vec_imm4)
4844 {
4845 if (i.op[0].imms->X_op != O_constant
4846 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4847 {
4848 i.error = bad_imm4;
4849 return 1;
4850 }
4851
4852 /* Turn off Imm8 so that update_imm won't complain. */
4853 i.types[0] = vec_imm4;
4854 }
4855
4856 return 0;
4857 }
4858
4859 static const insn_template *
4860 match_template (char mnem_suffix)
4861 {
4862 /* Points to template once we've found it. */
4863 const insn_template *t;
4864 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4865 i386_operand_type overlap4;
4866 unsigned int found_reverse_match;
4867 i386_opcode_modifier suffix_check, mnemsuf_check;
4868 i386_operand_type operand_types [MAX_OPERANDS];
4869 int addr_prefix_disp;
4870 unsigned int j;
4871 unsigned int found_cpu_match;
4872 unsigned int check_register;
4873 enum i386_error specific_error = 0;
4874
4875 #if MAX_OPERANDS != 5
4876 # error "MAX_OPERANDS must be 5."
4877 #endif
4878
4879 found_reverse_match = 0;
4880 addr_prefix_disp = -1;
4881
4882 memset (&suffix_check, 0, sizeof (suffix_check));
4883 if (i.suffix == BYTE_MNEM_SUFFIX)
4884 suffix_check.no_bsuf = 1;
4885 else if (i.suffix == WORD_MNEM_SUFFIX)
4886 suffix_check.no_wsuf = 1;
4887 else if (i.suffix == SHORT_MNEM_SUFFIX)
4888 suffix_check.no_ssuf = 1;
4889 else if (i.suffix == LONG_MNEM_SUFFIX)
4890 suffix_check.no_lsuf = 1;
4891 else if (i.suffix == QWORD_MNEM_SUFFIX)
4892 suffix_check.no_qsuf = 1;
4893 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4894 suffix_check.no_ldsuf = 1;
4895
4896 memset (&mnemsuf_check, 0, sizeof (mnemsuf_check));
4897 if (intel_syntax)
4898 {
4899 switch (mnem_suffix)
4900 {
4901 case BYTE_MNEM_SUFFIX: mnemsuf_check.no_bsuf = 1; break;
4902 case WORD_MNEM_SUFFIX: mnemsuf_check.no_wsuf = 1; break;
4903 case SHORT_MNEM_SUFFIX: mnemsuf_check.no_ssuf = 1; break;
4904 case LONG_MNEM_SUFFIX: mnemsuf_check.no_lsuf = 1; break;
4905 case QWORD_MNEM_SUFFIX: mnemsuf_check.no_qsuf = 1; break;
4906 }
4907 }
4908
4909 /* Must have right number of operands. */
4910 i.error = number_of_operands_mismatch;
4911
4912 for (t = current_templates->start; t < current_templates->end; t++)
4913 {
4914 addr_prefix_disp = -1;
4915
4916 if (i.operands != t->operands)
4917 continue;
4918
4919 /* Check processor support. */
4920 i.error = unsupported;
4921 found_cpu_match = (cpu_flags_match (t)
4922 == CPU_FLAGS_PERFECT_MATCH);
4923 if (!found_cpu_match)
4924 continue;
4925
4926 /* Check old gcc support. */
4927 i.error = old_gcc_only;
4928 if (!old_gcc && t->opcode_modifier.oldgcc)
4929 continue;
4930
4931 /* Check AT&T mnemonic. */
4932 i.error = unsupported_with_intel_mnemonic;
4933 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4934 continue;
4935
4936 /* Check AT&T/Intel syntax and Intel64/AMD64 ISA. */
4937 i.error = unsupported_syntax;
4938 if ((intel_syntax && t->opcode_modifier.attsyntax)
4939 || (!intel_syntax && t->opcode_modifier.intelsyntax)
4940 || (intel64 && t->opcode_modifier.amd64)
4941 || (!intel64 && t->opcode_modifier.intel64))
4942 continue;
4943
4944 /* Check the suffix, except for some instructions in intel mode. */
4945 i.error = invalid_instruction_suffix;
4946 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4947 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4948 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4949 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4950 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4951 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4952 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4953 continue;
4954 /* In Intel mode all mnemonic suffixes must be explicitly allowed. */
4955 if ((t->opcode_modifier.no_bsuf && mnemsuf_check.no_bsuf)
4956 || (t->opcode_modifier.no_wsuf && mnemsuf_check.no_wsuf)
4957 || (t->opcode_modifier.no_lsuf && mnemsuf_check.no_lsuf)
4958 || (t->opcode_modifier.no_ssuf && mnemsuf_check.no_ssuf)
4959 || (t->opcode_modifier.no_qsuf && mnemsuf_check.no_qsuf)
4960 || (t->opcode_modifier.no_ldsuf && mnemsuf_check.no_ldsuf))
4961 continue;
4962
4963 if (!operand_size_match (t))
4964 continue;
4965
4966 for (j = 0; j < MAX_OPERANDS; j++)
4967 operand_types[j] = t->operand_types[j];
4968
4969 /* In general, don't allow 64-bit operands in 32-bit mode. */
4970 if (i.suffix == QWORD_MNEM_SUFFIX
4971 && flag_code != CODE_64BIT
4972 && (intel_syntax
4973 ? (!t->opcode_modifier.ignoresize
4974 && !intel_float_operand (t->name))
4975 : intel_float_operand (t->name) != 2)
4976 && ((!operand_types[0].bitfield.regmmx
4977 && !operand_types[0].bitfield.regxmm
4978 && !operand_types[0].bitfield.regymm
4979 && !operand_types[0].bitfield.regzmm)
4980 || (!operand_types[t->operands > 1].bitfield.regmmx
4981 && operand_types[t->operands > 1].bitfield.regxmm
4982 && operand_types[t->operands > 1].bitfield.regymm
4983 && operand_types[t->operands > 1].bitfield.regzmm))
4984 && (t->base_opcode != 0x0fc7
4985 || t->extension_opcode != 1 /* cmpxchg8b */))
4986 continue;
4987
4988 /* In general, don't allow 32-bit operands on pre-386. */
4989 else if (i.suffix == LONG_MNEM_SUFFIX
4990 && !cpu_arch_flags.bitfield.cpui386
4991 && (intel_syntax
4992 ? (!t->opcode_modifier.ignoresize
4993 && !intel_float_operand (t->name))
4994 : intel_float_operand (t->name) != 2)
4995 && ((!operand_types[0].bitfield.regmmx
4996 && !operand_types[0].bitfield.regxmm)
4997 || (!operand_types[t->operands > 1].bitfield.regmmx
4998 && operand_types[t->operands > 1].bitfield.regxmm)))
4999 continue;
5000
5001 /* Do not verify operands when there are none. */
5002 else
5003 {
5004 if (!t->operands)
5005 /* We've found a match; break out of loop. */
5006 break;
5007 }
5008
5009 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
5010 into Disp32/Disp16/Disp32 operand. */
5011 if (i.prefix[ADDR_PREFIX] != 0)
5012 {
5013 /* There should be only one Disp operand. */
5014 switch (flag_code)
5015 {
5016 case CODE_16BIT:
5017 for (j = 0; j < MAX_OPERANDS; j++)
5018 {
5019 if (operand_types[j].bitfield.disp16)
5020 {
5021 addr_prefix_disp = j;
5022 operand_types[j].bitfield.disp32 = 1;
5023 operand_types[j].bitfield.disp16 = 0;
5024 break;
5025 }
5026 }
5027 break;
5028 case CODE_32BIT:
5029 for (j = 0; j < MAX_OPERANDS; j++)
5030 {
5031 if (operand_types[j].bitfield.disp32)
5032 {
5033 addr_prefix_disp = j;
5034 operand_types[j].bitfield.disp32 = 0;
5035 operand_types[j].bitfield.disp16 = 1;
5036 break;
5037 }
5038 }
5039 break;
5040 case CODE_64BIT:
5041 for (j = 0; j < MAX_OPERANDS; j++)
5042 {
5043 if (operand_types[j].bitfield.disp64)
5044 {
5045 addr_prefix_disp = j;
5046 operand_types[j].bitfield.disp64 = 0;
5047 operand_types[j].bitfield.disp32 = 1;
5048 break;
5049 }
5050 }
5051 break;
5052 }
5053 }
5054
5055 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
5056 if (i.reloc[0] == BFD_RELOC_386_GOT32 && t->base_opcode == 0xa0)
5057 continue;
5058
5059 /* We check register size if needed. */
5060 check_register = t->opcode_modifier.checkregsize;
5061 overlap0 = operand_type_and (i.types[0], operand_types[0]);
5062 switch (t->operands)
5063 {
5064 case 1:
5065 if (!operand_type_match (overlap0, i.types[0]))
5066 continue;
5067 break;
5068 case 2:
5069 /* xchg %eax, %eax is a special case. It is an alias for nop
5070 only in 32bit mode and we can use opcode 0x90. In 64bit
5071 mode, we can't use 0x90 for xchg %eax, %eax since it should
5072 zero-extend %eax to %rax. */
5073 if (flag_code == CODE_64BIT
5074 && t->base_opcode == 0x90
5075 && operand_type_equal (&i.types [0], &acc32)
5076 && operand_type_equal (&i.types [1], &acc32))
5077 continue;
5078 /* If we want store form, we reverse direction of operands. */
5079 if (i.dir_encoding == dir_encoding_store
5080 && t->opcode_modifier.d)
5081 goto check_reverse;
5082 /* Fall through. */
5083
5084 case 3:
5085 /* If we want store form, we skip the current load. */
5086 if (i.dir_encoding == dir_encoding_store
5087 && i.mem_operands == 0
5088 && t->opcode_modifier.load)
5089 continue;
5090 /* Fall through. */
5091 case 4:
5092 case 5:
5093 overlap1 = operand_type_and (i.types[1], operand_types[1]);
5094 if (!operand_type_match (overlap0, i.types[0])
5095 || !operand_type_match (overlap1, i.types[1])
5096 || (check_register
5097 && !operand_type_register_match (overlap0, i.types[0],
5098 operand_types[0],
5099 overlap1, i.types[1],
5100 operand_types[1])))
5101 {
5102 /* Check if other direction is valid ... */
5103 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
5104 continue;
5105
5106 check_reverse:
5107 /* Try reversing direction of operands. */
5108 overlap0 = operand_type_and (i.types[0], operand_types[1]);
5109 overlap1 = operand_type_and (i.types[1], operand_types[0]);
5110 if (!operand_type_match (overlap0, i.types[0])
5111 || !operand_type_match (overlap1, i.types[1])
5112 || (check_register
5113 && !operand_type_register_match (overlap0,
5114 i.types[0],
5115 operand_types[1],
5116 overlap1,
5117 i.types[1],
5118 operand_types[0])))
5119 {
5120 /* Does not match either direction. */
5121 continue;
5122 }
5123 /* found_reverse_match holds which of D or FloatDR
5124 we've found. */
5125 if (t->opcode_modifier.d)
5126 found_reverse_match = Opcode_D;
5127 else if (t->opcode_modifier.floatd)
5128 found_reverse_match = Opcode_FloatD;
5129 else
5130 found_reverse_match = 0;
5131 if (t->opcode_modifier.floatr)
5132 found_reverse_match |= Opcode_FloatR;
5133 }
5134 else
5135 {
5136 /* Found a forward 2 operand match here. */
5137 switch (t->operands)
5138 {
5139 case 5:
5140 overlap4 = operand_type_and (i.types[4],
5141 operand_types[4]);
5142 /* Fall through. */
5143 case 4:
5144 overlap3 = operand_type_and (i.types[3],
5145 operand_types[3]);
5146 /* Fall through. */
5147 case 3:
5148 overlap2 = operand_type_and (i.types[2],
5149 operand_types[2]);
5150 break;
5151 }
5152
5153 switch (t->operands)
5154 {
5155 case 5:
5156 if (!operand_type_match (overlap4, i.types[4])
5157 || !operand_type_register_match (overlap3,
5158 i.types[3],
5159 operand_types[3],
5160 overlap4,
5161 i.types[4],
5162 operand_types[4]))
5163 continue;
5164 /* Fall through. */
5165 case 4:
5166 if (!operand_type_match (overlap3, i.types[3])
5167 || (check_register
5168 && !operand_type_register_match (overlap2,
5169 i.types[2],
5170 operand_types[2],
5171 overlap3,
5172 i.types[3],
5173 operand_types[3])))
5174 continue;
5175 /* Fall through. */
5176 case 3:
5177 /* Here we make use of the fact that there are no
5178 reverse match 3 operand instructions, and all 3
5179 operand instructions only need to be checked for
5180 register consistency between operands 2 and 3. */
5181 if (!operand_type_match (overlap2, i.types[2])
5182 || (check_register
5183 && !operand_type_register_match (overlap1,
5184 i.types[1],
5185 operand_types[1],
5186 overlap2,
5187 i.types[2],
5188 operand_types[2])))
5189 continue;
5190 break;
5191 }
5192 }
5193 /* Found either forward/reverse 2, 3 or 4 operand match here:
5194 slip through to break. */
5195 }
5196 if (!found_cpu_match)
5197 {
5198 found_reverse_match = 0;
5199 continue;
5200 }
5201
5202 /* Check if vector and VEX operands are valid. */
5203 if (check_VecOperands (t) || VEX_check_operands (t))
5204 {
5205 specific_error = i.error;
5206 continue;
5207 }
5208
5209 /* We've found a match; break out of loop. */
5210 break;
5211 }
5212
5213 if (t == current_templates->end)
5214 {
5215 /* We found no match. */
5216 const char *err_msg;
5217 switch (specific_error ? specific_error : i.error)
5218 {
5219 default:
5220 abort ();
5221 case operand_size_mismatch:
5222 err_msg = _("operand size mismatch");
5223 break;
5224 case operand_type_mismatch:
5225 err_msg = _("operand type mismatch");
5226 break;
5227 case register_type_mismatch:
5228 err_msg = _("register type mismatch");
5229 break;
5230 case number_of_operands_mismatch:
5231 err_msg = _("number of operands mismatch");
5232 break;
5233 case invalid_instruction_suffix:
5234 err_msg = _("invalid instruction suffix");
5235 break;
5236 case bad_imm4:
5237 err_msg = _("constant doesn't fit in 4 bits");
5238 break;
5239 case old_gcc_only:
5240 err_msg = _("only supported with old gcc");
5241 break;
5242 case unsupported_with_intel_mnemonic:
5243 err_msg = _("unsupported with Intel mnemonic");
5244 break;
5245 case unsupported_syntax:
5246 err_msg = _("unsupported syntax");
5247 break;
5248 case unsupported:
5249 as_bad (_("unsupported instruction `%s'"),
5250 current_templates->start->name);
5251 return NULL;
5252 case invalid_vsib_address:
5253 err_msg = _("invalid VSIB address");
5254 break;
5255 case invalid_vector_register_set:
5256 err_msg = _("mask, index, and destination registers must be distinct");
5257 break;
5258 case unsupported_vector_index_register:
5259 err_msg = _("unsupported vector index register");
5260 break;
5261 case unsupported_broadcast:
5262 err_msg = _("unsupported broadcast");
5263 break;
5264 case broadcast_not_on_src_operand:
5265 err_msg = _("broadcast not on source memory operand");
5266 break;
5267 case broadcast_needed:
5268 err_msg = _("broadcast is needed for operand of such type");
5269 break;
5270 case unsupported_masking:
5271 err_msg = _("unsupported masking");
5272 break;
5273 case mask_not_on_destination:
5274 err_msg = _("mask not on destination operand");
5275 break;
5276 case no_default_mask:
5277 err_msg = _("default mask isn't allowed");
5278 break;
5279 case unsupported_rc_sae:
5280 err_msg = _("unsupported static rounding/sae");
5281 break;
5282 case rc_sae_operand_not_last_imm:
5283 if (intel_syntax)
5284 err_msg = _("RC/SAE operand must precede immediate operands");
5285 else
5286 err_msg = _("RC/SAE operand must follow immediate operands");
5287 break;
5288 case invalid_register_operand:
5289 err_msg = _("invalid register operand");
5290 break;
5291 }
5292 as_bad (_("%s for `%s'"), err_msg,
5293 current_templates->start->name);
5294 return NULL;
5295 }
5296
5297 if (!quiet_warnings)
5298 {
5299 if (!intel_syntax
5300 && (i.types[0].bitfield.jumpabsolute
5301 != operand_types[0].bitfield.jumpabsolute))
5302 {
5303 as_warn (_("indirect %s without `*'"), t->name);
5304 }
5305
5306 if (t->opcode_modifier.isprefix
5307 && t->opcode_modifier.ignoresize)
5308 {
5309 /* Warn them that a data or address size prefix doesn't
5310 affect assembly of the next line of code. */
5311 as_warn (_("stand-alone `%s' prefix"), t->name);
5312 }
5313 }
5314
5315 /* Copy the template we found. */
5316 i.tm = *t;
5317
5318 if (addr_prefix_disp != -1)
5319 i.tm.operand_types[addr_prefix_disp]
5320 = operand_types[addr_prefix_disp];
5321
5322 if (found_reverse_match)
5323 {
5324 /* If we found a reverse match we must alter the opcode
5325 direction bit. found_reverse_match holds bits to change
5326 (different for int & float insns). */
5327
5328 i.tm.base_opcode ^= found_reverse_match;
5329
5330 i.tm.operand_types[0] = operand_types[1];
5331 i.tm.operand_types[1] = operand_types[0];
5332 }
5333
5334 return t;
5335 }
5336
5337 static int
5338 check_string (void)
5339 {
5340 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
5341 if (i.tm.operand_types[mem_op].bitfield.esseg)
5342 {
5343 if (i.seg[0] != NULL && i.seg[0] != &es)
5344 {
5345 as_bad (_("`%s' operand %d must use `%ses' segment"),
5346 i.tm.name,
5347 mem_op + 1,
5348 register_prefix);
5349 return 0;
5350 }
5351 /* There's only ever one segment override allowed per instruction.
5352 This instruction possibly has a legal segment override on the
5353 second operand, so copy the segment to where non-string
5354 instructions store it, allowing common code. */
5355 i.seg[0] = i.seg[1];
5356 }
5357 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
5358 {
5359 if (i.seg[1] != NULL && i.seg[1] != &es)
5360 {
5361 as_bad (_("`%s' operand %d must use `%ses' segment"),
5362 i.tm.name,
5363 mem_op + 2,
5364 register_prefix);
5365 return 0;
5366 }
5367 }
5368 return 1;
5369 }
5370
5371 static int
5372 process_suffix (void)
5373 {
5374 /* If matched instruction specifies an explicit instruction mnemonic
5375 suffix, use it. */
5376 if (i.tm.opcode_modifier.size16)
5377 i.suffix = WORD_MNEM_SUFFIX;
5378 else if (i.tm.opcode_modifier.size32)
5379 i.suffix = LONG_MNEM_SUFFIX;
5380 else if (i.tm.opcode_modifier.size64)
5381 i.suffix = QWORD_MNEM_SUFFIX;
5382 else if (i.reg_operands)
5383 {
5384 /* If there's no instruction mnemonic suffix we try to invent one
5385 based on register operands. */
5386 if (!i.suffix)
5387 {
5388 /* We take i.suffix from the last register operand specified,
5389 Destination register type is more significant than source
5390 register type. crc32 in SSE4.2 prefers source register
5391 type. */
5392 if (i.tm.base_opcode == 0xf20f38f1)
5393 {
5394 if (i.types[0].bitfield.reg16)
5395 i.suffix = WORD_MNEM_SUFFIX;
5396 else if (i.types[0].bitfield.reg32)
5397 i.suffix = LONG_MNEM_SUFFIX;
5398 else if (i.types[0].bitfield.reg64)
5399 i.suffix = QWORD_MNEM_SUFFIX;
5400 }
5401 else if (i.tm.base_opcode == 0xf20f38f0)
5402 {
5403 if (i.types[0].bitfield.reg8)
5404 i.suffix = BYTE_MNEM_SUFFIX;
5405 }
5406
5407 if (!i.suffix)
5408 {
5409 int op;
5410
5411 if (i.tm.base_opcode == 0xf20f38f1
5412 || i.tm.base_opcode == 0xf20f38f0)
5413 {
5414 /* We have to know the operand size for crc32. */
5415 as_bad (_("ambiguous memory operand size for `%s`"),
5416 i.tm.name);
5417 return 0;
5418 }
5419
5420 for (op = i.operands; --op >= 0;)
5421 if (!i.tm.operand_types[op].bitfield.inoutportreg)
5422 {
5423 if (i.types[op].bitfield.reg8)
5424 {
5425 i.suffix = BYTE_MNEM_SUFFIX;
5426 break;
5427 }
5428 else if (i.types[op].bitfield.reg16)
5429 {
5430 i.suffix = WORD_MNEM_SUFFIX;
5431 break;
5432 }
5433 else if (i.types[op].bitfield.reg32)
5434 {
5435 i.suffix = LONG_MNEM_SUFFIX;
5436 break;
5437 }
5438 else if (i.types[op].bitfield.reg64)
5439 {
5440 i.suffix = QWORD_MNEM_SUFFIX;
5441 break;
5442 }
5443 }
5444 }
5445 }
5446 else if (i.suffix == BYTE_MNEM_SUFFIX)
5447 {
5448 if (intel_syntax
5449 && i.tm.opcode_modifier.ignoresize
5450 && i.tm.opcode_modifier.no_bsuf)
5451 i.suffix = 0;
5452 else if (!check_byte_reg ())
5453 return 0;
5454 }
5455 else if (i.suffix == LONG_MNEM_SUFFIX)
5456 {
5457 if (intel_syntax
5458 && i.tm.opcode_modifier.ignoresize
5459 && i.tm.opcode_modifier.no_lsuf)
5460 i.suffix = 0;
5461 else if (!check_long_reg ())
5462 return 0;
5463 }
5464 else if (i.suffix == QWORD_MNEM_SUFFIX)
5465 {
5466 if (intel_syntax
5467 && i.tm.opcode_modifier.ignoresize
5468 && i.tm.opcode_modifier.no_qsuf)
5469 i.suffix = 0;
5470 else if (!check_qword_reg ())
5471 return 0;
5472 }
5473 else if (i.suffix == WORD_MNEM_SUFFIX)
5474 {
5475 if (intel_syntax
5476 && i.tm.opcode_modifier.ignoresize
5477 && i.tm.opcode_modifier.no_wsuf)
5478 i.suffix = 0;
5479 else if (!check_word_reg ())
5480 return 0;
5481 }
5482 else if (i.suffix == XMMWORD_MNEM_SUFFIX
5483 || i.suffix == YMMWORD_MNEM_SUFFIX
5484 || i.suffix == ZMMWORD_MNEM_SUFFIX)
5485 {
5486 /* Skip if the instruction has x/y/z suffix. match_template
5487 should check if it is a valid suffix. */
5488 }
5489 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
5490 /* Do nothing if the instruction is going to ignore the prefix. */
5491 ;
5492 else
5493 abort ();
5494 }
5495 else if (i.tm.opcode_modifier.defaultsize
5496 && !i.suffix
5497 /* exclude fldenv/frstor/fsave/fstenv */
5498 && i.tm.opcode_modifier.no_ssuf)
5499 {
5500 i.suffix = stackop_size;
5501 }
5502 else if (intel_syntax
5503 && !i.suffix
5504 && (i.tm.operand_types[0].bitfield.jumpabsolute
5505 || i.tm.opcode_modifier.jumpbyte
5506 || i.tm.opcode_modifier.jumpintersegment
5507 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
5508 && i.tm.extension_opcode <= 3)))
5509 {
5510 switch (flag_code)
5511 {
5512 case CODE_64BIT:
5513 if (!i.tm.opcode_modifier.no_qsuf)
5514 {
5515 i.suffix = QWORD_MNEM_SUFFIX;
5516 break;
5517 }
5518 /* Fall through. */
5519 case CODE_32BIT:
5520 if (!i.tm.opcode_modifier.no_lsuf)
5521 i.suffix = LONG_MNEM_SUFFIX;
5522 break;
5523 case CODE_16BIT:
5524 if (!i.tm.opcode_modifier.no_wsuf)
5525 i.suffix = WORD_MNEM_SUFFIX;
5526 break;
5527 }
5528 }
5529
5530 if (!i.suffix)
5531 {
5532 if (!intel_syntax)
5533 {
5534 if (i.tm.opcode_modifier.w)
5535 {
5536 as_bad (_("no instruction mnemonic suffix given and "
5537 "no register operands; can't size instruction"));
5538 return 0;
5539 }
5540 }
5541 else
5542 {
5543 unsigned int suffixes;
5544
5545 suffixes = !i.tm.opcode_modifier.no_bsuf;
5546 if (!i.tm.opcode_modifier.no_wsuf)
5547 suffixes |= 1 << 1;
5548 if (!i.tm.opcode_modifier.no_lsuf)
5549 suffixes |= 1 << 2;
5550 if (!i.tm.opcode_modifier.no_ldsuf)
5551 suffixes |= 1 << 3;
5552 if (!i.tm.opcode_modifier.no_ssuf)
5553 suffixes |= 1 << 4;
5554 if (!i.tm.opcode_modifier.no_qsuf)
5555 suffixes |= 1 << 5;
5556
5557 /* There are more than suffix matches. */
5558 if (i.tm.opcode_modifier.w
5559 || ((suffixes & (suffixes - 1))
5560 && !i.tm.opcode_modifier.defaultsize
5561 && !i.tm.opcode_modifier.ignoresize))
5562 {
5563 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
5564 return 0;
5565 }
5566 }
5567 }
5568
5569 /* Change the opcode based on the operand size given by i.suffix;
5570 We don't need to change things for byte insns. */
5571
5572 if (i.suffix
5573 && i.suffix != BYTE_MNEM_SUFFIX
5574 && i.suffix != XMMWORD_MNEM_SUFFIX
5575 && i.suffix != YMMWORD_MNEM_SUFFIX
5576 && i.suffix != ZMMWORD_MNEM_SUFFIX)
5577 {
5578 /* It's not a byte, select word/dword operation. */
5579 if (i.tm.opcode_modifier.w)
5580 {
5581 if (i.tm.opcode_modifier.shortform)
5582 i.tm.base_opcode |= 8;
5583 else
5584 i.tm.base_opcode |= 1;
5585 }
5586
5587 /* Now select between word & dword operations via the operand
5588 size prefix, except for instructions that will ignore this
5589 prefix anyway. */
5590 if (i.tm.opcode_modifier.addrprefixop0)
5591 {
5592 /* The address size override prefix changes the size of the
5593 first operand. */
5594 if ((flag_code == CODE_32BIT
5595 && i.op->regs[0].reg_type.bitfield.reg16)
5596 || (flag_code != CODE_32BIT
5597 && i.op->regs[0].reg_type.bitfield.reg32))
5598 if (!add_prefix (ADDR_PREFIX_OPCODE))
5599 return 0;
5600 }
5601 else if (i.suffix != QWORD_MNEM_SUFFIX
5602 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
5603 && !i.tm.opcode_modifier.ignoresize
5604 && !i.tm.opcode_modifier.floatmf
5605 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
5606 || (flag_code == CODE_64BIT
5607 && i.tm.opcode_modifier.jumpbyte)))
5608 {
5609 unsigned int prefix = DATA_PREFIX_OPCODE;
5610
5611 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
5612 prefix = ADDR_PREFIX_OPCODE;
5613
5614 if (!add_prefix (prefix))
5615 return 0;
5616 }
5617
5618 /* Set mode64 for an operand. */
5619 if (i.suffix == QWORD_MNEM_SUFFIX
5620 && flag_code == CODE_64BIT
5621 && !i.tm.opcode_modifier.norex64)
5622 {
5623 /* Special case for xchg %rax,%rax. It is NOP and doesn't
5624 need rex64. cmpxchg8b is also a special case. */
5625 if (! (i.operands == 2
5626 && i.tm.base_opcode == 0x90
5627 && i.tm.extension_opcode == None
5628 && operand_type_equal (&i.types [0], &acc64)
5629 && operand_type_equal (&i.types [1], &acc64))
5630 && ! (i.operands == 1
5631 && i.tm.base_opcode == 0xfc7
5632 && i.tm.extension_opcode == 1
5633 && !operand_type_check (i.types [0], reg)
5634 && operand_type_check (i.types [0], anymem)))
5635 i.rex |= REX_W;
5636 }
5637
5638 /* Size floating point instruction. */
5639 if (i.suffix == LONG_MNEM_SUFFIX)
5640 if (i.tm.opcode_modifier.floatmf)
5641 i.tm.base_opcode ^= 4;
5642 }
5643
5644 return 1;
5645 }
5646
/* Validate register operands against a `b' (byte) suffix.  8-bit
   registers pass unchanged.  Outside 64-bit mode, the low byte of a
   16/32/64-bit general register numbered 0-3 (ax..bx / eax..ebx) is
   silently substituted, with an optional warning.  Any other register
   class is rejected.  Returns 1 on success, 0 after a diagnostic.  */
static int
check_byte_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    {
      /* If this is an eight bit register, it's OK.  If it's the 16 or
	 32 bit version of an eight bit register, we will just use the
	 low portion, and that's OK too.  */
      if (i.types[op].bitfield.reg8)
	continue;

      /* I/O port address operands are OK too.  */
      if (i.tm.operand_types[op].bitfield.inoutportreg)
	continue;

      /* crc32 doesn't generate this warning.  */
      if (i.tm.base_opcode == 0xf20f38f0)
	continue;

      if ((i.types[op].bitfield.reg16
	   || i.types[op].bitfield.reg32
	   || i.types[op].bitfield.reg64)
	  && i.op[op].regs->reg_num < 4
	  /* Prohibit these changes in 64bit mode, since the lowering
	     would be more complicated.  */
	  && flag_code != CODE_64BIT)
	{
#if REGISTER_WARNINGS
	  /* E.g. "addb %ax" becomes "addb %al" with a warning.  The
	     byte-register entries sit at a fixed offset from their
	     word/dword counterparts in the register table.  */
	  if (!quiet_warnings)
	    as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		     register_prefix,
		     (i.op[op].regs + (i.types[op].bitfield.reg16
				       ? REGNAM_AL - REGNAM_AX
				       : REGNAM_AL - REGNAM_EAX))->reg_name,
		     register_prefix,
		     i.op[op].regs->reg_name,
		     i.suffix);
#endif
	  continue;
	}
      /* Any other register is bad.  */
      if (i.types[op].bitfield.reg16
	  || i.types[op].bitfield.reg32
	  || i.types[op].bitfield.reg64
	  || i.types[op].bitfield.regmmx
	  || i.types[op].bitfield.regxmm
	  || i.types[op].bitfield.regymm
	  || i.types[op].bitfield.regzmm
	  || i.types[op].bitfield.sreg2
	  || i.types[op].bitfield.sreg3
	  || i.types[op].bitfield.control
	  || i.types[op].bitfield.debug
	  || i.types[op].bitfield.test
	  || i.types[op].bitfield.floatreg
	  || i.types[op].bitfield.floatacc)
	{
	  as_bad (_("`%s%s' not allowed with `%s%c'"),
		  register_prefix,
		  i.op[op].regs->reg_name,
		  i.tm.name,
		  i.suffix);
	  return 0;
	}
    }
  return 1;
}
5715
/* Validate register operands against an `l' (dword) suffix.  Byte
   registers are rejected unless the template asks for them; a 16-bit
   register where a 32-bit one is expected is upgraded with a warning
   (a hard error in 64-bit mode); a 64-bit register is only accepted
   when an Intel-syntax ToQword template allows switching to a `q'
   suffix.  Returns 1 on success, 0 after a diagnostic.  */
static int
check_long_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    if (i.types[op].bitfield.reg8
	&& (i.tm.operand_types[op].bitfield.reg16
	    || i.tm.operand_types[op].bitfield.reg32
	    || i.tm.operand_types[op].bitfield.acc))
      {
	as_bad (_("`%s%s' not allowed with `%s%c'"),
		register_prefix,
		i.op[op].regs->reg_name,
		i.tm.name,
		i.suffix);
	return 0;
      }
    /* Warn if the e prefix on a general reg is missing, i.e. a 16-bit
       register was given where the template wants a 32-bit one.  */
    else if ((!quiet_warnings || flag_code == CODE_64BIT)
	     && i.types[op].bitfield.reg16
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	/* Prohibit these changes in the 64bit mode, since the
	   lowering is more complicated.  */
	if (flag_code == CODE_64BIT)
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
#if REGISTER_WARNINGS
	/* The e-register entries sit at a fixed offset from their
	   16-bit counterparts in the register table.  */
	as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		 register_prefix,
		 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
		 register_prefix, i.op[op].regs->reg_name, i.suffix);
#endif
      }
    /* A 64-bit register where the template expects a 32-bit one: only
       Intel-syntax ToQword templates may silently retarget to a `q'
       suffix (and thus a REX.W encoding); otherwise it's an error.  */
    else if (i.types[op].bitfield.reg64
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	if (intel_syntax
	    && i.tm.opcode_modifier.toqword
	    && !i.types[0].bitfield.regxmm)
	  {
	    /* Convert to QWORD.  We want REX byte.  */
	    i.suffix = QWORD_MNEM_SUFFIX;
	  }
	else
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
      }
  return 1;
}
5780
/* Validate register operands against a `q' (qword) suffix.  Byte
   registers are rejected unless the template asks for them; a 16 or
   32-bit register where a 64-bit one is expected is only accepted
   when an Intel-syntax ToDword template allows switching to an `l'
   suffix.  Returns 1 on success, 0 after a diagnostic.  */
static int
check_qword_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0; )
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    if (i.types[op].bitfield.reg8
	&& (i.tm.operand_types[op].bitfield.reg16
	    || i.tm.operand_types[op].bitfield.reg32
	    || i.tm.operand_types[op].bitfield.acc))
      {
	as_bad (_("`%s%s' not allowed with `%s%c'"),
		register_prefix,
		i.op[op].regs->reg_name,
		i.tm.name,
		i.suffix);
	return 0;
      }
    /* Warn if the r prefix on a general reg is missing.  */
    else if ((i.types[op].bitfield.reg16
	      || i.types[op].bitfield.reg32)
	     && (i.tm.operand_types[op].bitfield.reg64
		 || i.tm.operand_types[op].bitfield.acc))
      {
	/* Intel-syntax ToDword templates may silently retarget to an
	   `l' suffix (avoiding the REX.W byte); any other mismatch is
	   an error.  */
	if (intel_syntax
	    && i.tm.opcode_modifier.todword
	    && !i.types[0].bitfield.regxmm)
	  {
	    /* Convert to DWORD.  We don't want REX byte.  */
	    i.suffix = LONG_MNEM_SUFFIX;
	  }
	else
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
      }
  return 1;
}
5826
5827 static int
5828 check_word_reg (void)
5829 {
5830 int op;
5831 for (op = i.operands; --op >= 0;)
5832 /* Reject eight bit registers, except where the template requires
5833 them. (eg. movzb) */
5834 if (i.types[op].bitfield.reg8
5835 && (i.tm.operand_types[op].bitfield.reg16
5836 || i.tm.operand_types[op].bitfield.reg32
5837 || i.tm.operand_types[op].bitfield.acc))
5838 {
5839 as_bad (_("`%s%s' not allowed with `%s%c'"),
5840 register_prefix,
5841 i.op[op].regs->reg_name,
5842 i.tm.name,
5843 i.suffix);
5844 return 0;
5845 }
5846 /* Warn if the e or r prefix on a general reg is present. */
5847 else if ((!quiet_warnings || flag_code == CODE_64BIT)
5848 && (i.types[op].bitfield.reg32
5849 || i.types[op].bitfield.reg64)
5850 && (i.tm.operand_types[op].bitfield.reg16
5851 || i.tm.operand_types[op].bitfield.acc))
5852 {
5853 /* Prohibit these changes in the 64bit mode, since the
5854 lowering is more complicated. */
5855 if (flag_code == CODE_64BIT)
5856 {
5857 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5858 register_prefix, i.op[op].regs->reg_name,
5859 i.suffix);
5860 return 0;
5861 }
5862 #if REGISTER_WARNINGS
5863 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5864 register_prefix,
5865 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
5866 register_prefix, i.op[op].regs->reg_name, i.suffix);
5867 #endif
5868 }
5869 return 1;
5870 }
5871
/* Narrow immediate operand J to a single immediate width.  When the
   operand still overlaps several imm sizes: an explicit mnemonic
   suffix keeps only the bits matching that suffix; otherwise an
   imm16/imm32/imm32s overlap is resolved from the current code size
   and any data size prefix.  Returns 1 on success, 0 (after a
   diagnostic) if the size is still ambiguous.  */
static int
update_imm (unsigned int j)
{
  i386_operand_type overlap = i.types[j];
  /* Only act when the operand is an immediate that still matches
     more than one exact width.  */
  if ((overlap.bitfield.imm8
       || overlap.bitfield.imm8s
       || overlap.bitfield.imm16
       || overlap.bitfield.imm32
       || overlap.bitfield.imm32s
       || overlap.bitfield.imm64)
      && !operand_type_equal (&overlap, &imm8)
      && !operand_type_equal (&overlap, &imm8s)
      && !operand_type_equal (&overlap, &imm16)
      && !operand_type_equal (&overlap, &imm32)
      && !operand_type_equal (&overlap, &imm32s)
      && !operand_type_equal (&overlap, &imm64))
    {
      if (i.suffix)
	{
	  /* Keep only the imm bits selected by the suffix.  */
	  i386_operand_type temp;

	  operand_type_set (&temp, 0);
	  if (i.suffix == BYTE_MNEM_SUFFIX)
	    {
	      temp.bitfield.imm8 = overlap.bitfield.imm8;
	      temp.bitfield.imm8s = overlap.bitfield.imm8s;
	    }
	  else if (i.suffix == WORD_MNEM_SUFFIX)
	    temp.bitfield.imm16 = overlap.bitfield.imm16;
	  else if (i.suffix == QWORD_MNEM_SUFFIX)
	    {
	      temp.bitfield.imm64 = overlap.bitfield.imm64;
	      temp.bitfield.imm32s = overlap.bitfield.imm32s;
	    }
	  else
	    temp.bitfield.imm32 = overlap.bitfield.imm32;
	  overlap = temp;
	}
      else if (operand_type_equal (&overlap, &imm16_32_32s)
	       || operand_type_equal (&overlap, &imm16_32)
	       || operand_type_equal (&overlap, &imm16_32s))
	{
	  /* No suffix: pick word or dword from the effective operand
	     size (code size XOR data size prefix).  */
	  if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
	    overlap = imm16;
	  else
	    overlap = imm32s;
	}
      /* If narrowing still left more than one candidate width, give
	 up with an error.  */
      if (!operand_type_equal (&overlap, &imm8)
	  && !operand_type_equal (&overlap, &imm8s)
	  && !operand_type_equal (&overlap, &imm16)
	  && !operand_type_equal (&overlap, &imm32)
	  && !operand_type_equal (&overlap, &imm32s)
	  && !operand_type_equal (&overlap, &imm64))
	{
	  as_bad (_("no instruction mnemonic suffix given; "
		    "can't determine immediate size"));
	  return 0;
	}
    }
  i.types[j] = overlap;

  return 1;
}
5935
5936 static int
5937 finalize_imm (void)
5938 {
5939 unsigned int j, n;
5940
5941 /* Update the first 2 immediate operands. */
5942 n = i.operands > 2 ? 2 : i.operands;
5943 if (n)
5944 {
5945 for (j = 0; j < n; j++)
5946 if (update_imm (j) == 0)
5947 return 0;
5948
5949 /* The 3rd operand can't be immediate operand. */
5950 gas_assert (operand_type_check (i.types[2], imm) == 0);
5951 }
5952
5953 return 1;
5954 }
5955
5956 static int
5957 bad_implicit_operand (int xmm)
5958 {
5959 const char *ireg = xmm ? "xmm0" : "ymm0";
5960
5961 if (intel_syntax)
5962 as_bad (_("the last operand of `%s' must be `%s%s'"),
5963 i.tm.name, register_prefix, ireg);
5964 else
5965 as_bad (_("the first operand of `%s' must be `%s%s'"),
5966 i.tm.name, register_prefix, ireg);
5967 return 0;
5968 }
5969
/* Post-match operand fixups: materialize or strip implicit operands
   (SSE2AVX xmm0 duplication, FirstXmm0, implicit register groups,
   RegKludge), fold short-form register operands into the opcode
   byte, build the ModR/M byte for ModR/M instructions, and emit a
   segment override prefix when an explicit segment differs from the
   instruction's default.  Mutates both `i' and `i.tm' in place.
   Returns 1 on success, 0 after a diagnostic.  */
static int
process_operands (void)
{
  /* Default segment register this instruction will use for memory
     accesses.  0 means unknown.  This is only for optimizing out
     unnecessary segment overrides.  */
  const seg_entry *default_seg = 0;

  if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
    {
      /* SSE2AVX with a VEX.vvvv destination: the legacy SSE template
	 is one operand short, so the destination gets duplicated (or
	 an implicit xmm0 inserted) to fill the VEX encoding.  */
      unsigned int dupl = i.operands;
      unsigned int dest = dupl - 1;
      unsigned int j;

      /* The destination must be an xmm register.  */
      gas_assert (i.reg_operands
		  && MAX_OPERANDS > dupl
		  && operand_type_equal (&i.types[dest], &regxmm));

      if (i.tm.opcode_modifier.firstxmm0)
	{
	  /* The first operand is implicit and must be xmm0.  */
	  gas_assert (operand_type_equal (&i.types[0], &regxmm));
	  if (register_number (i.op[0].regs) != 0)
	    return bad_implicit_operand (1);

	  if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
	    {
	      /* Keep xmm0 for instructions with VEX prefix and 3
		 sources.  */
	      goto duplicate;
	    }
	  else
	    {
	      /* We remove the first xmm0 and keep the number of
		 operands unchanged, which in fact duplicates the
		 destination.  */
	      for (j = 1; j < i.operands; j++)
		{
		  i.op[j - 1] = i.op[j];
		  i.types[j - 1] = i.types[j];
		  i.tm.operand_types[j - 1] = i.tm.operand_types[j];
		}
	    }
	}
      else if (i.tm.opcode_modifier.implicit1stxmm0)
	{
	  gas_assert ((MAX_OPERANDS - 1) > dupl
		      && (i.tm.opcode_modifier.vexsources
			  == VEX3SOURCES));

	  /* Add the implicit xmm0 for instructions with VEX prefix
	     and 3 sources.  Shift every operand up one slot to make
	     room at index 0.  */
	  for (j = i.operands; j > 0; j--)
	    {
	      i.op[j] = i.op[j - 1];
	      i.types[j] = i.types[j - 1];
	      i.tm.operand_types[j] = i.tm.operand_types[j - 1];
	    }
	  i.op[0].regs
	    = (const reg_entry *) hash_find (reg_hash, "xmm0");
	  i.types[0] = regxmm;
	  i.tm.operand_types[0] = regxmm;

	  /* One for the inserted xmm0, one for the duplicated
	     destination below.  */
	  i.operands += 2;
	  i.reg_operands += 2;
	  i.tm.operands += 2;

	  dupl++;
	  dest++;
	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}
      else
	{
	duplicate:
	  /* Append a copy of the destination as an extra operand.  */
	  i.operands++;
	  i.reg_operands++;
	  i.tm.operands++;

	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}

       if (i.tm.opcode_modifier.immext)
	 process_immext ();
    }
  else if (i.tm.opcode_modifier.firstxmm0)
    {
      unsigned int j;

      /* The first operand is implicit and must be xmm0/ymm0/zmm0.  */
      gas_assert (i.reg_operands
		  && (operand_type_equal (&i.types[0], &regxmm)
		      || operand_type_equal (&i.types[0], &regymm)
		      || operand_type_equal (&i.types[0], &regzmm)));
      if (register_number (i.op[0].regs) != 0)
	return bad_implicit_operand (i.types[0].bitfield.regxmm);

      /* Drop the implicit operand by shifting the rest down.  */
      for (j = 1; j < i.operands; j++)
	{
	  i.op[j - 1] = i.op[j];
	  i.types[j - 1] = i.types[j];

	  /* We need to adjust fields in i.tm since they are used by
	     build_modrm_byte.  */
	  i.tm.operand_types [j - 1] = i.tm.operand_types [j];
	}

      i.operands--;
      i.reg_operands--;
      i.tm.operands--;
    }
  else if (i.tm.opcode_modifier.implicitquadgroup)
    {
      /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
      gas_assert (i.operands >= 2
		  && (operand_type_equal (&i.types[1], &regxmm)
		      || operand_type_equal (&i.types[1], &regymm)
		      || operand_type_equal (&i.types[1], &regzmm)));
      unsigned int regnum = register_number (i.op[1].regs);
      unsigned int first_reg_in_group = regnum & ~3;
      unsigned int last_reg_in_group = first_reg_in_group + 3;
      /* A non-aligned register is accepted but actually denotes the
	 whole aligned group of four; warn so the user knows.  */
      if (regnum != first_reg_in_group) {
	as_warn (_("the second source register `%s%s' implicitly denotes"
		   " `%s%.3s%d' to `%s%.3s%d' source group in `%s'"),
		 register_prefix, i.op[1].regs->reg_name,
		 register_prefix, i.op[1].regs->reg_name, first_reg_in_group,
		 register_prefix, i.op[1].regs->reg_name, last_reg_in_group,
		 i.tm.name);
	}
    }
  else if (i.tm.opcode_modifier.regkludge)
    {
      /* The imul $imm, %reg instruction is converted into
	 imul $imm, %reg, %reg, and the clr %reg instruction
	 is converted into xor %reg, %reg.  */

      unsigned int first_reg_op;

      if (operand_type_check (i.types[0], reg))
	first_reg_op = 0;
      else
	first_reg_op = 1;
      /* Pretend we saw the extra register operand.  */
      gas_assert (i.reg_operands == 1
		  && i.op[first_reg_op + 1].regs == 0);
      i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
      i.types[first_reg_op + 1] = i.types[first_reg_op];
      i.operands++;
      i.reg_operands++;
    }

  if (i.tm.opcode_modifier.shortform)
    {
      /* Short-form encodings carry a register number inside the
	 opcode byte itself rather than in a ModR/M byte.  */
      if (i.types[0].bitfield.sreg2
	  || i.types[0].bitfield.sreg3)
	{
	  if (i.tm.base_opcode == POP_SEG_SHORT
	      && i.op[0].regs->reg_num == 1)
	    {
	      /* `pop %cs' (segment register 1) is invalid.  */
	      as_bad (_("you can't `pop %scs'"), register_prefix);
	      return 0;
	    }
	  i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
	  if ((i.op[0].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	}
      else
	{
	  /* The register or float register operand is in operand
	     0 or 1.  */
	  unsigned int op;

	  if (i.types[0].bitfield.floatreg
	      || operand_type_check (i.types[0], reg))
	    op = 0;
	  else
	    op = 1;
	  /* Register goes in low 3 bits of opcode.  */
	  i.tm.base_opcode |= i.op[op].regs->reg_num;
	  if ((i.op[op].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if (!quiet_warnings && i.tm.opcode_modifier.ugh)
	    {
	      /* Warn about some common errors, but press on regardless.
		 The first case can be generated by gcc (<= 2.8.1).  */
	      if (i.operands == 2)
		{
		  /* Reversed arguments on faddp, fsubp, etc.  */
		  as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
			   register_prefix, i.op[!intel_syntax].regs->reg_name,
			   register_prefix, i.op[intel_syntax].regs->reg_name);
		}
	      else
		{
		  /* Extraneous `l' suffix on fp insn.  */
		  as_warn (_("translating to `%s %s%s'"), i.tm.name,
			   register_prefix, i.op[0].regs->reg_name);
		}
	    }
	}
    }
  else if (i.tm.opcode_modifier.modrm)
    {
      /* The opcode is completed (modulo i.tm.extension_opcode which
	 must be put into the modrm byte).  Now, we make the modrm and
	 index base bytes based on all the info we've collected.  */

      default_seg = build_modrm_byte ();
    }
  else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
    {
      /* mov accumulator <-> absolute address defaults to %ds.  */
      default_seg = &ds;
    }
  else if (i.tm.opcode_modifier.isstring)
    {
      /* For the string instructions that allow a segment override
	 on one of their operands, the default segment is ds.  */
      default_seg = &ds;
    }

  /* lea computes an address only; a segment override has no effect.  */
  if (i.tm.base_opcode == 0x8d /* lea */
      && i.seg[0]
      && !quiet_warnings)
    as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);

  /* If a segment was explicitly specified, and the specified segment
     is not the default, use an opcode prefix to select it.  If we
     never figured out what the default segment is, then default_seg
     will be zero at this point, and the specified segment prefix will
     always be used.  */
  if ((i.seg[0]) && (i.seg[0] != default_seg))
    {
      if (!add_prefix (i.seg[0]->seg_prefix))
	return 0;
    }
  return 1;
}
6211
6212 static const seg_entry *
6213 build_modrm_byte (void)
6214 {
6215 const seg_entry *default_seg = 0;
6216 unsigned int source, dest;
6217 int vex_3_sources;
6218
6219 /* The first operand of instructions with VEX prefix and 3 sources
6220 must be VEX_Imm4. */
6221 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
6222 if (vex_3_sources)
6223 {
6224 unsigned int nds, reg_slot;
6225 expressionS *exp;
6226
6227 if (i.tm.opcode_modifier.veximmext
6228 && i.tm.opcode_modifier.immext)
6229 {
6230 dest = i.operands - 2;
6231 gas_assert (dest == 3);
6232 }
6233 else
6234 dest = i.operands - 1;
6235 nds = dest - 1;
6236
6237 /* There are 2 kinds of instructions:
6238 1. 5 operands: 4 register operands or 3 register operands
6239 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
6240 VexW0 or VexW1. The destination must be either XMM, YMM or
6241 ZMM register.
6242 2. 4 operands: 4 register operands or 3 register operands
6243 plus 1 memory operand, VexXDS, and VexImmExt */
6244 gas_assert ((i.reg_operands == 4
6245 || (i.reg_operands == 3 && i.mem_operands == 1))
6246 && i.tm.opcode_modifier.vexvvvv == VEXXDS
6247 && (i.tm.opcode_modifier.veximmext
6248 || (i.imm_operands == 1
6249 && i.types[0].bitfield.vec_imm4
6250 && (i.tm.opcode_modifier.vexw == VEXW0
6251 || i.tm.opcode_modifier.vexw == VEXW1)
6252 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
6253 || operand_type_equal (&i.tm.operand_types[dest], &regymm)
6254 || operand_type_equal (&i.tm.operand_types[dest], &regzmm)))));
6255
6256 if (i.imm_operands == 0)
6257 {
6258 /* When there is no immediate operand, generate an 8bit
6259 immediate operand to encode the first operand. */
6260 exp = &im_expressions[i.imm_operands++];
6261 i.op[i.operands].imms = exp;
6262 i.types[i.operands] = imm8;
6263 i.operands++;
6264 /* If VexW1 is set, the first operand is the source and
6265 the second operand is encoded in the immediate operand. */
6266 if (i.tm.opcode_modifier.vexw == VEXW1)
6267 {
6268 source = 0;
6269 reg_slot = 1;
6270 }
6271 else
6272 {
6273 source = 1;
6274 reg_slot = 0;
6275 }
6276
6277 /* FMA swaps REG and NDS. */
6278 if (i.tm.cpu_flags.bitfield.cpufma)
6279 {
6280 unsigned int tmp;
6281 tmp = reg_slot;
6282 reg_slot = nds;
6283 nds = tmp;
6284 }
6285
6286 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
6287 &regxmm)
6288 || operand_type_equal (&i.tm.operand_types[reg_slot],
6289 &regymm)
6290 || operand_type_equal (&i.tm.operand_types[reg_slot],
6291 &regzmm));
6292 exp->X_op = O_constant;
6293 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
6294 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
6295 }
6296 else
6297 {
6298 unsigned int imm_slot;
6299
6300 if (i.tm.opcode_modifier.vexw == VEXW0)
6301 {
6302 /* If VexW0 is set, the third operand is the source and
6303 the second operand is encoded in the immediate
6304 operand. */
6305 source = 2;
6306 reg_slot = 1;
6307 }
6308 else
6309 {
6310 /* VexW1 is set, the second operand is the source and
6311 the third operand is encoded in the immediate
6312 operand. */
6313 source = 1;
6314 reg_slot = 2;
6315 }
6316
6317 if (i.tm.opcode_modifier.immext)
6318 {
6319 /* When ImmExt is set, the immediate byte is the last
6320 operand. */
6321 imm_slot = i.operands - 1;
6322 source--;
6323 reg_slot--;
6324 }
6325 else
6326 {
6327 imm_slot = 0;
6328
6329 /* Turn on Imm8 so that output_imm will generate it. */
6330 i.types[imm_slot].bitfield.imm8 = 1;
6331 }
6332
6333 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
6334 &regxmm)
6335 || operand_type_equal (&i.tm.operand_types[reg_slot],
6336 &regymm)
6337 || operand_type_equal (&i.tm.operand_types[reg_slot],
6338 &regzmm));
6339 i.op[imm_slot].imms->X_add_number
6340 |= register_number (i.op[reg_slot].regs) << 4;
6341 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
6342 }
6343
6344 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
6345 || operand_type_equal (&i.tm.operand_types[nds],
6346 &regymm)
6347 || operand_type_equal (&i.tm.operand_types[nds],
6348 &regzmm));
6349 i.vex.register_specifier = i.op[nds].regs;
6350 }
6351 else
6352 source = dest = 0;
6353
6354 /* i.reg_operands MUST be the number of real register operands;
6355 implicit registers do not count. If there are 3 register
6356 operands, it must be a instruction with VexNDS. For a
6357 instruction with VexNDD, the destination register is encoded
6358 in VEX prefix. If there are 4 register operands, it must be
6359 a instruction with VEX prefix and 3 sources. */
6360 if (i.mem_operands == 0
6361 && ((i.reg_operands == 2
6362 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
6363 || (i.reg_operands == 3
6364 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
6365 || (i.reg_operands == 4 && vex_3_sources)))
6366 {
6367 switch (i.operands)
6368 {
6369 case 2:
6370 source = 0;
6371 break;
6372 case 3:
6373 /* When there are 3 operands, one of them may be immediate,
6374 which may be the first or the last operand. Otherwise,
6375 the first operand must be shift count register (cl) or it
6376 is an instruction with VexNDS. */
6377 gas_assert (i.imm_operands == 1
6378 || (i.imm_operands == 0
6379 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
6380 || i.types[0].bitfield.shiftcount)));
6381 if (operand_type_check (i.types[0], imm)
6382 || i.types[0].bitfield.shiftcount)
6383 source = 1;
6384 else
6385 source = 0;
6386 break;
6387 case 4:
6388 /* When there are 4 operands, the first two must be 8bit
6389 immediate operands. The source operand will be the 3rd
6390 one.
6391
6392 For instructions with VexNDS, if the first operand
6393 an imm8, the source operand is the 2nd one. If the last
6394 operand is imm8, the source operand is the first one. */
6395 gas_assert ((i.imm_operands == 2
6396 && i.types[0].bitfield.imm8
6397 && i.types[1].bitfield.imm8)
6398 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
6399 && i.imm_operands == 1
6400 && (i.types[0].bitfield.imm8
6401 || i.types[i.operands - 1].bitfield.imm8
6402 || i.rounding)));
6403 if (i.imm_operands == 2)
6404 source = 2;
6405 else
6406 {
6407 if (i.types[0].bitfield.imm8)
6408 source = 1;
6409 else
6410 source = 0;
6411 }
6412 break;
6413 case 5:
6414 if (i.tm.opcode_modifier.evex)
6415 {
6416 /* For EVEX instructions, when there are 5 operands, the
6417 first one must be immediate operand. If the second one
6418 is immediate operand, the source operand is the 3th
6419 one. If the last one is immediate operand, the source
6420 operand is the 2nd one. */
6421 gas_assert (i.imm_operands == 2
6422 && i.tm.opcode_modifier.sae
6423 && operand_type_check (i.types[0], imm));
6424 if (operand_type_check (i.types[1], imm))
6425 source = 2;
6426 else if (operand_type_check (i.types[4], imm))
6427 source = 1;
6428 else
6429 abort ();
6430 }
6431 break;
6432 default:
6433 abort ();
6434 }
6435
6436 if (!vex_3_sources)
6437 {
6438 dest = source + 1;
6439
6440 /* RC/SAE operand could be between DEST and SRC. That happens
6441 when one operand is GPR and the other one is XMM/YMM/ZMM
6442 register. */
6443 if (i.rounding && i.rounding->operand == (int) dest)
6444 dest++;
6445
6446 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6447 {
6448 /* For instructions with VexNDS, the register-only source
6449 operand must be 32/64bit integer, XMM, YMM or ZMM
6450 register. It is encoded in VEX prefix. We need to
6451 clear RegMem bit before calling operand_type_equal. */
6452
6453 i386_operand_type op;
6454 unsigned int vvvv;
6455
6456 /* Check register-only source operand when two source
6457 operands are swapped. */
6458 if (!i.tm.operand_types[source].bitfield.baseindex
6459 && i.tm.operand_types[dest].bitfield.baseindex)
6460 {
6461 vvvv = source;
6462 source = dest;
6463 }
6464 else
6465 vvvv = dest;
6466
6467 op = i.tm.operand_types[vvvv];
6468 op.bitfield.regmem = 0;
6469 if ((dest + 1) >= i.operands
6470 || (!op.bitfield.reg32
6471 && op.bitfield.reg64
6472 && !operand_type_equal (&op, &regxmm)
6473 && !operand_type_equal (&op, &regymm)
6474 && !operand_type_equal (&op, &regzmm)
6475 && !operand_type_equal (&op, &regmask)))
6476 abort ();
6477 i.vex.register_specifier = i.op[vvvv].regs;
6478 dest++;
6479 }
6480 }
6481
6482 i.rm.mode = 3;
6483 /* One of the register operands will be encoded in the i.tm.reg
6484 field, the other in the combined i.tm.mode and i.tm.regmem
6485 fields. If no form of this instruction supports a memory
6486 destination operand, then we assume the source operand may
6487 sometimes be a memory operand and so we need to store the
6488 destination in the i.rm.reg field. */
6489 if (!i.tm.operand_types[dest].bitfield.regmem
6490 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
6491 {
6492 i.rm.reg = i.op[dest].regs->reg_num;
6493 i.rm.regmem = i.op[source].regs->reg_num;
6494 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6495 i.rex |= REX_R;
6496 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6497 i.vrex |= REX_R;
6498 if ((i.op[source].regs->reg_flags & RegRex) != 0)
6499 i.rex |= REX_B;
6500 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6501 i.vrex |= REX_B;
6502 }
6503 else
6504 {
6505 i.rm.reg = i.op[source].regs->reg_num;
6506 i.rm.regmem = i.op[dest].regs->reg_num;
6507 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6508 i.rex |= REX_B;
6509 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6510 i.vrex |= REX_B;
6511 if ((i.op[source].regs->reg_flags & RegRex) != 0)
6512 i.rex |= REX_R;
6513 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6514 i.vrex |= REX_R;
6515 }
6516 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
6517 {
6518 if (!i.types[0].bitfield.control
6519 && !i.types[1].bitfield.control)
6520 abort ();
6521 i.rex &= ~(REX_R | REX_B);
6522 add_prefix (LOCK_PREFIX_OPCODE);
6523 }
6524 }
6525 else
6526 { /* If it's not 2 reg operands... */
6527 unsigned int mem;
6528
6529 if (i.mem_operands)
6530 {
6531 unsigned int fake_zero_displacement = 0;
6532 unsigned int op;
6533
6534 for (op = 0; op < i.operands; op++)
6535 if (operand_type_check (i.types[op], anymem))
6536 break;
6537 gas_assert (op < i.operands);
6538
6539 if (i.tm.opcode_modifier.vecsib)
6540 {
6541 if (i.index_reg->reg_num == RegEiz
6542 || i.index_reg->reg_num == RegRiz)
6543 abort ();
6544
6545 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6546 if (!i.base_reg)
6547 {
6548 i.sib.base = NO_BASE_REGISTER;
6549 i.sib.scale = i.log2_scale_factor;
6550 /* No Vec_Disp8 if there is no base. */
6551 i.types[op].bitfield.vec_disp8 = 0;
6552 i.types[op].bitfield.disp8 = 0;
6553 i.types[op].bitfield.disp16 = 0;
6554 i.types[op].bitfield.disp64 = 0;
6555 if (flag_code != CODE_64BIT)
6556 {
6557 /* Must be 32 bit */
6558 i.types[op].bitfield.disp32 = 1;
6559 i.types[op].bitfield.disp32s = 0;
6560 }
6561 else
6562 {
6563 i.types[op].bitfield.disp32 = 0;
6564 i.types[op].bitfield.disp32s = 1;
6565 }
6566 }
6567 i.sib.index = i.index_reg->reg_num;
6568 if ((i.index_reg->reg_flags & RegRex) != 0)
6569 i.rex |= REX_X;
6570 if ((i.index_reg->reg_flags & RegVRex) != 0)
6571 i.vrex |= REX_X;
6572 }
6573
6574 default_seg = &ds;
6575
6576 if (i.base_reg == 0)
6577 {
6578 i.rm.mode = 0;
6579 if (!i.disp_operands)
6580 {
6581 fake_zero_displacement = 1;
6582 /* Instructions with VSIB byte need 32bit displacement
6583 if there is no base register. */
6584 if (i.tm.opcode_modifier.vecsib)
6585 i.types[op].bitfield.disp32 = 1;
6586 }
6587 if (i.index_reg == 0)
6588 {
6589 gas_assert (!i.tm.opcode_modifier.vecsib);
6590 /* Operand is just <disp> */
6591 if (flag_code == CODE_64BIT)
6592 {
6593 /* 64bit mode overwrites the 32bit absolute
6594 addressing by RIP relative addressing and
6595 absolute addressing is encoded by one of the
6596 redundant SIB forms. */
6597 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6598 i.sib.base = NO_BASE_REGISTER;
6599 i.sib.index = NO_INDEX_REGISTER;
6600 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
6601 ? disp32s : disp32);
6602 }
6603 else if ((flag_code == CODE_16BIT)
6604 ^ (i.prefix[ADDR_PREFIX] != 0))
6605 {
6606 i.rm.regmem = NO_BASE_REGISTER_16;
6607 i.types[op] = disp16;
6608 }
6609 else
6610 {
6611 i.rm.regmem = NO_BASE_REGISTER;
6612 i.types[op] = disp32;
6613 }
6614 }
6615 else if (!i.tm.opcode_modifier.vecsib)
6616 {
6617 /* !i.base_reg && i.index_reg */
6618 if (i.index_reg->reg_num == RegEiz
6619 || i.index_reg->reg_num == RegRiz)
6620 i.sib.index = NO_INDEX_REGISTER;
6621 else
6622 i.sib.index = i.index_reg->reg_num;
6623 i.sib.base = NO_BASE_REGISTER;
6624 i.sib.scale = i.log2_scale_factor;
6625 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6626 /* No Vec_Disp8 if there is no base. */
6627 i.types[op].bitfield.vec_disp8 = 0;
6628 i.types[op].bitfield.disp8 = 0;
6629 i.types[op].bitfield.disp16 = 0;
6630 i.types[op].bitfield.disp64 = 0;
6631 if (flag_code != CODE_64BIT)
6632 {
6633 /* Must be 32 bit */
6634 i.types[op].bitfield.disp32 = 1;
6635 i.types[op].bitfield.disp32s = 0;
6636 }
6637 else
6638 {
6639 i.types[op].bitfield.disp32 = 0;
6640 i.types[op].bitfield.disp32s = 1;
6641 }
6642 if ((i.index_reg->reg_flags & RegRex) != 0)
6643 i.rex |= REX_X;
6644 }
6645 }
6646 /* RIP addressing for 64bit mode. */
6647 else if (i.base_reg->reg_num == RegRip ||
6648 i.base_reg->reg_num == RegEip)
6649 {
6650 gas_assert (!i.tm.opcode_modifier.vecsib);
6651 i.rm.regmem = NO_BASE_REGISTER;
6652 i.types[op].bitfield.disp8 = 0;
6653 i.types[op].bitfield.disp16 = 0;
6654 i.types[op].bitfield.disp32 = 0;
6655 i.types[op].bitfield.disp32s = 1;
6656 i.types[op].bitfield.disp64 = 0;
6657 i.types[op].bitfield.vec_disp8 = 0;
6658 i.flags[op] |= Operand_PCrel;
6659 if (! i.disp_operands)
6660 fake_zero_displacement = 1;
6661 }
6662 else if (i.base_reg->reg_type.bitfield.reg16)
6663 {
6664 gas_assert (!i.tm.opcode_modifier.vecsib);
6665 switch (i.base_reg->reg_num)
6666 {
6667 case 3: /* (%bx) */
6668 if (i.index_reg == 0)
6669 i.rm.regmem = 7;
6670 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
6671 i.rm.regmem = i.index_reg->reg_num - 6;
6672 break;
6673 case 5: /* (%bp) */
6674 default_seg = &ss;
6675 if (i.index_reg == 0)
6676 {
6677 i.rm.regmem = 6;
6678 if (operand_type_check (i.types[op], disp) == 0)
6679 {
6680 /* fake (%bp) into 0(%bp) */
6681 if (i.tm.operand_types[op].bitfield.vec_disp8)
6682 i.types[op].bitfield.vec_disp8 = 1;
6683 else
6684 i.types[op].bitfield.disp8 = 1;
6685 fake_zero_displacement = 1;
6686 }
6687 }
6688 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
6689 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
6690 break;
6691 default: /* (%si) -> 4 or (%di) -> 5 */
6692 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
6693 }
6694 i.rm.mode = mode_from_disp_size (i.types[op]);
6695 }
6696 else /* i.base_reg and 32/64 bit mode */
6697 {
6698 if (flag_code == CODE_64BIT
6699 && operand_type_check (i.types[op], disp))
6700 {
6701 i386_operand_type temp;
6702 operand_type_set (&temp, 0);
6703 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
6704 temp.bitfield.vec_disp8
6705 = i.types[op].bitfield.vec_disp8;
6706 i.types[op] = temp;
6707 if (i.prefix[ADDR_PREFIX] == 0)
6708 i.types[op].bitfield.disp32s = 1;
6709 else
6710 i.types[op].bitfield.disp32 = 1;
6711 }
6712
6713 if (!i.tm.opcode_modifier.vecsib)
6714 i.rm.regmem = i.base_reg->reg_num;
6715 if ((i.base_reg->reg_flags & RegRex) != 0)
6716 i.rex |= REX_B;
6717 i.sib.base = i.base_reg->reg_num;
6718 /* x86-64 ignores REX prefix bit here to avoid decoder
6719 complications. */
6720 if (!(i.base_reg->reg_flags & RegRex)
6721 && (i.base_reg->reg_num == EBP_REG_NUM
6722 || i.base_reg->reg_num == ESP_REG_NUM))
6723 default_seg = &ss;
6724 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
6725 {
6726 fake_zero_displacement = 1;
6727 if (i.tm.operand_types [op].bitfield.vec_disp8)
6728 i.types[op].bitfield.vec_disp8 = 1;
6729 else
6730 i.types[op].bitfield.disp8 = 1;
6731 }
6732 i.sib.scale = i.log2_scale_factor;
6733 if (i.index_reg == 0)
6734 {
6735 gas_assert (!i.tm.opcode_modifier.vecsib);
6736 /* <disp>(%esp) becomes two byte modrm with no index
6737 register. We've already stored the code for esp
6738 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
6739 Any base register besides %esp will not use the
6740 extra modrm byte. */
6741 i.sib.index = NO_INDEX_REGISTER;
6742 }
6743 else if (!i.tm.opcode_modifier.vecsib)
6744 {
6745 if (i.index_reg->reg_num == RegEiz
6746 || i.index_reg->reg_num == RegRiz)
6747 i.sib.index = NO_INDEX_REGISTER;
6748 else
6749 i.sib.index = i.index_reg->reg_num;
6750 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6751 if ((i.index_reg->reg_flags & RegRex) != 0)
6752 i.rex |= REX_X;
6753 }
6754
6755 if (i.disp_operands
6756 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
6757 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
6758 i.rm.mode = 0;
6759 else
6760 {
6761 if (!fake_zero_displacement
6762 && !i.disp_operands
6763 && i.disp_encoding)
6764 {
6765 fake_zero_displacement = 1;
6766 if (i.disp_encoding == disp_encoding_8bit)
6767 i.types[op].bitfield.disp8 = 1;
6768 else
6769 i.types[op].bitfield.disp32 = 1;
6770 }
6771 i.rm.mode = mode_from_disp_size (i.types[op]);
6772 }
6773 }
6774
6775 if (fake_zero_displacement)
6776 {
6777 /* Fakes a zero displacement assuming that i.types[op]
6778 holds the correct displacement size. */
6779 expressionS *exp;
6780
6781 gas_assert (i.op[op].disps == 0);
6782 exp = &disp_expressions[i.disp_operands++];
6783 i.op[op].disps = exp;
6784 exp->X_op = O_constant;
6785 exp->X_add_number = 0;
6786 exp->X_add_symbol = (symbolS *) 0;
6787 exp->X_op_symbol = (symbolS *) 0;
6788 }
6789
6790 mem = op;
6791 }
6792 else
6793 mem = ~0;
6794
6795 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
6796 {
6797 if (operand_type_check (i.types[0], imm))
6798 i.vex.register_specifier = NULL;
6799 else
6800 {
6801 /* VEX.vvvv encodes one of the sources when the first
6802 operand is not an immediate. */
6803 if (i.tm.opcode_modifier.vexw == VEXW0)
6804 i.vex.register_specifier = i.op[0].regs;
6805 else
6806 i.vex.register_specifier = i.op[1].regs;
6807 }
6808
6809 /* Destination is a XMM register encoded in the ModRM.reg
6810 and VEX.R bit. */
6811 i.rm.reg = i.op[2].regs->reg_num;
6812 if ((i.op[2].regs->reg_flags & RegRex) != 0)
6813 i.rex |= REX_R;
6814
6815 /* ModRM.rm and VEX.B encodes the other source. */
6816 if (!i.mem_operands)
6817 {
6818 i.rm.mode = 3;
6819
6820 if (i.tm.opcode_modifier.vexw == VEXW0)
6821 i.rm.regmem = i.op[1].regs->reg_num;
6822 else
6823 i.rm.regmem = i.op[0].regs->reg_num;
6824
6825 if ((i.op[1].regs->reg_flags & RegRex) != 0)
6826 i.rex |= REX_B;
6827 }
6828 }
6829 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
6830 {
6831 i.vex.register_specifier = i.op[2].regs;
6832 if (!i.mem_operands)
6833 {
6834 i.rm.mode = 3;
6835 i.rm.regmem = i.op[1].regs->reg_num;
6836 if ((i.op[1].regs->reg_flags & RegRex) != 0)
6837 i.rex |= REX_B;
6838 }
6839 }
6840 /* Fill in i.rm.reg or i.rm.regmem field with register operand
6841 (if any) based on i.tm.extension_opcode. Again, we must be
6842 careful to make sure that segment/control/debug/test/MMX
6843 registers are coded into the i.rm.reg field. */
6844 else if (i.reg_operands)
6845 {
6846 unsigned int op;
6847 unsigned int vex_reg = ~0;
6848
6849 for (op = 0; op < i.operands; op++)
6850 if (i.types[op].bitfield.reg8
6851 || i.types[op].bitfield.reg16
6852 || i.types[op].bitfield.reg32
6853 || i.types[op].bitfield.reg64
6854 || i.types[op].bitfield.regmmx
6855 || i.types[op].bitfield.regxmm
6856 || i.types[op].bitfield.regymm
6857 || i.types[op].bitfield.regbnd
6858 || i.types[op].bitfield.regzmm
6859 || i.types[op].bitfield.regmask
6860 || i.types[op].bitfield.sreg2
6861 || i.types[op].bitfield.sreg3
6862 || i.types[op].bitfield.control
6863 || i.types[op].bitfield.debug
6864 || i.types[op].bitfield.test)
6865 break;
6866
6867 if (vex_3_sources)
6868 op = dest;
6869 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6870 {
6871 /* For instructions with VexNDS, the register-only
6872 source operand is encoded in VEX prefix. */
6873 gas_assert (mem != (unsigned int) ~0);
6874
6875 if (op > mem)
6876 {
6877 vex_reg = op++;
6878 gas_assert (op < i.operands);
6879 }
6880 else
6881 {
6882 /* Check register-only source operand when two source
6883 operands are swapped. */
6884 if (!i.tm.operand_types[op].bitfield.baseindex
6885 && i.tm.operand_types[op + 1].bitfield.baseindex)
6886 {
6887 vex_reg = op;
6888 op += 2;
6889 gas_assert (mem == (vex_reg + 1)
6890 && op < i.operands);
6891 }
6892 else
6893 {
6894 vex_reg = op + 1;
6895 gas_assert (vex_reg < i.operands);
6896 }
6897 }
6898 }
6899 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
6900 {
6901 /* For instructions with VexNDD, the register destination
6902 is encoded in VEX prefix. */
6903 if (i.mem_operands == 0)
6904 {
6905 /* There is no memory operand. */
6906 gas_assert ((op + 2) == i.operands);
6907 vex_reg = op + 1;
6908 }
6909 else
6910 {
6911 /* There are only 2 operands. */
6912 gas_assert (op < 2 && i.operands == 2);
6913 vex_reg = 1;
6914 }
6915 }
6916 else
6917 gas_assert (op < i.operands);
6918
6919 if (vex_reg != (unsigned int) ~0)
6920 {
6921 i386_operand_type *type = &i.tm.operand_types[vex_reg];
6922
6923 if (type->bitfield.reg32 != 1
6924 && type->bitfield.reg64 != 1
6925 && !operand_type_equal (type, &regxmm)
6926 && !operand_type_equal (type, &regymm)
6927 && !operand_type_equal (type, &regzmm)
6928 && !operand_type_equal (type, &regmask))
6929 abort ();
6930
6931 i.vex.register_specifier = i.op[vex_reg].regs;
6932 }
6933
6934 /* Don't set OP operand twice. */
6935 if (vex_reg != op)
6936 {
6937 /* If there is an extension opcode to put here, the
6938 register number must be put into the regmem field. */
6939 if (i.tm.extension_opcode != None)
6940 {
6941 i.rm.regmem = i.op[op].regs->reg_num;
6942 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6943 i.rex |= REX_B;
6944 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
6945 i.vrex |= REX_B;
6946 }
6947 else
6948 {
6949 i.rm.reg = i.op[op].regs->reg_num;
6950 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6951 i.rex |= REX_R;
6952 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
6953 i.vrex |= REX_R;
6954 }
6955 }
6956
6957 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6958 must set it to 3 to indicate this is a register operand
6959 in the regmem field. */
6960 if (!i.mem_operands)
6961 i.rm.mode = 3;
6962 }
6963
6964 /* Fill in i.rm.reg field with extension opcode (if any). */
6965 if (i.tm.extension_opcode != None)
6966 i.rm.reg = i.tm.extension_opcode;
6967 }
6968 return default_seg;
6969 }
6970
/* Emit a relaxable branch (JMP/Jcc): the fixed part of the frag gets
   the prefixes and one opcode byte, and frag_var reserves room for the
   largest form so md_convert_frag can later pick the shortest encoding
   that reaches the target.  */

static void
output_branch (void)
{
  char *p;
  int size;
  int code16;
  int prefix;
  relax_substateT subtype;
  symbolS *sym;
  offsetT off;

  /* CODE16 marks 16-bit operand size; a data-size prefix toggles it.  */
  code16 = flag_code == CODE_16BIT ? CODE16 : 0;
  /* An explicit .d32 disp encoding forces the relaxer to start BIG.  */
  size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;

  /* Count the prefix bytes we will emit ourselves and drop them from
     i.prefixes so the leftover-prefix warning below is accurate.  */
  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      code16 ^= CODE16;
    }
  /* Pentium4 branch hints.  */
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
    {
      prefix++;
      i.prefixes--;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes--;
    }

  /* BND prefixed jump.  Emitted immediately, before the relaxable frag.  */
  if (i.prefix[BND_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
      i.prefixes -= 1;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* It's always a symbol; End frag & setup for relax.
     Make sure there is enough room in this frag for the largest
     instruction we may generate in md_convert_frag.  This is 2
     bytes for the opcode and room for the prefix and largest
     displacement.  */
  frag_grow (prefix + 2 + 4);
  /* Prefix and 1 opcode byte go in fr_fix.  */
  p = frag_more (prefix + 1);
  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
    *p++ = i.prefix[SEG_PREFIX];
  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];
  *p = i.tm.base_opcode;

  /* Select the relax state from the opcode just stored.  */
  if ((unsigned char) *p == JUMP_PC_RELATIVE)
    subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
  else if (cpu_arch_flags.bitfield.cpui386)
    subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
  else
    subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
  subtype |= code16;

  sym = i.op[0].disps->X_add_symbol;
  off = i.op[0].disps->X_add_number;

  if (i.op[0].disps->X_op != O_constant
      && i.op[0].disps->X_op != O_symbol)
    {
      /* Handle complex expressions by wrapping them in a symbol.  */
      sym = make_expr_symbol (i.op[0].disps);
      off = 0;
    }

  /* 1 possible extra opcode + 4 byte displacement go in var part.
     Pass reloc in fr_var.  */
  frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
}
7055
7056 static void
7057 output_jump (void)
7058 {
7059 char *p;
7060 int size;
7061 fixS *fixP;
7062
7063 if (i.tm.opcode_modifier.jumpbyte)
7064 {
7065 /* This is a loop or jecxz type instruction. */
7066 size = 1;
7067 if (i.prefix[ADDR_PREFIX] != 0)
7068 {
7069 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
7070 i.prefixes -= 1;
7071 }
7072 /* Pentium4 branch hints. */
7073 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
7074 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
7075 {
7076 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
7077 i.prefixes--;
7078 }
7079 }
7080 else
7081 {
7082 int code16;
7083
7084 code16 = 0;
7085 if (flag_code == CODE_16BIT)
7086 code16 = CODE16;
7087
7088 if (i.prefix[DATA_PREFIX] != 0)
7089 {
7090 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
7091 i.prefixes -= 1;
7092 code16 ^= CODE16;
7093 }
7094
7095 size = 4;
7096 if (code16)
7097 size = 2;
7098 }
7099
7100 if (i.prefix[REX_PREFIX] != 0)
7101 {
7102 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
7103 i.prefixes -= 1;
7104 }
7105
7106 /* BND prefixed jump. */
7107 if (i.prefix[BND_PREFIX] != 0)
7108 {
7109 FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
7110 i.prefixes -= 1;
7111 }
7112
7113 if (i.prefixes != 0 && !intel_syntax)
7114 as_warn (_("skipping prefixes on this instruction"));
7115
7116 p = frag_more (i.tm.opcode_length + size);
7117 switch (i.tm.opcode_length)
7118 {
7119 case 2:
7120 *p++ = i.tm.base_opcode >> 8;
7121 /* Fall through. */
7122 case 1:
7123 *p++ = i.tm.base_opcode;
7124 break;
7125 default:
7126 abort ();
7127 }
7128
7129 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
7130 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
7131
7132 /* All jumps handled here are signed, but don't use a signed limit
7133 check for 32 and 16 bit jumps as we want to allow wrap around at
7134 4G and 64k respectively. */
7135 if (size == 1)
7136 fixP->fx_signed = 1;
7137 }
7138
7139 static void
7140 output_interseg_jump (void)
7141 {
7142 char *p;
7143 int size;
7144 int prefix;
7145 int code16;
7146
7147 code16 = 0;
7148 if (flag_code == CODE_16BIT)
7149 code16 = CODE16;
7150
7151 prefix = 0;
7152 if (i.prefix[DATA_PREFIX] != 0)
7153 {
7154 prefix = 1;
7155 i.prefixes -= 1;
7156 code16 ^= CODE16;
7157 }
7158 if (i.prefix[REX_PREFIX] != 0)
7159 {
7160 prefix++;
7161 i.prefixes -= 1;
7162 }
7163
7164 size = 4;
7165 if (code16)
7166 size = 2;
7167
7168 if (i.prefixes != 0 && !intel_syntax)
7169 as_warn (_("skipping prefixes on this instruction"));
7170
7171 /* 1 opcode; 2 segment; offset */
7172 p = frag_more (prefix + 1 + 2 + size);
7173
7174 if (i.prefix[DATA_PREFIX] != 0)
7175 *p++ = DATA_PREFIX_OPCODE;
7176
7177 if (i.prefix[REX_PREFIX] != 0)
7178 *p++ = i.prefix[REX_PREFIX];
7179
7180 *p++ = i.tm.base_opcode;
7181 if (i.op[1].imms->X_op == O_constant)
7182 {
7183 offsetT n = i.op[1].imms->X_add_number;
7184
7185 if (size == 2
7186 && !fits_in_unsigned_word (n)
7187 && !fits_in_signed_word (n))
7188 {
7189 as_bad (_("16-bit jump out of range"));
7190 return;
7191 }
7192 md_number_to_chars (p, n, size);
7193 }
7194 else
7195 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
7196 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
7197 if (i.op[0].imms->X_op != O_constant)
7198 as_bad (_("can't handle non absolute segment in `%s'"),
7199 i.tm.name);
7200 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
7201 }
7202
/* Emit the fully-assembled current instruction (global I) into the
   output frag: dispatch branches/jumps to their specialized emitters,
   otherwise write prefixes, opcode, ModRM/SIB, displacement and
   immediate in encoding order.  */

static void
output_insn (void)
{
  fragS *insn_start_frag;
  offsetT insn_start_off;

  /* Tie dwarf2 debug info to the address at the start of the insn.
     We can't do this after the insn has been output as the current
     frag may have been closed off.  eg. by frag_var.  */
  dwarf2_emit_insn (0);

  /* Remember where the instruction starts; output_disp/output_imm need
     this to bias GOTPC-style relocations.  */
  insn_start_frag = frag_now;
  insn_start_off = frag_now_fix ();

  /* Output jumps.  */
  if (i.tm.opcode_modifier.jump)
    output_branch ();
  else if (i.tm.opcode_modifier.jumpbyte
	   || i.tm.opcode_modifier.jumpdword)
    output_jump ();
  else if (i.tm.opcode_modifier.jumpintersegment)
    output_interseg_jump ();
  else
    {
      /* Output normal instructions here.  */
      char *p;
      unsigned char *q;
      unsigned int j;
      unsigned int prefix;

      if (avoid_fence
	  && i.tm.base_opcode == 0xfae
	  && i.operands == 1
	  && i.imm_operands == 1
	  && (i.op[0].imms->X_add_number == 0xe8
	      || i.op[0].imms->X_add_number == 0xf0
	      || i.op[0].imms->X_add_number == 0xf8))
	{
	  /* Encode lfence, mfence, and sfence as
	     f0 83 04 24 00   lock addl $0x0, (%{re}sp).  */
	  offsetT val = 0x240483f0ULL;
	  p = frag_more (5);
	  md_number_to_chars (p, val, 5);
	  return;
	}

      /* Some processors fail on LOCK prefix.  This option makes the
	 assembler ignore LOCK prefix and serves as a workaround.  */
      if (omit_lock_prefix)
	{
	  if (i.tm.base_opcode == LOCK_PREFIX_OPCODE)
	    return;
	  i.prefix[LOCK_PREFIX] = 0;
	}

      /* Since the VEX/EVEX prefix contains the implicit prefix, we
	 don't need the explicit prefix.  */
      if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
	{
	  /* Mandatory prefixes are folded into the high byte(s) of
	     base_opcode; peel them off and emit via add_prefix.  */
	  switch (i.tm.opcode_length)
	    {
	    case 3:
	      if (i.tm.base_opcode & 0xff000000)
		{
		  prefix = (i.tm.base_opcode >> 24) & 0xff;
		  goto check_prefix;
		}
	      break;
	    case 2:
	      if ((i.tm.base_opcode & 0xff0000) != 0)
		{
		  prefix = (i.tm.base_opcode >> 16) & 0xff;
		  if (i.tm.cpu_flags.bitfield.cpupadlock)
		    {
check_prefix:
		      /* PadLock: don't double-emit a REP prefix the
			 user already supplied.  */
		      if (prefix != REPE_PREFIX_OPCODE
			  || (i.prefix[REP_PREFIX]
			      != REPE_PREFIX_OPCODE))
			add_prefix (prefix);
		    }
		  else
		    add_prefix (prefix);
		}
	      break;
	    case 1:
	      break;
	    default:
	      abort ();
	    }

#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
	  /* For x32, add a dummy REX_OPCODE prefix for mov/add with
	     R_X86_64_GOTTPOFF relocation so that linker can safely
	     perform IE->LE optimization.  */
	  if (x86_elf_abi == X86_64_X32_ABI
	      && i.operands == 2
	      && i.reloc[0] == BFD_RELOC_X86_64_GOTTPOFF
	      && i.prefix[REX_PREFIX] == 0)
	    add_prefix (REX_OPCODE);
#endif

	  /* The prefix bytes.  */
	  for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
	    if (*q)
	      FRAG_APPEND_1_CHAR (*q);
	}
      else
	{
	  /* VEX/EVEX encoded: only segment and address-size prefixes
	     may appear as raw bytes; everything else lives inside the
	     VEX/EVEX prefix itself.  */
	  for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
	    if (*q)
	      switch (j)
		{
		case REX_PREFIX:
		  /* REX byte is encoded in VEX prefix.  */
		  break;
		case SEG_PREFIX:
		case ADDR_PREFIX:
		  FRAG_APPEND_1_CHAR (*q);
		  break;
		default:
		  /* There should be no other prefixes for instructions
		     with VEX prefix.  */
		  abort ();
		}

	  /* For EVEX instructions i.vrex should become 0 after
	     build_evex_prefix.  For VEX instructions upper 16 registers
	     aren't available, so VREX should be 0.  */
	  if (i.vrex)
	    abort ();
	  /* Now the VEX prefix.  */
	  p = frag_more (i.vex.length);
	  for (j = 0; j < i.vex.length; j++)
	    p[j] = i.vex.bytes[j];
	}

      /* Now the opcode; be careful about word order here!  */
      if (i.tm.opcode_length == 1)
	{
	  FRAG_APPEND_1_CHAR (i.tm.base_opcode);
	}
      else
	{
	  /* Each case emits the bytes above the low 16 bits; the
	     shared tail below emits the final two.  */
	  switch (i.tm.opcode_length)
	    {
	    case 4:
	      p = frag_more (4);
	      *p++ = (i.tm.base_opcode >> 24) & 0xff;
	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
	      break;
	    case 3:
	      p = frag_more (3);
	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
	      break;
	    case 2:
	      p = frag_more (2);
	      break;
	    default:
	      abort ();
	      break;
	    }

	  /* Put out high byte first: can't use md_number_to_chars!  */
	  *p++ = (i.tm.base_opcode >> 8) & 0xff;
	  *p = i.tm.base_opcode & 0xff;
	}

      /* Now the modrm byte and sib byte (if present).  */
      if (i.tm.opcode_modifier.modrm)
	{
	  FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
			       | i.rm.reg << 3
			       | i.rm.mode << 6));
	  /* If i.rm.regmem == ESP (4)
	     && i.rm.mode != (Register mode)
	     && not 16 bit
	     ==> need second modrm byte.  */
	  if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
	      && i.rm.mode != 3
	      && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
	    FRAG_APPEND_1_CHAR ((i.sib.base << 0
				 | i.sib.index << 3
				 | i.sib.scale << 6));
	}

      if (i.disp_operands)
	output_disp (insn_start_frag, insn_start_off);

      if (i.imm_operands)
	output_imm (insn_start_frag, insn_start_off);
    }

#ifdef DEBUG386
  if (flag_debug)
    {
      pi ("" /*line*/, &i);
    }
#endif /* DEBUG386 */
}
7402
7403 /* Return the size of the displacement operand N. */
7404
7405 static int
7406 disp_size (unsigned int n)
7407 {
7408 int size = 4;
7409
7410 /* Vec_Disp8 has to be 8bit. */
7411 if (i.types[n].bitfield.vec_disp8)
7412 size = 1;
7413 else if (i.types[n].bitfield.disp64)
7414 size = 8;
7415 else if (i.types[n].bitfield.disp8)
7416 size = 1;
7417 else if (i.types[n].bitfield.disp16)
7418 size = 2;
7419 return size;
7420 }
7421
7422 /* Return the size of the immediate operand N. */
7423
7424 static int
7425 imm_size (unsigned int n)
7426 {
7427 int size = 4;
7428 if (i.types[n].bitfield.imm64)
7429 size = 8;
7430 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
7431 size = 1;
7432 else if (i.types[n].bitfield.imm16)
7433 size = 2;
7434 return size;
7435 }
7436
/* Emit the displacement bytes (or fixups) for each memory operand of
   the current instruction.  INSN_START_FRAG/INSN_START_OFF identify
   where the instruction began; GOTPC-style relocations must be biased
   by the distance from the insn start to the displacement field.  */

static void
output_disp (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      if (i.types[n].bitfield.vec_disp8
	  || operand_type_check (i.types[n], disp))
	{
	  if (i.op[n].disps->X_op == O_constant)
	    {
	      int size = disp_size (n);
	      offsetT val = i.op[n].disps->X_add_number;

	      /* EVEX compressed disp8: the stored byte is the
		 displacement scaled down by the memory element size.  */
	      if (i.types[n].bitfield.vec_disp8)
		val >>= i.memshift;
	      val = offset_in_range (val, size);
	      p = frag_more (size);
	      md_number_to_chars (p, val, size);
	    }
	  else
	    {
	      enum bfd_reloc_code_real reloc_type;
	      int size = disp_size (n);
	      int sign = i.types[n].bitfield.disp32s;
	      int pcrel = (i.flags[n] & Operand_PCrel) != 0;
	      fixS *fixP;

	      /* We can't have 8 bit displacement here.  */
	      gas_assert (!i.types[n].bitfield.disp8);

	      /* The PC relative address is computed relative
		 to the instruction boundary, so in case immediate
		 fields follows, we need to adjust the value.  */
	      if (pcrel && i.imm_operands)
		{
		  unsigned int n1;
		  int sz = 0;

		  for (n1 = 0; n1 < i.operands; n1++)
		    if (operand_type_check (i.types[n1], imm))
		      {
			/* Only one immediate is allowed for PC
			   relative address.  */
			gas_assert (sz == 0);
			sz = imm_size (n1);
			i.op[n].disps->X_add_number -= sz;
		      }
		  /* We should find the immediate.  */
		  gas_assert (sz != 0);
		}

	      p = frag_more (size);
	      reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
	      /* Operands referencing _GLOBAL_OFFSET_TABLE_ need to be
		 converted to a GOTPC relocation with an addend biased
		 by the offset of the reloc field within the insn.  */
	      if (GOT_symbol
		  && GOT_symbol == i.op[n].disps->X_add_symbol
		  && (((reloc_type == BFD_RELOC_32
			|| reloc_type == BFD_RELOC_X86_64_32S
			|| (reloc_type == BFD_RELOC_64
			    && object_64bit))
		       && (i.op[n].disps->X_op == O_symbol
			   || (i.op[n].disps->X_op == O_add
			       && ((symbol_get_value_expression
				    (i.op[n].disps->X_op_symbol)->X_op)
				   == O_subtract))))
		      || reloc_type == BFD_RELOC_32_PCREL))
		{
		  offsetT add;

		  /* ADD = distance from insn start to the reloc field,
		     possibly spanning several frags.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    {
		      reloc_type = BFD_RELOC_386_GOTPC;
		      /* NOTE(review): this writes through .imms while
			 the operand was accessed via .disps above —
			 presumably i.op[n] is a union so both name the
			 same expression; confirm against i386_op.  */
		      i.op[n].imms->X_add_number += add;
		    }
		  else if (reloc_type == BFD_RELOC_64)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  else
		    /* Don't do the adjustment for x86-64, as there
		       the pcrel addressing is relative to the _next_
		       insn, and that is taken care of in other code.  */
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		}
	      fixP = fix_new_exp (frag_now, p - frag_now->fr_literal,
				  size, i.op[n].disps, pcrel,
				  reloc_type);
	      /* Check for "call/jmp *mem", "mov mem, %reg",
		 "test %reg, mem" and "binop mem, %reg" where binop
		 is one of adc, add, and, cmp, or, sbb, sub, xor
		 instructions.  Always generate R_386_GOT32X for
		 "sym*GOT" operand in 32-bit mode.  */
	      if ((generate_relax_relocations
		   || (!object_64bit
		       && i.rm.mode == 0
		       && i.rm.regmem == 5))
		  && (i.rm.mode == 2
		      || (i.rm.mode == 0 && i.rm.regmem == 5))
		  && ((i.operands == 1
		       && i.tm.base_opcode == 0xff
		       && (i.rm.reg == 2 || i.rm.reg == 4))
		      || (i.operands == 2
			  && (i.tm.base_opcode == 0x8b
			      || i.tm.base_opcode == 0x85
			      || (i.tm.base_opcode & 0xc7) == 0x03))))
		{
		  if (object_64bit)
		    {
		      fixP->fx_tcbit = i.rex != 0;
		      if (i.base_reg
			  && (i.base_reg->reg_num == RegRip
			      || i.base_reg->reg_num == RegEip))
		      fixP->fx_tcbit2 = 1;
		    }
		  else
		    fixP->fx_tcbit2 = 1;
		}
	    }
	}
    }
}
7571
/* Emit the immediate bytes (or fixups) for each immediate operand of
   the current instruction.  INSN_START_FRAG/INSN_START_OFF locate the
   instruction start, needed to bias GOTPC relocation addends.  */

static void
output_imm (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      /* Skip SAE/RC Imm operand in EVEX.  They are already handled.  */
      if (i.rounding && (int) n == i.rounding->operand)
	continue;

      if (operand_type_check (i.types[n], imm))
	{
	  if (i.op[n].imms->X_op == O_constant)
	    {
	      int size = imm_size (n);
	      offsetT val;

	      val = offset_in_range (i.op[n].imms->X_add_number,
				     size);
	      p = frag_more (size);
	      md_number_to_chars (p, val, size);
	    }
	  else
	    {
	      /* Not absolute_section.
		 Need a 32-bit fixup (don't support 8bit
		 non-absolute imms).  Try to support other
		 sizes ...  */
	      enum bfd_reloc_code_real reloc_type;
	      int size = imm_size (n);
	      int sign;

	      /* Sign-extended 32-bit immediates in 64-bit operations
		 need a signed relocation.  */
	      if (i.types[n].bitfield.imm32s
		  && (i.suffix == QWORD_MNEM_SUFFIX
		      || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
		sign = 1;
	      else
		sign = 0;

	      p = frag_more (size);
	      reloc_type = reloc (size, 0, sign, i.reloc[n]);

	      /*  This is tough to explain.  We end up with this one if we
	       * have operands that look like
	       * "_GLOBAL_OFFSET_TABLE_+[.-.L284]".  The goal here is to
	       * obtain the absolute address of the GOT, and it is strongly
	       * preferable from a performance point of view to avoid using
	       * a runtime relocation for this.  The actual sequence of
	       * instructions often look something like:
	       *
	       *	call	.L66
	       * .L66:
	       *	popl	%ebx
	       *	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
	       *
	       *   The call and pop essentially return the absolute address
	       * of the label .L66 and store it in %ebx.  The linker itself
	       * will ultimately change the first operand of the addl so
	       * that %ebx points to the GOT, but to keep things simple, the
	       * .o file must have this operand set so that it generates not
	       * the absolute address of .L66, but the absolute address of
	       * itself.  This allows the linker itself simply treat a GOTPC
	       * relocation as asking for a pcrel offset to the GOT to be
	       * added in, and the addend of the relocation is stored in the
	       * operand field for the instruction itself.
	       *
	       *   Our job here is to fix the operand so that it would add
	       * the correct offset so that %ebx would point to itself.  The
	       * thing that is tricky is that .-.L66 will point to the
	       * beginning of the instruction, so we need to further modify
	       * the operand so that it will point to itself.  There are
	       * other cases where you have something like:
	       *
	       *	.long	$_GLOBAL_OFFSET_TABLE_+[.-.L66]
	       *
	       * and here no correction would be required.  Internally in
	       * the assembler we treat operands of this form as not being
	       * pcrel since the '.' is explicitly mentioned, and I wonder
	       * whether it would simplify matters to do it this way.  Who
	       * knows.  In earlier versions of the PIC patches, the
	       * pcrel_adjust field was used to store the correction, but
	       * since the expression is not pcrel, I felt it would be
	       * confusing to do it this way.  */

	      if ((reloc_type == BFD_RELOC_32
		   || reloc_type == BFD_RELOC_X86_64_32S
		   || reloc_type == BFD_RELOC_64)
		  && GOT_symbol
		  && GOT_symbol == i.op[n].imms->X_add_symbol
		  && (i.op[n].imms->X_op == O_symbol
		      || (i.op[n].imms->X_op == O_add
			  && ((symbol_get_value_expression
			       (i.op[n].imms->X_op_symbol)->X_op)
			      == O_subtract))))
		{
		  offsetT add;

		  /* ADD = distance from insn start to the immediate
		     field, possibly spanning several frags.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    reloc_type = BFD_RELOC_386_GOTPC;
		  else if (size == 4)
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		  else if (size == 8)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  i.op[n].imms->X_add_number += add;
		}
	      fix_new_exp (frag_now, p - frag_now->fr_literal, size,
			   i.op[n].imms, 0, reloc_type);
	    }
	}
    }
}
7698 \f
/* x86_cons_fix_new is called via the expression parsing code when a
   reloc is needed.  We use this hook to get the correct .got reloc.
   CONS_SIGN is passed as the sign argument to reloc (); -1 means no
   explicit signedness was requested for the data directive.  */
static int cons_sign = -1;
7702
7703 void
7704 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
7705 expressionS *exp, bfd_reloc_code_real_type r)
7706 {
7707 r = reloc (len, 0, cons_sign, r);
7708
7709 #ifdef TE_PE
7710 if (exp->X_op == O_secrel)
7711 {
7712 exp->X_op = O_symbol;
7713 r = BFD_RELOC_32_SECREL;
7714 }
7715 #endif
7716
7717 fix_new_exp (frag, off, len, exp, 0, r);
7718 }
7719
7720 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
7721 purpose of the `.dc.a' internal pseudo-op. */
7722
7723 int
7724 x86_address_bytes (void)
7725 {
7726 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
7727 return 4;
7728 return stdoutput->arch_info->bits_per_address / 8;
7729 }
7730
7731 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
7732 || defined (LEX_AT)
7733 # define lex_got(reloc, adjust, types) NULL
7734 #else
7735 /* Parse operands of the form
7736 <symbol>@GOTOFF+<nnn>
7737 and similar .plt or .got references.
7738
7739 If we find one, set up the correct relocation in RELOC and copy the
7740 input string, minus the `@GOTOFF' into a malloc'd buffer for
7741 parsing by the calling routine. Return this buffer, and if ADJUST
7742 is non-null set it to the length of the string we removed from the
7743 input line. Otherwise return NULL. */
static char *
lex_got (enum bfd_reloc_code_real *rel,
	 int *adjust,
	 i386_operand_type *types)
{
  /* Some of the relocations depend on the size of what field is to
     be relocated.  But in our callers i386_immediate and i386_displacement
     we don't yet know the operand size (this will be set by insn
     matching).  Hence we record the word32 relocation here,
     and adjust the reloc according to the real size in reloc().  */
  static const struct {
    const char *str;
    int len;
    /* rel[0] is the 32-bit-object relocation, rel[1] the 64-bit one;
       _dummy_first_bfd_reloc_code_real marks "unsupported for that
       object size".  */
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  } gotrel[] = {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    { STRING_COMMA_LEN ("SIZE"),      { BFD_RELOC_SIZE32,
					BFD_RELOC_SIZE32 },
      OPERAND_TYPE_IMM32_64 },
#endif
    { STRING_COMMA_LEN ("PLTOFF"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_PLTOFF64 },
      OPERAND_TYPE_IMM64 },
    { STRING_COMMA_LEN ("PLT"),      { BFD_RELOC_386_PLT32,
				       BFD_RELOC_X86_64_PLT32    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTPLT"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPLT64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTOFF"),   { BFD_RELOC_386_GOTOFF,
				       BFD_RELOC_X86_64_GOTOFF64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPCREL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSGD"),    { BFD_RELOC_386_TLS_GD,
				       BFD_RELOC_X86_64_TLSGD    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSLDM"),   { BFD_RELOC_386_TLS_LDM,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("TLSLD"),    { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_TLSLD    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
				       BFD_RELOC_X86_64_GOTTPOFF },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TPOFF"),    { BFD_RELOC_386_TLS_LE_32,
				       BFD_RELOC_X86_64_TPOFF32  },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("NTPOFF"),   { BFD_RELOC_386_TLS_LE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("DTPOFF"),   { BFD_RELOC_386_TLS_LDO_32,
				       BFD_RELOC_X86_64_DTPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("GOT"),      { BFD_RELOC_386_GOT32,
				       BFD_RELOC_X86_64_GOT32    },
      OPERAND_TYPE_IMM32_32S_64_DISP32 },
    { STRING_COMMA_LEN ("TLSDESC"),  { BFD_RELOC_386_TLS_GOTDESC,
				       BFD_RELOC_X86_64_GOTPC32_TLSDESC },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSCALL"),  { BFD_RELOC_386_TLS_DESC_CALL,
				       BFD_RELOC_X86_64_TLSDESC_CALL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
  };
  char *cp;
  unsigned int j;

#if defined (OBJ_MAYBE_ELF)
  /* @-suffixed relocation operators only apply to ELF output.  */
  if (!IS_ELF)
    return NULL;
#endif

  /* Scan ahead for an '@'; reaching end of line or a comma first
     means there is no reloc operator on this operand.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;
      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];

	      if (types)
		{
		  /* In 32-bit mode all these relocs act as 32-bit
		     immediates/displacements; in 64-bit mode use the
		     per-entry operand-type mask.  */
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      /* Ensure the GOT symbol exists for everything but the
		 table's first entry.  */
	      if (j != 0 && GOT_symbol == NULL)
		GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = XNEWVEC (char, first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@GOTOFF1 will be detected.  */
		tmpbuf[first++] = ' ';
	      else
		/* Increment length by 1 if the relocation token is
		   removed.  */
		len++;
	      if (adjust)
		*adjust = len;
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
7894 #endif
7895
7896 #ifdef TE_PE
7897 #ifdef lex_got
7898 #undef lex_got
7899 #endif
7900 /* Parse operands of the form
7901 <symbol>@SECREL32+<nnn>
7902
7903 If we find one, set up the correct relocation in RELOC and copy the
7904 input string, minus the `@SECREL32' into a malloc'd buffer for
7905 parsing by the calling routine. Return this buffer, and if ADJUST
7906 is non-null set it to the length of the string we removed from the
7907 input line. Otherwise return NULL.
7908
7909 This function is copied from the ELF version above adjusted for PE targets. */
7910
static char *
lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
	 int *adjust ATTRIBUTE_UNUSED,
	 i386_operand_type *types)
{
  /* PE only supports one reloc operator, @SECREL32; the structure
     mirrors the ELF gotrel[] table above.  */
  static const struct
  {
    const char *str;
    int len;
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  }
  gotrel[] =
  {
    { STRING_COMMA_LEN ("SECREL32"),    { BFD_RELOC_32_SECREL,
					  BFD_RELOC_32_SECREL },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
  };

  char *cp;
  unsigned j;

  /* Scan ahead for an '@'; reaching end of line or a comma first
     means there is no reloc operator on this operand.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;

      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];
	      if (adjust)
		*adjust = len;

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.
		 NOTE(review): unlike the ELF lex_got above, ADJUST is
		 not incremented when the reloc token is removed without
		 substituting a space — confirm the caller's pointer
		 fixup remains exact in that case.  */
	      tmpbuf = XNEWVEC (char, first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@SECREL321 will be detected.  */
		tmpbuf[first++] = ' ';
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
7996
7997 #endif /* TE_PE */
7998
/* Target hook for the data-emitting pseudo-ops (.long etc.): parse one
   expression into EXP, handling @GOTOFF-style reloc operators for 4-byte
   (and, for 64-bit objects, 8-byte) values.  Returns the relocation to
   use, or NO_RELOC.  */

bfd_reloc_code_real_type
x86_cons (expressionS *exp, int size)
{
  bfd_reloc_code_real_type got_reloc = NO_RELOC;

  /* Negate intel_syntax around the parse and restore it below.
     NOTE(review): presumably the negative value tells expression
     helpers "intel syntax, but inside a data directive" — confirm
     against the users of intel_syntax.  */
  intel_syntax = -intel_syntax;

  exp->X_md = 0;
  if (size == 4 || (object_64bit && size == 8))
    {
      /* Handle @GOTOFF and the like in an expression.  */
      char *save;
      char *gotfree_input_line;
      int adjust = 0;

      save = input_line_pointer;
      gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
      if (gotfree_input_line)
	input_line_pointer = gotfree_input_line;

      expression (exp);

      if (gotfree_input_line)
	{
	  /* expression () has merrily parsed up to the end of line,
	     or a comma - in the wrong buffer.  Transfer how far
	     input_line_pointer has moved to the right buffer.  */
	  input_line_pointer = (save
				+ (input_line_pointer - gotfree_input_line)
				+ adjust);
	  free (gotfree_input_line);
	  /* A reloc operator demands a relocatable expression; reject
	     constants, registers and other non-symbolic results.  */
	  if (exp->X_op == O_constant
	      || exp->X_op == O_absent
	      || exp->X_op == O_illegal
	      || exp->X_op == O_register
	      || exp->X_op == O_big)
	    {
	      char c = *input_line_pointer;
	      *input_line_pointer = 0;
	      as_bad (_("missing or invalid expression `%s'"), save);
	      *input_line_pointer = c;
	    }
	}
    }
  else
    expression (exp);

  intel_syntax = -intel_syntax;

  if (intel_syntax)
    i386_intel_simplify (exp);

  return got_reloc;
}
8053
8054 static void
8055 signed_cons (int size)
8056 {
8057 if (flag_code == CODE_64BIT)
8058 cons_sign = 1;
8059 cons (size);
8060 cons_sign = -1;
8061 }
8062
8063 #ifdef TE_PE
8064 static void
8065 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
8066 {
8067 expressionS exp;
8068
8069 do
8070 {
8071 expression (&exp);
8072 if (exp.X_op == O_symbol)
8073 exp.X_op = O_secrel;
8074
8075 emit_expr (&exp, 4);
8076 }
8077 while (*input_line_pointer++ == ',');
8078
8079 input_line_pointer--;
8080 demand_empty_rest_of_line ();
8081 }
8082 #endif
8083
8084 /* Handle Vector operations. */
8085
/* Parse AVX-512 "{...}" vector decorators starting at OP_STRING:
   broadcasts ({1to2}/{1to4}/{1to8}/{1to16}), a write-mask register
   ({%kN}), and the zeroing flag ({z}).  Parsing stops at OP_END (or
   at NUL if OP_END is NULL).  Results are recorded in the global
   instruction state (i.broadcast, i.mask).  Returns the updated scan
   position, or NULL after issuing a diagnostic.  */

static char *
check_VecOperations (char *op_string, char *op_end)
{
  const reg_entry *mask;
  const char *saved;
  char *end_op;

  while (*op_string
	 && (op_end == NULL || op_string < op_end))
    {
      /* Remember where this decorator started, for diagnostics.  */
      saved = op_string;
      if (*op_string == '{')
	{
	  op_string++;

	  /* Check broadcasts.  */
	  if (strncmp (op_string, "1to", 3) == 0)
	    {
	      int bcst_type;

	      if (i.broadcast)
		goto duplicated_vec_op;

	      op_string += 3;
	      if (*op_string == '8')
		bcst_type = BROADCAST_1TO8;
	      else if (*op_string == '4')
		bcst_type = BROADCAST_1TO4;
	      else if (*op_string == '2')
		bcst_type = BROADCAST_1TO2;
	      else if (*op_string == '1'
		       && *(op_string+1) == '6')
		{
		  bcst_type = BROADCAST_1TO16;
		  op_string++;
		}
	      else
		{
		  as_bad (_("Unsupported broadcast: `%s'"), saved);
		  return NULL;
		}
	      op_string++;

	      broadcast_op.type = bcst_type;
	      broadcast_op.operand = this_operand;
	      i.broadcast = &broadcast_op;
	    }
	  /* Check masking operation.  */
	  else if ((mask = parse_register (op_string, &end_op)) != NULL)
	    {
	      /* k0 can't be used for write mask.  */
	      if (mask->reg_num == 0)
		{
		  as_bad (_("`%s' can't be used for write mask"),
			  op_string);
		  return NULL;
		}

	      if (!i.mask)
		{
		  /* First masking decorator seen: record the register.  */
		  mask_op.mask = mask;
		  mask_op.zeroing = 0;
		  mask_op.operand = this_operand;
		  i.mask = &mask_op;
		}
	      else
		{
		  /* A mask register was already recorded; only the
		     {z}{%kN} ordering can legitimately reach here.  */
		  if (i.mask->mask)
		    goto duplicated_vec_op;

		  i.mask->mask = mask;

		  /* Only "{z}" is allowed here.  No need to check
		     zeroing mask explicitly.  */
		  if (i.mask->operand != this_operand)
		    {
		      as_bad (_("invalid write mask `%s'"), saved);
		      return NULL;
		    }
		}

	      op_string = end_op;
	    }
	  /* Check zeroing-flag for masking operation.  */
	  else if (*op_string == 'z')
	    {
	      if (!i.mask)
		{
		  /* {z} seen before any {%kN}; register it now, mask
		     register to be filled in later.  */
		  mask_op.mask = NULL;
		  mask_op.zeroing = 1;
		  mask_op.operand = this_operand;
		  i.mask = &mask_op;
		}
	      else
		{
		  if (i.mask->zeroing)
		    {
		    duplicated_vec_op:
		      as_bad (_("duplicated `%s'"), saved);
		      return NULL;
		    }

		  i.mask->zeroing = 1;

		  /* Only "{%k}" is allowed here.  No need to check mask
		     register explicitly.  */
		  if (i.mask->operand != this_operand)
		    {
		      as_bad (_("invalid zeroing-masking `%s'"),
			      saved);
		      return NULL;
		    }
		}

	      op_string++;
	    }
	  else
	    goto unknown_vec_op;

	  /* Each decorator must be closed by '}'.  */
	  if (*op_string != '}')
	    {
	      as_bad (_("missing `}' in `%s'"), saved);
	      return NULL;
	    }
	  op_string++;
	  continue;
	}
    unknown_vec_op:
      /* We don't know this one.  */
      as_bad (_("unknown vector operation: `%s'"), saved);
      return NULL;
    }

  return op_string;
}
8221
/* Parse the immediate operand starting at IMM_START into the next
   im_expressions[] slot, handling @-reloc operators and trailing
   "{...}" vector decorators.  Returns nonzero on success.  */

static int
i386_immediate (char *imm_start)
{
  char *save_input_line_pointer;
  char *gotfree_input_line;
  segT exp_seg = 0;
  expressionS *exp;
  i386_operand_type types;

  /* Start with all operand-type bits set; lex_got may narrow this.  */
  operand_type_set (&types, ~0);

  if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
    {
      as_bad (_("at most %d immediate operands are allowed"),
	      MAX_IMMEDIATE_OPERANDS);
      return 0;
    }

  exp = &im_expressions[i.imm_operands++];
  i.op[this_operand].imms = exp;

  if (is_space_char (*imm_start))
    ++imm_start;

  /* Redirect the expression parser at our operand text.  */
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = imm_start;

  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();

  /* Handle vector operations.  */
  if (*input_line_pointer == '{')
    {
      /* NOTE(review): this early-return path leaves input_line_pointer
	 unset (NULL) and leaks gotfree_input_line — confirm callers
	 abort the statement on failure.  */
      input_line_pointer = check_VecOperations (input_line_pointer,
						NULL);
      if (input_line_pointer == NULL)
	return 0;
    }

  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);

  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* With a reloc operator present, only relocatable expressions
	 make sense; poison anything else.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  return i386_finalize_immediate (exp_seg, exp, types, imm_start);
}
8280
/* Validate the parsed immediate EXP and set its possible operand-type
   bits in i.types[this_operand], intersected with TYPES (the mask from
   lex_got).  IMM_START is only used for diagnostics and may be NULL to
   suppress them.  Returns nonzero on success.  */

static int
i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
			 i386_operand_type types, const char *imm_start)
{
  if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
    {
      if (imm_start)
	as_bad (_("missing or invalid immediate expression `%s'"),
		imm_start);
      return 0;
    }
  else if (exp->X_op == O_constant)
    {
      /* Size it properly later.  */
      i.types[this_operand].bitfield.imm64 = 1;
      /* If not 64bit, sign extend val.  The mask keeps only values
	 that fit in 32 bits; the xor/sub pair then sign-extends bit
	 31 into the upper half of X_add_number.  */
      if (flag_code != CODE_64BIT
	  && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
	exp->X_add_number
	  = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
    }
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
	   && exp_seg != absolute_section
	   && exp_seg != text_section
	   && exp_seg != data_section
	   && exp_seg != bss_section
	   && exp_seg != undefined_section
	   && !bfd_is_com_section (exp_seg))
    {
      as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
      return 0;
    }
#endif
  else if (!intel_syntax && exp_seg == reg_section)
    {
      /* AT&T syntax: a bare register where an immediate was expected.  */
      if (imm_start)
	as_bad (_("illegal immediate register operand %s"), imm_start);
      return 0;
    }
  else
    {
      /* This is an address.  The size of the address will be
	 determined later, depending on destination register,
	 suffix, or the default for the section.  */
      i.types[this_operand].bitfield.imm8 = 1;
      i.types[this_operand].bitfield.imm16 = 1;
      i.types[this_operand].bitfield.imm32 = 1;
      i.types[this_operand].bitfield.imm32s = 1;
      i.types[this_operand].bitfield.imm64 = 1;
      i.types[this_operand] = operand_type_and (i.types[this_operand],
						types);
    }

  return 1;
}
8337
8338 static char *
8339 i386_scale (char *scale)
8340 {
8341 offsetT val;
8342 char *save = input_line_pointer;
8343
8344 input_line_pointer = scale;
8345 val = get_absolute_expression ();
8346
8347 switch (val)
8348 {
8349 case 1:
8350 i.log2_scale_factor = 0;
8351 break;
8352 case 2:
8353 i.log2_scale_factor = 1;
8354 break;
8355 case 4:
8356 i.log2_scale_factor = 2;
8357 break;
8358 case 8:
8359 i.log2_scale_factor = 3;
8360 break;
8361 default:
8362 {
8363 char sep = *input_line_pointer;
8364
8365 *input_line_pointer = '\0';
8366 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
8367 scale);
8368 *input_line_pointer = sep;
8369 input_line_pointer = save;
8370 return NULL;
8371 }
8372 }
8373 if (i.log2_scale_factor != 0 && i.index_reg == 0)
8374 {
8375 as_warn (_("scale factor of %d without an index register"),
8376 1 << i.log2_scale_factor);
8377 i.log2_scale_factor = 0;
8378 }
8379 scale = input_line_pointer;
8380 input_line_pointer = save;
8381 return scale;
8382 }
8383
/* Parse the displacement text [DISP_START, DISP_END) into the next
   disp_expressions[] slot, first computing which displacement widths
   (disp8/16/32/32s/64) are plausible from the instruction kind, address
   or data size prefixes, and code mode.  Returns nonzero on success.  */

static int
i386_displacement (char *disp_start, char *disp_end)
{
  expressionS *exp;
  segT exp_seg = 0;
  char *save_input_line_pointer;
  char *gotfree_input_line;
  int override;
  i386_operand_type bigdisp, types = anydisp;
  int ret;

  if (i.disp_operands == MAX_MEMORY_OPERANDS)
    {
      as_bad (_("at most %d displacement operands are allowed"),
	      MAX_MEMORY_OPERANDS);
      return 0;
    }

  operand_type_set (&bigdisp, 0);
  if ((i.types[this_operand].bitfield.jumpabsolute)
      || (!current_templates->start->opcode_modifier.jump
	  && !current_templates->start->opcode_modifier.jumpdword))
    {
      /* Memory operand (or absolute jump target): width follows the
	 address size.  */
      bigdisp.bitfield.disp32 = 1;
      override = (i.prefix[ADDR_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (!override)
	    {
	      bigdisp.bitfield.disp32s = 1;
	      bigdisp.bitfield.disp64 = 1;
	    }
	}
      else if ((flag_code == CODE_16BIT) ^ override)
	{
	  bigdisp.bitfield.disp32 = 0;
	  bigdisp.bitfield.disp16 = 1;
	}
    }
  else
    {
      /* For PC-relative branches, the width of the displacement
	 is dependent upon data size, not address size.  */
      override = (i.prefix[DATA_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (override || i.suffix == WORD_MNEM_SUFFIX)
	    bigdisp.bitfield.disp16 = 1;
	  else
	    {
	      bigdisp.bitfield.disp32 = 1;
	      bigdisp.bitfield.disp32s = 1;
	    }
	}
      else
	{
	  /* A mnemonic suffix of the "other" width acts like an
	     operand-size override.  */
	  if (!override)
	    override = (i.suffix == (flag_code != CODE_16BIT
				     ? WORD_MNEM_SUFFIX
				     : LONG_MNEM_SUFFIX));
	  bigdisp.bitfield.disp32 = 1;
	  if ((flag_code == CODE_16BIT) ^ override)
	    {
	      bigdisp.bitfield.disp32 = 0;
	      bigdisp.bitfield.disp16 = 1;
	    }
	}
    }
  i.types[this_operand] = operand_type_or (i.types[this_operand],
					   bigdisp);

  exp = &disp_expressions[i.disp_operands];
  i.op[this_operand].disps = exp;
  i.disp_operands++;
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = disp_start;
  END_STRING_AND_SAVE (disp_end);

#ifndef GCC_ASM_O_HACK
#define GCC_ASM_O_HACK 0
#endif
#if GCC_ASM_O_HACK
  END_STRING_AND_SAVE (disp_end + 1);
  if (i.types[this_operand].bitfield.baseIndex
      && displacement_string_end[-1] == '+')
    {
      /* This hack is to avoid a warning when using the "o"
	 constraint within gcc asm statements.
	 For instance:

	 #define _set_tssldt_desc(n,addr,limit,type) \
	 __asm__ __volatile__ ( \
	 "movw %w2,%0\n\t" \
	 "movw %w1,2+%0\n\t" \
	 "rorl $16,%1\n\t" \
	 "movb %b1,4+%0\n\t" \
	 "movb %4,5+%0\n\t" \
	 "movb $0,6+%0\n\t" \
	 "movb %h1,7+%0\n\t" \
	 "rorl $16,%1" \
	 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))

	 This works great except that the output assembler ends
	 up looking a bit weird if it turns out that there is
	 no offset.  You end up producing code that looks like:

	 #APP
	 movw $235,(%eax)
	 movw %dx,2+(%eax)
	 rorl $16,%edx
	 movb %dl,4+(%eax)
	 movb $137,5+(%eax)
	 movb $0,6+(%eax)
	 movb %dh,7+(%eax)
	 rorl $16,%edx
	 #NO_APP

	 So here we provide the missing zero.  */

      *displacement_string_end = '0';
    }
#endif
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();
  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);
#if GCC_ASM_O_HACK
  RESTORE_END_STRING (disp_end + 1);
#endif
  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* With a reloc operator present, only relocatable expressions
	 make sense; poison anything else.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);

  RESTORE_END_STRING (disp_end);

  return ret;
}
8533
8534 static int
8535 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
8536 i386_operand_type types, const char *disp_start)
8537 {
8538 i386_operand_type bigdisp;
8539 int ret = 1;
8540
8541 /* We do this to make sure that the section symbol is in
8542 the symbol table. We will ultimately change the relocation
8543 to be relative to the beginning of the section. */
8544 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
8545 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
8546 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8547 {
8548 if (exp->X_op != O_symbol)
8549 goto inv_disp;
8550
8551 if (S_IS_LOCAL (exp->X_add_symbol)
8552 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
8553 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
8554 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
8555 exp->X_op = O_subtract;
8556 exp->X_op_symbol = GOT_symbol;
8557 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
8558 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
8559 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8560 i.reloc[this_operand] = BFD_RELOC_64;
8561 else
8562 i.reloc[this_operand] = BFD_RELOC_32;
8563 }
8564
8565 else if (exp->X_op == O_absent
8566 || exp->X_op == O_illegal
8567 || exp->X_op == O_big)
8568 {
8569 inv_disp:
8570 as_bad (_("missing or invalid displacement expression `%s'"),
8571 disp_start);
8572 ret = 0;
8573 }
8574
8575 else if (flag_code == CODE_64BIT
8576 && !i.prefix[ADDR_PREFIX]
8577 && exp->X_op == O_constant)
8578 {
8579 /* Since displacement is signed extended to 64bit, don't allow
8580 disp32 and turn off disp32s if they are out of range. */
8581 i.types[this_operand].bitfield.disp32 = 0;
8582 if (!fits_in_signed_long (exp->X_add_number))
8583 {
8584 i.types[this_operand].bitfield.disp32s = 0;
8585 if (i.types[this_operand].bitfield.baseindex)
8586 {
8587 as_bad (_("0x%lx out range of signed 32bit displacement"),
8588 (long) exp->X_add_number);
8589 ret = 0;
8590 }
8591 }
8592 }
8593
8594 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8595 else if (exp->X_op != O_constant
8596 && OUTPUT_FLAVOR == bfd_target_aout_flavour
8597 && exp_seg != absolute_section
8598 && exp_seg != text_section
8599 && exp_seg != data_section
8600 && exp_seg != bss_section
8601 && exp_seg != undefined_section
8602 && !bfd_is_com_section (exp_seg))
8603 {
8604 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
8605 ret = 0;
8606 }
8607 #endif
8608
8609 /* Check if this is a displacement only operand. */
8610 bigdisp = i.types[this_operand];
8611 bigdisp.bitfield.disp8 = 0;
8612 bigdisp.bitfield.disp16 = 0;
8613 bigdisp.bitfield.disp32 = 0;
8614 bigdisp.bitfield.disp32s = 0;
8615 bigdisp.bitfield.disp64 = 0;
8616 if (operand_type_all_zero (&bigdisp))
8617 i.types[this_operand] = operand_type_and (i.types[this_operand],
8618 types);
8619
8620 return ret;
8621 }
8622
8623 /* Make sure the memory operand we've been dealt is valid.
8624 Return 1 on success, 0 on a failure. */
8625
static int
i386_index_check (const char *operand_string)
{
  const char *kind = "base/index";
  enum flag_code addr_mode;

  /* Determine the effective address size for this operand.  */
  if (i.prefix[ADDR_PREFIX])
    addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
  else
    {
      addr_mode = flag_code;

#if INFER_ADDR_PREFIX
      if (i.mem_operands == 0)
	{
	  /* Infer address prefix from the first memory operand.  */
	  const reg_entry *addr_reg = i.base_reg;

	  if (addr_reg == NULL)
	    addr_reg = i.index_reg;

	  if (addr_reg)
	    {
	      if (addr_reg->reg_num == RegEip
		  || addr_reg->reg_num == RegEiz
		  || addr_reg->reg_type.bitfield.reg32)
		addr_mode = CODE_32BIT;
	      else if (flag_code != CODE_64BIT
		       && addr_reg->reg_type.bitfield.reg16)
		addr_mode = CODE_16BIT;

	      if (addr_mode != flag_code)
		{
		  i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
		  i.prefixes += 1;
		  /* Change the size of any displacement too.  At most one
		     of Disp16 or Disp32 is set.
		     FIXME.  There doesn't seem to be any real need for
		     separate Disp16 and Disp32 flags.  The same goes for
		     Imm16 and Imm32.  Removing them would probably clean
		     up the code quite a lot.  */
		  if (flag_code != CODE_64BIT
		      && (i.types[this_operand].bitfield.disp16
			  || i.types[this_operand].bitfield.disp32))
		    i.types[this_operand]
		      = operand_type_xor (i.types[this_operand], disp16_32);
		}
	    }
	}
#endif
    }

  if (current_templates->start->opcode_modifier.isstring
      && !current_templates->start->opcode_modifier.immext
      && (current_templates->end[-1].opcode_modifier.isstring
	  || i.mem_operands))
    {
      /* Memory operands of string insns are special in that they only allow
	 a single register (rDI, rSI, or rBX) as their memory address.  */
      const reg_entry *expected_reg;
      static const char *di_si[][2] =
	{
	  { "esi", "edi" },
	  { "si", "di" },
	  { "rsi", "rdi" }
	};
      static const char *bx[] = { "ebx", "bx", "rbx" };

      kind = "string address";

      if (current_templates->start->opcode_modifier.repprefixok)
	{
	  /* Pick the operand template that has BaseIndex set so we
	     know whether rSI or rDI (via the EsSeg bit) is expected.  */
	  i386_operand_type type = current_templates->end[-1].operand_types[0];

	  if (!type.bitfield.baseindex
	      || ((!i.mem_operands != !intel_syntax)
		  && current_templates->end[-1].operand_types[1]
		     .bitfield.baseindex))
	    type = current_templates->end[-1].operand_types[1];
	  expected_reg = hash_find (reg_hash,
				    di_si[addr_mode][type.bitfield.esseg]);

	}
      else
	expected_reg = hash_find (reg_hash, bx[addr_mode]);

      if (i.base_reg != expected_reg
	  || i.index_reg
	  || operand_type_check (i.types[this_operand], disp))
	{
	  /* The second memory operand must have the same size as
	     the first one.  */
	  if (i.mem_operands
	      && i.base_reg
	      && !((addr_mode == CODE_64BIT
		    && i.base_reg->reg_type.bitfield.reg64)
		   || (addr_mode == CODE_32BIT
		       ? i.base_reg->reg_type.bitfield.reg32
		       : i.base_reg->reg_type.bitfield.reg16)))
	    goto bad_address;

	  /* Wrong register, but right size: just warn; the operand is
	     assembled as written.  */
	  as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
		   operand_string,
		   intel_syntax ? '[' : '(',
		   register_prefix,
		   expected_reg->reg_name,
		   intel_syntax ? ']' : ')');
	  return 1;
	}
      else
	return 1;

    bad_address:
      as_bad (_("`%s' is not a valid %s expression"),
	      operand_string, kind);
      return 0;
    }
  else
    {
      if (addr_mode != CODE_16BIT)
	{
	  /* 32-bit/64-bit checks.  */
	  if ((i.base_reg
	       && (addr_mode == CODE_64BIT
		   ? !i.base_reg->reg_type.bitfield.reg64
		   : !i.base_reg->reg_type.bitfield.reg32)
	       && (i.index_reg
		   || (i.base_reg->reg_num
		       != (addr_mode == CODE_64BIT ? RegRip : RegEip))))
	      || (i.index_reg
		  && !i.index_reg->reg_type.bitfield.regxmm
		  && !i.index_reg->reg_type.bitfield.regymm
		  && !i.index_reg->reg_type.bitfield.regzmm
		  && ((addr_mode == CODE_64BIT
		       ? !(i.index_reg->reg_type.bitfield.reg64
			   || i.index_reg->reg_num == RegRiz)
		       : !(i.index_reg->reg_type.bitfield.reg32
			   || i.index_reg->reg_num == RegEiz))
		      || !i.index_reg->reg_type.bitfield.baseindex)))
	    goto bad_address;

	  /* bndmk, bndldx, and bndstx have special restrictions. */
	  if (current_templates->start->base_opcode == 0xf30f1b
	      || (current_templates->start->base_opcode & ~1) == 0x0f1a)
	    {
	      /* They cannot use RIP-relative addressing. */
	      if (i.base_reg && i.base_reg->reg_num == RegRip)
		{
		  as_bad (_("`%s' cannot be used here"), operand_string);
		  return 0;
		}

	      /* bndldx and bndstx ignore their scale factor. */
	      if (current_templates->start->base_opcode != 0xf30f1b
		  && i.log2_scale_factor)
		as_warn (_("register scaling is being ignored here"));
	    }
	}
      else
	{
	  /* 16-bit checks.  Only the (%bx)/(%bp) bases and (%si)/(%di)
	     indices allowed by 16-bit ModRM encoding, no scaling.  */
	  if ((i.base_reg
	       && (!i.base_reg->reg_type.bitfield.reg16
		   || !i.base_reg->reg_type.bitfield.baseindex))
	      || (i.index_reg
		  && (!i.index_reg->reg_type.bitfield.reg16
		      || !i.index_reg->reg_type.bitfield.baseindex
		      || !(i.base_reg
			   && i.base_reg->reg_num < 6
			   && i.index_reg->reg_num >= 6
			   && i.log2_scale_factor == 0))))
	    goto bad_address;
	}
    }
  return 1;
}
8802
8803 /* Handle vector immediates. */
8804
8805 static int
8806 RC_SAE_immediate (const char *imm_start)
8807 {
8808 unsigned int match_found, j;
8809 const char *pstr = imm_start;
8810 expressionS *exp;
8811
8812 if (*pstr != '{')
8813 return 0;
8814
8815 pstr++;
8816 match_found = 0;
8817 for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
8818 {
8819 if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
8820 {
8821 if (!i.rounding)
8822 {
8823 rc_op.type = RC_NamesTable[j].type;
8824 rc_op.operand = this_operand;
8825 i.rounding = &rc_op;
8826 }
8827 else
8828 {
8829 as_bad (_("duplicated `%s'"), imm_start);
8830 return 0;
8831 }
8832 pstr += RC_NamesTable[j].len;
8833 match_found = 1;
8834 break;
8835 }
8836 }
8837 if (!match_found)
8838 return 0;
8839
8840 if (*pstr++ != '}')
8841 {
8842 as_bad (_("Missing '}': '%s'"), imm_start);
8843 return 0;
8844 }
8845 /* RC/SAE immediate string should contain nothing more. */;
8846 if (*pstr != 0)
8847 {
8848 as_bad (_("Junk after '}': '%s'"), imm_start);
8849 return 0;
8850 }
8851
8852 exp = &im_expressions[i.imm_operands++];
8853 i.op[this_operand].imms = exp;
8854
8855 exp->X_op = O_constant;
8856 exp->X_add_number = 0;
8857 exp->X_add_symbol = (symbolS *) 0;
8858 exp->X_op_symbol = (symbolS *) 0;
8859
8860 i.types[this_operand].bitfield.imm8 = 1;
8861 return 1;
8862 }
8863
8864 /* Only string instructions can have a second memory operand, so
8865 reduce current_templates to just those if it contains any. */
static int
maybe_adjust_templates (void)
{
  const insn_template *t;

  gas_assert (i.mem_operands == 1);

  /* Find the first string-instruction template, if any.  */
  for (t = current_templates->start; t < current_templates->end; ++t)
    if (t->opcode_modifier.isstring)
      break;

  if (t < current_templates->end)
    {
      /* NOTE: static storage — current_templates may point here after
	 return; only valid while assembling a single insn.  */
      static templates aux_templates;
      bfd_boolean recheck;

      /* Narrow the template range to the contiguous run of string
	 templates starting at T.  */
      aux_templates.start = t;
      for (; t < current_templates->end; ++t)
	if (!t->opcode_modifier.isstring)
	  break;
      aux_templates.end = t;

      /* Determine whether to re-check the first memory operand.  */
      recheck = (aux_templates.start != current_templates->start
		 || t != current_templates->end);

      current_templates = &aux_templates;

      if (recheck)
	{
	  /* Re-run i386_index_check on the first memory operand with
	     the narrowed template list; it inspects i.mem_operands, so
	     temporarily pretend none has been seen yet.  */
	  i.mem_operands = 0;
	  if (i.memop1_string != NULL
	      && i386_index_check (i.memop1_string) == 0)
	    return 0;
	  i.mem_operands = 1;
	}
    }

  return 1;
}
8906
/* Parse OPERAND_STRING into the i386_insn structure I.  Returns zero
   on error.  Handles, in order: an optional absolute-jump prefix,
   register operands (including segment overrides such as "%es:..."),
   immediates, RC/SAE immediates, and memory references in AT&T
   "disp(base,index,scale)" form.  */

static int
i386_att_operand (char *operand_string)
{
  const reg_entry *r;
  char *end_op;
  char *op_string = operand_string;

  if (is_space_char (*op_string))
    ++op_string;

  /* We check for an absolute prefix (differentiating,
     for example, 'jmp pc_relative_label' from 'jmp *absolute_label'.  */
  if (*op_string == ABSOLUTE_PREFIX)
    {
      ++op_string;
      if (is_space_char (*op_string))
	++op_string;
      i.types[this_operand].bitfield.jumpabsolute = 1;
    }

  /* Check if operand is a register.  */
  if ((r = parse_register (op_string, &end_op)) != NULL)
    {
      i386_operand_type temp;

      /* Check for a segment override by searching for ':' after a
	 segment register.  */
      op_string = end_op;
      if (is_space_char (*op_string))
	++op_string;
      if (*op_string == ':'
	  && (r->reg_type.bitfield.sreg2
	      || r->reg_type.bitfield.sreg3))
	{
	  /* Map reg_num 0..5 to %es, %cs, %ss, %ds, %fs, %gs.  */
	  switch (r->reg_num)
	    {
	    case 0:
	      i.seg[i.mem_operands] = &es;
	      break;
	    case 1:
	      i.seg[i.mem_operands] = &cs;
	      break;
	    case 2:
	      i.seg[i.mem_operands] = &ss;
	      break;
	    case 3:
	      i.seg[i.mem_operands] = &ds;
	      break;
	    case 4:
	      i.seg[i.mem_operands] = &fs;
	      break;
	    case 5:
	      i.seg[i.mem_operands] = &gs;
	      break;
	    }

	  /* Skip the ':' and whitespace.  */
	  ++op_string;
	  if (is_space_char (*op_string))
	    ++op_string;

	  if (!is_digit_char (*op_string)
	      && !is_identifier_char (*op_string)
	      && *op_string != '('
	      && *op_string != ABSOLUTE_PREFIX)
	    {
	      as_bad (_("bad memory operand `%s'"), op_string);
	      return 0;
	    }
	  /* Handle case of %es:*foo.  */
	  if (*op_string == ABSOLUTE_PREFIX)
	    {
	      ++op_string;
	      if (is_space_char (*op_string))
		++op_string;
	      i.types[this_operand].bitfield.jumpabsolute = 1;
	    }
	  goto do_memory_reference;
	}

      /* Handle vector operations (e.g. masking like {%k1}{z}).  */
      if (*op_string == '{')
	{
	  op_string = check_VecOperations (op_string, NULL);
	  if (op_string == NULL)
	    return 0;
	}

      if (*op_string)
	{
	  as_bad (_("junk `%s' after register"), op_string);
	  return 0;
	}
      /* Record the register operand; baseindex is cleared since a bare
	 register is not a memory base/index here.  */
      temp = r->reg_type;
      temp.bitfield.baseindex = 0;
      i.types[this_operand] = operand_type_or (i.types[this_operand],
					       temp);
      i.types[this_operand].bitfield.unspecified = 0;
      i.op[this_operand].regs = r;
      i.reg_operands++;
    }
  else if (*op_string == REGISTER_PREFIX)
    {
      as_bad (_("bad register name `%s'"), op_string);
      return 0;
    }
  else if (*op_string == IMMEDIATE_PREFIX)
    {
      ++op_string;
      if (i.types[this_operand].bitfield.jumpabsolute)
	{
	  as_bad (_("immediate operand illegal with absolute jump"));
	  return 0;
	}
      if (!i386_immediate (op_string))
	return 0;
    }
  else if (RC_SAE_immediate (operand_string))
    {
      /* If it is a RC or SAE immediate, do nothing.  */
      ;
    }
  else if (is_digit_char (*op_string)
	   || is_identifier_char (*op_string)
	   || *op_string == '"'
	   || *op_string == '(')
    {
      /* This is a memory reference of some sort.  */
      char *base_string;

      /* Start and end of displacement string expression (if found).  */
      char *displacement_string_start;
      char *displacement_string_end;
      char *vop_start;

    do_memory_reference:
      if (i.mem_operands == 1 && !maybe_adjust_templates ())
	return 0;
      /* Only string instructions may have two memory operands.  */
      if ((i.mem_operands == 1
	   && !current_templates->start->opcode_modifier.isstring)
	  || i.mem_operands == 2)
	{
	  as_bad (_("too many memory references for `%s'"),
		  current_templates->start->name);
	  return 0;
	}

      /* Check for base index form.  We detect the base index form by
	 looking for an ')' at the end of the operand, searching
	 for the '(' matching it, and finding a REGISTER_PREFIX or ','
	 after the '('.  */
      base_string = op_string + strlen (op_string);

      /* Handle vector operations.  */
      vop_start = strchr (op_string, '{');
      if (vop_start && vop_start < base_string)
	{
	  if (check_VecOperations (vop_start, base_string) == NULL)
	    return 0;
	  base_string = vop_start;
	}

      --base_string;
      if (is_space_char (*base_string))
	--base_string;

      /* If we only have a displacement, set-up for it to be parsed later.  */
      displacement_string_start = op_string;
      displacement_string_end = base_string + 1;

      if (*base_string == ')')
	{
	  char *temp_string;
	  unsigned int parens_balanced = 1;
	  /* We've already checked that the number of left & right ()'s are
	     equal, so this loop will not be infinite.  */
	  do
	    {
	      base_string--;
	      if (*base_string == ')')
		parens_balanced++;
	      if (*base_string == '(')
		parens_balanced--;
	    }
	  while (parens_balanced);

	  temp_string = base_string;

	  /* Skip past '(' and whitespace.  */
	  ++base_string;
	  if (is_space_char (*base_string))
	    ++base_string;

	  if (*base_string == ','
	      || ((i.base_reg = parse_register (base_string, &end_op))
		  != NULL))
	    {
	      /* Everything before the '(' is the displacement.  */
	      displacement_string_end = temp_string;

	      i.types[this_operand].bitfield.baseindex = 1;

	      if (i.base_reg)
		{
		  base_string = end_op;
		  if (is_space_char (*base_string))
		    ++base_string;
		}

	      /* There may be an index reg or scale factor here.  */
	      if (*base_string == ',')
		{
		  ++base_string;
		  if (is_space_char (*base_string))
		    ++base_string;

		  if ((i.index_reg = parse_register (base_string, &end_op))
		      != NULL)
		    {
		      base_string = end_op;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string == ',')
			{
			  ++base_string;
			  if (is_space_char (*base_string))
			    ++base_string;
			}
		      else if (*base_string != ')')
			{
			  as_bad (_("expecting `,' or `)' "
				    "after index register in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (*base_string == REGISTER_PREFIX)
		    {
		      /* Truncate the diagnostic at the next ',' so the
			 message shows only the offending register name.  */
		      end_op = strchr (base_string, ',');
		      if (end_op)
			*end_op = '\0';
		      as_bad (_("bad register name `%s'"), base_string);
		      return 0;
		    }

		  /* Check for scale factor.  */
		  if (*base_string != ')')
		    {
		      char *end_scale = i386_scale (base_string);

		      if (!end_scale)
			return 0;

		      base_string = end_scale;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string != ')')
			{
			  as_bad (_("expecting `)' "
				    "after scale factor in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (!i.index_reg)
		    {
		      as_bad (_("expecting index register or scale factor "
				"after `,'; got '%c'"),
			      *base_string);
		      return 0;
		    }
		}
	      else if (*base_string != ')')
		{
		  as_bad (_("expecting `,' or `)' "
			    "after base register in `%s'"),
			  operand_string);
		  return 0;
		}
	    }
	  else if (*base_string == REGISTER_PREFIX)
	    {
	      end_op = strchr (base_string, ',');
	      if (end_op)
		*end_op = '\0';
	      as_bad (_("bad register name `%s'"), base_string);
	      return 0;
	    }
	}

      /* If there's an expression beginning the operand, parse it,
	 assuming displacement_string_start and
	 displacement_string_end are meaningful.  */
      if (displacement_string_start != displacement_string_end)
	{
	  if (!i386_displacement (displacement_string_start,
				  displacement_string_end))
	    return 0;
	}

      /* Special case for (%dx) while doing input/output op.  */
      if (i.base_reg
	  && operand_type_equal (&i.base_reg->reg_type,
				 &reg16_inoutportreg)
	  && i.index_reg == 0
	  && i.log2_scale_factor == 0
	  && i.seg[i.mem_operands] == 0
	  && !operand_type_check (i.types[this_operand], disp))
	{
	  i.types[this_operand] = inoutportreg;
	  return 1;
	}

      if (i386_index_check (operand_string) == 0)
	return 0;
      i.types[this_operand].bitfield.mem = 1;
      /* Remember the first memory operand's text so that
	 maybe_adjust_templates can re-validate it later.  */
      if (i.mem_operands == 0)
	i.memop1_string = xstrdup (operand_string);
      i.mem_operands++;
    }
  else
    {
      /* It's not a memory operand; argh!  */
      as_bad (_("invalid char %s beginning operand %d `%s'"),
	      output_invalid (*op_string),
	      this_operand + 1,
	      op_string);
      return 0;
    }
  return 1;			/* Normal return.  */
}
9240 \f
9241 /* Calculate the maximum variable size (i.e., excluding fr_fix)
9242 that an rs_machine_dependent frag may reach. */
9243
9244 unsigned int
9245 i386_frag_max_var (fragS *frag)
9246 {
9247 /* The only relaxable frags are for jumps.
9248 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
9249 gas_assert (frag->fr_type == rs_machine_dependent);
9250 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
9251 }
9252
9253 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9254 static int
9255 elf_symbol_resolved_in_segment_p (symbolS *fr_symbol, offsetT fr_var)
9256 {
9257 /* STT_GNU_IFUNC symbol must go through PLT. */
9258 if ((symbol_get_bfdsym (fr_symbol)->flags
9259 & BSF_GNU_INDIRECT_FUNCTION) != 0)
9260 return 0;
9261
9262 if (!S_IS_EXTERNAL (fr_symbol))
9263 /* Symbol may be weak or local. */
9264 return !S_IS_WEAK (fr_symbol);
9265
9266 /* Global symbols with non-default visibility can't be preempted. */
9267 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol)) != STV_DEFAULT)
9268 return 1;
9269
9270 if (fr_var != NO_RELOC)
9271 switch ((enum bfd_reloc_code_real) fr_var)
9272 {
9273 case BFD_RELOC_386_PLT32:
9274 case BFD_RELOC_X86_64_PLT32:
9275 /* Symbol with PLT relocation may be preempted. */
9276 return 0;
9277 default:
9278 abort ();
9279 }
9280
9281 /* Global symbols with default visibility in a shared library may be
9282 preempted by another definition. */
9283 return !shared;
9284 }
9285 #endif
9286
/* md_estimate_size_before_relax()

   Called just before relax() for rs_machine_dependent frags.  The x86
   assembler uses these frags to handle variable size jump
   instructions.

   Any symbol that is now undefined will not become defined.
   Return the correct fr_subtype in the frag.
   Return the initial "guess for variable size of frag" to caller.
   The guess is actually the growth beyond the fixed part.  Whatever
   we do to grow the fixed or variable part contributes to our
   returned value.  */

int
md_estimate_size_before_relax (fragS *fragP, segT segment)
{
  /* We've already got fragP->fr_subtype right;  all we have to do is
     check for un-relaxable symbols.  On an ELF system, we can't relax
     an externally visible symbol, because it may be overridden by a
     shared library.  */
  if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      || (IS_ELF
	  && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol,
						fragP->fr_var))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      || (OUTPUT_FLAVOR == bfd_target_coff_flavour
	  && S_IS_WEAK (fragP->fr_symbol))
#endif
      )
    {
      /* Symbol is undefined in this segment, or we need to keep a
	 reloc so that weak symbols can be overridden.  */
      int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
      enum bfd_reloc_code_real reloc_type;
      unsigned char *opcode;
      int old_fr_fix;

      /* fr_var carries a specific relocation type requested at
	 assembly time (e.g. a PLT reloc), or NO_RELOC when a plain
	 PC-relative relocation of the right width should be used.  */
      if (fragP->fr_var != NO_RELOC)
	reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
      else if (size == 2)
	reloc_type = BFD_RELOC_16_PCREL;
      else
	reloc_type = BFD_RELOC_32_PCREL;

      old_fr_fix = fragP->fr_fix;
      opcode = (unsigned char *) fragP->fr_opcode;

      switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
	{
	case UNCOND_JUMP:
	  /* Make jmp (0xeb) a (d)word displacement jump.  */
	  opcode[0] = 0xe9;
	  fragP->fr_fix += size;
	  fix_new (fragP, old_fr_fix, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	case COND_JUMP86:
	  if (size == 2
	      && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
	    {
	      /* Negate the condition, and branch past an
		 unconditional jump.  */
	      opcode[0] ^= 1;
	      opcode[1] = 3;
	      /* Insert an unconditional jump.  */
	      opcode[2] = 0xe9;
	      /* We added two extra opcode bytes, and have a two byte
		 offset.  */
	      fragP->fr_fix += 2 + 2;
	      fix_new (fragP, old_fr_fix + 2, 2,
		       fragP->fr_symbol,
		       fragP->fr_offset, 1,
		       reloc_type);
	      break;
	    }
	  /* Fall through.  */

	case COND_JUMP:
	  if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
	    {
	      /* Promotion disabled: keep the conditional jump one byte
		 and emit a signed 8-bit PC-relative fixup.  */
	      fixS *fixP;

	      fragP->fr_fix += 1;
	      fixP = fix_new (fragP, old_fr_fix, 1,
			      fragP->fr_symbol,
			      fragP->fr_offset, 1,
			      BFD_RELOC_8_PCREL);
	      fixP->fx_signed = 1;
	      break;
	    }

	  /* This changes the byte-displacement jump 0x7N
	     to the (d)word-displacement jump 0x0f,0x8N.  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  /* We've added an opcode byte.  */
	  fragP->fr_fix += 1 + size;
	  fix_new (fragP, old_fr_fix + 1, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
      /* The frag is now fixed size; no further relaxation needed.  */
      frag_wane (fragP);
      return fragP->fr_fix - old_fr_fix;
    }

  /* Guess size depending on current relax state.  Initially the relax
     state will correspond to a short jump and we return 1, because
     the variable part of the frag (the branch offset) is one byte
     long.  However, we can relax a section more than once and in that
     case we must either set fr_subtype back to the unrelaxed state,
     or return the value for the appropriate branch.  */
  return md_relax_table[fragP->fr_subtype].rlx_length;
}
9411
/* Called after relax() is finished.

   In:	Address of frag.
	fr_type == rs_machine_dependent.
	fr_subtype is what the address relaxed to.

   Out:	Any fixSs and constants are set up.
	Caller will turn frag into a ".space 0".  */

void
md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
		 fragS *fragP)
{
  unsigned char *opcode;
  unsigned char *where_to_put_displacement = NULL;
  offsetT target_address;
  offsetT opcode_address;
  unsigned int extension = 0;
  offsetT displacement_from_opcode_start;

  opcode = (unsigned char *) fragP->fr_opcode;

  /* Address we want to reach in file space.  */
  target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;

  /* Address opcode resides at in file space.  */
  opcode_address = fragP->fr_address + fragP->fr_fix;

  /* Displacement from opcode start to fill into instruction.  */
  displacement_from_opcode_start = target_address - opcode_address;

  if ((fragP->fr_subtype & BIG) == 0)
    {
      /* Don't have to change opcode.  */
      extension = 1;		/* 1 opcode + 1 displacement  */
      where_to_put_displacement = &opcode[1];
    }
  else
    {
      if (no_cond_jump_promotion
	  && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
	as_warn_where (fragP->fr_file, fragP->fr_line,
		       _("long jump required"));

      switch (fragP->fr_subtype)
	{
	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
	  extension = 4;	/* 1 opcode + 4 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
	  extension = 2;	/* 1 opcode + 2 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG):
	case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
	  extension = 5;	/* 2 opcode + 4 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
	  extension = 3;	/* 2 opcode + 2 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
	  /* Invert the condition, branch past a following 16-bit
	     unconditional jump (0xe9 + 2-byte displacement).  */
	  extension = 4;
	  opcode[0] ^= 1;
	  opcode[1] = 3;
	  opcode[2] = 0xe9;
	  where_to_put_displacement = &opcode[3];
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
    }

  /* If size is less than four we are sure that the operand fits,
     but if it's 4, then it could be that the displacement is larger
     than -/+ 2GB.  */
  if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
      && object_64bit
      && ((addressT) (displacement_from_opcode_start - extension
		      + ((addressT) 1 << 31))
	  > (((addressT) 2 << 31) - 1)))
    {
      as_bad_where (fragP->fr_file, fragP->fr_line,
		    _("jump target out of range"));
      /* Make us emit 0.  */
      displacement_from_opcode_start = extension;
    }
  /* Now put displacement after opcode.  The displacement is relative
     to the end of the instruction, hence the "- extension".  */
  md_number_to_chars ((char *) where_to_put_displacement,
		      (valueT) (displacement_from_opcode_start - extension),
		      DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
  fragP->fr_fix += extension;
}
9519 \f
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.

   On the 386, immediates, displacements, and data pointers are all in
   the same (little-endian) format, so we don't need to care about which
   we are handling.  */

void
md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
  char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
  valueT value = *valP;

#if !defined (TE_Mach)
  /* Turn absolute relocation types into their PC-relative
     counterparts when the fixup is PC-relative.  */
  if (fixP->fx_pcrel)
    {
      switch (fixP->fx_r_type)
	{
	default:
	  break;

	case BFD_RELOC_64:
	  fixP->fx_r_type = BFD_RELOC_64_PCREL;
	  break;
	case BFD_RELOC_32:
	case BFD_RELOC_X86_64_32S:
	  fixP->fx_r_type = BFD_RELOC_32_PCREL;
	  break;
	case BFD_RELOC_16:
	  fixP->fx_r_type = BFD_RELOC_16_PCREL;
	  break;
	case BFD_RELOC_8:
	  fixP->fx_r_type = BFD_RELOC_8_PCREL;
	  break;
	}
    }

  if (fixP->fx_addsy != NULL
      && (fixP->fx_r_type == BFD_RELOC_32_PCREL
	  || fixP->fx_r_type == BFD_RELOC_64_PCREL
	  || fixP->fx_r_type == BFD_RELOC_16_PCREL
	  || fixP->fx_r_type == BFD_RELOC_8_PCREL)
      && !use_rela_relocations)
    {
      /* This is a hack.  There should be a better way to handle this.
	 This covers for the fact that bfd_install_relocation will
	 subtract the current location (for partial_inplace, PC relative
	 relocations); see more below.  */
#ifndef OBJ_AOUT
      if (IS_ELF
#ifdef TE_PE
	  || OUTPUT_FLAVOR == bfd_target_coff_flavour
#endif
	  )
	value += fixP->fx_where + fixP->fx_frag->fr_address;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (IS_ELF)
	{
	  segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);

	  if ((sym_seg == seg
	       || (symbol_section_p (fixP->fx_addsy)
		   && sym_seg != absolute_section))
	      && !generic_force_reloc (fixP))
	    {
	      /* Yes, we add the values in twice.  This is because
		 bfd_install_relocation subtracts them out again.  I think
		 bfd_install_relocation is broken, but I don't dare change
		 it.  FIXME.  */
	      value += fixP->fx_where + fixP->fx_frag->fr_address;
	    }
	}
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      /* For some reason, the PE format does not store a
	 section address offset for a PC relative symbol.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != seg
	  || S_IS_WEAK (fixP->fx_addsy))
	value += md_pcrel_from (fixP);
#endif
    }
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixP->fx_addsy != NULL
      && S_IS_WEAK (fixP->fx_addsy)
      /* PR 16858: Do not modify weak function references.  */
      && ! fixP->fx_pcrel)
    {
#if !defined (TE_PEP)
      /* For x86 PE weak function symbols are neither PC-relative
	 nor do they set S_IS_FUNCTION.  So the only reliable way
	 to detect them is to check the flags of their containing
	 section.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
	  && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
	;
      else
#endif
	value -= S_GET_VALUE (fixP->fx_addsy);
    }
#endif

  /* Fix a few things - the dynamic linker expects certain values here,
     and we must not disappoint it.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF && fixP->fx_addsy)
    switch (fixP->fx_r_type)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
	/* Make the jump instruction point to the address of the operand.  At
	   runtime we merely add the offset to the actual PLT entry.  */
	value = -4;
	break;

      case BFD_RELOC_386_TLS_GD:
      case BFD_RELOC_386_TLS_LDM:
      case BFD_RELOC_386_TLS_IE_32:
      case BFD_RELOC_386_TLS_IE:
      case BFD_RELOC_386_TLS_GOTIE:
      case BFD_RELOC_386_TLS_GOTDESC:
      case BFD_RELOC_X86_64_TLSGD:
      case BFD_RELOC_X86_64_TLSLD:
      case BFD_RELOC_X86_64_GOTTPOFF:
      case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	/* Fallthrough */
      case BFD_RELOC_386_TLS_LE:
      case BFD_RELOC_386_TLS_LDO_32:
      case BFD_RELOC_386_TLS_LE_32:
      case BFD_RELOC_X86_64_DTPOFF32:
      case BFD_RELOC_X86_64_DTPOFF64:
      case BFD_RELOC_X86_64_TPOFF32:
      case BFD_RELOC_X86_64_TPOFF64:
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	break;

      case BFD_RELOC_386_TLS_DESC_CALL:
      case BFD_RELOC_X86_64_TLSDESC_CALL:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	fixP->fx_done = 0;
	return;

      case BFD_RELOC_VTABLE_INHERIT:
      case BFD_RELOC_VTABLE_ENTRY:
	fixP->fx_done = 0;
	return;

      default:
	break;
      }
#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
  *valP = value;
#endif /* !defined (TE_Mach) */

  /* Are we finished with this relocation now?  */
  if (fixP->fx_addsy == NULL)
    fixP->fx_done = 1;
#if defined (OBJ_COFF) && defined (TE_PE)
  else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      fixP->fx_done = 0;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      /* Clear out the frag for now.  */
      value = 0;
    }
#endif
  else if (use_rela_relocations)
    {
      /* With RELA relocations the addend lives in the relocation, not
	 in the section contents, so zero out the in-place bytes.  */
      fixP->fx_no_overflow = 1;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      value = 0;
    }

  md_number_to_chars (p, value, fixP->fx_size);
}
9701 \f
9702 const char *
9703 md_atof (int type, char *litP, int *sizeP)
9704 {
9705 /* This outputs the LITTLENUMs in REVERSE order;
9706 in accord with the bigendian 386. */
9707 return ieee_md_atof (type, litP, sizeP, FALSE);
9708 }
9709 \f
/* Scratch buffer for output_invalid: big enough for "(0xNN)" + NUL.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Return a printable representation of character C for diagnostics:
   the quoted character itself if printable, otherwise its hex code.  */
static char *
output_invalid (int c)
{
  if (!ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  return output_invalid_buf;
}
9723
/* REG_STRING starts *before* REGISTER_PREFIX.  Parse a register name
   and return its table entry, or NULL if the text is not a register
   valid for the current CPU/mode.  On success *END_OP points just
   past the consumed text.  */

static const reg_entry *
parse_real_register (char *reg_string, char **end_op)
{
  char *s = reg_string;
  char *p;
  char reg_name_given[MAX_REG_NAME_SIZE + 1];
  const reg_entry *r;

  /* Skip possible REGISTER_PREFIX and possible whitespace.  */
  if (*s == REGISTER_PREFIX)
    ++s;

  if (is_space_char (*s))
    ++s;

  /* Copy the candidate name, normalized through register_chars[];
     characters not valid in register names map to '\0' and end the
     copy.  */
  p = reg_name_given;
  while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
    {
      if (p >= reg_name_given + MAX_REG_NAME_SIZE)
	return (const reg_entry *) NULL;
      s++;
    }

  /* For naked regs, make sure that we are not dealing with an identifier.
     This prevents confusing an identifier like `eax_var' with register
     `eax'.  */
  if (allow_naked_reg && identifier_chars[(unsigned char) *s])
    return (const reg_entry *) NULL;

  *end_op = s;

  r = (const reg_entry *) hash_find (reg_hash, reg_name_given);

  /* Handle floating point regs, allowing spaces in the (i) part.  */
  if (r == i386_regtab /* %st is first entry of table  */)
    {
      if (is_space_char (*s))
	++s;
      if (*s == '(')
	{
	  ++s;
	  if (is_space_char (*s))
	    ++s;
	  if (*s >= '0' && *s <= '7')
	    {
	      int fpr = *s - '0';
	      ++s;
	      if (is_space_char (*s))
		++s;
	      if (*s == ')')
		{
		  *end_op = s + 1;
		  /* %st(N) entries follow "st(0)" consecutively in the
		     register table.  */
		  r = (const reg_entry *) hash_find (reg_hash, "st(0)");
		  know (r);
		  return r + fpr;
		}
	    }
	  /* We have "%st(" then garbage.  */
	  return (const reg_entry *) NULL;
	}
    }

  if (r == NULL || allow_pseudo_reg)
    return r;

  if (operand_type_all_zero (&r->reg_type))
    return (const reg_entry *) NULL;

  /* The following checks reject registers that the currently selected
     CPU architecture does not provide.  */
  if ((r->reg_type.bitfield.reg32
       || r->reg_type.bitfield.sreg3
       || r->reg_type.bitfield.control
       || r->reg_type.bitfield.debug
       || r->reg_type.bitfield.test)
      && !cpu_arch_flags.bitfield.cpui386)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.floatreg
      && !cpu_arch_flags.bitfield.cpu8087
      && !cpu_arch_flags.bitfield.cpu287
      && !cpu_arch_flags.bitfield.cpu387)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpuregmmx)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpuregxmm)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuregymm)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regzmm && !cpu_arch_flags.bitfield.cpuregzmm)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regmask
      && !cpu_arch_flags.bitfield.cpuregmask)
    return (const reg_entry *) NULL;

  /* Don't allow fake index register unless allow_index_reg isn't 0.  */
  if (!allow_index_reg
      && (r->reg_num == RegEiz || r->reg_num == RegRiz))
    return (const reg_entry *) NULL;

  /* Upper 16 vector register is only available with VREX in 64bit
     mode.  */
  if ((r->reg_flags & RegVRex))
    {
      if (i.vec_encoding == vex_encoding_default)
	i.vec_encoding = vex_encoding_evex;

      if (!cpu_arch_flags.bitfield.cpuvrex
	  || i.vec_encoding != vex_encoding_evex
	  || flag_code != CODE_64BIT)
	return (const reg_entry *) NULL;
    }

  /* REX-only registers and 64-bit registers need 64-bit mode (with a
     long-mode exception for control registers).  */
  if (((r->reg_flags & (RegRex64 | RegRex))
       || r->reg_type.bitfield.reg64)
      && (!cpu_arch_flags.bitfield.cpulm
	  || !operand_type_equal (&r->reg_type, &control))
      && flag_code != CODE_64BIT)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
    return (const reg_entry *) NULL;

  return r;
}
9854
9855 /* REG_STRING starts *before* REGISTER_PREFIX. */
9856
9857 static const reg_entry *
9858 parse_register (char *reg_string, char **end_op)
9859 {
9860 const reg_entry *r;
9861
9862 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
9863 r = parse_real_register (reg_string, end_op);
9864 else
9865 r = NULL;
9866 if (!r)
9867 {
9868 char *save = input_line_pointer;
9869 char c;
9870 symbolS *symbolP;
9871
9872 input_line_pointer = reg_string;
9873 c = get_symbol_name (&reg_string);
9874 symbolP = symbol_find (reg_string);
9875 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
9876 {
9877 const expressionS *e = symbol_get_value_expression (symbolP);
9878
9879 know (e->X_op == O_register);
9880 know (e->X_add_number >= 0
9881 && (valueT) e->X_add_number < i386_regtab_size);
9882 r = i386_regtab + e->X_add_number;
9883 if ((r->reg_flags & RegVRex))
9884 i.vec_encoding = vex_encoding_evex;
9885 *end_op = input_line_pointer;
9886 }
9887 *input_line_pointer = c;
9888 input_line_pointer = save;
9889 }
9890 return r;
9891 }
9892
/* Try to parse NAME as a register operand during expression parsing.
   *NEXTCHARP holds the character that terminated NAME in the input
   stream; it is temporarily re-inserted so multi-token register names
   such as "%st(1)" can be scanned past NAME's end.  Returns nonzero
   and fills in E on success; under Intel syntax, falls back to
   i386_intel_parse_name.  */
int
i386_parse_name (char *name, expressionS *e, char *nextcharP)
{
  const reg_entry *r;
  char *end = input_line_pointer;

  /* Restore the terminating character so parse_register can look
     beyond the end of NAME.  */
  *end = *nextcharP;
  r = parse_register (name, &input_line_pointer);
  if (r && end <= input_line_pointer)
    {
      /* Register consumed; save the new terminator and NUL-terminate
	 the scanned text again.  */
      *nextcharP = *input_line_pointer;
      *input_line_pointer = 0;
      e->X_op = O_register;
      e->X_add_number = r - i386_regtab;
      return 1;
    }
  /* Not a register: undo the terminator swap.  */
  input_line_pointer = end;
  *end = 0;
  return intel_syntax ? i386_intel_parse_name (name, e) : 0;
}
9913
/* Handle operand text the generic expression parser cannot: an AT&T
   register (REGISTER_PREFIX) or an Intel-syntax bracketed index
   expression.  E is left untouched (O_absent per caller convention)
   when nothing is recognized.  */
void
md_operand (expressionS *e)
{
  char *end;
  const reg_entry *r;

  switch (*input_line_pointer)
    {
    case REGISTER_PREFIX:
      r = parse_real_register (input_line_pointer, &end);
      if (r)
	{
	  e->X_op = O_register;
	  e->X_add_number = r - i386_regtab;
	  input_line_pointer = end;
	}
      break;

    case '[':
      gas_assert (intel_syntax);
      end = input_line_pointer++;
      expression (e);
      if (*input_line_pointer == ']')
	{
	  ++input_line_pointer;
	  /* Wrap the parsed sub-expression in an O_index node.  */
	  e->X_op_symbol = make_expr_symbol (e);
	  e->X_add_symbol = NULL;
	  e->X_add_number = 0;
	  e->X_op = O_index;
	}
      else
	{
	  /* Missing closing ']': report nothing parsed and rewind.  */
	  e->X_op = O_absent;
	  input_line_pointer = end;
	}
      break;
    }
}
9952
9953 \f
/* Short command-line options recognized by this backend; the ELF
   variants (-k, -V, -Q, -s) exist for SVR4/FreeBSD compatibility.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
const char *md_shortopts = "kVQ:sqn";
#else
const char *md_shortopts = "qn";
#endif

/* Values for long options without a short equivalent; dispatched in
   md_parse_option.  */
#define OPTION_32 (OPTION_MD_BASE + 0)
#define OPTION_64 (OPTION_MD_BASE + 1)
#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
#define OPTION_MARCH (OPTION_MD_BASE + 3)
#define OPTION_MTUNE (OPTION_MD_BASE + 4)
#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
#define OPTION_X32 (OPTION_MD_BASE + 14)
#define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
#define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
#define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
#define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
#define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
#define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
#define OPTION_MSHARED (OPTION_MD_BASE + 21)
#define OPTION_MAMD64 (OPTION_MD_BASE + 22)
#define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
#define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
#define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 25)

/* Long options table; object-format-specific entries are guarded by
   the same conditionals used elsewhere in this file.  */
struct option md_longopts[] =
{
  {"32", no_argument, NULL, OPTION_32},
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
  {"64", no_argument, NULL, OPTION_64},
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"x32", no_argument, NULL, OPTION_X32},
  {"mshared", no_argument, NULL, OPTION_MSHARED},
#endif
  {"divide", no_argument, NULL, OPTION_DIVIDE},
  {"march", required_argument, NULL, OPTION_MARCH},
  {"mtune", required_argument, NULL, OPTION_MTUNE},
  {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
  {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
  {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
  {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
  {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
  {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
  {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
  {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
  {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
  {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
  {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
  {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
# if defined (TE_PE) || defined (TE_PEP)
  {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
#endif
  {"momit-lock-prefix", required_argument, NULL, OPTION_MOMIT_LOCK_PREFIX},
  {"mfence-as-lock-add", required_argument, NULL, OPTION_MFENCE_AS_LOCK_ADD},
  {"mrelax-relocations", required_argument, NULL, OPTION_MRELAX_RELOCATIONS},
  {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG},
  {"mamd64", no_argument, NULL, OPTION_MAMD64},
  {"mintel64", no_argument, NULL, OPTION_MINTEL64},
  {NULL, no_argument, NULL, 0}
};
size_t md_longopts_size = sizeof (md_longopts);
10025
10026 int
10027 md_parse_option (int c, const char *arg)
10028 {
10029 unsigned int j;
10030 char *arch, *next, *saved;
10031
10032 switch (c)
10033 {
10034 case 'n':
10035 optimize_align_code = 0;
10036 break;
10037
10038 case 'q':
10039 quiet_warnings = 1;
10040 break;
10041
10042 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10043 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
10044 should be emitted or not. FIXME: Not implemented. */
10045 case 'Q':
10046 break;
10047
10048 /* -V: SVR4 argument to print version ID. */
10049 case 'V':
10050 print_version_id ();
10051 break;
10052
10053 /* -k: Ignore for FreeBSD compatibility. */
10054 case 'k':
10055 break;
10056
10057 case 's':
10058 /* -s: On i386 Solaris, this tells the native assembler to use
10059 .stab instead of .stab.excl. We always use .stab anyhow. */
10060 break;
10061
10062 case OPTION_MSHARED:
10063 shared = 1;
10064 break;
10065 #endif
10066 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
10067 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
10068 case OPTION_64:
10069 {
10070 const char **list, **l;
10071
10072 list = bfd_target_list ();
10073 for (l = list; *l != NULL; l++)
10074 if (CONST_STRNEQ (*l, "elf64-x86-64")
10075 || strcmp (*l, "coff-x86-64") == 0
10076 || strcmp (*l, "pe-x86-64") == 0
10077 || strcmp (*l, "pei-x86-64") == 0
10078 || strcmp (*l, "mach-o-x86-64") == 0)
10079 {
10080 default_arch = "x86_64";
10081 break;
10082 }
10083 if (*l == NULL)
10084 as_fatal (_("no compiled in support for x86_64"));
10085 free (list);
10086 }
10087 break;
10088 #endif
10089
10090 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10091 case OPTION_X32:
10092 if (IS_ELF)
10093 {
10094 const char **list, **l;
10095
10096 list = bfd_target_list ();
10097 for (l = list; *l != NULL; l++)
10098 if (CONST_STRNEQ (*l, "elf32-x86-64"))
10099 {
10100 default_arch = "x86_64:32";
10101 break;
10102 }
10103 if (*l == NULL)
10104 as_fatal (_("no compiled in support for 32bit x86_64"));
10105 free (list);
10106 }
10107 else
10108 as_fatal (_("32bit x86_64 is only supported for ELF"));
10109 break;
10110 #endif
10111
10112 case OPTION_32:
10113 default_arch = "i386";
10114 break;
10115
10116 case OPTION_DIVIDE:
10117 #ifdef SVR4_COMMENT_CHARS
10118 {
10119 char *n, *t;
10120 const char *s;
10121
10122 n = XNEWVEC (char, strlen (i386_comment_chars) + 1);
10123 t = n;
10124 for (s = i386_comment_chars; *s != '\0'; s++)
10125 if (*s != '/')
10126 *t++ = *s;
10127 *t = '\0';
10128 i386_comment_chars = n;
10129 }
10130 #endif
10131 break;
10132
10133 case OPTION_MARCH:
10134 saved = xstrdup (arg);
10135 arch = saved;
10136 /* Allow -march=+nosse. */
10137 if (*arch == '+')
10138 arch++;
10139 do
10140 {
10141 if (*arch == '.')
10142 as_fatal (_("invalid -march= option: `%s'"), arg);
10143 next = strchr (arch, '+');
10144 if (next)
10145 *next++ = '\0';
10146 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
10147 {
10148 if (strcmp (arch, cpu_arch [j].name) == 0)
10149 {
10150 /* Processor. */
10151 if (! cpu_arch[j].flags.bitfield.cpui386)
10152 continue;
10153
10154 cpu_arch_name = cpu_arch[j].name;
10155 cpu_sub_arch_name = NULL;
10156 cpu_arch_flags = cpu_arch[j].flags;
10157 cpu_arch_isa = cpu_arch[j].type;
10158 cpu_arch_isa_flags = cpu_arch[j].flags;
10159 if (!cpu_arch_tune_set)
10160 {
10161 cpu_arch_tune = cpu_arch_isa;
10162 cpu_arch_tune_flags = cpu_arch_isa_flags;
10163 }
10164 break;
10165 }
10166 else if (*cpu_arch [j].name == '.'
10167 && strcmp (arch, cpu_arch [j].name + 1) == 0)
10168 {
10169 /* ISA extension. */
10170 i386_cpu_flags flags;
10171
10172 flags = cpu_flags_or (cpu_arch_flags,
10173 cpu_arch[j].flags);
10174
10175 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
10176 {
10177 if (cpu_sub_arch_name)
10178 {
10179 char *name = cpu_sub_arch_name;
10180 cpu_sub_arch_name = concat (name,
10181 cpu_arch[j].name,
10182 (const char *) NULL);
10183 free (name);
10184 }
10185 else
10186 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
10187 cpu_arch_flags = flags;
10188 cpu_arch_isa_flags = flags;
10189 }
10190 break;
10191 }
10192 }
10193
10194 if (j >= ARRAY_SIZE (cpu_arch))
10195 {
10196 /* Disable an ISA extension. */
10197 for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
10198 if (strcmp (arch, cpu_noarch [j].name) == 0)
10199 {
10200 i386_cpu_flags flags;
10201
10202 flags = cpu_flags_and_not (cpu_arch_flags,
10203 cpu_noarch[j].flags);
10204 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
10205 {
10206 if (cpu_sub_arch_name)
10207 {
10208 char *name = cpu_sub_arch_name;
10209 cpu_sub_arch_name = concat (arch,
10210 (const char *) NULL);
10211 free (name);
10212 }
10213 else
10214 cpu_sub_arch_name = xstrdup (arch);
10215 cpu_arch_flags = flags;
10216 cpu_arch_isa_flags = flags;
10217 }
10218 break;
10219 }
10220
10221 if (j >= ARRAY_SIZE (cpu_noarch))
10222 j = ARRAY_SIZE (cpu_arch);
10223 }
10224
10225 if (j >= ARRAY_SIZE (cpu_arch))
10226 as_fatal (_("invalid -march= option: `%s'"), arg);
10227
10228 arch = next;
10229 }
10230 while (next != NULL);
10231 free (saved);
10232 break;
10233
10234 case OPTION_MTUNE:
10235 if (*arg == '.')
10236 as_fatal (_("invalid -mtune= option: `%s'"), arg);
10237 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
10238 {
10239 if (strcmp (arg, cpu_arch [j].name) == 0)
10240 {
10241 cpu_arch_tune_set = 1;
10242 cpu_arch_tune = cpu_arch [j].type;
10243 cpu_arch_tune_flags = cpu_arch[j].flags;
10244 break;
10245 }
10246 }
10247 if (j >= ARRAY_SIZE (cpu_arch))
10248 as_fatal (_("invalid -mtune= option: `%s'"), arg);
10249 break;
10250
10251 case OPTION_MMNEMONIC:
10252 if (strcasecmp (arg, "att") == 0)
10253 intel_mnemonic = 0;
10254 else if (strcasecmp (arg, "intel") == 0)
10255 intel_mnemonic = 1;
10256 else
10257 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
10258 break;
10259
10260 case OPTION_MSYNTAX:
10261 if (strcasecmp (arg, "att") == 0)
10262 intel_syntax = 0;
10263 else if (strcasecmp (arg, "intel") == 0)
10264 intel_syntax = 1;
10265 else
10266 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
10267 break;
10268
10269 case OPTION_MINDEX_REG:
10270 allow_index_reg = 1;
10271 break;
10272
10273 case OPTION_MNAKED_REG:
10274 allow_naked_reg = 1;
10275 break;
10276
10277 case OPTION_MOLD_GCC:
10278 old_gcc = 1;
10279 break;
10280
10281 case OPTION_MSSE2AVX:
10282 sse2avx = 1;
10283 break;
10284
10285 case OPTION_MSSE_CHECK:
10286 if (strcasecmp (arg, "error") == 0)
10287 sse_check = check_error;
10288 else if (strcasecmp (arg, "warning") == 0)
10289 sse_check = check_warning;
10290 else if (strcasecmp (arg, "none") == 0)
10291 sse_check = check_none;
10292 else
10293 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
10294 break;
10295
10296 case OPTION_MOPERAND_CHECK:
10297 if (strcasecmp (arg, "error") == 0)
10298 operand_check = check_error;
10299 else if (strcasecmp (arg, "warning") == 0)
10300 operand_check = check_warning;
10301 else if (strcasecmp (arg, "none") == 0)
10302 operand_check = check_none;
10303 else
10304 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
10305 break;
10306
10307 case OPTION_MAVXSCALAR:
10308 if (strcasecmp (arg, "128") == 0)
10309 avxscalar = vex128;
10310 else if (strcasecmp (arg, "256") == 0)
10311 avxscalar = vex256;
10312 else
10313 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
10314 break;
10315
10316 case OPTION_MADD_BND_PREFIX:
10317 add_bnd_prefix = 1;
10318 break;
10319
10320 case OPTION_MEVEXLIG:
10321 if (strcmp (arg, "128") == 0)
10322 evexlig = evexl128;
10323 else if (strcmp (arg, "256") == 0)
10324 evexlig = evexl256;
10325 else if (strcmp (arg, "512") == 0)
10326 evexlig = evexl512;
10327 else
10328 as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
10329 break;
10330
10331 case OPTION_MEVEXRCIG:
10332 if (strcmp (arg, "rne") == 0)
10333 evexrcig = rne;
10334 else if (strcmp (arg, "rd") == 0)
10335 evexrcig = rd;
10336 else if (strcmp (arg, "ru") == 0)
10337 evexrcig = ru;
10338 else if (strcmp (arg, "rz") == 0)
10339 evexrcig = rz;
10340 else
10341 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg);
10342 break;
10343
10344 case OPTION_MEVEXWIG:
10345 if (strcmp (arg, "0") == 0)
10346 evexwig = evexw0;
10347 else if (strcmp (arg, "1") == 0)
10348 evexwig = evexw1;
10349 else
10350 as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
10351 break;
10352
10353 # if defined (TE_PE) || defined (TE_PEP)
10354 case OPTION_MBIG_OBJ:
10355 use_big_obj = 1;
10356 break;
10357 #endif
10358
10359 case OPTION_MOMIT_LOCK_PREFIX:
10360 if (strcasecmp (arg, "yes") == 0)
10361 omit_lock_prefix = 1;
10362 else if (strcasecmp (arg, "no") == 0)
10363 omit_lock_prefix = 0;
10364 else
10365 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg);
10366 break;
10367
10368 case OPTION_MFENCE_AS_LOCK_ADD:
10369 if (strcasecmp (arg, "yes") == 0)
10370 avoid_fence = 1;
10371 else if (strcasecmp (arg, "no") == 0)
10372 avoid_fence = 0;
10373 else
10374 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg);
10375 break;
10376
10377 case OPTION_MRELAX_RELOCATIONS:
10378 if (strcasecmp (arg, "yes") == 0)
10379 generate_relax_relocations = 1;
10380 else if (strcasecmp (arg, "no") == 0)
10381 generate_relax_relocations = 0;
10382 else
10383 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg);
10384 break;
10385
10386 case OPTION_MAMD64:
10387 intel64 = 0;
10388 break;
10389
10390 case OPTION_MINTEL64:
10391 intel64 = 1;
10392 break;
10393
10394 default:
10395 return 0;
10396 }
10397 return 1;
10398 }
10399
#define MESSAGE_TEMPLATE \
"                                                                                "

/* Append NAME (LEN characters) to the usage line being built in
   MESSAGE.  P is the current write position, START the first column
   names may occupy, and *LEFT_P the space still available.  When NAME
   does not fit, the current line is flushed to STREAM and a fresh one
   is started.  Returns the new write position and updates *LEFT_P.  */

static char *
output_message (FILE *stream, char *p, char *message, char *start,
		int *left_p, const char *name, int len)
{
  /* Reserve room for the separator: ", " between names, or ",\0" at
     the end of a line.  */
  int avail = *left_p - (len + 2);

  if (avail < 0)
    {
      /* NAME does not fit: terminate and print the current line, then
	 restart at START with only this name on the new line.  */
      *p++ = ',';
      *p = '\0';
      fprintf (stream, "%s\n", message);
      p = start;
      avail = sizeof (MESSAGE_TEMPLATE) - (start - message) - len - 2;
      gas_assert (avail >= 0);
    }
  else if (p != start)
    {
      /* Not the first name on this line: emit the separator.  */
      *p++ = ',';
      *p++ = ' ';
    }

  *left_p = avail;
  return mempcpy (p, name, len);
}
10440
10441 static void
10442 show_arch (FILE *stream, int ext, int check)
10443 {
10444 static char message[] = MESSAGE_TEMPLATE;
10445 char *start = message + 27;
10446 char *p;
10447 int size = sizeof (MESSAGE_TEMPLATE);
10448 int left;
10449 const char *name;
10450 int len;
10451 unsigned int j;
10452
10453 p = start;
10454 left = size - (start - message);
10455 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
10456 {
10457 /* Should it be skipped? */
10458 if (cpu_arch [j].skip)
10459 continue;
10460
10461 name = cpu_arch [j].name;
10462 len = cpu_arch [j].len;
10463 if (*name == '.')
10464 {
10465 /* It is an extension. Skip if we aren't asked to show it. */
10466 if (ext)
10467 {
10468 name++;
10469 len--;
10470 }
10471 else
10472 continue;
10473 }
10474 else if (ext)
10475 {
10476 /* It is an processor. Skip if we show only extension. */
10477 continue;
10478 }
10479 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
10480 {
10481 /* It is an impossible processor - skip. */
10482 continue;
10483 }
10484
10485 p = output_message (stream, p, message, start, &left, name, len);
10486 }
10487
10488 /* Display disabled extensions. */
10489 if (ext)
10490 for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
10491 {
10492 name = cpu_noarch [j].name;
10493 len = cpu_noarch [j].len;
10494 p = output_message (stream, p, message, start, &left, name,
10495 len);
10496 }
10497
10498 *p = '\0';
10499 fprintf (stream, "%s\n", message);
10500 }
10501
/* Print the target-specific portion of the assembler's --help output
   to STREAM.  Option availability mirrors the conditionals used when
   building md_longopts[] above.  */
void
md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -Q                      ignored\n\
  -V                      print assembler version number\n\
  -k                      ignored\n"));
#endif
  fprintf (stream, _("\
  -n                      Do not optimize code alignment\n\
  -q                      quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s                      ignored\n"));
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide                do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide                ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
                          generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
                          EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU              optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx               encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning]\n\
                          check SSE instructions\n"));
  fprintf (stream, _("\
  -moperand-check=[none|error|warning]\n\
                          check operand combinations for validity\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexwig=[0|1]         encode EVEX instructions with specific EVEX.W value\n\
                           for EVEX.W bit ignored instructions\n"));
  fprintf (stream, _("\
  -mevexrcig=[rne|rd|ru|rz]\n\
                          encode EVEX instructions with specific EVEX.RC value\n\
                           for SAE-only ignored instructions\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg             support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg             don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -mold-gcc               support old (<= 2.8.1) versions of gcc\n"));
  fprintf (stream, _("\
  -madd-bnd-prefix        add BND prefix for all valid branches\n"));
  fprintf (stream, _("\
  -mshared                disable branch optimization for shared code\n"));
# if defined (TE_PE) || defined (TE_PEP)
  fprintf (stream, _("\
  -mbig-obj               generate big object files\n"));
#endif
  fprintf (stream, _("\
  -momit-lock-prefix=[no|yes]\n\
                          strip all lock prefixes\n"));
  fprintf (stream, _("\
  -mfence-as-lock-add=[no|yes]\n\
                          encode lfence, mfence and sfence as\n\
                           lock addl $0x0, (%%{re}sp)\n"));
  fprintf (stream, _("\
  -mrelax-relocations=[no|yes]\n\
                          generate relax relocations\n"));
  fprintf (stream, _("\
  -mamd64                 accept only AMD64 ISA\n"));
  fprintf (stream, _("\
  -mintel64               accept only Intel64 ISA\n"));
}
10594
#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* Pick the target format to use.  Decodes default_arch (as set by
   --32/--64/--x32 or the configured default), fixes up the code flag
   and, for ELF, x86_elf_abi, then returns the BFD target name matching
   the output flavour.  Called once at startup by gas.  */

const char *
i386_target_format (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      /* "x86_64" is the LP64 ABI; "x86_64:32" selects x32.  */
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else if (!strcmp (default_arch, "iamcu"))
    {
      update_code_flag (CODE_32BIT, 1);
      if (cpu_arch_isa == PROCESSOR_UNKNOWN)
	{
	  /* No -march= was given: default the whole arch state to
	     Intel MCU.  */
	  static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
	  cpu_arch_name = "iamcu";
	  cpu_sub_arch_name = NULL;
	  cpu_arch_flags = iamcu_flags;
	  cpu_arch_isa = PROCESSOR_IAMCU;
	  cpu_arch_isa_flags = iamcu_flags;
	  if (!cpu_arch_tune_set)
	    {
	      cpu_arch_tune = cpu_arch_isa;
	      cpu_arch_tune_flags = cpu_arch_isa_flags;
	    }
	}
      else if (cpu_arch_isa != PROCESSOR_IAMCU)
	as_fatal (_("Intel MCU doesn't support `%s' architecture"),
		  cpu_arch_name);
    }
  else
    as_fatal (_("unknown architecture"));

  /* Fall back to the generic 32/64-bit entry of cpu_arch when no ISA
     or tuning flags have been established yet.  */
  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
      else
	return "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    /* x32 objects are 64-bit but must not carry 64-bit
	       relocations; tc_gen_reloc enforces this.  */
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	/* L1OM/K1OM/IAMCU use their own ELF target vectors with extra
	   ABI constraints.  */
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_IAMCU)
	  {
	    if (x86_elf_abi != I386_ABI)
	      as_fatal (_("Intel MCU is 32bit only"));
	    return ELF_TARGET_IAMCU_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}

#endif /* OBJ_MAYBE_ more than one */
10726 \f
10727 symbolS *
10728 md_undefined_symbol (char *name)
10729 {
10730 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
10731 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
10732 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
10733 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
10734 {
10735 if (!GOT_symbol)
10736 {
10737 if (symbol_find (name))
10738 as_bad (_("GOT already in symbol table"));
10739 GOT_symbol = symbol_new (name, undefined_section,
10740 (valueT) 0, &zero_address_frag);
10741 };
10742 return GOT_symbol;
10743 }
10744 return 0;
10745 }
10746
/* Round up a section size to the appropriate boundary.  */

/* SEGMENT is only consulted for a.out output; for all other formats
   SIZE is returned unchanged and BFD handles alignment itself.  */
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
	 this, BFD will align it for us, but it will not write out the
	 final bytes of the section.  This may be a bug in BFD, but it is
	 easier to fix it here since that is how the other a.out targets
	 work.  */
      int align;

      /* Round SIZE up to a multiple of 2**align.  */
      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
    }
#endif

  return size;
}
10769
10770 /* On the i386, PC-relative offsets are relative to the start of the
10771 next instruction. That is, the address of the offset, plus its
10772 size, since the offset is always the last part of the insn. */
10773
10774 long
10775 md_pcrel_from (fixS *fixP)
10776 {
10777 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
10778 }
10779
10780 #ifndef I386COFF
10781
10782 static void
10783 s_bss (int ignore ATTRIBUTE_UNUSED)
10784 {
10785 int temp;
10786
10787 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10788 if (IS_ELF)
10789 obj_elf_section_change_hook ();
10790 #endif
10791 temp = get_absolute_expression ();
10792 subseg_set (bss_section, (subsegT) temp);
10793 demand_empty_rest_of_line ();
10794 }
10795
10796 #endif
10797
/* Final fixup validation hook.  Rewrites GOT-related fixups: a
   subtraction of the GOT symbol is folded into a GOT-relative
   relocation type (and fx_subsy cleared), and on 32-bit ELF a GOT32
   marked relaxable (fx_tcbit2) is upgraded to GOT32X.  */
void
i386_validate_fix (fixS *fixp)
{
  if (fixp->fx_subsy)
    {
      if (fixp->fx_subsy == GOT_symbol)
	{
	  if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
	    {
	      /* sym - _GLOBAL_OFFSET_TABLE_ as a PC-relative 32-bit
		 value only exists in 64-bit mode (GOTPCREL).  */
	      if (!object_64bit)
		abort ();
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
	      /* fx_tcbit2 marks a relaxable GOT load; fx_tcbit says a
		 REX prefix was present.  */
	      if (fixp->fx_tcbit2)
		fixp->fx_r_type = (fixp->fx_tcbit
				   ? BFD_RELOC_X86_64_REX_GOTPCRELX
				   : BFD_RELOC_X86_64_GOTPCRELX);
	      else
#endif
		fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
	    }
	  else
	    {
	      if (!object_64bit)
		fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
	      else
		fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
	    }
	  /* The GOT symbol is now implied by the reloc type.  */
	  fixp->fx_subsy = 0;
	}
    }
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  else if (!object_64bit)
    {
      if (fixp->fx_r_type == BFD_RELOC_386_GOT32
	  && fixp->fx_tcbit2)
	fixp->fx_r_type = BFD_RELOC_386_GOT32X;
    }
#endif
}
10837
/* Translate the internal fixup FIXP into a BFD relocation for
   SECTION.  Returns a freshly allocated arelent, or NULL when the
   fixup could be fully resolved here (size relocations against
   defined local symbols).  */
arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  switch (fixp->fx_r_type)
    {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case BFD_RELOC_SIZE32:
    case BFD_RELOC_SIZE64:
      if (S_IS_DEFINED (fixp->fx_addsy)
	  && !S_IS_EXTERNAL (fixp->fx_addsy))
	{
	  /* Resolve size relocation against local symbol to size of
	     the symbol plus addend.  */
	  valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
	  if (fixp->fx_r_type == BFD_RELOC_SIZE32
	      && !fits_in_unsigned_long (value))
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("symbol size computation overflow"));
	  fixp->fx_addsy = NULL;
	  fixp->fx_subsy = NULL;
	  md_apply_fix (fixp, (valueT *) &value, NULL);
	  /* No relocation needs to be emitted in this case.  */
	  return NULL;
	}
#endif
      /* Fall through.  */

      /* All of these reloc types pass straight through to BFD.  */
    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_X86_64_GOTPCRELX:
    case BFD_RELOC_X86_64_REX_GOTPCRELX:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOT32X:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	{
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
	  break;
	}
      /* Fall through.  */
    default:
      /* Generic relocations: pick the plain or PC-relative reloc
	 matching the fixup's size.  */
      if (fixp->fx_pcrel)
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32_PCREL;
	      break;
	    case 1: code = BFD_RELOC_8_PCREL; break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4: code = BFD_RELOC_32_PCREL; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64_PCREL; break;
#endif
	    }
	}
      else
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32;
	      break;
	    case 1: code = BFD_RELOC_8; break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64; break;
#endif
	    }
	}
      break;
    }

  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ itself means "offset
     of the GOT from this location", which has its own reloc types.  */
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
	code = BFD_RELOC_386_GOTPC;
      else
	code = BFD_RELOC_X86_64_GOTPC32;
    }
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  rel = XNEW (arelent);
  rel->sym_ptr_ptr = XNEW (asymbol *);
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
      rel->addend = 0;
    }
  /* Use the rela in 64bit mode.  */
  else
    {
      /* x32 output cannot represent 64-bit relocations; diagnose the
	 ones ruled out (disallow_64bit_reloc is set by
	 i386_target_format for the X32 ABI).  */
      if (disallow_64bit_reloc)
	switch (code)
	  {
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
	    break;
	  default:
	    break;
	  }

      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
      else
	switch (code)
	  {
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_GOTPCRELX:
	  case BFD_RELOC_X86_64_REX_GOTPCRELX:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    break;
	  default:
	    rel->addend = (section->vma
			   - fixp->fx_size
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));
	    break;
	  }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}
11058
11059 #include "tc-i386-intel.c"
11060
/* Parse a register name from the input line (for CFI directives) and
   turn it into a DWARF2 register number in EXP.  Temporarily enables
   naked and pseudo registers and makes '.' a register character; the
   previous parser state is restored before returning.  On success EXP
   becomes an O_constant holding the dw2 register number for the
   current code model; otherwise it is marked O_illegal.  */
void
tc_x86_parse_to_dw2regnum (expressionS *exp)
{
  int saved_naked_reg;
  char saved_register_dot;

  /* Save and relax parser state so bare register names parse.  */
  saved_naked_reg = allow_naked_reg;
  allow_naked_reg = 1;
  saved_register_dot = register_chars['.'];
  register_chars['.'] = '.';
  allow_pseudo_reg = 1;
  expression_and_evaluate (exp);
  allow_pseudo_reg = 0;
  /* Restore in reverse order of the changes above.  */
  register_chars['.'] = saved_register_dot;
  allow_naked_reg = saved_naked_reg;

  if (exp->X_op == O_register && exp->X_add_number >= 0)
    {
      if ((addressT) exp->X_add_number < i386_regtab_size)
	{
	  exp->X_op = O_constant;
	  /* flag_code >> 1 indexes the per-code-model dw2 number
	     (32-bit vs 64-bit column).  */
	  exp->X_add_number = i386_regtab[exp->X_add_number]
			      .dw2_regnum[flag_code >> 1];
	}
      else
	exp->X_op = O_illegal;
    }
}
11089
/* Emit the initial CFI instructions for a frame: CFA is at the stack
   pointer, and the return address is stored at the CFA.  The stack
   pointer's DWARF register number is computed once per code model and
   cached.  */
void
tc_x86_frame_initial_instructions (void)
{
  /* Cached SP register number, indexed by code model (32/64).  */
  static unsigned int sp_regno[2];

  if (!sp_regno[flag_code >> 1])
    {
      /* Resolve "esp"/"rsp" through the normal register parser by
	 temporarily redirecting input_line_pointer.  */
      char *saved_input = input_line_pointer;
      char sp[][4] = {"esp", "rsp"};
      expressionS exp;

      input_line_pointer = sp[flag_code >> 1];
      tc_x86_parse_to_dw2regnum (&exp);
      gas_assert (exp.X_op == O_constant);
      sp_regno[flag_code >> 1] = exp.X_add_number;
      input_line_pointer = saved_input;
    }

  cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
  cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
}
11111
/* Return the size in bytes of a DWARF address: 4 under the x32 ABI
   even though the architecture is 64-bit, otherwise derived from the
   output BFD's address width.  */
int
x86_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (x86_elf_abi == X86_64_X32_ABI)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
11121
11122 int
11123 i386_elf_section_type (const char *str, size_t len)
11124 {
11125 if (flag_code == CODE_64BIT
11126 && len == sizeof ("unwind") - 1
11127 && strncmp (str, "unwind", 6) == 0)
11128 return SHT_X86_64_UNWIND;
11129
11130 return -1;
11131 }
11132
#ifdef TE_SOLARIS
/* On Solaris, mark the .eh_frame section SEC with the x86-64 unwind
   section type when assembling 64-bit code.  */
void
i386_solaris_fix_up_eh_frame (segT sec)
{
  if (flag_code == CODE_64BIT)
    elf_section_type (sec) = SHT_X86_64_UNWIND;
}
#endif
11141
11142 #ifdef TE_PE
11143 void
11144 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
11145 {
11146 expressionS exp;
11147
11148 exp.X_op = O_secrel;
11149 exp.X_add_symbol = symbol;
11150 exp.X_add_number = 0;
11151 emit_expr (&exp, size);
11152 }
11153 #endif
11154
11155 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11156 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
11157
11158 bfd_vma
11159 x86_64_section_letter (int letter, const char **ptr_msg)
11160 {
11161 if (flag_code == CODE_64BIT)
11162 {
11163 if (letter == 'l')
11164 return SHF_X86_64_LARGE;
11165
11166 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
11167 }
11168 else
11169 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
11170 return -1;
11171 }
11172
11173 bfd_vma
11174 x86_64_section_word (char *str, size_t len)
11175 {
11176 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
11177 return SHF_X86_64_LARGE;
11178
11179 return -1;
11180 }
11181
/* Handle the .largecomm pseudo-op.  In 64-bit mode the common symbol
   is placed in the large common section (local ones in .lbss);
   otherwise it degrades to a plain .comm with a warning.  */

static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      /* Created lazily on first use and reused across calls.  */
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      if (lbss_section == NULL)
	{
	  flagword applicable;
	  /* subseg_new switches the current (sub)section; remember it
	     so we can switch back afterwards.  */
	  segT seg = now_seg;
	  subsegT subseg = now_subseg;

	  /* The .lbss section is for local .largecomm symbols.  */
	  lbss_section = subseg_new (".lbss", 0);
	  applicable = bfd_applicable_section_flags (stdoutput);
	  bfd_set_section_flags (stdoutput, lbss_section,
				 applicable & SEC_ALLOC);
	  /* Mark it as bss-style: no contents are emitted for it.  */
	  seg_info (lbss_section)->bss = 1;

	  subseg_set (seg, subseg);
	}

      /* Temporarily redirect common/bss placement to the large
	 sections so s_comm_internal puts the symbol there.  */
      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      /* Restore the normal common/bss placement.  */
      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
11221 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */
This page took 0.253771 seconds and 3 git commands to generate.