2009-08-10 Paul Pluzhnikov <ppluzhnikov@google.com>
[deliverable/binutils-gdb.git] / gas / config / tc-i386.c
1/* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23/* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
29
30#include "as.h"
31#include "safe-ctype.h"
32#include "subsegs.h"
33#include "dwarf2dbg.h"
34#include "dw2gencfi.h"
35#include "elf/x86-64.h"
36#include "opcodes/i386-init.h"
37
38#ifndef REGISTER_WARNINGS
39#define REGISTER_WARNINGS 1
40#endif
41
42#ifndef INFER_ADDR_PREFIX
43#define INFER_ADDR_PREFIX 1
44#endif
45
46#ifndef DEFAULT_ARCH
47#define DEFAULT_ARCH "i386"
48#endif
49
50#ifndef INLINE
51#if __GNUC__ >= 2
52#define INLINE __inline__
53#else
54#define INLINE
55#endif
56#endif
57
58/* Prefixes will be emitted in the order defined below.
59 WAIT_PREFIX must be the first prefix since FWAIT is really an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 LOCKREP_PREFIX. */
63#define WAIT_PREFIX 0
64#define SEG_PREFIX 1
65#define ADDR_PREFIX 2
66#define DATA_PREFIX 3
67#define LOCKREP_PREFIX 4
68#define REX_PREFIX 5 /* must come last. */
69#define MAX_PREFIXES 6 /* max prefixes per opcode */
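/* Illustrative note (not part of the original sources): with the ordering
   above, an instruction such as "lock addw %ax, %fs:(%ebx)" assembled in
   32-bit code would have its prefixes emitted as 0x64 (FS segment
   override), 0x66 (operand size), 0xf0 (lock), followed by the opcode
   bytes.  The byte values are the usual IA-32 prefix encodings; the exact
   instruction is only an assumed example.  */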
70
71/* we define the syntax here (modulo base,index,scale syntax) */
72#define REGISTER_PREFIX '%'
73#define IMMEDIATE_PREFIX '$'
74#define ABSOLUTE_PREFIX '*'
75
76/* these are the instruction mnemonic suffixes in AT&T syntax or
77 memory operand size in Intel syntax. */
78#define WORD_MNEM_SUFFIX 'w'
79#define BYTE_MNEM_SUFFIX 'b'
80#define SHORT_MNEM_SUFFIX 's'
81#define LONG_MNEM_SUFFIX 'l'
82#define QWORD_MNEM_SUFFIX 'q'
83#define XMMWORD_MNEM_SUFFIX 'x'
84#define YMMWORD_MNEM_SUFFIX 'y'
85/* Intel Syntax. Use a non-ascii letter since it never appears
86 in instructions. */
87#define LONG_DOUBLE_MNEM_SUFFIX '\1'
88
89#define END_OF_INSN '\0'
90
91/*
92 'templates' is for grouping together 'template' structures for opcodes
93 of the same name. This is only used for storing the insns in the grand
94 ole hash table of insns.
95 The templates themselves start at START and range up to (but not including)
96 END.
97 */
98typedef struct
99{
100 const template *start;
101 const template *end;
102}
103templates;
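/* As a sketch of how this is used (see md_begin below; names here are
   illustrative only): all i386_optab rows sharing a mnemonic, e.g. every
   "mov" template, sit behind a single hash table entry whose payload is
   one of these structures, so a lookup such as

     const templates *t = (const templates *) hash_find (op_hash, "mov");
     const template *p;
     for (p = t->start; p < t->end; p++)
       ... try to match *p against the parsed operands ...

   only has to walk the candidates for that one name.  */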
104
105/* 386 operand encoding bytes: see 386 book for details of this. */
106typedef struct
107{
108 unsigned int regmem; /* codes register or memory operand */
109 unsigned int reg; /* codes register operand (or extended opcode) */
110 unsigned int mode; /* how to interpret regmem & reg */
111}
112modrm_byte;
113
114/* x86-64 extension prefix. */
115typedef int rex_byte;
116
117/* 386 opcode byte to code indirect addressing. */
118typedef struct
119{
120 unsigned base;
121 unsigned index;
122 unsigned scale;
123}
124sib_byte;
125
126/* x86 arch names, types and features */
127typedef struct
128{
129 const char *name; /* arch name */
130 enum processor_type type; /* arch type */
131 i386_cpu_flags flags; /* cpu feature flags */
132}
133arch_entry;
134
135static void set_code_flag (int);
136static void set_16bit_gcc_code_flag (int);
137static void set_intel_syntax (int);
138static void set_intel_mnemonic (int);
139static void set_allow_index_reg (int);
140static void set_sse_check (int);
141static void set_cpu_arch (int);
142#ifdef TE_PE
143static void pe_directive_secrel (int);
144#endif
145static void signed_cons (int);
146static char *output_invalid (int c);
147static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
148 const char *);
149static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
150 const char *);
151static int i386_att_operand (char *);
152static int i386_intel_operand (char *, int);
153static int i386_intel_simplify (expressionS *);
154static int i386_intel_parse_name (const char *, expressionS *);
155static const reg_entry *parse_register (char *, char **);
156static char *parse_insn (char *, char *);
157static char *parse_operands (char *, const char *);
158static void swap_operands (void);
159static void swap_2_operands (int, int);
160static void optimize_imm (void);
161static void optimize_disp (void);
162static const template *match_template (void);
163static int check_string (void);
164static int process_suffix (void);
165static int check_byte_reg (void);
166static int check_long_reg (void);
167static int check_qword_reg (void);
168static int check_word_reg (void);
169static int finalize_imm (void);
170static int process_operands (void);
171static const seg_entry *build_modrm_byte (void);
172static void output_insn (void);
173static void output_imm (fragS *, offsetT);
174static void output_disp (fragS *, offsetT);
175#ifndef I386COFF
176static void s_bss (int);
177#endif
178#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
179static void handle_large_common (int small ATTRIBUTE_UNUSED);
180#endif
181
182static const char *default_arch = DEFAULT_ARCH;
183
184/* VEX prefix. */
185typedef struct
186{
187 /* VEX prefix is either 2 byte or 3 byte. */
188 unsigned char bytes[3];
189 unsigned int length;
190 /* Destination or source register specifier. */
191 const reg_entry *register_specifier;
192} vex_prefix;
193
194/* 'md_assemble ()' gathers together information and puts it into an
195 i386_insn. */
196
197union i386_op
198 {
199 expressionS *disps;
200 expressionS *imms;
201 const reg_entry *regs;
202 };
203
204struct _i386_insn
205 {
206 /* TM holds the template for the insn we're currently assembling. */
207 template tm;
208
209 /* SUFFIX holds the instruction size suffix for byte, word, dword
210 or qword, if given. */
211 char suffix;
212
213 /* OPERANDS gives the number of given operands. */
214 unsigned int operands;
215
216 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
217 of given register, displacement, memory operands and immediate
218 operands. */
219 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
220
221 /* TYPES [i] is the type (see above #defines) which tells us how to
222 use OP[i] for the corresponding operand. */
223 i386_operand_type types[MAX_OPERANDS];
224
225 /* Displacement expression, immediate expression, or register for each
226 operand. */
227 union i386_op op[MAX_OPERANDS];
228
229 /* Flags for operands. */
230 unsigned int flags[MAX_OPERANDS];
231#define Operand_PCrel 1
232
233 /* Relocation type for operand */
234 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
235
236 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
237 the base index byte below. */
238 const reg_entry *base_reg;
239 const reg_entry *index_reg;
240 unsigned int log2_scale_factor;
241
242 /* SEG gives the seg_entries of this insn. They are zero unless
243 explicit segment overrides are given. */
244 const seg_entry *seg[2];
245
246 /* PREFIX holds all the given prefix opcodes (usually null).
247 PREFIXES is the number of prefix opcodes. */
248 unsigned int prefixes;
249 unsigned char prefix[MAX_PREFIXES];
250
251 /* RM and SIB are the modrm byte and the sib byte where the
252 addressing modes of this insn are encoded. */
253 modrm_byte rm;
254 rex_byte rex;
255 sib_byte sib;
256 vex_prefix vex;
257
258 /* Swap operand in encoding. */
259 unsigned int swap_operand : 1;
260 };
261
262typedef struct _i386_insn i386_insn;
263
264/* List of chars besides those in app.c:symbol_chars that can start an
265 operand. Used to prevent the scrubber eating vital white-space. */
266const char extra_symbol_chars[] = "*%-(["
267#ifdef LEX_AT
268 "@"
269#endif
270#ifdef LEX_QM
271 "?"
272#endif
273 ;
274
275#if (defined (TE_I386AIX) \
276 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
277 && !defined (TE_GNU) \
278 && !defined (TE_LINUX) \
279 && !defined (TE_NETWARE) \
280 && !defined (TE_FreeBSD) \
281 && !defined (TE_NetBSD)))
282/* This array holds the chars that always start a comment. If the
283 pre-processor is disabled, these aren't very useful. The option
284 --divide will remove '/' from this list. */
285const char *i386_comment_chars = "#/";
286#define SVR4_COMMENT_CHARS 1
287#define PREFIX_SEPARATOR '\\'
288
289#else
290const char *i386_comment_chars = "#";
291#define PREFIX_SEPARATOR '/'
292#endif
293
294/* This array holds the chars that only start a comment at the beginning of
295 a line. If the line seems to have the form '# 123 filename'
296 .line and .file directives will appear in the pre-processed output.
297 Note that input_file.c hand checks for '#' at the beginning of the
298 first line of the input file. This is because the compiler outputs
299 #NO_APP at the beginning of its output.
300 Also note that comments started like this one will always work if
301 '/' isn't otherwise defined. */
302const char line_comment_chars[] = "#/";
303
304const char line_separator_chars[] = ";";
305
306/* Chars that can be used to separate mant from exp in floating point
307 nums. */
308const char EXP_CHARS[] = "eE";
309
310/* Chars that mean this number is a floating point constant
311 As in 0f12.456
312 or 0d1.2345e12. */
313const char FLT_CHARS[] = "fFdDxX";
314
315/* Tables for lexical analysis. */
316static char mnemonic_chars[256];
317static char register_chars[256];
318static char operand_chars[256];
319static char identifier_chars[256];
320static char digit_chars[256];
321
322/* Lexical macros. */
323#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
324#define is_operand_char(x) (operand_chars[(unsigned char) x])
325#define is_register_char(x) (register_chars[(unsigned char) x])
326#define is_space_char(x) ((x) == ' ')
327#define is_identifier_char(x) (identifier_chars[(unsigned char) x])
328#define is_digit_char(x) (digit_chars[(unsigned char) x])
329
330/* All non-digit non-letter characters that may occur in an operand. */
331static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
332
333/* md_assemble() always leaves the strings it's passed unaltered. To
334 effect this we maintain a stack of saved characters that we've smashed
335 with '\0's (indicating end of strings for various sub-fields of the
336 assembler instruction). */
337static char save_stack[32];
338static char *save_stack_p;
339#define END_STRING_AND_SAVE(s) \
340 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
341#define RESTORE_END_STRING(s) \
342 do { *(s) = *--save_stack_p; } while (0)
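/* A minimal usage sketch (illustrative only; the real callers live in
   parse_insn and the operand parsers): to look up a sub-field that is
   terminated by something other than a NUL, the terminating character is
   saved, smashed, and later restored:

     END_STRING_AND_SAVE (l);      /- *l pushed on save_stack, set to '\0' -/
     ... hash_find (op_hash, token_start) ...
     RESTORE_END_STRING (l);       /- original character put back -/

   The 32-entry stack bounds how many such terminations may be outstanding
   at once.  */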
343
344/* The instruction we're assembling. */
345static i386_insn i;
346
347/* Possible templates for current insn. */
348static const templates *current_templates;
349
350/* Per instruction expressionS buffers: max displacements & immediates. */
351static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
352static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
353
354/* Current operand we are working on. */
355static int this_operand = -1;
356
357/* We support four different modes. FLAG_CODE variable is used to distinguish
358 these. */
359
360enum flag_code {
361 CODE_32BIT,
362 CODE_16BIT,
363 CODE_64BIT };
364
365static enum flag_code flag_code;
366static unsigned int object_64bit;
367static int use_rela_relocations = 0;
368
369/* The names used to print error messages. */
370static const char *flag_code_names[] =
371 {
372 "32",
373 "16",
374 "64"
375 };
376
377/* 1 for intel syntax,
378 0 if att syntax. */
379static int intel_syntax = 0;
380
381/* 1 for intel mnemonic,
382 0 if att mnemonic. */
383static int intel_mnemonic = !SYSV386_COMPAT;
384
385/* 1 if support old (<= 2.8.1) versions of gcc. */
386static int old_gcc = OLDGCC_COMPAT;
387
388/* 1 if pseudo registers are permitted. */
389static int allow_pseudo_reg = 0;
390
391/* 1 if register prefix % not required. */
392static int allow_naked_reg = 0;
393
394/* 1 if pseudo index register, eiz/riz, is allowed. */
395static int allow_index_reg = 0;
396
397static enum
398 {
399 sse_check_none = 0,
400 sse_check_warning,
401 sse_check_error
402 }
403sse_check;
404
405/* Register prefix used for error message. */
406static const char *register_prefix = "%";
407
408/* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
409 leave, push, and pop instructions so that gcc has the same stack
410 frame as in 32 bit mode. */
411static char stackop_size = '\0';
412
413/* Non-zero to optimize code alignment. */
414int optimize_align_code = 1;
415
416/* Non-zero to quieten some warnings. */
417static int quiet_warnings = 0;
418
419/* CPU name. */
420static const char *cpu_arch_name = NULL;
421static char *cpu_sub_arch_name = NULL;
422
423/* CPU feature flags. */
424static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
425
426/* If we have selected a cpu we are generating instructions for. */
427static int cpu_arch_tune_set = 0;
428
429/* Cpu we are generating instructions for. */
430enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
431
432/* CPU feature flags of cpu we are generating instructions for. */
433static i386_cpu_flags cpu_arch_tune_flags;
434
435/* CPU instruction set architecture used. */
436enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
437
438/* CPU feature flags of instruction set architecture used. */
439i386_cpu_flags cpu_arch_isa_flags;
440
441/* If set, conditional jumps are not automatically promoted to handle
442 larger than a byte offset. */
443static unsigned int no_cond_jump_promotion = 0;
444
445/* Encode SSE instructions with VEX prefix. */
446static unsigned int sse2avx;
447
448/* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
449static symbolS *GOT_symbol;
450
451/* The dwarf2 return column, adjusted for 32 or 64 bit. */
452unsigned int x86_dwarf2_return_column;
453
454/* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
455int x86_cie_data_alignment;
456
457/* Interface to relax_segment.
458 There are 3 major relax states for 386 jump insns because the
459 different types of jumps add different sizes to frags when we're
460 figuring out what sort of jump to choose to reach a given label. */
461
462/* Types. */
463#define UNCOND_JUMP 0
464#define COND_JUMP 1
465#define COND_JUMP86 2
466
467/* Sizes. */
468#define CODE16 1
469#define SMALL 0
470#define SMALL16 (SMALL | CODE16)
471#define BIG 2
472#define BIG16 (BIG | CODE16)
473
474#ifndef INLINE
475#ifdef __GNUC__
476#define INLINE __inline__
477#else
478#define INLINE
479#endif
480#endif
481
482#define ENCODE_RELAX_STATE(type, size) \
483 ((relax_substateT) (((type) << 2) | (size)))
484#define TYPE_FROM_RELAX_STATE(s) \
485 ((s) >> 2)
486#define DISP_SIZE_FROM_RELAX_STATE(s) \
487 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
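/* Worked example of the encoding above: ENCODE_RELAX_STATE (COND_JUMP, BIG16)
   is (1 << 2) | (2 | 1) = 7; TYPE_FROM_RELAX_STATE (7) recovers 1, i.e.
   COND_JUMP, and DISP_SIZE_FROM_RELAX_STATE (7) yields 2, the size in bytes
   of the 16-bit displacement.  */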
488
489/* This table is used by relax_frag to promote short jumps to long
490 ones where necessary. SMALL (short) jumps may be promoted to BIG
491 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
492 don't allow a short jump in a 32 bit code segment to be promoted to
493 a 16 bit offset jump because it's slower (requires data size
494 prefix), and doesn't work, unless the destination is in the bottom
495 64k of the code segment (The top 16 bits of eip are zeroed). */
496
497const relax_typeS md_relax_table[] =
498{
499 /* The fields are:
500 1) most positive reach of this state,
501 2) most negative reach of this state,
502 3) how many bytes this mode will have in the variable part of the frag
503 4) which index into the table to try if we can't fit into this one. */
504
505 /* UNCOND_JUMP states. */
506 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
507 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
508 /* dword jmp adds 4 bytes to frag:
509 0 extra opcode bytes, 4 displacement bytes. */
510 {0, 0, 4, 0},
511 word jmp adds 2 bytes to frag:
512 0 extra opcode bytes, 2 displacement bytes. */
513 {0, 0, 2, 0},
514
515 /* COND_JUMP states. */
516 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
517 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
518 dword conditionals add 5 bytes to frag:
519 1 extra opcode byte, 4 displacement bytes. */
520 {0, 0, 5, 0},
521 /* word conditionals add 3 bytes to frag:
522 1 extra opcode byte, 2 displacement bytes. */
523 {0, 0, 3, 0},
524
525 /* COND_JUMP86 states. */
526 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
527 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
528 dword conditionals add 5 bytes to frag:
529 1 extra opcode byte, 4 displacement bytes. */
530 {0, 0, 5, 0},
531 /* word conditionals add 4 bytes to frag:
532 1 displacement byte and a 3 byte long branch insn. */
533 {0, 0, 4, 0}
534};
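/* To illustrate how relax_frag walks this table: a 32-bit unconditional
   jump frag starts in state ENCODE_RELAX_STATE (UNCOND_JUMP, SMALL) = 0,
   which reaches -127..+128 bytes with a 1-byte displacement; if the target
   is out of that range, the fourth field sends it to state
   ENCODE_RELAX_STATE (UNCOND_JUMP, BIG) = 2, whose entry {0, 0, 4, 0}
   grows the frag by a 4-byte displacement.  */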
535
536static const arch_entry cpu_arch[] =
537{
538 { "generic32", PROCESSOR_GENERIC32,
539 CPU_GENERIC32_FLAGS },
540 { "generic64", PROCESSOR_GENERIC64,
541 CPU_GENERIC64_FLAGS },
542 { "i8086", PROCESSOR_UNKNOWN,
543 CPU_NONE_FLAGS },
544 { "i186", PROCESSOR_UNKNOWN,
545 CPU_I186_FLAGS },
546 { "i286", PROCESSOR_UNKNOWN,
547 CPU_I286_FLAGS },
548 { "i386", PROCESSOR_I386,
549 CPU_I386_FLAGS },
550 { "i486", PROCESSOR_I486,
551 CPU_I486_FLAGS },
552 { "i586", PROCESSOR_PENTIUM,
553 CPU_I586_FLAGS },
554 { "i686", PROCESSOR_PENTIUMPRO,
555 CPU_I686_FLAGS },
556 { "pentium", PROCESSOR_PENTIUM,
557 CPU_I586_FLAGS },
558 { "pentiumpro", PROCESSOR_PENTIUMPRO,
559 CPU_I686_FLAGS },
560 { "pentiumii", PROCESSOR_PENTIUMPRO,
561 CPU_P2_FLAGS },
562 { "pentiumiii",PROCESSOR_PENTIUMPRO,
563 CPU_P3_FLAGS },
564 { "pentium4", PROCESSOR_PENTIUM4,
565 CPU_P4_FLAGS },
566 { "prescott", PROCESSOR_NOCONA,
567 CPU_CORE_FLAGS },
568 { "nocona", PROCESSOR_NOCONA,
569 CPU_NOCONA_FLAGS },
570 { "yonah", PROCESSOR_CORE,
571 CPU_CORE_FLAGS },
572 { "core", PROCESSOR_CORE,
573 CPU_CORE_FLAGS },
574 { "merom", PROCESSOR_CORE2,
575 CPU_CORE2_FLAGS },
576 { "core2", PROCESSOR_CORE2,
577 CPU_CORE2_FLAGS },
578 { "corei7", PROCESSOR_COREI7,
579 CPU_COREI7_FLAGS },
580 { "l1om", PROCESSOR_GENERIC64,
581 CPU_L1OM_FLAGS },
582 { "k6", PROCESSOR_K6,
583 CPU_K6_FLAGS },
584 { "k6_2", PROCESSOR_K6,
585 CPU_K6_2_FLAGS },
586 { "athlon", PROCESSOR_ATHLON,
587 CPU_ATHLON_FLAGS },
588 { "sledgehammer", PROCESSOR_K8,
589 CPU_K8_FLAGS },
590 { "opteron", PROCESSOR_K8,
591 CPU_K8_FLAGS },
592 { "k8", PROCESSOR_K8,
593 CPU_K8_FLAGS },
594 { "amdfam10", PROCESSOR_AMDFAM10,
595 CPU_AMDFAM10_FLAGS },
596 { ".8087", PROCESSOR_UNKNOWN,
597 CPU_8087_FLAGS },
598 { ".287", PROCESSOR_UNKNOWN,
599 CPU_287_FLAGS },
600 { ".387", PROCESSOR_UNKNOWN,
601 CPU_387_FLAGS },
602 { ".no87", PROCESSOR_UNKNOWN,
603 CPU_ANY87_FLAGS },
604 { ".mmx", PROCESSOR_UNKNOWN,
605 CPU_MMX_FLAGS },
606 { ".nommx", PROCESSOR_UNKNOWN,
607 CPU_3DNOWA_FLAGS },
608 { ".sse", PROCESSOR_UNKNOWN,
609 CPU_SSE_FLAGS },
610 { ".sse2", PROCESSOR_UNKNOWN,
611 CPU_SSE2_FLAGS },
612 { ".sse3", PROCESSOR_UNKNOWN,
613 CPU_SSE3_FLAGS },
614 { ".ssse3", PROCESSOR_UNKNOWN,
615 CPU_SSSE3_FLAGS },
616 { ".sse4.1", PROCESSOR_UNKNOWN,
617 CPU_SSE4_1_FLAGS },
618 { ".sse4.2", PROCESSOR_UNKNOWN,
619 CPU_SSE4_2_FLAGS },
620 { ".sse4", PROCESSOR_UNKNOWN,
621 CPU_SSE4_2_FLAGS },
622 { ".nosse", PROCESSOR_UNKNOWN,
623 CPU_ANY_SSE_FLAGS },
624 { ".avx", PROCESSOR_UNKNOWN,
625 CPU_AVX_FLAGS },
626 { ".noavx", PROCESSOR_UNKNOWN,
627 CPU_ANY_AVX_FLAGS },
628 { ".vmx", PROCESSOR_UNKNOWN,
629 CPU_VMX_FLAGS },
630 { ".smx", PROCESSOR_UNKNOWN,
631 CPU_SMX_FLAGS },
632 { ".xsave", PROCESSOR_UNKNOWN,
633 CPU_XSAVE_FLAGS },
634 { ".aes", PROCESSOR_UNKNOWN,
635 CPU_AES_FLAGS },
636 { ".pclmul", PROCESSOR_UNKNOWN,
637 CPU_PCLMUL_FLAGS },
638 { ".clmul", PROCESSOR_UNKNOWN,
639 CPU_PCLMUL_FLAGS },
640 { ".fma", PROCESSOR_UNKNOWN,
641 CPU_FMA_FLAGS },
642 { ".fma4", PROCESSOR_UNKNOWN,
643 CPU_FMA4_FLAGS },
644 { ".movbe", PROCESSOR_UNKNOWN,
645 CPU_MOVBE_FLAGS },
646 { ".ept", PROCESSOR_UNKNOWN,
647 CPU_EPT_FLAGS },
648 { ".clflush", PROCESSOR_UNKNOWN,
649 CPU_CLFLUSH_FLAGS },
650 { ".syscall", PROCESSOR_UNKNOWN,
651 CPU_SYSCALL_FLAGS },
652 { ".rdtscp", PROCESSOR_UNKNOWN,
653 CPU_RDTSCP_FLAGS },
654 { ".3dnow", PROCESSOR_UNKNOWN,
655 CPU_3DNOW_FLAGS },
656 { ".3dnowa", PROCESSOR_UNKNOWN,
657 CPU_3DNOWA_FLAGS },
658 { ".padlock", PROCESSOR_UNKNOWN,
659 CPU_PADLOCK_FLAGS },
660 { ".pacifica", PROCESSOR_UNKNOWN,
661 CPU_SVME_FLAGS },
662 { ".svme", PROCESSOR_UNKNOWN,
663 CPU_SVME_FLAGS },
664 { ".sse4a", PROCESSOR_UNKNOWN,
665 CPU_SSE4A_FLAGS },
666 { ".abm", PROCESSOR_UNKNOWN,
667 CPU_ABM_FLAGS },
668};
669
670#ifdef I386COFF
671/* Like s_lcomm_internal in gas/read.c but the alignment string
672 is allowed to be optional. */
673
674static symbolS *
675pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
676{
677 addressT align = 0;
678
679 SKIP_WHITESPACE ();
680
681 if (needs_align
682 && *input_line_pointer == ',')
683 {
684 align = parse_align (needs_align - 1);
685
686 if (align == (addressT) -1)
687 return NULL;
688 }
689 else
690 {
691 if (size >= 8)
692 align = 3;
693 else if (size >= 4)
694 align = 2;
695 else if (size >= 2)
696 align = 1;
697 else
698 align = 0;
699 }
700
701 bss_alloc (symbolP, size, align);
702 return symbolP;
703}
704
705static void
706pe_lcomm (int needs_align)
707{
708 s_comm_internal (needs_align * 2, pe_lcomm_internal);
709}
710#endif
711
712const pseudo_typeS md_pseudo_table[] =
713{
714#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
715 {"align", s_align_bytes, 0},
716#else
717 {"align", s_align_ptwo, 0},
718#endif
719 {"arch", set_cpu_arch, 0},
720#ifndef I386COFF
721 {"bss", s_bss, 0},
722#else
723 {"lcomm", pe_lcomm, 1},
724#endif
725 {"ffloat", float_cons, 'f'},
726 {"dfloat", float_cons, 'd'},
727 {"tfloat", float_cons, 'x'},
728 {"value", cons, 2},
729 {"slong", signed_cons, 4},
730 {"noopt", s_ignore, 0},
731 {"optim", s_ignore, 0},
732 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
733 {"code16", set_code_flag, CODE_16BIT},
734 {"code32", set_code_flag, CODE_32BIT},
735 {"code64", set_code_flag, CODE_64BIT},
736 {"intel_syntax", set_intel_syntax, 1},
737 {"att_syntax", set_intel_syntax, 0},
738 {"intel_mnemonic", set_intel_mnemonic, 1},
739 {"att_mnemonic", set_intel_mnemonic, 0},
740 {"allow_index_reg", set_allow_index_reg, 1},
741 {"disallow_index_reg", set_allow_index_reg, 0},
742 {"sse_check", set_sse_check, 0},
743#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
744 {"largecomm", handle_large_common, 0},
745#else
746 {"file", (void (*) (int)) dwarf2_directive_file, 0},
747 {"loc", dwarf2_directive_loc, 0},
748 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
749#endif
750#ifdef TE_PE
751 {"secrel32", pe_directive_secrel, 0},
752#endif
753 {0, 0, 0}
754};
755
756/* For interface with expression (). */
757extern char *input_line_pointer;
758
759/* Hash table for instruction mnemonic lookup. */
760static struct hash_control *op_hash;
761
762/* Hash table for register lookup. */
763static struct hash_control *reg_hash;
764\f
765void
766i386_align_code (fragS *fragP, int count)
767{
768 /* Various efficient no-op patterns for aligning code labels.
769 Note: Don't try to assemble the instructions in the comments.
770 0L and 0w are not legal. */
771 static const char f32_1[] =
772 {0x90}; /* nop */
773 static const char f32_2[] =
774 {0x66,0x90}; /* xchg %ax,%ax */
775 static const char f32_3[] =
776 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
777 static const char f32_4[] =
778 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
779 static const char f32_5[] =
780 {0x90, /* nop */
781 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
782 static const char f32_6[] =
783 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
784 static const char f32_7[] =
785 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
786 static const char f32_8[] =
787 {0x90, /* nop */
788 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
789 static const char f32_9[] =
790 {0x89,0xf6, /* movl %esi,%esi */
791 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
792 static const char f32_10[] =
793 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
794 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
795 static const char f32_11[] =
796 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
797 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
798 static const char f32_12[] =
799 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
800 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
801 static const char f32_13[] =
802 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
803 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
804 static const char f32_14[] =
805 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
806 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
807 static const char f16_3[] =
808 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
809 static const char f16_4[] =
810 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
811 static const char f16_5[] =
812 {0x90, /* nop */
813 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
814 static const char f16_6[] =
815 {0x89,0xf6, /* mov %si,%si */
816 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
817 static const char f16_7[] =
818 {0x8d,0x74,0x00, /* lea 0(%si),%si */
819 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
820 static const char f16_8[] =
821 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
822 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
823 static const char jump_31[] =
824 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
825 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
826 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
827 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
828 static const char *const f32_patt[] = {
829 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
830 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
831 };
832 static const char *const f16_patt[] = {
833 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
834 };
835 /* nopl (%[re]ax) */
836 static const char alt_3[] =
837 {0x0f,0x1f,0x00};
838 /* nopl 0(%[re]ax) */
839 static const char alt_4[] =
840 {0x0f,0x1f,0x40,0x00};
841 /* nopl 0(%[re]ax,%[re]ax,1) */
842 static const char alt_5[] =
843 {0x0f,0x1f,0x44,0x00,0x00};
844 /* nopw 0(%[re]ax,%[re]ax,1) */
845 static const char alt_6[] =
846 {0x66,0x0f,0x1f,0x44,0x00,0x00};
847 /* nopl 0L(%[re]ax) */
848 static const char alt_7[] =
849 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
850 /* nopl 0L(%[re]ax,%[re]ax,1) */
851 static const char alt_8[] =
852 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
853 /* nopw 0L(%[re]ax,%[re]ax,1) */
854 static const char alt_9[] =
855 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
856 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
857 static const char alt_10[] =
858 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
859 /* data16
860 nopw %cs:0L(%[re]ax,%[re]ax,1) */
861 static const char alt_long_11[] =
862 {0x66,
863 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
864 /* data16
865 data16
866 nopw %cs:0L(%[re]ax,%[re]ax,1) */
867 static const char alt_long_12[] =
868 {0x66,
869 0x66,
870 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
871 /* data16
872 data16
873 data16
874 nopw %cs:0L(%[re]ax,%[re]ax,1) */
875 static const char alt_long_13[] =
876 {0x66,
877 0x66,
878 0x66,
879 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
880 /* data16
881 data16
882 data16
883 data16
884 nopw %cs:0L(%[re]ax,%[re]ax,1) */
885 static const char alt_long_14[] =
886 {0x66,
887 0x66,
888 0x66,
889 0x66,
890 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
891 /* data16
892 data16
893 data16
894 data16
895 data16
896 nopw %cs:0L(%[re]ax,%[re]ax,1) */
897 static const char alt_long_15[] =
898 {0x66,
899 0x66,
900 0x66,
901 0x66,
902 0x66,
903 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
904 /* nopl 0(%[re]ax,%[re]ax,1)
905 nopw 0(%[re]ax,%[re]ax,1) */
906 static const char alt_short_11[] =
907 {0x0f,0x1f,0x44,0x00,0x00,
908 0x66,0x0f,0x1f,0x44,0x00,0x00};
909 /* nopw 0(%[re]ax,%[re]ax,1)
910 nopw 0(%[re]ax,%[re]ax,1) */
911 static const char alt_short_12[] =
912 {0x66,0x0f,0x1f,0x44,0x00,0x00,
913 0x66,0x0f,0x1f,0x44,0x00,0x00};
914 /* nopw 0(%[re]ax,%[re]ax,1)
915 nopl 0L(%[re]ax) */
916 static const char alt_short_13[] =
917 {0x66,0x0f,0x1f,0x44,0x00,0x00,
918 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
919 /* nopl 0L(%[re]ax)
920 nopl 0L(%[re]ax) */
921 static const char alt_short_14[] =
922 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
923 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
924 /* nopl 0L(%[re]ax)
925 nopl 0L(%[re]ax,%[re]ax,1) */
926 static const char alt_short_15[] =
927 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
928 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
929 static const char *const alt_short_patt[] = {
930 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
931 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
932 alt_short_14, alt_short_15
933 };
934 static const char *const alt_long_patt[] = {
935 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
936 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
937 alt_long_14, alt_long_15
938 };
939
940 /* Only align for at least a positive non-zero boundary. */
941 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
942 return;
943
944 /* We need to decide which NOP sequence to use for 32bit and
945 64bit. When -mtune= is used:
946
947 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
948 PROCESSOR_GENERIC32, f32_patt will be used.
949 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
950 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
951 PROCESSOR_GENERIC64, alt_long_patt will be used.
952 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
953 PROCESSOR_AMDFAM10, alt_short_patt will be used.
954
955 When -mtune= isn't used, alt_long_patt will be used if
956 cpu_arch_isa_flags has Cpu686. Otherwise, f32_patt will
957 be used.
958
959 When -march= or .arch is used, we can't use anything beyond
960 cpu_arch_isa_flags. */
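/* For example (derived from the pattern tables above): filling 8 bytes of
   padding on a Cpu686 target copies alt_long_patt[7], the 8-byte
   "nopl 0L(%[re]ax,%[re]ax,1)" sequence, whereas the same request under
   -mtune=i386 copies f32_patt[7], the nop plus 7-byte leal pair.  */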
961
962 if (flag_code == CODE_16BIT)
963 {
964 if (count > 8)
965 {
966 memcpy (fragP->fr_literal + fragP->fr_fix,
967 jump_31, count);
968 /* Adjust jump offset. */
969 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
970 }
971 else
972 memcpy (fragP->fr_literal + fragP->fr_fix,
973 f16_patt[count - 1], count);
974 }
975 else
976 {
977 const char *const *patt = NULL;
978
979 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
980 {
981 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
982 switch (cpu_arch_tune)
983 {
984 case PROCESSOR_UNKNOWN:
985 /* We use cpu_arch_isa_flags to check if we SHOULD
986 optimize for Cpu686. */
987 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
988 patt = alt_long_patt;
989 else
990 patt = f32_patt;
991 break;
992 case PROCESSOR_PENTIUMPRO:
993 case PROCESSOR_PENTIUM4:
994 case PROCESSOR_NOCONA:
995 case PROCESSOR_CORE:
996 case PROCESSOR_CORE2:
997 case PROCESSOR_COREI7:
998 case PROCESSOR_GENERIC64:
999 patt = alt_long_patt;
1000 break;
1001 case PROCESSOR_K6:
1002 case PROCESSOR_ATHLON:
1003 case PROCESSOR_K8:
1004 case PROCESSOR_AMDFAM10:
1005 patt = alt_short_patt;
1006 break;
1007 case PROCESSOR_I386:
1008 case PROCESSOR_I486:
1009 case PROCESSOR_PENTIUM:
1010 case PROCESSOR_GENERIC32:
1011 patt = f32_patt;
1012 break;
1013 }
1014 }
1015 else
1016 {
1017 switch (fragP->tc_frag_data.tune)
1018 {
1019 case PROCESSOR_UNKNOWN:
1020 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1021 PROCESSOR_UNKNOWN. */
1022 abort ();
1023 break;
1024
1025 case PROCESSOR_I386:
1026 case PROCESSOR_I486:
1027 case PROCESSOR_PENTIUM:
1028 case PROCESSOR_K6:
1029 case PROCESSOR_ATHLON:
1030 case PROCESSOR_K8:
1031 case PROCESSOR_AMDFAM10:
1032 case PROCESSOR_GENERIC32:
1033 /* We use cpu_arch_isa_flags to check if we CAN optimize
1034 for Cpu686. */
1035 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
1036 patt = alt_short_patt;
1037 else
1038 patt = f32_patt;
1039 break;
1040 case PROCESSOR_PENTIUMPRO:
1041 case PROCESSOR_PENTIUM4:
1042 case PROCESSOR_NOCONA:
1043 case PROCESSOR_CORE:
1044 case PROCESSOR_CORE2:
1045 case PROCESSOR_COREI7:
1046 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
1047 patt = alt_long_patt;
1048 else
1049 patt = f32_patt;
1050 break;
1051 case PROCESSOR_GENERIC64:
1052 patt = alt_long_patt;
1053 break;
1054 }
1055 }
1056
1057 if (patt == f32_patt)
1058 {
1059 /* If the padding is less than 15 bytes, we use the normal
1060 ones. Otherwise, we use a jump instruction and adjust
1061 its offset. */
1062 int limit;
1063
1064 /* For 64bit, the limit is 3 bytes. */
1065 if (flag_code == CODE_64BIT
1066 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1067 limit = 3;
1068 else
1069 limit = 15;
1070 if (count < limit)
1071 memcpy (fragP->fr_literal + fragP->fr_fix,
1072 patt[count - 1], count);
1073 else
1074 {
1075 memcpy (fragP->fr_literal + fragP->fr_fix,
1076 jump_31, count);
1077 /* Adjust jump offset. */
1078 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1079 }
1080 }
1081 else
1082 {
1083 /* Maximum length of an instruction is 15 bytes. If the
1084 padding is greater than 15 bytes and we don't use a jump,
1085 we have to break it into smaller pieces. */
1086 int padding = count;
1087 while (padding > 15)
1088 {
1089 padding -= 15;
1090 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1091 patt [14], 15);
1092 }
1093
1094 if (padding)
1095 memcpy (fragP->fr_literal + fragP->fr_fix,
1096 patt [padding - 1], padding);
1097 }
1098 }
1099 fragP->fr_var = count;
1100}
1101
1102static INLINE int
1103operand_type_all_zero (const union i386_operand_type *x)
1104{
1105 switch (ARRAY_SIZE(x->array))
1106 {
1107 case 3:
1108 if (x->array[2])
1109 return 0;
1110 case 2:
1111 if (x->array[1])
1112 return 0;
1113 case 1:
1114 return !x->array[0];
1115 default:
1116 abort ();
1117 }
1118}
1119
1120static INLINE void
1121operand_type_set (union i386_operand_type *x, unsigned int v)
1122{
1123 switch (ARRAY_SIZE(x->array))
1124 {
1125 case 3:
1126 x->array[2] = v;
1127 case 2:
1128 x->array[1] = v;
1129 case 1:
1130 x->array[0] = v;
1131 break;
1132 default:
1133 abort ();
1134 }
1135}
1136
1137static INLINE int
1138operand_type_equal (const union i386_operand_type *x,
1139 const union i386_operand_type *y)
1140{
1141 switch (ARRAY_SIZE(x->array))
1142 {
1143 case 3:
1144 if (x->array[2] != y->array[2])
1145 return 0;
1146 case 2:
1147 if (x->array[1] != y->array[1])
1148 return 0;
1149 case 1:
1150 return x->array[0] == y->array[0];
1151 break;
1152 default:
1153 abort ();
1154 }
1155}
1156
1157static INLINE int
1158cpu_flags_all_zero (const union i386_cpu_flags *x)
1159{
1160 switch (ARRAY_SIZE(x->array))
1161 {
1162 case 3:
1163 if (x->array[2])
1164 return 0;
1165 case 2:
1166 if (x->array[1])
1167 return 0;
1168 case 1:
1169 return !x->array[0];
1170 default:
1171 abort ();
1172 }
1173}
1174
1175static INLINE void
1176cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1177{
1178 switch (ARRAY_SIZE(x->array))
1179 {
1180 case 3:
1181 x->array[2] = v;
1182 case 2:
1183 x->array[1] = v;
1184 case 1:
1185 x->array[0] = v;
1186 break;
1187 default:
1188 abort ();
1189 }
1190}
1191
1192static INLINE int
1193cpu_flags_equal (const union i386_cpu_flags *x,
1194 const union i386_cpu_flags *y)
1195{
1196 switch (ARRAY_SIZE(x->array))
1197 {
1198 case 3:
1199 if (x->array[2] != y->array[2])
1200 return 0;
1201 case 2:
1202 if (x->array[1] != y->array[1])
1203 return 0;
1204 case 1:
1205 return x->array[0] == y->array[0];
1206 break;
1207 default:
1208 abort ();
1209 }
1210}
1211
1212static INLINE int
1213cpu_flags_check_cpu64 (i386_cpu_flags f)
1214{
1215 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1216 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1217}
1218
1219static INLINE i386_cpu_flags
1220cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1221{
1222 switch (ARRAY_SIZE (x.array))
1223 {
1224 case 3:
1225 x.array [2] &= y.array [2];
1226 case 2:
1227 x.array [1] &= y.array [1];
1228 case 1:
1229 x.array [0] &= y.array [0];
1230 break;
1231 default:
1232 abort ();
1233 }
1234 return x;
1235}
1236
1237static INLINE i386_cpu_flags
1238cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1239{
1240 switch (ARRAY_SIZE (x.array))
1241 {
1242 case 3:
1243 x.array [2] |= y.array [2];
1244 case 2:
1245 x.array [1] |= y.array [1];
1246 case 1:
1247 x.array [0] |= y.array [0];
1248 break;
1249 default:
1250 abort ();
1251 }
1252 return x;
1253}
1254
1255static INLINE i386_cpu_flags
1256cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1257{
1258 switch (ARRAY_SIZE (x.array))
1259 {
1260 case 3:
1261 x.array [2] &= ~y.array [2];
1262 case 2:
1263 x.array [1] &= ~y.array [1];
1264 case 1:
1265 x.array [0] &= ~y.array [0];
1266 break;
1267 default:
1268 abort ();
1269 }
1270 return x;
1271}
1272
1273#define CPU_FLAGS_ARCH_MATCH 0x1
1274#define CPU_FLAGS_64BIT_MATCH 0x2
1275#define CPU_FLAGS_AES_MATCH 0x4
1276#define CPU_FLAGS_PCLMUL_MATCH 0x8
1277#define CPU_FLAGS_AVX_MATCH 0x10
1278
1279#define CPU_FLAGS_32BIT_MATCH \
1280 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1281 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1282#define CPU_FLAGS_PERFECT_MATCH \
1283 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1284
1285/* Return CPU flags match bits. */
1286
1287static int
1288cpu_flags_match (const template *t)
1289{
1290 i386_cpu_flags x = t->cpu_flags;
1291 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1292
1293 x.bitfield.cpu64 = 0;
1294 x.bitfield.cpuno64 = 0;
1295
1296 if (cpu_flags_all_zero (&x))
1297 {
1298 /* This instruction is available on all archs. */
1299 match |= CPU_FLAGS_32BIT_MATCH;
1300 }
1301 else
1302 {
1303 /* This instruction is available only on some archs. */
1304 i386_cpu_flags cpu = cpu_arch_flags;
1305
1306 cpu.bitfield.cpu64 = 0;
1307 cpu.bitfield.cpuno64 = 0;
1308 cpu = cpu_flags_and (x, cpu);
1309 if (!cpu_flags_all_zero (&cpu))
1310 {
1311 if (x.bitfield.cpuavx)
1312 {
1313 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1314 if (cpu.bitfield.cpuavx)
1315 {
1316 /* Check SSE2AVX. */
1317 if (!t->opcode_modifier.sse2avx || sse2avx)
1318 {
1319 match |= (CPU_FLAGS_ARCH_MATCH
1320 | CPU_FLAGS_AVX_MATCH);
1321 /* Check AES. */
1322 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1323 match |= CPU_FLAGS_AES_MATCH;
1324 /* Check PCLMUL. */
1325 if (!x.bitfield.cpupclmul
1326 || cpu.bitfield.cpupclmul)
1327 match |= CPU_FLAGS_PCLMUL_MATCH;
1328 }
1329 }
1330 else
1331 match |= CPU_FLAGS_ARCH_MATCH;
1332 }
1333 else
1334 match |= CPU_FLAGS_32BIT_MATCH;
1335 }
1336 }
1337 return match;
1338}
1339
1340static INLINE i386_operand_type
1341operand_type_and (i386_operand_type x, i386_operand_type y)
1342{
1343 switch (ARRAY_SIZE (x.array))
1344 {
1345 case 3:
1346 x.array [2] &= y.array [2];
1347 case 2:
1348 x.array [1] &= y.array [1];
1349 case 1:
1350 x.array [0] &= y.array [0];
1351 break;
1352 default:
1353 abort ();
1354 }
1355 return x;
1356}
1357
1358static INLINE i386_operand_type
1359operand_type_or (i386_operand_type x, i386_operand_type y)
1360{
1361 switch (ARRAY_SIZE (x.array))
1362 {
1363 case 3:
1364 x.array [2] |= y.array [2];
1365 case 2:
1366 x.array [1] |= y.array [1];
1367 case 1:
1368 x.array [0] |= y.array [0];
1369 break;
1370 default:
1371 abort ();
1372 }
1373 return x;
1374}
1375
1376static INLINE i386_operand_type
1377operand_type_xor (i386_operand_type x, i386_operand_type y)
1378{
1379 switch (ARRAY_SIZE (x.array))
1380 {
1381 case 3:
1382 x.array [2] ^= y.array [2];
1383 case 2:
1384 x.array [1] ^= y.array [1];
1385 case 1:
1386 x.array [0] ^= y.array [0];
1387 break;
1388 default:
1389 abort ();
1390 }
1391 return x;
1392}
1393
1394static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1395static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1396static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1397static const i386_operand_type inoutportreg
1398 = OPERAND_TYPE_INOUTPORTREG;
1399static const i386_operand_type reg16_inoutportreg
1400 = OPERAND_TYPE_REG16_INOUTPORTREG;
1401static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1402static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1403static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1404static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1405static const i386_operand_type anydisp
1406 = OPERAND_TYPE_ANYDISP;
1407static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1408static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1409static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1410static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1411static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1412static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1413static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1414static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1415static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1416static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1417static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1418
1419enum operand_type
1420{
1421 reg,
1422 imm,
1423 disp,
1424 anymem
1425};
1426
1427static INLINE int
1428operand_type_check (i386_operand_type t, enum operand_type c)
1429{
1430 switch (c)
1431 {
1432 case reg:
1433 return (t.bitfield.reg8
1434 || t.bitfield.reg16
1435 || t.bitfield.reg32
1436 || t.bitfield.reg64);
1437
1438 case imm:
1439 return (t.bitfield.imm8
1440 || t.bitfield.imm8s
1441 || t.bitfield.imm16
1442 || t.bitfield.imm32
1443 || t.bitfield.imm32s
1444 || t.bitfield.imm64);
1445
1446 case disp:
1447 return (t.bitfield.disp8
1448 || t.bitfield.disp16
1449 || t.bitfield.disp32
1450 || t.bitfield.disp32s
1451 || t.bitfield.disp64);
1452
1453 case anymem:
1454 return (t.bitfield.disp8
1455 || t.bitfield.disp16
1456 || t.bitfield.disp32
1457 || t.bitfield.disp32s
1458 || t.bitfield.disp64
1459 || t.bitfield.baseindex);
1460
1461 default:
1462 abort ();
1463 }
1464
1465 return 0;
1466}
1467
1468/* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1469 operand J for instruction template T. */
1470
1471static INLINE int
1472match_reg_size (const template *t, unsigned int j)
1473{
1474 return !((i.types[j].bitfield.byte
1475 && !t->operand_types[j].bitfield.byte)
1476 || (i.types[j].bitfield.word
1477 && !t->operand_types[j].bitfield.word)
1478 || (i.types[j].bitfield.dword
1479 && !t->operand_types[j].bitfield.dword)
1480 || (i.types[j].bitfield.qword
1481 && !t->operand_types[j].bitfield.qword));
1482}
1483
1484/* Return 1 if there is no conflict in any size on operand J for
1485 instruction template T. */
1486
1487static INLINE int
1488match_mem_size (const template *t, unsigned int j)
1489{
1490 return (match_reg_size (t, j)
1491 && !((i.types[j].bitfield.unspecified
1492 && !t->operand_types[j].bitfield.unspecified)
1493 || (i.types[j].bitfield.fword
1494 && !t->operand_types[j].bitfield.fword)
1495 || (i.types[j].bitfield.tbyte
1496 && !t->operand_types[j].bitfield.tbyte)
1497 || (i.types[j].bitfield.xmmword
1498 && !t->operand_types[j].bitfield.xmmword)
1499 || (i.types[j].bitfield.ymmword
1500 && !t->operand_types[j].bitfield.ymmword)));
1501}
1502
1503/* Return 1 if there is no size conflict on any operands for
1504 instruction template T. */
1505
1506static INLINE int
1507operand_size_match (const template *t)
1508{
1509 unsigned int j;
1510 int match = 1;
1511
1512 /* Don't check jump instructions. */
1513 if (t->opcode_modifier.jump
1514 || t->opcode_modifier.jumpbyte
1515 || t->opcode_modifier.jumpdword
1516 || t->opcode_modifier.jumpintersegment)
1517 return match;
1518
1519 /* Check memory and accumulator operand size. */
1520 for (j = 0; j < i.operands; j++)
1521 {
1522 if (t->operand_types[j].bitfield.anysize)
1523 continue;
1524
1525 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1526 {
1527 match = 0;
1528 break;
1529 }
1530
1531 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1532 {
1533 match = 0;
1534 break;
1535 }
1536 }
1537
1538 if (match
1539 || (!t->opcode_modifier.d && !t->opcode_modifier.floatd))
1540 return match;
1541
1542 /* Check reverse. */
1543 gas_assert (i.operands == 2);
1544
1545 match = 1;
1546 for (j = 0; j < 2; j++)
1547 {
1548 if (t->operand_types[j].bitfield.acc
1549 && !match_reg_size (t, j ? 0 : 1))
1550 {
1551 match = 0;
1552 break;
1553 }
1554
1555 if (i.types[j].bitfield.mem
1556 && !match_mem_size (t, j ? 0 : 1))
1557 {
1558 match = 0;
1559 break;
1560 }
1561 }
1562
1563 return match;
1564}
1565
1566static INLINE int
1567operand_type_match (i386_operand_type overlap,
1568 i386_operand_type given)
1569{
1570 i386_operand_type temp = overlap;
1571
1572 temp.bitfield.jumpabsolute = 0;
1573 temp.bitfield.unspecified = 0;
1574 temp.bitfield.byte = 0;
1575 temp.bitfield.word = 0;
1576 temp.bitfield.dword = 0;
1577 temp.bitfield.fword = 0;
1578 temp.bitfield.qword = 0;
1579 temp.bitfield.tbyte = 0;
1580 temp.bitfield.xmmword = 0;
1581 temp.bitfield.ymmword = 0;
1582 if (operand_type_all_zero (&temp))
1583 return 0;
1584
1585 return (given.bitfield.baseindex == overlap.bitfield.baseindex
1586 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute);
1587}
1588
1589/* If given types g0 and g1 are registers they must be of the same type
1590 unless the expected operand type register overlap is null.
1591 Note that Acc in a template matches every size of reg. */
1592
1593static INLINE int
1594operand_type_register_match (i386_operand_type m0,
1595 i386_operand_type g0,
1596 i386_operand_type t0,
1597 i386_operand_type m1,
1598 i386_operand_type g1,
1599 i386_operand_type t1)
1600{
1601 if (!operand_type_check (g0, reg))
1602 return 1;
1603
1604 if (!operand_type_check (g1, reg))
1605 return 1;
1606
1607 if (g0.bitfield.reg8 == g1.bitfield.reg8
1608 && g0.bitfield.reg16 == g1.bitfield.reg16
1609 && g0.bitfield.reg32 == g1.bitfield.reg32
1610 && g0.bitfield.reg64 == g1.bitfield.reg64)
1611 return 1;
1612
1613 if (m0.bitfield.acc)
1614 {
1615 t0.bitfield.reg8 = 1;
1616 t0.bitfield.reg16 = 1;
1617 t0.bitfield.reg32 = 1;
1618 t0.bitfield.reg64 = 1;
1619 }
1620
1621 if (m1.bitfield.acc)
1622 {
1623 t1.bitfield.reg8 = 1;
1624 t1.bitfield.reg16 = 1;
1625 t1.bitfield.reg32 = 1;
1626 t1.bitfield.reg64 = 1;
1627 }
1628
1629 return (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1630 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1631 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1632 && !(t0.bitfield.reg64 & t1.bitfield.reg64));
1633}
1634
1635static INLINE unsigned int
1636mode_from_disp_size (i386_operand_type t)
1637{
1638 if (t.bitfield.disp8)
1639 return 1;
1640 else if (t.bitfield.disp16
1641 || t.bitfield.disp32
1642 || t.bitfield.disp32s)
1643 return 2;
1644 else
1645 return 0;
1646}
1647
1648static INLINE int
1649fits_in_signed_byte (offsetT num)
1650{
1651 return (num >= -128) && (num <= 127);
1652}
1653
1654static INLINE int
1655fits_in_unsigned_byte (offsetT num)
1656{
1657 return (num & 0xff) == num;
1658}
1659
1660static INLINE int
1661fits_in_unsigned_word (offsetT num)
1662{
1663 return (num & 0xffff) == num;
1664}
1665
1666static INLINE int
1667fits_in_signed_word (offsetT num)
1668{
1669 return (-32768 <= num) && (num <= 32767);
1670}
1671
1672static INLINE int
1673fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1674{
1675#ifndef BFD64
1676 return 1;
1677#else
1678 return (!(((offsetT) -1 << 31) & num)
1679 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1680#endif
1681} /* fits_in_signed_long() */
1682
1683static INLINE int
1684fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1685{
1686#ifndef BFD64
1687 return 1;
1688#else
1689 return (num & (((offsetT) 2 << 31) - 1)) == num;
1690#endif
1691} /* fits_in_unsigned_long() */
1692
1693static i386_operand_type
1694smallest_imm_type (offsetT num)
1695{
1696 i386_operand_type t;
1697
1698 operand_type_set (&t, 0);
1699 t.bitfield.imm64 = 1;
1700
1701 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1702 {
1703 /* This code is disabled on the 486 because all the Imm1 forms
1704 in the opcode table are slower on the i486. They're the
1705 versions with the implicitly specified single-position
1706 displacement, which has another syntax if you really want to
1707 use that form. */
1708 t.bitfield.imm1 = 1;
1709 t.bitfield.imm8 = 1;
1710 t.bitfield.imm8s = 1;
1711 t.bitfield.imm16 = 1;
1712 t.bitfield.imm32 = 1;
1713 t.bitfield.imm32s = 1;
1714 }
1715 else if (fits_in_signed_byte (num))
1716 {
1717 t.bitfield.imm8 = 1;
1718 t.bitfield.imm8s = 1;
1719 t.bitfield.imm16 = 1;
1720 t.bitfield.imm32 = 1;
1721 t.bitfield.imm32s = 1;
1722 }
1723 else if (fits_in_unsigned_byte (num))
1724 {
1725 t.bitfield.imm8 = 1;
1726 t.bitfield.imm16 = 1;
1727 t.bitfield.imm32 = 1;
1728 t.bitfield.imm32s = 1;
1729 }
1730 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1731 {
1732 t.bitfield.imm16 = 1;
1733 t.bitfield.imm32 = 1;
1734 t.bitfield.imm32s = 1;
1735 }
1736 else if (fits_in_signed_long (num))
1737 {
1738 t.bitfield.imm32 = 1;
1739 t.bitfield.imm32s = 1;
1740 }
1741 else if (fits_in_unsigned_long (num))
1742 t.bitfield.imm32 = 1;
1743
1744 return t;
1745}
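/* Worked example: smallest_imm_type (200) leaves imm64 set and, since 200
   fits in an unsigned but not a signed byte, additionally sets imm8, imm16,
   imm32 and imm32s, while imm8s and imm1 stay clear.  */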
1746
1747static offsetT
1748offset_in_range (offsetT val, int size)
1749{
1750 addressT mask;
1751
1752 switch (size)
1753 {
1754 case 1: mask = ((addressT) 1 << 8) - 1; break;
1755 case 2: mask = ((addressT) 1 << 16) - 1; break;
1756 case 4: mask = ((addressT) 2 << 31) - 1; break;
1757#ifdef BFD64
1758 case 8: mask = ((addressT) 2 << 63) - 1; break;
1759#endif
1760 default: abort ();
1761 }
1762
1763 /* If BFD64, sign extend val. */
1764 if (!use_rela_relocations)
1765 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1766 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1767
1768 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1769 {
1770 char buf1[40], buf2[40];
1771
1772 sprint_value (buf1, val);
1773 sprint_value (buf2, val & mask);
1774 as_warn (_("%s shortened to %s"), buf1, buf2);
1775 }
1776 return val & mask;
1777}
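/* Worked example: offset_in_range (0x12345, 1) masks the value down to a
   single byte; because the discarded bits are neither all zero nor all one
   it also warns that the value was shortened, and it returns 0x45.  */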
1778
1779/* Returns 0 if attempting to add a prefix where one from the same
1780 class already exists, 1 if non rep/repne added, 2 if rep/repne
1781 added. */
1782static int
1783add_prefix (unsigned int prefix)
1784{
1785 int ret = 1;
1786 unsigned int q;
1787
1788 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1789 && flag_code == CODE_64BIT)
1790 {
1791 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1792 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1793 && (prefix & (REX_R | REX_X | REX_B))))
1794 ret = 0;
1795 q = REX_PREFIX;
1796 }
1797 else
1798 {
1799 switch (prefix)
1800 {
1801 default:
1802 abort ();
1803
1804 case CS_PREFIX_OPCODE:
1805 case DS_PREFIX_OPCODE:
1806 case ES_PREFIX_OPCODE:
1807 case FS_PREFIX_OPCODE:
1808 case GS_PREFIX_OPCODE:
1809 case SS_PREFIX_OPCODE:
1810 q = SEG_PREFIX;
1811 break;
1812
1813 case REPNE_PREFIX_OPCODE:
1814 case REPE_PREFIX_OPCODE:
1815 ret = 2;
1816 /* fall thru */
1817 case LOCK_PREFIX_OPCODE:
1818 q = LOCKREP_PREFIX;
1819 break;
1820
1821 case FWAIT_OPCODE:
1822 q = WAIT_PREFIX;
1823 break;
1824
1825 case ADDR_PREFIX_OPCODE:
1826 q = ADDR_PREFIX;
1827 break;
1828
1829 case DATA_PREFIX_OPCODE:
1830 q = DATA_PREFIX;
1831 break;
1832 }
1833 if (i.prefix[q] != 0)
1834 ret = 0;
1835 }
1836
1837 if (ret)
1838 {
1839 if (!i.prefix[q])
1840 ++i.prefixes;
1841 i.prefix[q] |= prefix;
1842 }
1843 else
1844 as_bad (_("same type of prefix used twice"));
1845
1846 return ret;
1847}
1848
1849static void
1850set_code_flag (int value)
1851{
1852 flag_code = value;
1853 if (flag_code == CODE_64BIT)
1854 {
1855 cpu_arch_flags.bitfield.cpu64 = 1;
1856 cpu_arch_flags.bitfield.cpuno64 = 0;
1857 }
1858 else
1859 {
1860 cpu_arch_flags.bitfield.cpu64 = 0;
1861 cpu_arch_flags.bitfield.cpuno64 = 1;
1862 }
1863 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
1864 {
1865 as_bad (_("64bit mode not supported on this CPU."));
1866 }
1867 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
1868 {
1869 as_bad (_("32bit mode not supported on this CPU."));
1870 }
1871 stackop_size = '\0';
1872}
1873
1874static void
1875set_16bit_gcc_code_flag (int new_code_flag)
1876{
1877 flag_code = new_code_flag;
1878 if (flag_code != CODE_16BIT)
1879 abort ();
1880 cpu_arch_flags.bitfield.cpu64 = 0;
1881 cpu_arch_flags.bitfield.cpuno64 = 1;
1882 stackop_size = LONG_MNEM_SUFFIX;
1883}
1884
1885static void
1886set_intel_syntax (int syntax_flag)
1887{
1888 /* Find out if register prefixing is specified. */
1889 int ask_naked_reg = 0;
1890
1891 SKIP_WHITESPACE ();
1892 if (!is_end_of_line[(unsigned char) *input_line_pointer])
1893 {
1894 char *string = input_line_pointer;
1895 int e = get_symbol_end ();
1896
1897 if (strcmp (string, "prefix") == 0)
1898 ask_naked_reg = 1;
1899 else if (strcmp (string, "noprefix") == 0)
1900 ask_naked_reg = -1;
1901 else
1902 as_bad (_("bad argument to syntax directive."));
1903 *input_line_pointer = e;
1904 }
1905 demand_empty_rest_of_line ();
1906
1907 intel_syntax = syntax_flag;
1908
1909 if (ask_naked_reg == 0)
1910 allow_naked_reg = (intel_syntax
1911 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
1912 else
1913 allow_naked_reg = (ask_naked_reg < 0);
1914
1915 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
1916
1917 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
1918 identifier_chars['$'] = intel_syntax ? '$' : 0;
1919 register_prefix = allow_naked_reg ? "" : "%";
1920}
1921
1922static void
1923set_intel_mnemonic (int mnemonic_flag)
1924{
1925 intel_mnemonic = mnemonic_flag;
1926}
1927
1928static void
1929set_allow_index_reg (int flag)
1930{
1931 allow_index_reg = flag;
1932}
1933
1934static void
1935set_sse_check (int dummy ATTRIBUTE_UNUSED)
1936{
1937 SKIP_WHITESPACE ();
1938
1939 if (!is_end_of_line[(unsigned char) *input_line_pointer])
1940 {
1941 char *string = input_line_pointer;
1942 int e = get_symbol_end ();
1943
1944 if (strcmp (string, "none") == 0)
1945 sse_check = sse_check_none;
1946 else if (strcmp (string, "warning") == 0)
1947 sse_check = sse_check_warning;
1948 else if (strcmp (string, "error") == 0)
1949 sse_check = sse_check_error;
1950 else
1951 as_bad (_("bad argument to sse_check directive."));
1952 *input_line_pointer = e;
1953 }
1954 else
1955 as_bad (_("missing argument for sse_check directive"));
1956
1957 demand_empty_rest_of_line ();
1958}
1959
1960static void
1961check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
1962 i386_cpu_flags new ATTRIBUTE_UNUSED)
1963{
1964#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1965 static const char *arch;
1966
1967 /* Intel L1OM is only supported on ELF. */
1968 if (!IS_ELF)
1969 return;
1970
1971 if (!arch)
1972 {
1973 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
1974 use default_arch. */
1975 arch = cpu_arch_name;
1976 if (!arch)
1977 arch = default_arch;
1978 }
1979
1980 /* If we are targeting Intel L1OM, we must enable it. */
1981 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
1982 || new.bitfield.cpul1om)
1983 return;
1984
1985 as_bad (_("`%s' is not supported on `%s'"), name, arch);
1986#endif
1987}
1988
1989static void
1990set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
1991{
1992 SKIP_WHITESPACE ();
1993
1994 if (!is_end_of_line[(unsigned char) *input_line_pointer])
1995 {
1996 char *string = input_line_pointer;
1997 int e = get_symbol_end ();
1998 unsigned int i;
1999 i386_cpu_flags flags;
2000
2001 for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
2002 {
2003 if (strcmp (string, cpu_arch[i].name) == 0)
2004 {
2005 check_cpu_arch_compatible (string, cpu_arch[i].flags);
2006
2007 if (*string != '.')
2008 {
2009 cpu_arch_name = cpu_arch[i].name;
2010 cpu_sub_arch_name = NULL;
2011 cpu_arch_flags = cpu_arch[i].flags;
2012 if (flag_code == CODE_64BIT)
2013 {
2014 cpu_arch_flags.bitfield.cpu64 = 1;
2015 cpu_arch_flags.bitfield.cpuno64 = 0;
2016 }
2017 else
2018 {
2019 cpu_arch_flags.bitfield.cpu64 = 0;
2020 cpu_arch_flags.bitfield.cpuno64 = 1;
2021 }
2022 cpu_arch_isa = cpu_arch[i].type;
2023 cpu_arch_isa_flags = cpu_arch[i].flags;
2024 if (!cpu_arch_tune_set)
2025 {
2026 cpu_arch_tune = cpu_arch_isa;
2027 cpu_arch_tune_flags = cpu_arch_isa_flags;
2028 }
2029 break;
2030 }
2031
2032 if (strncmp (string + 1, "no", 2))
2033 flags = cpu_flags_or (cpu_arch_flags,
2034 cpu_arch[i].flags);
2035 else
2036 flags = cpu_flags_and_not (cpu_arch_flags,
2037 cpu_arch[i].flags);
2038 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2039 {
2040 if (cpu_sub_arch_name)
2041 {
2042 char *name = cpu_sub_arch_name;
2043 cpu_sub_arch_name = concat (name,
2044 cpu_arch[i].name,
2045 (const char *) NULL);
2046 free (name);
2047 }
2048 else
2049 cpu_sub_arch_name = xstrdup (cpu_arch[i].name);
2050 cpu_arch_flags = flags;
2051 }
2052 *input_line_pointer = e;
2053 demand_empty_rest_of_line ();
2054 return;
2055 }
2056 }
2057 if (i >= ARRAY_SIZE (cpu_arch))
2058 as_bad (_("no such architecture: `%s'"), string);
2059
2060 *input_line_pointer = e;
2061 }
2062 else
2063 as_bad (_("missing cpu architecture"));
2064
2065 no_cond_jump_promotion = 0;
2066 if (*input_line_pointer == ','
2067 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2068 {
2069 char *string = ++input_line_pointer;
2070 int e = get_symbol_end ();
2071
2072 if (strcmp (string, "nojumps") == 0)
2073 no_cond_jump_promotion = 1;
2074 else if (strcmp (string, "jumps") == 0)
2075 ;
2076 else
2077 as_bad (_("no such architecture modifier: `%s'"), string);
2078
2079 *input_line_pointer = e;
2080 }
2081
2082 demand_empty_rest_of_line ();
2083}
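/* Examples of the .arch directive handled above:

       .arch i686,nojumps   # select a base architecture and disable
                            # promotion of out-of-range conditional jumps
       .arch .sse4.1        # names starting with '.' extend the current
                            # architecture with an ISA subset

   The optional ",nojumps" / ",jumps" modifier sets no_cond_jump_promotion.  */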
2084
2085enum bfd_architecture
2086i386_arch (void)
2087{
2088 if (cpu_arch_isa_flags.bitfield.cpul1om)
2089 {
2090 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2091 || flag_code != CODE_64BIT)
2092 as_fatal (_("Intel L1OM is 64bit ELF only"));
2093 return bfd_arch_l1om;
2094 }
2095 else
2096 return bfd_arch_i386;
2097}
2098
2099unsigned long
2100i386_mach ()
2101{
2102 if (!strcmp (default_arch, "x86_64"))
2103 {
2104 if (cpu_arch_isa_flags.bitfield.cpul1om)
2105 {
2106 if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
2107 as_fatal (_("Intel L1OM is 64bit ELF only"));
2108 return bfd_mach_l1om;
2109 }
2110 else
2111 return bfd_mach_x86_64;
2112 }
2113 else if (!strcmp (default_arch, "i386"))
2114 return bfd_mach_i386_i386;
2115 else
2116 as_fatal (_("Unknown architecture"));
2117}
2118\f
2119void
2120md_begin ()
2121{
2122 const char *hash_err;
2123
2124 /* Initialize op_hash hash table. */
2125 op_hash = hash_new ();
2126
2127 {
2128 const template *optab;
2129 templates *core_optab;
2130
2131 /* Setup for loop. */
2132 optab = i386_optab;
2133 core_optab = (templates *) xmalloc (sizeof (templates));
2134 core_optab->start = optab;
2135
2136 while (1)
2137 {
2138 ++optab;
2139 if (optab->name == NULL
2140 || strcmp (optab->name, (optab - 1)->name) != 0)
2141 {
2142 /* different name --> ship out current template list;
2143 add to hash table; & begin anew. */
2144 core_optab->end = optab;
2145 hash_err = hash_insert (op_hash,
2146 (optab - 1)->name,
2147 (void *) core_optab);
2148 if (hash_err)
2149 {
2150 as_fatal (_("Internal Error: Can't hash %s: %s"),
2151 (optab - 1)->name,
2152 hash_err);
2153 }
2154 if (optab->name == NULL)
2155 break;
2156 core_optab = (templates *) xmalloc (sizeof (templates));
2157 core_optab->start = optab;
2158 }
2159 }
2160 }
2161
2162 /* Initialize reg_hash hash table. */
2163 reg_hash = hash_new ();
2164 {
2165 const reg_entry *regtab;
2166 unsigned int regtab_size = i386_regtab_size;
2167
2168 for (regtab = i386_regtab; regtab_size--; regtab++)
2169 {
2170 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2171 if (hash_err)
2172 as_fatal (_("Internal Error: Can't hash %s: %s"),
2173 regtab->reg_name,
2174 hash_err);
2175 }
2176 }
2177
2178 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2179 {
2180 int c;
2181 char *p;
2182
2183 for (c = 0; c < 256; c++)
2184 {
2185 if (ISDIGIT (c))
2186 {
2187 digit_chars[c] = c;
2188 mnemonic_chars[c] = c;
2189 register_chars[c] = c;
2190 operand_chars[c] = c;
2191 }
2192 else if (ISLOWER (c))
2193 {
2194 mnemonic_chars[c] = c;
2195 register_chars[c] = c;
2196 operand_chars[c] = c;
2197 }
2198 else if (ISUPPER (c))
2199 {
2200 mnemonic_chars[c] = TOLOWER (c);
2201 register_chars[c] = mnemonic_chars[c];
2202 operand_chars[c] = c;
2203 }
2204
2205 if (ISALPHA (c) || ISDIGIT (c))
2206 identifier_chars[c] = c;
2207 else if (c >= 128)
2208 {
2209 identifier_chars[c] = c;
2210 operand_chars[c] = c;
2211 }
2212 }
2213
2214#ifdef LEX_AT
2215 identifier_chars['@'] = '@';
2216#endif
2217#ifdef LEX_QM
2218 identifier_chars['?'] = '?';
2219 operand_chars['?'] = '?';
2220#endif
2221 digit_chars['-'] = '-';
2222 mnemonic_chars['_'] = '_';
2223 mnemonic_chars['-'] = '-';
2224 mnemonic_chars['.'] = '.';
2225 identifier_chars['_'] = '_';
2226 identifier_chars['.'] = '.';
2227
2228 for (p = operand_special_chars; *p != '\0'; p++)
2229 operand_chars[(unsigned char) *p] = *p;
2230 }
2231
2232#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2233 if (IS_ELF)
2234 {
2235 record_alignment (text_section, 2);
2236 record_alignment (data_section, 2);
2237 record_alignment (bss_section, 2);
2238 }
2239#endif
2240
2241 if (flag_code == CODE_64BIT)
2242 {
2243 x86_dwarf2_return_column = 16;
2244 x86_cie_data_alignment = -8;
2245 }
2246 else
2247 {
2248 x86_dwarf2_return_column = 8;
2249 x86_cie_data_alignment = -4;
2250 }
2251}
2252
2253void
2254i386_print_statistics (FILE *file)
2255{
2256 hash_print_statistics (file, "i386 opcode", op_hash);
2257 hash_print_statistics (file, "i386 register", reg_hash);
2258}
2259\f
2260#ifdef DEBUG386
2261
2262/* Debugging routines for md_assemble. */
2263static void pte (template *);
2264static void pt (i386_operand_type);
2265static void pe (expressionS *);
2266static void ps (symbolS *);
2267
2268static void
2269pi (char *line, i386_insn *x)
2270{
2271 unsigned int i;
2272
2273 fprintf (stdout, "%s: template ", line);
2274 pte (&x->tm);
2275 fprintf (stdout, " address: base %s index %s scale %x\n",
2276 x->base_reg ? x->base_reg->reg_name : "none",
2277 x->index_reg ? x->index_reg->reg_name : "none",
2278 x->log2_scale_factor);
2279 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2280 x->rm.mode, x->rm.reg, x->rm.regmem);
2281 fprintf (stdout, " sib: base %x index %x scale %x\n",
2282 x->sib.base, x->sib.index, x->sib.scale);
2283 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2284 (x->rex & REX_W) != 0,
2285 (x->rex & REX_R) != 0,
2286 (x->rex & REX_X) != 0,
2287 (x->rex & REX_B) != 0);
2288 for (i = 0; i < x->operands; i++)
2289 {
2290 fprintf (stdout, " #%d: ", i + 1);
2291 pt (x->types[i]);
2292 fprintf (stdout, "\n");
2293 if (x->types[i].bitfield.reg8
2294 || x->types[i].bitfield.reg16
2295 || x->types[i].bitfield.reg32
2296 || x->types[i].bitfield.reg64
2297 || x->types[i].bitfield.regmmx
2298 || x->types[i].bitfield.regxmm
2299 || x->types[i].bitfield.regymm
2300 || x->types[i].bitfield.sreg2
2301 || x->types[i].bitfield.sreg3
2302 || x->types[i].bitfield.control
2303 || x->types[i].bitfield.debug
2304 || x->types[i].bitfield.test)
2305 fprintf (stdout, "%s\n", x->op[i].regs->reg_name);
2306 if (operand_type_check (x->types[i], imm))
2307 pe (x->op[i].imms);
2308 if (operand_type_check (x->types[i], disp))
2309 pe (x->op[i].disps);
2310 }
2311}
2312
2313static void
2314pte (template *t)
2315{
2316 unsigned int i;
2317 fprintf (stdout, " %d operands ", t->operands);
2318 fprintf (stdout, "opcode %x ", t->base_opcode);
2319 if (t->extension_opcode != None)
2320 fprintf (stdout, "ext %x ", t->extension_opcode);
2321 if (t->opcode_modifier.d)
2322 fprintf (stdout, "D");
2323 if (t->opcode_modifier.w)
2324 fprintf (stdout, "W");
2325 fprintf (stdout, "\n");
2326 for (i = 0; i < t->operands; i++)
2327 {
2328 fprintf (stdout, " #%d type ", i + 1);
2329 pt (t->operand_types[i]);
2330 fprintf (stdout, "\n");
2331 }
2332}
2333
2334static void
2335pe (expressionS *e)
2336{
2337 fprintf (stdout, " operation %d\n", e->X_op);
2338 fprintf (stdout, " add_number %ld (%lx)\n",
2339 (long) e->X_add_number, (long) e->X_add_number);
2340 if (e->X_add_symbol)
2341 {
2342 fprintf (stdout, " add_symbol ");
2343 ps (e->X_add_symbol);
2344 fprintf (stdout, "\n");
2345 }
2346 if (e->X_op_symbol)
2347 {
2348 fprintf (stdout, " op_symbol ");
2349 ps (e->X_op_symbol);
2350 fprintf (stdout, "\n");
2351 }
2352}
2353
2354static void
2355ps (symbolS *s)
2356{
2357 fprintf (stdout, "%s type %s%s",
2358 S_GET_NAME (s),
2359 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2360 segment_name (S_GET_SEGMENT (s)));
2361}
2362
2363static struct type_name
2364 {
2365 i386_operand_type mask;
2366 const char *name;
2367 }
2368const type_names[] =
2369{
2370 { OPERAND_TYPE_REG8, "r8" },
2371 { OPERAND_TYPE_REG16, "r16" },
2372 { OPERAND_TYPE_REG32, "r32" },
2373 { OPERAND_TYPE_REG64, "r64" },
2374 { OPERAND_TYPE_IMM8, "i8" },
2375 { OPERAND_TYPE_IMM8S, "i8s" },
2376 { OPERAND_TYPE_IMM16, "i16" },
2377 { OPERAND_TYPE_IMM32, "i32" },
2378 { OPERAND_TYPE_IMM32S, "i32s" },
2379 { OPERAND_TYPE_IMM64, "i64" },
2380 { OPERAND_TYPE_IMM1, "i1" },
2381 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2382 { OPERAND_TYPE_DISP8, "d8" },
2383 { OPERAND_TYPE_DISP16, "d16" },
2384 { OPERAND_TYPE_DISP32, "d32" },
2385 { OPERAND_TYPE_DISP32S, "d32s" },
2386 { OPERAND_TYPE_DISP64, "d64" },
2387 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2388 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2389 { OPERAND_TYPE_CONTROL, "control reg" },
2390 { OPERAND_TYPE_TEST, "test reg" },
2391 { OPERAND_TYPE_DEBUG, "debug reg" },
2392 { OPERAND_TYPE_FLOATREG, "FReg" },
2393 { OPERAND_TYPE_FLOATACC, "FAcc" },
2394 { OPERAND_TYPE_SREG2, "SReg2" },
2395 { OPERAND_TYPE_SREG3, "SReg3" },
2396 { OPERAND_TYPE_ACC, "Acc" },
2397 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2398 { OPERAND_TYPE_REGMMX, "rMMX" },
2399 { OPERAND_TYPE_REGXMM, "rXMM" },
2400 { OPERAND_TYPE_REGYMM, "rYMM" },
2401 { OPERAND_TYPE_ESSEG, "es" },
2402};
2403
2404static void
2405pt (i386_operand_type t)
2406{
2407 unsigned int j;
2408 i386_operand_type a;
2409
2410 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2411 {
2412 a = operand_type_and (t, type_names[j].mask);
2413 if (!operand_type_all_zero (&a))
2414 fprintf (stdout, "%s, ", type_names[j].name);
2415 }
2416 fflush (stdout);
2417}
2418
2419#endif /* DEBUG386 */
2420\f
2421static bfd_reloc_code_real_type
2422reloc (unsigned int size,
2423 int pcrel,
2424 int sign,
2425 bfd_reloc_code_real_type other)
2426{
2427 if (other != NO_RELOC)
2428 {
2429 reloc_howto_type *reloc;
2430
2431 if (size == 8)
2432 switch (other)
2433 {
2434 case BFD_RELOC_X86_64_GOT32:
2435 return BFD_RELOC_X86_64_GOT64;
2436 break;
2437 case BFD_RELOC_X86_64_PLTOFF64:
2438 return BFD_RELOC_X86_64_PLTOFF64;
2439 break;
2440 case BFD_RELOC_X86_64_GOTPC32:
2441 other = BFD_RELOC_X86_64_GOTPC64;
2442 break;
2443 case BFD_RELOC_X86_64_GOTPCREL:
2444 other = BFD_RELOC_X86_64_GOTPCREL64;
2445 break;
2446 case BFD_RELOC_X86_64_TPOFF32:
2447 other = BFD_RELOC_X86_64_TPOFF64;
2448 break;
2449 case BFD_RELOC_X86_64_DTPOFF32:
2450 other = BFD_RELOC_X86_64_DTPOFF64;
2451 break;
2452 default:
2453 break;
2454 }
2455
2456 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2457 if (size == 4 && flag_code != CODE_64BIT)
2458 sign = -1;
2459
2460 reloc = bfd_reloc_type_lookup (stdoutput, other);
2461 if (!reloc)
2462 as_bad (_("unknown relocation (%u)"), other);
2463 else if (size != bfd_get_reloc_size (reloc))
2464 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2465 bfd_get_reloc_size (reloc),
2466 size);
2467 else if (pcrel && !reloc->pc_relative)
2468 as_bad (_("non-pc-relative relocation for pc-relative field"));
2469 else if ((reloc->complain_on_overflow == complain_overflow_signed
2470 && !sign)
2471 || (reloc->complain_on_overflow == complain_overflow_unsigned
2472 && sign > 0))
2473 as_bad (_("relocated field and relocation type differ in signedness"));
2474 else
2475 return other;
2476 return NO_RELOC;
2477 }
2478
2479 if (pcrel)
2480 {
2481 if (!sign)
2482 as_bad (_("there are no unsigned pc-relative relocations"));
2483 switch (size)
2484 {
2485 case 1: return BFD_RELOC_8_PCREL;
2486 case 2: return BFD_RELOC_16_PCREL;
2487 case 4: return BFD_RELOC_32_PCREL;
2488 case 8: return BFD_RELOC_64_PCREL;
2489 }
2490 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2491 }
2492 else
2493 {
2494 if (sign > 0)
2495 switch (size)
2496 {
2497 case 4: return BFD_RELOC_X86_64_32S;
2498 }
2499 else
2500 switch (size)
2501 {
2502 case 1: return BFD_RELOC_8;
2503 case 2: return BFD_RELOC_16;
2504 case 4: return BFD_RELOC_32;
2505 case 8: return BFD_RELOC_64;
2506 }
2507 as_bad (_("cannot do %s %u byte relocation"),
2508 sign > 0 ? "signed" : "unsigned", size);
2509 }
2510
2511 return NO_RELOC;
2512}
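/* A few sample mappings performed by reloc() when no override relocation
   is supplied (derived from the cases above):

       size 4, pcrel            -> BFD_RELOC_32_PCREL
       size 4, !pcrel, sign > 0 -> BFD_RELOC_X86_64_32S
       size 8, !pcrel           -> BFD_RELOC_64  */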
2513
2514/* Here we decide which fixups can be adjusted to make them relative to
2515 the beginning of the section instead of the symbol. Basically we need
2516 to make sure that the dynamic relocations are done correctly, so in
2517 some cases we force the original symbol to be used. */
2518
2519int
2520tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2521{
2522#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2523 if (!IS_ELF)
2524 return 1;
2525
2526 /* Don't adjust pc-relative references to merge sections in 64-bit
2527 mode. */
2528 if (use_rela_relocations
2529 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2530 && fixP->fx_pcrel)
2531 return 0;
2532
2533 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2534 and changed later by validate_fix. */
2535 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2536 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2537 return 0;
2538
2539 /* adjust_reloc_syms doesn't know about the GOT. */
2540 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2541 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2542 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2543 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2544 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2545 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2546 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2547 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2548 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2549 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2550 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2551 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2552 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2553 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2554 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2555 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2556 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2557 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2558 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2559 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2560 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2561 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2562 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2563 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2564 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2565 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2566 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2567 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2568 return 0;
2569#endif
2570 return 1;
2571}
2572
2573static int
2574intel_float_operand (const char *mnemonic)
2575{
2576 /* Note that the value returned is meaningful only for opcodes with (memory)
2577 operands, hence the code here is free to improperly handle opcodes that
2578 have no operands (for better performance and smaller code). */
2579
2580 if (mnemonic[0] != 'f')
2581 return 0; /* non-math */
2582
2583 switch (mnemonic[1])
2584 {
2585 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2586 the fs segment override prefix are not currently handled because no
2587 call path can make opcodes without operands get here. */
2588 case 'i':
2589 return 2 /* integer op */;
2590 case 'l':
2591 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2592 return 3; /* fldcw/fldenv */
2593 break;
2594 case 'n':
2595 if (mnemonic[2] != 'o' /* fnop */)
2596 return 3; /* non-waiting control op */
2597 break;
2598 case 'r':
2599 if (mnemonic[2] == 's')
2600 return 3; /* frstor/frstpm */
2601 break;
2602 case 's':
2603 if (mnemonic[2] == 'a')
2604 return 3; /* fsave */
2605 if (mnemonic[2] == 't')
2606 {
2607 switch (mnemonic[3])
2608 {
2609 case 'c': /* fstcw */
2610 case 'd': /* fstdw */
2611 case 'e': /* fstenv */
2612 case 's': /* fsts[gw] */
2613 return 3;
2614 }
2615 }
2616 break;
2617 case 'x':
2618 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2619 return 0; /* fxsave/fxrstor are not really math ops */
2620 break;
2621 }
2622
2623 return 1;
2624}
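/* Summary of intel_float_operand() return values, as implemented above:
     0 - not an FPU mnemonic (or fxsave/fxrstor),
     1 - FPU operation on floating point operands,
     2 - FPU operation on integer operands ("fi..." mnemonics),
     3 - FPU control/state operation (fldcw, fldenv, fsave, frstor, ...).  */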
2625
2626/* Build the VEX prefix. */
2627
2628static void
2629build_vex_prefix (const template *t)
2630{
2631 unsigned int register_specifier;
2632 unsigned int implied_prefix;
2633 unsigned int vector_length;
2634
2635 /* Check register specifier. */
2636 if (i.vex.register_specifier)
2637 {
2638 register_specifier = i.vex.register_specifier->reg_num;
2639 if ((i.vex.register_specifier->reg_flags & RegRex))
2640 register_specifier += 8;
2641 register_specifier = ~register_specifier & 0xf;
2642 }
2643 else
2644 register_specifier = 0xf;
2645
2646 /* Use 2-byte VEX prefix by swapping destination and source
2647 operand. */
2648 if (!i.swap_operand
2649 && i.operands == i.reg_operands
2650 && i.tm.opcode_modifier.vex0f
2651 && i.tm.opcode_modifier.s
2652 && i.rex == REX_B)
2653 {
2654 unsigned int xchg = i.operands - 1;
2655 union i386_op temp_op;
2656 i386_operand_type temp_type;
2657
2658 temp_type = i.types[xchg];
2659 i.types[xchg] = i.types[0];
2660 i.types[0] = temp_type;
2661 temp_op = i.op[xchg];
2662 i.op[xchg] = i.op[0];
2663 i.op[0] = temp_op;
2664
2665 gas_assert (i.rm.mode == 3);
2666
2667 i.rex = REX_R;
2668 xchg = i.rm.regmem;
2669 i.rm.regmem = i.rm.reg;
2670 i.rm.reg = xchg;
2671
2672 /* Use the next insn. */
2673 i.tm = t[1];
2674 }
2675
2676 vector_length = i.tm.opcode_modifier.vex256 ? 1 : 0;
2677
2678 switch ((i.tm.base_opcode >> 8) & 0xff)
2679 {
2680 case 0:
2681 implied_prefix = 0;
2682 break;
2683 case DATA_PREFIX_OPCODE:
2684 implied_prefix = 1;
2685 break;
2686 case REPE_PREFIX_OPCODE:
2687 implied_prefix = 2;
2688 break;
2689 case REPNE_PREFIX_OPCODE:
2690 implied_prefix = 3;
2691 break;
2692 default:
2693 abort ();
2694 }
2695
2696 /* Use 2-byte VEX prefix if possible. */
2697 if (i.tm.opcode_modifier.vex0f
2698 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2699 {
2700 /* 2-byte VEX prefix. */
2701 unsigned int r;
2702
2703 i.vex.length = 2;
2704 i.vex.bytes[0] = 0xc5;
2705
2706 /* Check the REX.R bit. */
2707 r = (i.rex & REX_R) ? 0 : 1;
2708 i.vex.bytes[1] = (r << 7
2709 | register_specifier << 3
2710 | vector_length << 2
2711 | implied_prefix);
2712 }
2713 else
2714 {
2715 /* 3-byte VEX prefix. */
2716 unsigned int m, w;
2717
2718 if (i.tm.opcode_modifier.vex0f)
2719 m = 0x1;
2720 else if (i.tm.opcode_modifier.vex0f38)
2721 m = 0x2;
2722 else if (i.tm.opcode_modifier.vex0f3a)
2723 m = 0x3;
2724 else
2725 abort ();
2726
2727 i.vex.length = 3;
2728 i.vex.bytes[0] = 0xc4;
2729
2730 /* The high 3 bits of the second VEX byte are the one's complement
2731 of the RXB bits from REX. */
2732 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2733
2734 /* Check the REX.W bit. */
2735 w = (i.rex & REX_W) ? 1 : 0;
2736 if (i.tm.opcode_modifier.vexw0 || i.tm.opcode_modifier.vexw1)
2737 {
2738 if (w)
2739 abort ();
2740
2741 if (i.tm.opcode_modifier.vexw1)
2742 w = 1;
2743 }
2744
2745 i.vex.bytes[2] = (w << 7
2746 | register_specifier << 3
2747 | vector_length << 2
2748 | implied_prefix);
2749 }
2750}
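/* For reference, the prefix assembled above has this layout (a sketch of
   the standard VEX encoding):

     2-byte form:  0xC5  [~R | ~vvvv | L | pp]
     3-byte form:  0xC4  [~R ~X ~B | m-mmmm]  [W | ~vvvv | L | pp]

   where vvvv is the (inverted) register_specifier, L is vector_length
   (0 = 128 bit, 1 = 256 bit), pp is implied_prefix (0 = none, 1 = 0x66,
   2 = 0xF3, 3 = 0xF2) and m-mmmm selects the 0F / 0F38 / 0F3A opcode map.  */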
2751
2752static void
2753process_immext (void)
2754{
2755 expressionS *exp;
2756
2757 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2758 {
2759 /* SSE3 instructions have fixed register operands with an opcode
2760 suffix which is coded in the same place as an 8-bit immediate
2761 field would be. Here we check those operands and remove them
2762 afterwards. */
2763 unsigned int x;
2764
2765 for (x = 0; x < i.operands; x++)
2766 if (i.op[x].regs->reg_num != x)
2767 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2768 register_prefix, i.op[x].regs->reg_name, x + 1,
2769 i.tm.name);
2770
2771 i.operands = 0;
2772 }
2773
2774 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2775 which is coded in the same place as an 8-bit immediate field
2776 would be. Here we fake an 8-bit immediate operand from the
2777 opcode suffix stored in tm.extension_opcode.
2778
2779 AVX instructions also use this encoding for some
2780 3-argument instructions. */
2781
2782 gas_assert (i.imm_operands == 0
2783 && (i.operands <= 2
2784 || (i.tm.opcode_modifier.vex
2785 && i.operands <= 4)));
2786
2787 exp = &im_expressions[i.imm_operands++];
2788 i.op[i.operands].imms = exp;
2789 i.types[i.operands] = imm8;
2790 i.operands++;
2791 exp->X_op = O_constant;
2792 exp->X_add_number = i.tm.extension_opcode;
2793 i.tm.extension_opcode = None;
2794}
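/* Two illustrative cases for process_immext():
   - "monitor" (SSE3) is written with the fixed operands %eax, %ecx, %edx;
     the loop above only verifies that operand N really is register number N
     and then drops the operands.
   - AMD 3DNow! insns such as "pfadd" put their opcode suffix byte where an
     imm8 would go; the fake immediate built above carries that byte from
     tm.extension_opcode.  */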
2795
2796/* This is the guts of the machine-dependent assembler. LINE points to a
2797 machine dependent instruction. This function is supposed to emit
2798 the frags/bytes it assembles to. */
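/* Roughly, md_assemble() proceeds as follows: parse_insn() and
   parse_operands() split the line, optimize_imm()/optimize_disp() shrink
   constant operands, match_template() picks the opcode template,
   process_suffix() and process_operands() fix up sizes and registers, and
   output_insn() emits the bytes (with build_vex_prefix() for VEX insns).  */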
2799
2800void
2801md_assemble (char *line)
2802{
2803 unsigned int j;
2804 char mnemonic[MAX_MNEM_SIZE];
2805 const template *t;
2806
2807 /* Initialize globals. */
2808 memset (&i, '\0', sizeof (i));
2809 for (j = 0; j < MAX_OPERANDS; j++)
2810 i.reloc[j] = NO_RELOC;
2811 memset (disp_expressions, '\0', sizeof (disp_expressions));
2812 memset (im_expressions, '\0', sizeof (im_expressions));
2813 save_stack_p = save_stack;
2814
2815 /* First parse an instruction mnemonic & call i386_operand for the operands.
2816 We assume that the scrubber has arranged it so that line[0] is the valid
2817 start of a (possibly prefixed) mnemonic. */
2818
2819 line = parse_insn (line, mnemonic);
2820 if (line == NULL)
2821 return;
2822
2823 line = parse_operands (line, mnemonic);
2824 this_operand = -1;
2825 if (line == NULL)
2826 return;
2827
2828 /* Now we've parsed the mnemonic into a set of templates, and have the
2829 operands at hand. */
2830
2831 /* All Intel opcodes have reversed operands except for "bound" and
2832 "invlpga". We also don't reverse intersegment "jmp" and "call"
2833 instructions with 2 immediate operands so that the immediate segment
2834 precedes the offset, as it does when in AT&T mode. */
2835 if (intel_syntax
2836 && i.operands > 1
2837 && (strcmp (mnemonic, "bound") != 0)
2838 && (strcmp (mnemonic, "invlpga") != 0)
2839 && !(operand_type_check (i.types[0], imm)
2840 && operand_type_check (i.types[1], imm)))
2841 swap_operands ();
2842
2843 /* The order of the immediates should be reversed
2844 for the 2-immediate "extrq" and "insertq" instructions. */
2845 if (i.imm_operands == 2
2846 && (strcmp (mnemonic, "extrq") == 0
2847 || strcmp (mnemonic, "insertq") == 0))
2848 swap_2_operands (0, 1);
2849
2850 if (i.imm_operands)
2851 optimize_imm ();
2852
2853 /* Don't optimize displacement for movabs since it only takes 64bit
2854 displacement. */
2855 if (i.disp_operands
2856 && (flag_code != CODE_64BIT
2857 || strcmp (mnemonic, "movabs") != 0))
2858 optimize_disp ();
2859
2860 /* Next, we find a template that matches the given insn,
2861 making sure the overlap of the given operands types is consistent
2862 with the template operand types. */
2863
2864 if (!(t = match_template ()))
2865 return;
2866
2867 if (sse_check != sse_check_none
2868 && !i.tm.opcode_modifier.noavx
2869 && (i.tm.cpu_flags.bitfield.cpusse
2870 || i.tm.cpu_flags.bitfield.cpusse2
2871 || i.tm.cpu_flags.bitfield.cpusse3
2872 || i.tm.cpu_flags.bitfield.cpussse3
2873 || i.tm.cpu_flags.bitfield.cpusse4_1
2874 || i.tm.cpu_flags.bitfield.cpusse4_2))
2875 {
2876 (sse_check == sse_check_warning
2877 ? as_warn
2878 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
2879 }
2880
2881 /* Zap movzx and movsx suffix. The suffix has been set from
2882 "word ptr" or "byte ptr" on the source operand in Intel syntax
2883 or extracted from mnemonic in AT&T syntax. But we'll use
2884 the destination register to choose the suffix for encoding. */
2885 if ((i.tm.base_opcode & ~9) == 0x0fb6)
2886 {
2887 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
2888 there is no suffix, the default will be byte extension. */
2889 if (i.reg_operands != 2
2890 && !i.suffix
2891 && intel_syntax)
2892 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
2893
2894 i.suffix = 0;
2895 }
2896
2897 if (i.tm.opcode_modifier.fwait)
2898 if (!add_prefix (FWAIT_OPCODE))
2899 return;
2900
2901 /* Check string instruction segment overrides. */
2902 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
2903 {
2904 if (!check_string ())
2905 return;
2906 i.disp_operands = 0;
2907 }
2908
2909 if (!process_suffix ())
2910 return;
2911
2912 /* Update operand types. */
2913 for (j = 0; j < i.operands; j++)
2914 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
2915
2916 /* Make still unresolved immediate matches conform to size of immediate
2917 given in i.suffix. */
2918 if (!finalize_imm ())
2919 return;
2920
2921 if (i.types[0].bitfield.imm1)
2922 i.imm_operands = 0; /* kludge for shift insns. */
2923
2924 /* We only need to check those implicit registers for instructions
2925 with 3 operands or less. */
2926 if (i.operands <= 3)
2927 for (j = 0; j < i.operands; j++)
2928 if (i.types[j].bitfield.inoutportreg
2929 || i.types[j].bitfield.shiftcount
2930 || i.types[j].bitfield.acc
2931 || i.types[j].bitfield.floatacc)
2932 i.reg_operands--;
2933
2934 /* ImmExt should be processed after SSE2AVX. */
2935 if (!i.tm.opcode_modifier.sse2avx
2936 && i.tm.opcode_modifier.immext)
2937 process_immext ();
2938
2939 /* For insns with operands there are more diddles to do to the opcode. */
2940 if (i.operands)
2941 {
2942 if (!process_operands ())
2943 return;
2944 }
2945 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
2946 {
2947 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
2948 as_warn (_("translating to `%sp'"), i.tm.name);
2949 }
2950
2951 if (i.tm.opcode_modifier.vex)
2952 build_vex_prefix (t);
2953
2954 /* Handle conversion of 'int $3' --> special int3 insn. */
2955 if (i.tm.base_opcode == INT_OPCODE && i.op[0].imms->X_add_number == 3)
2956 {
2957 i.tm.base_opcode = INT3_OPCODE;
2958 i.imm_operands = 0;
2959 }
2960
2961 if ((i.tm.opcode_modifier.jump
2962 || i.tm.opcode_modifier.jumpbyte
2963 || i.tm.opcode_modifier.jumpdword)
2964 && i.op[0].disps->X_op == O_constant)
2965 {
2966 /* Convert "jmp constant" (and "call constant") to a jump (call) to
2967 the absolute address given by the constant. Since ix86 jumps and
2968 calls are pc relative, we need to generate a reloc. */
2969 i.op[0].disps->X_add_symbol = &abs_symbol;
2970 i.op[0].disps->X_op = O_symbol;
2971 }
2972
2973 if (i.tm.opcode_modifier.rex64)
2974 i.rex |= REX_W;
2975
2976 /* For 8 bit registers we need an empty rex prefix. Also if the
2977 instruction already has a REX prefix, we need to convert legacy
2978 byte registers to their REX-encoded equivalents. */
2979
2980 if ((i.types[0].bitfield.reg8
2981 && (i.op[0].regs->reg_flags & RegRex64) != 0)
2982 || (i.types[1].bitfield.reg8
2983 && (i.op[1].regs->reg_flags & RegRex64) != 0)
2984 || ((i.types[0].bitfield.reg8
2985 || i.types[1].bitfield.reg8)
2986 && i.rex != 0))
2987 {
2988 int x;
2989
2990 i.rex |= REX_OPCODE;
2991 for (x = 0; x < 2; x++)
2992 {
2993 /* Look for 8 bit operand that uses old registers. */
2994 if (i.types[x].bitfield.reg8
2995 && (i.op[x].regs->reg_flags & RegRex64) == 0)
2996 {
2997 /* In case it is a "hi" register, give up. */
2998 if (i.op[x].regs->reg_num > 3)
2999 as_bad (_("can't encode register '%s%s' in an "
3000 "instruction requiring REX prefix."),
3001 register_prefix, i.op[x].regs->reg_name);
3002
3003 /* Otherwise it is equivalent to the extended register.
3004 Since the encoding doesn't change this is merely
3005 cosmetic cleanup for debug output. */
3006
3007 i.op[x].regs = i.op[x].regs + 8;
3008 }
3009 }
3010 }
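/* Example of the check above: "movb %ah, %sil" must be rejected, because
   %sil needs a REX prefix while %ah is unencodable once REX is present;
   "movb %bpl, %al" is fine, and %al is merely renamed to its REX-capable
   twin for debug output.  */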
3011
3012 if (i.rex != 0)
3013 add_prefix (REX_OPCODE | i.rex);
3014
3015 /* We are ready to output the insn. */
3016 output_insn ();
3017}
3018
3019static char *
3020parse_insn (char *line, char *mnemonic)
3021{
3022 char *l = line;
3023 char *token_start = l;
3024 char *mnem_p;
3025 int supported;
3026 const template *t;
3027 char *dot_p = NULL;
3028
3029 /* Non-zero if we found a prefix only acceptable with string insns. */
3030 const char *expecting_string_instruction = NULL;
3031
3032 while (1)
3033 {
3034 mnem_p = mnemonic;
3035 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3036 {
3037 if (*mnem_p == '.')
3038 dot_p = mnem_p;
3039 mnem_p++;
3040 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3041 {
3042 as_bad (_("no such instruction: `%s'"), token_start);
3043 return NULL;
3044 }
3045 l++;
3046 }
3047 if (!is_space_char (*l)
3048 && *l != END_OF_INSN
3049 && (intel_syntax
3050 || (*l != PREFIX_SEPARATOR
3051 && *l != ',')))
3052 {
3053 as_bad (_("invalid character %s in mnemonic"),
3054 output_invalid (*l));
3055 return NULL;
3056 }
3057 if (token_start == l)
3058 {
3059 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3060 as_bad (_("expecting prefix; got nothing"));
3061 else
3062 as_bad (_("expecting mnemonic; got nothing"));
3063 return NULL;
3064 }
3065
3066 /* Look up instruction (or prefix) via hash table. */
3067 current_templates = hash_find (op_hash, mnemonic);
3068
3069 if (*l != END_OF_INSN
3070 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3071 && current_templates
3072 && current_templates->start->opcode_modifier.isprefix)
3073 {
3074 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3075 {
3076 as_bad ((flag_code != CODE_64BIT
3077 ? _("`%s' is only supported in 64-bit mode")
3078 : _("`%s' is not supported in 64-bit mode")),
3079 current_templates->start->name);
3080 return NULL;
3081 }
3082 /* If we are in 16-bit mode, do not allow addr16 or data16.
3083 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3084 if ((current_templates->start->opcode_modifier.size16
3085 || current_templates->start->opcode_modifier.size32)
3086 && flag_code != CODE_64BIT
3087 && (current_templates->start->opcode_modifier.size32
3088 ^ (flag_code == CODE_16BIT)))
3089 {
3090 as_bad (_("redundant %s prefix"),
3091 current_templates->start->name);
3092 return NULL;
3093 }
3094 /* Add prefix, checking for repeated prefixes. */
3095 switch (add_prefix (current_templates->start->base_opcode))
3096 {
3097 case 0:
3098 return NULL;
3099 case 2:
3100 expecting_string_instruction = current_templates->start->name;
3101 break;
3102 }
3103 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3104 token_start = ++l;
3105 }
3106 else
3107 break;
3108 }
3109
3110 if (!current_templates)
3111 {
3112 /* Check if we should swap operand in encoding. */
3113 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3114 i.swap_operand = 1;
3115 else
3116 goto check_suffix;
3117 mnem_p = dot_p;
3118 *dot_p = '\0';
3119 current_templates = hash_find (op_hash, mnemonic);
3120 }
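/* Example of the ".s" handling above: a mnemonic such as
   "movapd.s %xmm0, %xmm7" is not in the opcode table; the trailing ".s" is
   stripped, i.swap_operand is set, and match_template() later selects the
   alternative (swapped-operand) encoding of the register-to-register form.  */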
3121
3122 if (!current_templates)
3123 {
3124check_suffix:
3125 /* See if we can get a match by trimming off a suffix. */
3126 switch (mnem_p[-1])
3127 {
3128 case WORD_MNEM_SUFFIX:
3129 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3130 i.suffix = SHORT_MNEM_SUFFIX;
3131 else
3132 case BYTE_MNEM_SUFFIX:
3133 case QWORD_MNEM_SUFFIX:
3134 i.suffix = mnem_p[-1];
3135 mnem_p[-1] = '\0';
3136 current_templates = hash_find (op_hash, mnemonic);
3137 break;
3138 case SHORT_MNEM_SUFFIX:
3139 case LONG_MNEM_SUFFIX:
3140 if (!intel_syntax)
3141 {
3142 i.suffix = mnem_p[-1];
3143 mnem_p[-1] = '\0';
3144 current_templates = hash_find (op_hash, mnemonic);
3145 }
3146 break;
3147
3148 /* Intel Syntax. */
3149 case 'd':
3150 if (intel_syntax)
3151 {
3152 if (intel_float_operand (mnemonic) == 1)
3153 i.suffix = SHORT_MNEM_SUFFIX;
3154 else
3155 i.suffix = LONG_MNEM_SUFFIX;
3156 mnem_p[-1] = '\0';
3157 current_templates = hash_find (op_hash, mnemonic);
3158 }
3159 break;
3160 }
3161 if (!current_templates)
3162 {
3163 as_bad (_("no such instruction: `%s'"), token_start);
3164 return NULL;
3165 }
3166 }
3167
3168 if (current_templates->start->opcode_modifier.jump
3169 || current_templates->start->opcode_modifier.jumpbyte)
3170 {
3171 /* Check for a branch hint. We allow ",pt" and ",pn" for
3172 predict taken and predict not taken respectively.
3173 I'm not sure that branch hints actually do anything on loop
3174 and jcxz insns (JumpByte) for current Pentium4 chips. They
3175 may work in the future and it doesn't hurt to accept them
3176 now. */
3177 if (l[0] == ',' && l[1] == 'p')
3178 {
3179 if (l[2] == 't')
3180 {
3181 if (!add_prefix (DS_PREFIX_OPCODE))
3182 return NULL;
3183 l += 3;
3184 }
3185 else if (l[2] == 'n')
3186 {
3187 if (!add_prefix (CS_PREFIX_OPCODE))
3188 return NULL;
3189 l += 3;
3190 }
3191 }
3192 }
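/* Example of the branch hint handling above: "jne,pt target" adds the
   0x3E (%ds) prefix byte as a "predict taken" hint, while "jne,pn target"
   adds the 0x2E (%cs) prefix as "predict not taken".  */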
3193 /* Any other comma loses. */
3194 if (*l == ',')
3195 {
3196 as_bad (_("invalid character %s in mnemonic"),
3197 output_invalid (*l));
3198 return NULL;
3199 }
3200
3201 /* Check if instruction is supported on specified architecture. */
3202 supported = 0;
3203 for (t = current_templates->start; t < current_templates->end; ++t)
3204 {
3205 supported |= cpu_flags_match (t);
3206 if (supported == CPU_FLAGS_PERFECT_MATCH)
3207 goto skip;
3208 }
3209
3210 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3211 {
3212 as_bad (flag_code == CODE_64BIT
3213 ? _("`%s' is not supported in 64-bit mode")
3214 : _("`%s' is only supported in 64-bit mode"),
3215 current_templates->start->name);
3216 return NULL;
3217 }
3218 if (supported != CPU_FLAGS_PERFECT_MATCH)
3219 {
3220 as_bad (_("`%s' is not supported on `%s%s'"),
3221 current_templates->start->name,
3222 cpu_arch_name ? cpu_arch_name : default_arch,
3223 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3224 return NULL;
3225 }
3226
3227skip:
3228 if (!cpu_arch_flags.bitfield.cpui386
3229 && (flag_code != CODE_16BIT))
3230 {
3231 as_warn (_("use .code16 to ensure correct addressing mode"));
3232 }
3233
3234 /* Check for rep/repne without a string instruction. */
3235 if (expecting_string_instruction)
3236 {
3237 static templates override;
3238
3239 for (t = current_templates->start; t < current_templates->end; ++t)
3240 if (t->opcode_modifier.isstring)
3241 break;
3242 if (t >= current_templates->end)
3243 {
3244 as_bad (_("expecting string instruction after `%s'"),
3245 expecting_string_instruction);
3246 return NULL;
3247 }
3248 for (override.start = t; t < current_templates->end; ++t)
3249 if (!t->opcode_modifier.isstring)
3250 break;
3251 override.end = t;
3252 current_templates = &override;
3253 }
3254
3255 return l;
3256}
3257
3258static char *
3259parse_operands (char *l, const char *mnemonic)
3260{
3261 char *token_start;
3262
3263 /* 1 if operand is pending after ','. */
3264 unsigned int expecting_operand = 0;
3265
3266 /* Non-zero if operand parens not balanced. */
3267 unsigned int paren_not_balanced;
3268
3269 while (*l != END_OF_INSN)
3270 {
3271 /* Skip optional white space before operand. */
3272 if (is_space_char (*l))
3273 ++l;
3274 if (!is_operand_char (*l) && *l != END_OF_INSN)
3275 {
3276 as_bad (_("invalid character %s before operand %d"),
3277 output_invalid (*l),
3278 i.operands + 1);
3279 return NULL;
3280 }
3281 token_start = l; /* after white space */
3282 paren_not_balanced = 0;
3283 while (paren_not_balanced || *l != ',')
3284 {
3285 if (*l == END_OF_INSN)
3286 {
3287 if (paren_not_balanced)
3288 {
3289 if (!intel_syntax)
3290 as_bad (_("unbalanced parenthesis in operand %d."),
3291 i.operands + 1);
3292 else
3293 as_bad (_("unbalanced brackets in operand %d."),
3294 i.operands + 1);
3295 return NULL;
3296 }
3297 else
3298 break; /* we are done */
3299 }
3300 else if (!is_operand_char (*l) && !is_space_char (*l))
3301 {
3302 as_bad (_("invalid character %s in operand %d"),
3303 output_invalid (*l),
3304 i.operands + 1);
3305 return NULL;
3306 }
3307 if (!intel_syntax)
3308 {
3309 if (*l == '(')
3310 ++paren_not_balanced;
3311 if (*l == ')')
3312 --paren_not_balanced;
3313 }
3314 else
3315 {
3316 if (*l == '[')
3317 ++paren_not_balanced;
3318 if (*l == ']')
3319 --paren_not_balanced;
3320 }
3321 l++;
3322 }
3323 if (l != token_start)
3324 { /* Yes, we've read in another operand. */
3325 unsigned int operand_ok;
3326 this_operand = i.operands++;
3327 i.types[this_operand].bitfield.unspecified = 1;
3328 if (i.operands > MAX_OPERANDS)
3329 {
3330 as_bad (_("spurious operands; (%d operands/instruction max)"),
3331 MAX_OPERANDS);
3332 return NULL;
3333 }
3334 /* Now parse operand adding info to 'i' as we go along. */
3335 END_STRING_AND_SAVE (l);
3336
3337 if (intel_syntax)
3338 operand_ok =
3339 i386_intel_operand (token_start,
3340 intel_float_operand (mnemonic));
3341 else
3342 operand_ok = i386_att_operand (token_start);
3343
3344 RESTORE_END_STRING (l);
3345 if (!operand_ok)
3346 return NULL;
3347 }
3348 else
3349 {
3350 if (expecting_operand)
3351 {
3352 expecting_operand_after_comma:
3353 as_bad (_("expecting operand after ','; got nothing"));
3354 return NULL;
3355 }
3356 if (*l == ',')
3357 {
3358 as_bad (_("expecting operand before ','; got nothing"));
3359 return NULL;
3360 }
3361 }
3362
3363 /* Now *l must be either ',' or END_OF_INSN. */
3364 if (*l == ',')
3365 {
3366 if (*++l == END_OF_INSN)
3367 {
3368 /* Just skip it, if it's \n complain. */
3369 goto expecting_operand_after_comma;
3370 }
3371 expecting_operand = 1;
3372 }
3373 }
3374 return l;
3375}
3376
3377static void
3378swap_2_operands (int xchg1, int xchg2)
3379{
3380 union i386_op temp_op;
3381 i386_operand_type temp_type;
3382 enum bfd_reloc_code_real temp_reloc;
3383
3384 temp_type = i.types[xchg2];
3385 i.types[xchg2] = i.types[xchg1];
3386 i.types[xchg1] = temp_type;
3387 temp_op = i.op[xchg2];
3388 i.op[xchg2] = i.op[xchg1];
3389 i.op[xchg1] = temp_op;
3390 temp_reloc = i.reloc[xchg2];
3391 i.reloc[xchg2] = i.reloc[xchg1];
3392 i.reloc[xchg1] = temp_reloc;
3393}
3394
3395static void
3396swap_operands (void)
3397{
3398 switch (i.operands)
3399 {
3400 case 5:
3401 case 4:
3402 swap_2_operands (1, i.operands - 2);
3403 case 3:
3404 case 2:
3405 swap_2_operands (0, i.operands - 1);
3406 break;
3407 default:
3408 abort ();
3409 }
3410
3411 if (i.mem_operands == 2)
3412 {
3413 const seg_entry *temp_seg;
3414 temp_seg = i.seg[0];
3415 i.seg[0] = i.seg[1];
3416 i.seg[1] = temp_seg;
3417 }
3418}
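/* Example: for Intel syntax "add eax, ebx" the operands are parsed as
   (eax, ebx) and swapped here into the internal AT&T order used by the
   matcher, i.e. the same order as "add %ebx, %eax".  */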
3419
3420/* Try to ensure constant immediates are represented in the smallest
3421 opcode possible. */
3422static void
3423optimize_imm (void)
3424{
3425 char guess_suffix = 0;
3426 int op;
3427
3428 if (i.suffix)
3429 guess_suffix = i.suffix;
3430 else if (i.reg_operands)
3431 {
3432 /* Figure out a suffix from the last register operand specified.
3433 We can't do this properly yet, i.e. excluding InOutPortReg,
3434 but the following works for instructions with immediates.
3435 In any case, we can't set i.suffix yet. */
3436 for (op = i.operands; --op >= 0;)
3437 if (i.types[op].bitfield.reg8)
3438 {
3439 guess_suffix = BYTE_MNEM_SUFFIX;
3440 break;
3441 }
3442 else if (i.types[op].bitfield.reg16)
3443 {
3444 guess_suffix = WORD_MNEM_SUFFIX;
3445 break;
3446 }
3447 else if (i.types[op].bitfield.reg32)
3448 {
3449 guess_suffix = LONG_MNEM_SUFFIX;
3450 break;
3451 }
3452 else if (i.types[op].bitfield.reg64)
3453 {
3454 guess_suffix = QWORD_MNEM_SUFFIX;
3455 break;
3456 }
3457 }
3458 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3459 guess_suffix = WORD_MNEM_SUFFIX;
3460
3461 for (op = i.operands; --op >= 0;)
3462 if (operand_type_check (i.types[op], imm))
3463 {
3464 switch (i.op[op].imms->X_op)
3465 {
3466 case O_constant:
3467 /* If a suffix is given, this operand may be shortened. */
3468 switch (guess_suffix)
3469 {
3470 case LONG_MNEM_SUFFIX:
3471 i.types[op].bitfield.imm32 = 1;
3472 i.types[op].bitfield.imm64 = 1;
3473 break;
3474 case WORD_MNEM_SUFFIX:
3475 i.types[op].bitfield.imm16 = 1;
3476 i.types[op].bitfield.imm32 = 1;
3477 i.types[op].bitfield.imm32s = 1;
3478 i.types[op].bitfield.imm64 = 1;
3479 break;
3480 case BYTE_MNEM_SUFFIX:
3481 i.types[op].bitfield.imm8 = 1;
3482 i.types[op].bitfield.imm8s = 1;
3483 i.types[op].bitfield.imm16 = 1;
3484 i.types[op].bitfield.imm32 = 1;
3485 i.types[op].bitfield.imm32s = 1;
3486 i.types[op].bitfield.imm64 = 1;
3487 break;
3488 }
3489
3490 /* If this operand is at most 16 bits, convert it
3491 to a signed 16 bit number before trying to see
3492 whether it will fit in an even smaller size.
3493 This allows a 16-bit operand such as $0xffe0 to
3494 be recognised as within Imm8S range. */
3495 if ((i.types[op].bitfield.imm16)
3496 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3497 {
3498 i.op[op].imms->X_add_number =
3499 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3500 }
3501 if ((i.types[op].bitfield.imm32)
3502 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3503 == 0))
3504 {
3505 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3506 ^ ((offsetT) 1 << 31))
3507 - ((offsetT) 1 << 31));
3508 }
3509 i.types[op]
3510 = operand_type_or (i.types[op],
3511 smallest_imm_type (i.op[op].imms->X_add_number));
3512
3513 /* We must avoid matching Imm32 templates when only a
3514 64bit immediate is available. */
3515 if (guess_suffix == QWORD_MNEM_SUFFIX)
3516 i.types[op].bitfield.imm32 = 0;
3517 break;
3518
3519 case O_absent:
3520 case O_register:
3521 abort ();
3522
3523 /* Symbols and expressions. */
3524 default:
3525 /* Convert symbolic operand to proper sizes for matching, but don't
3526 prevent matching a set of insns that only supports sizes other
3527 than those matching the insn suffix. */
3528 {
3529 i386_operand_type mask, allowed;
3530 const template *t;
3531
3532 operand_type_set (&mask, 0);
3533 operand_type_set (&allowed, 0);
3534
3535 for (t = current_templates->start;
3536 t < current_templates->end;
3537 ++t)
3538 allowed = operand_type_or (allowed,
3539 t->operand_types[op]);
3540 switch (guess_suffix)
3541 {
3542 case QWORD_MNEM_SUFFIX:
3543 mask.bitfield.imm64 = 1;
3544 mask.bitfield.imm32s = 1;
3545 break;
3546 case LONG_MNEM_SUFFIX:
3547 mask.bitfield.imm32 = 1;
3548 break;
3549 case WORD_MNEM_SUFFIX:
3550 mask.bitfield.imm16 = 1;
3551 break;
3552 case BYTE_MNEM_SUFFIX:
3553 mask.bitfield.imm8 = 1;
3554 break;
3555 default:
3556 break;
3557 }
3558 allowed = operand_type_and (mask, allowed);
3559 if (!operand_type_all_zero (&allowed))
3560 i.types[op] = operand_type_and (i.types[op], mask);
3561 }
3562 break;
3563 }
3564 }
3565}
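/* Example of the effect of optimize_imm(): for "addl $3, %eax" the
   constant also gets the Imm8/Imm8S bits set, so match_template() can
   choose the sign-extended-imm8 form (opcode 0x83) instead of the full
   imm32 form (opcode 0x81).  */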
3566
3567/* Try to use the smallest displacement type too. */
3568static void
3569optimize_disp (void)
3570{
3571 int op;
3572
3573 for (op = i.operands; --op >= 0;)
3574 if (operand_type_check (i.types[op], disp))
3575 {
3576 if (i.op[op].disps->X_op == O_constant)
3577 {
3578 offsetT disp = i.op[op].disps->X_add_number;
3579
3580 if (i.types[op].bitfield.disp16
3581 && (disp & ~(offsetT) 0xffff) == 0)
3582 {
3583 /* If this operand is at most 16 bits, convert
3584 to a signed 16 bit number and don't use 64bit
3585 displacement. */
3586 disp = (((disp & 0xffff) ^ 0x8000) - 0x8000);
3587 i.types[op].bitfield.disp64 = 0;
3588 }
3589 if (i.types[op].bitfield.disp32
3590 && (disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3591 {
3592 /* If this operand is at most 32 bits, convert
3593 to a signed 32 bit number and don't use 64bit
3594 displacement. */
3595 disp &= (((offsetT) 2 << 31) - 1);
3596 disp = (disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3597 i.types[op].bitfield.disp64 = 0;
3598 }
3599 if (!disp && i.types[op].bitfield.baseindex)
3600 {
3601 i.types[op].bitfield.disp8 = 0;
3602 i.types[op].bitfield.disp16 = 0;
3603 i.types[op].bitfield.disp32 = 0;
3604 i.types[op].bitfield.disp32s = 0;
3605 i.types[op].bitfield.disp64 = 0;
3606 i.op[op].disps = 0;
3607 i.disp_operands--;
3608 }
3609 else if (flag_code == CODE_64BIT)
3610 {
3611 if (fits_in_signed_long (disp))
3612 {
3613 i.types[op].bitfield.disp64 = 0;
3614 i.types[op].bitfield.disp32s = 1;
3615 }
3616 if (fits_in_unsigned_long (disp))
3617 i.types[op].bitfield.disp32 = 1;
3618 }
3619 if ((i.types[op].bitfield.disp32
3620 || i.types[op].bitfield.disp32s
3621 || i.types[op].bitfield.disp16)
3622 && fits_in_signed_byte (disp))
3623 i.types[op].bitfield.disp8 = 1;
3624 }
3625 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3626 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3627 {
3628 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3629 i.op[op].disps, 0, i.reloc[op]);
3630 i.types[op].bitfield.disp8 = 0;
3631 i.types[op].bitfield.disp16 = 0;
3632 i.types[op].bitfield.disp32 = 0;
3633 i.types[op].bitfield.disp32s = 0;
3634 i.types[op].bitfield.disp64 = 0;
3635 }
3636 else
3637 /* We only support 64bit displacement on constants. */
3638 i.types[op].bitfield.disp64 = 0;
3639 }
3640}
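/* Example of the effect of optimize_disp(): in "movl %eax, 8(%ebp)" the
   displacement 8 fits in a signed byte, so Disp8 is set and a one-byte
   displacement can be emitted; a zero displacement with a base register is
   dropped entirely (the baseindex case above).  */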
3641
3642static const template *
3643match_template (void)
3644{
3645 /* Points to template once we've found it. */
3646 const template *t;
3647 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3648 i386_operand_type overlap4;
3649 unsigned int found_reverse_match;
3650 i386_opcode_modifier suffix_check;
3651 i386_operand_type operand_types [MAX_OPERANDS];
3652 int addr_prefix_disp;
3653 unsigned int j;
3654 unsigned int found_cpu_match;
3655 unsigned int check_register;
3656
3657#if MAX_OPERANDS != 5
3658# error "MAX_OPERANDS must be 5."
3659#endif
3660
3661 found_reverse_match = 0;
3662 addr_prefix_disp = -1;
3663
3664 memset (&suffix_check, 0, sizeof (suffix_check));
3665 if (i.suffix == BYTE_MNEM_SUFFIX)
3666 suffix_check.no_bsuf = 1;
3667 else if (i.suffix == WORD_MNEM_SUFFIX)
3668 suffix_check.no_wsuf = 1;
3669 else if (i.suffix == SHORT_MNEM_SUFFIX)
3670 suffix_check.no_ssuf = 1;
3671 else if (i.suffix == LONG_MNEM_SUFFIX)
3672 suffix_check.no_lsuf = 1;
3673 else if (i.suffix == QWORD_MNEM_SUFFIX)
3674 suffix_check.no_qsuf = 1;
3675 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3676 suffix_check.no_ldsuf = 1;
3677
3678 for (t = current_templates->start; t < current_templates->end; t++)
3679 {
3680 addr_prefix_disp = -1;
3681
3682 /* Must have right number of operands. */
3683 if (i.operands != t->operands)
3684 continue;
3685
3686 /* Check processor support. */
3687 found_cpu_match = (cpu_flags_match (t)
3688 == CPU_FLAGS_PERFECT_MATCH);
3689 if (!found_cpu_match)
3690 continue;
3691
3692 /* Check old gcc support. */
3693 if (!old_gcc && t->opcode_modifier.oldgcc)
3694 continue;
3695
3696 /* Check AT&T mnemonic. */
3697 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3698 continue;
3699
3700 /* Check AT&T syntax vs. Intel syntax. */
3701 if ((intel_syntax && t->opcode_modifier.attsyntax)
3702 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3703 continue;
3704
3705 /* Check the suffix, except for some instructions in intel mode. */
3706 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
3707 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
3708 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
3709 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
3710 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
3711 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
3712 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
3713 continue;
3714
3715 if (!operand_size_match (t))
3716 continue;
3717
3718 for (j = 0; j < MAX_OPERANDS; j++)
3719 operand_types[j] = t->operand_types[j];
3720
3721 /* In general, don't allow 64-bit operands in 32-bit mode. */
3722 if (i.suffix == QWORD_MNEM_SUFFIX
3723 && flag_code != CODE_64BIT
3724 && (intel_syntax
3725 ? (!t->opcode_modifier.ignoresize
3726 && !intel_float_operand (t->name))
3727 : intel_float_operand (t->name) != 2)
3728 && ((!operand_types[0].bitfield.regmmx
3729 && !operand_types[0].bitfield.regxmm
3730 && !operand_types[0].bitfield.regymm)
3731 || (!operand_types[t->operands > 1].bitfield.regmmx
3732 && !!operand_types[t->operands > 1].bitfield.regxmm
3733 && !!operand_types[t->operands > 1].bitfield.regymm))
3734 && (t->base_opcode != 0x0fc7
3735 || t->extension_opcode != 1 /* cmpxchg8b */))
3736 continue;
3737
3738 /* In general, don't allow 32-bit operands on pre-386. */
3739 else if (i.suffix == LONG_MNEM_SUFFIX
3740 && !cpu_arch_flags.bitfield.cpui386
3741 && (intel_syntax
3742 ? (!t->opcode_modifier.ignoresize
3743 && !intel_float_operand (t->name))
3744 : intel_float_operand (t->name) != 2)
3745 && ((!operand_types[0].bitfield.regmmx
3746 && !operand_types[0].bitfield.regxmm)
3747 || (!operand_types[t->operands > 1].bitfield.regmmx
3748 && !!operand_types[t->operands > 1].bitfield.regxmm)))
3749 continue;
3750
3751 /* Do not verify operands when there are none. */
3752 else
3753 {
3754 if (!t->operands)
3755 /* We've found a match; break out of loop. */
3756 break;
3757 }
3758
3759 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
3760 into Disp32/Disp16/Disp32 operand. */
3761 if (i.prefix[ADDR_PREFIX] != 0)
3762 {
3763 /* There should be only one Disp operand. */
3764 switch (flag_code)
3765 {
3766 case CODE_16BIT:
3767 for (j = 0; j < MAX_OPERANDS; j++)
3768 {
3769 if (operand_types[j].bitfield.disp16)
3770 {
3771 addr_prefix_disp = j;
3772 operand_types[j].bitfield.disp32 = 1;
3773 operand_types[j].bitfield.disp16 = 0;
3774 break;
3775 }
3776 }
3777 break;
3778 case CODE_32BIT:
3779 for (j = 0; j < MAX_OPERANDS; j++)
3780 {
3781 if (operand_types[j].bitfield.disp32)
3782 {
3783 addr_prefix_disp = j;
3784 operand_types[j].bitfield.disp32 = 0;
3785 operand_types[j].bitfield.disp16 = 1;
3786 break;
3787 }
3788 }
3789 break;
3790 case CODE_64BIT:
3791 for (j = 0; j < MAX_OPERANDS; j++)
3792 {
3793 if (operand_types[j].bitfield.disp64)
3794 {
3795 addr_prefix_disp = j;
3796 operand_types[j].bitfield.disp64 = 0;
3797 operand_types[j].bitfield.disp32 = 1;
3798 break;
3799 }
3800 }
3801 break;
3802 }
3803 }
3804
3805 /* We check register size only if size of operands can be
3806 encoded the canonical way. */
3807 check_register = t->opcode_modifier.w;
3808 overlap0 = operand_type_and (i.types[0], operand_types[0]);
3809 switch (t->operands)
3810 {
3811 case 1:
3812 if (!operand_type_match (overlap0, i.types[0]))
3813 continue;
3814 break;
3815 case 2:
3816 /* xchg %eax, %eax is a special case. It is an alias for nop
3817 only in 32bit mode and we can use opcode 0x90. In 64bit
3818 mode, we can't use 0x90 for xchg %eax, %eax since it should
3819 zero-extend %eax to %rax. */
3820 if (flag_code == CODE_64BIT
3821 && t->base_opcode == 0x90
3822 && operand_type_equal (&i.types [0], &acc32)
3823 && operand_type_equal (&i.types [1], &acc32))
3824 continue;
3825 if (i.swap_operand)
3826 {
3827 /* If we swap operand in encoding, we either match
3828 the next one or reverse direction of operands. */
3829 if (t->opcode_modifier.s)
3830 continue;
3831 else if (t->opcode_modifier.d)
3832 goto check_reverse;
3833 }
3834
3835 case 3:
3836 /* If we swap operand in encoding, we match the next one. */
3837 if (i.swap_operand && t->opcode_modifier.s)
3838 continue;
3839 case 4:
3840 case 5:
3841 overlap1 = operand_type_and (i.types[1], operand_types[1]);
3842 if (!operand_type_match (overlap0, i.types[0])
3843 || !operand_type_match (overlap1, i.types[1])
3844 || (check_register
3845 && !operand_type_register_match (overlap0, i.types[0],
3846 operand_types[0],
3847 overlap1, i.types[1],
3848 operand_types[1])))
3849 {
3850 /* Check if other direction is valid ... */
3851 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
3852 continue;
3853
3854check_reverse:
3855 /* Try reversing direction of operands. */
3856 overlap0 = operand_type_and (i.types[0], operand_types[1]);
3857 overlap1 = operand_type_and (i.types[1], operand_types[0]);
3858 if (!operand_type_match (overlap0, i.types[0])
3859 || !operand_type_match (overlap1, i.types[1])
3860 || (check_register
3861 && !operand_type_register_match (overlap0,
3862 i.types[0],
3863 operand_types[1],
3864 overlap1,
3865 i.types[1],
3866 operand_types[0])))
3867 {
3868 /* Does not match either direction. */
3869 continue;
3870 }
3871 /* found_reverse_match holds which of D or FloatDR
3872 we've found. */
3873 if (t->opcode_modifier.d)
3874 found_reverse_match = Opcode_D;
3875 else if (t->opcode_modifier.floatd)
3876 found_reverse_match = Opcode_FloatD;
3877 else
3878 found_reverse_match = 0;
3879 if (t->opcode_modifier.floatr)
3880 found_reverse_match |= Opcode_FloatR;
3881 }
3882 else
3883 {
3884 /* Found a forward 2 operand match here. */
3885 switch (t->operands)
3886 {
3887 case 5:
3888 overlap4 = operand_type_and (i.types[4],
3889 operand_types[4]);
3890 case 4:
3891 overlap3 = operand_type_and (i.types[3],
3892 operand_types[3]);
3893 case 3:
3894 overlap2 = operand_type_and (i.types[2],
3895 operand_types[2]);
3896 break;
3897 }
3898
3899 switch (t->operands)
3900 {
3901 case 5:
3902 if (!operand_type_match (overlap4, i.types[4])
3903 || !operand_type_register_match (overlap3,
3904 i.types[3],
3905 operand_types[3],
3906 overlap4,
3907 i.types[4],
3908 operand_types[4]))
3909 continue;
3910 case 4:
3911 if (!operand_type_match (overlap3, i.types[3])
3912 || (check_register
3913 && !operand_type_register_match (overlap2,
3914 i.types[2],
3915 operand_types[2],
3916 overlap3,
3917 i.types[3],
3918 operand_types[3])))
3919 continue;
3920 case 3:
3921 /* Here we make use of the fact that there are no
3922 reverse match 3 operand instructions, and all 3
3923 operand instructions only need to be checked for
3924 register consistency between operands 2 and 3. */
3925 if (!operand_type_match (overlap2, i.types[2])
3926 || (check_register
3927 && !operand_type_register_match (overlap1,
3928 i.types[1],
3929 operand_types[1],
3930 overlap2,
3931 i.types[2],
3932 operand_types[2])))
3933 continue;
3934 break;
3935 }
3936 }
3937 /* Found either forward/reverse 2, 3 or 4 operand match here:
3938 slip through to break. */
3939 }
3940 if (!found_cpu_match)
3941 {
3942 found_reverse_match = 0;
3943 continue;
3944 }
3945
3946 /* We've found a match; break out of loop. */
3947 break;
3948 }
3949
3950 if (t == current_templates->end)
3951 {
3952 /* We found no match. */
3953 if (intel_syntax)
3954 as_bad (_("ambiguous operand size or operands invalid for `%s'"),
3955 current_templates->start->name);
3956 else
3957 as_bad (_("suffix or operands invalid for `%s'"),
3958 current_templates->start->name);
3959 return NULL;
3960 }
3961
3962 if (!quiet_warnings)
3963 {
3964 if (!intel_syntax
3965 && (i.types[0].bitfield.jumpabsolute
3966 != operand_types[0].bitfield.jumpabsolute))
3967 {
3968 as_warn (_("indirect %s without `*'"), t->name);
3969 }
3970
3971 if (t->opcode_modifier.isprefix
3972 && t->opcode_modifier.ignoresize)
3973 {
3974 /* Warn them that a data or address size prefix doesn't
3975 affect assembly of the next line of code. */
3976 as_warn (_("stand-alone `%s' prefix"), t->name);
3977 }
3978 }
3979
3980 /* Copy the template we found. */
3981 i.tm = *t;
3982
3983 if (addr_prefix_disp != -1)
3984 i.tm.operand_types[addr_prefix_disp]
3985 = operand_types[addr_prefix_disp];
3986
3987 if (found_reverse_match)
3988 {
3989 /* If we found a reverse match we must alter the opcode
3990 direction bit. found_reverse_match holds bits to change
3991 (different for int & float insns). */
3992
3993 i.tm.base_opcode ^= found_reverse_match;
3994
3995 i.tm.operand_types[0] = operand_types[1];
3996 i.tm.operand_types[1] = operand_types[0];
3997 }
3998
3999 return t;
4000}
4001
4002static int
4003check_string (void)
4004{
4005 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4006 if (i.tm.operand_types[mem_op].bitfield.esseg)
4007 {
4008 if (i.seg[0] != NULL && i.seg[0] != &es)
4009 {
4010 as_bad (_("`%s' operand %d must use `%ses' segment"),
4011 i.tm.name,
4012 mem_op + 1,
4013 register_prefix);
4014 return 0;
4015 }
4016 /* There's only ever one segment override allowed per instruction.
4017 This instruction possibly has a legal segment override on the
4018 second operand, so copy the segment to where non-string
4019 instructions store it, allowing common code. */
4020 i.seg[0] = i.seg[1];
4021 }
4022 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4023 {
4024 if (i.seg[1] != NULL && i.seg[1] != &es)
4025 {
4026 as_bad (_("`%s' operand %d must use `%ses' segment"),
4027 i.tm.name,
4028 mem_op + 2,
4029 register_prefix);
4030 return 0;
4031 }
4032 }
4033 return 1;
4034}
4035
4036static int
4037process_suffix (void)
4038{
4039 /* If matched instruction specifies an explicit instruction mnemonic
4040 suffix, use it. */
4041 if (i.tm.opcode_modifier.size16)
4042 i.suffix = WORD_MNEM_SUFFIX;
4043 else if (i.tm.opcode_modifier.size32)
4044 i.suffix = LONG_MNEM_SUFFIX;
4045 else if (i.tm.opcode_modifier.size64)
4046 i.suffix = QWORD_MNEM_SUFFIX;
4047 else if (i.reg_operands)
4048 {
4049 /* If there's no instruction mnemonic suffix we try to invent one
4050 based on register operands. */
4051 if (!i.suffix)
4052 {
 4053 /* We take i.suffix from the last register operand specified.
4054 Destination register type is more significant than source
4055 register type. crc32 in SSE4.2 prefers source register
4056 type. */
4057 if (i.tm.base_opcode == 0xf20f38f1)
4058 {
4059 if (i.types[0].bitfield.reg16)
4060 i.suffix = WORD_MNEM_SUFFIX;
4061 else if (i.types[0].bitfield.reg32)
4062 i.suffix = LONG_MNEM_SUFFIX;
4063 else if (i.types[0].bitfield.reg64)
4064 i.suffix = QWORD_MNEM_SUFFIX;
4065 }
4066 else if (i.tm.base_opcode == 0xf20f38f0)
4067 {
4068 if (i.types[0].bitfield.reg8)
4069 i.suffix = BYTE_MNEM_SUFFIX;
4070 }
4071
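 /* For example, `crc32 %cl, %ebx' picks up a `b' suffix from its %cl
 source above, while a memory-only form such as `crc32 (%ecx), %ebx'
 is rejected just below because its operand size cannot be deduced.  */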
4072 if (!i.suffix)
4073 {
4074 int op;
4075
4076 if (i.tm.base_opcode == 0xf20f38f1
4077 || i.tm.base_opcode == 0xf20f38f0)
4078 {
4079 /* We have to know the operand size for crc32. */
 4080 as_bad (_("ambiguous memory operand size for `%s'"),
4081 i.tm.name);
4082 return 0;
4083 }
4084
4085 for (op = i.operands; --op >= 0;)
4086 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4087 {
4088 if (i.types[op].bitfield.reg8)
4089 {
4090 i.suffix = BYTE_MNEM_SUFFIX;
4091 break;
4092 }
4093 else if (i.types[op].bitfield.reg16)
4094 {
4095 i.suffix = WORD_MNEM_SUFFIX;
4096 break;
4097 }
4098 else if (i.types[op].bitfield.reg32)
4099 {
4100 i.suffix = LONG_MNEM_SUFFIX;
4101 break;
4102 }
4103 else if (i.types[op].bitfield.reg64)
4104 {
4105 i.suffix = QWORD_MNEM_SUFFIX;
4106 break;
4107 }
4108 }
4109 }
4110 }
4111 else if (i.suffix == BYTE_MNEM_SUFFIX)
4112 {
4113 if (!check_byte_reg ())
4114 return 0;
4115 }
4116 else if (i.suffix == LONG_MNEM_SUFFIX)
4117 {
4118 if (!check_long_reg ())
4119 return 0;
4120 }
4121 else if (i.suffix == QWORD_MNEM_SUFFIX)
4122 {
4123 if (intel_syntax
4124 && i.tm.opcode_modifier.ignoresize
4125 && i.tm.opcode_modifier.no_qsuf)
4126 i.suffix = 0;
4127 else if (!check_qword_reg ())
4128 return 0;
4129 }
4130 else if (i.suffix == WORD_MNEM_SUFFIX)
4131 {
4132 if (!check_word_reg ())
4133 return 0;
4134 }
4135 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4136 || i.suffix == YMMWORD_MNEM_SUFFIX)
4137 {
4138 /* Skip if the instruction has x/y suffix. match_template
4139 should check if it is a valid suffix. */
4140 }
4141 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4142 /* Do nothing if the instruction is going to ignore the prefix. */
4143 ;
4144 else
4145 abort ();
4146 }
4147 else if (i.tm.opcode_modifier.defaultsize
4148 && !i.suffix
4149 /* exclude fldenv/frstor/fsave/fstenv */
4150 && i.tm.opcode_modifier.no_ssuf)
4151 {
4152 i.suffix = stackop_size;
4153 }
4154 else if (intel_syntax
4155 && !i.suffix
4156 && (i.tm.operand_types[0].bitfield.jumpabsolute
4157 || i.tm.opcode_modifier.jumpbyte
4158 || i.tm.opcode_modifier.jumpintersegment
4159 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4160 && i.tm.extension_opcode <= 3)))
4161 {
4162 switch (flag_code)
4163 {
4164 case CODE_64BIT:
4165 if (!i.tm.opcode_modifier.no_qsuf)
4166 {
4167 i.suffix = QWORD_MNEM_SUFFIX;
4168 break;
4169 }
4170 case CODE_32BIT:
4171 if (!i.tm.opcode_modifier.no_lsuf)
4172 i.suffix = LONG_MNEM_SUFFIX;
4173 break;
4174 case CODE_16BIT:
4175 if (!i.tm.opcode_modifier.no_wsuf)
4176 i.suffix = WORD_MNEM_SUFFIX;
4177 break;
4178 }
4179 }
4180
4181 if (!i.suffix)
4182 {
4183 if (!intel_syntax)
4184 {
4185 if (i.tm.opcode_modifier.w)
4186 {
4187 as_bad (_("no instruction mnemonic suffix given and "
4188 "no register operands; can't size instruction"));
4189 return 0;
4190 }
4191 }
4192 else
4193 {
4194 unsigned int suffixes;
4195
4196 suffixes = !i.tm.opcode_modifier.no_bsuf;
4197 if (!i.tm.opcode_modifier.no_wsuf)
4198 suffixes |= 1 << 1;
4199 if (!i.tm.opcode_modifier.no_lsuf)
4200 suffixes |= 1 << 2;
4201 if (!i.tm.opcode_modifier.no_ldsuf)
4202 suffixes |= 1 << 3;
4203 if (!i.tm.opcode_modifier.no_ssuf)
4204 suffixes |= 1 << 4;
4205 if (!i.tm.opcode_modifier.no_qsuf)
4206 suffixes |= 1 << 5;
4207
 4208 /* There is more than one possible suffix match. */
4209 if (i.tm.opcode_modifier.w
4210 || ((suffixes & (suffixes - 1))
4211 && !i.tm.opcode_modifier.defaultsize
4212 && !i.tm.opcode_modifier.ignoresize))
4213 {
4214 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4215 return 0;
4216 }
4217 }
4218 }
4219
4220 /* Change the opcode based on the operand size given by i.suffix;
4221 We don't need to change things for byte insns. */
4222
4223 if (i.suffix
4224 && i.suffix != BYTE_MNEM_SUFFIX
4225 && i.suffix != XMMWORD_MNEM_SUFFIX
4226 && i.suffix != YMMWORD_MNEM_SUFFIX)
4227 {
4228 /* It's not a byte, select word/dword operation. */
4229 if (i.tm.opcode_modifier.w)
4230 {
4231 if (i.tm.opcode_modifier.shortform)
4232 i.tm.base_opcode |= 8;
4233 else
4234 i.tm.base_opcode |= 1;
4235 }
4236
4237 /* Now select between word & dword operations via the operand
4238 size prefix, except for instructions that will ignore this
4239 prefix anyway. */
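 /* For example, in 32-bit code `addw %ax, %bx' has the 0x66
 operand-size prefix added in this block, and in 16-bit code
 `addl %eax, %ebx' does.  */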
4240 if (i.tm.opcode_modifier.addrprefixop0)
4241 {
4242 /* The address size override prefix changes the size of the
4243 first operand. */
4244 if ((flag_code == CODE_32BIT
4245 && i.op->regs[0].reg_type.bitfield.reg16)
4246 || (flag_code != CODE_32BIT
4247 && i.op->regs[0].reg_type.bitfield.reg32))
4248 if (!add_prefix (ADDR_PREFIX_OPCODE))
4249 return 0;
4250 }
4251 else if (i.suffix != QWORD_MNEM_SUFFIX
4252 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4253 && !i.tm.opcode_modifier.ignoresize
4254 && !i.tm.opcode_modifier.floatmf
4255 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4256 || (flag_code == CODE_64BIT
4257 && i.tm.opcode_modifier.jumpbyte)))
4258 {
4259 unsigned int prefix = DATA_PREFIX_OPCODE;
4260
4261 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4262 prefix = ADDR_PREFIX_OPCODE;
4263
4264 if (!add_prefix (prefix))
4265 return 0;
4266 }
4267
 4268 /* Set REX.W for a 64-bit operand size. */
4269 if (i.suffix == QWORD_MNEM_SUFFIX
4270 && flag_code == CODE_64BIT
4271 && !i.tm.opcode_modifier.norex64)
4272 {
4273 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4274 need rex64. cmpxchg8b is also a special case. */
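 /* For example, `addq %rax, %rcx' gets REX_W (a 0x48 REX prefix)
 from here, while `xchg %rax, %rax' is left as the single-byte
 nop 0x90.  */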
4275 if (! (i.operands == 2
4276 && i.tm.base_opcode == 0x90
4277 && i.tm.extension_opcode == None
4278 && operand_type_equal (&i.types [0], &acc64)
4279 && operand_type_equal (&i.types [1], &acc64))
4280 && ! (i.operands == 1
4281 && i.tm.base_opcode == 0xfc7
4282 && i.tm.extension_opcode == 1
4283 && !operand_type_check (i.types [0], reg)
4284 && operand_type_check (i.types [0], anymem)))
4285 i.rex |= REX_W;
4286 }
4287
4288 /* Size floating point instruction. */
4289 if (i.suffix == LONG_MNEM_SUFFIX)
4290 if (i.tm.opcode_modifier.floatmf)
4291 i.tm.base_opcode ^= 4;
4292 }
4293
4294 return 1;
4295}
4296
4297static int
4298check_byte_reg (void)
4299{
4300 int op;
4301
4302 for (op = i.operands; --op >= 0;)
4303 {
4304 /* If this is an eight bit register, it's OK. If it's the 16 or
4305 32 bit version of an eight bit register, we will just use the
4306 low portion, and that's OK too. */
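 /* This is where warnings of the form "using `%al' instead of `%ax'
 due to `b' suffix" come from in 16/32-bit code; 64-bit code gets a
 hard error below instead.  */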
4307 if (i.types[op].bitfield.reg8)
4308 continue;
4309
4310 /* Don't generate this warning if not needed. */
4311 if (intel_syntax && i.tm.opcode_modifier.byteokintel)
4312 continue;
4313
4314 /* crc32 doesn't generate this warning. */
4315 if (i.tm.base_opcode == 0xf20f38f0)
4316 continue;
4317
4318 if ((i.types[op].bitfield.reg16
4319 || i.types[op].bitfield.reg32
4320 || i.types[op].bitfield.reg64)
4321 && i.op[op].regs->reg_num < 4)
4322 {
4323 /* Prohibit these changes in the 64bit mode, since the
4324 lowering is more complicated. */
4325 if (flag_code == CODE_64BIT
4326 && !i.tm.operand_types[op].bitfield.inoutportreg)
4327 {
4328 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4329 register_prefix, i.op[op].regs->reg_name,
4330 i.suffix);
4331 return 0;
4332 }
4333#if REGISTER_WARNINGS
4334 if (!quiet_warnings
4335 && !i.tm.operand_types[op].bitfield.inoutportreg)
4336 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4337 register_prefix,
4338 (i.op[op].regs + (i.types[op].bitfield.reg16
4339 ? REGNAM_AL - REGNAM_AX
4340 : REGNAM_AL - REGNAM_EAX))->reg_name,
4341 register_prefix,
4342 i.op[op].regs->reg_name,
4343 i.suffix);
4344#endif
4345 continue;
4346 }
4347 /* Any other register is bad. */
4348 if (i.types[op].bitfield.reg16
4349 || i.types[op].bitfield.reg32
4350 || i.types[op].bitfield.reg64
4351 || i.types[op].bitfield.regmmx
4352 || i.types[op].bitfield.regxmm
4353 || i.types[op].bitfield.regymm
4354 || i.types[op].bitfield.sreg2
4355 || i.types[op].bitfield.sreg3
4356 || i.types[op].bitfield.control
4357 || i.types[op].bitfield.debug
4358 || i.types[op].bitfield.test
4359 || i.types[op].bitfield.floatreg
4360 || i.types[op].bitfield.floatacc)
4361 {
4362 as_bad (_("`%s%s' not allowed with `%s%c'"),
4363 register_prefix,
4364 i.op[op].regs->reg_name,
4365 i.tm.name,
4366 i.suffix);
4367 return 0;
4368 }
4369 }
4370 return 1;
4371}
4372
4373static int
4374check_long_reg (void)
4375{
4376 int op;
4377
4378 for (op = i.operands; --op >= 0;)
4379 /* Reject eight bit registers, except where the template requires
4380 them. (eg. movzb) */
4381 if (i.types[op].bitfield.reg8
4382 && (i.tm.operand_types[op].bitfield.reg16
4383 || i.tm.operand_types[op].bitfield.reg32
4384 || i.tm.operand_types[op].bitfield.acc))
4385 {
4386 as_bad (_("`%s%s' not allowed with `%s%c'"),
4387 register_prefix,
4388 i.op[op].regs->reg_name,
4389 i.tm.name,
4390 i.suffix);
4391 return 0;
4392 }
4393 /* Warn if the e prefix on a general reg is missing. */
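 /* For example, a 16-bit register where the template wants a 32-bit
 one draws "using `%eax' instead of `%ax' due to `l' suffix" here,
 or a hard error in 64-bit mode.  */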
4394 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4395 && i.types[op].bitfield.reg16
4396 && (i.tm.operand_types[op].bitfield.reg32
4397 || i.tm.operand_types[op].bitfield.acc))
4398 {
4399 /* Prohibit these changes in the 64bit mode, since the
4400 lowering is more complicated. */
4401 if (flag_code == CODE_64BIT)
4402 {
4403 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4404 register_prefix, i.op[op].regs->reg_name,
4405 i.suffix);
4406 return 0;
4407 }
4408#if REGISTER_WARNINGS
4409 else
4410 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4411 register_prefix,
4412 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4413 register_prefix,
4414 i.op[op].regs->reg_name,
4415 i.suffix);
4416#endif
4417 }
4418 /* Warn if the r prefix on a general reg is missing. */
4419 else if (i.types[op].bitfield.reg64
4420 && (i.tm.operand_types[op].bitfield.reg32
4421 || i.tm.operand_types[op].bitfield.acc))
4422 {
4423 if (intel_syntax
4424 && i.tm.opcode_modifier.toqword
4425 && !i.types[0].bitfield.regxmm)
4426 {
4427 /* Convert to QWORD. We want REX byte. */
4428 i.suffix = QWORD_MNEM_SUFFIX;
4429 }
4430 else
4431 {
4432 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4433 register_prefix, i.op[op].regs->reg_name,
4434 i.suffix);
4435 return 0;
4436 }
4437 }
4438 return 1;
4439}
4440
4441static int
4442check_qword_reg (void)
4443{
4444 int op;
4445
4446 for (op = i.operands; --op >= 0; )
4447 /* Reject eight bit registers, except where the template requires
4448 them. (eg. movzb) */
4449 if (i.types[op].bitfield.reg8
4450 && (i.tm.operand_types[op].bitfield.reg16
4451 || i.tm.operand_types[op].bitfield.reg32
4452 || i.tm.operand_types[op].bitfield.acc))
4453 {
4454 as_bad (_("`%s%s' not allowed with `%s%c'"),
4455 register_prefix,
4456 i.op[op].regs->reg_name,
4457 i.tm.name,
4458 i.suffix);
4459 return 0;
4460 }
 4461 /* A 16 or 32-bit register given with a `q' suffix: either convert the operation to dword (Intel syntax only) or reject it. */
4462 else if ((i.types[op].bitfield.reg16
4463 || i.types[op].bitfield.reg32)
4464 && (i.tm.operand_types[op].bitfield.reg32
4465 || i.tm.operand_types[op].bitfield.acc))
4466 {
 4467 /* Some templates allow Intel syntax to convert the operation
 4468 to dword instead; otherwise this is an error. */
4469 if (intel_syntax
4470 && i.tm.opcode_modifier.todword
4471 && !i.types[0].bitfield.regxmm)
4472 {
4473 /* Convert to DWORD. We don't want REX byte. */
4474 i.suffix = LONG_MNEM_SUFFIX;
4475 }
4476 else
4477 {
4478 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4479 register_prefix, i.op[op].regs->reg_name,
4480 i.suffix);
4481 return 0;
4482 }
4483 }
4484 return 1;
4485}
4486
4487static int
4488check_word_reg (void)
4489{
4490 int op;
4491 for (op = i.operands; --op >= 0;)
4492 /* Reject eight bit registers, except where the template requires
4493 them. (eg. movzb) */
4494 if (i.types[op].bitfield.reg8
4495 && (i.tm.operand_types[op].bitfield.reg16
4496 || i.tm.operand_types[op].bitfield.reg32
4497 || i.tm.operand_types[op].bitfield.acc))
4498 {
4499 as_bad (_("`%s%s' not allowed with `%s%c'"),
4500 register_prefix,
4501 i.op[op].regs->reg_name,
4502 i.tm.name,
4503 i.suffix);
4504 return 0;
4505 }
4506 /* Warn if the e prefix on a general reg is present. */
4507 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4508 && i.types[op].bitfield.reg32
4509 && (i.tm.operand_types[op].bitfield.reg16
4510 || i.tm.operand_types[op].bitfield.acc))
4511 {
4512 /* Prohibit these changes in the 64bit mode, since the
4513 lowering is more complicated. */
4514 if (flag_code == CODE_64BIT)
4515 {
4516 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4517 register_prefix, i.op[op].regs->reg_name,
4518 i.suffix);
4519 return 0;
4520 }
4521 else
4522#if REGISTER_WARNINGS
4523 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4524 register_prefix,
4525 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4526 register_prefix,
4527 i.op[op].regs->reg_name,
4528 i.suffix);
4529#endif
4530 }
4531 return 1;
4532}
4533
4534static int
4535update_imm (unsigned int j)
4536{
4537 i386_operand_type overlap = i.types[j];
4538 if ((overlap.bitfield.imm8
4539 || overlap.bitfield.imm8s
4540 || overlap.bitfield.imm16
4541 || overlap.bitfield.imm32
4542 || overlap.bitfield.imm32s
4543 || overlap.bitfield.imm64)
4544 && !operand_type_equal (&overlap, &imm8)
4545 && !operand_type_equal (&overlap, &imm8s)
4546 && !operand_type_equal (&overlap, &imm16)
4547 && !operand_type_equal (&overlap, &imm32)
4548 && !operand_type_equal (&overlap, &imm32s)
4549 && !operand_type_equal (&overlap, &imm64))
4550 {
4551 if (i.suffix)
4552 {
4553 i386_operand_type temp;
4554
4555 operand_type_set (&temp, 0);
4556 if (i.suffix == BYTE_MNEM_SUFFIX)
4557 {
4558 temp.bitfield.imm8 = overlap.bitfield.imm8;
4559 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4560 }
4561 else if (i.suffix == WORD_MNEM_SUFFIX)
4562 temp.bitfield.imm16 = overlap.bitfield.imm16;
4563 else if (i.suffix == QWORD_MNEM_SUFFIX)
4564 {
4565 temp.bitfield.imm64 = overlap.bitfield.imm64;
4566 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4567 }
4568 else
4569 temp.bitfield.imm32 = overlap.bitfield.imm32;
4570 overlap = temp;
4571 }
4572 else if (operand_type_equal (&overlap, &imm16_32_32s)
4573 || operand_type_equal (&overlap, &imm16_32)
4574 || operand_type_equal (&overlap, &imm16_32s))
4575 {
4576 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4577 overlap = imm16;
4578 else
4579 overlap = imm32s;
4580 }
4581 if (!operand_type_equal (&overlap, &imm8)
4582 && !operand_type_equal (&overlap, &imm8s)
4583 && !operand_type_equal (&overlap, &imm16)
4584 && !operand_type_equal (&overlap, &imm32)
4585 && !operand_type_equal (&overlap, &imm32s)
4586 && !operand_type_equal (&overlap, &imm64))
4587 {
4588 as_bad (_("no instruction mnemonic suffix given; "
4589 "can't determine immediate size"));
4590 return 0;
4591 }
4592 }
4593 i.types[j] = overlap;
4594
4595 return 1;
4596}
4597
4598static int
4599finalize_imm (void)
4600{
4601 unsigned int j, n;
4602
4603 /* Update the first 2 immediate operands. */
4604 n = i.operands > 2 ? 2 : i.operands;
4605 if (n)
4606 {
4607 for (j = 0; j < n; j++)
4608 if (update_imm (j) == 0)
4609 return 0;
4610
 4611 /* The 3rd operand can't be an immediate operand. */
4612 gas_assert (operand_type_check (i.types[2], imm) == 0);
4613 }
4614
4615 return 1;
4616}
4617
4618static int
4619bad_implicit_operand (int xmm)
4620{
4621 const char *reg = xmm ? "xmm0" : "ymm0";
4622 if (intel_syntax)
4623 as_bad (_("the last operand of `%s' must be `%s%s'"),
4624 i.tm.name, register_prefix, reg);
4625 else
4626 as_bad (_("the first operand of `%s' must be `%s%s'"),
4627 i.tm.name, register_prefix, reg);
4628 return 0;
4629}
4630
4631static int
4632process_operands (void)
4633{
4634 /* Default segment register this instruction will use for memory
4635 accesses. 0 means unknown. This is only for optimizing out
4636 unnecessary segment overrides. */
4637 const seg_entry *default_seg = 0;
4638
4639 if (i.tm.opcode_modifier.sse2avx
4640 && (i.tm.opcode_modifier.vexnds
4641 || i.tm.opcode_modifier.vexndd))
4642 {
4643 unsigned int dup = i.operands;
4644 unsigned int dest = dup - 1;
4645 unsigned int j;
4646
4647 /* The destination must be an xmm register. */
4648 gas_assert (i.reg_operands
4649 && MAX_OPERANDS > dup
4650 && operand_type_equal (&i.types[dest], &regxmm));
4651
4652 if (i.tm.opcode_modifier.firstxmm0)
4653 {
4654 /* The first operand is implicit and must be xmm0. */
4655 gas_assert (operand_type_equal (&i.types[0], &regxmm));
4656 if (i.op[0].regs->reg_num != 0)
4657 return bad_implicit_operand (1);
4658
4659 if (i.tm.opcode_modifier.vex3sources)
4660 {
4661 /* Keep xmm0 for instructions with VEX prefix and 3
4662 sources. */
4663 goto duplicate;
4664 }
4665 else
4666 {
4667 /* We remove the first xmm0 and keep the number of
4668 operands unchanged, which in fact duplicates the
4669 destination. */
4670 for (j = 1; j < i.operands; j++)
4671 {
4672 i.op[j - 1] = i.op[j];
4673 i.types[j - 1] = i.types[j];
4674 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
4675 }
4676 }
4677 }
4678 else if (i.tm.opcode_modifier.implicit1stxmm0)
4679 {
4680 gas_assert ((MAX_OPERANDS - 1) > dup
4681 && i.tm.opcode_modifier.vex3sources);
4682
4683 /* Add the implicit xmm0 for instructions with VEX prefix
4684 and 3 sources. */
4685 for (j = i.operands; j > 0; j--)
4686 {
4687 i.op[j] = i.op[j - 1];
4688 i.types[j] = i.types[j - 1];
4689 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
4690 }
4691 i.op[0].regs
4692 = (const reg_entry *) hash_find (reg_hash, "xmm0");
4693 i.types[0] = regxmm;
4694 i.tm.operand_types[0] = regxmm;
4695
4696 i.operands += 2;
4697 i.reg_operands += 2;
4698 i.tm.operands += 2;
4699
4700 dup++;
4701 dest++;
4702 i.op[dup] = i.op[dest];
4703 i.types[dup] = i.types[dest];
4704 i.tm.operand_types[dup] = i.tm.operand_types[dest];
4705 }
4706 else
4707 {
4708duplicate:
4709 i.operands++;
4710 i.reg_operands++;
4711 i.tm.operands++;
4712
4713 i.op[dup] = i.op[dest];
4714 i.types[dup] = i.types[dest];
4715 i.tm.operand_types[dup] = i.tm.operand_types[dest];
4716 }
4717
4718 if (i.tm.opcode_modifier.immext)
4719 process_immext ();
4720 }
4721 else if (i.tm.opcode_modifier.firstxmm0)
4722 {
4723 unsigned int j;
4724
4725 /* The first operand is implicit and must be xmm0/ymm0. */
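 /* For example (SSE4.1), `blendvps %xmm0, %xmm2, %xmm1' spells out
 the implicit %xmm0; it is checked here and then dropped from the
 operand list, while `blendvps %xmm3, %xmm2, %xmm1' is rejected.  */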
4726 gas_assert (i.reg_operands
4727 && (operand_type_equal (&i.types[0], &regxmm)
4728 || operand_type_equal (&i.types[0], &regymm)));
4729 if (i.op[0].regs->reg_num != 0)
4730 return bad_implicit_operand (i.types[0].bitfield.regxmm);
4731
4732 for (j = 1; j < i.operands; j++)
4733 {
4734 i.op[j - 1] = i.op[j];
4735 i.types[j - 1] = i.types[j];
4736
4737 /* We need to adjust fields in i.tm since they are used by
4738 build_modrm_byte. */
4739 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
4740 }
4741
4742 i.operands--;
4743 i.reg_operands--;
4744 i.tm.operands--;
4745 }
4746 else if (i.tm.opcode_modifier.regkludge)
4747 {
4748 /* The imul $imm, %reg instruction is converted into
4749 imul $imm, %reg, %reg, and the clr %reg instruction
4750 is converted into xor %reg, %reg. */
4751
4752 unsigned int first_reg_op;
4753
4754 if (operand_type_check (i.types[0], reg))
4755 first_reg_op = 0;
4756 else
4757 first_reg_op = 1;
4758 /* Pretend we saw the extra register operand. */
4759 gas_assert (i.reg_operands == 1
4760 && i.op[first_reg_op + 1].regs == 0);
4761 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
4762 i.types[first_reg_op + 1] = i.types[first_reg_op];
4763 i.operands++;
4764 i.reg_operands++;
4765 }
4766
4767 if (i.tm.opcode_modifier.shortform)
4768 {
4769 if (i.types[0].bitfield.sreg2
4770 || i.types[0].bitfield.sreg3)
4771 {
4772 if (i.tm.base_opcode == POP_SEG_SHORT
4773 && i.op[0].regs->reg_num == 1)
4774 {
4775 as_bad (_("you can't `pop %scs'"), register_prefix);
4776 return 0;
4777 }
4778 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
4779 if ((i.op[0].regs->reg_flags & RegRex) != 0)
4780 i.rex |= REX_B;
4781 }
4782 else
4783 {
4784 /* The register or float register operand is in operand
4785 0 or 1. */
4786 unsigned int op;
4787
4788 if (i.types[0].bitfield.floatreg
4789 || operand_type_check (i.types[0], reg))
4790 op = 0;
4791 else
4792 op = 1;
4793 /* Register goes in low 3 bits of opcode. */
4794 i.tm.base_opcode |= i.op[op].regs->reg_num;
4795 if ((i.op[op].regs->reg_flags & RegRex) != 0)
4796 i.rex |= REX_B;
4797 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
4798 {
4799 /* Warn about some common errors, but press on regardless.
4800 The first case can be generated by gcc (<= 2.8.1). */
4801 if (i.operands == 2)
4802 {
4803 /* Reversed arguments on faddp, fsubp, etc. */
4804 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
4805 register_prefix, i.op[!intel_syntax].regs->reg_name,
4806 register_prefix, i.op[intel_syntax].regs->reg_name);
4807 }
4808 else
4809 {
4810 /* Extraneous `l' suffix on fp insn. */
4811 as_warn (_("translating to `%s %s%s'"), i.tm.name,
4812 register_prefix, i.op[0].regs->reg_name);
4813 }
4814 }
4815 }
4816 }
4817 else if (i.tm.opcode_modifier.modrm)
4818 {
4819 /* The opcode is completed (modulo i.tm.extension_opcode which
4820 must be put into the modrm byte). Now, we make the modrm and
4821 index base bytes based on all the info we've collected. */
4822
4823 default_seg = build_modrm_byte ();
4824 }
4825 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
4826 {
4827 default_seg = &ds;
4828 }
4829 else if (i.tm.opcode_modifier.isstring)
4830 {
4831 /* For the string instructions that allow a segment override
4832 on one of their operands, the default segment is ds. */
4833 default_seg = &ds;
4834 }
4835
4836 if (i.tm.base_opcode == 0x8d /* lea */
4837 && i.seg[0]
4838 && !quiet_warnings)
4839 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
4840
4841 /* If a segment was explicitly specified, and the specified segment
4842 is not the default, use an opcode prefix to select it. If we
4843 never figured out what the default segment is, then default_seg
4844 will be zero at this point, and the specified segment prefix will
4845 always be used. */
4846 if ((i.seg[0]) && (i.seg[0] != default_seg))
4847 {
4848 if (!add_prefix (i.seg[0]->seg_prefix))
4849 return 0;
4850 }
4851 return 1;
4852}
4853
4854static const seg_entry *
4855build_modrm_byte (void)
4856{
4857 const seg_entry *default_seg = 0;
4858 unsigned int source, dest;
4859 int vex_3_sources;
4860
4861 /* The first operand of instructions with VEX prefix and 3 sources
4862 must be VEX_Imm4. */
4863 vex_3_sources = i.tm.opcode_modifier.vex3sources;
4864 if (vex_3_sources)
4865 {
4866 unsigned int nds, reg;
4867
4868 if (i.tm.opcode_modifier.veximmext
4869 && i.tm.opcode_modifier.immext)
4870 {
4871 dest = i.operands - 2;
4872 gas_assert (dest == 3);
4873 }
4874 else
4875 dest = i.operands - 1;
4876 nds = dest - 1;
4877
4878 /* This instruction must have 4 register operands
4879 or 3 register operands plus 1 memory operand.
4880 It must have VexNDS and VexImmExt. */
4881 gas_assert ((i.reg_operands == 4
4882 || (i.reg_operands == 3 && i.mem_operands == 1))
4883 && i.tm.opcode_modifier.vexnds
4884 && i.tm.opcode_modifier.veximmext
4885 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
4886 || operand_type_equal (&i.tm.operand_types[dest], &regymm)));
4887
4888 /* Generate an 8bit immediate operand to encode the register
4889 operand. */
4890 expressionS *exp = &im_expressions[i.imm_operands++];
4891 i.op[i.operands].imms = exp;
4892 i.types[i.operands] = imm8;
4893 i.operands++;
4894 /* If VexW1 is set, the first operand is the source and
4895 the second operand is encoded in the immediate operand. */
4896 if (i.tm.opcode_modifier.vexw1)
4897 {
4898 source = 0;
4899 reg = 1;
4900 }
4901 else
4902 {
4903 source = 1;
4904 reg = 0;
4905 }
4906 /* FMA4 swaps REG and NDS. */
4907 if (i.tm.cpu_flags.bitfield.cpufma4)
4908 {
4909 unsigned int tmp;
4910 tmp = reg;
4911 reg = nds;
4912 nds = tmp;
4913 }
4914 gas_assert ((operand_type_equal (&i.tm.operand_types[reg], &regxmm)
4915 || operand_type_equal (&i.tm.operand_types[reg],
4916 &regymm))
4917 && (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
4918 || operand_type_equal (&i.tm.operand_types[nds],
4919 &regymm)));
4920 exp->X_op = O_constant;
4921 exp->X_add_number
4922 = ((i.op[reg].regs->reg_num
4923 + ((i.op[reg].regs->reg_flags & RegRex) ? 8 : 0)) << 4);
4924 i.vex.register_specifier = i.op[nds].regs;
4925 }
4926 else
4927 source = dest = 0;
4928
4929 /* i.reg_operands MUST be the number of real register operands;
4930 implicit registers do not count. If there are 3 register
 4931 operands, it must be an instruction with VexNDS. For an
 4932 instruction with VexNDD, the destination register is encoded
 4933 in the VEX prefix. If there are 4 register operands, it must be
 4934 an instruction with VEX prefix and 3 sources. */
4935 if (i.mem_operands == 0
4936 && ((i.reg_operands == 2
4937 && !i.tm.opcode_modifier.vexndd)
4938 || (i.reg_operands == 3
4939 && i.tm.opcode_modifier.vexnds)
4940 || (i.reg_operands == 4 && vex_3_sources)))
4941 {
4942 switch (i.operands)
4943 {
4944 case 2:
4945 source = 0;
4946 break;
4947 case 3:
4948 /* When there are 3 operands, one of them may be immediate,
4949 which may be the first or the last operand. Otherwise,
 4950 the first operand must be the shift count register (%cl) or it
4951 is an instruction with VexNDS. */
4952 gas_assert (i.imm_operands == 1
4953 || (i.imm_operands == 0
4954 && (i.tm.opcode_modifier.vexnds
4955 || i.types[0].bitfield.shiftcount)));
4956 if (operand_type_check (i.types[0], imm)
4957 || i.types[0].bitfield.shiftcount)
4958 source = 1;
4959 else
4960 source = 0;
4961 break;
4962 case 4:
4963 /* When there are 4 operands, the first two must be 8bit
4964 immediate operands. The source operand will be the 3rd
4965 one.
4966
 4967 For instructions with VexNDS, if the first operand is
 4968 an imm8, the source operand is the 2nd one. If the last
4969 operand is imm8, the source operand is the first one. */
4970 gas_assert ((i.imm_operands == 2
4971 && i.types[0].bitfield.imm8
4972 && i.types[1].bitfield.imm8)
4973 || (i.tm.opcode_modifier.vexnds
4974 && i.imm_operands == 1
4975 && (i.types[0].bitfield.imm8
4976 || i.types[i.operands - 1].bitfield.imm8)));
4977 if (i.tm.opcode_modifier.vexnds)
4978 {
4979 if (i.types[0].bitfield.imm8)
4980 source = 1;
4981 else
4982 source = 0;
4983 }
4984 else
4985 source = 2;
4986 break;
4987 case 5:
4988 break;
4989 default:
4990 abort ();
4991 }
4992
4993 if (!vex_3_sources)
4994 {
4995 dest = source + 1;
4996
4997 if (i.tm.opcode_modifier.vexnds)
4998 {
4999 /* For instructions with VexNDS, the register-only
 5000 source operand must be an XMM or YMM register. It is
5001 encoded in VEX prefix. We need to clear RegMem bit
5002 before calling operand_type_equal. */
5003 i386_operand_type op = i.tm.operand_types[dest];
5004 op.bitfield.regmem = 0;
5005 if ((dest + 1) >= i.operands
5006 || (!operand_type_equal (&op, &regxmm)
5007 && !operand_type_equal (&op, &regymm)))
5008 abort ();
5009 i.vex.register_specifier = i.op[dest].regs;
5010 dest++;
5011 }
5012 }
5013
5014 i.rm.mode = 3;
 5015 /* One of the register operands will be encoded in the i.rm.reg
 5016 field, the other in the combined i.rm.mode and i.rm.regmem
5017 fields. If no form of this instruction supports a memory
5018 destination operand, then we assume the source operand may
5019 sometimes be a memory operand and so we need to store the
5020 destination in the i.rm.reg field. */
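 /* For example, for `mov %eax, %ecx' the destination is the r/m
 operand, so i.rm.reg gets %eax (0) and i.rm.regmem gets %ecx (1),
 producing the bytes 0x89 0xc1.  */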
5021 if (!i.tm.operand_types[dest].bitfield.regmem
5022 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5023 {
5024 i.rm.reg = i.op[dest].regs->reg_num;
5025 i.rm.regmem = i.op[source].regs->reg_num;
5026 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5027 i.rex |= REX_R;
5028 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5029 i.rex |= REX_B;
5030 }
5031 else
5032 {
5033 i.rm.reg = i.op[source].regs->reg_num;
5034 i.rm.regmem = i.op[dest].regs->reg_num;
5035 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5036 i.rex |= REX_B;
5037 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5038 i.rex |= REX_R;
5039 }
5040 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5041 {
5042 if (!i.types[0].bitfield.control
5043 && !i.types[1].bitfield.control)
5044 abort ();
5045 i.rex &= ~(REX_R | REX_B);
5046 add_prefix (LOCK_PREFIX_OPCODE);
5047 }
5048 }
5049 else
5050 { /* If it's not 2 reg operands... */
5051 unsigned int mem;
5052
5053 if (i.mem_operands)
5054 {
5055 unsigned int fake_zero_displacement = 0;
5056 unsigned int op;
5057
5058 for (op = 0; op < i.operands; op++)
5059 if (operand_type_check (i.types[op], anymem))
5060 break;
5061 gas_assert (op < i.operands);
5062
5063 default_seg = &ds;
5064
5065 if (i.base_reg == 0)
5066 {
5067 i.rm.mode = 0;
5068 if (!i.disp_operands)
5069 fake_zero_displacement = 1;
5070 if (i.index_reg == 0)
5071 {
5072 /* Operand is just <disp> */
5073 if (flag_code == CODE_64BIT)
5074 {
 5075 /* In 64-bit mode the ModRM encoding that used to mean
 5076 32-bit absolute addressing means RIP-relative addressing
 5077 instead, so absolute addressing has to be encoded via one
 5078 of the redundant SIB forms. */
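 /* For example, in 64-bit mode `movl foo, %eax' assembles to
 8b 04 25 <disp32> (the SIB form), whereas `movl foo(%rip), %eax'
 uses the plain disp32 ModRM form 8b 05 <disp32>.  */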
5079 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5080 i.sib.base = NO_BASE_REGISTER;
5081 i.sib.index = NO_INDEX_REGISTER;
5082 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5083 ? disp32s : disp32);
5084 }
5085 else if ((flag_code == CODE_16BIT)
5086 ^ (i.prefix[ADDR_PREFIX] != 0))
5087 {
5088 i.rm.regmem = NO_BASE_REGISTER_16;
5089 i.types[op] = disp16;
5090 }
5091 else
5092 {
5093 i.rm.regmem = NO_BASE_REGISTER;
5094 i.types[op] = disp32;
5095 }
5096 }
5097 else /* !i.base_reg && i.index_reg */
5098 {
5099 if (i.index_reg->reg_num == RegEiz
5100 || i.index_reg->reg_num == RegRiz)
5101 i.sib.index = NO_INDEX_REGISTER;
5102 else
5103 i.sib.index = i.index_reg->reg_num;
5104 i.sib.base = NO_BASE_REGISTER;
5105 i.sib.scale = i.log2_scale_factor;
5106 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5107 i.types[op].bitfield.disp8 = 0;
5108 i.types[op].bitfield.disp16 = 0;
5109 i.types[op].bitfield.disp64 = 0;
5110 if (flag_code != CODE_64BIT)
5111 {
5112 /* Must be 32 bit */
5113 i.types[op].bitfield.disp32 = 1;
5114 i.types[op].bitfield.disp32s = 0;
5115 }
5116 else
5117 {
5118 i.types[op].bitfield.disp32 = 0;
5119 i.types[op].bitfield.disp32s = 1;
5120 }
5121 if ((i.index_reg->reg_flags & RegRex) != 0)
5122 i.rex |= REX_X;
5123 }
5124 }
5125 /* RIP addressing for 64bit mode. */
5126 else if (i.base_reg->reg_num == RegRip ||
5127 i.base_reg->reg_num == RegEip)
5128 {
5129 i.rm.regmem = NO_BASE_REGISTER;
5130 i.types[op].bitfield.disp8 = 0;
5131 i.types[op].bitfield.disp16 = 0;
5132 i.types[op].bitfield.disp32 = 0;
5133 i.types[op].bitfield.disp32s = 1;
5134 i.types[op].bitfield.disp64 = 0;
5135 i.flags[op] |= Operand_PCrel;
5136 if (! i.disp_operands)
5137 fake_zero_displacement = 1;
5138 }
5139 else if (i.base_reg->reg_type.bitfield.reg16)
5140 {
5141 switch (i.base_reg->reg_num)
5142 {
5143 case 3: /* (%bx) */
5144 if (i.index_reg == 0)
5145 i.rm.regmem = 7;
5146 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5147 i.rm.regmem = i.index_reg->reg_num - 6;
5148 break;
5149 case 5: /* (%bp) */
5150 default_seg = &ss;
5151 if (i.index_reg == 0)
5152 {
5153 i.rm.regmem = 6;
5154 if (operand_type_check (i.types[op], disp) == 0)
5155 {
5156 /* fake (%bp) into 0(%bp) */
5157 i.types[op].bitfield.disp8 = 1;
5158 fake_zero_displacement = 1;
5159 }
5160 }
5161 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5162 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5163 break;
5164 default: /* (%si) -> 4 or (%di) -> 5 */
5165 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5166 }
5167 i.rm.mode = mode_from_disp_size (i.types[op]);
5168 }
5169 else /* i.base_reg and 32/64 bit mode */
5170 {
5171 if (flag_code == CODE_64BIT
5172 && operand_type_check (i.types[op], disp))
5173 {
5174 i386_operand_type temp;
5175 operand_type_set (&temp, 0);
5176 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5177 i.types[op] = temp;
5178 if (i.prefix[ADDR_PREFIX] == 0)
5179 i.types[op].bitfield.disp32s = 1;
5180 else
5181 i.types[op].bitfield.disp32 = 1;
5182 }
5183
5184 i.rm.regmem = i.base_reg->reg_num;
5185 if ((i.base_reg->reg_flags & RegRex) != 0)
5186 i.rex |= REX_B;
5187 i.sib.base = i.base_reg->reg_num;
5188 /* x86-64 ignores REX prefix bit here to avoid decoder
5189 complications. */
5190 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5191 {
5192 default_seg = &ss;
5193 if (i.disp_operands == 0)
5194 {
5195 fake_zero_displacement = 1;
5196 i.types[op].bitfield.disp8 = 1;
5197 }
5198 }
5199 else if (i.base_reg->reg_num == ESP_REG_NUM)
5200 {
5201 default_seg = &ss;
5202 }
5203 i.sib.scale = i.log2_scale_factor;
5204 if (i.index_reg == 0)
5205 {
5206 /* <disp>(%esp) becomes two byte modrm with no index
5207 register. We've already stored the code for esp
 5208 in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5209 Any base register besides %esp will not use the
5210 extra modrm byte. */
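 /* For example, `movl 4(%esp), %eax' assembles to 8b 44 24 04:
 ModRM 0x44 selects a SIB byte, SIB 0x24 encodes base %esp with
 no index, and 0x04 is the 8-bit displacement.  */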
5211 i.sib.index = NO_INDEX_REGISTER;
5212 }
5213 else
5214 {
5215 if (i.index_reg->reg_num == RegEiz
5216 || i.index_reg->reg_num == RegRiz)
5217 i.sib.index = NO_INDEX_REGISTER;
5218 else
5219 i.sib.index = i.index_reg->reg_num;
5220 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5221 if ((i.index_reg->reg_flags & RegRex) != 0)
5222 i.rex |= REX_X;
5223 }
5224
5225 if (i.disp_operands
5226 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5227 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5228 i.rm.mode = 0;
5229 else
5230 i.rm.mode = mode_from_disp_size (i.types[op]);
5231 }
5232
5233 if (fake_zero_displacement)
5234 {
5235 /* Fakes a zero displacement assuming that i.types[op]
5236 holds the correct displacement size. */
5237 expressionS *exp;
5238
5239 gas_assert (i.op[op].disps == 0);
5240 exp = &disp_expressions[i.disp_operands++];
5241 i.op[op].disps = exp;
5242 exp->X_op = O_constant;
5243 exp->X_add_number = 0;
5244 exp->X_add_symbol = (symbolS *) 0;
5245 exp->X_op_symbol = (symbolS *) 0;
5246 }
5247
5248 mem = op;
5249 }
5250 else
5251 mem = ~0;
5252
5253 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5254 (if any) based on i.tm.extension_opcode. Again, we must be
5255 careful to make sure that segment/control/debug/test/MMX
5256 registers are coded into the i.rm.reg field. */
5257 if (i.reg_operands)
5258 {
5259 unsigned int op;
5260 unsigned int vex_reg = ~0;
5261
5262 for (op = 0; op < i.operands; op++)
5263 if (i.types[op].bitfield.reg8
5264 || i.types[op].bitfield.reg16
5265 || i.types[op].bitfield.reg32
5266 || i.types[op].bitfield.reg64
5267 || i.types[op].bitfield.regmmx
5268 || i.types[op].bitfield.regxmm
5269 || i.types[op].bitfield.regymm
5270 || i.types[op].bitfield.sreg2
5271 || i.types[op].bitfield.sreg3
5272 || i.types[op].bitfield.control
5273 || i.types[op].bitfield.debug
5274 || i.types[op].bitfield.test)
5275 break;
5276
5277 if (vex_3_sources)
5278 op = dest;
5279 else if (i.tm.opcode_modifier.vexnds)
5280 {
5281 /* For instructions with VexNDS, the register-only
5282 source operand is encoded in VEX prefix. */
5283 gas_assert (mem != (unsigned int) ~0);
5284
5285 if (op > mem)
5286 {
5287 vex_reg = op++;
5288 gas_assert (op < i.operands);
5289 }
5290 else
5291 {
5292 vex_reg = op + 1;
5293 gas_assert (vex_reg < i.operands);
5294 }
5295 }
5296 else if (i.tm.opcode_modifier.vexndd)
5297 {
5298 /* For instructions with VexNDD, there should be
5299 no memory operand and the register destination
5300 is encoded in VEX prefix. */
5301 gas_assert (i.mem_operands == 0
5302 && (op + 2) == i.operands);
5303 vex_reg = op + 1;
5304 }
5305 else
5306 gas_assert (op < i.operands);
5307
5308 if (vex_reg != (unsigned int) ~0)
5309 {
5310 gas_assert (i.reg_operands == 2);
5311
5312 if (!operand_type_equal (&i.tm.operand_types[vex_reg],
5313 & regxmm)
5314 && !operand_type_equal (&i.tm.operand_types[vex_reg],
5315 &regymm))
5316 abort ();
5317 i.vex.register_specifier = i.op[vex_reg].regs;
5318 }
5319
5320 /* If there is an extension opcode to put here, the
5321 register number must be put into the regmem field. */
5322 if (i.tm.extension_opcode != None)
5323 {
5324 i.rm.regmem = i.op[op].regs->reg_num;
5325 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5326 i.rex |= REX_B;
5327 }
5328 else
5329 {
5330 i.rm.reg = i.op[op].regs->reg_num;
5331 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5332 i.rex |= REX_R;
5333 }
5334
5335 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5336 must set it to 3 to indicate this is a register operand
5337 in the regmem field. */
5338 if (!i.mem_operands)
5339 i.rm.mode = 3;
5340 }
5341
5342 /* Fill in i.rm.reg field with extension opcode (if any). */
5343 if (i.tm.extension_opcode != None)
5344 i.rm.reg = i.tm.extension_opcode;
5345 }
5346 return default_seg;
5347}
5348
5349static void
5350output_branch (void)
5351{
5352 char *p;
5353 int code16;
5354 int prefix;
5355 relax_substateT subtype;
5356 symbolS *sym;
5357 offsetT off;
5358
5359 code16 = 0;
5360 if (flag_code == CODE_16BIT)
5361 code16 = CODE16;
5362
5363 prefix = 0;
5364 if (i.prefix[DATA_PREFIX] != 0)
5365 {
5366 prefix = 1;
5367 i.prefixes -= 1;
5368 code16 ^= CODE16;
5369 }
5370 /* Pentium4 branch hints. */
5371 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5372 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5373 {
5374 prefix++;
5375 i.prefixes--;
5376 }
5377 if (i.prefix[REX_PREFIX] != 0)
5378 {
5379 prefix++;
5380 i.prefixes--;
5381 }
5382
5383 if (i.prefixes != 0 && !intel_syntax)
5384 as_warn (_("skipping prefixes on this instruction"));
5385
 5386 /* It's always a symbol; end the frag and set up for relax.
5387 Make sure there is enough room in this frag for the largest
5388 instruction we may generate in md_convert_frag. This is 2
5389 bytes for the opcode and room for the prefix and largest
5390 displacement. */
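 /* For example, a conditional `je' starts out as the short 2-byte
 form (0x74 disp8); if relaxation finds the target out of byte
 range, md_convert_frag grows it to the 6-byte 0x0f 0x84 rel32
 form in 32-bit code.  */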
5391 frag_grow (prefix + 2 + 4);
5392 /* Prefix and 1 opcode byte go in fr_fix. */
5393 p = frag_more (prefix + 1);
5394 if (i.prefix[DATA_PREFIX] != 0)
5395 *p++ = DATA_PREFIX_OPCODE;
5396 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5397 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5398 *p++ = i.prefix[SEG_PREFIX];
5399 if (i.prefix[REX_PREFIX] != 0)
5400 *p++ = i.prefix[REX_PREFIX];
5401 *p = i.tm.base_opcode;
5402
5403 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5404 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, SMALL);
5405 else if (cpu_arch_flags.bitfield.cpui386)
5406 subtype = ENCODE_RELAX_STATE (COND_JUMP, SMALL);
5407 else
5408 subtype = ENCODE_RELAX_STATE (COND_JUMP86, SMALL);
5409 subtype |= code16;
5410
5411 sym = i.op[0].disps->X_add_symbol;
5412 off = i.op[0].disps->X_add_number;
5413
5414 if (i.op[0].disps->X_op != O_constant
5415 && i.op[0].disps->X_op != O_symbol)
5416 {
5417 /* Handle complex expressions. */
5418 sym = make_expr_symbol (i.op[0].disps);
5419 off = 0;
5420 }
5421
5422 /* 1 possible extra opcode + 4 byte displacement go in var part.
5423 Pass reloc in fr_var. */
5424 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5425}
5426
5427static void
5428output_jump (void)
5429{
5430 char *p;
5431 int size;
5432 fixS *fixP;
5433
5434 if (i.tm.opcode_modifier.jumpbyte)
5435 {
5436 /* This is a loop or jecxz type instruction. */
5437 size = 1;
5438 if (i.prefix[ADDR_PREFIX] != 0)
5439 {
5440 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5441 i.prefixes -= 1;
5442 }
5443 /* Pentium4 branch hints. */
5444 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5445 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5446 {
5447 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5448 i.prefixes--;
5449 }
5450 }
5451 else
5452 {
5453 int code16;
5454
5455 code16 = 0;
5456 if (flag_code == CODE_16BIT)
5457 code16 = CODE16;
5458
5459 if (i.prefix[DATA_PREFIX] != 0)
5460 {
5461 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
5462 i.prefixes -= 1;
5463 code16 ^= CODE16;
5464 }
5465
5466 size = 4;
5467 if (code16)
5468 size = 2;
5469 }
5470
5471 if (i.prefix[REX_PREFIX] != 0)
5472 {
5473 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
5474 i.prefixes -= 1;
5475 }
5476
5477 if (i.prefixes != 0 && !intel_syntax)
5478 as_warn (_("skipping prefixes on this instruction"));
5479
5480 p = frag_more (1 + size);
5481 *p++ = i.tm.base_opcode;
5482
5483 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5484 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
5485
5486 /* All jumps handled here are signed, but don't use a signed limit
5487 check for 32 and 16 bit jumps as we want to allow wrap around at
5488 4G and 64k respectively. */
5489 if (size == 1)
5490 fixP->fx_signed = 1;
5491}
5492
5493static void
5494output_interseg_jump (void)
5495{
5496 char *p;
5497 int size;
5498 int prefix;
5499 int code16;
5500
5501 code16 = 0;
5502 if (flag_code == CODE_16BIT)
5503 code16 = CODE16;
5504
5505 prefix = 0;
5506 if (i.prefix[DATA_PREFIX] != 0)
5507 {
5508 prefix = 1;
5509 i.prefixes -= 1;
5510 code16 ^= CODE16;
5511 }
5512 if (i.prefix[REX_PREFIX] != 0)
5513 {
5514 prefix++;
5515 i.prefixes -= 1;
5516 }
5517
5518 size = 4;
5519 if (code16)
5520 size = 2;
5521
5522 if (i.prefixes != 0 && !intel_syntax)
5523 as_warn (_("skipping prefixes on this instruction"));
5524
5525 /* 1 opcode; 2 segment; offset */
5526 p = frag_more (prefix + 1 + 2 + size);
5527
5528 if (i.prefix[DATA_PREFIX] != 0)
5529 *p++ = DATA_PREFIX_OPCODE;
5530
5531 if (i.prefix[REX_PREFIX] != 0)
5532 *p++ = i.prefix[REX_PREFIX];
5533
5534 *p++ = i.tm.base_opcode;
5535 if (i.op[1].imms->X_op == O_constant)
5536 {
5537 offsetT n = i.op[1].imms->X_add_number;
5538
5539 if (size == 2
5540 && !fits_in_unsigned_word (n)
5541 && !fits_in_signed_word (n))
5542 {
5543 as_bad (_("16-bit jump out of range"));
5544 return;
5545 }
5546 md_number_to_chars (p, n, size);
5547 }
5548 else
5549 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5550 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
5551 if (i.op[0].imms->X_op != O_constant)
5552 as_bad (_("can't handle non absolute segment in `%s'"),
5553 i.tm.name);
5554 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
5555}
5556
5557static void
5558output_insn (void)
5559{
5560 fragS *insn_start_frag;
5561 offsetT insn_start_off;
5562
5563 /* Tie dwarf2 debug info to the address at the start of the insn.
5564 We can't do this after the insn has been output as the current
5565 frag may have been closed off. eg. by frag_var. */
5566 dwarf2_emit_insn (0);
5567
5568 insn_start_frag = frag_now;
5569 insn_start_off = frag_now_fix ();
5570
5571 /* Output jumps. */
5572 if (i.tm.opcode_modifier.jump)
5573 output_branch ();
5574 else if (i.tm.opcode_modifier.jumpbyte
5575 || i.tm.opcode_modifier.jumpdword)
5576 output_jump ();
5577 else if (i.tm.opcode_modifier.jumpintersegment)
5578 output_interseg_jump ();
5579 else
5580 {
5581 /* Output normal instructions here. */
5582 char *p;
5583 unsigned char *q;
5584 unsigned int j;
5585 unsigned int prefix;
5586
5587 /* Since the VEX prefix contains the implicit prefix, we don't
5588 need the explicit prefix. */
5589 if (!i.tm.opcode_modifier.vex)
5590 {
5591 switch (i.tm.opcode_length)
5592 {
5593 case 3:
5594 if (i.tm.base_opcode & 0xff000000)
5595 {
5596 prefix = (i.tm.base_opcode >> 24) & 0xff;
5597 goto check_prefix;
5598 }
5599 break;
5600 case 2:
5601 if ((i.tm.base_opcode & 0xff0000) != 0)
5602 {
5603 prefix = (i.tm.base_opcode >> 16) & 0xff;
5604 if (i.tm.cpu_flags.bitfield.cpupadlock)
5605 {
5606check_prefix:
5607 if (prefix != REPE_PREFIX_OPCODE
5608 || (i.prefix[LOCKREP_PREFIX]
5609 != REPE_PREFIX_OPCODE))
5610 add_prefix (prefix);
5611 }
5612 else
5613 add_prefix (prefix);
5614 }
5615 break;
5616 case 1:
5617 break;
5618 default:
5619 abort ();
5620 }
5621
5622 /* The prefix bytes. */
5623 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
5624 if (*q)
5625 FRAG_APPEND_1_CHAR (*q);
5626 }
5627
5628 if (i.tm.opcode_modifier.vex)
5629 {
5630 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
5631 if (*q)
5632 switch (j)
5633 {
5634 case REX_PREFIX:
5635 /* REX byte is encoded in VEX prefix. */
5636 break;
5637 case SEG_PREFIX:
5638 case ADDR_PREFIX:
5639 FRAG_APPEND_1_CHAR (*q);
5640 break;
5641 default:
5642 /* There should be no other prefixes for instructions
5643 with VEX prefix. */
5644 abort ();
5645 }
5646
5647 /* Now the VEX prefix. */
5648 p = frag_more (i.vex.length);
5649 for (j = 0; j < i.vex.length; j++)
5650 p[j] = i.vex.bytes[j];
5651 }
5652
5653 /* Now the opcode; be careful about word order here! */
5654 if (i.tm.opcode_length == 1)
5655 {
5656 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
5657 }
5658 else
5659 {
5660 switch (i.tm.opcode_length)
5661 {
5662 case 3:
5663 p = frag_more (3);
5664 *p++ = (i.tm.base_opcode >> 16) & 0xff;
5665 break;
5666 case 2:
5667 p = frag_more (2);
5668 break;
5669 default:
5670 abort ();
5671 break;
5672 }
5673
5674 /* Put out high byte first: can't use md_number_to_chars! */
5675 *p++ = (i.tm.base_opcode >> 8) & 0xff;
5676 *p = i.tm.base_opcode & 0xff;
5677 }
5678
5679 /* Now the modrm byte and sib byte (if present). */
5680 if (i.tm.opcode_modifier.modrm)
5681 {
5682 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
5683 | i.rm.reg << 3
5684 | i.rm.mode << 6));
5685 /* If i.rm.regmem == ESP (4)
5686 && i.rm.mode != (Register mode)
5687 && not 16 bit
5688 ==> need second modrm byte. */
5689 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
5690 && i.rm.mode != 3
5691 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
5692 FRAG_APPEND_1_CHAR ((i.sib.base << 0
5693 | i.sib.index << 3
5694 | i.sib.scale << 6));
5695 }
5696
5697 if (i.disp_operands)
5698 output_disp (insn_start_frag, insn_start_off);
5699
5700 if (i.imm_operands)
5701 output_imm (insn_start_frag, insn_start_off);
5702 }
5703
5704#ifdef DEBUG386
5705 if (flag_debug)
5706 {
5707 pi ("" /*line*/, &i);
5708 }
5709#endif /* DEBUG386 */
5710}
5711
5712/* Return the size of the displacement operand N. */
5713
5714static int
5715disp_size (unsigned int n)
5716{
5717 int size = 4;
5718 if (i.types[n].bitfield.disp64)
5719 size = 8;
5720 else if (i.types[n].bitfield.disp8)
5721 size = 1;
5722 else if (i.types[n].bitfield.disp16)
5723 size = 2;
5724 return size;
5725}
5726
5727/* Return the size of the immediate operand N. */
5728
5729static int
5730imm_size (unsigned int n)
5731{
5732 int size = 4;
5733 if (i.types[n].bitfield.imm64)
5734 size = 8;
5735 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
5736 size = 1;
5737 else if (i.types[n].bitfield.imm16)
5738 size = 2;
5739 return size;
5740}
5741
5742static void
5743output_disp (fragS *insn_start_frag, offsetT insn_start_off)
5744{
5745 char *p;
5746 unsigned int n;
5747
5748 for (n = 0; n < i.operands; n++)
5749 {
5750 if (operand_type_check (i.types[n], disp))
5751 {
5752 if (i.op[n].disps->X_op == O_constant)
5753 {
5754 int size = disp_size (n);
5755 offsetT val;
5756
5757 val = offset_in_range (i.op[n].disps->X_add_number,
5758 size);
5759 p = frag_more (size);
5760 md_number_to_chars (p, val, size);
5761 }
5762 else
5763 {
5764 enum bfd_reloc_code_real reloc_type;
5765 int size = disp_size (n);
5766 int sign = i.types[n].bitfield.disp32s;
5767 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
5768
5769 /* We can't have 8 bit displacement here. */
5770 gas_assert (!i.types[n].bitfield.disp8);
5771
5772 /* The PC relative address is computed relative
5773 to the instruction boundary, so in case immediate
 5774 fields follow, we need to adjust the value. */
5775 if (pcrel && i.imm_operands)
5776 {
5777 unsigned int n1;
5778 int sz = 0;
5779
5780 for (n1 = 0; n1 < i.operands; n1++)
5781 if (operand_type_check (i.types[n1], imm))
5782 {
5783 /* Only one immediate is allowed for PC
5784 relative address. */
5785 gas_assert (sz == 0);
5786 sz = imm_size (n1);
5787 i.op[n].disps->X_add_number -= sz;
5788 }
5789 /* We should find the immediate. */
5790 gas_assert (sz != 0);
5791 }
5792
5793 p = frag_more (size);
5794 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
5795 if (GOT_symbol
5796 && GOT_symbol == i.op[n].disps->X_add_symbol
5797 && (((reloc_type == BFD_RELOC_32
5798 || reloc_type == BFD_RELOC_X86_64_32S
5799 || (reloc_type == BFD_RELOC_64
5800 && object_64bit))
5801 && (i.op[n].disps->X_op == O_symbol
5802 || (i.op[n].disps->X_op == O_add
5803 && ((symbol_get_value_expression
5804 (i.op[n].disps->X_op_symbol)->X_op)
5805 == O_subtract))))
5806 || reloc_type == BFD_RELOC_32_PCREL))
5807 {
5808 offsetT add;
5809
5810 if (insn_start_frag == frag_now)
5811 add = (p - frag_now->fr_literal) - insn_start_off;
5812 else
5813 {
5814 fragS *fr;
5815
5816 add = insn_start_frag->fr_fix - insn_start_off;
5817 for (fr = insn_start_frag->fr_next;
5818 fr && fr != frag_now; fr = fr->fr_next)
5819 add += fr->fr_fix;
5820 add += p - frag_now->fr_literal;
5821 }
5822
5823 if (!object_64bit)
5824 {
5825 reloc_type = BFD_RELOC_386_GOTPC;
5826 i.op[n].imms->X_add_number += add;
5827 }
5828 else if (reloc_type == BFD_RELOC_64)
5829 reloc_type = BFD_RELOC_X86_64_GOTPC64;
5830 else
5831 /* Don't do the adjustment for x86-64, as there
5832 the pcrel addressing is relative to the _next_
5833 insn, and that is taken care of in other code. */
5834 reloc_type = BFD_RELOC_X86_64_GOTPC32;
5835 }
5836 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5837 i.op[n].disps, pcrel, reloc_type);
5838 }
5839 }
5840 }
5841}
5842
5843static void
5844output_imm (fragS *insn_start_frag, offsetT insn_start_off)
5845{
5846 char *p;
5847 unsigned int n;
5848
5849 for (n = 0; n < i.operands; n++)
5850 {
5851 if (operand_type_check (i.types[n], imm))
5852 {
5853 if (i.op[n].imms->X_op == O_constant)
5854 {
5855 int size = imm_size (n);
5856 offsetT val;
5857
5858 val = offset_in_range (i.op[n].imms->X_add_number,
5859 size);
5860 p = frag_more (size);
5861 md_number_to_chars (p, val, size);
5862 }
5863 else
5864 {
5865 /* Not absolute_section.
5866 Need a 32-bit fixup (don't support 8bit
5867 non-absolute imms). Try to support other
5868 sizes ... */
5869 enum bfd_reloc_code_real reloc_type;
5870 int size = imm_size (n);
5871 int sign;
5872
5873 if (i.types[n].bitfield.imm32s
5874 && (i.suffix == QWORD_MNEM_SUFFIX
5875 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
5876 sign = 1;
5877 else
5878 sign = 0;
5879
5880 p = frag_more (size);
5881 reloc_type = reloc (size, 0, sign, i.reloc[n]);
5882
5883 /* This is tough to explain. We end up with this one if we
5884 * have operands that look like
5885 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
5886 * obtain the absolute address of the GOT, and it is strongly
5887 * preferable from a performance point of view to avoid using
5888 * a runtime relocation for this. The actual sequence of
 5889 * instructions often looks something like:
5890 *
5891 * call .L66
5892 * .L66:
5893 * popl %ebx
5894 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
5895 *
5896 * The call and pop essentially return the absolute address
5897 * of the label .L66 and store it in %ebx. The linker itself
5898 * will ultimately change the first operand of the addl so
5899 * that %ebx points to the GOT, but to keep things simple, the
5900 * .o file must have this operand set so that it generates not
5901 * the absolute address of .L66, but the absolute address of
 5902 * itself. This allows the linker to simply treat a GOTPC
5903 * relocation as asking for a pcrel offset to the GOT to be
5904 * added in, and the addend of the relocation is stored in the
5905 * operand field for the instruction itself.
5906 *
5907 * Our job here is to fix the operand so that it would add
5908 * the correct offset so that %ebx would point to itself. The
5909 * thing that is tricky is that .-.L66 will point to the
5910 * beginning of the instruction, so we need to further modify
5911 * the operand so that it will point to itself. There are
5912 * other cases where you have something like:
5913 *
5914 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
5915 *
5916 * and here no correction would be required. Internally in
5917 * the assembler we treat operands of this form as not being
5918 * pcrel since the '.' is explicitly mentioned, and I wonder
5919 * whether it would simplify matters to do it this way. Who
5920 * knows. In earlier versions of the PIC patches, the
5921 * pcrel_adjust field was used to store the correction, but
5922 * since the expression is not pcrel, I felt it would be
5923 * confusing to do it this way. */
5924
5925 if ((reloc_type == BFD_RELOC_32
5926 || reloc_type == BFD_RELOC_X86_64_32S
5927 || reloc_type == BFD_RELOC_64)
5928 && GOT_symbol
5929 && GOT_symbol == i.op[n].imms->X_add_symbol
5930 && (i.op[n].imms->X_op == O_symbol
5931 || (i.op[n].imms->X_op == O_add
5932 && ((symbol_get_value_expression
5933 (i.op[n].imms->X_op_symbol)->X_op)
5934 == O_subtract))))
5935 {
5936 offsetT add;
5937
5938 if (insn_start_frag == frag_now)
5939 add = (p - frag_now->fr_literal) - insn_start_off;
5940 else
5941 {
5942 fragS *fr;
5943
5944 add = insn_start_frag->fr_fix - insn_start_off;
5945 for (fr = insn_start_frag->fr_next;
5946 fr && fr != frag_now; fr = fr->fr_next)
5947 add += fr->fr_fix;
5948 add += p - frag_now->fr_literal;
5949 }
5950
5951 if (!object_64bit)
5952 reloc_type = BFD_RELOC_386_GOTPC;
5953 else if (size == 4)
5954 reloc_type = BFD_RELOC_X86_64_GOTPC32;
5955 else if (size == 8)
5956 reloc_type = BFD_RELOC_X86_64_GOTPC64;
5957 i.op[n].imms->X_add_number += add;
5958 }
5959 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5960 i.op[n].imms, 0, reloc_type);
5961 }
5962 }
5963 }
5964}
5965\f
5966/* x86_cons_fix_new is called via the expression parsing code when a
5967 reloc is needed. We use this hook to get the correct .got reloc. */
5968static enum bfd_reloc_code_real got_reloc = NO_RELOC;
5969static int cons_sign = -1;
5970
5971void
5972x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
5973 expressionS *exp)
5974{
5975 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
5976
5977 got_reloc = NO_RELOC;
5978
5979#ifdef TE_PE
5980 if (exp->X_op == O_secrel)
5981 {
5982 exp->X_op = O_symbol;
5983 r = BFD_RELOC_32_SECREL;
5984 }
5985#endif
5986
5987 fix_new_exp (frag, off, len, exp, 0, r);
5988}
5989
5990#if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
5991# define lex_got(reloc, adjust, types) NULL
5992#else
5993/* Parse operands of the form
5994 <symbol>@GOTOFF+<nnn>
5995 and similar .plt or .got references.
5996
5997 If we find one, set up the correct relocation in RELOC and copy the
5998 input string, minus the `@GOTOFF' into a malloc'd buffer for
5999 parsing by the calling routine. Return this buffer, and if ADJUST
6000 is non-null set it to the length of the string we removed from the
6001 input line. Otherwise return NULL. */
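/* For example, given `foo@GOTOFF(%ebx)' in 32-bit code, RELOC is set
 to BFD_RELOC_386_GOTOFF and the returned buffer holds "foo (%ebx)",
 with the reloc token replaced by a space.  */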
6002static char *
6003lex_got (enum bfd_reloc_code_real *reloc,
6004 int *adjust,
6005 i386_operand_type *types)
6006{
 6007 /* Some of the relocations depend on the size of the field to
 6008 be relocated. But in our callers i386_immediate and i386_displacement
6009 we don't yet know the operand size (this will be set by insn
6010 matching). Hence we record the word32 relocation here,
6011 and adjust the reloc according to the real size in reloc(). */
6012 static const struct {
6013 const char *str;
6014 const enum bfd_reloc_code_real rel[2];
6015 const i386_operand_type types64;
6016 } gotrel[] = {
6017 { "PLTOFF", { 0,
6018 BFD_RELOC_X86_64_PLTOFF64 },
6019 OPERAND_TYPE_IMM64 },
6020 { "PLT", { BFD_RELOC_386_PLT32,
6021 BFD_RELOC_X86_64_PLT32 },
6022 OPERAND_TYPE_IMM32_32S_DISP32 },
6023 { "GOTPLT", { 0,
6024 BFD_RELOC_X86_64_GOTPLT64 },
6025 OPERAND_TYPE_IMM64_DISP64 },
6026 { "GOTOFF", { BFD_RELOC_386_GOTOFF,
6027 BFD_RELOC_X86_64_GOTOFF64 },
6028 OPERAND_TYPE_IMM64_DISP64 },
6029 { "GOTPCREL", { 0,
6030 BFD_RELOC_X86_64_GOTPCREL },
6031 OPERAND_TYPE_IMM32_32S_DISP32 },
6032 { "TLSGD", { BFD_RELOC_386_TLS_GD,
6033 BFD_RELOC_X86_64_TLSGD },
6034 OPERAND_TYPE_IMM32_32S_DISP32 },
6035 { "TLSLDM", { BFD_RELOC_386_TLS_LDM,
6036 0 },
6037 OPERAND_TYPE_NONE },
6038 { "TLSLD", { 0,
6039 BFD_RELOC_X86_64_TLSLD },
6040 OPERAND_TYPE_IMM32_32S_DISP32 },
6041 { "GOTTPOFF", { BFD_RELOC_386_TLS_IE_32,
6042 BFD_RELOC_X86_64_GOTTPOFF },
6043 OPERAND_TYPE_IMM32_32S_DISP32 },
6044 { "TPOFF", { BFD_RELOC_386_TLS_LE_32,
6045 BFD_RELOC_X86_64_TPOFF32 },
6046 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6047 { "NTPOFF", { BFD_RELOC_386_TLS_LE,
6048 0 },
6049 OPERAND_TYPE_NONE },
6050 { "DTPOFF", { BFD_RELOC_386_TLS_LDO_32,
6051 BFD_RELOC_X86_64_DTPOFF32 },
6052
6053 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6054 { "GOTNTPOFF",{ BFD_RELOC_386_TLS_GOTIE,
6055 0 },
6056 OPERAND_TYPE_NONE },
6057 { "INDNTPOFF",{ BFD_RELOC_386_TLS_IE,
6058 0 },
6059 OPERAND_TYPE_NONE },
6060 { "GOT", { BFD_RELOC_386_GOT32,
6061 BFD_RELOC_X86_64_GOT32 },
6062 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6063 { "TLSDESC", { BFD_RELOC_386_TLS_GOTDESC,
6064 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6065 OPERAND_TYPE_IMM32_32S_DISP32 },
6066 { "TLSCALL", { BFD_RELOC_386_TLS_DESC_CALL,
6067 BFD_RELOC_X86_64_TLSDESC_CALL },
6068 OPERAND_TYPE_IMM32_32S_DISP32 },
6069 };
6070 char *cp;
6071 unsigned int j;
6072
6073 if (!IS_ELF)
6074 return NULL;
6075
6076 for (cp = input_line_pointer; *cp != '@'; cp++)
6077 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6078 return NULL;
6079
6080 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6081 {
6082 int len;
6083
6084 len = strlen (gotrel[j].str);
6085 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6086 {
6087 if (gotrel[j].rel[object_64bit] != 0)
6088 {
6089 int first, second;
6090 char *tmpbuf, *past_reloc;
6091
6092 *reloc = gotrel[j].rel[object_64bit];
6093 if (adjust)
6094 *adjust = len;
6095
6096 if (types)
6097 {
6098 if (flag_code != CODE_64BIT)
6099 {
6100 types->bitfield.imm32 = 1;
6101 types->bitfield.disp32 = 1;
6102 }
6103 else
6104 *types = gotrel[j].types64;
6105 }
6106
6107 if (GOT_symbol == NULL)
6108 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6109
6110 /* The length of the first part of our input line. */
6111 first = cp - input_line_pointer;
6112
6113 /* The second part goes from after the reloc token until
6114 (and including) an end_of_line char or comma. */
6115 past_reloc = cp + 1 + len;
6116 cp = past_reloc;
6117 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6118 ++cp;
6119 second = cp + 1 - past_reloc;
6120
6121 /* Allocate and copy string. The trailing NUL shouldn't
6122 be necessary, but be safe. */
6123 tmpbuf = xmalloc (first + second + 2);
6124 memcpy (tmpbuf, input_line_pointer, first);
6125 if (second != 0 && *past_reloc != ' ')
6126 /* Replace the relocation token with ' ', so that
6127 errors like foo@GOTOFF1 will be detected. */
6128 tmpbuf[first++] = ' ';
6129 memcpy (tmpbuf + first, past_reloc, second);
6130 tmpbuf[first + second] = '\0';
6131 return tmpbuf;
6132 }
6133
6134 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6135 gotrel[j].str, 1 << (5 + object_64bit));
6136 return NULL;
6137 }
6138 }
6139
6140 /* Might be a symbol version string. Don't as_bad here. */
6141 return NULL;
6142}
6143
6144void
6145x86_cons (expressionS *exp, int size)
6146{
6147 intel_syntax = -intel_syntax;
6148
6149 if (size == 4 || (object_64bit && size == 8))
6150 {
6151 /* Handle @GOTOFF and the like in an expression. */
6152 char *save;
6153 char *gotfree_input_line;
6154 int adjust;
6155
6156 save = input_line_pointer;
6157 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6158 if (gotfree_input_line)
6159 input_line_pointer = gotfree_input_line;
6160
6161 expression (exp);
6162
6163 if (gotfree_input_line)
6164 {
6165 /* expression () has merrily parsed up to the end of line,
6166 or a comma - in the wrong buffer. Transfer how far
6167 input_line_pointer has moved to the right buffer. */
6168 input_line_pointer = (save
6169 + (input_line_pointer - gotfree_input_line)
6170 + adjust);
6171 free (gotfree_input_line);
6172 if (exp->X_op == O_constant
6173 || exp->X_op == O_absent
6174 || exp->X_op == O_illegal
6175 || exp->X_op == O_register
6176 || exp->X_op == O_big)
6177 {
6178 char c = *input_line_pointer;
6179 *input_line_pointer = 0;
6180 as_bad (_("missing or invalid expression `%s'"), save);
6181 *input_line_pointer = c;
6182 }
6183 }
6184 }
6185 else
6186 expression (exp);
6187
6188 intel_syntax = -intel_syntax;
6189
6190 if (intel_syntax)
6191 i386_intel_simplify (exp);
6192}
6193#endif
6194
6195static void signed_cons (int size)
6196{
6197 if (flag_code == CODE_64BIT)
6198 cons_sign = 1;
6199 cons (size);
6200 cons_sign = -1;
6201}
6202
6203#ifdef TE_PE
6204static void
6205pe_directive_secrel (dummy)
6206 int dummy ATTRIBUTE_UNUSED;
6207{
6208 expressionS exp;
6209
6210 do
6211 {
6212 expression (&exp);
6213 if (exp.X_op == O_symbol)
6214 exp.X_op = O_secrel;
6215
6216 emit_expr (&exp, 4);
6217 }
6218 while (*input_line_pointer++ == ',');
6219
6220 input_line_pointer--;
6221 demand_empty_rest_of_line ();
6222}
6223#endif
6224
6225static int
6226i386_immediate (char *imm_start)
6227{
6228 char *save_input_line_pointer;
6229 char *gotfree_input_line;
6230 segT exp_seg = 0;
6231 expressionS *exp;
6232 i386_operand_type types;
6233
6234 operand_type_set (&types, ~0);
6235
6236 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6237 {
6238 as_bad (_("at most %d immediate operands are allowed"),
6239 MAX_IMMEDIATE_OPERANDS);
6240 return 0;
6241 }
6242
6243 exp = &im_expressions[i.imm_operands++];
6244 i.op[this_operand].imms = exp;
6245
6246 if (is_space_char (*imm_start))
6247 ++imm_start;
6248
6249 save_input_line_pointer = input_line_pointer;
6250 input_line_pointer = imm_start;
6251
6252 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6253 if (gotfree_input_line)
6254 input_line_pointer = gotfree_input_line;
6255
6256 exp_seg = expression (exp);
6257
6258 SKIP_WHITESPACE ();
6259 if (*input_line_pointer)
6260 as_bad (_("junk `%s' after expression"), input_line_pointer);
6261
6262 input_line_pointer = save_input_line_pointer;
6263 if (gotfree_input_line)
6264 {
6265 free (gotfree_input_line);
6266
6267 if (exp->X_op == O_constant || exp->X_op == O_register)
6268 exp->X_op = O_illegal;
6269 }
6270
6271 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6272}
6273
6274static int
6275i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6276 i386_operand_type types, const char *imm_start)
6277{
6278 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6279 {
6280 as_bad (_("missing or invalid immediate expression `%s'"),
6281 imm_start);
6282 return 0;
6283 }
6284 else if (exp->X_op == O_constant)
6285 {
6286 /* Size it properly later. */
6287 i.types[this_operand].bitfield.imm64 = 1;
6288 /* If BFD64, sign extend val. */
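/* The identity (x ^ 0x80000000) - 0x80000000, evaluated in the wider
   addressT type, sign-extends bit 31: e.g. 0xfffffffe becomes -2 while
   0x7fffffff is left unchanged.  */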
6289 if (!use_rela_relocations
6290 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6291 exp->X_add_number
6292 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6293 }
6294#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6295 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6296 && exp_seg != absolute_section
6297 && exp_seg != text_section
6298 && exp_seg != data_section
6299 && exp_seg != bss_section
6300 && exp_seg != undefined_section
6301 && !bfd_is_com_section (exp_seg))
6302 {
6303 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6304 return 0;
6305 }
6306#endif
6307 else if (!intel_syntax && exp->X_op == O_register)
6308 {
6309 as_bad (_("illegal immediate register operand %s"), imm_start);
6310 return 0;
6311 }
6312 else
6313 {
6314 /* This is an address. The size of the address will be
6315 determined later, depending on destination register,
6316 suffix, or the default for the section. */
6317 i.types[this_operand].bitfield.imm8 = 1;
6318 i.types[this_operand].bitfield.imm16 = 1;
6319 i.types[this_operand].bitfield.imm32 = 1;
6320 i.types[this_operand].bitfield.imm32s = 1;
6321 i.types[this_operand].bitfield.imm64 = 1;
6322 i.types[this_operand] = operand_type_and (i.types[this_operand],
6323 types);
6324 }
6325
6326 return 1;
6327}
6328
6329static char *
6330i386_scale (char *scale)
6331{
6332 offsetT val;
6333 char *save = input_line_pointer;
6334
6335 input_line_pointer = scale;
6336 val = get_absolute_expression ();
6337
6338 switch (val)
6339 {
6340 case 1:
6341 i.log2_scale_factor = 0;
6342 break;
6343 case 2:
6344 i.log2_scale_factor = 1;
6345 break;
6346 case 4:
6347 i.log2_scale_factor = 2;
6348 break;
6349 case 8:
6350 i.log2_scale_factor = 3;
6351 break;
6352 default:
6353 {
6354 char sep = *input_line_pointer;
6355
6356 *input_line_pointer = '\0';
6357 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6358 scale);
6359 *input_line_pointer = sep;
6360 input_line_pointer = save;
6361 return NULL;
6362 }
6363 }
6364 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6365 {
6366 as_warn (_("scale factor of %d without an index register"),
6367 1 << i.log2_scale_factor);
6368 i.log2_scale_factor = 0;
6369 }
6370 scale = input_line_pointer;
6371 input_line_pointer = save;
6372 return scale;
6373}
6374
6375static int
6376i386_displacement (char *disp_start, char *disp_end)
6377{
6378 expressionS *exp;
6379 segT exp_seg = 0;
6380 char *save_input_line_pointer;
6381 char *gotfree_input_line;
6382 int override;
6383 i386_operand_type bigdisp, types = anydisp;
6384 int ret;
6385
6386 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6387 {
6388 as_bad (_("at most %d displacement operands are allowed"),
6389 MAX_MEMORY_OPERANDS);
6390 return 0;
6391 }
6392
6393 operand_type_set (&bigdisp, 0);
6394 if ((i.types[this_operand].bitfield.jumpabsolute)
6395 || (!current_templates->start->opcode_modifier.jump
6396 && !current_templates->start->opcode_modifier.jumpdword))
6397 {
6398 bigdisp.bitfield.disp32 = 1;
6399 override = (i.prefix[ADDR_PREFIX] != 0);
6400 if (flag_code == CODE_64BIT)
6401 {
6402 if (!override)
6403 {
6404 bigdisp.bitfield.disp32s = 1;
6405 bigdisp.bitfield.disp64 = 1;
6406 }
6407 }
6408 else if ((flag_code == CODE_16BIT) ^ override)
6409 {
6410 bigdisp.bitfield.disp32 = 0;
6411 bigdisp.bitfield.disp16 = 1;
6412 }
6413 }
6414 else
6415 {
6416 /* For PC-relative branches, the width of the displacement
6417 is dependent upon data size, not address size. */
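/* E.g. in 32-bit code a `jmp' carrying a 0x66 data size prefix (or a
   `w' suffix) takes a 16-bit displacement even though addresses are
   32 bits wide.  */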
6418 override = (i.prefix[DATA_PREFIX] != 0);
6419 if (flag_code == CODE_64BIT)
6420 {
6421 if (override || i.suffix == WORD_MNEM_SUFFIX)
6422 bigdisp.bitfield.disp16 = 1;
6423 else
6424 {
6425 bigdisp.bitfield.disp32 = 1;
6426 bigdisp.bitfield.disp32s = 1;
6427 }
6428 }
6429 else
6430 {
6431 if (!override)
6432 override = (i.suffix == (flag_code != CODE_16BIT
6433 ? WORD_MNEM_SUFFIX
6434 : LONG_MNEM_SUFFIX));
6435 bigdisp.bitfield.disp32 = 1;
6436 if ((flag_code == CODE_16BIT) ^ override)
6437 {
6438 bigdisp.bitfield.disp32 = 0;
6439 bigdisp.bitfield.disp16 = 1;
6440 }
6441 }
6442 }
6443 i.types[this_operand] = operand_type_or (i.types[this_operand],
6444 bigdisp);
6445
6446 exp = &disp_expressions[i.disp_operands];
6447 i.op[this_operand].disps = exp;
6448 i.disp_operands++;
6449 save_input_line_pointer = input_line_pointer;
6450 input_line_pointer = disp_start;
6451 END_STRING_AND_SAVE (disp_end);
6452
6453#ifndef GCC_ASM_O_HACK
6454#define GCC_ASM_O_HACK 0
6455#endif
6456#if GCC_ASM_O_HACK
6457 END_STRING_AND_SAVE (disp_end + 1);
6458 if (i.types[this_operand].bitfield.baseIndex
6459 && displacement_string_end[-1] == '+')
6460 {
6461 /* This hack is to avoid a warning when using the "o"
6462 constraint within gcc asm statements.
6463 For instance:
6464
6465 #define _set_tssldt_desc(n,addr,limit,type) \
6466 __asm__ __volatile__ ( \
6467 "movw %w2,%0\n\t" \
6468 "movw %w1,2+%0\n\t" \
6469 "rorl $16,%1\n\t" \
6470 "movb %b1,4+%0\n\t" \
6471 "movb %4,5+%0\n\t" \
6472 "movb $0,6+%0\n\t" \
6473 "movb %h1,7+%0\n\t" \
6474 "rorl $16,%1" \
6475 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
6476
6477 This works great except that the output assembler ends
6478 up looking a bit weird if it turns out that there is
6479 no offset. You end up producing code that looks like:
6480
6481 #APP
6482 movw $235,(%eax)
6483 movw %dx,2+(%eax)
6484 rorl $16,%edx
6485 movb %dl,4+(%eax)
6486 movb $137,5+(%eax)
6487 movb $0,6+(%eax)
6488 movb %dh,7+(%eax)
6489 rorl $16,%edx
6490 #NO_APP
6491
6492 So here we provide the missing zero. */
6493
6494 *displacement_string_end = '0';
6495 }
6496#endif
6497 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6498 if (gotfree_input_line)
6499 input_line_pointer = gotfree_input_line;
6500
6501 exp_seg = expression (exp);
6502
6503 SKIP_WHITESPACE ();
6504 if (*input_line_pointer)
6505 as_bad (_("junk `%s' after expression"), input_line_pointer);
6506#if GCC_ASM_O_HACK
6507 RESTORE_END_STRING (disp_end + 1);
6508#endif
6509 input_line_pointer = save_input_line_pointer;
6510 if (gotfree_input_line)
6511 {
6512 free (gotfree_input_line);
6513
6514 if (exp->X_op == O_constant || exp->X_op == O_register)
6515 exp->X_op = O_illegal;
6516 }
6517
6518 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
6519
6520 RESTORE_END_STRING (disp_end);
6521
6522 return ret;
6523}
6524
6525static int
6526i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6527 i386_operand_type types, const char *disp_start)
6528{
6529 i386_operand_type bigdisp;
6530 int ret = 1;
6531
6532 /* We do this to make sure that the section symbol is in
6533 the symbol table. We will ultimately change the relocation
6534 to be relative to the beginning of the section. */
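/* E.g. a 32-bit `foo@GOTOFF' reference is turned into the expression
   foo - _GLOBAL_OFFSET_TABLE_ with a plain BFD_RELOC_32 by the
   rewriting below.  */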
6535 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
6536 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
6537 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6538 {
6539 if (exp->X_op != O_symbol)
6540 goto inv_disp;
6541
6542 if (S_IS_LOCAL (exp->X_add_symbol)
6543 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section)
6544 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
6545 exp->X_op = O_subtract;
6546 exp->X_op_symbol = GOT_symbol;
6547 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
6548 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
6549 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6550 i.reloc[this_operand] = BFD_RELOC_64;
6551 else
6552 i.reloc[this_operand] = BFD_RELOC_32;
6553 }
6554
6555 else if (exp->X_op == O_absent
6556 || exp->X_op == O_illegal
6557 || exp->X_op == O_big)
6558 {
6559 inv_disp:
6560 as_bad (_("missing or invalid displacement expression `%s'"),
6561 disp_start);
6562 ret = 0;
6563 }
6564
6565#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6566 else if (exp->X_op != O_constant
6567 && OUTPUT_FLAVOR == bfd_target_aout_flavour
6568 && exp_seg != absolute_section
6569 && exp_seg != text_section
6570 && exp_seg != data_section
6571 && exp_seg != bss_section
6572 && exp_seg != undefined_section
6573 && !bfd_is_com_section (exp_seg))
6574 {
6575 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6576 ret = 0;
6577 }
6578#endif
6579
6580 /* Check if this is a displacement only operand. */
6581 bigdisp = i.types[this_operand];
6582 bigdisp.bitfield.disp8 = 0;
6583 bigdisp.bitfield.disp16 = 0;
6584 bigdisp.bitfield.disp32 = 0;
6585 bigdisp.bitfield.disp32s = 0;
6586 bigdisp.bitfield.disp64 = 0;
6587 if (operand_type_all_zero (&bigdisp))
6588 i.types[this_operand] = operand_type_and (i.types[this_operand],
6589 types);
6590
6591 return ret;
6592}
6593
6594 /* Make sure the memory operand we've been dealt is valid.
6595 Return 1 on success, 0 on failure. */
6596
6597static int
6598i386_index_check (const char *operand_string)
6599{
6600 int ok;
6601 const char *kind = "base/index";
6602#if INFER_ADDR_PREFIX
6603 int fudged = 0;
6604
6605 tryprefix:
6606#endif
6607 ok = 1;
6608 if (current_templates->start->opcode_modifier.isstring
6609 && !current_templates->start->opcode_modifier.immext
6610 && (current_templates->end[-1].opcode_modifier.isstring
6611 || i.mem_operands))
6612 {
6613 /* Memory operands of string insns are special in that they only allow
6614 a single register (rDI, rSI, or rBX) as their memory address. */
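/* E.g. `movs' implicitly reads from (%esi) and writes to %es:(%edi),
   and `xlat' uses (%ebx); the checks below verify that an explicit
   memory operand names exactly that register.  */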
6615 unsigned int expected;
6616
6617 kind = "string address";
6618
6619 if (current_templates->start->opcode_modifier.w)
6620 {
6621 i386_operand_type type = current_templates->end[-1].operand_types[0];
6622
6623 if (!type.bitfield.baseindex
6624 || ((!i.mem_operands != !intel_syntax)
6625 && current_templates->end[-1].operand_types[1]
6626 .bitfield.baseindex))
6627 type = current_templates->end[-1].operand_types[1];
6628 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
6629 }
6630 else
6631 expected = 3 /* rBX */;
6632
6633 if (!i.base_reg || i.index_reg
6634 || operand_type_check (i.types[this_operand], disp))
6635 ok = -1;
6636 else if (!(flag_code == CODE_64BIT
6637 ? i.prefix[ADDR_PREFIX]
6638 ? i.base_reg->reg_type.bitfield.reg32
6639 : i.base_reg->reg_type.bitfield.reg64
6640 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
6641 ? i.base_reg->reg_type.bitfield.reg32
6642 : i.base_reg->reg_type.bitfield.reg16))
6643 ok = 0;
6644 else if (i.base_reg->reg_num != expected)
6645 ok = -1;
6646
6647 if (ok < 0)
6648 {
6649 unsigned int j;
6650
6651 for (j = 0; j < i386_regtab_size; ++j)
6652 if ((flag_code == CODE_64BIT
6653 ? i.prefix[ADDR_PREFIX]
6654 ? i386_regtab[j].reg_type.bitfield.reg32
6655 : i386_regtab[j].reg_type.bitfield.reg64
6656 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
6657 ? i386_regtab[j].reg_type.bitfield.reg32
6658 : i386_regtab[j].reg_type.bitfield.reg16)
6659 && i386_regtab[j].reg_num == expected)
6660 break;
6661 gas_assert (j < i386_regtab_size);
6662 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
6663 operand_string,
6664 intel_syntax ? '[' : '(',
6665 register_prefix,
6666 i386_regtab[j].reg_name,
6667 intel_syntax ? ']' : ')');
6668 ok = 1;
6669 }
6670 }
6671 else if (flag_code == CODE_64BIT)
6672 {
6673 if ((i.base_reg
6674 && ((i.prefix[ADDR_PREFIX] == 0
6675 && !i.base_reg->reg_type.bitfield.reg64)
6676 || (i.prefix[ADDR_PREFIX]
6677 && !i.base_reg->reg_type.bitfield.reg32))
6678 && (i.index_reg
6679 || i.base_reg->reg_num !=
6680 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
6681 || (i.index_reg
6682 && (!i.index_reg->reg_type.bitfield.baseindex
6683 || (i.prefix[ADDR_PREFIX] == 0
6684 && i.index_reg->reg_num != RegRiz
6685 && !i.index_reg->reg_type.bitfield.reg64
6686 )
6687 || (i.prefix[ADDR_PREFIX]
6688 && i.index_reg->reg_num != RegEiz
6689 && !i.index_reg->reg_type.bitfield.reg32))))
6690 ok = 0;
6691 }
6692 else
6693 {
6694 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
6695 {
6696 /* 16bit checks. */
6697 if ((i.base_reg
6698 && (!i.base_reg->reg_type.bitfield.reg16
6699 || !i.base_reg->reg_type.bitfield.baseindex))
6700 || (i.index_reg
6701 && (!i.index_reg->reg_type.bitfield.reg16
6702 || !i.index_reg->reg_type.bitfield.baseindex
6703 || !(i.base_reg
6704 && i.base_reg->reg_num < 6
6705 && i.index_reg->reg_num >= 6
6706 && i.log2_scale_factor == 0))))
6707 ok = 0;
6708 }
6709 else
6710 {
6711 /* 32bit checks. */
6712 if ((i.base_reg
6713 && !i.base_reg->reg_type.bitfield.reg32)
6714 || (i.index_reg
6715 && ((!i.index_reg->reg_type.bitfield.reg32
6716 && i.index_reg->reg_num != RegEiz)
6717 || !i.index_reg->reg_type.bitfield.baseindex)))
6718 ok = 0;
6719 }
6720 }
6721 if (!ok)
6722 {
6723#if INFER_ADDR_PREFIX
6724 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
6725 {
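/* E.g. `movw %ax, (%bx)' assembled as 32-bit code: the 32-bit checks
   above reject %bx as a base register, so infer a 0x67 address size
   prefix and retry the check in 16-bit mode.  */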
6726 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
6727 i.prefixes += 1;
6728 /* Change the size of any displacement too. At most one of
6729 Disp16 or Disp32 is set.
6730 FIXME. There doesn't seem to be any real need for separate
6731 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
6732 Removing them would probably clean up the code quite a lot. */
6733 if (flag_code != CODE_64BIT
6734 && (i.types[this_operand].bitfield.disp16
6735 || i.types[this_operand].bitfield.disp32))
6736 i.types[this_operand]
6737 = operand_type_xor (i.types[this_operand], disp16_32);
6738 fudged = 1;
6739 goto tryprefix;
6740 }
6741 if (fudged)
6742 as_bad (_("`%s' is not a valid %s expression"),
6743 operand_string,
6744 kind);
6745 else
6746#endif
6747 as_bad (_("`%s' is not a valid %s-bit %s expression"),
6748 operand_string,
6749 flag_code_names[i.prefix[ADDR_PREFIX]
6750 ? flag_code == CODE_32BIT
6751 ? CODE_16BIT
6752 : CODE_32BIT
6753 : flag_code],
6754 kind);
6755 }
6756 return ok;
6757}
6758
6759/* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
6760 on error. */
6761
6762static int
6763i386_att_operand (char *operand_string)
6764{
6765 const reg_entry *r;
6766 char *end_op;
6767 char *op_string = operand_string;
6768
6769 if (is_space_char (*op_string))
6770 ++op_string;
6771
6772 /* We check for an absolute prefix (differentiating, for example,
6773 'jmp pc_relative_label' from 'jmp *absolute_label'). */
6774 if (*op_string == ABSOLUTE_PREFIX)
6775 {
6776 ++op_string;
6777 if (is_space_char (*op_string))
6778 ++op_string;
6779 i.types[this_operand].bitfield.jumpabsolute = 1;
6780 }
6781
6782 /* Check if operand is a register. */
6783 if ((r = parse_register (op_string, &end_op)) != NULL)
6784 {
6785 i386_operand_type temp;
6786
6787 /* Check for a segment override by searching for ':' after a
6788 segment register. */
6789 op_string = end_op;
6790 if (is_space_char (*op_string))
6791 ++op_string;
6792 if (*op_string == ':'
6793 && (r->reg_type.bitfield.sreg2
6794 || r->reg_type.bitfield.sreg3))
6795 {
6796 switch (r->reg_num)
6797 {
6798 case 0:
6799 i.seg[i.mem_operands] = &es;
6800 break;
6801 case 1:
6802 i.seg[i.mem_operands] = &cs;
6803 break;
6804 case 2:
6805 i.seg[i.mem_operands] = &ss;
6806 break;
6807 case 3:
6808 i.seg[i.mem_operands] = &ds;
6809 break;
6810 case 4:
6811 i.seg[i.mem_operands] = &fs;
6812 break;
6813 case 5:
6814 i.seg[i.mem_operands] = &gs;
6815 break;
6816 }
6817
6818 /* Skip the ':' and whitespace. */
6819 ++op_string;
6820 if (is_space_char (*op_string))
6821 ++op_string;
6822
6823 if (!is_digit_char (*op_string)
6824 && !is_identifier_char (*op_string)
6825 && *op_string != '('
6826 && *op_string != ABSOLUTE_PREFIX)
6827 {
6828 as_bad (_("bad memory operand `%s'"), op_string);
6829 return 0;
6830 }
6831 /* Handle case of %es:*foo. */
6832 if (*op_string == ABSOLUTE_PREFIX)
6833 {
6834 ++op_string;
6835 if (is_space_char (*op_string))
6836 ++op_string;
6837 i.types[this_operand].bitfield.jumpabsolute = 1;
6838 }
6839 goto do_memory_reference;
6840 }
6841 if (*op_string)
6842 {
6843 as_bad (_("junk `%s' after register"), op_string);
6844 return 0;
6845 }
6846 temp = r->reg_type;
6847 temp.bitfield.baseindex = 0;
6848 i.types[this_operand] = operand_type_or (i.types[this_operand],
6849 temp);
6850 i.types[this_operand].bitfield.unspecified = 0;
6851 i.op[this_operand].regs = r;
6852 i.reg_operands++;
6853 }
6854 else if (*op_string == REGISTER_PREFIX)
6855 {
6856 as_bad (_("bad register name `%s'"), op_string);
6857 return 0;
6858 }
6859 else if (*op_string == IMMEDIATE_PREFIX)
6860 {
6861 ++op_string;
6862 if (i.types[this_operand].bitfield.jumpabsolute)
6863 {
6864 as_bad (_("immediate operand illegal with absolute jump"));
6865 return 0;
6866 }
6867 if (!i386_immediate (op_string))
6868 return 0;
6869 }
6870 else if (is_digit_char (*op_string)
6871 || is_identifier_char (*op_string)
6872 || *op_string == '(')
6873 {
6874 /* This is a memory reference of some sort. */
6875 char *base_string;
6876
6877 /* Start and end of displacement string expression (if found). */
6878 char *displacement_string_start;
6879 char *displacement_string_end;
6880
6881 do_memory_reference:
6882 if ((i.mem_operands == 1
6883 && !current_templates->start->opcode_modifier.isstring)
6884 || i.mem_operands == 2)
6885 {
6886 as_bad (_("too many memory references for `%s'"),
6887 current_templates->start->name);
6888 return 0;
6889 }
6890
6891 /* Check for base index form. We detect the base index form by
6892 looking for an ')' at the end of the operand, searching
6893 for the '(' matching it, and finding a REGISTER_PREFIX or ','
6894 after the '('. */
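/* For instance, `-4(%ebp,%ecx,4)' is split into the displacement
   string "-4", base register %ebp, index register %ecx, and a scale
   factor of 4 (handled by i386_scale).  */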
6895 base_string = op_string + strlen (op_string);
6896
6897 --base_string;
6898 if (is_space_char (*base_string))
6899 --base_string;
6900
6901 /* If we only have a displacement, set up for it to be parsed later. */
6902 displacement_string_start = op_string;
6903 displacement_string_end = base_string + 1;
6904
6905 if (*base_string == ')')
6906 {
6907 char *temp_string;
6908 unsigned int parens_balanced = 1;
6909 /* We've already checked that the numbers of left and right
6910 parentheses are equal, so this loop will not be infinite. */
6911 do
6912 {
6913 base_string--;
6914 if (*base_string == ')')
6915 parens_balanced++;
6916 if (*base_string == '(')
6917 parens_balanced--;
6918 }
6919 while (parens_balanced);
6920
6921 temp_string = base_string;
6922
6923 /* Skip past '(' and whitespace. */
6924 ++base_string;
6925 if (is_space_char (*base_string))
6926 ++base_string;
6927
6928 if (*base_string == ','
6929 || ((i.base_reg = parse_register (base_string, &end_op))
6930 != NULL))
6931 {
6932 displacement_string_end = temp_string;
6933
6934 i.types[this_operand].bitfield.baseindex = 1;
6935
6936 if (i.base_reg)
6937 {
6938 base_string = end_op;
6939 if (is_space_char (*base_string))
6940 ++base_string;
6941 }
6942
6943 /* There may be an index reg or scale factor here. */
6944 if (*base_string == ',')
6945 {
6946 ++base_string;
6947 if (is_space_char (*base_string))
6948 ++base_string;
6949
6950 if ((i.index_reg = parse_register (base_string, &end_op))
6951 != NULL)
6952 {
6953 base_string = end_op;
6954 if (is_space_char (*base_string))
6955 ++base_string;
6956 if (*base_string == ',')
6957 {
6958 ++base_string;
6959 if (is_space_char (*base_string))
6960 ++base_string;
6961 }
6962 else if (*base_string != ')')
6963 {
6964 as_bad (_("expecting `,' or `)' "
6965 "after index register in `%s'"),
6966 operand_string);
6967 return 0;
6968 }
6969 }
6970 else if (*base_string == REGISTER_PREFIX)
6971 {
6972 as_bad (_("bad register name `%s'"), base_string);
6973 return 0;
6974 }
6975
6976 /* Check for scale factor. */
6977 if (*base_string != ')')
6978 {
6979 char *end_scale = i386_scale (base_string);
6980
6981 if (!end_scale)
6982 return 0;
6983
6984 base_string = end_scale;
6985 if (is_space_char (*base_string))
6986 ++base_string;
6987 if (*base_string != ')')
6988 {
6989 as_bad (_("expecting `)' "
6990 "after scale factor in `%s'"),
6991 operand_string);
6992 return 0;
6993 }
6994 }
6995 else if (!i.index_reg)
6996 {
6997 as_bad (_("expecting index register or scale factor "
6998 "after `,'; got '%c'"),
6999 *base_string);
7000 return 0;
7001 }
7002 }
7003 else if (*base_string != ')')
7004 {
7005 as_bad (_("expecting `,' or `)' "
7006 "after base register in `%s'"),
7007 operand_string);
7008 return 0;
7009 }
7010 }
7011 else if (*base_string == REGISTER_PREFIX)
7012 {
7013 as_bad (_("bad register name `%s'"), base_string);
7014 return 0;
7015 }
7016 }
7017
7018 /* If there's an expression beginning the operand, parse it,
7019 assuming displacement_string_start and
7020 displacement_string_end are meaningful. */
7021 if (displacement_string_start != displacement_string_end)
7022 {
7023 if (!i386_displacement (displacement_string_start,
7024 displacement_string_end))
7025 return 0;
7026 }
7027
7028 /* Special case for (%dx) while doing input/output op. */
7029 if (i.base_reg
7030 && operand_type_equal (&i.base_reg->reg_type,
7031 &reg16_inoutportreg)
7032 && i.index_reg == 0
7033 && i.log2_scale_factor == 0
7034 && i.seg[i.mem_operands] == 0
7035 && !operand_type_check (i.types[this_operand], disp))
7036 {
7037 i.types[this_operand] = inoutportreg;
7038 return 1;
7039 }
7040
7041 if (i386_index_check (operand_string) == 0)
7042 return 0;
7043 i.types[this_operand].bitfield.mem = 1;
7044 i.mem_operands++;
7045 }
7046 else
7047 {
7048 /* It's not a memory operand; argh! */
7049 as_bad (_("invalid char %s beginning operand %d `%s'"),
7050 output_invalid (*op_string),
7051 this_operand + 1,
7052 op_string);
7053 return 0;
7054 }
7055 return 1; /* Normal return. */
7056}
7057\f
7058/* md_estimate_size_before_relax()
7059
7060 Called just before relax() for rs_machine_dependent frags. The x86
7061 assembler uses these frags to handle variable size jump
7062 instructions.
7063
7064 Any symbol that is now undefined will not become defined.
7065 Return the correct fr_subtype in the frag.
7066 Return the initial "guess for variable size of frag" to caller.
7067 The guess is actually the growth beyond the fixed part. Whatever
7068 we do to grow the fixed or variable part contributes to our
7069 returned value. */
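/* For example, a 32-bit-mode jump to an externally visible symbol in ELF
   output is not relaxed at all: the UNCOND_JUMP case below rewrites it at
   once to the 0xe9 rel32 form and emits a BFD_RELOC_32_PCREL fixup, since
   a shared library may preempt the target.  */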
7070
7071int
7072md_estimate_size_before_relax (fragP, segment)
7073 fragS *fragP;
7074 segT segment;
7075{
7076 /* We've already got fragP->fr_subtype right; all we have to do is
7077 check for un-relaxable symbols. On an ELF system, we can't relax
7078 an externally visible symbol, because it may be overridden by a
7079 shared library. */
7080 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7081#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7082 || (IS_ELF
7083 && (S_IS_EXTERNAL (fragP->fr_symbol)
7084 || S_IS_WEAK (fragP->fr_symbol)
7085 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7086 & BSF_GNU_INDIRECT_FUNCTION))))
7087#endif
7088#if defined (OBJ_COFF) && defined (TE_PE)
7089 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7090 && S_IS_WEAK (fragP->fr_symbol))
7091#endif
7092 )
7093 {
7094 /* Symbol is undefined in this segment, or we need to keep a
7095 reloc so that weak symbols can be overridden. */
7096 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7097 enum bfd_reloc_code_real reloc_type;
7098 unsigned char *opcode;
7099 int old_fr_fix;
7100
7101 if (fragP->fr_var != NO_RELOC)
7102 reloc_type = fragP->fr_var;
7103 else if (size == 2)
7104 reloc_type = BFD_RELOC_16_PCREL;
7105 else
7106 reloc_type = BFD_RELOC_32_PCREL;
7107
7108 old_fr_fix = fragP->fr_fix;
7109 opcode = (unsigned char *) fragP->fr_opcode;
7110
7111 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7112 {
7113 case UNCOND_JUMP:
7114 /* Make jmp (0xeb) a (d)word displacement jump. */
7115 opcode[0] = 0xe9;
7116 fragP->fr_fix += size;
7117 fix_new (fragP, old_fr_fix, size,
7118 fragP->fr_symbol,
7119 fragP->fr_offset, 1,
7120 reloc_type);
7121 break;
7122
7123 case COND_JUMP86:
7124 if (size == 2
7125 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7126 {
7127 /* Negate the condition, and branch past an
7128 unconditional jump. */
7129 opcode[0] ^= 1;
7130 opcode[1] = 3;
7131 /* Insert an unconditional jump. */
7132 opcode[2] = 0xe9;
7133 /* We added two extra opcode bytes, and have a two byte
7134 offset. */
7135 fragP->fr_fix += 2 + 2;
7136 fix_new (fragP, old_fr_fix + 2, 2,
7137 fragP->fr_symbol,
7138 fragP->fr_offset, 1,
7139 reloc_type);
7140 break;
7141 }
7142 /* Fall through. */
7143
7144 case COND_JUMP:
7145 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7146 {
7147 fixS *fixP;
7148
7149 fragP->fr_fix += 1;
7150 fixP = fix_new (fragP, old_fr_fix, 1,
7151 fragP->fr_symbol,
7152 fragP->fr_offset, 1,
7153 BFD_RELOC_8_PCREL);
7154 fixP->fx_signed = 1;
7155 break;
7156 }
7157
7158 /* This changes the byte-displacement jump 0x7N
7159 to the (d)word-displacement jump 0x0f,0x8N. */
7160 opcode[1] = opcode[0] + 0x10;
7161 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7162 /* We've added an opcode byte. */
7163 fragP->fr_fix += 1 + size;
7164 fix_new (fragP, old_fr_fix + 1, size,
7165 fragP->fr_symbol,
7166 fragP->fr_offset, 1,
7167 reloc_type);
7168 break;
7169
7170 default:
7171 BAD_CASE (fragP->fr_subtype);
7172 break;
7173 }
7174 frag_wane (fragP);
7175 return fragP->fr_fix - old_fr_fix;
7176 }
7177
7178 /* Guess size depending on current relax state. Initially the relax
7179 state will correspond to a short jump and we return 1, because
7180 the variable part of the frag (the branch offset) is one byte
7181 long. However, we can relax a section more than once and in that
7182 case we must either set fr_subtype back to the unrelaxed state,
7183 or return the value for the appropriate branch. */
7184 return md_relax_table[fragP->fr_subtype].rlx_length;
7185}
7186
7187/* Called after relax() is finished.
7188
7189 In: Address of frag.
7190 fr_type == rs_machine_dependent.
7191 fr_subtype is what the address relaxed to.
7192
7193 Out: Any fixSs and constants are set up.
7194 Caller will turn frag into a ".space 0". */
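/* As an illustration, a conditional jump 0x7N relaxed to
   ENCODE_RELAX_STATE (COND_JUMP, BIG) becomes 0x0f 0x8N followed by a
   4-byte displacement, i.e. an extension of 5 bytes over the original
   opcode byte.  */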
7195
7196void
7197md_convert_frag (abfd, sec, fragP)
7198 bfd *abfd ATTRIBUTE_UNUSED;
7199 segT sec ATTRIBUTE_UNUSED;
7200 fragS *fragP;
7201{
7202 unsigned char *opcode;
7203 unsigned char *where_to_put_displacement = NULL;
7204 offsetT target_address;
7205 offsetT opcode_address;
7206 unsigned int extension = 0;
7207 offsetT displacement_from_opcode_start;
7208
7209 opcode = (unsigned char *) fragP->fr_opcode;
7210
7211 /* Address we want to reach in file space. */
7212 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7213
7214 /* Address opcode resides at in file space. */
7215 opcode_address = fragP->fr_address + fragP->fr_fix;
7216
7217 /* Displacement from opcode start to fill into instruction. */
7218 displacement_from_opcode_start = target_address - opcode_address;
7219
7220 if ((fragP->fr_subtype & BIG) == 0)
7221 {
7222 /* Don't have to change opcode. */
7223 extension = 1; /* 1 opcode + 1 displacement */
7224 where_to_put_displacement = &opcode[1];
7225 }
7226 else
7227 {
7228 if (no_cond_jump_promotion
7229 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7230 as_warn_where (fragP->fr_file, fragP->fr_line,
7231 _("long jump required"));
7232
7233 switch (fragP->fr_subtype)
7234 {
7235 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7236 extension = 4; /* 1 opcode + 4 displacement */
7237 opcode[0] = 0xe9;
7238 where_to_put_displacement = &opcode[1];
7239 break;
7240
7241 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7242 extension = 2; /* 1 opcode + 2 displacement */
7243 opcode[0] = 0xe9;
7244 where_to_put_displacement = &opcode[1];
7245 break;
7246
7247 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7248 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7249 extension = 5; /* 2 opcode + 4 displacement */
7250 opcode[1] = opcode[0] + 0x10;
7251 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7252 where_to_put_displacement = &opcode[2];
7253 break;
7254
7255 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7256 extension = 3; /* 2 opcode + 2 displacement */
7257 opcode[1] = opcode[0] + 0x10;
7258 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7259 where_to_put_displacement = &opcode[2];
7260 break;
7261
7262 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7263 extension = 4;
7264 opcode[0] ^= 1;
7265 opcode[1] = 3;
7266 opcode[2] = 0xe9;
7267 where_to_put_displacement = &opcode[3];
7268 break;
7269
7270 default:
7271 BAD_CASE (fragP->fr_subtype);
7272 break;
7273 }
7274 }
7275
7276 /* If the size is less than four, we are sure that the operand fits,
7277 but if it is 4, then the displacement could be larger
7278 than +/- 2GB. */
7279 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7280 && object_64bit
7281 && ((addressT) (displacement_from_opcode_start - extension
7282 + ((addressT) 1 << 31))
7283 > (((addressT) 2 << 31) - 1)))
7284 {
7285 as_bad_where (fragP->fr_file, fragP->fr_line,
7286 _("jump target out of range"));
7287 /* Make us emit 0. */
7288 displacement_from_opcode_start = extension;
7289 }
7290 /* Now put displacement after opcode. */
7291 md_number_to_chars ((char *) where_to_put_displacement,
7292 (valueT) (displacement_from_opcode_start - extension),
7293 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7294 fragP->fr_fix += extension;
7295}
7296\f
7297/* Apply a fixup (fixS) to segment data, once it has been determined
7298 by our caller that we have all the info we need to fix it up.
7299
7300 On the 386, immediates, displacements, and data pointers are all in
7301 the same (little-endian) format, so we don't need to care about which
7302 we are handling. */
7303
7304void
7305md_apply_fix (fixP, valP, seg)
7306 /* The fix we're to put in. */
7307 fixS *fixP;
7308 /* Pointer to the value of the bits. */
7309 valueT *valP;
7310 /* Segment fix is from. */
7311 segT seg ATTRIBUTE_UNUSED;
7312{
7313 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7314 valueT value = *valP;
7315
7316#if !defined (TE_Mach)
7317 if (fixP->fx_pcrel)
7318 {
7319 switch (fixP->fx_r_type)
7320 {
7321 default:
7322 break;
7323
7324 case BFD_RELOC_64:
7325 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7326 break;
7327 case BFD_RELOC_32:
7328 case BFD_RELOC_X86_64_32S:
7329 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7330 break;
7331 case BFD_RELOC_16:
7332 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7333 break;
7334 case BFD_RELOC_8:
7335 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7336 break;
7337 }
7338 }
7339
7340 if (fixP->fx_addsy != NULL
7341 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7342 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7343 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7344 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7345 && !use_rela_relocations)
7346 {
7347 /* This is a hack. There should be a better way to handle this.
7348 This covers for the fact that bfd_install_relocation will
7349 subtract the current location (for partial_inplace, PC relative
7350 relocations); see more below. */
7351#ifndef OBJ_AOUT
7352 if (IS_ELF
7353#ifdef TE_PE
7354 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7355#endif
7356 )
7357 value += fixP->fx_where + fixP->fx_frag->fr_address;
7358#endif
7359#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7360 if (IS_ELF)
7361 {
7362 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7363
7364 if ((sym_seg == seg
7365 || (symbol_section_p (fixP->fx_addsy)
7366 && sym_seg != absolute_section))
7367 && !generic_force_reloc (fixP))
7368 {
7369 /* Yes, we add the values in twice. This is because
7370 bfd_install_relocation subtracts them out again. I think
7371 bfd_install_relocation is broken, but I don't dare change
7372 it. FIXME. */
7373 value += fixP->fx_where + fixP->fx_frag->fr_address;
7374 }
7375 }
7376#endif
7377#if defined (OBJ_COFF) && defined (TE_PE)
7378 /* For some reason, the PE format does not store a
7379 section address offset for a PC relative symbol. */
7380 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7381 || S_IS_WEAK (fixP->fx_addsy))
7382 value += md_pcrel_from (fixP);
7383#endif
7384 }
7385#if defined (OBJ_COFF) && defined (TE_PE)
7386 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7387 {
7388 value -= S_GET_VALUE (fixP->fx_addsy);
7389 }
7390#endif
7391
7392 /* Fix a few things - the dynamic linker expects certain values here,
7393 and we must not disappoint it. */
7394#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7395 if (IS_ELF && fixP->fx_addsy)
7396 switch (fixP->fx_r_type)
7397 {
7398 case BFD_RELOC_386_PLT32:
7399 case BFD_RELOC_X86_64_PLT32:
7400 /* Make the jump instruction point to the address of the operand. At
7401 runtime we merely add the offset to the actual PLT entry. */
7402 value = -4;
7403 break;
7404
7405 case BFD_RELOC_386_TLS_GD:
7406 case BFD_RELOC_386_TLS_LDM:
7407 case BFD_RELOC_386_TLS_IE_32:
7408 case BFD_RELOC_386_TLS_IE:
7409 case BFD_RELOC_386_TLS_GOTIE:
7410 case BFD_RELOC_386_TLS_GOTDESC:
7411 case BFD_RELOC_X86_64_TLSGD:
7412 case BFD_RELOC_X86_64_TLSLD:
7413 case BFD_RELOC_X86_64_GOTTPOFF:
7414 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7415 value = 0; /* Fully resolved at runtime. No addend. */
7416 /* Fallthrough */
7417 case BFD_RELOC_386_TLS_LE:
7418 case BFD_RELOC_386_TLS_LDO_32:
7419 case BFD_RELOC_386_TLS_LE_32:
7420 case BFD_RELOC_X86_64_DTPOFF32:
7421 case BFD_RELOC_X86_64_DTPOFF64:
7422 case BFD_RELOC_X86_64_TPOFF32:
7423 case BFD_RELOC_X86_64_TPOFF64:
7424 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7425 break;
7426
7427 case BFD_RELOC_386_TLS_DESC_CALL:
7428 case BFD_RELOC_X86_64_TLSDESC_CALL:
7429 value = 0; /* Fully resolved at runtime. No addend. */
7430 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7431 fixP->fx_done = 0;
7432 return;
7433
7434 case BFD_RELOC_386_GOT32:
7435 case BFD_RELOC_X86_64_GOT32:
7436 value = 0; /* Fully resolved at runtime. No addend. */
7437 break;
7438
7439 case BFD_RELOC_VTABLE_INHERIT:
7440 case BFD_RELOC_VTABLE_ENTRY:
7441 fixP->fx_done = 0;
7442 return;
7443
7444 default:
7445 break;
7446 }
7447#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
7448 *valP = value;
7449#endif /* !defined (TE_Mach) */
7450
7451 /* Are we finished with this relocation now? */
7452 if (fixP->fx_addsy == NULL)
7453 fixP->fx_done = 1;
7454#if defined (OBJ_COFF) && defined (TE_PE)
7455 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7456 {
7457 fixP->fx_done = 0;
7458 /* Remember value for tc_gen_reloc. */
7459 fixP->fx_addnumber = value;
7460 /* Clear out the frag for now. */
7461 value = 0;
7462 }
7463#endif
7464 else if (use_rela_relocations)
7465 {
7466 fixP->fx_no_overflow = 1;
7467 /* Remember value for tc_gen_reloc. */
7468 fixP->fx_addnumber = value;
7469 value = 0;
7470 }
7471
7472 md_number_to_chars (p, value, fixP->fx_size);
7473}
7474\f
7475char *
7476md_atof (int type, char *litP, int *sizeP)
7477{
7478 /* This outputs the LITTLENUMs in REVERSE order,
7479 as required by the little-endian 386. */
7480 return ieee_md_atof (type, litP, sizeP, FALSE);
7481}
7482\f
7483static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
7484
7485static char *
7486output_invalid (int c)
7487{
7488 if (ISPRINT (c))
7489 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7490 "'%c'", c);
7491 else
7492 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7493 "(0x%x)", (unsigned char) c);
7494 return output_invalid_buf;
7495}
7496
7497/* REG_STRING starts *before* REGISTER_PREFIX. */
7498
7499static const reg_entry *
7500parse_real_register (char *reg_string, char **end_op)
7501{
7502 char *s = reg_string;
7503 char *p;
7504 char reg_name_given[MAX_REG_NAME_SIZE + 1];
7505 const reg_entry *r;
7506
7507 /* Skip possible REGISTER_PREFIX and possible whitespace. */
7508 if (*s == REGISTER_PREFIX)
7509 ++s;
7510
7511 if (is_space_char (*s))
7512 ++s;
7513
7514 p = reg_name_given;
7515 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
7516 {
7517 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
7518 return (const reg_entry *) NULL;
7519 s++;
7520 }
7521
7522 /* For naked regs, make sure that we are not dealing with an identifier.
7523 This prevents confusing an identifier like `eax_var' with register
7524 `eax'. */
7525 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
7526 return (const reg_entry *) NULL;
7527
7528 *end_op = s;
7529
7530 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
7531
7532 /* Handle floating point regs, allowing spaces in the (i) part. */
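/* So `%st ( 3 )' is accepted and resolves to the same reg_entry as
   `%st(3)'.  */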
7533 if (r == i386_regtab /* %st is first entry of table */)
7534 {
7535 if (is_space_char (*s))
7536 ++s;
7537 if (*s == '(')
7538 {
7539 ++s;
7540 if (is_space_char (*s))
7541 ++s;
7542 if (*s >= '0' && *s <= '7')
7543 {
7544 int fpr = *s - '0';
7545 ++s;
7546 if (is_space_char (*s))
7547 ++s;
7548 if (*s == ')')
7549 {
7550 *end_op = s + 1;
7551 r = hash_find (reg_hash, "st(0)");
7552 know (r);
7553 return r + fpr;
7554 }
7555 }
7556 /* We have "%st(" then garbage. */
7557 return (const reg_entry *) NULL;
7558 }
7559 }
7560
7561 if (r == NULL || allow_pseudo_reg)
7562 return r;
7563
7564 if (operand_type_all_zero (&r->reg_type))
7565 return (const reg_entry *) NULL;
7566
7567 if ((r->reg_type.bitfield.reg32
7568 || r->reg_type.bitfield.sreg3
7569 || r->reg_type.bitfield.control
7570 || r->reg_type.bitfield.debug
7571 || r->reg_type.bitfield.test)
7572 && !cpu_arch_flags.bitfield.cpui386)
7573 return (const reg_entry *) NULL;
7574
7575 if (r->reg_type.bitfield.floatreg
7576 && !cpu_arch_flags.bitfield.cpu8087
7577 && !cpu_arch_flags.bitfield.cpu287
7578 && !cpu_arch_flags.bitfield.cpu387)
7579 return (const reg_entry *) NULL;
7580
7581 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
7582 return (const reg_entry *) NULL;
7583
7584 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
7585 return (const reg_entry *) NULL;
7586
7587 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
7588 return (const reg_entry *) NULL;
7589
7590 /* Don't allow the fake index registers unless allow_index_reg is non-zero. */
7591 if (!allow_index_reg
7592 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
7593 return (const reg_entry *) NULL;
7594
7595 if (((r->reg_flags & (RegRex64 | RegRex))
7596 || r->reg_type.bitfield.reg64)
7597 && (!cpu_arch_flags.bitfield.cpulm
7598 || !operand_type_equal (&r->reg_type, &control))
7599 && flag_code != CODE_64BIT)
7600 return (const reg_entry *) NULL;
7601
7602 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
7603 return (const reg_entry *) NULL;
7604
7605 return r;
7606}
7607
7608/* REG_STRING starts *before* REGISTER_PREFIX. */
7609
7610static const reg_entry *
7611parse_register (char *reg_string, char **end_op)
7612{
7613 const reg_entry *r;
7614
7615 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
7616 r = parse_real_register (reg_string, end_op);
7617 else
7618 r = NULL;
7619 if (!r)
7620 {
7621 char *save = input_line_pointer;
7622 char c;
7623 symbolS *symbolP;
7624
7625 input_line_pointer = reg_string;
7626 c = get_symbol_end ();
7627 symbolP = symbol_find (reg_string);
7628 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
7629 {
7630 const expressionS *e = symbol_get_value_expression (symbolP);
7631
7632 know (e->X_op == O_register);
7633 know (e->X_add_number >= 0
7634 && (valueT) e->X_add_number < i386_regtab_size);
7635 r = i386_regtab + e->X_add_number;
7636 *end_op = input_line_pointer;
7637 }
7638 *input_line_pointer = c;
7639 input_line_pointer = save;
7640 }
7641 return r;
7642}
7643
7644int
7645i386_parse_name (char *name, expressionS *e, char *nextcharP)
7646{
7647 const reg_entry *r;
7648 char *end = input_line_pointer;
7649
7650 *end = *nextcharP;
7651 r = parse_register (name, &input_line_pointer);
7652 if (r && end <= input_line_pointer)
7653 {
7654 *nextcharP = *input_line_pointer;
7655 *input_line_pointer = 0;
7656 e->X_op = O_register;
7657 e->X_add_number = r - i386_regtab;
7658 return 1;
7659 }
7660 input_line_pointer = end;
7661 *end = 0;
7662 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
7663}
7664
7665void
7666md_operand (expressionS *e)
7667{
7668 char *end;
7669 const reg_entry *r;
7670
7671 switch (*input_line_pointer)
7672 {
7673 case REGISTER_PREFIX:
7674 r = parse_real_register (input_line_pointer, &end);
7675 if (r)
7676 {
7677 e->X_op = O_register;
7678 e->X_add_number = r - i386_regtab;
7679 input_line_pointer = end;
7680 }
7681 break;
7682
7683 case '[':
7684 gas_assert (intel_syntax);
7685 end = input_line_pointer++;
7686 expression (e);
7687 if (*input_line_pointer == ']')
7688 {
7689 ++input_line_pointer;
7690 e->X_op_symbol = make_expr_symbol (e);
7691 e->X_add_symbol = NULL;
7692 e->X_add_number = 0;
7693 e->X_op = O_index;
7694 }
7695 else
7696 {
7697 e->X_op = O_absent;
7698 input_line_pointer = end;
7699 }
7700 break;
7701 }
7702}
7703
7704\f
7705#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7706const char *md_shortopts = "kVQ:sqn";
7707#else
7708const char *md_shortopts = "qn";
7709#endif
7710
7711#define OPTION_32 (OPTION_MD_BASE + 0)
7712#define OPTION_64 (OPTION_MD_BASE + 1)
7713#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
7714#define OPTION_MARCH (OPTION_MD_BASE + 3)
7715#define OPTION_MTUNE (OPTION_MD_BASE + 4)
7716#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
7717#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
7718#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
7719#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
7720#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
7721#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
7722#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
7723
7724struct option md_longopts[] =
7725{
7726 {"32", no_argument, NULL, OPTION_32},
7727#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
7728 || defined (TE_PE) || defined (TE_PEP))
7729 {"64", no_argument, NULL, OPTION_64},
7730#endif
7731 {"divide", no_argument, NULL, OPTION_DIVIDE},
7732 {"march", required_argument, NULL, OPTION_MARCH},
7733 {"mtune", required_argument, NULL, OPTION_MTUNE},
7734 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
7735 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
7736 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
7737 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
7738 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
7739 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
7740 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
7741 {NULL, no_argument, NULL, 0}
7742};
7743size_t md_longopts_size = sizeof (md_longopts);
7744
7745int
7746md_parse_option (int c, char *arg)
7747{
7748 unsigned int i;
7749 char *arch, *next;
7750
7751 switch (c)
7752 {
7753 case 'n':
7754 optimize_align_code = 0;
7755 break;
7756
7757 case 'q':
7758 quiet_warnings = 1;
7759 break;
7760
7761#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7762 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
7763 should be emitted or not. FIXME: Not implemented. */
7764 case 'Q':
7765 break;
7766
7767 /* -V: SVR4 argument to print version ID. */
7768 case 'V':
7769 print_version_id ();
7770 break;
7771
7772 /* -k: Ignore for FreeBSD compatibility. */
7773 case 'k':
7774 break;
7775
7776 case 's':
7777 /* -s: On i386 Solaris, this tells the native assembler to use
7778 .stab instead of .stab.excl. We always use .stab anyhow. */
7779 break;
7780#endif
7781#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
7782 || defined (TE_PE) || defined (TE_PEP))
7783 case OPTION_64:
7784 {
7785 const char **list, **l;
7786
7787 list = bfd_target_list ();
7788 for (l = list; *l != NULL; l++)
7789 if (CONST_STRNEQ (*l, "elf64-x86-64")
7790 || strcmp (*l, "coff-x86-64") == 0
7791 || strcmp (*l, "pe-x86-64") == 0
7792 || strcmp (*l, "pei-x86-64") == 0)
7793 {
7794 default_arch = "x86_64";
7795 break;
7796 }
7797 if (*l == NULL)
7798 as_fatal (_("No compiled in support for x86_64"));
7799 free (list);
7800 }
7801 break;
7802#endif
7803
7804 case OPTION_32:
7805 default_arch = "i386";
7806 break;
7807
7808 case OPTION_DIVIDE:
7809#ifdef SVR4_COMMENT_CHARS
7810 {
7811 char *n, *t;
7812 const char *s;
7813
7814 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
7815 t = n;
7816 for (s = i386_comment_chars; *s != '\0'; s++)
7817 if (*s != '/')
7818 *t++ = *s;
7819 *t = '\0';
7820 i386_comment_chars = n;
7821 }
7822#endif
7823 break;
7824
7825 case OPTION_MARCH:
7826 arch = xstrdup (arg);
7827 do
7828 {
7829 if (*arch == '.')
7830 as_fatal (_("Invalid -march= option: `%s'"), arg);
7831 next = strchr (arch, '+');
7832 if (next)
7833 *next++ = '\0';
7834 for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
7835 {
7836 if (strcmp (arch, cpu_arch [i].name) == 0)
7837 {
7838 /* Processor. */
7839 cpu_arch_name = cpu_arch[i].name;
7840 cpu_sub_arch_name = NULL;
7841 cpu_arch_flags = cpu_arch[i].flags;
7842 cpu_arch_isa = cpu_arch[i].type;
7843 cpu_arch_isa_flags = cpu_arch[i].flags;
7844 if (!cpu_arch_tune_set)
7845 {
7846 cpu_arch_tune = cpu_arch_isa;
7847 cpu_arch_tune_flags = cpu_arch_isa_flags;
7848 }
7849 break;
7850 }
7851 else if (*cpu_arch [i].name == '.'
7852 && strcmp (arch, cpu_arch [i].name + 1) == 0)
7853 {
7854 /* ISA extension. */
7855 i386_cpu_flags flags;
7856
7857 if (strncmp (arch, "no", 2))
7858 flags = cpu_flags_or (cpu_arch_flags,
7859 cpu_arch[i].flags);
7860 else
7861 flags = cpu_flags_and_not (cpu_arch_flags,
7862 cpu_arch[i].flags);
7863 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
7864 {
7865 if (cpu_sub_arch_name)
7866 {
7867 char *name = cpu_sub_arch_name;
7868 cpu_sub_arch_name = concat (name,
7869 cpu_arch[i].name,
7870 (const char *) NULL);
7871 free (name);
7872 }
7873 else
7874 cpu_sub_arch_name = xstrdup (cpu_arch[i].name);
7875 cpu_arch_flags = flags;
7876 }
7877 break;
7878 }
7879 }
7880
7881 if (i >= ARRAY_SIZE (cpu_arch))
7882 as_fatal (_("Invalid -march= option: `%s'"), arg);
7883
7884 arch = next;
7885 }
7886 while (next != NULL);
7887 break;
7888
7889 case OPTION_MTUNE:
7890 if (*arg == '.')
7891 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
7892 for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
7893 {
7894 if (strcmp (arg, cpu_arch [i].name) == 0)
7895 {
7896 cpu_arch_tune_set = 1;
7897 cpu_arch_tune = cpu_arch [i].type;
7898 cpu_arch_tune_flags = cpu_arch[i].flags;
7899 break;
7900 }
7901 }
7902 if (i >= ARRAY_SIZE (cpu_arch))
7903 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
7904 break;
7905
7906 case OPTION_MMNEMONIC:
7907 if (strcasecmp (arg, "att") == 0)
7908 intel_mnemonic = 0;
7909 else if (strcasecmp (arg, "intel") == 0)
7910 intel_mnemonic = 1;
7911 else
7912 as_fatal (_("Invalid -mmnemonic= option: `%s'"), arg);
7913 break;
7914
7915 case OPTION_MSYNTAX:
7916 if (strcasecmp (arg, "att") == 0)
7917 intel_syntax = 0;
7918 else if (strcasecmp (arg, "intel") == 0)
7919 intel_syntax = 1;
7920 else
7921 as_fatal (_("Invalid -msyntax= option: `%s'"), arg);
7922 break;
7923
7924 case OPTION_MINDEX_REG:
7925 allow_index_reg = 1;
7926 break;
7927
7928 case OPTION_MNAKED_REG:
7929 allow_naked_reg = 1;
7930 break;
7931
7932 case OPTION_MOLD_GCC:
7933 old_gcc = 1;
7934 break;
7935
7936 case OPTION_MSSE2AVX:
7937 sse2avx = 1;
7938 break;
7939
7940 case OPTION_MSSE_CHECK:
7941 if (strcasecmp (arg, "error") == 0)
7942 sse_check = sse_check_error;
7943 else if (strcasecmp (arg, "warning") == 0)
7944 sse_check = sse_check_warning;
7945 else if (strcasecmp (arg, "none") == 0)
7946 sse_check = sse_check_none;
7947 else
7948 as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
7949 break;
7950
7951 default:
7952 return 0;
7953 }
7954 return 1;
7955}
7956
7957void
7958md_show_usage (stream)
7959 FILE *stream;
7960{
7961#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7962 fprintf (stream, _("\
7963 -Q ignored\n\
7964 -V print assembler version number\n\
7965 -k ignored\n"));
7966#endif
7967 fprintf (stream, _("\
7968 -n Do not optimize code alignment\n\
7969 -q quieten some warnings\n"));
7970#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7971 fprintf (stream, _("\
7972 -s ignored\n"));
7973#endif
7974#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
7975 || defined (TE_PE) || defined (TE_PEP))
7976 fprintf (stream, _("\
7977 --32/--64 generate 32bit/64bit code\n"));
7978#endif
7979#ifdef SVR4_COMMENT_CHARS
7980 fprintf (stream, _("\
7981 --divide do not treat `/' as a comment character\n"));
7982#else
7983 fprintf (stream, _("\
7984 --divide ignored\n"));
7985#endif
7986 fprintf (stream, _("\
7987 -march=CPU[,+EXTENSION...]\n\
7988 generate code for CPU and EXTENSION, CPU is one of:\n\
7989 i8086, i186, i286, i386, i486, pentium, pentiumpro,\n\
7990 pentiumii, pentiumiii, pentium4, prescott, nocona,\n\
7991 core, core2, corei7, l1om, k6, k6_2, athlon, k8,\n\
7992 amdfam10, generic32, generic64\n\
7993 EXTENSION is combination of:\n\
7994 8087, 287, 387, no87, mmx, nommx, sse, sse2, sse3,\n\
7995 ssse3, sse4.1, sse4.2, sse4, nosse, avx, noavx,\n\
7996 vmx, smx, xsave, movbe, ept, aes, pclmul, fma,\n\
7997 clflush, syscall, rdtscp, 3dnow, 3dnowa, sse4a,\n\
7998 svme, abm, padlock, fma4\n"));
7999 fprintf (stream, _("\
8000 -mtune=CPU optimize for CPU, CPU is one of:\n\
8001 i8086, i186, i286, i386, i486, pentium, pentiumpro,\n\
8002 pentiumii, pentiumiii, pentium4, prescott, nocona,\n\
8003 core, core2, corei7, l1om, k6, k6_2, athlon, k8,\n\
8004 amdfam10, generic32, generic64\n"));
8005 fprintf (stream, _("\
8006 -msse2avx encode SSE instructions with VEX prefix\n"));
8007 fprintf (stream, _("\
8008 -msse-check=[none|error|warning]\n\
8009 check SSE instructions\n"));
8010 fprintf (stream, _("\
8011 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8012 fprintf (stream, _("\
8013 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8014 fprintf (stream, _("\
8015 -mindex-reg support pseudo index registers\n"));
8016 fprintf (stream, _("\
8017 -mnaked-reg don't require `%%' prefix for registers\n"));
8018 fprintf (stream, _("\
8019 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8020}
8021
8022#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8023 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8024 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8025
8026/* Pick the target format to use. */
8027
8028const char *
8029i386_target_format (void)
8030{
8031 if (!strcmp (default_arch, "x86_64"))
8032 {
8033 set_code_flag (CODE_64BIT);
8034 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8035 {
8036 cpu_arch_isa_flags.bitfield.cpui186 = 1;
8037 cpu_arch_isa_flags.bitfield.cpui286 = 1;
8038 cpu_arch_isa_flags.bitfield.cpui386 = 1;
8039 cpu_arch_isa_flags.bitfield.cpui486 = 1;
8040 cpu_arch_isa_flags.bitfield.cpui586 = 1;
8041 cpu_arch_isa_flags.bitfield.cpui686 = 1;
8042 cpu_arch_isa_flags.bitfield.cpuclflush = 1;
8043 cpu_arch_isa_flags.bitfield.cpummx = 1;
8044 cpu_arch_isa_flags.bitfield.cpusse = 1;
8045 cpu_arch_isa_flags.bitfield.cpusse2 = 1;
8046 cpu_arch_isa_flags.bitfield.cpulm = 1;
8047 }
8048 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8049 {
8050 cpu_arch_tune_flags.bitfield.cpui186 = 1;
8051 cpu_arch_tune_flags.bitfield.cpui286 = 1;
8052 cpu_arch_tune_flags.bitfield.cpui386 = 1;
8053 cpu_arch_tune_flags.bitfield.cpui486 = 1;
8054 cpu_arch_tune_flags.bitfield.cpui586 = 1;
8055 cpu_arch_tune_flags.bitfield.cpui686 = 1;
8056 cpu_arch_tune_flags.bitfield.cpuclflush = 1;
8057 cpu_arch_tune_flags.bitfield.cpummx = 1;
8058 cpu_arch_tune_flags.bitfield.cpusse = 1;
8059 cpu_arch_tune_flags.bitfield.cpusse2 = 1;
8060 }
8061 }
8062 else if (!strcmp (default_arch, "i386"))
8063 {
8064 set_code_flag (CODE_32BIT);
8065 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8066 {
8067 cpu_arch_isa_flags.bitfield.cpui186 = 1;
8068 cpu_arch_isa_flags.bitfield.cpui286 = 1;
8069 cpu_arch_isa_flags.bitfield.cpui386 = 1;
8070 }
8071 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8072 {
8073 cpu_arch_tune_flags.bitfield.cpui186 = 1;
8074 cpu_arch_tune_flags.bitfield.cpui286 = 1;
8075 cpu_arch_tune_flags.bitfield.cpui386 = 1;
8076 }
8077 }
8078 else
8079 as_fatal (_("Unknown architecture"));
8080 switch (OUTPUT_FLAVOR)
8081 {
8082#if defined (TE_PE) || defined (TE_PEP)
8083 case bfd_target_coff_flavour:
8084 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8085#endif
8086#ifdef OBJ_MAYBE_AOUT
8087 case bfd_target_aout_flavour:
8088 return AOUT_TARGET_FORMAT;
8089#endif
8090#ifdef OBJ_MAYBE_COFF
8091 case bfd_target_coff_flavour:
8092 return "coff-i386";
8093#endif
8094#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8095 case bfd_target_elf_flavour:
8096 {
8097 if (flag_code == CODE_64BIT)
8098 {
8099 object_64bit = 1;
8100 use_rela_relocations = 1;
8101 }
8102 if (cpu_arch_isa_flags.bitfield.cpul1om)
8103 {
8104 if (flag_code != CODE_64BIT)
8105 as_fatal (_("Intel L1OM is 64bit only"));
8106 return ELF_TARGET_L1OM_FORMAT;
8107 }
8108 else
8109 return (flag_code == CODE_64BIT
8110 ? ELF_TARGET_FORMAT64 : ELF_TARGET_FORMAT);
8111 }
8112#endif
8113#if defined (OBJ_MACH_O)
8114 case bfd_target_mach_o_flavour:
8115 return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
8116#endif
8117 default:
8118 abort ();
8119 return NULL;
8120 }
8121}
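
/* For a typical ELF configuration the function above resolves to
   "elf32-i386" under --32 and to ELF_TARGET_FORMAT64 (commonly
   "elf64-x86-64") under --64; the exact strings come from the target
   macros in tc-i386.h, so the names given here are illustrative
   rather than definitive.  */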
8122
8123#endif /* OBJ_MAYBE_ more than one */
8124
8125#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8126void
8127i386_elf_emit_arch_note (void)
8128{
8129 if (IS_ELF && cpu_arch_name != NULL)
8130 {
8131 char *p;
8132 asection *seg = now_seg;
8133 subsegT subseg = now_subseg;
8134 Elf_Internal_Note i_note;
8135 Elf_External_Note e_note;
8136 asection *note_secp;
8137 int len;
8138
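 /* The code below emits a standard ELF note: a 4-byte name size,
    a 4-byte descriptor size (zero here), a 4-byte type (NT_ARCH),
    and then the NUL-terminated architecture name, padded to a 4-byte
    boundary by the later frag_align call.  With -march=core2, for
    example, the name field holds "core2\0" and namesz is 6.  */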
8139 /* Create the .note section. */
8140 note_secp = subseg_new (".note", 0);
8141 bfd_set_section_flags (stdoutput,
8142 note_secp,
8143 SEC_HAS_CONTENTS | SEC_READONLY);
8144
8145 /* Process the arch string. */
8146 len = strlen (cpu_arch_name);
8147
8148 i_note.namesz = len + 1;
8149 i_note.descsz = 0;
8150 i_note.type = NT_ARCH;
8151 p = frag_more (sizeof (e_note.namesz));
8152 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8153 p = frag_more (sizeof (e_note.descsz));
8154 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8155 p = frag_more (sizeof (e_note.type));
8156 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8157 p = frag_more (len + 1);
8158 strcpy (p, cpu_arch_name);
8159
8160 frag_align (2, 0, 0);
8161
8162 subseg_set (seg, subseg);
8163 }
8164}
8165#endif
8166\f
8167symbolS *
8168md_undefined_symbol (char *name)
8170{
8171 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8172 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8173 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8174 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8175 {
8176 if (!GOT_symbol)
8177 {
8178 if (symbol_find (name))
8179 as_bad (_("GOT already in symbol table"));
8180 GOT_symbol = symbol_new (name, undefined_section,
8181 (valueT) 0, &zero_address_frag);
8182 }
8183 return GOT_symbol;
8184 }
8185 return 0;
8186}
8187
8188/* Round up a section size to the appropriate boundary. */
8189
8190valueT
8191md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8194{
8195#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8196 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8197 {
8198 /* For a.out, force the section size to be aligned. If we don't do
8199 this, BFD will align it for us, but it will not write out the
8200 final bytes of the section. This may be a bug in BFD, but it is
8201 easier to fix it here since that is how the other a.out targets
8202 work. */
8203 int align;
8204
8205 align = bfd_get_section_alignment (stdoutput, segment);
8206 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
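 /* Worked example: with align == 5 (32-byte alignment) a size of
    0x123 becomes (0x123 + 31) & ~31 == 0x140.  */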
8207 }
8208#endif
8209
8210 return size;
8211}
8212
8213/* On the i386, PC-relative offsets are relative to the start of the
8214 next instruction. That is, the address of the offset, plus its
8215 size, since the offset is always the last part of the insn. */
8216
8217long
8218md_pcrel_from (fixS *fixP)
8219{
8220 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8221}
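
/* Worked example (addresses are illustrative): for a 5-byte "call"
   placed at 0x10, the 4-byte displacement occupies 0x11..0x14, so
   fr_address + fx_where is 0x11, fx_size is 4, and the value returned
   above is 0x15 -- the address of the next instruction, which is what
   the encoded displacement is measured from.  */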
8222
8223#ifndef I386COFF
8224
8225static void
8226s_bss (int ignore ATTRIBUTE_UNUSED)
8227{
8228 int temp;
8229
8230#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8231 if (IS_ELF)
8232 obj_elf_section_change_hook ();
8233#endif
8234 temp = get_absolute_expression ();
8235 subseg_set (bss_section, (subsegT) temp);
8236 demand_empty_rest_of_line ();
8237}
8238
8239#endif
8240
8241void
8242i386_validate_fix (fixS *fixp)
8243{
8244 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8245 {
8246 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8247 {
8248 if (!object_64bit)
8249 abort ();
8250 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8251 }
8252 else
8253 {
8254 if (!object_64bit)
8255 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8256 else
8257 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8258 }
8259 fixp->fx_subsy = 0;
8260 }
8261}
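
/* The fx_subsy == GOT_symbol case above covers expressions that
   subtract the GOT symbol, e.g. source spelled (illustratively) as
   "sym-_GLOBAL_OFFSET_TABLE_"; rather than emitting a symbol
   difference, the fixup is retyped as the matching GOT-relative
   relocation and the subtracted symbol is dropped.  */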
8262
8263arelent *
8264tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
8267{
8268 arelent *rel;
8269 bfd_reloc_code_real_type code;
8270
8271 switch (fixp->fx_r_type)
8272 {
8273 case BFD_RELOC_X86_64_PLT32:
8274 case BFD_RELOC_X86_64_GOT32:
8275 case BFD_RELOC_X86_64_GOTPCREL:
8276 case BFD_RELOC_386_PLT32:
8277 case BFD_RELOC_386_GOT32:
8278 case BFD_RELOC_386_GOTOFF:
8279 case BFD_RELOC_386_GOTPC:
8280 case BFD_RELOC_386_TLS_GD:
8281 case BFD_RELOC_386_TLS_LDM:
8282 case BFD_RELOC_386_TLS_LDO_32:
8283 case BFD_RELOC_386_TLS_IE_32:
8284 case BFD_RELOC_386_TLS_IE:
8285 case BFD_RELOC_386_TLS_GOTIE:
8286 case BFD_RELOC_386_TLS_LE_32:
8287 case BFD_RELOC_386_TLS_LE:
8288 case BFD_RELOC_386_TLS_GOTDESC:
8289 case BFD_RELOC_386_TLS_DESC_CALL:
8290 case BFD_RELOC_X86_64_TLSGD:
8291 case BFD_RELOC_X86_64_TLSLD:
8292 case BFD_RELOC_X86_64_DTPOFF32:
8293 case BFD_RELOC_X86_64_DTPOFF64:
8294 case BFD_RELOC_X86_64_GOTTPOFF:
8295 case BFD_RELOC_X86_64_TPOFF32:
8296 case BFD_RELOC_X86_64_TPOFF64:
8297 case BFD_RELOC_X86_64_GOTOFF64:
8298 case BFD_RELOC_X86_64_GOTPC32:
8299 case BFD_RELOC_X86_64_GOT64:
8300 case BFD_RELOC_X86_64_GOTPCREL64:
8301 case BFD_RELOC_X86_64_GOTPC64:
8302 case BFD_RELOC_X86_64_GOTPLT64:
8303 case BFD_RELOC_X86_64_PLTOFF64:
8304 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8305 case BFD_RELOC_X86_64_TLSDESC_CALL:
8306 case BFD_RELOC_RVA:
8307 case BFD_RELOC_VTABLE_ENTRY:
8308 case BFD_RELOC_VTABLE_INHERIT:
8309#ifdef TE_PE
8310 case BFD_RELOC_32_SECREL:
8311#endif
8312 code = fixp->fx_r_type;
8313 break;
8314 case BFD_RELOC_X86_64_32S:
8315 if (!fixp->fx_pcrel)
8316 {
8317 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8318 code = fixp->fx_r_type;
8319 break;
8320 }
 /* Fall through.  */
8321 default:
8322 if (fixp->fx_pcrel)
8323 {
8324 switch (fixp->fx_size)
8325 {
8326 default:
8327 as_bad_where (fixp->fx_file, fixp->fx_line,
8328 _("can not do %d byte pc-relative relocation"),
8329 fixp->fx_size);
8330 code = BFD_RELOC_32_PCREL;
8331 break;
8332 case 1: code = BFD_RELOC_8_PCREL; break;
8333 case 2: code = BFD_RELOC_16_PCREL; break;
8334 case 4: code = BFD_RELOC_32_PCREL; break;
8335#ifdef BFD64
8336 case 8: code = BFD_RELOC_64_PCREL; break;
8337#endif
8338 }
8339 }
8340 else
8341 {
8342 switch (fixp->fx_size)
8343 {
8344 default:
8345 as_bad_where (fixp->fx_file, fixp->fx_line,
8346 _("can not do %d byte relocation"),
8347 fixp->fx_size);
8348 code = BFD_RELOC_32;
8349 break;
8350 case 1: code = BFD_RELOC_8; break;
8351 case 2: code = BFD_RELOC_16; break;
8352 case 4: code = BFD_RELOC_32; break;
8353#ifdef BFD64
8354 case 8: code = BFD_RELOC_64; break;
8355#endif
8356 }
8357 }
8358 break;
8359 }
8360
8361 if ((code == BFD_RELOC_32
8362 || code == BFD_RELOC_32_PCREL
8363 || code == BFD_RELOC_X86_64_32S)
8364 && GOT_symbol
8365 && fixp->fx_addsy == GOT_symbol)
8366 {
8367 if (!object_64bit)
8368 code = BFD_RELOC_386_GOTPC;
8369 else
8370 code = BFD_RELOC_X86_64_GOTPC32;
8371 }
8372 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
8373 && GOT_symbol
8374 && fixp->fx_addsy == GOT_symbol)
8375 {
8376 code = BFD_RELOC_X86_64_GOTPC64;
8377 }
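 /* Example of the conversions just above: a PIC prologue that adds
    $_GLOBAL_OFFSET_TABLE_ to a register (illustrative source such as
    "addl $_GLOBAL_OFFSET_TABLE_, %ebx") leaves a 32-bit fixup against
    the GOT symbol, which becomes BFD_RELOC_386_GOTPC here, i.e.
    R_386_GOTPC in the output; 64-bit code gets the GOTPC32/GOTPC64
    variants instead.  */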
8378
8379 rel = (arelent *) xmalloc (sizeof (arelent));
8380 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
8381 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8382
8383 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
8384
8385 if (!use_rela_relocations)
8386 {
8387 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
8388 vtable entry to be used in the relocation's section offset. */
8389 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
8390 rel->address = fixp->fx_offset;
8391#if defined (OBJ_COFF) && defined (TE_PE)
8392 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
8393 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
8394 else
8395#endif
8396 rel->addend = 0;
8397 }
8398 /* Use the rela in 64bit mode. */
8399 else
8400 {
8401 if (!fixp->fx_pcrel)
8402 rel->addend = fixp->fx_offset;
8403 else
8404 switch (code)
8405 {
8406 case BFD_RELOC_X86_64_PLT32:
8407 case BFD_RELOC_X86_64_GOT32:
8408 case BFD_RELOC_X86_64_GOTPCREL:
8409 case BFD_RELOC_X86_64_TLSGD:
8410 case BFD_RELOC_X86_64_TLSLD:
8411 case BFD_RELOC_X86_64_GOTTPOFF:
8412 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8413 case BFD_RELOC_X86_64_TLSDESC_CALL:
8414 rel->addend = fixp->fx_offset - fixp->fx_size;
8415 break;
8416 default:
8417 rel->addend = (section->vma
8418 - fixp->fx_size
8419 + fixp->fx_addnumber
8420 + md_pcrel_from (fixp));
8421 break;
8422 }
8423 }
8424
8425 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
8426 if (rel->howto == NULL)
8427 {
8428 as_bad_where (fixp->fx_file, fixp->fx_line,
8429 _("cannot represent relocation type %s"),
8430 bfd_get_reloc_code_name (code));
8431 /* Set howto to a garbage value so that we can keep going. */
8432 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
8433 gas_assert (rel->howto != NULL);
8434 }
8435
8436 return rel;
8437}
8438
8439#include "tc-i386-intel.c"
8440
8441void
8442tc_x86_parse_to_dw2regnum (expressionS *exp)
8443{
8444 int saved_naked_reg;
8445 char saved_register_dot;
8446
8447 saved_naked_reg = allow_naked_reg;
8448 allow_naked_reg = 1;
8449 saved_register_dot = register_chars['.'];
8450 register_chars['.'] = '.';
8451 allow_pseudo_reg = 1;
8452 expression_and_evaluate (exp);
8453 allow_pseudo_reg = 0;
8454 register_chars['.'] = saved_register_dot;
8455 allow_naked_reg = saved_naked_reg;
8456
8457 if (exp->X_op == O_register && exp->X_add_number >= 0)
8458 {
8459 if ((addressT) exp->X_add_number < i386_regtab_size)
8460 {
8461 exp->X_op = O_constant;
8462 exp->X_add_number = i386_regtab[exp->X_add_number]
8463 .dw2_regnum[flag_code >> 1];
8464 }
8465 else
8466 exp->X_op = O_illegal;
8467 }
8468}
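
/* This hook is what lets CFI directives name registers directly, for
   instance (illustrative source):

       .cfi_startproc
       .cfi_def_cfa_register %rbp
       .cfi_offset %r12, -24
       .cfi_endproc

   The register token is parsed with naked-register syntax temporarily
   enabled and translated to the DWARF register number appropriate for
   the current code size.  */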
8469
8470void
8471tc_x86_frame_initial_instructions (void)
8472{
8473 static unsigned int sp_regno[2];
8474
8475 if (!sp_regno[flag_code >> 1])
8476 {
8477 char *saved_input = input_line_pointer;
8478 char sp[][4] = {"esp", "rsp"};
8479 expressionS exp;
8480
8481 input_line_pointer = sp[flag_code >> 1];
8482 tc_x86_parse_to_dw2regnum (&exp);
8483 gas_assert (exp.X_op == O_constant);
8484 sp_regno[flag_code >> 1] = exp.X_add_number;
8485 input_line_pointer = saved_input;
8486 }
8487
8488 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
8489 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
8490}
8491
8492int
8493i386_elf_section_type (const char *str, size_t len)
8494{
8495 if (flag_code == CODE_64BIT
8496 && len == sizeof ("unwind") - 1
8497 && strncmp (str, "unwind", 6) == 0)
8498 return SHT_X86_64_UNWIND;
8499
8500 return -1;
8501}
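
/* Illustrative use: in 64-bit mode a directive such as

       .section .my_unwind,"a",@unwind

   gets section type SHT_X86_64_UNWIND rather than SHT_PROGBITS; in
   32-bit mode the string is not recognized and -1 is returned.  */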
8502
8503#ifdef TE_SOLARIS
8504void
8505i386_solaris_fix_up_eh_frame (segT sec)
8506{
8507 if (flag_code == CODE_64BIT)
8508 elf_section_type (sec) = SHT_X86_64_UNWIND;
8509}
8510#endif
8511
8512#ifdef TE_PE
8513void
8514tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
8515{
8516 expressionS expr;
8517
8518 expr.X_op = O_secrel;
8519 expr.X_add_symbol = symbol;
8520 expr.X_add_number = 0;
8521 emit_expr (&expr, size);
8522}
8523#endif
8524
8525#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8526/* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
8527
8528bfd_vma
8529x86_64_section_letter (int letter, char **ptr_msg)
8530{
8531 if (flag_code == CODE_64BIT)
8532 {
8533 if (letter == 'l')
8534 return SHF_X86_64_LARGE;
8535
8536 *ptr_msg = _("Bad .section directive: want a,l,w,x,M,S,G,T in string");
8537 }
8538 else
8539 *ptr_msg = _("Bad .section directive: want a,w,x,M,S,G,T in string");
8540 return -1;
8541}
8542
8543bfd_vma
8544x86_64_section_word (char *str, size_t len)
8545{
8546 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
8547 return SHF_X86_64_LARGE;
8548
8549 return -1;
8550}
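
/* Illustrative use of the flag: in 64-bit mode

       .section .ldata,"awl",@progbits

   marks the section with SHF_X86_64_LARGE via the 'l' letter handled
   above; the unquoted keyword "large" is the equivalent accepted by
   x86_64_section_word.  */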
8551
8552static void
8553handle_large_common (int small ATTRIBUTE_UNUSED)
8554{
8555 if (flag_code != CODE_64BIT)
8556 {
8557 s_comm_internal (0, elf_common_parse);
8558 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
8559 }
8560 else
8561 {
8562 static segT lbss_section;
8563 asection *saved_com_section_ptr = elf_com_section_ptr;
8564 asection *saved_bss_section = bss_section;
8565
8566 if (lbss_section == NULL)
8567 {
8568 flagword applicable;
8569 segT seg = now_seg;
8570 subsegT subseg = now_subseg;
8571
8572 /* The .lbss section is for local .largecomm symbols. */
8573 lbss_section = subseg_new (".lbss", 0);
8574 applicable = bfd_applicable_section_flags (stdoutput);
8575 bfd_set_section_flags (stdoutput, lbss_section,
8576 applicable & SEC_ALLOC);
8577 seg_info (lbss_section)->bss = 1;
8578
8579 subseg_set (seg, subseg);
8580 }
8581
8582 elf_com_section_ptr = &_bfd_elf_large_com_section;
8583 bss_section = lbss_section;
8584
8585 s_comm_internal (0, elf_common_parse);
8586
8587 elf_com_section_ptr = saved_com_section_ptr;
8588 bss_section = saved_bss_section;
8589 }
8590}
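
/* Illustrative directive:

       .largecomm big_buf, 0x200000, 32

   The operands follow .comm (name, size, optional alignment) since
   both are parsed by s_comm_internal/elf_common_parse; in 64-bit mode
   the symbol is allocated against the large common section, otherwise
   it degrades to an ordinary .comm with the warning above.  */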
8591#endif /* OBJ_ELF || OBJ_MAYBE_ELF */