Commit: Optimize REP prefix check
File: [deliverable/binutils-gdb.git] / gas / config / tc-i386.c
/* tc-i386.c -- Assemble code for the Intel 80386
   Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
   2012
   Free Software Foundation, Inc.

   This file is part of GAS, the GNU Assembler.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GAS; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */

/* Intel 80386 machine specific gas.
   Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
   x86_64 support by Jan Hubicka (jh@suse.cz)
   VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
   Bugs & suggestions are completely welcome.  This is free software.
   Please help us make it better.  */
30
31#include "as.h"
32#include "safe-ctype.h"
33#include "subsegs.h"
34#include "dwarf2dbg.h"
35#include "dw2gencfi.h"
36#include "elf/x86-64.h"
37#include "opcodes/i386-init.h"
38
39#ifndef REGISTER_WARNINGS
40#define REGISTER_WARNINGS 1
41#endif
42
43#ifndef INFER_ADDR_PREFIX
44#define INFER_ADDR_PREFIX 1
45#endif
46
47#ifndef DEFAULT_ARCH
48#define DEFAULT_ARCH "i386"
49#endif
50
51#ifndef INLINE
52#if __GNUC__ >= 2
53#define INLINE __inline__
54#else
55#define INLINE
56#endif
57#endif
58
59/* Prefixes will be emitted in the order defined below.
60 WAIT_PREFIX must be the first prefix since FWAIT is really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
64#define WAIT_PREFIX 0
65#define SEG_PREFIX 1
66#define ADDR_PREFIX 2
67#define DATA_PREFIX 3
68#define REP_PREFIX 4
69#define HLE_PREFIX REP_PREFIX
70#define LOCK_PREFIX 5
71#define REX_PREFIX 6 /* must come last. */
72#define MAX_PREFIXES 7 /* max prefixes per opcode */
73
74/* we define the syntax here (modulo base,index,scale syntax) */
75#define REGISTER_PREFIX '%'
76#define IMMEDIATE_PREFIX '$'
77#define ABSOLUTE_PREFIX '*'
78
79/* these are the instruction mnemonic suffixes in AT&T syntax or
80 memory operand size in Intel syntax. */
81#define WORD_MNEM_SUFFIX 'w'
82#define BYTE_MNEM_SUFFIX 'b'
83#define SHORT_MNEM_SUFFIX 's'
84#define LONG_MNEM_SUFFIX 'l'
85#define QWORD_MNEM_SUFFIX 'q'
86#define XMMWORD_MNEM_SUFFIX 'x'
87#define YMMWORD_MNEM_SUFFIX 'y'
88/* Intel Syntax. Use a non-ascii letter since since it never appears
89 in instructions. */
90#define LONG_DOUBLE_MNEM_SUFFIX '\1'
91
92#define END_OF_INSN '\0'
93
94/*
95 'templates' is for grouping together 'template' structures for opcodes
96 of the same name. This is only used for storing the insns in the grand
97 ole hash table of insns.
98 The templates themselves start at START and range up to (but not including)
99 END.
100 */
101typedef struct
102{
103 const insn_template *start;
104 const insn_template *end;
105}
106templates;
107
/* 386 operand encoding bytes: see 386 book for details of this.  */
typedef struct
{
  unsigned int regmem;	/* codes register or memory operand */
  unsigned int reg;	/* codes register operand (or extended opcode) */
  unsigned int mode;	/* how to interpret regmem & reg */
}
modrm_byte;
116
/* x86-64 REX extension prefix, kept as a plain int bit mask.  */
typedef int rex_byte;
119
/* 386 opcode byte to code indirect addressing.  */
typedef struct
{
  unsigned base;	/* base register field */
  unsigned index;	/* index register field */
  unsigned scale;	/* scale factor field */
}
sib_byte;
128
129/* x86 arch names, types and features */
130typedef struct
131{
132 const char *name; /* arch name */
133 unsigned int len; /* arch string length */
134 enum processor_type type; /* arch type */
135 i386_cpu_flags flags; /* cpu feature flags */
136 unsigned int skip; /* show_arch should skip this. */
137 unsigned int negated; /* turn off indicated flags. */
138}
139arch_entry;
140
141static void update_code_flag (int, int);
142static void set_code_flag (int);
143static void set_16bit_gcc_code_flag (int);
144static void set_intel_syntax (int);
145static void set_intel_mnemonic (int);
146static void set_allow_index_reg (int);
147static void set_check (int);
148static void set_cpu_arch (int);
149#ifdef TE_PE
150static void pe_directive_secrel (int);
151#endif
152static void signed_cons (int);
153static char *output_invalid (int c);
154static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
155 const char *);
156static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
157 const char *);
158static int i386_att_operand (char *);
159static int i386_intel_operand (char *, int);
160static int i386_intel_simplify (expressionS *);
161static int i386_intel_parse_name (const char *, expressionS *);
162static const reg_entry *parse_register (char *, char **);
163static char *parse_insn (char *, char *);
164static char *parse_operands (char *, const char *);
165static void swap_operands (void);
166static void swap_2_operands (int, int);
167static void optimize_imm (void);
168static void optimize_disp (void);
169static const insn_template *match_template (void);
170static int check_string (void);
171static int process_suffix (void);
172static int check_byte_reg (void);
173static int check_long_reg (void);
174static int check_qword_reg (void);
175static int check_word_reg (void);
176static int finalize_imm (void);
177static int process_operands (void);
178static const seg_entry *build_modrm_byte (void);
179static void output_insn (void);
180static void output_imm (fragS *, offsetT);
181static void output_disp (fragS *, offsetT);
182#ifndef I386COFF
183static void s_bss (int);
184#endif
185#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186static void handle_large_common (int small ATTRIBUTE_UNUSED);
187#endif
188
189static const char *default_arch = DEFAULT_ARCH;
190
191/* VEX prefix. */
192typedef struct
193{
194 /* VEX prefix is either 2 byte or 3 byte. */
195 unsigned char bytes[3];
196 unsigned int length;
197 /* Destination or source register specifier. */
198 const reg_entry *register_specifier;
199} vex_prefix;
200
201/* 'md_assemble ()' gathers together information and puts it into a
202 i386_insn. */
203
204union i386_op
205 {
206 expressionS *disps;
207 expressionS *imms;
208 const reg_entry *regs;
209 };
210
/* Reasons why assembling the current instruction can fail; the value is
   recorded in struct _i386_insn's 'error' field for diagnostics.
   NOTE: enumerator order is significant only in that it fixes the
   integer values; do not reorder without checking users.  */
enum i386_error
  {
    operand_size_mismatch,
    operand_type_mismatch,
    register_type_mismatch,
    number_of_operands_mismatch,
    invalid_instruction_suffix,
    bad_imm4,
    old_gcc_only,
    unsupported_with_intel_mnemonic,
    unsupported_syntax,
    unsupported,
    invalid_vsib_address,
    invalid_vector_register_set,
    unsupported_vector_index_register
  };
227
228struct _i386_insn
229 {
230 /* TM holds the template for the insn were currently assembling. */
231 insn_template tm;
232
233 /* SUFFIX holds the instruction size suffix for byte, word, dword
234 or qword, if given. */
235 char suffix;
236
237 /* OPERANDS gives the number of given operands. */
238 unsigned int operands;
239
240 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
241 of given register, displacement, memory operands and immediate
242 operands. */
243 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
244
245 /* TYPES [i] is the type (see above #defines) which tells us how to
246 use OP[i] for the corresponding operand. */
247 i386_operand_type types[MAX_OPERANDS];
248
249 /* Displacement expression, immediate expression, or register for each
250 operand. */
251 union i386_op op[MAX_OPERANDS];
252
253 /* Flags for operands. */
254 unsigned int flags[MAX_OPERANDS];
255#define Operand_PCrel 1
256
257 /* Relocation type for operand */
258 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
259
260 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
261 the base index byte below. */
262 const reg_entry *base_reg;
263 const reg_entry *index_reg;
264 unsigned int log2_scale_factor;
265
266 /* SEG gives the seg_entries of this insn. They are zero unless
267 explicit segment overrides are given. */
268 const seg_entry *seg[2];
269
270 /* PREFIX holds all the given prefix opcodes (usually null).
271 PREFIXES is the number of prefix opcodes. */
272 unsigned int prefixes;
273 unsigned char prefix[MAX_PREFIXES];
274
275 /* RM and SIB are the modrm byte and the sib byte where the
276 addressing modes of this insn are encoded. */
277 modrm_byte rm;
278 rex_byte rex;
279 sib_byte sib;
280 vex_prefix vex;
281
282 /* Swap operand in encoding. */
283 unsigned int swap_operand;
284
285 /* Prefer 8bit or 32bit displacement in encoding. */
286 enum
287 {
288 disp_encoding_default = 0,
289 disp_encoding_8bit,
290 disp_encoding_32bit
291 } disp_encoding;
292
293 /* REP prefix. */
294 const char *rep_prefix;
295
296 /* Have HLE prefix. */
297 unsigned int have_hle;
298
299 /* Error message. */
300 enum i386_error error;
301 };
302
303typedef struct _i386_insn i386_insn;
304
305/* List of chars besides those in app.c:symbol_chars that can start an
306 operand. Used to prevent the scrubber eating vital white-space. */
307const char extra_symbol_chars[] = "*%-(["
308#ifdef LEX_AT
309 "@"
310#endif
311#ifdef LEX_QM
312 "?"
313#endif
314 ;
315
316#if (defined (TE_I386AIX) \
317 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
318 && !defined (TE_GNU) \
319 && !defined (TE_LINUX) \
320 && !defined (TE_NACL) \
321 && !defined (TE_NETWARE) \
322 && !defined (TE_FreeBSD) \
323 && !defined (TE_DragonFly) \
324 && !defined (TE_NetBSD)))
325/* This array holds the chars that always start a comment. If the
326 pre-processor is disabled, these aren't very useful. The option
327 --divide will remove '/' from this list. */
328const char *i386_comment_chars = "#/";
329#define SVR4_COMMENT_CHARS 1
330#define PREFIX_SEPARATOR '\\'
331
332#else
333const char *i386_comment_chars = "#";
334#define PREFIX_SEPARATOR '/'
335#endif
336
337/* This array holds the chars that only start a comment at the beginning of
338 a line. If the line seems to have the form '# 123 filename'
339 .line and .file directives will appear in the pre-processed output.
340 Note that input_file.c hand checks for '#' at the beginning of the
341 first line of the input file. This is because the compiler outputs
342 #NO_APP at the beginning of its output.
343 Also note that comments started like this one will always work if
344 '/' isn't otherwise defined. */
345const char line_comment_chars[] = "#/";
346
347const char line_separator_chars[] = ";";
348
349/* Chars that can be used to separate mant from exp in floating point
350 nums. */
351const char EXP_CHARS[] = "eE";
352
353/* Chars that mean this number is a floating point constant
354 As in 0f12.456
355 or 0d1.2345e12. */
356const char FLT_CHARS[] = "fFdDxX";
357
358/* Tables for lexical analysis. */
359static char mnemonic_chars[256];
360static char register_chars[256];
361static char operand_chars[256];
362static char identifier_chars[256];
363static char digit_chars[256];
364
365/* Lexical macros. */
366#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
367#define is_operand_char(x) (operand_chars[(unsigned char) x])
368#define is_register_char(x) (register_chars[(unsigned char) x])
369#define is_space_char(x) ((x) == ' ')
370#define is_identifier_char(x) (identifier_chars[(unsigned char) x])
371#define is_digit_char(x) (digit_chars[(unsigned char) x])
372
373/* All non-digit non-letter characters that may occur in an operand. */
374static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
375
376/* md_assemble() always leaves the strings it's passed unaltered. To
377 effect this we maintain a stack of saved characters that we've smashed
378 with '\0's (indicating end of strings for various sub-fields of the
379 assembler instruction). */
380static char save_stack[32];
381static char *save_stack_p;
382#define END_STRING_AND_SAVE(s) \
383 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
384#define RESTORE_END_STRING(s) \
385 do { *(s) = *--save_stack_p; } while (0)
386
387/* The instruction we're assembling. */
388static i386_insn i;
389
390/* Possible templates for current insn. */
391static const templates *current_templates;
392
393/* Per instruction expressionS buffers: max displacements & immediates. */
394static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
395static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
396
397/* Current operand we are working on. */
398static int this_operand = -1;
399
400/* We support four different modes. FLAG_CODE variable is used to distinguish
401 these. */
402
403enum flag_code {
404 CODE_32BIT,
405 CODE_16BIT,
406 CODE_64BIT };
407
408static enum flag_code flag_code;
409static unsigned int object_64bit;
410static unsigned int disallow_64bit_reloc;
411static int use_rela_relocations = 0;
412
413#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
414 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
415 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
416
417/* The ELF ABI to use. */
418enum x86_elf_abi
419{
420 I386_ABI,
421 X86_64_ABI,
422 X86_64_X32_ABI
423};
424
425static enum x86_elf_abi x86_elf_abi = I386_ABI;
426#endif
427
428/* The names used to print error messages. */
429static const char *flag_code_names[] =
430 {
431 "32",
432 "16",
433 "64"
434 };
435
436/* 1 for intel syntax,
437 0 if att syntax. */
438static int intel_syntax = 0;
439
440/* 1 for intel mnemonic,
441 0 if att mnemonic. */
442static int intel_mnemonic = !SYSV386_COMPAT;
443
444/* 1 if support old (<= 2.8.1) versions of gcc. */
445static int old_gcc = OLDGCC_COMPAT;
446
447/* 1 if pseudo registers are permitted. */
448static int allow_pseudo_reg = 0;
449
450/* 1 if register prefix % not required. */
451static int allow_naked_reg = 0;
452
453/* 1 if pseudo index register, eiz/riz, is allowed . */
454static int allow_index_reg = 0;
455
456static enum check_kind
457 {
458 check_none = 0,
459 check_warning,
460 check_error
461 }
462sse_check, operand_check = check_warning;
463
464/* Register prefix used for error message. */
465static const char *register_prefix = "%";
466
467/* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
468 leave, push, and pop instructions so that gcc has the same stack
469 frame as in 32 bit mode. */
470static char stackop_size = '\0';
471
472/* Non-zero to optimize code alignment. */
473int optimize_align_code = 1;
474
475/* Non-zero to quieten some warnings. */
476static int quiet_warnings = 0;
477
478/* CPU name. */
479static const char *cpu_arch_name = NULL;
480static char *cpu_sub_arch_name = NULL;
481
482/* CPU feature flags. */
483static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
484
485/* If we have selected a cpu we are generating instructions for. */
486static int cpu_arch_tune_set = 0;
487
488/* Cpu we are generating instructions for. */
489enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
490
491/* CPU feature flags of cpu we are generating instructions for. */
492static i386_cpu_flags cpu_arch_tune_flags;
493
494/* CPU instruction set architecture used. */
495enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
496
497/* CPU feature flags of instruction set architecture used. */
498i386_cpu_flags cpu_arch_isa_flags;
499
500/* If set, conditional jumps are not automatically promoted to handle
501 larger than a byte offset. */
502static unsigned int no_cond_jump_promotion = 0;
503
504/* Encode SSE instructions with VEX prefix. */
505static unsigned int sse2avx;
506
507/* Encode scalar AVX instructions with specific vector length. */
508static enum
509 {
510 vex128 = 0,
511 vex256
512 } avxscalar;
513
514/* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
515static symbolS *GOT_symbol;
516
517/* The dwarf2 return column, adjusted for 32 or 64 bit. */
518unsigned int x86_dwarf2_return_column;
519
520/* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
521int x86_cie_data_alignment;
522
523/* Interface to relax_segment.
524 There are 3 major relax states for 386 jump insns because the
525 different types of jumps add different sizes to frags when we're
526 figuring out what sort of jump to choose to reach a given label. */
527
528/* Types. */
529#define UNCOND_JUMP 0
530#define COND_JUMP 1
531#define COND_JUMP86 2
532
533/* Sizes. */
534#define CODE16 1
535#define SMALL 0
536#define SMALL16 (SMALL | CODE16)
537#define BIG 2
538#define BIG16 (BIG | CODE16)
539
540#ifndef INLINE
541#ifdef __GNUC__
542#define INLINE __inline__
543#else
544#define INLINE
545#endif
546#endif
547
548#define ENCODE_RELAX_STATE(type, size) \
549 ((relax_substateT) (((type) << 2) | (size)))
550#define TYPE_FROM_RELAX_STATE(s) \
551 ((s) >> 2)
552#define DISP_SIZE_FROM_RELAX_STATE(s) \
553 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
554
555/* This table is used by relax_frag to promote short jumps to long
556 ones where necessary. SMALL (short) jumps may be promoted to BIG
557 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
558 don't allow a short jump in a 32 bit code segment to be promoted to
559 a 16 bit offset jump because it's slower (requires data size
560 prefix), and doesn't work, unless the destination is in the bottom
561 64k of the code segment (The top 16 bits of eip are zeroed). */
562
563const relax_typeS md_relax_table[] =
564{
565 /* The fields are:
566 1) most positive reach of this state,
567 2) most negative reach of this state,
568 3) how many bytes this mode will have in the variable part of the frag
569 4) which index into the table to try if we can't fit into this one. */
570
571 /* UNCOND_JUMP states. */
572 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
573 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
574 /* dword jmp adds 4 bytes to frag:
575 0 extra opcode bytes, 4 displacement bytes. */
576 {0, 0, 4, 0},
577 /* word jmp adds 2 byte2 to frag:
578 0 extra opcode bytes, 2 displacement bytes. */
579 {0, 0, 2, 0},
580
581 /* COND_JUMP states. */
582 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
583 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
584 /* dword conditionals adds 5 bytes to frag:
585 1 extra opcode byte, 4 displacement bytes. */
586 {0, 0, 5, 0},
587 /* word conditionals add 3 bytes to frag:
588 1 extra opcode byte, 2 displacement bytes. */
589 {0, 0, 3, 0},
590
591 /* COND_JUMP86 states. */
592 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
593 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
594 /* dword conditionals adds 5 bytes to frag:
595 1 extra opcode byte, 4 displacement bytes. */
596 {0, 0, 5, 0},
597 /* word conditionals add 4 bytes to frag:
598 1 displacement byte and a 3 byte long branch insn. */
599 {0, 0, 4, 0}
600};
601
602static const arch_entry cpu_arch[] =
603{
604 /* Do not replace the first two entries - i386_target_format()
605 relies on them being there in this order. */
606 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
607 CPU_GENERIC32_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
609 CPU_GENERIC64_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
611 CPU_NONE_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
613 CPU_I186_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
615 CPU_I286_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
617 CPU_I386_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
619 CPU_I486_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
621 CPU_I586_FLAGS, 0, 0 },
622 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
623 CPU_I686_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
625 CPU_I586_FLAGS, 0, 0 },
626 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
627 CPU_PENTIUMPRO_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
629 CPU_P2_FLAGS, 0, 0 },
630 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
631 CPU_P3_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
633 CPU_P4_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
635 CPU_CORE_FLAGS, 0, 0 },
636 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
637 CPU_NOCONA_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
639 CPU_CORE_FLAGS, 1, 0 },
640 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
641 CPU_CORE_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
643 CPU_CORE2_FLAGS, 1, 0 },
644 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
645 CPU_CORE2_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
647 CPU_COREI7_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
649 CPU_L1OM_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
651 CPU_K1OM_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
653 CPU_K6_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
655 CPU_K6_2_FLAGS, 0, 0 },
656 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
657 CPU_ATHLON_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
659 CPU_K8_FLAGS, 1, 0 },
660 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
661 CPU_K8_FLAGS, 0, 0 },
662 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
663 CPU_K8_FLAGS, 0, 0 },
664 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
665 CPU_AMDFAM10_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
667 CPU_BDVER1_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
669 CPU_BDVER2_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
671 CPU_BDVER3_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
673 CPU_BTVER1_FLAGS, 0, 0 },
674 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
675 CPU_BTVER2_FLAGS, 0, 0 },
676 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
677 CPU_8087_FLAGS, 0, 0 },
678 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
679 CPU_287_FLAGS, 0, 0 },
680 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
681 CPU_387_FLAGS, 0, 0 },
682 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
683 CPU_ANY87_FLAGS, 0, 1 },
684 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
685 CPU_MMX_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
687 CPU_3DNOWA_FLAGS, 0, 1 },
688 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
689 CPU_SSE_FLAGS, 0, 0 },
690 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
691 CPU_SSE2_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
693 CPU_SSE3_FLAGS, 0, 0 },
694 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
695 CPU_SSSE3_FLAGS, 0, 0 },
696 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
697 CPU_SSE4_1_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
699 CPU_SSE4_2_FLAGS, 0, 0 },
700 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
701 CPU_SSE4_2_FLAGS, 0, 0 },
702 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
703 CPU_ANY_SSE_FLAGS, 0, 1 },
704 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
705 CPU_AVX_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
707 CPU_AVX2_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
709 CPU_ANY_AVX_FLAGS, 0, 1 },
710 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
711 CPU_VMX_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
713 CPU_VMFUNC_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
715 CPU_SMX_FLAGS, 0, 0 },
716 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
717 CPU_XSAVE_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
719 CPU_XSAVEOPT_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
721 CPU_AES_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
723 CPU_PCLMUL_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
725 CPU_PCLMUL_FLAGS, 1, 0 },
726 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
727 CPU_FSGSBASE_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
729 CPU_RDRND_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
731 CPU_F16C_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
733 CPU_BMI2_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
735 CPU_FMA_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
737 CPU_FMA4_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
739 CPU_XOP_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
741 CPU_LWP_FLAGS, 0, 0 },
742 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
743 CPU_MOVBE_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
745 CPU_CX16_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
747 CPU_EPT_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
749 CPU_LZCNT_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
751 CPU_HLE_FLAGS, 0, 0 },
752 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
753 CPU_RTM_FLAGS, 0, 0 },
754 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
755 CPU_INVPCID_FLAGS, 0, 0 },
756 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
757 CPU_CLFLUSH_FLAGS, 0, 0 },
758 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
759 CPU_NOP_FLAGS, 0, 0 },
760 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
761 CPU_SYSCALL_FLAGS, 0, 0 },
762 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
763 CPU_RDTSCP_FLAGS, 0, 0 },
764 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
765 CPU_3DNOW_FLAGS, 0, 0 },
766 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
767 CPU_3DNOWA_FLAGS, 0, 0 },
768 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
769 CPU_PADLOCK_FLAGS, 0, 0 },
770 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
771 CPU_SVME_FLAGS, 1, 0 },
772 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
773 CPU_SVME_FLAGS, 0, 0 },
774 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
775 CPU_SSE4A_FLAGS, 0, 0 },
776 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
777 CPU_ABM_FLAGS, 0, 0 },
778 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
779 CPU_BMI_FLAGS, 0, 0 },
780 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
781 CPU_TBM_FLAGS, 0, 0 },
782 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
783 CPU_ADX_FLAGS, 0, 0 },
784 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
785 CPU_RDSEED_FLAGS, 0, 0 },
786 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
787 CPU_PRFCHW_FLAGS, 0, 0 },
788 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
789 CPU_SMAP_FLAGS, 0, 0 },
790};
791
792#ifdef I386COFF
793/* Like s_lcomm_internal in gas/read.c but the alignment string
794 is allowed to be optional. */
795
796static symbolS *
797pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
798{
799 addressT align = 0;
800
801 SKIP_WHITESPACE ();
802
803 if (needs_align
804 && *input_line_pointer == ',')
805 {
806 align = parse_align (needs_align - 1);
807
808 if (align == (addressT) -1)
809 return NULL;
810 }
811 else
812 {
813 if (size >= 8)
814 align = 3;
815 else if (size >= 4)
816 align = 2;
817 else if (size >= 2)
818 align = 1;
819 else
820 align = 0;
821 }
822
823 bss_alloc (symbolP, size, align);
824 return symbolP;
825}
826
827static void
828pe_lcomm (int needs_align)
829{
830 s_comm_internal (needs_align * 2, pe_lcomm_internal);
831}
832#endif
833
834const pseudo_typeS md_pseudo_table[] =
835{
836#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
837 {"align", s_align_bytes, 0},
838#else
839 {"align", s_align_ptwo, 0},
840#endif
841 {"arch", set_cpu_arch, 0},
842#ifndef I386COFF
843 {"bss", s_bss, 0},
844#else
845 {"lcomm", pe_lcomm, 1},
846#endif
847 {"ffloat", float_cons, 'f'},
848 {"dfloat", float_cons, 'd'},
849 {"tfloat", float_cons, 'x'},
850 {"value", cons, 2},
851 {"slong", signed_cons, 4},
852 {"noopt", s_ignore, 0},
853 {"optim", s_ignore, 0},
854 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
855 {"code16", set_code_flag, CODE_16BIT},
856 {"code32", set_code_flag, CODE_32BIT},
857 {"code64", set_code_flag, CODE_64BIT},
858 {"intel_syntax", set_intel_syntax, 1},
859 {"att_syntax", set_intel_syntax, 0},
860 {"intel_mnemonic", set_intel_mnemonic, 1},
861 {"att_mnemonic", set_intel_mnemonic, 0},
862 {"allow_index_reg", set_allow_index_reg, 1},
863 {"disallow_index_reg", set_allow_index_reg, 0},
864 {"sse_check", set_check, 0},
865 {"operand_check", set_check, 1},
866#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
867 {"largecomm", handle_large_common, 0},
868#else
869 {"file", (void (*) (int)) dwarf2_directive_file, 0},
870 {"loc", dwarf2_directive_loc, 0},
871 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
872#endif
873#ifdef TE_PE
874 {"secrel32", pe_directive_secrel, 0},
875#endif
876 {0, 0, 0}
877};
878
879/* For interface with expression (). */
880extern char *input_line_pointer;
881
882/* Hash table for instruction mnemonic lookup. */
883static struct hash_control *op_hash;
884
885/* Hash table for register lookup. */
886static struct hash_control *reg_hash;
887\f
888void
889i386_align_code (fragS *fragP, int count)
890{
891 /* Various efficient no-op patterns for aligning code labels.
892 Note: Don't try to assemble the instructions in the comments.
893 0L and 0w are not legal. */
894 static const char f32_1[] =
895 {0x90}; /* nop */
896 static const char f32_2[] =
897 {0x66,0x90}; /* xchg %ax,%ax */
898 static const char f32_3[] =
899 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
900 static const char f32_4[] =
901 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
902 static const char f32_5[] =
903 {0x90, /* nop */
904 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
905 static const char f32_6[] =
906 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
907 static const char f32_7[] =
908 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
909 static const char f32_8[] =
910 {0x90, /* nop */
911 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
912 static const char f32_9[] =
913 {0x89,0xf6, /* movl %esi,%esi */
914 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
915 static const char f32_10[] =
916 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
917 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
918 static const char f32_11[] =
919 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
920 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
921 static const char f32_12[] =
922 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
923 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
924 static const char f32_13[] =
925 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
926 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
927 static const char f32_14[] =
928 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
929 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
930 static const char f16_3[] =
931 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
932 static const char f16_4[] =
933 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
934 static const char f16_5[] =
935 {0x90, /* nop */
936 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
937 static const char f16_6[] =
938 {0x89,0xf6, /* mov %si,%si */
939 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
940 static const char f16_7[] =
941 {0x8d,0x74,0x00, /* lea 0(%si),%si */
942 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
943 static const char f16_8[] =
944 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
945 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
946 static const char jump_31[] =
947 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
948 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
949 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
950 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
951 static const char *const f32_patt[] = {
952 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
953 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
954 };
955 static const char *const f16_patt[] = {
956 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
957 };
958 /* nopl (%[re]ax) */
959 static const char alt_3[] =
960 {0x0f,0x1f,0x00};
961 /* nopl 0(%[re]ax) */
962 static const char alt_4[] =
963 {0x0f,0x1f,0x40,0x00};
964 /* nopl 0(%[re]ax,%[re]ax,1) */
965 static const char alt_5[] =
966 {0x0f,0x1f,0x44,0x00,0x00};
967 /* nopw 0(%[re]ax,%[re]ax,1) */
968 static const char alt_6[] =
969 {0x66,0x0f,0x1f,0x44,0x00,0x00};
970 /* nopl 0L(%[re]ax) */
971 static const char alt_7[] =
972 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
973 /* nopl 0L(%[re]ax,%[re]ax,1) */
974 static const char alt_8[] =
975 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
976 /* nopw 0L(%[re]ax,%[re]ax,1) */
977 static const char alt_9[] =
978 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
979 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
980 static const char alt_10[] =
981 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
982 /* data16
983 nopw %cs:0L(%[re]ax,%[re]ax,1) */
984 static const char alt_long_11[] =
985 {0x66,
986 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
987 /* data16
988 data16
989 nopw %cs:0L(%[re]ax,%[re]ax,1) */
990 static const char alt_long_12[] =
991 {0x66,
992 0x66,
993 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
994 /* data16
995 data16
996 data16
997 nopw %cs:0L(%[re]ax,%[re]ax,1) */
998 static const char alt_long_13[] =
999 {0x66,
1000 0x66,
1001 0x66,
1002 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1003 /* data16
1004 data16
1005 data16
1006 data16
1007 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1008 static const char alt_long_14[] =
1009 {0x66,
1010 0x66,
1011 0x66,
1012 0x66,
1013 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1014 /* data16
1015 data16
1016 data16
1017 data16
1018 data16
1019 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1020 static const char alt_long_15[] =
1021 {0x66,
1022 0x66,
1023 0x66,
1024 0x66,
1025 0x66,
1026 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1027 /* nopl 0(%[re]ax,%[re]ax,1)
1028 nopw 0(%[re]ax,%[re]ax,1) */
1029 static const char alt_short_11[] =
1030 {0x0f,0x1f,0x44,0x00,0x00,
1031 0x66,0x0f,0x1f,0x44,0x00,0x00};
1032 /* nopw 0(%[re]ax,%[re]ax,1)
1033 nopw 0(%[re]ax,%[re]ax,1) */
1034 static const char alt_short_12[] =
1035 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1036 0x66,0x0f,0x1f,0x44,0x00,0x00};
1037 /* nopw 0(%[re]ax,%[re]ax,1)
1038 nopl 0L(%[re]ax) */
1039 static const char alt_short_13[] =
1040 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1041 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1042 /* nopl 0L(%[re]ax)
1043 nopl 0L(%[re]ax) */
1044 static const char alt_short_14[] =
1045 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1046 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1047 /* nopl 0L(%[re]ax)
1048 nopl 0L(%[re]ax,%[re]ax,1) */
1049 static const char alt_short_15[] =
1050 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1051 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1052 static const char *const alt_short_patt[] = {
1053 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1054 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1055 alt_short_14, alt_short_15
1056 };
1057 static const char *const alt_long_patt[] = {
1058 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1059 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1060 alt_long_14, alt_long_15
1061 };
1062
1063 /* Only align for at least a positive non-zero boundary. */
1064 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1065 return;
1066
1067 /* We need to decide which NOP sequence to use for 32bit and
1068 64bit. When -mtune= is used:
1069
1070 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1071 PROCESSOR_GENERIC32, f32_patt will be used.
1072 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1073 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1074 PROCESSOR_GENERIC64, alt_long_patt will be used.
1075 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1076 PROCESSOR_AMDFAM10, PROCESSOR_BD and PROCESSOR_BT, alt_short_patt
1077 will be used.
1078
1079 When -mtune= isn't used, alt_long_patt will be used if
1080 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1081 be used.
1082
1083 When -march= or .arch is used, we can't use anything beyond
1084 cpu_arch_isa_flags. */
1085
1086 if (flag_code == CODE_16BIT)
1087 {
1088 if (count > 8)
1089 {
1090 memcpy (fragP->fr_literal + fragP->fr_fix,
1091 jump_31, count);
1092 /* Adjust jump offset. */
1093 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1094 }
1095 else
1096 memcpy (fragP->fr_literal + fragP->fr_fix,
1097 f16_patt[count - 1], count);
1098 }
1099 else
1100 {
1101 const char *const *patt = NULL;
1102
1103 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1104 {
1105 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1106 switch (cpu_arch_tune)
1107 {
1108 case PROCESSOR_UNKNOWN:
1109 /* We use cpu_arch_isa_flags to check if we SHOULD
1110 optimize with nops. */
1111 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1112 patt = alt_long_patt;
1113 else
1114 patt = f32_patt;
1115 break;
1116 case PROCESSOR_PENTIUM4:
1117 case PROCESSOR_NOCONA:
1118 case PROCESSOR_CORE:
1119 case PROCESSOR_CORE2:
1120 case PROCESSOR_COREI7:
1121 case PROCESSOR_L1OM:
1122 case PROCESSOR_K1OM:
1123 case PROCESSOR_GENERIC64:
1124 patt = alt_long_patt;
1125 break;
1126 case PROCESSOR_K6:
1127 case PROCESSOR_ATHLON:
1128 case PROCESSOR_K8:
1129 case PROCESSOR_AMDFAM10:
1130 case PROCESSOR_BD:
1131 case PROCESSOR_BT:
1132 patt = alt_short_patt;
1133 break;
1134 case PROCESSOR_I386:
1135 case PROCESSOR_I486:
1136 case PROCESSOR_PENTIUM:
1137 case PROCESSOR_PENTIUMPRO:
1138 case PROCESSOR_GENERIC32:
1139 patt = f32_patt;
1140 break;
1141 }
1142 }
1143 else
1144 {
1145 switch (fragP->tc_frag_data.tune)
1146 {
1147 case PROCESSOR_UNKNOWN:
1148 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1149 PROCESSOR_UNKNOWN. */
1150 abort ();
1151 break;
1152
1153 case PROCESSOR_I386:
1154 case PROCESSOR_I486:
1155 case PROCESSOR_PENTIUM:
1156 case PROCESSOR_K6:
1157 case PROCESSOR_ATHLON:
1158 case PROCESSOR_K8:
1159 case PROCESSOR_AMDFAM10:
1160 case PROCESSOR_BD:
1161 case PROCESSOR_BT:
1162 case PROCESSOR_GENERIC32:
1163 /* We use cpu_arch_isa_flags to check if we CAN optimize
1164 with nops. */
1165 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1166 patt = alt_short_patt;
1167 else
1168 patt = f32_patt;
1169 break;
1170 case PROCESSOR_PENTIUMPRO:
1171 case PROCESSOR_PENTIUM4:
1172 case PROCESSOR_NOCONA:
1173 case PROCESSOR_CORE:
1174 case PROCESSOR_CORE2:
1175 case PROCESSOR_COREI7:
1176 case PROCESSOR_L1OM:
1177 case PROCESSOR_K1OM:
1178 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1179 patt = alt_long_patt;
1180 else
1181 patt = f32_patt;
1182 break;
1183 case PROCESSOR_GENERIC64:
1184 patt = alt_long_patt;
1185 break;
1186 }
1187 }
1188
1189 if (patt == f32_patt)
1190 {
1191 /* If the padding is less than 15 bytes, we use the normal
1192 ones. Otherwise, we use a jump instruction and adjust
1193 its offset. */
1194 int limit;
1195
1196 /* For 64bit, the limit is 3 bytes. */
1197 if (flag_code == CODE_64BIT
1198 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1199 limit = 3;
1200 else
1201 limit = 15;
1202 if (count < limit)
1203 memcpy (fragP->fr_literal + fragP->fr_fix,
1204 patt[count - 1], count);
1205 else
1206 {
1207 memcpy (fragP->fr_literal + fragP->fr_fix,
1208 jump_31, count);
1209 /* Adjust jump offset. */
1210 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1211 }
1212 }
1213 else
1214 {
1215 /* Maximum length of an instruction is 15 byte. If the
1216 padding is greater than 15 bytes and we don't use jump,
1217 we have to break it into smaller pieces. */
1218 int padding = count;
1219 while (padding > 15)
1220 {
1221 padding -= 15;
1222 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1223 patt [14], 15);
1224 }
1225
1226 if (padding)
1227 memcpy (fragP->fr_literal + fragP->fr_fix,
1228 patt [padding - 1], padding);
1229 }
1230 }
1231 fragP->fr_var = count;
1232}
1233
/* Return 1 if all bits of operand type X are clear, 0 otherwise.
   The union is examined one 32-bit array element at a time; each
   case deliberately falls through to test the lower elements.  */

static INLINE int
operand_type_all_zero (const union i386_operand_type *x)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return !x->array[0];
    default:
      abort ();
    }
}
1251
/* Set every 32-bit element of operand type X to V (normally 0 to
   clear or ~0 to fill).  Each case deliberately falls through to
   assign the lower elements.  */

static INLINE void
operand_type_set (union i386_operand_type *x, unsigned int v)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      x->array[2] = v;
      /* Fall through.  */
    case 2:
      x->array[1] = v;
      /* Fall through.  */
    case 1:
      x->array[0] = v;
      break;
    default:
      abort ();
    }
}
1268
1269static INLINE int
1270operand_type_equal (const union i386_operand_type *x,
1271 const union i386_operand_type *y)
1272{
1273 switch (ARRAY_SIZE(x->array))
1274 {
1275 case 3:
1276 if (x->array[2] != y->array[2])
1277 return 0;
1278 case 2:
1279 if (x->array[1] != y->array[1])
1280 return 0;
1281 case 1:
1282 return x->array[0] == y->array[0];
1283 break;
1284 default:
1285 abort ();
1286 }
1287}
1288
/* Return 1 if all bits of CPU flags X are clear, 0 otherwise.
   Examined one 32-bit array element at a time; each case
   deliberately falls through to test the lower elements.  */

static INLINE int
cpu_flags_all_zero (const union i386_cpu_flags *x)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return !x->array[0];
    default:
      abort ();
    }
}
1306
/* Set every 32-bit element of CPU flags X to V.  Each case
   deliberately falls through to assign the lower elements.  */

static INLINE void
cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      x->array[2] = v;
      /* Fall through.  */
    case 2:
      x->array[1] = v;
      /* Fall through.  */
    case 1:
      x->array[0] = v;
      break;
    default:
      abort ();
    }
}
1323
1324static INLINE int
1325cpu_flags_equal (const union i386_cpu_flags *x,
1326 const union i386_cpu_flags *y)
1327{
1328 switch (ARRAY_SIZE(x->array))
1329 {
1330 case 3:
1331 if (x->array[2] != y->array[2])
1332 return 0;
1333 case 2:
1334 if (x->array[1] != y->array[1])
1335 return 0;
1336 case 1:
1337 return x->array[0] == y->array[0];
1338 break;
1339 default:
1340 abort ();
1341 }
1342}
1343
1344static INLINE int
1345cpu_flags_check_cpu64 (i386_cpu_flags f)
1346{
1347 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1348 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1349}
1350
/* Return the element-wise AND of CPU flag sets X and Y.  Each case
   deliberately falls through to combine the lower elements.  */

static INLINE i386_cpu_flags
cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1368
/* Return the element-wise OR of CPU flag sets X and Y.  Each case
   deliberately falls through to combine the lower elements.  */

static INLINE i386_cpu_flags
cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] |= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] |= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] |= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1386
/* Return X with every flag that is set in Y cleared (X & ~Y).  Each
   case deliberately falls through to combine the lower elements.  */

static INLINE i386_cpu_flags
cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= ~y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= ~y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= ~y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1404
1405#define CPU_FLAGS_ARCH_MATCH 0x1
1406#define CPU_FLAGS_64BIT_MATCH 0x2
1407#define CPU_FLAGS_AES_MATCH 0x4
1408#define CPU_FLAGS_PCLMUL_MATCH 0x8
1409#define CPU_FLAGS_AVX_MATCH 0x10
1410
1411#define CPU_FLAGS_32BIT_MATCH \
1412 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1413 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1414#define CPU_FLAGS_PERFECT_MATCH \
1415 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1416
1417/* Return CPU flags match bits. */
1418
static int
cpu_flags_match (const insn_template *t)
{
  /* Start from the template's required CPU features; fold in the
     64-bit compatibility bit up front.  */
  i386_cpu_flags x = t->cpu_flags;
  int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;

  /* The 64-bit markers were already accounted for above; clear them
     so the remaining comparison is about ISA features only.  */
  x.bitfield.cpu64 = 0;
  x.bitfield.cpuno64 = 0;

  if (cpu_flags_all_zero (&x))
    {
      /* This instruction is available on all archs.  */
      match |= CPU_FLAGS_32BIT_MATCH;
    }
  else
    {
      /* This instruction is available only on some archs.  */
      i386_cpu_flags cpu = cpu_arch_flags;

      cpu.bitfield.cpu64 = 0;
      cpu.bitfield.cpuno64 = 0;
      /* Intersect the template requirements with the enabled
	 architecture features.  */
      cpu = cpu_flags_and (x, cpu);
      if (!cpu_flags_all_zero (&cpu))
	{
	  if (x.bitfield.cpuavx)
	    {
	      /* We only need to check AES/PCLMUL/SSE2AVX with AVX.  */
	      if (cpu.bitfield.cpuavx)
		{
		  /* Check SSE2AVX.  */
		  if (!t->opcode_modifier.sse2avx|| sse2avx)
		    {
		      match |= (CPU_FLAGS_ARCH_MATCH
				| CPU_FLAGS_AVX_MATCH);
		      /* Check AES.  */
		      if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
			match |= CPU_FLAGS_AES_MATCH;
		      /* Check PCLMUL.  */
		      if (!x.bitfield.cpupclmul
			  || cpu.bitfield.cpupclmul)
			match |= CPU_FLAGS_PCLMUL_MATCH;
		    }
		}
	      else
		match |= CPU_FLAGS_ARCH_MATCH;
	    }
	  else
	    match |= CPU_FLAGS_32BIT_MATCH;
	}
    }
  return match;
}
1471
/* Return the element-wise AND of operand types X and Y.  Each case
   deliberately falls through to combine the lower elements.  */

static INLINE i386_operand_type
operand_type_and (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1489
/* Return the element-wise OR of operand types X and Y.  Each case
   deliberately falls through to combine the lower elements.  */

static INLINE i386_operand_type
operand_type_or (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] |= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] |= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] |= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1507
/* Return the element-wise XOR of operand types X and Y.  Each case
   deliberately falls through to combine the lower elements.  */

static INLINE i386_operand_type
operand_type_xor (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] ^= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] ^= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] ^= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1525
1526static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1527static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1528static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1529static const i386_operand_type inoutportreg
1530 = OPERAND_TYPE_INOUTPORTREG;
1531static const i386_operand_type reg16_inoutportreg
1532 = OPERAND_TYPE_REG16_INOUTPORTREG;
1533static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1534static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1535static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1536static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1537static const i386_operand_type anydisp
1538 = OPERAND_TYPE_ANYDISP;
1539static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1540static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1541static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1542static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1543static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1544static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1545static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1546static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1547static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1548static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1549static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1550static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1551
/* Operand classes recognized by operand_type_check.  */
enum operand_type
{
  reg,		/* Any general register size bit.  */
  imm,		/* Any immediate size bit.  */
  disp,		/* Any displacement size bit.  */
  anymem	/* Any displacement bit or a base/index form.  */
};
1559
/* Return nonzero if operand type T has at least one bit of class C
   set, zero otherwise.  */

static INLINE int
operand_type_check (i386_operand_type t, enum operand_type c)
{
  switch (c)
    {
    case reg:
      return (t.bitfield.reg8
	      || t.bitfield.reg16
	      || t.bitfield.reg32
	      || t.bitfield.reg64);

    case imm:
      return (t.bitfield.imm8
	      || t.bitfield.imm8s
	      || t.bitfield.imm16
	      || t.bitfield.imm32
	      || t.bitfield.imm32s
	      || t.bitfield.imm64);

    case disp:
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64);

    case anymem:
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64
	      || t.bitfield.baseindex);

    default:
      abort ();
    }

  /* Not reached: every case above returns or aborts.  This return
     only silences compilers that cannot see that.  */
  return 0;
}
1600
1601/* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1602 operand J for instruction template T. */
1603
static INLINE int
match_reg_size (const insn_template *t, unsigned int j)
{
  /* A conflict exists only when the given operand carries a size bit
     (byte/word/dword/qword) that the template's operand J lacks.  */
  return !((i.types[j].bitfield.byte
	    && !t->operand_types[j].bitfield.byte)
	   || (i.types[j].bitfield.word
	       && !t->operand_types[j].bitfield.word)
	   || (i.types[j].bitfield.dword
	       && !t->operand_types[j].bitfield.dword)
	   || (i.types[j].bitfield.qword
	       && !t->operand_types[j].bitfield.qword));
}
1616
1617/* Return 1 if there is no conflict in any size on operand J for
1618 instruction template T. */
1619
static INLINE int
match_mem_size (const insn_template *t, unsigned int j)
{
  /* Beyond the integer register sizes, memory operands must also
     agree on the unspecified/fword/tbyte/xmmword/ymmword bits.  */
  return (match_reg_size (t, j)
	  && !((i.types[j].bitfield.unspecified
		&& !t->operand_types[j].bitfield.unspecified)
	       || (i.types[j].bitfield.fword
		   && !t->operand_types[j].bitfield.fword)
	       || (i.types[j].bitfield.tbyte
		   && !t->operand_types[j].bitfield.tbyte)
	       || (i.types[j].bitfield.xmmword
		   && !t->operand_types[j].bitfield.xmmword)
	       || (i.types[j].bitfield.ymmword
		   && !t->operand_types[j].bitfield.ymmword)));
}
1635
1636/* Return 1 if there is no size conflict on any operands for
1637 instruction template T. */
1638
static INLINE int
operand_size_match (const insn_template *t)
{
  unsigned int j;
  int match = 1;

  /* Don't check jump instructions.  */
  if (t->opcode_modifier.jump
      || t->opcode_modifier.jumpbyte
      || t->opcode_modifier.jumpdword
      || t->opcode_modifier.jumpintersegment)
    return match;

  /* Check memory and accumulator operand size.  */
  for (j = 0; j < i.operands; j++)
    {
      /* Templates marked AnySize accept every operand size.  */
      if (t->operand_types[j].bitfield.anysize)
	continue;

      if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
	{
	  match = 0;
	  break;
	}

      if (i.types[j].bitfield.mem && !match_mem_size (t, j))
	{
	  match = 0;
	  break;
	}
    }

  if (match)
    return match;
  else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
    {
      /* The direct match failed and the template has no reversed
	 (D/FloatD) form, so this is a hard size mismatch.  */
mismatch:
      i.error = operand_size_mismatch;
      return 0;
    }

  /* Check reverse.  Only two-operand templates can have a reversed
     form, so try matching each operand against the other slot.  */
  gas_assert (i.operands == 2);

  match = 1;
  for (j = 0; j < 2; j++)
    {
      if (t->operand_types[j].bitfield.acc
	  && !match_reg_size (t, j ? 0 : 1))
	goto mismatch;

      if (i.types[j].bitfield.mem
	  && !match_mem_size (t, j ? 0 : 1))
	goto mismatch;
    }

  return match;
}
1697
/* Return 1 if the overlap between a given operand type and a
   template operand type (already ANDed into OVERLAP) is a usable
   match, 0 otherwise (setting i.error).  */

static INLINE int
operand_type_match (i386_operand_type overlap,
		    i386_operand_type given)
{
  i386_operand_type temp = overlap;

  /* Size and addressing-attribute bits alone don't constitute a
     match; mask them out and require some substantive bit left.  */
  temp.bitfield.jumpabsolute = 0;
  temp.bitfield.unspecified = 0;
  temp.bitfield.byte = 0;
  temp.bitfield.word = 0;
  temp.bitfield.dword = 0;
  temp.bitfield.fword = 0;
  temp.bitfield.qword = 0;
  temp.bitfield.tbyte = 0;
  temp.bitfield.xmmword = 0;
  temp.bitfield.ymmword = 0;
  if (operand_type_all_zero (&temp))
    goto mismatch;

  /* The base/index and absolute-jump attributes of the given operand
     must have survived the overlap intact.  */
  if (given.bitfield.baseindex == overlap.bitfield.baseindex
      && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
    return 1;

mismatch:
  i.error = operand_type_mismatch;
  return 0;
}
1725
1726/* If given types g0 and g1 are registers they must be of the same type
1727 unless the expected operand type register overlap is null.
1728 Note that Acc in a template matches every size of reg. */
1729
static INLINE int
operand_type_register_match (i386_operand_type m0,
			     i386_operand_type g0,
			     i386_operand_type t0,
			     i386_operand_type m1,
			     i386_operand_type g1,
			     i386_operand_type t1)
{
  /* Nothing to check unless both given operands are registers.  */
  if (!operand_type_check (g0, reg))
    return 1;

  if (!operand_type_check (g1, reg))
    return 1;

  /* Same register size on both sides always matches.  */
  if (g0.bitfield.reg8 == g1.bitfield.reg8
      && g0.bitfield.reg16 == g1.bitfield.reg16
      && g0.bitfield.reg32 == g1.bitfield.reg32
      && g0.bitfield.reg64 == g1.bitfield.reg64)
    return 1;

  /* An Acc overlap means the template accepts any register size for
     that operand; widen the template type accordingly.  */
  if (m0.bitfield.acc)
    {
      t0.bitfield.reg8 = 1;
      t0.bitfield.reg16 = 1;
      t0.bitfield.reg32 = 1;
      t0.bitfield.reg64 = 1;
    }

  if (m1.bitfield.acc)
    {
      t1.bitfield.reg8 = 1;
      t1.bitfield.reg16 = 1;
      t1.bitfield.reg32 = 1;
      t1.bitfield.reg64 = 1;
    }

  /* A conflict only exists if the two template operands share some
     register size; otherwise the size difference is intentional.  */
  if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
      && !(t0.bitfield.reg16 & t1.bitfield.reg16)
      && !(t0.bitfield.reg32 & t1.bitfield.reg32)
      && !(t0.bitfield.reg64 & t1.bitfield.reg64))
    return 1;

  i.error = register_type_mismatch;

  return 0;
}
1776
1777static INLINE unsigned int
1778register_number (const reg_entry *r)
1779{
1780 unsigned int nr = r->reg_num;
1781
1782 if (r->reg_flags & RegRex)
1783 nr += 8;
1784
1785 return nr;
1786}
1787
1788static INLINE unsigned int
1789mode_from_disp_size (i386_operand_type t)
1790{
1791 if (t.bitfield.disp8)
1792 return 1;
1793 else if (t.bitfield.disp16
1794 || t.bitfield.disp32
1795 || t.bitfield.disp32s)
1796 return 2;
1797 else
1798 return 0;
1799}
1800
1801static INLINE int
1802fits_in_signed_byte (offsetT num)
1803{
1804 return (num >= -128) && (num <= 127);
1805}
1806
1807static INLINE int
1808fits_in_unsigned_byte (offsetT num)
1809{
1810 return (num & 0xff) == num;
1811}
1812
1813static INLINE int
1814fits_in_unsigned_word (offsetT num)
1815{
1816 return (num & 0xffff) == num;
1817}
1818
1819static INLINE int
1820fits_in_signed_word (offsetT num)
1821{
1822 return (-32768 <= num) && (num <= 32767);
1823}
1824
1825static INLINE int
1826fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1827{
1828#ifndef BFD64
1829 return 1;
1830#else
1831 return (!(((offsetT) -1 << 31) & num)
1832 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1833#endif
1834} /* fits_in_signed_long() */
1835
1836static INLINE int
1837fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1838{
1839#ifndef BFD64
1840 return 1;
1841#else
1842 return (num & (((offsetT) 2 << 31) - 1)) == num;
1843#endif
1844} /* fits_in_unsigned_long() */
1845
1846static INLINE int
1847fits_in_imm4 (offsetT num)
1848{
1849 return (num & 0xf) == num;
1850}
1851
/* Return the set of immediate operand types able to represent NUM,
   from the smallest encoding upward.  Imm64 is always included as
   the fallback.  */

static i386_operand_type
smallest_imm_type (offsetT num)
{
  i386_operand_type t;

  operand_type_set (&t, 0);
  t.bitfield.imm64 = 1;

  if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
    {
      /* This code is disabled on the 486 because all the Imm1 forms
	 in the opcode table are slower on the i486.  They're the
	 versions with the implicitly specified single-position
	 displacement, which has another syntax if you really want to
	 use that form.  */
      t.bitfield.imm1 = 1;
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
    {
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_long (num))
    {
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_long (num))
    t.bitfield.imm32 = 1;

  return t;
}
1905
/* Truncate VAL to SIZE bytes (1, 2, 4 or 8), warning when
   significant bits are lost.  Returns the masked value.  */

static offsetT
offset_in_range (offsetT val, int size)
{
  addressT mask;

  switch (size)
    {
    case 1: mask = ((addressT) 1 << 8) - 1; break;
    case 2: mask = ((addressT) 1 << 16) - 1; break;
    case 4: mask = ((addressT) 2 << 31) - 1; break;
#ifdef BFD64
    case 8: mask = ((addressT) 2 << 63) - 1; break;
#endif
    default: abort ();
    }

#ifdef BFD64
  /* If BFD64, sign extend val for 32bit address mode.  */
  if (flag_code != CODE_64BIT
      || i.prefix[ADDR_PREFIX])
    if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
      val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
#endif

  /* Warn unless the discarded bits are all zeros (plain truncation)
     or all ones (sign extension).  */
  if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
    {
      char buf1[40], buf2[40];

      sprint_value (buf1, val);
      sprint_value (buf2, val & mask);
      as_warn (_("%s shortened to %s"), buf1, buf2);
    }
  return val & mask;
}
1940
enum PREFIX_GROUP
{
  PREFIX_EXIST = 0,	/* Must stay 0: add_prefix tests "if (ret)".  */
  PREFIX_LOCK,		/* LOCK prefix added.  */
  PREFIX_REP,		/* REP or REPNE prefix added.  */
  PREFIX_OTHER		/* Any other prefix added.  */
};
1948
1949/* Returns
1950 a. PREFIX_EXIST if attempting to add a prefix where one from the
1951 same class already exists.
1952 b. PREFIX_LOCK if lock prefix is added.
1953 c. PREFIX_REP if rep/repne prefix is added.
1954 d. PREFIX_OTHER if other prefix is added.
1955 */
1956
static enum PREFIX_GROUP
add_prefix (unsigned int prefix)
{
  enum PREFIX_GROUP ret = PREFIX_OTHER;
  unsigned int q;

  /* REX prefixes (0x40-0x4f) are handled specially in 64-bit mode:
     several REX bits may be ORed into the same slot, but REX_W and
     the R/X/B bits must each be added at most once.  */
  if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
      && flag_code == CODE_64BIT)
    {
      if ((i.prefix[REX_PREFIX] & prefix & REX_W)
	  || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
	      && (prefix & (REX_R | REX_X | REX_B))))
	ret = PREFIX_EXIST;
      q = REX_PREFIX;
    }
  else
    {
      /* Map the prefix byte to its slot in i.prefix[].  */
      switch (prefix)
	{
	default:
	  abort ();

	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	  q = SEG_PREFIX;
	  break;

	case REPNE_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	  q = REP_PREFIX;
	  ret = PREFIX_REP;
	  break;

	case LOCK_PREFIX_OPCODE:
	  q = LOCK_PREFIX;
	  ret = PREFIX_LOCK;
	  break;

	case FWAIT_OPCODE:
	  q = WAIT_PREFIX;
	  break;

	case ADDR_PREFIX_OPCODE:
	  q = ADDR_PREFIX;
	  break;

	case DATA_PREFIX_OPCODE:
	  q = DATA_PREFIX;
	  break;
	}
      if (i.prefix[q] != 0)
	ret = PREFIX_EXIST;
    }

  /* ret is nonzero exactly when no conflict was found, because
     PREFIX_EXIST is defined as 0.  */
  if (ret)
    {
      if (!i.prefix[q])
	++i.prefixes;
      i.prefix[q] |= prefix;
    }
  else
    as_bad (_("same type of prefix used twice"));

  return ret;
}
2026
/* Switch the assembler to 16/32/64-bit code generation (VALUE is an
   enum flag_code).  If the selected mode is unsupported by the
   current architecture, report it: fatally when CHECK is nonzero,
   as an ordinary error otherwise.  */

static void
update_code_flag (int value, int check)
{
  PRINTF_LIKE ((*as_error));

  flag_code = (enum flag_code) value;
  if (flag_code == CODE_64BIT)
    {
      cpu_arch_flags.bitfield.cpu64 = 1;
      cpu_arch_flags.bitfield.cpuno64 = 0;
    }
  else
    {
      cpu_arch_flags.bitfield.cpu64 = 0;
      cpu_arch_flags.bitfield.cpuno64 = 1;
    }
  if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("64bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("32bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  /* Reset any pending implicit stack-op size override.  */
  stackop_size = '\0';
}
2063
/* Set the code size without fatal checking; directive handler
   wrapper around update_code_flag.  */

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2069
/* Enter 16-bit mode while keeping 32-bit stack operations
   (stackop_size forced to the 'l' suffix).  NOTE(review): presumably
   backs the .code16gcc directive — confirm against md_pseudo_table.  */

static void
set_16bit_gcc_code_flag (int new_code_flag)
{
  flag_code = (enum flag_code) new_code_flag;
  if (flag_code != CODE_16BIT)
    abort ();
  cpu_arch_flags.bitfield.cpu64 = 0;
  cpu_arch_flags.bitfield.cpuno64 = 1;
  stackop_size = LONG_MNEM_SUFFIX;
}
2080
/* Handle the .intel_syntax/.att_syntax directives.  SYNTAX_FLAG is
   nonzero for Intel syntax.  An optional "prefix"/"noprefix"
   argument controls whether registers need a % prefix.  */

static void
set_intel_syntax (int syntax_flag)
{
  /* Find out if register prefixing is specified.  */
  int ask_naked_reg = 0;

  SKIP_WHITESPACE ();
  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "prefix") == 0)
	ask_naked_reg = 1;
      else if (strcmp (string, "noprefix") == 0)
	ask_naked_reg = -1;
      else
	as_bad (_("bad argument to syntax directive."));
      /* get_symbol_end wrote a NUL; restore the clobbered byte.  */
      *input_line_pointer = e;
    }
  demand_empty_rest_of_line ();

  intel_syntax = syntax_flag;

  if (ask_naked_reg == 0)
    allow_naked_reg = (intel_syntax
		       && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
  else
    allow_naked_reg = (ask_naked_reg < 0);

  expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);

  /* Adjust the identifier character set and register prefix string
     to match the selected syntax.  */
  identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
  identifier_chars['$'] = intel_syntax ? '$' : 0;
  register_prefix = allow_naked_reg ? "" : "%";
}
2117
2118static void
2119set_intel_mnemonic (int mnemonic_flag)
2120{
2121 intel_mnemonic = mnemonic_flag;
2122}
2123
2124static void
2125set_allow_index_reg (int flag)
2126{
2127 allow_index_reg = flag;
2128}
2129
/* Handle the .sse_check (WHAT == 0) and .operand_check (WHAT != 0)
   directives, whose argument must be "none", "warning" or
   "error".  */

static void
set_check (int what)
{
  enum check_kind *kind;
  const char *str;

  if (what)
    {
      kind = &operand_check;
      str = "operand";
    }
  else
    {
      kind = &sse_check;
      str = "sse";
    }

  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "none") == 0)
	*kind = check_none;
      else if (strcmp (string, "warning") == 0)
	*kind = check_warning;
      else if (strcmp (string, "error") == 0)
	*kind = check_error;
      else
	as_bad (_("bad argument to %s_check directive."), str);
      /* get_symbol_end wrote a NUL; restore the clobbered byte.  */
      *input_line_pointer = e;
    }
  else
    as_bad (_("missing argument for %s_check directive"), str);

  demand_empty_rest_of_line ();
}
2169
/* Diagnose use of arch extension NAME (with flags NEW_FLAG) that is
   incompatible with an Intel L1OM/K1OM target.  A no-op for non-ELF
   output and for all other targets.  */

static void
check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
			   i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  /* Cached architecture name; computed once on first call.  */
  static const char *arch;

  /* Intel LIOM is only supported on ELF.  */
  if (!IS_ELF)
    return;

  if (!arch)
    {
      /* Use cpu_arch_name if it is set in md_parse_option.  Otherwise
	 use default_arch.  */
      arch = cpu_arch_name;
      if (!arch)
	arch = default_arch;
    }

  /* If we are targeting Intel L1OM, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
      || new_flag.bitfield.cpul1om)
    return;

  /* If we are targeting Intel K1OM, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
      || new_flag.bitfield.cpuk1om)
    return;

  as_bad (_("`%s' is not supported on `%s'"), name, arch);
#endif
}
2203
static void
set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
{
  /* Handle the ".arch" directive: select a base architecture, or
     toggle a sub-architecture feature (table entries whose name
     begins with '.'), optionally followed by ",jumps"/",nojumps" to
     control conditional-jump promotion.  */
  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();
      unsigned int j;
      i386_cpu_flags flags;

      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (string, cpu_arch[j].name) == 0)
	    {
	      check_cpu_arch_compatible (string, cpu_arch[j].flags);

	      if (*string != '.')
		{
		  /* Base architecture: replace the current arch and
		     tuning state wholesale.  */
		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  /* Preserve the current code-size mode bits, which
		     are not part of the table entry.  */
		  if (flag_code == CODE_64BIT)
		    {
		      cpu_arch_flags.bitfield.cpu64 = 1;
		      cpu_arch_flags.bitfield.cpuno64 = 0;
		    }
		  else
		    {
		      cpu_arch_flags.bitfield.cpu64 = 0;
		      cpu_arch_flags.bitfield.cpuno64 = 1;
		    }
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}

	      /* Sub-architecture feature: OR the feature's flags in,
		 or mask them out for a negated (".no...") entry.  */
	      if (!cpu_arch[j].negated)
		flags = cpu_flags_or (cpu_arch_flags,
				      cpu_arch[j].flags);
	      else
		flags = cpu_flags_and_not (cpu_arch_flags,
					   cpu_arch[j].flags);
	      /* Only record the sub-arch name when the flags actually
		 changed, so repeated directives don't accumulate.  */
	      if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		{
		  if (cpu_sub_arch_name)
		    {
		      char *name = cpu_sub_arch_name;
		      cpu_sub_arch_name = concat (name,
						  cpu_arch[j].name,
						  (const char *) NULL);
		      free (name);
		    }
		  else
		    cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		  cpu_arch_flags = flags;
		  cpu_arch_isa_flags = flags;
		}
	      *input_line_pointer = e;
	      demand_empty_rest_of_line ();
	      return;
	    }
	}
      if (j >= ARRAY_SIZE (cpu_arch))
	as_bad (_("no such architecture: `%s'"), string);

      /* Restore the character get_symbol_end replaced with NUL.  */
      *input_line_pointer = e;
    }
  else
    as_bad (_("missing cpu architecture"));

  /* A base-architecture directive resets jump promotion; an optional
     ",jumps"/",nojumps" modifier may override it.  */
  no_cond_jump_promotion = 0;
  if (*input_line_pointer == ','
      && !is_end_of_line[(unsigned char) input_line_pointer[1]])
    {
      char *string = ++input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "nojumps") == 0)
	no_cond_jump_promotion = 1;
      else if (strcmp (string, "jumps") == 0)
	;
      else
	as_bad (_("no such architecture modifier: `%s'"), string);

      *input_line_pointer = e;
    }

  demand_empty_rest_of_line ();
}
2300
2301enum bfd_architecture
2302i386_arch (void)
2303{
2304 if (cpu_arch_isa == PROCESSOR_L1OM)
2305 {
2306 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2307 || flag_code != CODE_64BIT)
2308 as_fatal (_("Intel L1OM is 64bit ELF only"));
2309 return bfd_arch_l1om;
2310 }
2311 else if (cpu_arch_isa == PROCESSOR_K1OM)
2312 {
2313 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2314 || flag_code != CODE_64BIT)
2315 as_fatal (_("Intel K1OM is 64bit ELF only"));
2316 return bfd_arch_k1om;
2317 }
2318 else
2319 return bfd_arch_i386;
2320}
2321
2322unsigned long
2323i386_mach (void)
2324{
2325 if (!strncmp (default_arch, "x86_64", 6))
2326 {
2327 if (cpu_arch_isa == PROCESSOR_L1OM)
2328 {
2329 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2330 || default_arch[6] != '\0')
2331 as_fatal (_("Intel L1OM is 64bit ELF only"));
2332 return bfd_mach_l1om;
2333 }
2334 else if (cpu_arch_isa == PROCESSOR_K1OM)
2335 {
2336 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2337 || default_arch[6] != '\0')
2338 as_fatal (_("Intel K1OM is 64bit ELF only"));
2339 return bfd_mach_k1om;
2340 }
2341 else if (default_arch[6] == '\0')
2342 return bfd_mach_x86_64;
2343 else
2344 return bfd_mach_x64_32;
2345 }
2346 else if (!strcmp (default_arch, "i386"))
2347 return bfd_mach_i386_i386;
2348 else
2349 as_fatal (_("unknown architecture"));
2350}
2351\f
void
md_begin (void)
{
  /* One-time assembler initialization: build the opcode and register
     hash tables, fill in the lexical classification tables, and set
     the DWARF CFI parameters for the selected code size.  */
  const char *hash_err;

  /* Initialize op_hash hash table.  */
  op_hash = hash_new ();

  {
    const insn_template *optab;
    templates *core_optab;

    /* Setup for loop.  */
    optab = i386_optab;
    core_optab = (templates *) xmalloc (sizeof (templates));
    core_optab->start = optab;

    /* Group consecutive templates that share a mnemonic into one
       [start, end) range per hash entry.  This relies on i386_optab
       keeping all templates for a given name contiguous.  */
    while (1)
      {
	++optab;
	if (optab->name == NULL
	    || strcmp (optab->name, (optab - 1)->name) != 0)
	  {
	    /* different name --> ship out current template list;
	       add to hash table; & begin anew.  */
	    core_optab->end = optab;
	    hash_err = hash_insert (op_hash,
				    (optab - 1)->name,
				    (void *) core_optab);
	    if (hash_err)
	      {
		as_fatal (_("can't hash %s: %s"),
			  (optab - 1)->name,
			  hash_err);
	      }
	    if (optab->name == NULL)
	      break;
	    core_optab = (templates *) xmalloc (sizeof (templates));
	    core_optab->start = optab;
	  }
      }
  }

  /* Initialize reg_hash hash table.  */
  reg_hash = hash_new ();
  {
    const reg_entry *regtab;
    unsigned int regtab_size = i386_regtab_size;

    for (regtab = i386_regtab; regtab_size--; regtab++)
      {
	hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
	if (hash_err)
	  as_fatal (_("can't hash %s: %s"),
		    regtab->reg_name,
		    hash_err);
      }
  }

  /* Fill in lexical tables:  mnemonic_chars, operand_chars.  */
  {
    int c;
    char *p;

    for (c = 0; c < 256; c++)
      {
	if (ISDIGIT (c))
	  {
	    digit_chars[c] = c;
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISLOWER (c))
	  {
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISUPPER (c))
	  {
	    /* Upper-case letters are folded to lower case for mnemonic
	       and register lookup, but preserved in operands.  */
	    mnemonic_chars[c] = TOLOWER (c);
	    register_chars[c] = mnemonic_chars[c];
	    operand_chars[c] = c;
	  }

	if (ISALPHA (c) || ISDIGIT (c))
	  identifier_chars[c] = c;
	else if (c >= 128)
	  {
	    /* Allow non-ASCII bytes in identifiers and operands.  */
	    identifier_chars[c] = c;
	    operand_chars[c] = c;
	  }
      }

#ifdef LEX_AT
    identifier_chars['@'] = '@';
#endif
#ifdef LEX_QM
    identifier_chars['?'] = '?';
    operand_chars['?'] = '?';
#endif
    digit_chars['-'] = '-';
    mnemonic_chars['_'] = '_';
    mnemonic_chars['-'] = '-';
    mnemonic_chars['.'] = '.';
    identifier_chars['_'] = '_';
    identifier_chars['.'] = '.';

    for (p = operand_special_chars; *p != '\0'; p++)
      operand_chars[(unsigned char) *p] = *p;
  }

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF)
    {
      /* Record 4-byte (2^2) minimum alignment for the standard
	 sections.  */
      record_alignment (text_section, 2);
      record_alignment (data_section, 2);
      record_alignment (bss_section, 2);
    }
#endif

  if (flag_code == CODE_64BIT)
    {
#if defined (OBJ_COFF) && defined (TE_PE)
      x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
				  ? 32 : 16);
#else
      x86_dwarf2_return_column = 16;
#endif
      x86_cie_data_alignment = -8;
    }
  else
    {
      x86_dwarf2_return_column = 8;
      x86_cie_data_alignment = -4;
    }
}
2490
2491void
2492i386_print_statistics (FILE *file)
2493{
2494 hash_print_statistics (file, "i386 opcode", op_hash);
2495 hash_print_statistics (file, "i386 register", reg_hash);
2496}
2497\f
2498#ifdef DEBUG386
2499
2500/* Debugging routines for md_assemble. */
2501static void pte (insn_template *);
2502static void pt (i386_operand_type);
2503static void pe (expressionS *);
2504static void ps (symbolS *);
2505
static void
pi (char *line, i386_insn *x)
{
  /* Debug dump of the fully-parsed instruction X to stdout, labelled
     with LINE: template, addressing info, modrm/sib/rex bytes, then
     one entry per operand (type plus register/immediate/displacement
     details).  */
  unsigned int j;

  fprintf (stdout, "%s: template ", line);
  pte (&x->tm);
  fprintf (stdout, " address: base %s index %s scale %x\n",
	   x->base_reg ? x->base_reg->reg_name : "none",
	   x->index_reg ? x->index_reg->reg_name : "none",
	   x->log2_scale_factor);
  fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
	   x->rm.mode, x->rm.reg, x->rm.regmem);
  fprintf (stdout, " sib: base %x index %x scale %x\n",
	   x->sib.base, x->sib.index, x->sib.scale);
  fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
	   (x->rex & REX_W) != 0,
	   (x->rex & REX_R) != 0,
	   (x->rex & REX_X) != 0,
	   (x->rex & REX_B) != 0);
  for (j = 0; j < x->operands; j++)
    {
      fprintf (stdout, " #%d: ", j + 1);
      pt (x->types[j]);
      fprintf (stdout, "\n");
      /* Print the register name for any register-class operand.  */
      if (x->types[j].bitfield.reg8
	  || x->types[j].bitfield.reg16
	  || x->types[j].bitfield.reg32
	  || x->types[j].bitfield.reg64
	  || x->types[j].bitfield.regmmx
	  || x->types[j].bitfield.regxmm
	  || x->types[j].bitfield.regymm
	  || x->types[j].bitfield.sreg2
	  || x->types[j].bitfield.sreg3
	  || x->types[j].bitfield.control
	  || x->types[j].bitfield.debug
	  || x->types[j].bitfield.test)
	fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
      if (operand_type_check (x->types[j], imm))
	pe (x->op[j].imms);
      if (operand_type_check (x->types[j], disp))
	pe (x->op[j].disps);
    }
}
2550
2551static void
2552pte (insn_template *t)
2553{
2554 unsigned int j;
2555 fprintf (stdout, " %d operands ", t->operands);
2556 fprintf (stdout, "opcode %x ", t->base_opcode);
2557 if (t->extension_opcode != None)
2558 fprintf (stdout, "ext %x ", t->extension_opcode);
2559 if (t->opcode_modifier.d)
2560 fprintf (stdout, "D");
2561 if (t->opcode_modifier.w)
2562 fprintf (stdout, "W");
2563 fprintf (stdout, "\n");
2564 for (j = 0; j < t->operands; j++)
2565 {
2566 fprintf (stdout, " #%d type ", j + 1);
2567 pt (t->operand_types[j]);
2568 fprintf (stdout, "\n");
2569 }
2570}
2571
2572static void
2573pe (expressionS *e)
2574{
2575 fprintf (stdout, " operation %d\n", e->X_op);
2576 fprintf (stdout, " add_number %ld (%lx)\n",
2577 (long) e->X_add_number, (long) e->X_add_number);
2578 if (e->X_add_symbol)
2579 {
2580 fprintf (stdout, " add_symbol ");
2581 ps (e->X_add_symbol);
2582 fprintf (stdout, "\n");
2583 }
2584 if (e->X_op_symbol)
2585 {
2586 fprintf (stdout, " op_symbol ");
2587 ps (e->X_op_symbol);
2588 fprintf (stdout, "\n");
2589 }
2590}
2591
2592static void
2593ps (symbolS *s)
2594{
2595 fprintf (stdout, "%s type %s%s",
2596 S_GET_NAME (s),
2597 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2598 segment_name (S_GET_SEGMENT (s)));
2599}
2600
/* Table mapping operand-type bit masks to short labels; used only by
   pt () below to render an i386_operand_type for debug output.  */
static struct type_name
  {
    i386_operand_type mask;	/* Bits identifying this operand type.  */
    const char *name;		/* Label printed when the bits match.  */
  }
const type_names[] =
{
  { OPERAND_TYPE_REG8, "r8" },
  { OPERAND_TYPE_REG16, "r16" },
  { OPERAND_TYPE_REG32, "r32" },
  { OPERAND_TYPE_REG64, "r64" },
  { OPERAND_TYPE_IMM8, "i8" },
  /* NOTE(review): "i8s" maps to OPERAND_TYPE_IMM8, same as "i8" above;
     OPERAND_TYPE_IMM8S may have been intended — confirm.  */
  { OPERAND_TYPE_IMM8, "i8s" },
  { OPERAND_TYPE_IMM16, "i16" },
  { OPERAND_TYPE_IMM32, "i32" },
  { OPERAND_TYPE_IMM32S, "i32s" },
  { OPERAND_TYPE_IMM64, "i64" },
  { OPERAND_TYPE_IMM1, "i1" },
  { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
  { OPERAND_TYPE_DISP8, "d8" },
  { OPERAND_TYPE_DISP16, "d16" },
  { OPERAND_TYPE_DISP32, "d32" },
  { OPERAND_TYPE_DISP32S, "d32s" },
  { OPERAND_TYPE_DISP64, "d64" },
  { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
  { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
  { OPERAND_TYPE_CONTROL, "control reg" },
  { OPERAND_TYPE_TEST, "test reg" },
  { OPERAND_TYPE_DEBUG, "debug reg" },
  { OPERAND_TYPE_FLOATREG, "FReg" },
  { OPERAND_TYPE_FLOATACC, "FAcc" },
  { OPERAND_TYPE_SREG2, "SReg2" },
  { OPERAND_TYPE_SREG3, "SReg3" },
  { OPERAND_TYPE_ACC, "Acc" },
  { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
  { OPERAND_TYPE_REGMMX, "rMMX" },
  { OPERAND_TYPE_REGXMM, "rXMM" },
  { OPERAND_TYPE_REGYMM, "rYMM" },
  { OPERAND_TYPE_ESSEG, "es" },
};
2641
2642static void
2643pt (i386_operand_type t)
2644{
2645 unsigned int j;
2646 i386_operand_type a;
2647
2648 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2649 {
2650 a = operand_type_and (t, type_names[j].mask);
2651 if (!operand_type_all_zero (&a))
2652 fprintf (stdout, "%s, ", type_names[j].name);
2653 }
2654 fflush (stdout);
2655}
2656
2657#endif /* DEBUG386 */
2658\f
/* Map a relocation request (SIZE bytes, PCREL flag, signedness SIGN:
   >0 signed, 0 unsigned, <0 don't care) and an optional pre-selected
   relocation OTHER onto the BFD relocation code to emit.  Returns
   NO_RELOC when the combination is invalid; an error has then already
   been reported with as_bad.  */
static bfd_reloc_code_real_type
reloc (unsigned int size,
       int pcrel,
       int sign,
       bfd_reloc_code_real_type other)
{
  if (other != NO_RELOC)
    {
      reloc_howto_type *rel;

      /* Widen certain 32-bit relocations to their 64-bit forms when
	 an 8-byte field was requested.  */
      if (size == 8)
	switch (other)
	  {
	  case BFD_RELOC_X86_64_GOT32:
	    /* NOTE(review): this case and PLTOFF64 return immediately,
	       skipping the size/pcrel/signedness validation applied to
	       the relocations below — confirm this is intentional.  */
	    return BFD_RELOC_X86_64_GOT64;
	    break;
	  case BFD_RELOC_X86_64_PLTOFF64:
	    return BFD_RELOC_X86_64_PLTOFF64;
	    break;
	  case BFD_RELOC_X86_64_GOTPC32:
	    other = BFD_RELOC_X86_64_GOTPC64;
	    break;
	  case BFD_RELOC_X86_64_GOTPCREL:
	    other = BFD_RELOC_X86_64_GOTPCREL64;
	    break;
	  case BFD_RELOC_X86_64_TPOFF32:
	    other = BFD_RELOC_X86_64_TPOFF64;
	    break;
	  case BFD_RELOC_X86_64_DTPOFF32:
	    other = BFD_RELOC_X86_64_DTPOFF64;
	    break;
	  default:
	    break;
	  }

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (other == BFD_RELOC_SIZE32)
	{
	  if (size == 8)
	    return BFD_RELOC_SIZE64;
	  if (pcrel)
	    as_bad (_("there are no pc-relative size relocations"));
	}
#endif

      /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless.  */
      if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
	sign = -1;

      /* Validate that the requested field matches the relocation's
	 howto: size, pc-relativity and overflow signedness.  */
      rel = bfd_reloc_type_lookup (stdoutput, other);
      if (!rel)
	as_bad (_("unknown relocation (%u)"), other);
      else if (size != bfd_get_reloc_size (rel))
	as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
		bfd_get_reloc_size (rel),
		size);
      else if (pcrel && !rel->pc_relative)
	as_bad (_("non-pc-relative relocation for pc-relative field"));
      else if ((rel->complain_on_overflow == complain_overflow_signed
		&& !sign)
	       || (rel->complain_on_overflow == complain_overflow_unsigned
		   && sign > 0))
	as_bad (_("relocated field and relocation type differ in signedness"));
      else
	return other;
      return NO_RELOC;
    }

  /* No pre-selected relocation: pick the generic one for the
     requested size/pcrel/signedness.  */
  if (pcrel)
    {
      if (!sign)
	as_bad (_("there are no unsigned pc-relative relocations"));
      switch (size)
	{
	case 1: return BFD_RELOC_8_PCREL;
	case 2: return BFD_RELOC_16_PCREL;
	case 4: return BFD_RELOC_32_PCREL;
	case 8: return BFD_RELOC_64_PCREL;
	}
      as_bad (_("cannot do %u byte pc-relative relocation"), size);
    }
  else
    {
      if (sign > 0)
	switch (size)
	  {
	  case 4: return BFD_RELOC_X86_64_32S;
	  }
      else
	switch (size)
	  {
	  case 1: return BFD_RELOC_8;
	  case 2: return BFD_RELOC_16;
	  case 4: return BFD_RELOC_32;
	  case 8: return BFD_RELOC_64;
	  }
      as_bad (_("cannot do %s %u byte relocation"),
	      sign > 0 ? "signed" : "unsigned", size);
    }

  return NO_RELOC;
}
2761
2762/* Here we decide which fixups can be adjusted to make them relative to
2763 the beginning of the section instead of the symbol. Basically we need
2764 to make sure that the dynamic relocations are done correctly, so in
2765 some cases we force the original symbol to be used. */
2766
2767int
2768tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2769{
2770#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2771 if (!IS_ELF)
2772 return 1;
2773
2774 /* Don't adjust pc-relative references to merge sections in 64-bit
2775 mode. */
2776 if (use_rela_relocations
2777 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2778 && fixP->fx_pcrel)
2779 return 0;
2780
2781 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2782 and changed later by validate_fix. */
2783 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2784 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2785 return 0;
2786
2787 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
2788 for size relocations. */
2789 if (fixP->fx_r_type == BFD_RELOC_SIZE32
2790 || fixP->fx_r_type == BFD_RELOC_SIZE64
2791 || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2792 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2793 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2794 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2795 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2796 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2797 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2798 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2799 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2800 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2801 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2802 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2803 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2804 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2805 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2806 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2807 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2808 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2809 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2810 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2811 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2812 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2813 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2814 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2815 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2816 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2817 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2818 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2819 return 0;
2820#endif
2821 return 1;
2822}
2823
static int
intel_float_operand (const char *mnemonic)
{
  /* Classify an x87 mnemonic for Intel-syntax operand handling:
     0 = not a math op (incl. fxsave/fxrstor), 1 = plain FP math op,
     2 = integer op (fi...), 3 = control/state op (fldcw, fnstenv,
     frstor, fsave, fst{c,d,e,s}...).

     Note that the value returned is meaningful only for opcodes with
     (memory) operands, hence the code here is free to improperly
     handle opcodes that have no operands (for better performance and
     smaller code).  */

  if (mnemonic[0] != 'f')
    return 0;			/* non-math */

  switch (mnemonic[1])
    {
      /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm,
	 and the fs segment override prefix are not currently handled
	 because no call path can make opcodes without operands get
	 here.  */
    case 'i':
      return 2;			/* integer op */

    case 'l':
      /* fldcw/fldenv are control ops; fld etc. are plain math.  */
      return (mnemonic[2] == 'd'
	      && (mnemonic[3] == 'c' || mnemonic[3] == 'e')) ? 3 : 1;

    case 'n':
      /* Non-waiting control ops, except fnop.  */
      return mnemonic[2] != 'o' ? 3 : 1;

    case 'r':
      /* frstor/frstpm.  */
      return mnemonic[2] == 's' ? 3 : 1;

    case 's':
      if (mnemonic[2] == 'a')
	return 3;		/* fsave */
      if (mnemonic[2] == 't'
	  && (mnemonic[3] == 'c'	/* fstcw */
	      || mnemonic[3] == 'd'	/* fstdw */
	      || mnemonic[3] == 'e'	/* fstenv */
	      || mnemonic[3] == 's'))	/* fsts[gw] */
	return 3;
      return 1;

    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0;		/* fxsave/fxrstor are not really math ops */
      return 1;
    }

  return 1;
}
2876
2877/* Build the VEX prefix. */
2878
/* Emit the 2- or 3-byte VEX (or XOP) prefix into i.vex from the
   matched template.  T points at the matched template; t[1] (the next
   template in the table) is used when the operands are swapped to
   enable the shorter 2-byte encoding.  */
static void
build_vex_prefix (const insn_template *t)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int vector_length;

  /* Check register specifier.  VEX.vvvv holds the one's complement of
     the register number.  */
  if (i.vex.register_specifier)
    register_specifier = ~register_number (i.vex.register_specifier) & 0xf;
  else
    register_specifier = 0xf;

  /* Use 2-byte VEX prefix by swapping destination and source
     operand.  */
  if (!i.swap_operand
      && i.operands == i.reg_operands
      && i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.s
      && i.rex == REX_B)
    {
      unsigned int xchg = i.operands - 1;
      union i386_op temp_op;
      i386_operand_type temp_type;

      /* Swap the first and last operands.  */
      temp_type = i.types[xchg];
      i.types[xchg] = i.types[0];
      i.types[0] = temp_type;
      temp_op = i.op[xchg];
      i.op[xchg] = i.op[0];
      i.op[0] = temp_op;

      gas_assert (i.rm.mode == 3);

      /* After the swap only REX.R is needed, which the 2-byte form
	 can encode.  */
      i.rex = REX_R;
      xchg = i.rm.regmem;
      i.rm.regmem = i.rm.reg;
      i.rm.reg = xchg;

      /* Use the next insn.  */
      i.tm = t[1];
    }

  if (i.tm.opcode_modifier.vex == VEXScalar)
    vector_length = avxscalar;
  else
    vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;

  /* The legacy SIMD prefix (none/66/F3/F2) is folded into the low two
     bits of the last VEX byte instead of being emitted separately.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* Use 2-byte VEX prefix if possible.  */
  if (i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.vexw != VEXW1
      && (i.rex & (REX_W | REX_X | REX_B)) == 0)
    {
      /* 2-byte VEX prefix.  */
      unsigned int r;

      i.vex.length = 2;
      i.vex.bytes[0] = 0xc5;

      /* Check the REX.R bit.  */
      r = (i.rex & REX_R) ? 0 : 1;
      i.vex.bytes[1] = (r << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
  else
    {
      /* 3-byte VEX prefix.  */
      unsigned int m, w;

      i.vex.length = 3;

      /* Pick the opcode-map field and the leading escape byte
	 (0xc4 for VEX, 0x8f for XOP).  */
      switch (i.tm.opcode_modifier.vexopcode)
	{
	case VEX0F:
	  m = 0x1;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F38:
	  m = 0x2;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F3A:
	  m = 0x3;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case XOP08:
	  m = 0x8;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP09:
	  m = 0x9;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP0A:
	  m = 0xa;
	  i.vex.bytes[0] = 0x8f;
	  break;
	default:
	  abort ();
	}

      /* The high 3 bits of the second VEX byte are 1's complement
	 of RXB bits from REX.  */
      i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

      /* Check the REX.W bit.  */
      w = (i.rex & REX_W) ? 1 : 0;
      if (i.tm.opcode_modifier.vexw)
	{
	  if (w)
	    abort ();

	  if (i.tm.opcode_modifier.vexw == VEXW1)
	    w = 1;
	}

      i.vex.bytes[2] = (w << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
}
3021
/* Turn the opcode suffix stored in tm.extension_opcode into a faked
   8-bit immediate operand, after validating and dropping the fixed
   register operands of MONITOR/MWAIT-style (SSE3/SVME) instructions.  */
static void
process_immext (void)
{
  expressionS *exp;

  if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
      && i.operands > 0)
    {
      /* MONITOR/MWAIT as well as SVME instructions have fixed operands
	 with an opcode suffix which is coded in the same place as an
	 8-bit immediate field would be.
	 Here we check those operands and remove them afterwards.  */
      unsigned int x;

      /* Operand X must be register number X (%eax, %ecx, %edx in
	 order); anything else is diagnosed.  */
      for (x = 0; x < i.operands; x++)
	if (register_number (i.op[x].regs) != x)
	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
		  register_prefix, i.op[x].regs->reg_name, x + 1,
		  i.tm.name);

      i.operands = 0;
    }

  /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
     which is coded in the same place as an 8-bit immediate field
     would be.  Here we fake an 8-bit immediate operand from the
     opcode suffix stored in tm.extension_opcode.

     AVX instructions also use this encoding, for some of
     3 argument instructions.  */

  gas_assert (i.imm_operands == 0
	      && (i.operands <= 2
		  || (i.tm.opcode_modifier.vex
		      && i.operands <= 4)));

  /* Append the faked immediate as the last operand and clear the
     extension opcode so it is not emitted twice.  */
  exp = &im_expressions[i.imm_operands++];
  i.op[i.operands].imms = exp;
  i.types[i.operands] = imm8;
  i.operands++;
  exp->X_op = O_constant;
  exp->X_add_number = i.tm.extension_opcode;
  i.tm.extension_opcode = None;
}
3066
3067
3068static int
3069check_hle (void)
3070{
3071 switch (i.tm.opcode_modifier.hleprefixok)
3072 {
3073 default:
3074 abort ();
3075 case HLEPrefixNone:
3076 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3077 as_bad (_("invalid instruction `%s' after `xacquire'"),
3078 i.tm.name);
3079 else
3080 as_bad (_("invalid instruction `%s' after `xrelease'"),
3081 i.tm.name);
3082 return 0;
3083 case HLEPrefixLock:
3084 if (i.prefix[LOCK_PREFIX])
3085 return 1;
3086 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3087 as_bad (_("missing `lock' with `xacquire'"));
3088 else
3089 as_bad (_("missing `lock' with `xrelease'"));
3090 return 0;
3091 case HLEPrefixAny:
3092 return 1;
3093 case HLEPrefixRelease:
3094 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3095 {
3096 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3097 i.tm.name);
3098 return 0;
3099 }
3100 if (i.mem_operands == 0
3101 || !operand_type_check (i.types[i.operands - 1], anymem))
3102 {
3103 as_bad (_("memory destination needed for instruction `%s'"
3104 " after `xrelease'"), i.tm.name);
3105 return 0;
3106 }
3107 return 1;
3108 }
3109}
3110
3111/* This is the guts of the machine-dependent assembler. LINE points to a
3112 machine dependent instruction. This function is supposed to emit
3113 the frags/bytes it assembles to. */
3114
void
md_assemble (char *line)
{
  /* Assemble one machine-dependent instruction from LINE: parse the
     mnemonic and operands, match an opcode template, validate
     prefixes, finalize operand/immediate sizes and prefixes, then
     emit the frags/bytes via output_insn.  */
  unsigned int j;
  char mnemonic[MAX_MNEM_SIZE];
  const insn_template *t;

  /* Initialize globals.  */
  memset (&i, '\0', sizeof (i));
  for (j = 0; j < MAX_OPERANDS; j++)
    i.reloc[j] = NO_RELOC;
  memset (disp_expressions, '\0', sizeof (disp_expressions));
  memset (im_expressions, '\0', sizeof (im_expressions));
  save_stack_p = save_stack;

  /* First parse an instruction mnemonic & call i386_operand for the operands.
     We assume that the scrubber has arranged it so that line[0] is the valid
     start of a (possibly prefixed) mnemonic.  */

  line = parse_insn (line, mnemonic);
  if (line == NULL)
    return;

  line = parse_operands (line, mnemonic);
  this_operand = -1;
  if (line == NULL)
    return;

  /* Now we've parsed the mnemonic into a set of templates, and have the
     operands at hand.  */

  /* All intel opcodes have reversed operands except for "bound" and
     "enter".  We also don't reverse intersegment "jmp" and "call"
     instructions with 2 immediate operands so that the immediate segment
     precedes the offset, as it does when in AT&T mode.  */
  if (intel_syntax
      && i.operands > 1
      && (strcmp (mnemonic, "bound") != 0)
      && (strcmp (mnemonic, "invlpga") != 0)
      && !(operand_type_check (i.types[0], imm)
	   && operand_type_check (i.types[1], imm)))
    swap_operands ();

  /* The order of the immediates should be reversed
     for 2 immediates extrq and insertq instructions */
  if (i.imm_operands == 2
      && (strcmp (mnemonic, "extrq") == 0
	  || strcmp (mnemonic, "insertq") == 0))
      swap_2_operands (0, 1);

  if (i.imm_operands)
    optimize_imm ();

  /* Don't optimize displacement for movabs since it only takes 64bit
     displacement.  */
  if (i.disp_operands
      && i.disp_encoding != disp_encoding_32bit
      && (flag_code != CODE_64BIT
	  || strcmp (mnemonic, "movabs") != 0))
    optimize_disp ();

  /* Next, we find a template that matches the given insn,
     making sure the overlap of the given operands types is consistent
     with the template operand types.  */

  if (!(t = match_template ()))
    return;

  /* Honour the .sse_check directive: warn about or reject plain SSE
     instructions (unless the template opts out via NoAVX).  */
  if (sse_check != check_none
      && !i.tm.opcode_modifier.noavx
      && (i.tm.cpu_flags.bitfield.cpusse
	  || i.tm.cpu_flags.bitfield.cpusse2
	  || i.tm.cpu_flags.bitfield.cpusse3
	  || i.tm.cpu_flags.bitfield.cpussse3
	  || i.tm.cpu_flags.bitfield.cpusse4_1
	  || i.tm.cpu_flags.bitfield.cpusse4_2))
    {
      (sse_check == check_warning
       ? as_warn
       : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
    }

  /* Zap movzx and movsx suffix.  The suffix has been set from
     "word ptr" or "byte ptr" on the source operand in Intel syntax
     or extracted from mnemonic in AT&T syntax.  But we'll use
     the destination register to choose the suffix for encoding.
     Masking with ~9 matches base opcodes 0x0fb6/0x0fb7 (movzx) and
     0x0fbe/0x0fbf (movsx).  */
  if ((i.tm.base_opcode & ~9) == 0x0fb6)
    {
      /* In Intel syntax, there must be a suffix.  In AT&T syntax, if
	 there is no suffix, the default will be byte extension.  */
      if (i.reg_operands != 2
	  && !i.suffix
	  && intel_syntax)
	as_bad (_("ambiguous operand size for `%s'"), i.tm.name);

      i.suffix = 0;
    }

  if (i.tm.opcode_modifier.fwait)
    if (!add_prefix (FWAIT_OPCODE))
      return;

  /* Check if REP prefix is OK.  */
  if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
    {
      as_bad (_("invalid instruction `%s' after `%s'"),
	      i.tm.name, i.rep_prefix);
      return;
    }

  /* Check for lock without a lockable instruction.  Destination operand
     must be memory unless it is xchg (0x86).  */
  if (i.prefix[LOCK_PREFIX]
      && (!i.tm.opcode_modifier.islockable
	  || i.mem_operands == 0
	  || (i.tm.base_opcode != 0x86
	      && !operand_type_check (i.types[i.operands - 1], anymem))))
    {
      as_bad (_("expecting lockable instruction after `lock'"));
      return;
    }

  /* Check if HLE prefix is OK.  */
  if (i.have_hle && !check_hle ())
    return;

  /* Check string instruction segment overrides.  */
  if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
    {
      if (!check_string ())
	return;
      i.disp_operands = 0;
    }

  if (!process_suffix ())
    return;

  /* Update operand types.  */
  for (j = 0; j < i.operands; j++)
    i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);

  /* Make still unresolved immediate matches conform to size of immediate
     given in i.suffix.  */
  if (!finalize_imm ())
    return;

  if (i.types[0].bitfield.imm1)
    i.imm_operands = 0;	/* kludge for shift insns.  */

  /* We only need to check those implicit registers for instructions
     with 3 operands or less.  */
  if (i.operands <= 3)
    for (j = 0; j < i.operands; j++)
      if (i.types[j].bitfield.inoutportreg
	  || i.types[j].bitfield.shiftcount
	  || i.types[j].bitfield.acc
	  || i.types[j].bitfield.floatacc)
	i.reg_operands--;

  /* ImmExt should be processed after SSE2AVX.  */
  if (!i.tm.opcode_modifier.sse2avx
      && i.tm.opcode_modifier.immext)
    process_immext ();

  /* For insns with operands there are more diddles to do to the opcode.  */
  if (i.operands)
    {
      if (!process_operands ())
	return;
    }
  else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
    {
      /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc.  */
      as_warn (_("translating to `%sp'"), i.tm.name);
    }

  if (i.tm.opcode_modifier.vex)
    build_vex_prefix (t);

  /* Handle conversion of 'int $3' --> special int3 insn.  XOP or FMA4
     instructions may define INT_OPCODE as well, so avoid this corner
     case for those instructions that use MODRM.  */
  if (i.tm.base_opcode == INT_OPCODE
      && !i.tm.opcode_modifier.modrm
      && i.op[0].imms->X_add_number == 3)
    {
      i.tm.base_opcode = INT3_OPCODE;
      i.imm_operands = 0;
    }

  if ((i.tm.opcode_modifier.jump
       || i.tm.opcode_modifier.jumpbyte
       || i.tm.opcode_modifier.jumpdword)
      && i.op[0].disps->X_op == O_constant)
    {
      /* Convert "jmp constant" (and "call constant") to a jump (call) to
	 the absolute address given by the constant.  Since ix86 jumps and
	 calls are pc relative, we need to generate a reloc.  */
      i.op[0].disps->X_add_symbol = &abs_symbol;
      i.op[0].disps->X_op = O_symbol;
    }

  if (i.tm.opcode_modifier.rex64)
    i.rex |= REX_W;

  /* For 8 bit registers we need an empty rex prefix.  Also if the
     instruction already has a prefix, we need to convert old
     registers to new ones.  */

  if ((i.types[0].bitfield.reg8
       && (i.op[0].regs->reg_flags & RegRex64) != 0)
      || (i.types[1].bitfield.reg8
	  && (i.op[1].regs->reg_flags & RegRex64) != 0)
      || ((i.types[0].bitfield.reg8
	   || i.types[1].bitfield.reg8)
	  && i.rex != 0))
    {
      int x;

      i.rex |= REX_OPCODE;
      for (x = 0; x < 2; x++)
	{
	  /* Look for 8 bit operand that uses old registers.  */
	  if (i.types[x].bitfield.reg8
	      && (i.op[x].regs->reg_flags & RegRex64) == 0)
	    {
	      /* In case it is "hi" register, give up.  */
	      if (i.op[x].regs->reg_num > 3)
		as_bad (_("can't encode register '%s%s' in an "
			  "instruction requiring REX prefix."),
			register_prefix, i.op[x].regs->reg_name);

	      /* Otherwise it is equivalent to the extended register.
		 Since the encoding doesn't change this is merely
		 cosmetic cleanup for debug output.  */

	      i.op[x].regs = i.op[x].regs + 8;
	    }
	}
    }

  if (i.rex != 0)
    add_prefix (REX_OPCODE | i.rex);

  /* We are ready to output the insn.  */
  output_insn ();
}
3362
/* Parse any instruction prefixes and the mnemonic at LINE, copying the
   mnemonic into MNEMONIC and leaving the global current_templates
   pointing at the set of candidate templates for it.  Returns a pointer
   just past the parsed mnemonic (and any branch hint), or NULL after
   reporting an error with as_bad.  */
static char *
parse_insn (char *line, char *mnemonic)
{
  char *l = line;
  char *token_start = l;
  char *mnem_p;
  int supported;
  const insn_template *t;
  char *dot_p = NULL;

  /* Loop so any number of prefix tokens (e.g. "lock", "rep", "data16")
     can be consumed before the actual mnemonic.  */
  while (1)
    {
      mnem_p = mnemonic;
      /* Copy legal mnemonic characters, remembering the last '.' so the
	 pseudo suffixes .s/.d8/.d32 can be recognised further down.  */
      while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
	{
	  if (*mnem_p == '.')
	    dot_p = mnem_p;
	  mnem_p++;
	  if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
	    {
	      as_bad (_("no such instruction: `%s'"), token_start);
	      return NULL;
	    }
	  l++;
	}
      /* The token must be terminated by whitespace, end of insn, or (in
	 AT&T mode) a prefix separator or comma.  */
      if (!is_space_char (*l)
	  && *l != END_OF_INSN
	  && (intel_syntax
	      || (*l != PREFIX_SEPARATOR
		  && *l != ',')))
	{
	  as_bad (_("invalid character %s in mnemonic"),
		  output_invalid (*l));
	  return NULL;
	}
      if (token_start == l)
	{
	  if (!intel_syntax && *l == PREFIX_SEPARATOR)
	    as_bad (_("expecting prefix; got nothing"));
	  else
	    as_bad (_("expecting mnemonic; got nothing"));
	  return NULL;
	}

      /* Look up instruction (or prefix) via hash table.  */
      current_templates = (const templates *) hash_find (op_hash, mnemonic);

      /* If the token is a prefix and more input follows, record the
	 prefix and go round again for the next token.  */
      if (*l != END_OF_INSN
	  && (!is_space_char (*l) || l[1] != END_OF_INSN)
	  && current_templates
	  && current_templates->start->opcode_modifier.isprefix)
	{
	  if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
	    {
	      as_bad ((flag_code != CODE_64BIT
		       ? _("`%s' is only supported in 64-bit mode")
		       : _("`%s' is not supported in 64-bit mode")),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* If we are in 16-bit mode, do not allow addr16 or data16.
	     Similarly, in 32-bit mode, do not allow addr32 or data32.  */
	  if ((current_templates->start->opcode_modifier.size16
	       || current_templates->start->opcode_modifier.size32)
	      && flag_code != CODE_64BIT
	      && (current_templates->start->opcode_modifier.size32
		  ^ (flag_code == CODE_16BIT)))
	    {
	      as_bad (_("redundant %s prefix"),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* Add prefix, checking for repeated prefixes.  */
	  switch (add_prefix (current_templates->start->base_opcode))
	    {
	    case PREFIX_EXIST:
	      return NULL;
	    case PREFIX_REP:
	      /* The 0xf2/0xf3 bytes also encode the HLE xacquire/
		 xrelease prefixes; the CPU flag tells them apart.  */
	      if (current_templates->start->cpu_flags.bitfield.cpuhle)
		i.have_hle = 1;
	      else
		i.rep_prefix = current_templates->start->name;
	      break;
	    default:
	      break;
	    }
	  /* Skip past PREFIX_SEPARATOR and reset token_start.  */
	  token_start = ++l;
	}
      else
	break;
    }

  if (!current_templates)
    {
      /* Check if we should swap operand or force 32bit displacement in
	 encoding.  The pseudo suffix is stripped before re-lookup.  */
      if (mnem_p - 2 == dot_p && dot_p[1] == 's')
	i.swap_operand = 1;
      else if (mnem_p - 3 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '8')
	i.disp_encoding = disp_encoding_8bit;
      else if (mnem_p - 4 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '3'
	       && dot_p[3] == '2')
	i.disp_encoding = disp_encoding_32bit;
      else
	goto check_suffix;
      mnem_p = dot_p;
      *dot_p = '\0';
      current_templates = (const templates *) hash_find (op_hash, mnemonic);
    }

  if (!current_templates)
    {
check_suffix:
      /* See if we can get a match by trimming off a suffix.  */
      switch (mnem_p[-1])
	{
	case WORD_MNEM_SUFFIX:
	  if (intel_syntax && (intel_float_operand (mnemonic) & 2))
	    i.suffix = SHORT_MNEM_SUFFIX;
	  else
	    /* The else-arm intentionally shares the statements below
	       with the byte/qword case labels.  */
	case BYTE_MNEM_SUFFIX:
	case QWORD_MNEM_SUFFIX:
	    i.suffix = mnem_p[-1];
	  mnem_p[-1] = '\0';
	  current_templates = (const templates *) hash_find (op_hash,
							     mnemonic);
	  break;
	case SHORT_MNEM_SUFFIX:
	case LONG_MNEM_SUFFIX:
	  if (!intel_syntax)
	    {
	      i.suffix = mnem_p[-1];
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;

	  /* Intel Syntax.  */
	case 'd':
	  if (intel_syntax)
	    {
	      if (intel_float_operand (mnemonic) == 1)
		i.suffix = SHORT_MNEM_SUFFIX;
	      else
		i.suffix = LONG_MNEM_SUFFIX;
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
								 mnemonic);
	    }
	  break;
	}
      if (!current_templates)
	{
	  as_bad (_("no such instruction: `%s'"), token_start);
	  return NULL;
	}
    }

  if (current_templates->start->opcode_modifier.jump
      || current_templates->start->opcode_modifier.jumpbyte)
    {
      /* Check for a branch hint.  We allow ",pt" and ",pn" for
	 predict taken and predict not taken respectively.
	 I'm not sure that branch hints actually do anything on loop
	 and jcxz insns (JumpByte) for current Pentium4 chips.  They
	 may work in the future and it doesn't hurt to accept them
	 now.  */
      if (l[0] == ',' && l[1] == 'p')
	{
	  if (l[2] == 't')
	    {
	      if (!add_prefix (DS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	  else if (l[2] == 'n')
	    {
	      if (!add_prefix (CS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	}
    }
  /* Any other comma loses.  */
  if (*l == ',')
    {
      as_bad (_("invalid character %s in mnemonic"),
	      output_invalid (*l));
      return NULL;
    }

  /* Check if instruction is supported on specified architecture.  */
  supported = 0;
  for (t = current_templates->start; t < current_templates->end; ++t)
    {
      supported |= cpu_flags_match (t);
      if (supported == CPU_FLAGS_PERFECT_MATCH)
	goto skip;
    }

  if (!(supported & CPU_FLAGS_64BIT_MATCH))
    {
      as_bad (flag_code == CODE_64BIT
	      ? _("`%s' is not supported in 64-bit mode")
	      : _("`%s' is only supported in 64-bit mode"),
	      current_templates->start->name);
      return NULL;
    }
  if (supported != CPU_FLAGS_PERFECT_MATCH)
    {
      as_bad (_("`%s' is not supported on `%s%s'"),
	      current_templates->start->name,
	      cpu_arch_name ? cpu_arch_name : default_arch,
	      cpu_sub_arch_name ? cpu_sub_arch_name : "");
      return NULL;
    }

skip:
  if (!cpu_arch_flags.bitfield.cpui386
      && (flag_code != CODE_16BIT))
    {
      as_warn (_("use .code16 to ensure correct addressing mode"));
    }

  return l;
}
3595
/* Parse the comma-separated operands following the mnemonic, starting
   at L, recording each in the global insn state `i' via
   i386_intel_operand / i386_att_operand.  MNEMONIC is only consulted
   for float-operand classification in Intel mode.  Returns a pointer
   to the end of the insn, or NULL after reporting an error.  */
static char *
parse_operands (char *l, const char *mnemonic)
{
  char *token_start;

  /* 1 if operand is pending after ','.  */
  unsigned int expecting_operand = 0;

  /* Non-zero if operand parens not balanced.  */
  unsigned int paren_not_balanced;

  while (*l != END_OF_INSN)
    {
      /* Skip optional white space before operand.  */
      if (is_space_char (*l))
	++l;
      if (!is_operand_char (*l) && *l != END_OF_INSN)
	{
	  as_bad (_("invalid character %s before operand %d"),
		  output_invalid (*l),
		  i.operands + 1);
	  return NULL;
	}
      token_start = l;	/* after white space */
      paren_not_balanced = 0;
      /* Scan to the next top-level comma (commas inside parens or
	 brackets belong to the operand, e.g. scaled-index syntax).  */
      while (paren_not_balanced || *l != ',')
	{
	  if (*l == END_OF_INSN)
	    {
	      if (paren_not_balanced)
		{
		  if (!intel_syntax)
		    as_bad (_("unbalanced parenthesis in operand %d."),
			    i.operands + 1);
		  else
		    as_bad (_("unbalanced brackets in operand %d."),
			    i.operands + 1);
		  return NULL;
		}
	      else
		break;	/* we are done */
	    }
	  else if (!is_operand_char (*l) && !is_space_char (*l))
	    {
	      as_bad (_("invalid character %s in operand %d"),
		      output_invalid (*l),
		      i.operands + 1);
	      return NULL;
	    }
	  /* AT&T memory operands use (), Intel ones use [].  */
	  if (!intel_syntax)
	    {
	      if (*l == '(')
		++paren_not_balanced;
	      if (*l == ')')
		--paren_not_balanced;
	    }
	  else
	    {
	      if (*l == '[')
		++paren_not_balanced;
	      if (*l == ']')
		--paren_not_balanced;
	    }
	  l++;
	}
      if (l != token_start)
	{			/* Yes, we've read in another operand.  */
	  unsigned int operand_ok;
	  this_operand = i.operands++;
	  i.types[this_operand].bitfield.unspecified = 1;
	  if (i.operands > MAX_OPERANDS)
	    {
	      as_bad (_("spurious operands; (%d operands/instruction max)"),
		      MAX_OPERANDS);
	      return NULL;
	    }
	  /* Now parse operand adding info to 'i' as we go along.  The
	     operand text is NUL-terminated in place for the duration of
	     the call, then the saved byte is restored.  */
	  END_STRING_AND_SAVE (l);

	  if (intel_syntax)
	    operand_ok =
	      i386_intel_operand (token_start,
				  intel_float_operand (mnemonic));
	  else
	    operand_ok = i386_att_operand (token_start);

	  RESTORE_END_STRING (l);
	  if (!operand_ok)
	    return NULL;
	}
      else
	{
	  /* Empty operand text: diagnose dangling or doubled commas.  */
	  if (expecting_operand)
	    {
	    expecting_operand_after_comma:
	      as_bad (_("expecting operand after ','; got nothing"));
	      return NULL;
	    }
	  if (*l == ',')
	    {
	      as_bad (_("expecting operand before ','; got nothing"));
	      return NULL;
	    }
	}

      /* Now *l must be either ',' or END_OF_INSN.  */
      if (*l == ',')
	{
	  if (*++l == END_OF_INSN)
	    {
	      /* Just skip it, if it's \n complain.  */
	      goto expecting_operand_after_comma;
	    }
	  expecting_operand = 1;
	}
    }
  return l;
}
3714
3715static void
3716swap_2_operands (int xchg1, int xchg2)
3717{
3718 union i386_op temp_op;
3719 i386_operand_type temp_type;
3720 enum bfd_reloc_code_real temp_reloc;
3721
3722 temp_type = i.types[xchg2];
3723 i.types[xchg2] = i.types[xchg1];
3724 i.types[xchg1] = temp_type;
3725 temp_op = i.op[xchg2];
3726 i.op[xchg2] = i.op[xchg1];
3727 i.op[xchg1] = temp_op;
3728 temp_reloc = i.reloc[xchg2];
3729 i.reloc[xchg2] = i.reloc[xchg1];
3730 i.reloc[xchg1] = temp_reloc;
3731}
3732
3733static void
3734swap_operands (void)
3735{
3736 switch (i.operands)
3737 {
3738 case 5:
3739 case 4:
3740 swap_2_operands (1, i.operands - 2);
3741 case 3:
3742 case 2:
3743 swap_2_operands (0, i.operands - 1);
3744 break;
3745 default:
3746 abort ();
3747 }
3748
3749 if (i.mem_operands == 2)
3750 {
3751 const seg_entry *temp_seg;
3752 temp_seg = i.seg[0];
3753 i.seg[0] = i.seg[1];
3754 i.seg[1] = temp_seg;
3755 }
3756}
3757
/* Try to ensure constant immediates are represented in the smallest
   opcode possible.  */
static void
optimize_imm (void)
{
  /* Operand size hint ('b'/'w'/'l'/'q') used to widen/narrow the set
     of immediate types an operand may match.  */
  char guess_suffix = 0;
  int op;

  if (i.suffix)
    guess_suffix = i.suffix;
  else if (i.reg_operands)
    {
      /* Figure out a suffix from the last register operand specified.
	 We can't do this properly yet, ie. excluding InOutPortReg,
	 but the following works for instructions with immediates.
	 In any case, we can't set i.suffix yet.  */
      for (op = i.operands; --op >= 0;)
	if (i.types[op].bitfield.reg8)
	  {
	    guess_suffix = BYTE_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg16)
	  {
	    guess_suffix = WORD_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg32)
	  {
	    guess_suffix = LONG_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg64)
	  {
	    guess_suffix = QWORD_MNEM_SUFFIX;
	    break;
	  }
    }
  else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
    /* 16-bit operand size, either from .code16 or a data-size
       override prefix (but not both, which would cancel out).  */
    guess_suffix = WORD_MNEM_SUFFIX;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], imm))
      {
	switch (i.op[op].imms->X_op)
	  {
	  case O_constant:
	    /* If a suffix is given, this operand may be shortened.
	       Allow all immediate widths at or below the suffix size
	       (sign-extending forms included).  */
	    switch (guess_suffix)
	      {
	      case LONG_MNEM_SUFFIX:
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case WORD_MNEM_SUFFIX:
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case BYTE_MNEM_SUFFIX:
		i.types[op].bitfield.imm8 = 1;
		i.types[op].bitfield.imm8s = 1;
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      }

	    /* If this operand is at most 16 bits, convert it
	       to a signed 16 bit number before trying to see
	       whether it will fit in an even smaller size.
	       This allows a 16-bit operand such as $0xffe0 to
	       be recognised as within Imm8S range.  */
	    if ((i.types[op].bitfield.imm16)
		&& (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
	      {
		i.op[op].imms->X_add_number =
		  (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
	      }
	    /* Likewise sign-extend a value that fits in 32 bits.  */
	    if ((i.types[op].bitfield.imm32)
		&& ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
		    == 0))
	      {
		i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
						^ ((offsetT) 1 << 31))
					       - ((offsetT) 1 << 31));
	      }
	    i.types[op]
	      = operand_type_or (i.types[op],
				 smallest_imm_type (i.op[op].imms->X_add_number));

	    /* We must avoid matching of Imm32 templates when 64bit
	       only immediate is available.  */
	    if (guess_suffix == QWORD_MNEM_SUFFIX)
	      i.types[op].bitfield.imm32 = 0;
	    break;

	  case O_absent:
	  case O_register:
	    abort ();

	    /* Symbols and expressions.  */
	  default:
	    /* Convert symbolic operand to proper sizes for matching, but don't
	       prevent matching a set of insns that only supports sizes other
	       than those matching the insn suffix.  */
	    {
	      i386_operand_type mask, allowed;
	      const insn_template *t;

	      operand_type_set (&mask, 0);
	      operand_type_set (&allowed, 0);

	      /* Union of immediate types any candidate template
		 accepts for this operand.  */
	      for (t = current_templates->start;
		   t < current_templates->end;
		   ++t)
		allowed = operand_type_or (allowed,
					   t->operand_types[op]);
	      switch (guess_suffix)
		{
		case QWORD_MNEM_SUFFIX:
		  mask.bitfield.imm64 = 1;
		  mask.bitfield.imm32s = 1;
		  break;
		case LONG_MNEM_SUFFIX:
		  mask.bitfield.imm32 = 1;
		  break;
		case WORD_MNEM_SUFFIX:
		  mask.bitfield.imm16 = 1;
		  break;
		case BYTE_MNEM_SUFFIX:
		  mask.bitfield.imm8 = 1;
		  break;
		default:
		  break;
		}
	      /* Only narrow to the suffix-implied width if some
		 template can actually accept it.  */
	      allowed = operand_type_and (mask, allowed);
	      if (!operand_type_all_zero (&allowed))
		i.types[op] = operand_type_and (i.types[op], mask);
	    }
	    break;
	  }
      }
}
3904
/* Try to use the smallest displacement type too.  */
static void
optimize_disp (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], disp))
      {
	if (i.op[op].disps->X_op == O_constant)
	  {
	    offsetT op_disp = i.op[op].disps->X_add_number;

	    if (i.types[op].bitfield.disp16
		&& (op_disp & ~(offsetT) 0xffff) == 0)
	      {
		/* If this operand is at most 16 bits, convert
		   to a signed 16 bit number and don't use 64bit
		   displacement.  */
		op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (i.types[op].bitfield.disp32
		&& (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
	      {
		/* If this operand is at most 32 bits, convert
		   to a signed 32 bit number and don't use 64bit
		   displacement.  */
		op_disp &= (((offsetT) 2 << 31) - 1);
		op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (!op_disp && i.types[op].bitfield.baseindex)
	      {
		/* A zero displacement with a base/index register can
		   be dropped entirely.  */
		i.types[op].bitfield.disp8 = 0;
		i.types[op].bitfield.disp16 = 0;
		i.types[op].bitfield.disp32 = 0;
		i.types[op].bitfield.disp32s = 0;
		i.types[op].bitfield.disp64 = 0;
		i.op[op].disps = 0;
		i.disp_operands--;
	      }
	    else if (flag_code == CODE_64BIT)
	      {
		/* Prefer the sign-extended 32-bit form in 64-bit mode
		   when the value allows it.  */
		if (fits_in_signed_long (op_disp))
		  {
		    i.types[op].bitfield.disp64 = 0;
		    i.types[op].bitfield.disp32s = 1;
		  }
		if (i.prefix[ADDR_PREFIX]
		    && fits_in_unsigned_long (op_disp))
		  i.types[op].bitfield.disp32 = 1;
	      }
	    if ((i.types[op].bitfield.disp32
		 || i.types[op].bitfield.disp32s
		 || i.types[op].bitfield.disp16)
		&& fits_in_signed_byte (op_disp))
	      i.types[op].bitfield.disp8 = 1;
	  }
	else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
		 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
	  {
	    /* TLS descriptor calls: emit the marker relocation now and
	       clear all displacement sizes so no bytes are produced.  */
	    fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
			 i.op[op].disps, 0, i.reloc[op]);
	    i.types[op].bitfield.disp8 = 0;
	    i.types[op].bitfield.disp16 = 0;
	    i.types[op].bitfield.disp32 = 0;
	    i.types[op].bitfield.disp32s = 0;
	    i.types[op].bitfield.disp64 = 0;
	  }
	else
	  /* We only support 64bit displacement on constants.  */
	  i.types[op].bitfield.disp64 = 0;
      }
}
3980
/* Check if operands are valid for the instruction.  Returns 0 when the
   operands are acceptable for template T, 1 (with i.error set) when
   they are not.  */

static int
check_VecOperands (const insn_template *t)
{
  /* Without VSIB byte, we can't have a vector register for index.  */
  if (!t->opcode_modifier.vecsib
      && i.index_reg
      && (i.index_reg->reg_type.bitfield.regxmm
	  || i.index_reg->reg_type.bitfield.regymm))
    {
      i.error = unsupported_vector_index_register;
      return 1;
    }

  /* For VSIB byte, we need a vector register for index, and all vector
     registers must be distinct.  */
  if (t->opcode_modifier.vecsib)
    {
      /* The index register class must match the VSIB width the
	 template demands (XMM for VecSIB128, YMM for VecSIB256).  */
      if (!i.index_reg
	  || !((t->opcode_modifier.vecsib == VecSIB128
		&& i.index_reg->reg_type.bitfield.regxmm)
	       || (t->opcode_modifier.vecsib == VecSIB256
		   && i.index_reg->reg_type.bitfield.regymm)))
	{
	  i.error = invalid_vsib_address;
	  return 1;
	}

      gas_assert (i.reg_operands == 2);
      gas_assert (i.types[0].bitfield.regxmm
		  || i.types[0].bitfield.regymm);
      gas_assert (i.types[2].bitfield.regxmm
		  || i.types[2].bitfield.regymm);

      if (operand_check == check_none)
	return 0;
      /* Accept only when mask (operand 0), index and destination
	 (operand 2) are pairwise distinct.  */
      if (register_number (i.op[0].regs) != register_number (i.index_reg)
	  && register_number (i.op[2].regs) != register_number (i.index_reg)
	  && register_number (i.op[0].regs) != register_number (i.op[2].regs))
	return 0;
      if (operand_check == check_error)
	{
	  i.error = invalid_vector_register_set;
	  return 1;
	}
      /* operand_check == check_warning: diagnose but accept.  */
      as_warn (_("mask, index, and destination registers should be distinct"));
    }

  return 0;
}
4032
4033/* Check if operands are valid for the instruction. Update VEX
4034 operand types. */
4035
4036static int
4037VEX_check_operands (const insn_template *t)
4038{
4039 if (!t->opcode_modifier.vex)
4040 return 0;
4041
4042 /* Only check VEX_Imm4, which must be the first operand. */
4043 if (t->operand_types[0].bitfield.vec_imm4)
4044 {
4045 if (i.op[0].imms->X_op != O_constant
4046 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4047 {
4048 i.error = bad_imm4;
4049 return 1;
4050 }
4051
4052 /* Turn off Imm8 so that update_imm won't complain. */
4053 i.types[0] = vec_imm4;
4054 }
4055
4056 return 0;
4057}
4058
4059static const insn_template *
4060match_template (void)
4061{
4062 /* Points to template once we've found it. */
4063 const insn_template *t;
4064 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4065 i386_operand_type overlap4;
4066 unsigned int found_reverse_match;
4067 i386_opcode_modifier suffix_check;
4068 i386_operand_type operand_types [MAX_OPERANDS];
4069 int addr_prefix_disp;
4070 unsigned int j;
4071 unsigned int found_cpu_match;
4072 unsigned int check_register;
4073 enum i386_error specific_error = 0;
4074
4075#if MAX_OPERANDS != 5
4076# error "MAX_OPERANDS must be 5."
4077#endif
4078
4079 found_reverse_match = 0;
4080 addr_prefix_disp = -1;
4081
4082 memset (&suffix_check, 0, sizeof (suffix_check));
4083 if (i.suffix == BYTE_MNEM_SUFFIX)
4084 suffix_check.no_bsuf = 1;
4085 else if (i.suffix == WORD_MNEM_SUFFIX)
4086 suffix_check.no_wsuf = 1;
4087 else if (i.suffix == SHORT_MNEM_SUFFIX)
4088 suffix_check.no_ssuf = 1;
4089 else if (i.suffix == LONG_MNEM_SUFFIX)
4090 suffix_check.no_lsuf = 1;
4091 else if (i.suffix == QWORD_MNEM_SUFFIX)
4092 suffix_check.no_qsuf = 1;
4093 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4094 suffix_check.no_ldsuf = 1;
4095
4096 /* Must have right number of operands. */
4097 i.error = number_of_operands_mismatch;
4098
4099 for (t = current_templates->start; t < current_templates->end; t++)
4100 {
4101 addr_prefix_disp = -1;
4102
4103 if (i.operands != t->operands)
4104 continue;
4105
4106 /* Check processor support. */
4107 i.error = unsupported;
4108 found_cpu_match = (cpu_flags_match (t)
4109 == CPU_FLAGS_PERFECT_MATCH);
4110 if (!found_cpu_match)
4111 continue;
4112
4113 /* Check old gcc support. */
4114 i.error = old_gcc_only;
4115 if (!old_gcc && t->opcode_modifier.oldgcc)
4116 continue;
4117
4118 /* Check AT&T mnemonic. */
4119 i.error = unsupported_with_intel_mnemonic;
4120 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4121 continue;
4122
4123 /* Check AT&T/Intel syntax. */
4124 i.error = unsupported_syntax;
4125 if ((intel_syntax && t->opcode_modifier.attsyntax)
4126 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4127 continue;
4128
4129 /* Check the suffix, except for some instructions in intel mode. */
4130 i.error = invalid_instruction_suffix;
4131 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4132 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4133 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4134 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4135 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4136 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4137 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4138 continue;
4139
4140 if (!operand_size_match (t))
4141 continue;
4142
4143 for (j = 0; j < MAX_OPERANDS; j++)
4144 operand_types[j] = t->operand_types[j];
4145
4146 /* In general, don't allow 64-bit operands in 32-bit mode. */
4147 if (i.suffix == QWORD_MNEM_SUFFIX
4148 && flag_code != CODE_64BIT
4149 && (intel_syntax
4150 ? (!t->opcode_modifier.ignoresize
4151 && !intel_float_operand (t->name))
4152 : intel_float_operand (t->name) != 2)
4153 && ((!operand_types[0].bitfield.regmmx
4154 && !operand_types[0].bitfield.regxmm
4155 && !operand_types[0].bitfield.regymm)
4156 || (!operand_types[t->operands > 1].bitfield.regmmx
4157 && !!operand_types[t->operands > 1].bitfield.regxmm
4158 && !!operand_types[t->operands > 1].bitfield.regymm))
4159 && (t->base_opcode != 0x0fc7
4160 || t->extension_opcode != 1 /* cmpxchg8b */))
4161 continue;
4162
4163 /* In general, don't allow 32-bit operands on pre-386. */
4164 else if (i.suffix == LONG_MNEM_SUFFIX
4165 && !cpu_arch_flags.bitfield.cpui386
4166 && (intel_syntax
4167 ? (!t->opcode_modifier.ignoresize
4168 && !intel_float_operand (t->name))
4169 : intel_float_operand (t->name) != 2)
4170 && ((!operand_types[0].bitfield.regmmx
4171 && !operand_types[0].bitfield.regxmm)
4172 || (!operand_types[t->operands > 1].bitfield.regmmx
4173 && !!operand_types[t->operands > 1].bitfield.regxmm)))
4174 continue;
4175
4176 /* Do not verify operands when there are none. */
4177 else
4178 {
4179 if (!t->operands)
4180 /* We've found a match; break out of loop. */
4181 break;
4182 }
4183
4184 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4185 into Disp32/Disp16/Disp32 operand. */
4186 if (i.prefix[ADDR_PREFIX] != 0)
4187 {
4188 /* There should be only one Disp operand. */
4189 switch (flag_code)
4190 {
4191 case CODE_16BIT:
4192 for (j = 0; j < MAX_OPERANDS; j++)
4193 {
4194 if (operand_types[j].bitfield.disp16)
4195 {
4196 addr_prefix_disp = j;
4197 operand_types[j].bitfield.disp32 = 1;
4198 operand_types[j].bitfield.disp16 = 0;
4199 break;
4200 }
4201 }
4202 break;
4203 case CODE_32BIT:
4204 for (j = 0; j < MAX_OPERANDS; j++)
4205 {
4206 if (operand_types[j].bitfield.disp32)
4207 {
4208 addr_prefix_disp = j;
4209 operand_types[j].bitfield.disp32 = 0;
4210 operand_types[j].bitfield.disp16 = 1;
4211 break;
4212 }
4213 }
4214 break;
4215 case CODE_64BIT:
4216 for (j = 0; j < MAX_OPERANDS; j++)
4217 {
4218 if (operand_types[j].bitfield.disp64)
4219 {
4220 addr_prefix_disp = j;
4221 operand_types[j].bitfield.disp64 = 0;
4222 operand_types[j].bitfield.disp32 = 1;
4223 break;
4224 }
4225 }
4226 break;
4227 }
4228 }
4229
4230 /* We check register size if needed. */
4231 check_register = t->opcode_modifier.checkregsize;
4232 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4233 switch (t->operands)
4234 {
4235 case 1:
4236 if (!operand_type_match (overlap0, i.types[0]))
4237 continue;
4238 break;
4239 case 2:
4240 /* xchg %eax, %eax is a special case. It is an aliase for nop
4241 only in 32bit mode and we can use opcode 0x90. In 64bit
4242 mode, we can't use 0x90 for xchg %eax, %eax since it should
4243 zero-extend %eax to %rax. */
4244 if (flag_code == CODE_64BIT
4245 && t->base_opcode == 0x90
4246 && operand_type_equal (&i.types [0], &acc32)
4247 && operand_type_equal (&i.types [1], &acc32))
4248 continue;
4249 if (i.swap_operand)
4250 {
4251 /* If we swap operand in encoding, we either match
4252 the next one or reverse direction of operands. */
4253 if (t->opcode_modifier.s)
4254 continue;
4255 else if (t->opcode_modifier.d)
4256 goto check_reverse;
4257 }
4258
4259 case 3:
4260 /* If we swap operand in encoding, we match the next one. */
4261 if (i.swap_operand && t->opcode_modifier.s)
4262 continue;
4263 case 4:
4264 case 5:
4265 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4266 if (!operand_type_match (overlap0, i.types[0])
4267 || !operand_type_match (overlap1, i.types[1])
4268 || (check_register
4269 && !operand_type_register_match (overlap0, i.types[0],
4270 operand_types[0],
4271 overlap1, i.types[1],
4272 operand_types[1])))
4273 {
4274 /* Check if other direction is valid ... */
4275 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4276 continue;
4277
4278check_reverse:
4279 /* Try reversing direction of operands. */
4280 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4281 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4282 if (!operand_type_match (overlap0, i.types[0])
4283 || !operand_type_match (overlap1, i.types[1])
4284 || (check_register
4285 && !operand_type_register_match (overlap0,
4286 i.types[0],
4287 operand_types[1],
4288 overlap1,
4289 i.types[1],
4290 operand_types[0])))
4291 {
4292 /* Does not match either direction. */
4293 continue;
4294 }
4295 /* found_reverse_match holds which of D or FloatDR
4296 we've found. */
4297 if (t->opcode_modifier.d)
4298 found_reverse_match = Opcode_D;
4299 else if (t->opcode_modifier.floatd)
4300 found_reverse_match = Opcode_FloatD;
4301 else
4302 found_reverse_match = 0;
4303 if (t->opcode_modifier.floatr)
4304 found_reverse_match |= Opcode_FloatR;
4305 }
4306 else
4307 {
4308 /* Found a forward 2 operand match here. */
4309 switch (t->operands)
4310 {
4311 case 5:
4312 overlap4 = operand_type_and (i.types[4],
4313 operand_types[4]);
4314 case 4:
4315 overlap3 = operand_type_and (i.types[3],
4316 operand_types[3]);
4317 case 3:
4318 overlap2 = operand_type_and (i.types[2],
4319 operand_types[2]);
4320 break;
4321 }
4322
4323 switch (t->operands)
4324 {
4325 case 5:
4326 if (!operand_type_match (overlap4, i.types[4])
4327 || !operand_type_register_match (overlap3,
4328 i.types[3],
4329 operand_types[3],
4330 overlap4,
4331 i.types[4],
4332 operand_types[4]))
4333 continue;
4334 case 4:
4335 if (!operand_type_match (overlap3, i.types[3])
4336 || (check_register
4337 && !operand_type_register_match (overlap2,
4338 i.types[2],
4339 operand_types[2],
4340 overlap3,
4341 i.types[3],
4342 operand_types[3])))
4343 continue;
4344 case 3:
4345 /* Here we make use of the fact that there are no
4346 reverse match 3 operand instructions, and all 3
4347 operand instructions only need to be checked for
4348 register consistency between operands 2 and 3. */
4349 if (!operand_type_match (overlap2, i.types[2])
4350 || (check_register
4351 && !operand_type_register_match (overlap1,
4352 i.types[1],
4353 operand_types[1],
4354 overlap2,
4355 i.types[2],
4356 operand_types[2])))
4357 continue;
4358 break;
4359 }
4360 }
4361 /* Found either forward/reverse 2, 3 or 4 operand match here:
4362 slip through to break. */
4363 }
4364 if (!found_cpu_match)
4365 {
4366 found_reverse_match = 0;
4367 continue;
4368 }
4369
4370 /* Check if vector and VEX operands are valid. */
4371 if (check_VecOperands (t) || VEX_check_operands (t))
4372 {
4373 specific_error = i.error;
4374 continue;
4375 }
4376
4377 /* We've found a match; break out of loop. */
4378 break;
4379 }
4380
4381 if (t == current_templates->end)
4382 {
4383 /* We found no match. */
4384 const char *err_msg;
4385 switch (specific_error ? specific_error : i.error)
4386 {
4387 default:
4388 abort ();
4389 case operand_size_mismatch:
4390 err_msg = _("operand size mismatch");
4391 break;
4392 case operand_type_mismatch:
4393 err_msg = _("operand type mismatch");
4394 break;
4395 case register_type_mismatch:
4396 err_msg = _("register type mismatch");
4397 break;
4398 case number_of_operands_mismatch:
4399 err_msg = _("number of operands mismatch");
4400 break;
4401 case invalid_instruction_suffix:
4402 err_msg = _("invalid instruction suffix");
4403 break;
4404 case bad_imm4:
4405 err_msg = _("constant doesn't fit in 4 bits");
4406 break;
4407 case old_gcc_only:
4408 err_msg = _("only supported with old gcc");
4409 break;
4410 case unsupported_with_intel_mnemonic:
4411 err_msg = _("unsupported with Intel mnemonic");
4412 break;
4413 case unsupported_syntax:
4414 err_msg = _("unsupported syntax");
4415 break;
4416 case unsupported:
4417 as_bad (_("unsupported instruction `%s'"),
4418 current_templates->start->name);
4419 return NULL;
4420 case invalid_vsib_address:
4421 err_msg = _("invalid VSIB address");
4422 break;
4423 case invalid_vector_register_set:
4424 err_msg = _("mask, index, and destination registers must be distinct");
4425 break;
4426 case unsupported_vector_index_register:
4427 err_msg = _("unsupported vector index register");
4428 break;
4429 }
4430 as_bad (_("%s for `%s'"), err_msg,
4431 current_templates->start->name);
4432 return NULL;
4433 }
4434
4435 if (!quiet_warnings)
4436 {
4437 if (!intel_syntax
4438 && (i.types[0].bitfield.jumpabsolute
4439 != operand_types[0].bitfield.jumpabsolute))
4440 {
4441 as_warn (_("indirect %s without `*'"), t->name);
4442 }
4443
4444 if (t->opcode_modifier.isprefix
4445 && t->opcode_modifier.ignoresize)
4446 {
4447 /* Warn them that a data or address size prefix doesn't
4448 affect assembly of the next line of code. */
4449 as_warn (_("stand-alone `%s' prefix"), t->name);
4450 }
4451 }
4452
4453 /* Copy the template we found. */
4454 i.tm = *t;
4455
4456 if (addr_prefix_disp != -1)
4457 i.tm.operand_types[addr_prefix_disp]
4458 = operand_types[addr_prefix_disp];
4459
4460 if (found_reverse_match)
4461 {
4462 /* If we found a reverse match we must alter the opcode
4463 direction bit. found_reverse_match holds bits to change
4464 (different for int & float insns). */
4465
4466 i.tm.base_opcode ^= found_reverse_match;
4467
4468 i.tm.operand_types[0] = operand_types[1];
4469 i.tm.operand_types[1] = operand_types[0];
4470 }
4471
4472 return t;
4473}
4474
4475static int
4476check_string (void)
4477{
4478 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4479 if (i.tm.operand_types[mem_op].bitfield.esseg)
4480 {
4481 if (i.seg[0] != NULL && i.seg[0] != &es)
4482 {
4483 as_bad (_("`%s' operand %d must use `%ses' segment"),
4484 i.tm.name,
4485 mem_op + 1,
4486 register_prefix);
4487 return 0;
4488 }
4489 /* There's only ever one segment override allowed per instruction.
4490 This instruction possibly has a legal segment override on the
4491 second operand, so copy the segment to where non-string
4492 instructions store it, allowing common code. */
4493 i.seg[0] = i.seg[1];
4494 }
4495 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4496 {
4497 if (i.seg[1] != NULL && i.seg[1] != &es)
4498 {
4499 as_bad (_("`%s' operand %d must use `%ses' segment"),
4500 i.tm.name,
4501 mem_op + 2,
4502 register_prefix);
4503 return 0;
4504 }
4505 }
4506 return 1;
4507}
4508
/* Determine the instruction's operand-size suffix and apply its
   consequences.  The suffix is taken from the template when explicit
   (Size16/32/64), invented from the register operands, or defaulted
   from the current code size.  On success the base opcode, size
   prefixes and REX.W bit are adjusted accordingly and 1 is returned;
   on failure a diagnostic is emitted and 0 is returned.  */
static int
process_suffix (void)
{
  /* If matched instruction specifies an explicit instruction mnemonic
     suffix, use it.  */
  if (i.tm.opcode_modifier.size16)
    i.suffix = WORD_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size32)
    i.suffix = LONG_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size64)
    i.suffix = QWORD_MNEM_SUFFIX;
  else if (i.reg_operands)
    {
      /* If there's no instruction mnemonic suffix we try to invent one
	 based on register operands.  */
      if (!i.suffix)
	{
	  /* We take i.suffix from the last register operand specified,
	     Destination register type is more significant than source
	     register type.  crc32 in SSE4.2 prefers source register
	     type.  */
	  if (i.tm.base_opcode == 0xf20f38f1)	/* crc32 r16/r32/r64 form */
	    {
	      if (i.types[0].bitfield.reg16)
		i.suffix = WORD_MNEM_SUFFIX;
	      else if (i.types[0].bitfield.reg32)
		i.suffix = LONG_MNEM_SUFFIX;
	      else if (i.types[0].bitfield.reg64)
		i.suffix = QWORD_MNEM_SUFFIX;
	    }
	  else if (i.tm.base_opcode == 0xf20f38f0)	/* crc32 r8 form */
	    {
	      if (i.types[0].bitfield.reg8)
		i.suffix = BYTE_MNEM_SUFFIX;
	    }

	  if (!i.suffix)
	    {
	      int op;

	      if (i.tm.base_opcode == 0xf20f38f1
		  || i.tm.base_opcode == 0xf20f38f0)
		{
		  /* We have to know the operand size for crc32.  */
		  as_bad (_("ambiguous memory operand size for `%s`"),
			  i.tm.name);
		  return 0;
		}

	      /* Scan operands from last to first; the first sized
		 register found (skipping I/O port registers) decides
		 the suffix.  */
	      for (op = i.operands; --op >= 0;)
		if (!i.tm.operand_types[op].bitfield.inoutportreg)
		  {
		    if (i.types[op].bitfield.reg8)
		      {
			i.suffix = BYTE_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg16)
		      {
			i.suffix = WORD_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg32)
		      {
			i.suffix = LONG_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg64)
		      {
			i.suffix = QWORD_MNEM_SUFFIX;
			break;
		      }
		  }
	    }
	}
      else if (i.suffix == BYTE_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_bsuf)
	    i.suffix = 0;
	  else if (!check_byte_reg ())
	    return 0;
	}
      else if (i.suffix == LONG_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_lsuf)
	    i.suffix = 0;
	  else if (!check_long_reg ())
	    return 0;
	}
      else if (i.suffix == QWORD_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_qsuf)
	    i.suffix = 0;
	  else if (!check_qword_reg ())
	    return 0;
	}
      else if (i.suffix == WORD_MNEM_SUFFIX)
	{
	  if (intel_syntax
	      && i.tm.opcode_modifier.ignoresize
	      && i.tm.opcode_modifier.no_wsuf)
	    i.suffix = 0;
	  else if (!check_word_reg ())
	    return 0;
	}
      else if (i.suffix == XMMWORD_MNEM_SUFFIX
	       || i.suffix == YMMWORD_MNEM_SUFFIX)
	{
	  /* Skip if the instruction has x/y suffix.  match_template
	     should check if it is a valid suffix.  */
	}
      else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
	/* Do nothing if the instruction is going to ignore the prefix.  */
	;
      else
	abort ();
    }
  else if (i.tm.opcode_modifier.defaultsize
	   && !i.suffix
	   /* exclude fldenv/frstor/fsave/fstenv */
	   && i.tm.opcode_modifier.no_ssuf)
    {
      i.suffix = stackop_size;
    }
  else if (intel_syntax
	   && !i.suffix
	   && (i.tm.operand_types[0].bitfield.jumpabsolute
	       || i.tm.opcode_modifier.jumpbyte
	       || i.tm.opcode_modifier.jumpintersegment
	       || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
		   && i.tm.extension_opcode <= 3)))
    {
      /* Pick a suffix matching the current code size.  */
      switch (flag_code)
	{
	case CODE_64BIT:
	  if (!i.tm.opcode_modifier.no_qsuf)
	    {
	      i.suffix = QWORD_MNEM_SUFFIX;
	      break;
	    }
	  /* Fall through to the `l' suffix when `q' is disallowed.  */
	case CODE_32BIT:
	  if (!i.tm.opcode_modifier.no_lsuf)
	    i.suffix = LONG_MNEM_SUFFIX;
	  break;
	case CODE_16BIT:
	  if (!i.tm.opcode_modifier.no_wsuf)
	    i.suffix = WORD_MNEM_SUFFIX;
	  break;
	}
    }

  if (!i.suffix)
    {
      if (!intel_syntax)
	{
	  if (i.tm.opcode_modifier.w)
	    {
	      as_bad (_("no instruction mnemonic suffix given and "
			"no register operands; can't size instruction"));
	      return 0;
	    }
	}
      else
	{
	  /* Collect the set of suffixes the template accepts, one bit
	     per suffix; more than one bit set means the size is
	     ambiguous unless the template defaults or ignores it.  */
	  unsigned int suffixes;

	  suffixes = !i.tm.opcode_modifier.no_bsuf;
	  if (!i.tm.opcode_modifier.no_wsuf)
	    suffixes |= 1 << 1;
	  if (!i.tm.opcode_modifier.no_lsuf)
	    suffixes |= 1 << 2;
	  if (!i.tm.opcode_modifier.no_ldsuf)
	    suffixes |= 1 << 3;
	  if (!i.tm.opcode_modifier.no_ssuf)
	    suffixes |= 1 << 4;
	  if (!i.tm.opcode_modifier.no_qsuf)
	    suffixes |= 1 << 5;

	  /* There are more than suffix matches.  */
	  if (i.tm.opcode_modifier.w
	      || ((suffixes & (suffixes - 1))
		  && !i.tm.opcode_modifier.defaultsize
		  && !i.tm.opcode_modifier.ignoresize))
	    {
	      as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
	      return 0;
	    }
	}
    }

  /* Change the opcode based on the operand size given by i.suffix;
     We don't need to change things for byte insns.  */

  if (i.suffix
      && i.suffix != BYTE_MNEM_SUFFIX
      && i.suffix != XMMWORD_MNEM_SUFFIX
      && i.suffix != YMMWORD_MNEM_SUFFIX)
    {
      /* It's not a byte, select word/dword operation.  */
      if (i.tm.opcode_modifier.w)
	{
	  if (i.tm.opcode_modifier.shortform)
	    i.tm.base_opcode |= 8;
	  else
	    i.tm.base_opcode |= 1;
	}

      /* Now select between word & dword operations via the operand
	 size prefix, except for instructions that will ignore this
	 prefix anyway.  */
      if (i.tm.opcode_modifier.addrprefixop0)
	{
	  /* The address size override prefix changes the size of the
	     first operand.  */
	  if ((flag_code == CODE_32BIT
	       && i.op->regs[0].reg_type.bitfield.reg16)
	      || (flag_code != CODE_32BIT
		  && i.op->regs[0].reg_type.bitfield.reg32))
	    if (!add_prefix (ADDR_PREFIX_OPCODE))
	      return 0;
	}
      else if (i.suffix != QWORD_MNEM_SUFFIX
	       && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
	       && !i.tm.opcode_modifier.ignoresize
	       && !i.tm.opcode_modifier.floatmf
	       && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
		   || (flag_code == CODE_64BIT
		       && i.tm.opcode_modifier.jumpbyte)))
	{
	  unsigned int prefix = DATA_PREFIX_OPCODE;

	  if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
	    prefix = ADDR_PREFIX_OPCODE;

	  if (!add_prefix (prefix))
	    return 0;
	}

      /* Set mode64 for an operand.  */
      if (i.suffix == QWORD_MNEM_SUFFIX
	  && flag_code == CODE_64BIT
	  && !i.tm.opcode_modifier.norex64)
	{
	  /* Special case for xchg %rax,%rax.  It is NOP and doesn't
	     need rex64. cmpxchg8b is also a special case. */
	  if (! (i.operands == 2
		 && i.tm.base_opcode == 0x90
		 && i.tm.extension_opcode == None
		 && operand_type_equal (&i.types [0], &acc64)
		 && operand_type_equal (&i.types [1], &acc64))
	      && ! (i.operands == 1
		    && i.tm.base_opcode == 0xfc7
		    && i.tm.extension_opcode == 1
		    && !operand_type_check (i.types [0], reg)
		    && operand_type_check (i.types [0], anymem)))
	    i.rex |= REX_W;
	}

      /* Size floating point instruction.  */
      if (i.suffix == LONG_MNEM_SUFFIX)
	if (i.tm.opcode_modifier.floatmf)
	  i.tm.base_opcode ^= 4;
    }

  return 1;
}
4781
/* Check register operands against a `b' (byte) suffix.  8 bit
   registers are always fine; 16/32/64 bit general registers %ax-%dx
   etc. are narrowed to their low byte with a warning (outside 64 bit
   mode); any other register class is an error.  Returns 1 on success,
   0 after a diagnostic.  */
static int
check_byte_reg (void)
{
  int op;

  /* Walk the operands from last to first.  */
  for (op = i.operands; --op >= 0;)
    {
      /* If this is an eight bit register, it's OK.  If it's the 16 or
	 32 bit version of an eight bit register, we will just use the
	 low portion, and that's OK too.  */
      if (i.types[op].bitfield.reg8)
	continue;

      /* I/O port address operands are OK too.  */
      if (i.tm.operand_types[op].bitfield.inoutportreg)
	continue;

      /* crc32 doesn't generate this warning.  */
      if (i.tm.base_opcode == 0xf20f38f0)
	continue;

      /* Registers 0-3 (ax/cx/dx/bx families) have byte-sized
	 counterparts, so silently use the low byte.  */
      if ((i.types[op].bitfield.reg16
	   || i.types[op].bitfield.reg32
	   || i.types[op].bitfield.reg64)
	  && i.op[op].regs->reg_num < 4
	  /* Prohibit these changes in 64bit mode, since the lowering
	     would be more complicated.  */
	  && flag_code != CODE_64BIT)
	{
#if REGISTER_WARNINGS
	  if (!quiet_warnings)
	    as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		     register_prefix,
		     (i.op[op].regs + (i.types[op].bitfield.reg16
				       ? REGNAM_AL - REGNAM_AX
				       : REGNAM_AL - REGNAM_EAX))->reg_name,
		     register_prefix,
		     i.op[op].regs->reg_name,
		     i.suffix);
#endif
	  continue;
	}
      /* Any other register is bad.  */
      if (i.types[op].bitfield.reg16
	  || i.types[op].bitfield.reg32
	  || i.types[op].bitfield.reg64
	  || i.types[op].bitfield.regmmx
	  || i.types[op].bitfield.regxmm
	  || i.types[op].bitfield.regymm
	  || i.types[op].bitfield.sreg2
	  || i.types[op].bitfield.sreg3
	  || i.types[op].bitfield.control
	  || i.types[op].bitfield.debug
	  || i.types[op].bitfield.test
	  || i.types[op].bitfield.floatreg
	  || i.types[op].bitfield.floatacc)
	{
	  as_bad (_("`%s%s' not allowed with `%s%c'"),
		  register_prefix,
		  i.op[op].regs->reg_name,
		  i.tm.name,
		  i.suffix);
	  return 0;
	}
    }
  return 1;
}
4849
/* Check register operands against an `l' (dword) suffix.  Rejects
   byte registers the template doesn't ask for, warns about (or, in
   64 bit mode, rejects) 16 bit registers where 32 bit ones are
   expected, and handles 64 bit registers via the ToQword conversion
   or an error.  Returns 1 on success, 0 after a diagnostic.  */
static int
check_long_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    if (i.types[op].bitfield.reg8
	&& (i.tm.operand_types[op].bitfield.reg16
	    || i.tm.operand_types[op].bitfield.reg32
	    || i.tm.operand_types[op].bitfield.acc))
      {
	as_bad (_("`%s%s' not allowed with `%s%c'"),
		register_prefix,
		i.op[op].regs->reg_name,
		i.tm.name,
		i.suffix);
	return 0;
      }
    /* Warn if the e prefix on a general reg is missing.  */
    else if ((!quiet_warnings || flag_code == CODE_64BIT)
	     && i.types[op].bitfield.reg16
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	/* Prohibit these changes in the 64bit mode, since the
	   lowering is more complicated.  */
	if (flag_code == CODE_64BIT)
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
#if REGISTER_WARNINGS
	else
	  as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		   register_prefix,
		   (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
		   register_prefix,
		   i.op[op].regs->reg_name,
		   i.suffix);
#endif
      }
    /* Warn if the r prefix on a general reg is missing.  */
    else if (i.types[op].bitfield.reg64
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	/* Intel syntax allows ToQword instructions to widen instead,
	   provided the first operand isn't an xmm register.  */
	if (intel_syntax
	    && i.tm.opcode_modifier.toqword
	    && !i.types[0].bitfield.regxmm)
	  {
	    /* Convert to QWORD.  We want REX byte.  */
	    i.suffix = QWORD_MNEM_SUFFIX;
	  }
	else
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
      }
  return 1;
}
4917
/* Check register operands against a `q' (qword) suffix.  Rejects
   byte registers the template doesn't ask for; 16/32 bit registers
   are either narrowed via the Intel-syntax ToDword conversion or
   rejected.  Returns 1 on success, 0 after a diagnostic.  */
static int
check_qword_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0; )
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    if (i.types[op].bitfield.reg8
	&& (i.tm.operand_types[op].bitfield.reg16
	    || i.tm.operand_types[op].bitfield.reg32
	    || i.tm.operand_types[op].bitfield.acc))
      {
	as_bad (_("`%s%s' not allowed with `%s%c'"),
		register_prefix,
		i.op[op].regs->reg_name,
		i.tm.name,
		i.suffix);
	return 0;
      }
    /* A 16 or 32 bit register where the template expects a 32 bit
       register or the accumulator.  */
    else if ((i.types[op].bitfield.reg16
	      || i.types[op].bitfield.reg32)
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	/* Intel syntax allows ToDword instructions (with no xmm first
	   operand) to be re-sized instead of rejected.  */
	if (intel_syntax
	    && i.tm.opcode_modifier.todword
	    && !i.types[0].bitfield.regxmm)
	  {
	    /* Convert to DWORD.  We don't want REX byte.  */
	    i.suffix = LONG_MNEM_SUFFIX;
	  }
	else
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
      }
  return 1;
}
4963
4964static int
4965check_word_reg (void)
4966{
4967 int op;
4968 for (op = i.operands; --op >= 0;)
4969 /* Reject eight bit registers, except where the template requires
4970 them. (eg. movzb) */
4971 if (i.types[op].bitfield.reg8
4972 && (i.tm.operand_types[op].bitfield.reg16
4973 || i.tm.operand_types[op].bitfield.reg32
4974 || i.tm.operand_types[op].bitfield.acc))
4975 {
4976 as_bad (_("`%s%s' not allowed with `%s%c'"),
4977 register_prefix,
4978 i.op[op].regs->reg_name,
4979 i.tm.name,
4980 i.suffix);
4981 return 0;
4982 }
4983 /* Warn if the e prefix on a general reg is present. */
4984 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4985 && i.types[op].bitfield.reg32
4986 && (i.tm.operand_types[op].bitfield.reg16
4987 || i.tm.operand_types[op].bitfield.acc))
4988 {
4989 /* Prohibit these changes in the 64bit mode, since the
4990 lowering is more complicated. */
4991 if (flag_code == CODE_64BIT)
4992 {
4993 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4994 register_prefix, i.op[op].regs->reg_name,
4995 i.suffix);
4996 return 0;
4997 }
4998 else
4999#if REGISTER_WARNINGS
5000 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5001 register_prefix,
5002 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
5003 register_prefix,
5004 i.op[op].regs->reg_name,
5005 i.suffix);
5006#endif
5007 }
5008 return 1;
5009}
5010
/* Finalize the width of immediate operand J.  When the operand's
   type still admits several immediate widths, narrow it using the
   instruction suffix or, failing that, the effective operand size.
   Returns 1 on success, 0 (after a diagnostic) when no unique width
   can be determined.  */
static int
update_imm (unsigned int j)
{
  i386_operand_type overlap = i.types[j];
  /* Only act when the type matches an immediate but is not already
     exactly one of the canonical imm* types.  */
  if ((overlap.bitfield.imm8
       || overlap.bitfield.imm8s
       || overlap.bitfield.imm16
       || overlap.bitfield.imm32
       || overlap.bitfield.imm32s
       || overlap.bitfield.imm64)
      && !operand_type_equal (&overlap, &imm8)
      && !operand_type_equal (&overlap, &imm8s)
      && !operand_type_equal (&overlap, &imm16)
      && !operand_type_equal (&overlap, &imm32)
      && !operand_type_equal (&overlap, &imm32s)
      && !operand_type_equal (&overlap, &imm64))
    {
      if (i.suffix)
	{
	  /* Keep only the width bits selected by the suffix.  */
	  i386_operand_type temp;

	  operand_type_set (&temp, 0);
	  if (i.suffix == BYTE_MNEM_SUFFIX)
	    {
	      temp.bitfield.imm8 = overlap.bitfield.imm8;
	      temp.bitfield.imm8s = overlap.bitfield.imm8s;
	    }
	  else if (i.suffix == WORD_MNEM_SUFFIX)
	    temp.bitfield.imm16 = overlap.bitfield.imm16;
	  else if (i.suffix == QWORD_MNEM_SUFFIX)
	    {
	      temp.bitfield.imm64 = overlap.bitfield.imm64;
	      temp.bitfield.imm32s = overlap.bitfield.imm32s;
	    }
	  else
	    temp.bitfield.imm32 = overlap.bitfield.imm32;
	  overlap = temp;
	}
      else if (operand_type_equal (&overlap, &imm16_32_32s)
	       || operand_type_equal (&overlap, &imm16_32)
	       || operand_type_equal (&overlap, &imm16_32s))
	{
	  /* No suffix: choose by the effective operand size -- word
	     when 16 bit code without a data prefix (or 32 bit code
	     with one), dword otherwise.  */
	  if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
	    overlap = imm16;
	  else
	    overlap = imm32s;
	}
      if (!operand_type_equal (&overlap, &imm8)
	  && !operand_type_equal (&overlap, &imm8s)
	  && !operand_type_equal (&overlap, &imm16)
	  && !operand_type_equal (&overlap, &imm32)
	  && !operand_type_equal (&overlap, &imm32s)
	  && !operand_type_equal (&overlap, &imm64))
	{
	  as_bad (_("no instruction mnemonic suffix given; "
		    "can't determine immediate size"));
	  return 0;
	}
    }
  i.types[j] = overlap;

  return 1;
}
5074
5075static int
5076finalize_imm (void)
5077{
5078 unsigned int j, n;
5079
5080 /* Update the first 2 immediate operands. */
5081 n = i.operands > 2 ? 2 : i.operands;
5082 if (n)
5083 {
5084 for (j = 0; j < n; j++)
5085 if (update_imm (j) == 0)
5086 return 0;
5087
5088 /* The 3rd operand can't be immediate operand. */
5089 gas_assert (operand_type_check (i.types[2], imm) == 0);
5090 }
5091
5092 return 1;
5093}
5094
5095static int
5096bad_implicit_operand (int xmm)
5097{
5098 const char *ireg = xmm ? "xmm0" : "ymm0";
5099
5100 if (intel_syntax)
5101 as_bad (_("the last operand of `%s' must be `%s%s'"),
5102 i.tm.name, register_prefix, ireg);
5103 else
5104 as_bad (_("the first operand of `%s' must be `%s%s'"),
5105 i.tm.name, register_prefix, ireg);
5106 return 0;
5107}
5108
/* Post-process the matched instruction's operands: synthesize or
   remove implicit operands (SSE2AVX, FirstXmm0, RegKludge), encode
   short-form register opcodes, build the ModRM byte, and emit a
   segment-override prefix when the explicit segment differs from the
   default.  Returns 1 on success, 0 after a diagnostic.  */
static int
process_operands (void)
{
  /* Default segment register this instruction will use for memory
     accesses.  0 means unknown.  This is only for optimizing out
     unnecessary segment overrides.  */
  const seg_entry *default_seg = 0;

  if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
    {
      /* SSE2AVX templates encoded with VEX.vvvv: duplicate or shift
	 operands so the destination appears twice.  */
      unsigned int dupl = i.operands;
      unsigned int dest = dupl - 1;
      unsigned int j;

      /* The destination must be an xmm register.  */
      gas_assert (i.reg_operands
		  && MAX_OPERANDS > dupl
		  && operand_type_equal (&i.types[dest], &regxmm));

      if (i.tm.opcode_modifier.firstxmm0)
	{
	  /* The first operand is implicit and must be xmm0.  */
	  gas_assert (operand_type_equal (&i.types[0], &regxmm));
	  if (register_number (i.op[0].regs) != 0)
	    return bad_implicit_operand (1);

	  if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
	    {
	      /* Keep xmm0 for instructions with VEX prefix and 3
		 sources.  */
	      goto duplicate;
	    }
	  else
	    {
	      /* We remove the first xmm0 and keep the number of
		 operands unchanged, which in fact duplicates the
		 destination.  */
	      for (j = 1; j < i.operands; j++)
		{
		  i.op[j - 1] = i.op[j];
		  i.types[j - 1] = i.types[j];
		  i.tm.operand_types[j - 1] = i.tm.operand_types[j];
		}
	    }
	}
      else if (i.tm.opcode_modifier.implicit1stxmm0)
	{
	  gas_assert ((MAX_OPERANDS - 1) > dupl
		      && (i.tm.opcode_modifier.vexsources
			  == VEX3SOURCES));

	  /* Add the implicit xmm0 for instructions with VEX prefix
	     and 3 sources.  */
	  for (j = i.operands; j > 0; j--)
	    {
	      i.op[j] = i.op[j - 1];
	      i.types[j] = i.types[j - 1];
	      i.tm.operand_types[j] = i.tm.operand_types[j - 1];
	    }
	  i.op[0].regs
	    = (const reg_entry *) hash_find (reg_hash, "xmm0");
	  i.types[0] = regxmm;
	  i.tm.operand_types[0] = regxmm;

	  /* Account for both the inserted xmm0 and the duplicated
	     destination below.  */
	  i.operands += 2;
	  i.reg_operands += 2;
	  i.tm.operands += 2;

	  dupl++;
	  dest++;
	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}
      else
	{
duplicate:
	  /* Append a copy of the destination operand.  */
	  i.operands++;
	  i.reg_operands++;
	  i.tm.operands++;

	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}

       if (i.tm.opcode_modifier.immext)
	 process_immext ();
    }
  else if (i.tm.opcode_modifier.firstxmm0)
    {
      unsigned int j;

      /* The first operand is implicit and must be xmm0/ymm0.  */
      gas_assert (i.reg_operands
		  && (operand_type_equal (&i.types[0], &regxmm)
		      || operand_type_equal (&i.types[0], &regymm)));
      if (register_number (i.op[0].regs) != 0)
	return bad_implicit_operand (i.types[0].bitfield.regxmm);

      /* Drop the implicit first operand by shifting the rest down.  */
      for (j = 1; j < i.operands; j++)
	{
	  i.op[j - 1] = i.op[j];
	  i.types[j - 1] = i.types[j];

	  /* We need to adjust fields in i.tm since they are used by
	     build_modrm_byte.  */
	  i.tm.operand_types [j - 1] = i.tm.operand_types [j];
	}

      i.operands--;
      i.reg_operands--;
      i.tm.operands--;
    }
  else if (i.tm.opcode_modifier.regkludge)
    {
      /* The imul $imm, %reg instruction is converted into
	 imul $imm, %reg, %reg, and the clr %reg instruction
	 is converted into xor %reg, %reg.  */

      unsigned int first_reg_op;

      if (operand_type_check (i.types[0], reg))
	first_reg_op = 0;
      else
	first_reg_op = 1;
      /* Pretend we saw the extra register operand.  */
      gas_assert (i.reg_operands == 1
		  && i.op[first_reg_op + 1].regs == 0);
      i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
      i.types[first_reg_op + 1] = i.types[first_reg_op];
      i.operands++;
      i.reg_operands++;
    }

  if (i.tm.opcode_modifier.shortform)
    {
      /* Short form: the register is encoded in the opcode itself.  */
      if (i.types[0].bitfield.sreg2
	  || i.types[0].bitfield.sreg3)
	{
	  if (i.tm.base_opcode == POP_SEG_SHORT
	      && i.op[0].regs->reg_num == 1)
	    {
	      as_bad (_("you can't `pop %scs'"), register_prefix);
	      return 0;
	    }
	  i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
	  if ((i.op[0].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	}
      else
	{
	  /* The register or float register operand is in operand
	     0 or 1.  */
	  unsigned int op;

	  if (i.types[0].bitfield.floatreg
	      || operand_type_check (i.types[0], reg))
	    op = 0;
	  else
	    op = 1;
	  /* Register goes in low 3 bits of opcode.  */
	  i.tm.base_opcode |= i.op[op].regs->reg_num;
	  if ((i.op[op].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if (!quiet_warnings && i.tm.opcode_modifier.ugh)
	    {
	      /* Warn about some common errors, but press on regardless.
		 The first case can be generated by gcc (<= 2.8.1).  */
	      if (i.operands == 2)
		{
		  /* Reversed arguments on faddp, fsubp, etc.  */
		  as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
			   register_prefix, i.op[!intel_syntax].regs->reg_name,
			   register_prefix, i.op[intel_syntax].regs->reg_name);
		}
	      else
		{
		  /* Extraneous `l' suffix on fp insn.  */
		  as_warn (_("translating to `%s %s%s'"), i.tm.name,
			   register_prefix, i.op[0].regs->reg_name);
		}
	    }
	}
    }
  else if (i.tm.opcode_modifier.modrm)
    {
      /* The opcode is completed (modulo i.tm.extension_opcode which
	 must be put into the modrm byte).  Now, we make the modrm and
	 index base bytes based on all the info we've collected.  */

      default_seg = build_modrm_byte ();
    }
  else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
    {
      /* mov with a moffs operand defaults to %ds.  */
      default_seg = &ds;
    }
  else if (i.tm.opcode_modifier.isstring)
    {
      /* For the string instructions that allow a segment override
	 on one of their operands, the default segment is ds.  */
      default_seg = &ds;
    }

  if (i.tm.base_opcode == 0x8d /* lea */
      && i.seg[0]
      && !quiet_warnings)
    as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);

  /* If a segment was explicitly specified, and the specified segment
     is not the default, use an opcode prefix to select it.  If we
     never figured out what the default segment is, then default_seg
     will be zero at this point, and the specified segment prefix will
     always be used.  */
  if ((i.seg[0]) && (i.seg[0] != default_seg))
    {
      if (!add_prefix (i.seg[0]->seg_prefix))
	return 0;
    }
  return 1;
}
5330
5331static const seg_entry *
5332build_modrm_byte (void)
5333{
5334 const seg_entry *default_seg = 0;
5335 unsigned int source, dest;
5336 int vex_3_sources;
5337
5338 /* The first operand of instructions with VEX prefix and 3 sources
5339 must be VEX_Imm4. */
5340 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5341 if (vex_3_sources)
5342 {
5343 unsigned int nds, reg_slot;
5344 expressionS *exp;
5345
5346 if (i.tm.opcode_modifier.veximmext
5347 && i.tm.opcode_modifier.immext)
5348 {
5349 dest = i.operands - 2;
5350 gas_assert (dest == 3);
5351 }
5352 else
5353 dest = i.operands - 1;
5354 nds = dest - 1;
5355
5356 /* There are 2 kinds of instructions:
5357 1. 5 operands: 4 register operands or 3 register operands
5358 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5359 VexW0 or VexW1. The destination must be either XMM or YMM
5360 register.
5361 2. 4 operands: 4 register operands or 3 register operands
5362 plus 1 memory operand, VexXDS, and VexImmExt */
5363 gas_assert ((i.reg_operands == 4
5364 || (i.reg_operands == 3 && i.mem_operands == 1))
5365 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5366 && (i.tm.opcode_modifier.veximmext
5367 || (i.imm_operands == 1
5368 && i.types[0].bitfield.vec_imm4
5369 && (i.tm.opcode_modifier.vexw == VEXW0
5370 || i.tm.opcode_modifier.vexw == VEXW1)
5371 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5372 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5373
5374 if (i.imm_operands == 0)
5375 {
5376 /* When there is no immediate operand, generate an 8bit
5377 immediate operand to encode the first operand. */
5378 exp = &im_expressions[i.imm_operands++];
5379 i.op[i.operands].imms = exp;
5380 i.types[i.operands] = imm8;
5381 i.operands++;
5382 /* If VexW1 is set, the first operand is the source and
5383 the second operand is encoded in the immediate operand. */
5384 if (i.tm.opcode_modifier.vexw == VEXW1)
5385 {
5386 source = 0;
5387 reg_slot = 1;
5388 }
5389 else
5390 {
5391 source = 1;
5392 reg_slot = 0;
5393 }
5394
5395 /* FMA swaps REG and NDS. */
5396 if (i.tm.cpu_flags.bitfield.cpufma)
5397 {
5398 unsigned int tmp;
5399 tmp = reg_slot;
5400 reg_slot = nds;
5401 nds = tmp;
5402 }
5403
5404 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5405 &regxmm)
5406 || operand_type_equal (&i.tm.operand_types[reg_slot],
5407 &regymm));
5408 exp->X_op = O_constant;
5409 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
5410 }
5411 else
5412 {
5413 unsigned int imm_slot;
5414
5415 if (i.tm.opcode_modifier.vexw == VEXW0)
5416 {
5417 /* If VexW0 is set, the third operand is the source and
5418 the second operand is encoded in the immediate
5419 operand. */
5420 source = 2;
5421 reg_slot = 1;
5422 }
5423 else
5424 {
5425 /* VexW1 is set, the second operand is the source and
5426 the third operand is encoded in the immediate
5427 operand. */
5428 source = 1;
5429 reg_slot = 2;
5430 }
5431
5432 if (i.tm.opcode_modifier.immext)
5433 {
5434 /* When ImmExt is set, the immdiate byte is the last
5435 operand. */
5436 imm_slot = i.operands - 1;
5437 source--;
5438 reg_slot--;
5439 }
5440 else
5441 {
5442 imm_slot = 0;
5443
5444 /* Turn on Imm8 so that output_imm will generate it. */
5445 i.types[imm_slot].bitfield.imm8 = 1;
5446 }
5447
5448 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5449 &regxmm)
5450 || operand_type_equal (&i.tm.operand_types[reg_slot],
5451 &regymm));
5452 i.op[imm_slot].imms->X_add_number
5453 |= register_number (i.op[reg_slot].regs) << 4;
5454 }
5455
5456 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5457 || operand_type_equal (&i.tm.operand_types[nds],
5458 &regymm));
5459 i.vex.register_specifier = i.op[nds].regs;
5460 }
5461 else
5462 source = dest = 0;
5463
5464 /* i.reg_operands MUST be the number of real register operands;
5465 implicit registers do not count. If there are 3 register
5466 operands, it must be a instruction with VexNDS. For a
5467 instruction with VexNDD, the destination register is encoded
5468 in VEX prefix. If there are 4 register operands, it must be
5469 a instruction with VEX prefix and 3 sources. */
5470 if (i.mem_operands == 0
5471 && ((i.reg_operands == 2
5472 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5473 || (i.reg_operands == 3
5474 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5475 || (i.reg_operands == 4 && vex_3_sources)))
5476 {
5477 switch (i.operands)
5478 {
5479 case 2:
5480 source = 0;
5481 break;
5482 case 3:
5483 /* When there are 3 operands, one of them may be immediate,
5484 which may be the first or the last operand. Otherwise,
5485 the first operand must be shift count register (cl) or it
5486 is an instruction with VexNDS. */
5487 gas_assert (i.imm_operands == 1
5488 || (i.imm_operands == 0
5489 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5490 || i.types[0].bitfield.shiftcount)));
5491 if (operand_type_check (i.types[0], imm)
5492 || i.types[0].bitfield.shiftcount)
5493 source = 1;
5494 else
5495 source = 0;
5496 break;
5497 case 4:
5498 /* When there are 4 operands, the first two must be 8bit
5499 immediate operands. The source operand will be the 3rd
5500 one.
5501
5502 For instructions with VexNDS, if the first operand
5503 an imm8, the source operand is the 2nd one. If the last
5504 operand is imm8, the source operand is the first one. */
5505 gas_assert ((i.imm_operands == 2
5506 && i.types[0].bitfield.imm8
5507 && i.types[1].bitfield.imm8)
5508 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5509 && i.imm_operands == 1
5510 && (i.types[0].bitfield.imm8
5511 || i.types[i.operands - 1].bitfield.imm8)));
5512 if (i.imm_operands == 2)
5513 source = 2;
5514 else
5515 {
5516 if (i.types[0].bitfield.imm8)
5517 source = 1;
5518 else
5519 source = 0;
5520 }
5521 break;
5522 case 5:
5523 break;
5524 default:
5525 abort ();
5526 }
5527
5528 if (!vex_3_sources)
5529 {
5530 dest = source + 1;
5531
5532 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5533 {
5534 /* For instructions with VexNDS, the register-only
5535 source operand must be 32/64bit integer, XMM or
5536 YMM register. It is encoded in VEX prefix. We
5537 need to clear RegMem bit before calling
5538 operand_type_equal. */
5539
5540 i386_operand_type op;
5541 unsigned int vvvv;
5542
5543 /* Check register-only source operand when two source
5544 operands are swapped. */
5545 if (!i.tm.operand_types[source].bitfield.baseindex
5546 && i.tm.operand_types[dest].bitfield.baseindex)
5547 {
5548 vvvv = source;
5549 source = dest;
5550 }
5551 else
5552 vvvv = dest;
5553
5554 op = i.tm.operand_types[vvvv];
5555 op.bitfield.regmem = 0;
5556 if ((dest + 1) >= i.operands
5557 || (op.bitfield.reg32 != 1
5558 && !op.bitfield.reg64 != 1
5559 && !operand_type_equal (&op, &regxmm)
5560 && !operand_type_equal (&op, &regymm)))
5561 abort ();
5562 i.vex.register_specifier = i.op[vvvv].regs;
5563 dest++;
5564 }
5565 }
5566
5567 i.rm.mode = 3;
5568 /* One of the register operands will be encoded in the i.tm.reg
5569 field, the other in the combined i.tm.mode and i.tm.regmem
5570 fields. If no form of this instruction supports a memory
5571 destination operand, then we assume the source operand may
5572 sometimes be a memory operand and so we need to store the
5573 destination in the i.rm.reg field. */
5574 if (!i.tm.operand_types[dest].bitfield.regmem
5575 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5576 {
5577 i.rm.reg = i.op[dest].regs->reg_num;
5578 i.rm.regmem = i.op[source].regs->reg_num;
5579 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5580 i.rex |= REX_R;
5581 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5582 i.rex |= REX_B;
5583 }
5584 else
5585 {
5586 i.rm.reg = i.op[source].regs->reg_num;
5587 i.rm.regmem = i.op[dest].regs->reg_num;
5588 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5589 i.rex |= REX_B;
5590 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5591 i.rex |= REX_R;
5592 }
5593 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5594 {
5595 if (!i.types[0].bitfield.control
5596 && !i.types[1].bitfield.control)
5597 abort ();
5598 i.rex &= ~(REX_R | REX_B);
5599 add_prefix (LOCK_PREFIX_OPCODE);
5600 }
5601 }
5602 else
5603 { /* If it's not 2 reg operands... */
5604 unsigned int mem;
5605
5606 if (i.mem_operands)
5607 {
5608 unsigned int fake_zero_displacement = 0;
5609 unsigned int op;
5610
5611 for (op = 0; op < i.operands; op++)
5612 if (operand_type_check (i.types[op], anymem))
5613 break;
5614 gas_assert (op < i.operands);
5615
5616 if (i.tm.opcode_modifier.vecsib)
5617 {
5618 if (i.index_reg->reg_num == RegEiz
5619 || i.index_reg->reg_num == RegRiz)
5620 abort ();
5621
5622 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5623 if (!i.base_reg)
5624 {
5625 i.sib.base = NO_BASE_REGISTER;
5626 i.sib.scale = i.log2_scale_factor;
5627 i.types[op].bitfield.disp8 = 0;
5628 i.types[op].bitfield.disp16 = 0;
5629 i.types[op].bitfield.disp64 = 0;
5630 if (flag_code != CODE_64BIT)
5631 {
5632 /* Must be 32 bit */
5633 i.types[op].bitfield.disp32 = 1;
5634 i.types[op].bitfield.disp32s = 0;
5635 }
5636 else
5637 {
5638 i.types[op].bitfield.disp32 = 0;
5639 i.types[op].bitfield.disp32s = 1;
5640 }
5641 }
5642 i.sib.index = i.index_reg->reg_num;
5643 if ((i.index_reg->reg_flags & RegRex) != 0)
5644 i.rex |= REX_X;
5645 }
5646
5647 default_seg = &ds;
5648
5649 if (i.base_reg == 0)
5650 {
5651 i.rm.mode = 0;
5652 if (!i.disp_operands)
5653 {
5654 fake_zero_displacement = 1;
5655 /* Instructions with VSIB byte need 32bit displacement
5656 if there is no base register. */
5657 if (i.tm.opcode_modifier.vecsib)
5658 i.types[op].bitfield.disp32 = 1;
5659 }
5660 if (i.index_reg == 0)
5661 {
5662 gas_assert (!i.tm.opcode_modifier.vecsib);
5663 /* Operand is just <disp> */
5664 if (flag_code == CODE_64BIT)
5665 {
5666 /* 64bit mode overwrites the 32bit absolute
5667 addressing by RIP relative addressing and
5668 absolute addressing is encoded by one of the
5669 redundant SIB forms. */
5670 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5671 i.sib.base = NO_BASE_REGISTER;
5672 i.sib.index = NO_INDEX_REGISTER;
5673 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5674 ? disp32s : disp32);
5675 }
5676 else if ((flag_code == CODE_16BIT)
5677 ^ (i.prefix[ADDR_PREFIX] != 0))
5678 {
5679 i.rm.regmem = NO_BASE_REGISTER_16;
5680 i.types[op] = disp16;
5681 }
5682 else
5683 {
5684 i.rm.regmem = NO_BASE_REGISTER;
5685 i.types[op] = disp32;
5686 }
5687 }
5688 else if (!i.tm.opcode_modifier.vecsib)
5689 {
5690 /* !i.base_reg && i.index_reg */
5691 if (i.index_reg->reg_num == RegEiz
5692 || i.index_reg->reg_num == RegRiz)
5693 i.sib.index = NO_INDEX_REGISTER;
5694 else
5695 i.sib.index = i.index_reg->reg_num;
5696 i.sib.base = NO_BASE_REGISTER;
5697 i.sib.scale = i.log2_scale_factor;
5698 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5699 i.types[op].bitfield.disp8 = 0;
5700 i.types[op].bitfield.disp16 = 0;
5701 i.types[op].bitfield.disp64 = 0;
5702 if (flag_code != CODE_64BIT)
5703 {
5704 /* Must be 32 bit */
5705 i.types[op].bitfield.disp32 = 1;
5706 i.types[op].bitfield.disp32s = 0;
5707 }
5708 else
5709 {
5710 i.types[op].bitfield.disp32 = 0;
5711 i.types[op].bitfield.disp32s = 1;
5712 }
5713 if ((i.index_reg->reg_flags & RegRex) != 0)
5714 i.rex |= REX_X;
5715 }
5716 }
5717 /* RIP addressing for 64bit mode. */
5718 else if (i.base_reg->reg_num == RegRip ||
5719 i.base_reg->reg_num == RegEip)
5720 {
5721 gas_assert (!i.tm.opcode_modifier.vecsib);
5722 i.rm.regmem = NO_BASE_REGISTER;
5723 i.types[op].bitfield.disp8 = 0;
5724 i.types[op].bitfield.disp16 = 0;
5725 i.types[op].bitfield.disp32 = 0;
5726 i.types[op].bitfield.disp32s = 1;
5727 i.types[op].bitfield.disp64 = 0;
5728 i.flags[op] |= Operand_PCrel;
5729 if (! i.disp_operands)
5730 fake_zero_displacement = 1;
5731 }
5732 else if (i.base_reg->reg_type.bitfield.reg16)
5733 {
5734 gas_assert (!i.tm.opcode_modifier.vecsib);
5735 switch (i.base_reg->reg_num)
5736 {
5737 case 3: /* (%bx) */
5738 if (i.index_reg == 0)
5739 i.rm.regmem = 7;
5740 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5741 i.rm.regmem = i.index_reg->reg_num - 6;
5742 break;
5743 case 5: /* (%bp) */
5744 default_seg = &ss;
5745 if (i.index_reg == 0)
5746 {
5747 i.rm.regmem = 6;
5748 if (operand_type_check (i.types[op], disp) == 0)
5749 {
5750 /* fake (%bp) into 0(%bp) */
5751 i.types[op].bitfield.disp8 = 1;
5752 fake_zero_displacement = 1;
5753 }
5754 }
5755 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5756 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5757 break;
5758 default: /* (%si) -> 4 or (%di) -> 5 */
5759 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5760 }
5761 i.rm.mode = mode_from_disp_size (i.types[op]);
5762 }
5763 else /* i.base_reg and 32/64 bit mode */
5764 {
5765 if (flag_code == CODE_64BIT
5766 && operand_type_check (i.types[op], disp))
5767 {
5768 i386_operand_type temp;
5769 operand_type_set (&temp, 0);
5770 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5771 i.types[op] = temp;
5772 if (i.prefix[ADDR_PREFIX] == 0)
5773 i.types[op].bitfield.disp32s = 1;
5774 else
5775 i.types[op].bitfield.disp32 = 1;
5776 }
5777
5778 if (!i.tm.opcode_modifier.vecsib)
5779 i.rm.regmem = i.base_reg->reg_num;
5780 if ((i.base_reg->reg_flags & RegRex) != 0)
5781 i.rex |= REX_B;
5782 i.sib.base = i.base_reg->reg_num;
5783 /* x86-64 ignores REX prefix bit here to avoid decoder
5784 complications. */
5785 if (!(i.base_reg->reg_flags & RegRex)
5786 && (i.base_reg->reg_num == EBP_REG_NUM
5787 || i.base_reg->reg_num == ESP_REG_NUM))
5788 default_seg = &ss;
5789 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
5790 {
5791 fake_zero_displacement = 1;
5792 i.types[op].bitfield.disp8 = 1;
5793 }
5794 i.sib.scale = i.log2_scale_factor;
5795 if (i.index_reg == 0)
5796 {
5797 gas_assert (!i.tm.opcode_modifier.vecsib);
5798 /* <disp>(%esp) becomes two byte modrm with no index
5799 register. We've already stored the code for esp
5800 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5801 Any base register besides %esp will not use the
5802 extra modrm byte. */
5803 i.sib.index = NO_INDEX_REGISTER;
5804 }
5805 else if (!i.tm.opcode_modifier.vecsib)
5806 {
5807 if (i.index_reg->reg_num == RegEiz
5808 || i.index_reg->reg_num == RegRiz)
5809 i.sib.index = NO_INDEX_REGISTER;
5810 else
5811 i.sib.index = i.index_reg->reg_num;
5812 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5813 if ((i.index_reg->reg_flags & RegRex) != 0)
5814 i.rex |= REX_X;
5815 }
5816
5817 if (i.disp_operands
5818 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5819 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5820 i.rm.mode = 0;
5821 else
5822 {
5823 if (!fake_zero_displacement
5824 && !i.disp_operands
5825 && i.disp_encoding)
5826 {
5827 fake_zero_displacement = 1;
5828 if (i.disp_encoding == disp_encoding_8bit)
5829 i.types[op].bitfield.disp8 = 1;
5830 else
5831 i.types[op].bitfield.disp32 = 1;
5832 }
5833 i.rm.mode = mode_from_disp_size (i.types[op]);
5834 }
5835 }
5836
5837 if (fake_zero_displacement)
5838 {
5839 /* Fakes a zero displacement assuming that i.types[op]
5840 holds the correct displacement size. */
5841 expressionS *exp;
5842
5843 gas_assert (i.op[op].disps == 0);
5844 exp = &disp_expressions[i.disp_operands++];
5845 i.op[op].disps = exp;
5846 exp->X_op = O_constant;
5847 exp->X_add_number = 0;
5848 exp->X_add_symbol = (symbolS *) 0;
5849 exp->X_op_symbol = (symbolS *) 0;
5850 }
5851
5852 mem = op;
5853 }
5854 else
5855 mem = ~0;
5856
5857 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5858 {
5859 if (operand_type_check (i.types[0], imm))
5860 i.vex.register_specifier = NULL;
5861 else
5862 {
5863 /* VEX.vvvv encodes one of the sources when the first
5864 operand is not an immediate. */
5865 if (i.tm.opcode_modifier.vexw == VEXW0)
5866 i.vex.register_specifier = i.op[0].regs;
5867 else
5868 i.vex.register_specifier = i.op[1].regs;
5869 }
5870
5871 /* Destination is a XMM register encoded in the ModRM.reg
5872 and VEX.R bit. */
5873 i.rm.reg = i.op[2].regs->reg_num;
5874 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5875 i.rex |= REX_R;
5876
5877 /* ModRM.rm and VEX.B encodes the other source. */
5878 if (!i.mem_operands)
5879 {
5880 i.rm.mode = 3;
5881
5882 if (i.tm.opcode_modifier.vexw == VEXW0)
5883 i.rm.regmem = i.op[1].regs->reg_num;
5884 else
5885 i.rm.regmem = i.op[0].regs->reg_num;
5886
5887 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5888 i.rex |= REX_B;
5889 }
5890 }
5891 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5892 {
5893 i.vex.register_specifier = i.op[2].regs;
5894 if (!i.mem_operands)
5895 {
5896 i.rm.mode = 3;
5897 i.rm.regmem = i.op[1].regs->reg_num;
5898 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5899 i.rex |= REX_B;
5900 }
5901 }
5902 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5903 (if any) based on i.tm.extension_opcode. Again, we must be
5904 careful to make sure that segment/control/debug/test/MMX
5905 registers are coded into the i.rm.reg field. */
5906 else if (i.reg_operands)
5907 {
5908 unsigned int op;
5909 unsigned int vex_reg = ~0;
5910
5911 for (op = 0; op < i.operands; op++)
5912 if (i.types[op].bitfield.reg8
5913 || i.types[op].bitfield.reg16
5914 || i.types[op].bitfield.reg32
5915 || i.types[op].bitfield.reg64
5916 || i.types[op].bitfield.regmmx
5917 || i.types[op].bitfield.regxmm
5918 || i.types[op].bitfield.regymm
5919 || i.types[op].bitfield.sreg2
5920 || i.types[op].bitfield.sreg3
5921 || i.types[op].bitfield.control
5922 || i.types[op].bitfield.debug
5923 || i.types[op].bitfield.test)
5924 break;
5925
5926 if (vex_3_sources)
5927 op = dest;
5928 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5929 {
5930 /* For instructions with VexNDS, the register-only
5931 source operand is encoded in VEX prefix. */
5932 gas_assert (mem != (unsigned int) ~0);
5933
5934 if (op > mem)
5935 {
5936 vex_reg = op++;
5937 gas_assert (op < i.operands);
5938 }
5939 else
5940 {
5941 /* Check register-only source operand when two source
5942 operands are swapped. */
5943 if (!i.tm.operand_types[op].bitfield.baseindex
5944 && i.tm.operand_types[op + 1].bitfield.baseindex)
5945 {
5946 vex_reg = op;
5947 op += 2;
5948 gas_assert (mem == (vex_reg + 1)
5949 && op < i.operands);
5950 }
5951 else
5952 {
5953 vex_reg = op + 1;
5954 gas_assert (vex_reg < i.operands);
5955 }
5956 }
5957 }
5958 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5959 {
5960 /* For instructions with VexNDD, the register destination
5961 is encoded in VEX prefix. */
5962 if (i.mem_operands == 0)
5963 {
5964 /* There is no memory operand. */
5965 gas_assert ((op + 2) == i.operands);
5966 vex_reg = op + 1;
5967 }
5968 else
5969 {
5970 /* There are only 2 operands. */
5971 gas_assert (op < 2 && i.operands == 2);
5972 vex_reg = 1;
5973 }
5974 }
5975 else
5976 gas_assert (op < i.operands);
5977
5978 if (vex_reg != (unsigned int) ~0)
5979 {
5980 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5981
5982 if (type->bitfield.reg32 != 1
5983 && type->bitfield.reg64 != 1
5984 && !operand_type_equal (type, &regxmm)
5985 && !operand_type_equal (type, &regymm))
5986 abort ();
5987
5988 i.vex.register_specifier = i.op[vex_reg].regs;
5989 }
5990
5991 /* Don't set OP operand twice. */
5992 if (vex_reg != op)
5993 {
5994 /* If there is an extension opcode to put here, the
5995 register number must be put into the regmem field. */
5996 if (i.tm.extension_opcode != None)
5997 {
5998 i.rm.regmem = i.op[op].regs->reg_num;
5999 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6000 i.rex |= REX_B;
6001 }
6002 else
6003 {
6004 i.rm.reg = i.op[op].regs->reg_num;
6005 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6006 i.rex |= REX_R;
6007 }
6008 }
6009
6010 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6011 must set it to 3 to indicate this is a register operand
6012 in the regmem field. */
6013 if (!i.mem_operands)
6014 i.rm.mode = 3;
6015 }
6016
6017 /* Fill in i.rm.reg field with extension opcode (if any). */
6018 if (i.tm.extension_opcode != None)
6019 i.rm.reg = i.tm.extension_opcode;
6020 }
6021 return default_seg;
6022}
6023
/* Output a relaxable branch: emit any prefixes plus the first opcode
   byte into the fixed part of the current frag, then close the frag
   with frag_var so the relaxation machinery (md_estimate_size_before_relax
   / md_convert_frag) can later choose the final displacement size.  */
static void
output_branch (void)
{
  char *p;
  int size;
  int code16;
  int prefix;
  relax_substateT subtype;
  symbolS *sym;
  offsetT off;

  code16 = flag_code == CODE_16BIT ? CODE16 : 0;
  /* An explicit 32-bit displacement encoding request forces the
     large (BIG) relax variant from the start.  */
  size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;

  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      /* The 66h prefix toggles the operand size, and with it the
	 width of the branch displacement.  */
      code16 ^= CODE16;
    }
  /* Pentium4 branch hints.  */
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
    {
      prefix++;
      i.prefixes--;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes--;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* It's always a symbol; End frag & setup for relax.
     Make sure there is enough room in this frag for the largest
     instruction we may generate in md_convert_frag.  This is 2
     bytes for the opcode and room for the prefix and largest
     displacement.  */
  frag_grow (prefix + 2 + 4);
  /* Prefix and 1 opcode byte go in fr_fix.  */
  p = frag_more (prefix + 1);
  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
    *p++ = i.prefix[SEG_PREFIX];
  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];
  *p = i.tm.base_opcode;

  if ((unsigned char) *p == JUMP_PC_RELATIVE)
    subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
  else if (cpu_arch_flags.bitfield.cpui386)
    subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
  else
    subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
  subtype |= code16;

  sym = i.op[0].disps->X_add_symbol;
  off = i.op[0].disps->X_add_number;

  if (i.op[0].disps->X_op != O_constant
      && i.op[0].disps->X_op != O_symbol)
    {
      /* Handle complex expressions.  */
      sym = make_expr_symbol (i.op[0].disps);
      off = 0;
    }

  /* 1 possible extra opcode + 4 byte displacement go in var part.
     Pass reloc in fr_var.  */
  frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
}
6101
/* Output a pc-relative jump whose displacement width is fixed (byte
   jumps such as loop/jecxz, or word/dword jumps); the displacement is
   emitted as a SIZE-byte fix-up rather than through the relaxation
   machinery used by output_branch.  */
static void
output_jump (void)
{
  char *p;
  int size;
  fixS *fixP;

  if (i.tm.opcode_modifier.jumpbyte)
    {
      /* This is a loop or jecxz type instruction.  */
      size = 1;
      if (i.prefix[ADDR_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
	  i.prefixes -= 1;
	}
      /* Pentium4 branch hints.  */
      if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
	  || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
	{
	  FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
	  i.prefixes--;
	}
    }
  else
    {
      int code16;

      code16 = 0;
      if (flag_code == CODE_16BIT)
	code16 = CODE16;

      if (i.prefix[DATA_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
	  i.prefixes -= 1;
	  /* 66h toggles the operand size, and with it the
	     displacement width.  */
	  code16 ^= CODE16;
	}

      size = 4;
      if (code16)
	size = 2;
    }

  if (i.prefix[REX_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
      i.prefixes -= 1;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  p = frag_more (i.tm.opcode_length + size);
  switch (i.tm.opcode_length)
    {
    case 2:
      *p++ = i.tm.base_opcode >> 8;
      /* Fall through to emit the low opcode byte.  */
    case 1:
      *p++ = i.tm.base_opcode;
      break;
    default:
      abort ();
    }

  fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
		      i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));

  /* All jumps handled here are signed, but don't use a signed limit
     check for 32 and 16 bit jumps as we want to allow wrap around at
     4G and 64k respectively.  */
  if (size == 1)
    fixP->fx_signed = 1;
}
6176
6177static void
6178output_interseg_jump (void)
6179{
6180 char *p;
6181 int size;
6182 int prefix;
6183 int code16;
6184
6185 code16 = 0;
6186 if (flag_code == CODE_16BIT)
6187 code16 = CODE16;
6188
6189 prefix = 0;
6190 if (i.prefix[DATA_PREFIX] != 0)
6191 {
6192 prefix = 1;
6193 i.prefixes -= 1;
6194 code16 ^= CODE16;
6195 }
6196 if (i.prefix[REX_PREFIX] != 0)
6197 {
6198 prefix++;
6199 i.prefixes -= 1;
6200 }
6201
6202 size = 4;
6203 if (code16)
6204 size = 2;
6205
6206 if (i.prefixes != 0 && !intel_syntax)
6207 as_warn (_("skipping prefixes on this instruction"));
6208
6209 /* 1 opcode; 2 segment; offset */
6210 p = frag_more (prefix + 1 + 2 + size);
6211
6212 if (i.prefix[DATA_PREFIX] != 0)
6213 *p++ = DATA_PREFIX_OPCODE;
6214
6215 if (i.prefix[REX_PREFIX] != 0)
6216 *p++ = i.prefix[REX_PREFIX];
6217
6218 *p++ = i.tm.base_opcode;
6219 if (i.op[1].imms->X_op == O_constant)
6220 {
6221 offsetT n = i.op[1].imms->X_add_number;
6222
6223 if (size == 2
6224 && !fits_in_unsigned_word (n)
6225 && !fits_in_signed_word (n))
6226 {
6227 as_bad (_("16-bit jump out of range"));
6228 return;
6229 }
6230 md_number_to_chars (p, n, size);
6231 }
6232 else
6233 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6234 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6235 if (i.op[0].imms->X_op != O_constant)
6236 as_bad (_("can't handle non absolute segment in `%s'"),
6237 i.tm.name);
6238 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6239}
6240
6241static void
6242output_insn (void)
6243{
6244 fragS *insn_start_frag;
6245 offsetT insn_start_off;
6246
6247 /* Tie dwarf2 debug info to the address at the start of the insn.
6248 We can't do this after the insn has been output as the current
6249 frag may have been closed off. eg. by frag_var. */
6250 dwarf2_emit_insn (0);
6251
6252 insn_start_frag = frag_now;
6253 insn_start_off = frag_now_fix ();
6254
6255 /* Output jumps. */
6256 if (i.tm.opcode_modifier.jump)
6257 output_branch ();
6258 else if (i.tm.opcode_modifier.jumpbyte
6259 || i.tm.opcode_modifier.jumpdword)
6260 output_jump ();
6261 else if (i.tm.opcode_modifier.jumpintersegment)
6262 output_interseg_jump ();
6263 else
6264 {
6265 /* Output normal instructions here. */
6266 char *p;
6267 unsigned char *q;
6268 unsigned int j;
6269 unsigned int prefix;
6270
6271 /* Since the VEX prefix contains the implicit prefix, we don't
6272 need the explicit prefix. */
6273 if (!i.tm.opcode_modifier.vex)
6274 {
6275 switch (i.tm.opcode_length)
6276 {
6277 case 3:
6278 if (i.tm.base_opcode & 0xff000000)
6279 {
6280 prefix = (i.tm.base_opcode >> 24) & 0xff;
6281 goto check_prefix;
6282 }
6283 break;
6284 case 2:
6285 if ((i.tm.base_opcode & 0xff0000) != 0)
6286 {
6287 prefix = (i.tm.base_opcode >> 16) & 0xff;
6288 if (i.tm.cpu_flags.bitfield.cpupadlock)
6289 {
6290check_prefix:
6291 if (prefix != REPE_PREFIX_OPCODE
6292 || (i.prefix[REP_PREFIX]
6293 != REPE_PREFIX_OPCODE))
6294 add_prefix (prefix);
6295 }
6296 else
6297 add_prefix (prefix);
6298 }
6299 break;
6300 case 1:
6301 break;
6302 default:
6303 abort ();
6304 }
6305
6306 /* The prefix bytes. */
6307 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6308 if (*q)
6309 FRAG_APPEND_1_CHAR (*q);
6310 }
6311 else
6312 {
6313 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6314 if (*q)
6315 switch (j)
6316 {
6317 case REX_PREFIX:
6318 /* REX byte is encoded in VEX prefix. */
6319 break;
6320 case SEG_PREFIX:
6321 case ADDR_PREFIX:
6322 FRAG_APPEND_1_CHAR (*q);
6323 break;
6324 default:
6325 /* There should be no other prefixes for instructions
6326 with VEX prefix. */
6327 abort ();
6328 }
6329
6330 /* Now the VEX prefix. */
6331 p = frag_more (i.vex.length);
6332 for (j = 0; j < i.vex.length; j++)
6333 p[j] = i.vex.bytes[j];
6334 }
6335
6336 /* Now the opcode; be careful about word order here! */
6337 if (i.tm.opcode_length == 1)
6338 {
6339 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6340 }
6341 else
6342 {
6343 switch (i.tm.opcode_length)
6344 {
6345 case 3:
6346 p = frag_more (3);
6347 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6348 break;
6349 case 2:
6350 p = frag_more (2);
6351 break;
6352 default:
6353 abort ();
6354 break;
6355 }
6356
6357 /* Put out high byte first: can't use md_number_to_chars! */
6358 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6359 *p = i.tm.base_opcode & 0xff;
6360 }
6361
6362 /* Now the modrm byte and sib byte (if present). */
6363 if (i.tm.opcode_modifier.modrm)
6364 {
6365 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6366 | i.rm.reg << 3
6367 | i.rm.mode << 6));
6368 /* If i.rm.regmem == ESP (4)
6369 && i.rm.mode != (Register mode)
6370 && not 16 bit
6371 ==> need second modrm byte. */
6372 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6373 && i.rm.mode != 3
6374 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6375 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6376 | i.sib.index << 3
6377 | i.sib.scale << 6));
6378 }
6379
6380 if (i.disp_operands)
6381 output_disp (insn_start_frag, insn_start_off);
6382
6383 if (i.imm_operands)
6384 output_imm (insn_start_frag, insn_start_off);
6385 }
6386
6387#ifdef DEBUG386
6388 if (flag_debug)
6389 {
6390 pi ("" /*line*/, &i);
6391 }
6392#endif /* DEBUG386 */
6393}
6394
6395/* Return the size of the displacement operand N. */
6396
6397static int
6398disp_size (unsigned int n)
6399{
6400 int size = 4;
6401 if (i.types[n].bitfield.disp64)
6402 size = 8;
6403 else if (i.types[n].bitfield.disp8)
6404 size = 1;
6405 else if (i.types[n].bitfield.disp16)
6406 size = 2;
6407 return size;
6408}
6409
6410/* Return the size of the immediate operand N. */
6411
6412static int
6413imm_size (unsigned int n)
6414{
6415 int size = 4;
6416 if (i.types[n].bitfield.imm64)
6417 size = 8;
6418 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6419 size = 1;
6420 else if (i.types[n].bitfield.imm16)
6421 size = 2;
6422 return size;
6423}
6424
6425static void
6426output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6427{
6428 char *p;
6429 unsigned int n;
6430
6431 for (n = 0; n < i.operands; n++)
6432 {
6433 if (operand_type_check (i.types[n], disp))
6434 {
6435 if (i.op[n].disps->X_op == O_constant)
6436 {
6437 int size = disp_size (n);
6438 offsetT val;
6439
6440 val = offset_in_range (i.op[n].disps->X_add_number,
6441 size);
6442 p = frag_more (size);
6443 md_number_to_chars (p, val, size);
6444 }
6445 else
6446 {
6447 enum bfd_reloc_code_real reloc_type;
6448 int size = disp_size (n);
6449 int sign = i.types[n].bitfield.disp32s;
6450 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6451
6452 /* We can't have 8 bit displacement here. */
6453 gas_assert (!i.types[n].bitfield.disp8);
6454
6455 /* The PC relative address is computed relative
6456 to the instruction boundary, so in case immediate
6457 fields follows, we need to adjust the value. */
6458 if (pcrel && i.imm_operands)
6459 {
6460 unsigned int n1;
6461 int sz = 0;
6462
6463 for (n1 = 0; n1 < i.operands; n1++)
6464 if (operand_type_check (i.types[n1], imm))
6465 {
6466 /* Only one immediate is allowed for PC
6467 relative address. */
6468 gas_assert (sz == 0);
6469 sz = imm_size (n1);
6470 i.op[n].disps->X_add_number -= sz;
6471 }
6472 /* We should find the immediate. */
6473 gas_assert (sz != 0);
6474 }
6475
6476 p = frag_more (size);
6477 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6478 if (GOT_symbol
6479 && GOT_symbol == i.op[n].disps->X_add_symbol
6480 && (((reloc_type == BFD_RELOC_32
6481 || reloc_type == BFD_RELOC_X86_64_32S
6482 || (reloc_type == BFD_RELOC_64
6483 && object_64bit))
6484 && (i.op[n].disps->X_op == O_symbol
6485 || (i.op[n].disps->X_op == O_add
6486 && ((symbol_get_value_expression
6487 (i.op[n].disps->X_op_symbol)->X_op)
6488 == O_subtract))))
6489 || reloc_type == BFD_RELOC_32_PCREL))
6490 {
6491 offsetT add;
6492
6493 if (insn_start_frag == frag_now)
6494 add = (p - frag_now->fr_literal) - insn_start_off;
6495 else
6496 {
6497 fragS *fr;
6498
6499 add = insn_start_frag->fr_fix - insn_start_off;
6500 for (fr = insn_start_frag->fr_next;
6501 fr && fr != frag_now; fr = fr->fr_next)
6502 add += fr->fr_fix;
6503 add += p - frag_now->fr_literal;
6504 }
6505
6506 if (!object_64bit)
6507 {
6508 reloc_type = BFD_RELOC_386_GOTPC;
6509 i.op[n].imms->X_add_number += add;
6510 }
6511 else if (reloc_type == BFD_RELOC_64)
6512 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6513 else
6514 /* Don't do the adjustment for x86-64, as there
6515 the pcrel addressing is relative to the _next_
6516 insn, and that is taken care of in other code. */
6517 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6518 }
6519 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6520 i.op[n].disps, pcrel, reloc_type);
6521 }
6522 }
6523 }
6524}
6525
/* Emit the immediate operand(s) of the current instruction, either as
   literal bytes (O_constant) or as fix-ups to be relocated later.
   INSN_START_FRAG/INSN_START_OFF locate the start of the instruction;
   they are needed to bias _GLOBAL_OFFSET_TABLE_ expressions (see the
   long explanation below).  */
static void
output_imm (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      if (operand_type_check (i.types[n], imm))
	{
	  if (i.op[n].imms->X_op == O_constant)
	    {
	      int size = imm_size (n);
	      offsetT val;

	      val = offset_in_range (i.op[n].imms->X_add_number,
				     size);
	      p = frag_more (size);
	      md_number_to_chars (p, val, size);
	    }
	  else
	    {
	      /* Not absolute_section.
		 Need a 32-bit fixup (don't support 8bit
		 non-absolute imms).  Try to support other
		 sizes ...  */
	      enum bfd_reloc_code_real reloc_type;
	      int size = imm_size (n);
	      int sign;

	      /* A 32-bit immediate in a 64-bit operation is
		 sign-extended by the CPU, so ask for a signed
		 relocation.  */
	      if (i.types[n].bitfield.imm32s
		  && (i.suffix == QWORD_MNEM_SUFFIX
		      || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
		sign = 1;
	      else
		sign = 0;

	      p = frag_more (size);
	      reloc_type = reloc (size, 0, sign, i.reloc[n]);

	      /* This is tough to explain.  We end up with this one if we
	       * have operands that look like
	       * "_GLOBAL_OFFSET_TABLE_+[.-.L284]".  The goal here is to
	       * obtain the absolute address of the GOT, and it is strongly
	       * preferable from a performance point of view to avoid using
	       * a runtime relocation for this.  The actual sequence of
	       * instructions often look something like:
	       *
	       *	call	.L66
	       * .L66:
	       *	popl	%ebx
	       *	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
	       *
	       * The call and pop essentially return the absolute address
	       * of the label .L66 and store it in %ebx.  The linker itself
	       * will ultimately change the first operand of the addl so
	       * that %ebx points to the GOT, but to keep things simple, the
	       * .o file must have this operand set so that it generates not
	       * the absolute address of .L66, but the absolute address of
	       * itself.  This allows the linker itself simply treat a GOTPC
	       * relocation as asking for a pcrel offset to the GOT to be
	       * added in, and the addend of the relocation is stored in the
	       * operand field for the instruction itself.
	       *
	       * Our job here is to fix the operand so that it would add
	       * the correct offset so that %ebx would point to itself.  The
	       * thing that is tricky is that .-.L66 will point to the
	       * beginning of the instruction, so we need to further modify
	       * the operand so that it will point to itself.  There are
	       * other cases where you have something like:
	       *
	       *	.long	$_GLOBAL_OFFSET_TABLE_+[.-.L66]
	       *
	       * and here no correction would be required.  Internally in
	       * the assembler we treat operands of this form as not being
	       * pcrel since the '.' is explicitly mentioned, and I wonder
	       * whether it would simplify matters to do it this way.  Who
	       * knows.  In earlier versions of the PIC patches, the
	       * pcrel_adjust field was used to store the correction, but
	       * since the expression is not pcrel, I felt it would be
	       * confusing to do it this way.  */

	      if ((reloc_type == BFD_RELOC_32
		   || reloc_type == BFD_RELOC_X86_64_32S
		   || reloc_type == BFD_RELOC_64)
		  && GOT_symbol
		  && GOT_symbol == i.op[n].imms->X_add_symbol
		  && (i.op[n].imms->X_op == O_symbol
		      || (i.op[n].imms->X_op == O_add
			  && ((symbol_get_value_expression
			       (i.op[n].imms->X_op_symbol)->X_op)
			      == O_subtract))))
		{
		  offsetT add;

		  /* ADD is the distance from the start of the insn
		     to this immediate field, possibly spanning
		     several frags.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    reloc_type = BFD_RELOC_386_GOTPC;
		  else if (size == 4)
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		  else if (size == 8)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  i.op[n].imms->X_add_number += add;
		}
	      fix_new_exp (frag_now, p - frag_now->fr_literal, size,
			   i.op[n].imms, 0, reloc_type);
	    }
	}
    }
}
6648\f
6649/* x86_cons_fix_new is called via the expression parsing code when a
6650 reloc is needed. We use this hook to get the correct .got reloc. */
6651static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6652static int cons_sign = -1;
6653
6654void
6655x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6656 expressionS *exp)
6657{
6658 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6659
6660 got_reloc = NO_RELOC;
6661
6662#ifdef TE_PE
6663 if (exp->X_op == O_secrel)
6664 {
6665 exp->X_op = O_symbol;
6666 r = BFD_RELOC_32_SECREL;
6667 }
6668#endif
6669
6670 fix_new_exp (frag, off, len, exp, 0, r);
6671}
6672
6673/* Export the ABI address size for use by TC_ADDRESS_BYTES for the
6674 purpose of the `.dc.a' internal pseudo-op. */
6675
6676int
6677x86_address_bytes (void)
6678{
6679 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
6680 return 4;
6681 return stdoutput->arch_info->bits_per_address / 8;
6682}
6683
6684#if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6685 || defined (LEX_AT)
6686# define lex_got(reloc, adjust, types) NULL
6687#else
6688/* Parse operands of the form
6689 <symbol>@GOTOFF+<nnn>
6690 and similar .plt or .got references.
6691
6692 If we find one, set up the correct relocation in RELOC and copy the
6693 input string, minus the `@GOTOFF' into a malloc'd buffer for
6694 parsing by the calling routine. Return this buffer, and if ADJUST
6695 is non-null set it to the length of the string we removed from the
6696 input line. Otherwise return NULL. */
/* Parse operands of the form
   <symbol>@GOTOFF+<nnn>
   and similar @-suffixed relocation specifiers (see GOTREL[] below).

   If one is found, set up the correct relocation in *REL and return a
   malloc'd copy of the input line with the `@...' token removed, for
   re-parsing by the caller.  If ADJUST is non-NULL set it to the
   number of characters removed from the input line.  If TYPES is
   non-NULL, narrow it to the operand types the relocation permits.
   Return NULL when no (known) @-token is present.  */
static char *
lex_got (enum bfd_reloc_code_real *rel,
         int *adjust,
         i386_operand_type *types)
{
  /* Some of the relocations depend on the size of what field is to
     be relocated.  But in our callers i386_immediate and i386_displacement
     we don't yet know the operand size (this will be set by insn
     matching).  Hence we record the word32 relocation here,
     and adjust the reloc according to the real size in reloc().  */
  static const struct {
    const char *str;
    int len;
    /* rel[0] is the 32-bit reloc, rel[1] the 64-bit one; a slot of
       _dummy_first_bfd_reloc_code_real (0) means the token is not
       supported for that object format.  */
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  } gotrel[] = {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    { STRING_COMMA_LEN ("SIZE"),      { BFD_RELOC_SIZE32,
                                        BFD_RELOC_SIZE32 },
      OPERAND_TYPE_IMM32_64 },
#endif
    { STRING_COMMA_LEN ("PLTOFF"),   { _dummy_first_bfd_reloc_code_real,
                                       BFD_RELOC_X86_64_PLTOFF64 },
      OPERAND_TYPE_IMM64 },
    { STRING_COMMA_LEN ("PLT"),      { BFD_RELOC_386_PLT32,
                                       BFD_RELOC_X86_64_PLT32 },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTPLT"),   { _dummy_first_bfd_reloc_code_real,
                                       BFD_RELOC_X86_64_GOTPLT64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTOFF"),   { BFD_RELOC_386_GOTOFF,
                                       BFD_RELOC_X86_64_GOTOFF64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
                                       BFD_RELOC_X86_64_GOTPCREL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSGD"),    { BFD_RELOC_386_TLS_GD,
                                       BFD_RELOC_X86_64_TLSGD },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSLDM"),   { BFD_RELOC_386_TLS_LDM,
                                       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("TLSLD"),    { _dummy_first_bfd_reloc_code_real,
                                       BFD_RELOC_X86_64_TLSLD },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
                                       BFD_RELOC_X86_64_GOTTPOFF },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TPOFF"),    { BFD_RELOC_386_TLS_LE_32,
                                       BFD_RELOC_X86_64_TPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("NTPOFF"),   { BFD_RELOC_386_TLS_LE,
                                       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("DTPOFF"),   { BFD_RELOC_386_TLS_LDO_32,
                                       BFD_RELOC_X86_64_DTPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
                                       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
                                       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("GOT"),      { BFD_RELOC_386_GOT32,
                                       BFD_RELOC_X86_64_GOT32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32 },
    { STRING_COMMA_LEN ("TLSDESC"),  { BFD_RELOC_386_TLS_GOTDESC,
                                       BFD_RELOC_X86_64_GOTPC32_TLSDESC },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSCALL"),  { BFD_RELOC_386_TLS_DESC_CALL,
                                       BFD_RELOC_X86_64_TLSDESC_CALL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
  };
  char *cp;
  unsigned int j;

#if defined (OBJ_MAYBE_ELF)
  if (!IS_ELF)
    return NULL;
#endif

  /* Scan up to the '@'; bail out if the operand ends first.  Note that
     prefixes like "GOT" are listed after longer tokens sharing the same
     prefix ("GOTOFF" etc.), so the first strncasecmp match is correct.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;
      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
        {
          if (gotrel[j].rel[object_64bit] != 0)
            {
              int first, second;
              char *tmpbuf, *past_reloc;

              *rel = gotrel[j].rel[object_64bit];

              if (types)
                {
                  if (flag_code != CODE_64BIT)
                    {
                      types->bitfield.imm32 = 1;
                      types->bitfield.disp32 = 1;
                    }
                  else
                    *types = gotrel[j].types64;
                }

              /* Any GOT-relative token (everything except @SIZE, entry 0)
                 requires the GOT symbol to exist.  */
              if (j != 0 && GOT_symbol == NULL)
                GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);

              /* The length of the first part of our input line.  */
              first = cp - input_line_pointer;

              /* The second part goes from after the reloc token until
                 (and including) an end_of_line char or comma.  */
              past_reloc = cp + 1 + len;
              cp = past_reloc;
              while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
                ++cp;
              second = cp + 1 - past_reloc;

              /* Allocate and copy string.  The trailing NUL shouldn't
                 be necessary, but be safe.  */
              tmpbuf = (char *) xmalloc (first + second + 2);
              memcpy (tmpbuf, input_line_pointer, first);
              if (second != 0 && *past_reloc != ' ')
                /* Replace the relocation token with ' ', so that
                   errors like foo@GOTOFF1 will be detected.  */
                tmpbuf[first++] = ' ';
              else
                /* Increment length by 1 if the relocation token is
                   removed.  */
                len++;
              if (adjust)
                *adjust = len;
              memcpy (tmpbuf + first, past_reloc, second);
              tmpbuf[first + second] = '\0';
              return tmpbuf;
            }

          as_bad (_("@%s reloc is not supported with %d-bit output format"),
                  gotrel[j].str, 1 << (5 + object_64bit));
          return NULL;
        }
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
6847#endif
6848
6849#ifdef TE_PE
6850#ifdef lex_got
6851#undef lex_got
6852#endif
6853/* Parse operands of the form
6854 <symbol>@SECREL32+<nnn>
6855
6856 If we find one, set up the correct relocation in RELOC and copy the
6857 input string, minus the `@SECREL32' into a malloc'd buffer for
6858 parsing by the calling routine. Return this buffer, and if ADJUST
6859 is non-null set it to the length of the string we removed from the
6860 input line. Otherwise return NULL.
6861
6862 This function is copied from the ELF version above adjusted for PE targets. */
6863
/* NOTE(review): REL, ADJUST and TYPES are marked ATTRIBUTE_UNUSED but
   are in fact all used below; the attribute is harmless but misleading
   and could be dropped.  */
static char *
lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
         int *adjust ATTRIBUTE_UNUSED,
         i386_operand_type *types ATTRIBUTE_UNUSED)
{
  /* Table of recognized @-tokens; only @SECREL32 for PE targets.
     Layout matches the ELF lex_got above.  */
  static const struct
  {
    const char *str;
    int len;
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  }
  gotrel[] =
  {
    { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
                                       BFD_RELOC_32_SECREL },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
  };

  char *cp;
  unsigned j;

  /* Scan up to the '@'; bail out if the operand ends first.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;

      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
        {
          if (gotrel[j].rel[object_64bit] != 0)
            {
              int first, second;
              char *tmpbuf, *past_reloc;

              *rel = gotrel[j].rel[object_64bit];
              if (adjust)
                *adjust = len;

              if (types)
                {
                  if (flag_code != CODE_64BIT)
                    {
                      types->bitfield.imm32 = 1;
                      types->bitfield.disp32 = 1;
                    }
                  else
                    *types = gotrel[j].types64;
                }

              /* The length of the first part of our input line.  */
              first = cp - input_line_pointer;

              /* The second part goes from after the reloc token until
                 (and including) an end_of_line char or comma.  */
              past_reloc = cp + 1 + len;
              cp = past_reloc;
              while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
                ++cp;
              second = cp + 1 - past_reloc;

              /* Allocate and copy string.  The trailing NUL shouldn't
                 be necessary, but be safe.  */
              tmpbuf = (char *) xmalloc (first + second + 2);
              memcpy (tmpbuf, input_line_pointer, first);
              if (second != 0 && *past_reloc != ' ')
                /* Replace the relocation token with ' ', so that
                   errors like foo@SECREL321 will be detected.  */
                tmpbuf[first++] = ' ';
              memcpy (tmpbuf + first, past_reloc, second);
              tmpbuf[first + second] = '\0';
              return tmpbuf;
            }

          as_bad (_("@%s reloc is not supported with %d-bit output format"),
                  gotrel[j].str, 1 << (5 + object_64bit));
          return NULL;
        }
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
6949
6950#endif /* TE_PE */
6951
/* Target hook for data directives (.long etc.): parse one expression
   of SIZE bytes into EXP, handling @GOTOFF-style relocation suffixes
   via lex_got for 4-byte (and, for 64-bit objects, 8-byte) fields.  */
void
x86_cons (expressionS *exp, int size)
{
  /* Negate intel_syntax while parsing — presumably a flag to the
     expression machinery that we are inside a data directive rather
     than an instruction operand; restored by the second negation
     below.  NOTE(review): confirm against i386_intel_simplify.  */
  intel_syntax = -intel_syntax;

  exp->X_md = 0;
  if (size == 4 || (object_64bit && size == 8))
    {
      /* Handle @GOTOFF and the like in an expression.  */
      char *save;
      char *gotfree_input_line;
      int adjust = 0;

      save = input_line_pointer;
      gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
      if (gotfree_input_line)
        input_line_pointer = gotfree_input_line;

      expression (exp);

      if (gotfree_input_line)
        {
          /* expression () has merrily parsed up to the end of line,
             or a comma - in the wrong buffer.  Transfer how far
             input_line_pointer has moved to the right buffer.  */
          input_line_pointer = (save
                                + (input_line_pointer - gotfree_input_line)
                                + adjust);
          free (gotfree_input_line);
          /* A reloc suffix on anything but a plain symbolic expression
             is an error; temporarily NUL-terminate for the message.  */
          if (exp->X_op == O_constant
              || exp->X_op == O_absent
              || exp->X_op == O_illegal
              || exp->X_op == O_register
              || exp->X_op == O_big)
            {
              char c = *input_line_pointer;
              *input_line_pointer = 0;
              as_bad (_("missing or invalid expression `%s'"), save);
              *input_line_pointer = c;
            }
        }
    }
  else
    expression (exp);

  intel_syntax = -intel_syntax;

  if (intel_syntax)
    i386_intel_simplify (exp);
}
7002
7003static void
7004signed_cons (int size)
7005{
7006 if (flag_code == CODE_64BIT)
7007 cons_sign = 1;
7008 cons (size);
7009 cons_sign = -1;
7010}
7011
7012#ifdef TE_PE
7013static void
7014pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
7015{
7016 expressionS exp;
7017
7018 do
7019 {
7020 expression (&exp);
7021 if (exp.X_op == O_symbol)
7022 exp.X_op = O_secrel;
7023
7024 emit_expr (&exp, 4);
7025 }
7026 while (*input_line_pointer++ == ',');
7027
7028 input_line_pointer--;
7029 demand_empty_rest_of_line ();
7030}
7031#endif
7032
7033static int
7034i386_immediate (char *imm_start)
7035{
7036 char *save_input_line_pointer;
7037 char *gotfree_input_line;
7038 segT exp_seg = 0;
7039 expressionS *exp;
7040 i386_operand_type types;
7041
7042 operand_type_set (&types, ~0);
7043
7044 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
7045 {
7046 as_bad (_("at most %d immediate operands are allowed"),
7047 MAX_IMMEDIATE_OPERANDS);
7048 return 0;
7049 }
7050
7051 exp = &im_expressions[i.imm_operands++];
7052 i.op[this_operand].imms = exp;
7053
7054 if (is_space_char (*imm_start))
7055 ++imm_start;
7056
7057 save_input_line_pointer = input_line_pointer;
7058 input_line_pointer = imm_start;
7059
7060 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7061 if (gotfree_input_line)
7062 input_line_pointer = gotfree_input_line;
7063
7064 exp_seg = expression (exp);
7065
7066 SKIP_WHITESPACE ();
7067 if (*input_line_pointer)
7068 as_bad (_("junk `%s' after expression"), input_line_pointer);
7069
7070 input_line_pointer = save_input_line_pointer;
7071 if (gotfree_input_line)
7072 {
7073 free (gotfree_input_line);
7074
7075 if (exp->X_op == O_constant || exp->X_op == O_register)
7076 exp->X_op = O_illegal;
7077 }
7078
7079 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
7080}
7081
/* Finish processing an immediate expression EXP just parsed for the
   current operand.  TYPES holds the operand types permitted by any
   @-reloc; IMM_START is the operand text for diagnostics (may be NULL
   to suppress error messages).  Sets the immNN bits in
   i.types[this_operand].  Returns 1 on success, 0 on error.  */
static int
i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
                         i386_operand_type types, const char *imm_start)
{
  if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
    {
      if (imm_start)
        as_bad (_("missing or invalid immediate expression `%s'"),
                imm_start);
      return 0;
    }
  else if (exp->X_op == O_constant)
    {
      /* Size it properly later.  */
      i.types[this_operand].bitfield.imm64 = 1;
      /* If not 64bit, sign extend val.  */
      if (flag_code != CODE_64BIT
          && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
        exp->X_add_number
          = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
    }
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
           && exp_seg != absolute_section
           && exp_seg != text_section
           && exp_seg != data_section
           && exp_seg != bss_section
           && exp_seg != undefined_section
           && !bfd_is_com_section (exp_seg))
    {
      /* a.out output cannot relocate immediates against arbitrary
         sections.  */
      as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
      return 0;
    }
#endif
  else if (!intel_syntax && exp->X_op == O_register)
    {
      if (imm_start)
        as_bad (_("illegal immediate register operand %s"), imm_start);
      return 0;
    }
  else
    {
      /* This is an address.  The size of the address will be
         determined later, depending on destination register,
         suffix, or the default for the section.  */
      i.types[this_operand].bitfield.imm8 = 1;
      i.types[this_operand].bitfield.imm16 = 1;
      i.types[this_operand].bitfield.imm32 = 1;
      i.types[this_operand].bitfield.imm32s = 1;
      i.types[this_operand].bitfield.imm64 = 1;
      i.types[this_operand] = operand_type_and (i.types[this_operand],
                                                types);
    }

  return 1;
}
7138
7139static char *
7140i386_scale (char *scale)
7141{
7142 offsetT val;
7143 char *save = input_line_pointer;
7144
7145 input_line_pointer = scale;
7146 val = get_absolute_expression ();
7147
7148 switch (val)
7149 {
7150 case 1:
7151 i.log2_scale_factor = 0;
7152 break;
7153 case 2:
7154 i.log2_scale_factor = 1;
7155 break;
7156 case 4:
7157 i.log2_scale_factor = 2;
7158 break;
7159 case 8:
7160 i.log2_scale_factor = 3;
7161 break;
7162 default:
7163 {
7164 char sep = *input_line_pointer;
7165
7166 *input_line_pointer = '\0';
7167 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7168 scale);
7169 *input_line_pointer = sep;
7170 input_line_pointer = save;
7171 return NULL;
7172 }
7173 }
7174 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7175 {
7176 as_warn (_("scale factor of %d without an index register"),
7177 1 << i.log2_scale_factor);
7178 i.log2_scale_factor = 0;
7179 }
7180 scale = input_line_pointer;
7181 input_line_pointer = save;
7182 return scale;
7183}
7184
/* Parse the displacement text between DISP_START and DISP_END into the
   current operand slot, first deciding which displacement widths
   (disp16/32/32s/64) the instruction and address/data-size prefixes
   allow.  Returns 1 on success, 0 on error.  */
static int
i386_displacement (char *disp_start, char *disp_end)
{
  expressionS *exp;
  segT exp_seg = 0;
  char *save_input_line_pointer;
  char *gotfree_input_line;
  int override;
  i386_operand_type bigdisp, types = anydisp;
  int ret;

  if (i.disp_operands == MAX_MEMORY_OPERANDS)
    {
      as_bad (_("at most %d displacement operands are allowed"),
              MAX_MEMORY_OPERANDS);
      return 0;
    }

  /* First work out the permissible displacement widths for this
     operand: memory operands depend on address size, PC-relative
     branch displacements on data size.  */
  operand_type_set (&bigdisp, 0);
  if ((i.types[this_operand].bitfield.jumpabsolute)
      || (!current_templates->start->opcode_modifier.jump
          && !current_templates->start->opcode_modifier.jumpdword))
    {
      bigdisp.bitfield.disp32 = 1;
      override = (i.prefix[ADDR_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
        {
          if (!override)
            {
              bigdisp.bitfield.disp32s = 1;
              bigdisp.bitfield.disp64 = 1;
            }
        }
      else if ((flag_code == CODE_16BIT) ^ override)
        {
          bigdisp.bitfield.disp32 = 0;
          bigdisp.bitfield.disp16 = 1;
        }
    }
  else
    {
      /* For PC-relative branches, the width of the displacement
         is dependent upon data size, not address size.  */
      override = (i.prefix[DATA_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
        {
          if (override || i.suffix == WORD_MNEM_SUFFIX)
            bigdisp.bitfield.disp16 = 1;
          else
            {
              bigdisp.bitfield.disp32 = 1;
              bigdisp.bitfield.disp32s = 1;
            }
        }
      else
        {
          if (!override)
            override = (i.suffix == (flag_code != CODE_16BIT
                                     ? WORD_MNEM_SUFFIX
                                     : LONG_MNEM_SUFFIX));
          bigdisp.bitfield.disp32 = 1;
          if ((flag_code == CODE_16BIT) ^ override)
            {
              bigdisp.bitfield.disp32 = 0;
              bigdisp.bitfield.disp16 = 1;
            }
        }
    }
  i.types[this_operand] = operand_type_or (i.types[this_operand],
                                           bigdisp);

  exp = &disp_expressions[i.disp_operands];
  i.op[this_operand].disps = exp;
  i.disp_operands++;
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = disp_start;
  END_STRING_AND_SAVE (disp_end);

#ifndef GCC_ASM_O_HACK
#define GCC_ASM_O_HACK 0
#endif
#if GCC_ASM_O_HACK
  /* NOTE(review): this block is normally compiled out; it references
     bitfield.baseIndex (capital I) and displacement_string_end, which
     do not appear elsewhere in this chunk — verify before enabling.  */
  END_STRING_AND_SAVE (disp_end + 1);
  if (i.types[this_operand].bitfield.baseIndex
      && displacement_string_end[-1] == '+')
    {
      /* This hack is to avoid a warning when using the "o"
         constraint within gcc asm statements.
         For instance:

         #define _set_tssldt_desc(n,addr,limit,type) \
         __asm__ __volatile__ ( \
         "movw %w2,%0\n\t" \
         "movw %w1,2+%0\n\t" \
         "rorl $16,%1\n\t" \
         "movb %b1,4+%0\n\t" \
         "movb %4,5+%0\n\t" \
         "movb $0,6+%0\n\t" \
         "movb %h1,7+%0\n\t" \
         "rorl $16,%1" \
         : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))

         This works great except that the output assembler ends
         up looking a bit weird if it turns out that there is
         no offset.  You end up producing code that looks like:

         #APP
         movw $235,(%eax)
         movw %dx,2+(%eax)
         rorl $16,%edx
         movb %dl,4+(%eax)
         movb $137,5+(%eax)
         movb $0,6+(%eax)
         movb %dh,7+(%eax)
         rorl $16,%edx
         #NO_APP

         So here we provide the missing zero.  */

      *displacement_string_end = '0';
    }
#endif
  /* Strip any @-reloc token and parse from the stripped copy.  */
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();
  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);
#if GCC_ASM_O_HACK
  RESTORE_END_STRING (disp_end + 1);
#endif
  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A reloc suffix on a bare constant or register makes no sense.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
        exp->X_op = O_illegal;
    }

  ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);

  RESTORE_END_STRING (disp_end);

  return ret;
}
7334
7335static int
7336i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7337 i386_operand_type types, const char *disp_start)
7338{
7339 i386_operand_type bigdisp;
7340 int ret = 1;
7341
7342 /* We do this to make sure that the section symbol is in
7343 the symbol table. We will ultimately change the relocation
7344 to be relative to the beginning of the section. */
7345 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7346 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7347 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7348 {
7349 if (exp->X_op != O_symbol)
7350 goto inv_disp;
7351
7352 if (S_IS_LOCAL (exp->X_add_symbol)
7353 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7354 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7355 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7356 exp->X_op = O_subtract;
7357 exp->X_op_symbol = GOT_symbol;
7358 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7359 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7360 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7361 i.reloc[this_operand] = BFD_RELOC_64;
7362 else
7363 i.reloc[this_operand] = BFD_RELOC_32;
7364 }
7365
7366 else if (exp->X_op == O_absent
7367 || exp->X_op == O_illegal
7368 || exp->X_op == O_big)
7369 {
7370 inv_disp:
7371 as_bad (_("missing or invalid displacement expression `%s'"),
7372 disp_start);
7373 ret = 0;
7374 }
7375
7376 else if (flag_code == CODE_64BIT
7377 && !i.prefix[ADDR_PREFIX]
7378 && exp->X_op == O_constant)
7379 {
7380 /* Since displacement is signed extended to 64bit, don't allow
7381 disp32 and turn off disp32s if they are out of range. */
7382 i.types[this_operand].bitfield.disp32 = 0;
7383 if (!fits_in_signed_long (exp->X_add_number))
7384 {
7385 i.types[this_operand].bitfield.disp32s = 0;
7386 if (i.types[this_operand].bitfield.baseindex)
7387 {
7388 as_bad (_("0x%lx out range of signed 32bit displacement"),
7389 (long) exp->X_add_number);
7390 ret = 0;
7391 }
7392 }
7393 }
7394
7395#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7396 else if (exp->X_op != O_constant
7397 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7398 && exp_seg != absolute_section
7399 && exp_seg != text_section
7400 && exp_seg != data_section
7401 && exp_seg != bss_section
7402 && exp_seg != undefined_section
7403 && !bfd_is_com_section (exp_seg))
7404 {
7405 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7406 ret = 0;
7407 }
7408#endif
7409
7410 /* Check if this is a displacement only operand. */
7411 bigdisp = i.types[this_operand];
7412 bigdisp.bitfield.disp8 = 0;
7413 bigdisp.bitfield.disp16 = 0;
7414 bigdisp.bitfield.disp32 = 0;
7415 bigdisp.bitfield.disp32s = 0;
7416 bigdisp.bitfield.disp64 = 0;
7417 if (operand_type_all_zero (&bigdisp))
7418 i.types[this_operand] = operand_type_and (i.types[this_operand],
7419 types);
7420
7421 return ret;
7422}
7423
7424/* Make sure the memory operand we've been dealt is valid.
7425 Return 1 on success, 0 on a failure. */
7426
/* Validate the base/index registers of the current memory operand
   against the active addressing mode (16/32/64-bit, honouring any
   address-size prefix).  String instructions are special-cased to
   require their fixed rSI/rDI/rBX address register.  With
   INFER_ADDR_PREFIX, a failed check is retried once after inserting
   an address-size prefix.  Returns 1 on success, 0 on failure.  */
static int
i386_index_check (const char *operand_string)
{
  int ok;
  const char *kind = "base/index";
#if INFER_ADDR_PREFIX
  int fudged = 0;

 tryprefix:
#endif
  ok = 1;
  if (current_templates->start->opcode_modifier.isstring
      && !current_templates->start->opcode_modifier.immext
      && (current_templates->end[-1].opcode_modifier.isstring
          || i.mem_operands))
    {
      /* Memory operands of string insns are special in that they only allow
         a single register (rDI, rSI, or rBX) as their memory address.  */
      unsigned int expected;

      kind = "string address";

      if (current_templates->start->opcode_modifier.w)
        {
          i386_operand_type type = current_templates->end[-1].operand_types[0];

          if (!type.bitfield.baseindex
              || ((!i.mem_operands != !intel_syntax)
                  && current_templates->end[-1].operand_types[1]
                     .bitfield.baseindex))
            type = current_templates->end[-1].operand_types[1];
          /* ES-segment operands use rDI, others rSI (register numbers
             7 and 6 respectively).  */
          expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
        }
      else
        expected = 3 /* rBX */;

      /* ok = -1 means "wrong register but right class": fall through
         to the diagnostic loop below to name the expected register.  */
      if (!i.base_reg || i.index_reg
          || operand_type_check (i.types[this_operand], disp))
        ok = -1;
      else if (!(flag_code == CODE_64BIT
                 ? i.prefix[ADDR_PREFIX]
                 ? i.base_reg->reg_type.bitfield.reg32
                 : i.base_reg->reg_type.bitfield.reg64
                 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
                 ? i.base_reg->reg_type.bitfield.reg32
                 : i.base_reg->reg_type.bitfield.reg16))
        ok = 0;
      else if (register_number (i.base_reg) != expected)
        ok = -1;

      if (ok < 0)
        {
          unsigned int j;

          /* Find the register of the expected number and width so the
             warning can spell out what should have been written.  */
          for (j = 0; j < i386_regtab_size; ++j)
            if ((flag_code == CODE_64BIT
                 ? i.prefix[ADDR_PREFIX]
                 ? i386_regtab[j].reg_type.bitfield.reg32
                 : i386_regtab[j].reg_type.bitfield.reg64
                 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
                 ? i386_regtab[j].reg_type.bitfield.reg32
                 : i386_regtab[j].reg_type.bitfield.reg16)
                && register_number(i386_regtab + j) == expected)
              break;
          gas_assert (j < i386_regtab_size);
          as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
                   operand_string,
                   intel_syntax ? '[' : '(',
                   register_prefix,
                   i386_regtab[j].reg_name,
                   intel_syntax ? ']' : ')');
          ok = 1;
        }
    }
  else if (flag_code == CODE_64BIT)
    {
      /* 64-bit: base must be a 64-bit reg (32-bit with address prefix)
         or RIP/EIP without an index; an index must be a vector reg
         (VSIB) or a base-index-capable GPR of the right width,
         allowing RIZ/EIZ pseudo-registers.  */
      if ((i.base_reg
           && ((i.prefix[ADDR_PREFIX] == 0
                && !i.base_reg->reg_type.bitfield.reg64)
               || (i.prefix[ADDR_PREFIX]
                   && !i.base_reg->reg_type.bitfield.reg32))
           && (i.index_reg
               || i.base_reg->reg_num !=
                  (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
          || (i.index_reg
              && !(i.index_reg->reg_type.bitfield.regxmm
                   || i.index_reg->reg_type.bitfield.regymm)
              && (!i.index_reg->reg_type.bitfield.baseindex
                  || (i.prefix[ADDR_PREFIX] == 0
                      && i.index_reg->reg_num != RegRiz
                      && !i.index_reg->reg_type.bitfield.reg64
                      )
                  || (i.prefix[ADDR_PREFIX]
                      && i.index_reg->reg_num != RegEiz
                      && !i.index_reg->reg_type.bitfield.reg32))))
        ok = 0;
    }
  else
    {
      if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
        {
          /* 16bit checks.  */
          if ((i.base_reg
               && (!i.base_reg->reg_type.bitfield.reg16
                   || !i.base_reg->reg_type.bitfield.baseindex))
              || (i.index_reg
                  && (!i.index_reg->reg_type.bitfield.reg16
                      || !i.index_reg->reg_type.bitfield.baseindex
                      || !(i.base_reg
                           && i.base_reg->reg_num < 6
                           && i.index_reg->reg_num >= 6
                           && i.log2_scale_factor == 0))))
            ok = 0;
        }
      else
        {
          /* 32bit checks.  */
          if ((i.base_reg
               && !i.base_reg->reg_type.bitfield.reg32)
              || (i.index_reg
                  && !i.index_reg->reg_type.bitfield.regxmm
                  && !i.index_reg->reg_type.bitfield.regymm
                  && ((!i.index_reg->reg_type.bitfield.reg32
                       && i.index_reg->reg_num != RegEiz)
                      || !i.index_reg->reg_type.bitfield.baseindex)))
            ok = 0;
        }
    }
  if (!ok)
    {
#if INFER_ADDR_PREFIX
      if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
        {
          /* Retry once with an inferred address-size prefix.  */
          i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
          i.prefixes += 1;
          /* Change the size of any displacement too.  At most one of
             Disp16 or Disp32 is set.
             FIXME.  There doesn't seem to be any real need for separate
             Disp16 and Disp32 flags.  The same goes for Imm16 and Imm32.
             Removing them would probably clean up the code quite a lot.  */
          if (flag_code != CODE_64BIT
              && (i.types[this_operand].bitfield.disp16
                  || i.types[this_operand].bitfield.disp32))
            i.types[this_operand]
              = operand_type_xor (i.types[this_operand], disp16_32);
          fudged = 1;
          goto tryprefix;
        }
      if (fudged)
        as_bad (_("`%s' is not a valid %s expression"),
                operand_string,
                kind);
      else
#endif
        as_bad (_("`%s' is not a valid %s-bit %s expression"),
                operand_string,
                flag_code_names[i.prefix[ADDR_PREFIX]
                                ? flag_code == CODE_32BIT
                                  ? CODE_16BIT
                                  : CODE_32BIT
                                : flag_code],
                kind);
    }
  return ok;
}
7592
7593/* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7594 on error. */
7595
/* Parse one AT&T-syntax operand from OPERAND_STRING into the current
   slot of the global insn state `i': a register (possibly with a
   segment-override `%seg:' prefix), an immediate (`$...'), or a memory
   reference `disp(base, index, scale)'.  Returns 1 on success, 0 after
   reporting an error.  */
static int
i386_att_operand (char *operand_string)
{
  const reg_entry *r;
  char *end_op;
  char *op_string = operand_string;

  if (is_space_char (*op_string))
    ++op_string;

  /* We check for an absolute prefix (differentiating,
     for example, 'jmp pc_relative_label' from 'jmp *absolute_label'.  */
  if (*op_string == ABSOLUTE_PREFIX)
    {
      ++op_string;
      if (is_space_char (*op_string))
        ++op_string;
      i.types[this_operand].bitfield.jumpabsolute = 1;
    }

  /* Check if operand is a register.  */
  if ((r = parse_register (op_string, &end_op)) != NULL)
    {
      i386_operand_type temp;

      /* Check for a segment override by searching for ':' after a
         segment register.  */
      op_string = end_op;
      if (is_space_char (*op_string))
        ++op_string;
      if (*op_string == ':'
          && (r->reg_type.bitfield.sreg2
              || r->reg_type.bitfield.sreg3))
        {
          /* Record the override; reg_num 0..5 map to ES,CS,SS,DS,FS,GS.  */
          switch (r->reg_num)
            {
            case 0:
              i.seg[i.mem_operands] = &es;
              break;
            case 1:
              i.seg[i.mem_operands] = &cs;
              break;
            case 2:
              i.seg[i.mem_operands] = &ss;
              break;
            case 3:
              i.seg[i.mem_operands] = &ds;
              break;
            case 4:
              i.seg[i.mem_operands] = &fs;
              break;
            case 5:
              i.seg[i.mem_operands] = &gs;
              break;
            }

          /* Skip the ':' and whitespace.  */
          ++op_string;
          if (is_space_char (*op_string))
            ++op_string;

          if (!is_digit_char (*op_string)
              && !is_identifier_char (*op_string)
              && *op_string != '('
              && *op_string != ABSOLUTE_PREFIX)
            {
              as_bad (_("bad memory operand `%s'"), op_string);
              return 0;
            }
          /* Handle case of %es:*foo.  */
          if (*op_string == ABSOLUTE_PREFIX)
            {
              ++op_string;
              if (is_space_char (*op_string))
                ++op_string;
              i.types[this_operand].bitfield.jumpabsolute = 1;
            }
          goto do_memory_reference;
        }
      if (*op_string)
        {
          as_bad (_("junk `%s' after register"), op_string);
          return 0;
        }
      /* A plain register operand: record its type with BaseIndex
         cleared (it is not being used for addressing here).  */
      temp = r->reg_type;
      temp.bitfield.baseindex = 0;
      i.types[this_operand] = operand_type_or (i.types[this_operand],
                                               temp);
      i.types[this_operand].bitfield.unspecified = 0;
      i.op[this_operand].regs = r;
      i.reg_operands++;
    }
  else if (*op_string == REGISTER_PREFIX)
    {
      as_bad (_("bad register name `%s'"), op_string);
      return 0;
    }
  else if (*op_string == IMMEDIATE_PREFIX)
    {
      ++op_string;
      if (i.types[this_operand].bitfield.jumpabsolute)
        {
          as_bad (_("immediate operand illegal with absolute jump"));
          return 0;
        }
      if (!i386_immediate (op_string))
        return 0;
    }
  else if (is_digit_char (*op_string)
           || is_identifier_char (*op_string)
           || *op_string == '(')
    {
      /* This is a memory reference of some sort.  */
      char *base_string;

      /* Start and end of displacement string expression (if found).  */
      char *displacement_string_start;
      char *displacement_string_end;

    do_memory_reference:
      if ((i.mem_operands == 1
           && !current_templates->start->opcode_modifier.isstring)
          || i.mem_operands == 2)
        {
          as_bad (_("too many memory references for `%s'"),
                  current_templates->start->name);
          return 0;
        }

      /* Check for base index form.  We detect the base index form by
         looking for an ')' at the end of the operand, searching
         for the '(' matching it, and finding a REGISTER_PREFIX or ','
         after the '('.  */
      base_string = op_string + strlen (op_string);

      --base_string;
      if (is_space_char (*base_string))
        --base_string;

      /* If we only have a displacement, set-up for it to be parsed later.  */
      displacement_string_start = op_string;
      displacement_string_end = base_string + 1;

      if (*base_string == ')')
        {
          char *temp_string;
          unsigned int parens_balanced = 1;
          /* We've already checked that the number of left & right ()'s are
             equal, so this loop will not be infinite.  */
          do
            {
              base_string--;
              if (*base_string == ')')
                parens_balanced++;
              if (*base_string == '(')
                parens_balanced--;
            }
          while (parens_balanced);

          temp_string = base_string;

          /* Skip past '(' and whitespace.  */
          ++base_string;
          if (is_space_char (*base_string))
            ++base_string;

          if (*base_string == ','
              || ((i.base_reg = parse_register (base_string, &end_op))
                  != NULL))
            {
              displacement_string_end = temp_string;

              i.types[this_operand].bitfield.baseindex = 1;

              if (i.base_reg)
                {
                  base_string = end_op;
                  if (is_space_char (*base_string))
                    ++base_string;
                }

              /* There may be an index reg or scale factor here.  */
              if (*base_string == ',')
                {
                  ++base_string;
                  if (is_space_char (*base_string))
                    ++base_string;

                  if ((i.index_reg = parse_register (base_string, &end_op))
                      != NULL)
                    {
                      base_string = end_op;
                      if (is_space_char (*base_string))
                        ++base_string;
                      if (*base_string == ',')
                        {
                          ++base_string;
                          if (is_space_char (*base_string))
                            ++base_string;
                        }
                      else if (*base_string != ')')
                        {
                          as_bad (_("expecting `,' or `)' "
                                    "after index register in `%s'"),
                                  operand_string);
                          return 0;
                        }
                    }
                  else if (*base_string == REGISTER_PREFIX)
                    {
                      end_op = strchr (base_string, ',');
                      if (end_op)
                        *end_op = '\0';
                      as_bad (_("bad register name `%s'"), base_string);
                      return 0;
                    }

                  /* Check for scale factor.  */
                  if (*base_string != ')')
                    {
                      char *end_scale = i386_scale (base_string);

                      if (!end_scale)
                        return 0;

                      base_string = end_scale;
                      if (is_space_char (*base_string))
                        ++base_string;
                      if (*base_string != ')')
                        {
                          as_bad (_("expecting `)' "
                                    "after scale factor in `%s'"),
                                  operand_string);
                          return 0;
                        }
                    }
                  else if (!i.index_reg)
                    {
                      as_bad (_("expecting index register or scale factor "
                                "after `,'; got '%c'"),
                              *base_string);
                      return 0;
                    }
                }
              else if (*base_string != ')')
                {
                  as_bad (_("expecting `,' or `)' "
                            "after base register in `%s'"),
                          operand_string);
                  return 0;
                }
            }
          else if (*base_string == REGISTER_PREFIX)
            {
              end_op = strchr (base_string, ',');
              if (end_op)
                *end_op = '\0';
              as_bad (_("bad register name `%s'"), base_string);
              return 0;
            }
        }

      /* If there's an expression beginning the operand, parse it,
         assuming displacement_string_start and
         displacement_string_end are meaningful.  */
      if (displacement_string_start != displacement_string_end)
        {
          if (!i386_displacement (displacement_string_start,
                                  displacement_string_end))
            return 0;
        }

      /* Special case for (%dx) while doing input/output op.  */
      if (i.base_reg
          && operand_type_equal (&i.base_reg->reg_type,
                                 &reg16_inoutportreg)
          && i.index_reg == 0
          && i.log2_scale_factor == 0
          && i.seg[i.mem_operands] == 0
          && !operand_type_check (i.types[this_operand], disp))
        {
          i.types[this_operand] = inoutportreg;
          return 1;
        }

      if (i386_index_check (operand_string) == 0)
        return 0;
      i.types[this_operand].bitfield.mem = 1;
      i.mem_operands++;
    }
  else
    {
      /* It's not a memory operand; argh!  */
      as_bad (_("invalid char %s beginning operand %d `%s'"),
              output_invalid (*op_string),
              this_operand + 1,
              op_string);
      return 0;
    }
  return 1;			/* Normal return.  */
}
7897\f
7898/* Calculate the maximum variable size (i.e., excluding fr_fix)
7899 that an rs_machine_dependent frag may reach. */
7900
7901unsigned int
7902i386_frag_max_var (fragS *frag)
7903{
7904 /* The only relaxable frags are for jumps.
7905 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
7906 gas_assert (frag->fr_type == rs_machine_dependent);
7907 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
7908}
7909
7910/* md_estimate_size_before_relax()
7911
7912 Called just before relax() for rs_machine_dependent frags. The x86
7913 assembler uses these frags to handle variable size jump
7914 instructions.
7915
7916 Any symbol that is now undefined will not become defined.
7917 Return the correct fr_subtype in the frag.
7918 Return the initial "guess for variable size of frag" to caller.
7919 The guess is actually the growth beyond the fixed part. Whatever
7920 we do to grow the fixed or variable part contributes to our
7921 returned value. */
7922
int
md_estimate_size_before_relax (fragS *fragP, segT segment)
{
  /* We've already got fragP->fr_subtype right; all we have to do is
     check for un-relaxable symbols.  On an ELF system, we can't relax
     an externally visible symbol, because it may be overridden by a
     shared library.  */
  if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      || (IS_ELF
          && (S_IS_EXTERNAL (fragP->fr_symbol)
              || S_IS_WEAK (fragP->fr_symbol)
              || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
                   & BSF_GNU_INDIRECT_FUNCTION))))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      || (OUTPUT_FLAVOR == bfd_target_coff_flavour
          && S_IS_WEAK (fragP->fr_symbol))
#endif
      )
    {
      /* Symbol is undefined in this segment, or we need to keep a
         reloc so that weak symbols can be overridden.  Widen the jump
         now and emit a fixup; the frag will not be relaxed further.  */
      int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
      enum bfd_reloc_code_real reloc_type;
      unsigned char *opcode;
      int old_fr_fix;

      /* fr_var may carry a caller-specified reloc type; otherwise use
         a PC-relative reloc matching the displacement size.  */
      if (fragP->fr_var != NO_RELOC)
        reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
      else if (size == 2)
        reloc_type = BFD_RELOC_16_PCREL;
      else
        reloc_type = BFD_RELOC_32_PCREL;

      old_fr_fix = fragP->fr_fix;
      opcode = (unsigned char *) fragP->fr_opcode;

      switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
        {
        case UNCOND_JUMP:
          /* Make jmp (0xeb) a (d)word displacement jump.  */
          opcode[0] = 0xe9;
          fragP->fr_fix += size;
          fix_new (fragP, old_fr_fix, size,
                   fragP->fr_symbol,
                   fragP->fr_offset, 1,
                   reloc_type);
          break;

        case COND_JUMP86:
          if (size == 2
              && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
            {
              /* Negate the condition, and branch past an
                 unconditional jump.  */
              opcode[0] ^= 1;
              opcode[1] = 3;
              /* Insert an unconditional jump.  */
              opcode[2] = 0xe9;
              /* We added two extra opcode bytes, and have a two byte
                 offset.  */
              fragP->fr_fix += 2 + 2;
              fix_new (fragP, old_fr_fix + 2, 2,
                       fragP->fr_symbol,
                       fragP->fr_offset, 1,
                       reloc_type);
              break;
            }
          /* Fall through.  */

        case COND_JUMP:
          if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
            {
              /* Promotion disabled: keep the short form and emit a
                 signed 8-bit PC-relative fixup instead of widening.  */
              fixS *fixP;

              fragP->fr_fix += 1;
              fixP = fix_new (fragP, old_fr_fix, 1,
                              fragP->fr_symbol,
                              fragP->fr_offset, 1,
                              BFD_RELOC_8_PCREL);
              fixP->fx_signed = 1;
              break;
            }

          /* This changes the byte-displacement jump 0x7N
             to the (d)word-displacement jump 0x0f,0x8N.  */
          opcode[1] = opcode[0] + 0x10;
          opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
          /* We've added an opcode byte.  */
          fragP->fr_fix += 1 + size;
          fix_new (fragP, old_fr_fix + 1, size,
                   fragP->fr_symbol,
                   fragP->fr_offset, 1,
                   reloc_type);
          break;

        default:
          BAD_CASE (fragP->fr_subtype);
          break;
        }
      frag_wane (fragP);
      return fragP->fr_fix - old_fr_fix;
    }

  /* Guess size depending on current relax state.  Initially the relax
     state will correspond to a short jump and we return 1, because
     the variable part of the frag (the branch offset) is one byte
     long.  However, we can relax a section more than once and in that
     case we must either set fr_subtype back to the unrelaxed state,
     or return the value for the appropriate branch.  */
  return md_relax_table[fragP->fr_subtype].rlx_length;
}
8036
8037/* Called after relax() is finished.
8038
8039 In: Address of frag.
8040 fr_type == rs_machine_dependent.
8041 fr_subtype is what the address relaxed to.
8042
8043 Out: Any fixSs and constants are set up.
8044 Caller will turn frag into a ".space 0". */
8045
void
md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
		 fragS *fragP)
{
  unsigned char *opcode;
  unsigned char *where_to_put_displacement = NULL;
  offsetT target_address;
  offsetT opcode_address;
  unsigned int extension = 0;
  offsetT displacement_from_opcode_start;

  opcode = (unsigned char *) fragP->fr_opcode;

  /* Address we want to reach in file space.  */
  target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;

  /* Address opcode resides at in file space.  */
  opcode_address = fragP->fr_address + fragP->fr_fix;

  /* Displacement from opcode start to fill into instruction.  */
  displacement_from_opcode_start = target_address - opcode_address;

  if ((fragP->fr_subtype & BIG) == 0)
    {
      /* Short form: don't have to change opcode.  */
      extension = 1;		/* 1 opcode + 1 displacement  */
      where_to_put_displacement = &opcode[1];
    }
  else
    {
      if (no_cond_jump_promotion
	  && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
	as_warn_where (fragP->fr_file, fragP->fr_line,
		       _("long jump required"));

      /* Rewrite the opcode bytes for the widened form; each case notes
	 how many bytes beyond fr_fix the instruction now occupies.  */
      switch (fragP->fr_subtype)
	{
	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
	  extension = 4;	/* 1 opcode + 4 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
	  extension = 2;	/* 1 opcode + 2 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG):
	case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
	  extension = 5;	/* 2 opcode + 4 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
	  extension = 3;	/* 2 opcode + 2 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
	  /* Negate the condition and jump past an inserted
	     unconditional word-displacement jump (same transformation
	     as the COND_JUMP86 case in md_estimate_size_before_relax).  */
	  extension = 4;
	  opcode[0] ^= 1;
	  opcode[1] = 3;
	  opcode[2] = 0xe9;
	  where_to_put_displacement = &opcode[3];
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
    }

  /* If size is less than four we are sure that the operand fits,
     but if it's 4, then it could be that the displacement is larger
     than -/+ 2GB.  */
  if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
      && object_64bit
      && ((addressT) (displacement_from_opcode_start - extension
		      + ((addressT) 1 << 31))
	  > (((addressT) 2 << 31) - 1)))
    {
      as_bad_where (fragP->fr_file, fragP->fr_line,
		    _("jump target out of range"));
      /* Make us emit 0.  */
      displacement_from_opcode_start = extension;
    }
  /* Now put displacement after opcode.  */
  md_number_to_chars ((char *) where_to_put_displacement,
		      (valueT) (displacement_from_opcode_start - extension),
		      DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
  fragP->fr_fix += extension;
}
8144\f
8145/* Apply a fixup (fixP) to segment data, once it has been determined
8146 by our caller that we have all the info we need to fix it up.
8147
8148 Parameter valP is the pointer to the value of the bits.
8149
8150 On the 386, immediates, displacements, and data pointers are all in
8151 the same (little-endian) format, so we don't need to care about which
8152 we are handling. */
8153
void
md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
  /* NOTE: SEG is marked ATTRIBUTE_UNUSED but is referenced inside the
     ELF and PE conditional blocks below; it is only unused in some
     configurations.  */
  char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
  valueT value = *valP;

#if !defined (TE_Mach)
  /* Convert plain data relocs into their PC-relative counterparts
     when the fixup turned out to be PC-relative.  */
  if (fixP->fx_pcrel)
    {
      switch (fixP->fx_r_type)
	{
	default:
	  break;

	case BFD_RELOC_64:
	  fixP->fx_r_type = BFD_RELOC_64_PCREL;
	  break;
	case BFD_RELOC_32:
	case BFD_RELOC_X86_64_32S:
	  fixP->fx_r_type = BFD_RELOC_32_PCREL;
	  break;
	case BFD_RELOC_16:
	  fixP->fx_r_type = BFD_RELOC_16_PCREL;
	  break;
	case BFD_RELOC_8:
	  fixP->fx_r_type = BFD_RELOC_8_PCREL;
	  break;
	}
    }

  if (fixP->fx_addsy != NULL
      && (fixP->fx_r_type == BFD_RELOC_32_PCREL
	  || fixP->fx_r_type == BFD_RELOC_64_PCREL
	  || fixP->fx_r_type == BFD_RELOC_16_PCREL
	  || fixP->fx_r_type == BFD_RELOC_8_PCREL)
      && !use_rela_relocations)
    {
      /* This is a hack.  There should be a better way to handle this.
	 This covers for the fact that bfd_install_relocation will
	 subtract the current location (for partial_inplace, PC relative
	 relocations); see more below.  */
#ifndef OBJ_AOUT
      if (IS_ELF
#ifdef TE_PE
	  || OUTPUT_FLAVOR == bfd_target_coff_flavour
#endif
	  )
	value += fixP->fx_where + fixP->fx_frag->fr_address;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (IS_ELF)
	{
	  segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);

	  if ((sym_seg == seg
	       || (symbol_section_p (fixP->fx_addsy)
		   && sym_seg != absolute_section))
	      && !generic_force_reloc (fixP))
	    {
	      /* Yes, we add the values in twice.  This is because
		 bfd_install_relocation subtracts them out again.  I think
		 bfd_install_relocation is broken, but I don't dare change
		 it.  FIXME.  */
	      value += fixP->fx_where + fixP->fx_frag->fr_address;
	    }
	}
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      /* For some reason, the PE format does not store a
	 section address offset for a PC relative symbol.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != seg
	  || S_IS_WEAK (fixP->fx_addsy))
	value += md_pcrel_from (fixP);
#endif
    }
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      value -= S_GET_VALUE (fixP->fx_addsy);
    }
#endif

  /* Fix a few things - the dynamic linker expects certain values here,
     and we must not disappoint it.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF && fixP->fx_addsy)
    switch (fixP->fx_r_type)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
	/* Make the jump instruction point to the address of the operand.  At
	   runtime we merely add the offset to the actual PLT entry.  */
	value = -4;
	break;

      case BFD_RELOC_386_TLS_GD:
      case BFD_RELOC_386_TLS_LDM:
      case BFD_RELOC_386_TLS_IE_32:
      case BFD_RELOC_386_TLS_IE:
      case BFD_RELOC_386_TLS_GOTIE:
      case BFD_RELOC_386_TLS_GOTDESC:
      case BFD_RELOC_X86_64_TLSGD:
      case BFD_RELOC_X86_64_TLSLD:
      case BFD_RELOC_X86_64_GOTTPOFF:
      case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	/* Fallthrough */
      case BFD_RELOC_386_TLS_LE:
      case BFD_RELOC_386_TLS_LDO_32:
      case BFD_RELOC_386_TLS_LE_32:
      case BFD_RELOC_X86_64_DTPOFF32:
      case BFD_RELOC_X86_64_DTPOFF64:
      case BFD_RELOC_X86_64_TPOFF32:
      case BFD_RELOC_X86_64_TPOFF64:
	/* All TLS relocs mark their symbol thread-local.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	break;

      case BFD_RELOC_386_TLS_DESC_CALL:
      case BFD_RELOC_X86_64_TLSDESC_CALL:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	fixP->fx_done = 0;
	return;

      case BFD_RELOC_386_GOT32:
      case BFD_RELOC_X86_64_GOT32:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	break;

      case BFD_RELOC_VTABLE_INHERIT:
      case BFD_RELOC_VTABLE_ENTRY:
	/* Always keep these relocations for the linker's GC analysis.  */
	fixP->fx_done = 0;
	return;

      default:
	break;
      }
#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)  */
  *valP = value;
#endif /* !defined (TE_Mach)  */

  /* Are we finished with this relocation now?  */
  if (fixP->fx_addsy == NULL)
    fixP->fx_done = 1;
#if defined (OBJ_COFF) && defined (TE_PE)
  else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      fixP->fx_done = 0;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      /* Clear out the frag for now.  */
      value = 0;
    }
#endif
  else if (use_rela_relocations)
    {
      /* RELA targets carry the addend in the reloc, not the section
	 contents, so emit zero into the frag.  */
      fixP->fx_no_overflow = 1;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      value = 0;
    }

  md_number_to_chars (p, value, fixP->fx_size);
}
8318\f
char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Output the LITTLENUMs in REVERSE (little-endian) order, as
     appropriate for the little-endian 386; the final FALSE argument
     selects that ordering in ieee_md_atof.  */
  return ieee_md_atof (type, litP, sizeP, FALSE);
}
8326\f
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render character C for use in a diagnostic: printable characters
   come out quoted, anything else as a parenthesized hex byte.  The
   returned static buffer is overwritten by each call.  */
static char *
output_invalid (int c)
{
  const size_t bufsize = sizeof (output_invalid_buf);

  if (!ISPRINT (c))
    snprintf (output_invalid_buf, bufsize,
	      "(0x%x)", (unsigned char) c);
  else
    snprintf (output_invalid_buf, bufsize,
	      "'%c'", c);
  return output_invalid_buf;
}
8340
8341/* REG_STRING starts *before* REGISTER_PREFIX. */
8342
static const reg_entry *
parse_real_register (char *reg_string, char **end_op)
{
  char *s = reg_string;
  char *p;
  char reg_name_given[MAX_REG_NAME_SIZE + 1];
  const reg_entry *r;

  /* Skip possible REGISTER_PREFIX and possible whitespace.  */
  if (*s == REGISTER_PREFIX)
    ++s;

  if (is_space_char (*s))
    ++s;

  /* Copy the candidate name, mapping each character through
     register_chars[]; a zero mapping terminates the name.  Overlong
     names are rejected outright.  */
  p = reg_name_given;
  while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
    {
      if (p >= reg_name_given + MAX_REG_NAME_SIZE)
	return (const reg_entry *) NULL;
      s++;
    }

  /* For naked regs, make sure that we are not dealing with an identifier.
     This prevents confusing an identifier like `eax_var' with register
     `eax'.  */
  if (allow_naked_reg && identifier_chars[(unsigned char) *s])
    return (const reg_entry *) NULL;

  *end_op = s;

  r = (const reg_entry *) hash_find (reg_hash, reg_name_given);

  /* Handle floating point regs, allowing spaces in the (i) part.  */
  if (r == i386_regtab /* %st is first entry of table  */)
    {
      if (is_space_char (*s))
	++s;
      if (*s == '(')
	{
	  ++s;
	  if (is_space_char (*s))
	    ++s;
	  if (*s >= '0' && *s <= '7')
	    {
	      /* "%st(i)": return the i-th entry starting at st(0).  */
	      int fpr = *s - '0';
	      ++s;
	      if (is_space_char (*s))
		++s;
	      if (*s == ')')
		{
		  *end_op = s + 1;
		  r = (const reg_entry *) hash_find (reg_hash, "st(0)");
		  know (r);
		  return r + fpr;
		}
	    }
	  /* We have "%st(" then garbage.  */
	  return (const reg_entry *) NULL;
	}
    }

  if (r == NULL || allow_pseudo_reg)
    return r;

  /* From here on, filter out registers the selected architecture
     does not support.  */
  if (operand_type_all_zero (&r->reg_type))
    return (const reg_entry *) NULL;

  /* 32-bit GPRs, %fs/%gs, and control/debug/test registers require
     at least an i386.  */
  if ((r->reg_type.bitfield.reg32
       || r->reg_type.bitfield.sreg3
       || r->reg_type.bitfield.control
       || r->reg_type.bitfield.debug
       || r->reg_type.bitfield.test)
      && !cpu_arch_flags.bitfield.cpui386)
    return (const reg_entry *) NULL;

  /* FP stack registers require some flavour of x87.  */
  if (r->reg_type.bitfield.floatreg
      && !cpu_arch_flags.bitfield.cpu8087
      && !cpu_arch_flags.bitfield.cpu287
      && !cpu_arch_flags.bitfield.cpu387)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
    return (const reg_entry *) NULL;

  /* Don't allow fake index register unless allow_index_reg isn't 0.  */
  if (!allow_index_reg
      && (r->reg_num == RegEiz || r->reg_num == RegRiz))
    return (const reg_entry *) NULL;

  /* REX-extended and 64-bit registers are invalid outside 64-bit
     mode, except for control registers when the CPU has long mode
     (cpulm).  */
  if (((r->reg_flags & (RegRex64 | RegRex))
       || r->reg_type.bitfield.reg64)
      && (!cpu_arch_flags.bitfield.cpulm
	  || !operand_type_equal (&r->reg_type, &control))
      && flag_code != CODE_64BIT)
    return (const reg_entry *) NULL;

  /* The "flat" pseudo segment register is only valid in Intel
     syntax.  */
  if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
    return (const reg_entry *) NULL;

  return r;
}
8451
8452/* REG_STRING starts *before* REGISTER_PREFIX. */
8453
static const reg_entry *
parse_register (char *reg_string, char **end_op)
{
  const reg_entry *r;

  /* First try a literal register name; naked (un-prefixed) names are
     only considered when allow_naked_reg is set.  */
  if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
    r = parse_real_register (reg_string, end_op);
  else
    r = NULL;
  if (!r)
    {
      /* Not a literal register; it may still be a symbol equated to a
	 register (its segment is then reg_section).  */
      char *save = input_line_pointer;
      char c;
      symbolS *symbolP;

      input_line_pointer = reg_string;
      c = get_symbol_end ();
      symbolP = symbol_find (reg_string);
      if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
	{
	  /* The symbol's value expression holds the register-table
	     index.  */
	  const expressionS *e = symbol_get_value_expression (symbolP);

	  know (e->X_op == O_register);
	  know (e->X_add_number >= 0
		&& (valueT) e->X_add_number < i386_regtab_size);
	  r = i386_regtab + e->X_add_number;
	  *end_op = input_line_pointer;
	}
      /* Restore the character get_symbol_end zapped and the scan
	 position.  */
      *input_line_pointer = c;
      input_line_pointer = save;
    }
  return r;
}
8487
8488int
8489i386_parse_name (char *name, expressionS *e, char *nextcharP)
8490{
8491 const reg_entry *r;
8492 char *end = input_line_pointer;
8493
8494 *end = *nextcharP;
8495 r = parse_register (name, &input_line_pointer);
8496 if (r && end <= input_line_pointer)
8497 {
8498 *nextcharP = *input_line_pointer;
8499 *input_line_pointer = 0;
8500 e->X_op = O_register;
8501 e->X_add_number = r - i386_regtab;
8502 return 1;
8503 }
8504 input_line_pointer = end;
8505 *end = 0;
8506 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8507}
8508
8509void
8510md_operand (expressionS *e)
8511{
8512 char *end;
8513 const reg_entry *r;
8514
8515 switch (*input_line_pointer)
8516 {
8517 case REGISTER_PREFIX:
8518 r = parse_real_register (input_line_pointer, &end);
8519 if (r)
8520 {
8521 e->X_op = O_register;
8522 e->X_add_number = r - i386_regtab;
8523 input_line_pointer = end;
8524 }
8525 break;
8526
8527 case '[':
8528 gas_assert (intel_syntax);
8529 end = input_line_pointer++;
8530 expression (e);
8531 if (*input_line_pointer == ']')
8532 {
8533 ++input_line_pointer;
8534 e->X_op_symbol = make_expr_symbol (e);
8535 e->X_add_symbol = NULL;
8536 e->X_add_number = 0;
8537 e->X_op = O_index;
8538 }
8539 else
8540 {
8541 e->X_op = O_absent;
8542 input_line_pointer = end;
8543 }
8544 break;
8545 }
8546}
8547
8548\f
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* ELF configurations additionally accept the SVR4-compatibility
   options -k, -V, -Q: and -s.  */
const char *md_shortopts = "kVQ:sqn";
#else
const char *md_shortopts = "qn";
#endif
8554
/* Codes for the long options in md_longopts below, returned through
   md_parse_option's first argument.  Based at OPTION_MD_BASE so they
   cannot collide with the generic assembler option codes.  */
#define OPTION_32 (OPTION_MD_BASE + 0)
#define OPTION_64 (OPTION_MD_BASE + 1)
#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
#define OPTION_MARCH (OPTION_MD_BASE + 3)
#define OPTION_MTUNE (OPTION_MD_BASE + 4)
#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
#define OPTION_X32 (OPTION_MD_BASE + 14)
8570
/* Table of the target-specific long options; availability of --64 and
   --x32 depends on the configured object formats.  */
struct option md_longopts[] =
{
  {"32", no_argument, NULL, OPTION_32},
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
  {"64", no_argument, NULL, OPTION_64},
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"x32", no_argument, NULL, OPTION_X32},
#endif
  {"divide", no_argument, NULL, OPTION_DIVIDE},
  {"march", required_argument, NULL, OPTION_MARCH},
  {"mtune", required_argument, NULL, OPTION_MTUNE},
  {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
  {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
  {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
  {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
  {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
  {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
  {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
  {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
  {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
  {NULL, no_argument, NULL, 0}
};
size_t md_longopts_size = sizeof (md_longopts);
8596
/* Handle a machine-dependent command line option.  C is the option
   code (a character or one of the OPTION_* values above), ARG its
   argument if any.  Returns 1 when the option was recognized, 0 to
   let generic option handling have it.  */
int
md_parse_option (int c, char *arg)
{
  unsigned int j;
  char *arch, *next;

  switch (c)
    {
    case 'n':
      /* -n: do not optimize code alignment.  */
      optimize_align_code = 0;
      break;

    case 'q':
      /* -q: quieten some warnings.  */
      quiet_warnings = 1;
      break;

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
	 should be emitted or not.  FIXME: Not implemented.  */
    case 'Q':
      break;

      /* -V: SVR4 argument to print version ID.  */
    case 'V':
      print_version_id ();
      break;

      /* -k: Ignore for FreeBSD compatibility.  */
    case 'k':
      break;

    case 's':
      /* -s: On i386 Solaris, this tells the native assembler to use
	 .stab instead of .stab.excl.  We always use .stab anyhow.  */
      break;
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
    case OPTION_64:
      {
	/* --64: only honoured when a 64-bit target is compiled in.  */
	const char **list, **l;

	list = bfd_target_list ();
	for (l = list; *l != NULL; l++)
	  if (CONST_STRNEQ (*l, "elf64-x86-64")
	      || strcmp (*l, "coff-x86-64") == 0
	      || strcmp (*l, "pe-x86-64") == 0
	      || strcmp (*l, "pei-x86-64") == 0
	      || strcmp (*l, "mach-o-x86-64") == 0)
	    {
	      default_arch = "x86_64";
	      break;
	    }
	if (*l == NULL)
	  as_fatal (_("no compiled in support for x86_64"));
	free (list);
      }
      break;
#endif

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case OPTION_X32:
      /* --x32: ILP32 on the 64-bit ISA; ELF only.  */
      if (IS_ELF)
	{
	  const char **list, **l;

	  list = bfd_target_list ();
	  for (l = list; *l != NULL; l++)
	    if (CONST_STRNEQ (*l, "elf32-x86-64"))
	      {
		default_arch = "x86_64:32";
		break;
	      }
	  if (*l == NULL)
	    as_fatal (_("no compiled in support for 32bit x86_64"));
	  free (list);
	}
      else
	as_fatal (_("32bit x86_64 is only supported for ELF"));
      break;
#endif

    case OPTION_32:
      default_arch = "i386";
      break;

    case OPTION_DIVIDE:
#ifdef SVR4_COMMENT_CHARS
      /* Remove '/' from the comment characters so it can be used for
	 division.  No-op on targets where '/' never starts a comment.  */
      {
	char *n, *t;
	const char *s;

	n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
	t = n;
	for (s = i386_comment_chars; *s != '\0'; s++)
	  if (*s != '/')
	    *t++ = *s;
	*t = '\0';
	i386_comment_chars = n;
      }
#endif
      break;

    case OPTION_MARCH:
      /* -march=CPU[+EXT...]: the first component may name a processor,
	 subsequent '+'-separated ones name ISA extensions.
	 NOTE(review): the xstrdup'd buffer is never freed — a harmless
	 one-shot leak.  */
      arch = xstrdup (arg);
      do
	{
	  if (*arch == '.')
	    as_fatal (_("invalid -march= option: `%s'"), arg);
	  next = strchr (arch, '+');
	  if (next)
	    *next++ = '\0';
	  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	    {
	      if (strcmp (arch, cpu_arch [j].name) == 0)
		{
		  /* Processor.  */
		  if (! cpu_arch[j].flags.bitfield.cpui386)
		    continue;

		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}
	      else if (*cpu_arch [j].name == '.'
		       && strcmp (arch, cpu_arch [j].name + 1) == 0)
		{
		  /* ISA extension.  */
		  i386_cpu_flags flags;

		  if (!cpu_arch[j].negated)
		    flags = cpu_flags_or (cpu_arch_flags,
					  cpu_arch[j].flags);
		  else
		    flags = cpu_flags_and_not (cpu_arch_flags,
					       cpu_arch[j].flags);
		  if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		    {
		      /* Record the extension name (with its leading
			 '.') in cpu_sub_arch_name.  */
		      if (cpu_sub_arch_name)
			{
			  char *name = cpu_sub_arch_name;
			  cpu_sub_arch_name = concat (name,
						      cpu_arch[j].name,
						      (const char *) NULL);
			  free (name);
			}
		      else
			cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		      cpu_arch_flags = flags;
		      cpu_arch_isa_flags = flags;
		    }
		  break;
		}
	    }

	  if (j >= ARRAY_SIZE (cpu_arch))
	    as_fatal (_("invalid -march= option: `%s'"), arg);

	  arch = next;
	}
      while (next != NULL );
      break;

    case OPTION_MTUNE:
      if (*arg == '.')
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (arg, cpu_arch [j].name) == 0)
	    {
	      cpu_arch_tune_set = 1;
	      cpu_arch_tune = cpu_arch [j].type;
	      cpu_arch_tune_flags = cpu_arch[j].flags;
	      break;
	    }
	}
      if (j >= ARRAY_SIZE (cpu_arch))
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      break;

    case OPTION_MMNEMONIC:
      if (strcasecmp (arg, "att") == 0)
	intel_mnemonic = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_mnemonic = 1;
      else
	as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
      break;

    case OPTION_MSYNTAX:
      if (strcasecmp (arg, "att") == 0)
	intel_syntax = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_syntax = 1;
      else
	as_fatal (_("invalid -msyntax= option: `%s'"), arg);
      break;

    case OPTION_MINDEX_REG:
      allow_index_reg = 1;
      break;

    case OPTION_MNAKED_REG:
      allow_naked_reg = 1;
      break;

    case OPTION_MOLD_GCC:
      old_gcc = 1;
      break;

    case OPTION_MSSE2AVX:
      sse2avx = 1;
      break;

    case OPTION_MSSE_CHECK:
      if (strcasecmp (arg, "error") == 0)
	sse_check = check_error;
      else if (strcasecmp (arg, "warning") == 0)
	sse_check = check_warning;
      else if (strcasecmp (arg, "none") == 0)
	sse_check = check_none;
      else
	as_fatal (_("invalid -msse-check= option: `%s'"), arg);
      break;

    case OPTION_MOPERAND_CHECK:
      if (strcasecmp (arg, "error") == 0)
	operand_check = check_error;
      else if (strcasecmp (arg, "warning") == 0)
	operand_check = check_warning;
      else if (strcasecmp (arg, "none") == 0)
	operand_check = check_none;
      else
	as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
      break;

    case OPTION_MAVXSCALAR:
      if (strcasecmp (arg, "128") == 0)
	avxscalar = vex128;
      else if (strcasecmp (arg, "256") == 0)
	avxscalar = vex256;
      else
	as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
      break;

    default:
      return 0;
    }
  return 1;
}
8855
8856#define MESSAGE_TEMPLATE \
8857" "
8858
/* Print the names from cpu_arch[] on STREAM as wrapped, comma
   separated lines.  EXT nonzero lists ISA extensions (names starting
   with '.', printed without the dot); EXT zero lists processors.
   CHECK nonzero suppresses processors that lack the i386 base flag.  */
static void
show_arch (FILE *stream, int ext, int check)
{
  static char message[] = MESSAGE_TEMPLATE;
  /* Skip 27 leading blanks so continuation lines stay aligned with the
     usage text — presumably matched to md_show_usage's indentation;
     verify if that text changes.  */
  char *start = message + 27;
  char *p;
  int size = sizeof (MESSAGE_TEMPLATE);
  int left;
  const char *name;
  int len;
  unsigned int j;

  p = start;
  left = size - (start - message);
  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
    {
      /* Should it be skipped?  */
      if (cpu_arch [j].skip)
	continue;

      name = cpu_arch [j].name;
      len = cpu_arch [j].len;
      if (*name == '.')
	{
	  /* It is an extension.  Skip if we aren't asked to show it.  */
	  if (ext)
	    {
	      name++;
	      len--;
	    }
	  else
	    continue;
	}
      else if (ext)
	{
	  /* It is a processor.  Skip if we show only extensions.  */
	  continue;
	}
      else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
	{
	  /* It is an impossible processor - skip.  */
	  continue;
	}

      /* Reserve 2 spaces for ", " or ",\0".  */
      left -= len + 2;

      /* Check if there is any room.  */
      if (left >= 0)
	{
	  if (p != start)
	    {
	      *p++ = ',';
	      *p++ = ' ';
	    }
	  p = mempcpy (p, name, len);
	}
      else
	{
	  /* Output the current message now and start a new one.  */
	  *p++ = ',';
	  *p = '\0';
	  fprintf (stream, "%s\n", message);
	  p = start;
	  left = size - (start - message) - len - 2;

	  gas_assert (left >= 0);

	  p = mempcpy (p, name, len);
	}
    }

  /* Flush the final (possibly partial) line.  */
  *p = '\0';
  fprintf (stream, "%s\n", message);
}
8934
/* Print the target-specific part of the --help output on STREAM.
   The option descriptions must stay in sync with md_longopts and
   md_parse_option above.  */
void
md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -Q                      ignored\n\
  -V                      print assembler version number\n\
  -k                      ignored\n"));
#endif
  fprintf (stream, _("\
  -n                      Do not optimize code alignment\n\
  -q                      quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s                      ignored\n"));
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide                do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide                ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
                          generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
                          EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU              optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx               encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning]\n\
                          check SSE instructions\n"));
  fprintf (stream, _("\
  -moperand-check=[none|error|warning]\n\
                          check operand combinations for validity\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg             support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg             don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -mold-gcc               support old (<= 2.8.1) versions of gcc\n"));
}
8995
8996#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8997 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8998 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8999
/* Pick the BFD target format name to use for the output file, based on
   the configured default architecture and the output flavour.  As a
   side effect this also sets the code-size flag, defaults the ISA and
   tuning flags, and selects Rel vs Rela relocation style.  */

const char *
i386_target_format (void)
{
  /* "x86_64" may be followed by a suffix; a bare "x86_64" selects the
     LP64 ABI, anything else (e.g. "x86_64:32") selects the x32 ABI.  */
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else
    as_fatal (_("unknown architecture"));

  /* If no -march=/-mtune= was given, default both ISA and tuning to
     the generic entry matching the code size (cpu_arch[0] or [1]).  */
  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    /* x32: 64-bit arch, 32-bit object format; 64-bit-only
	       relocation types must be rejected later.  */
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	/* L1OM/K1OM override the generic ELF format and require the
	   full 64-bit ABI.  */
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}
9095
9096#endif /* OBJ_MAYBE_ more than one */
9097
9098#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
/* Emit an ELF .note section recording the target CPU architecture name
   (note type NT_ARCH, name = cpu_arch_name, empty descriptor).  The
   current segment/subsegment is saved and restored around the emission.  */
void
i386_elf_emit_arch_note (void)
{
  if (IS_ELF && cpu_arch_name != NULL)
    {
      char *p;
      asection *seg = now_seg;
      subsegT subseg = now_subseg;
      Elf_Internal_Note i_note;
      Elf_External_Note e_note;
      asection *note_secp;
      int len;

      /* Create the .note section.  */
      note_secp = subseg_new (".note", 0);
      bfd_set_section_flags (stdoutput,
			     note_secp,
			     SEC_HAS_CONTENTS | SEC_READONLY);

      /* Process the arch string.  */
      len = strlen (cpu_arch_name);

      /* namesz counts the terminating NUL; there is no descriptor.  */
      i_note.namesz = len + 1;
      i_note.descsz = 0;
      i_note.type = NT_ARCH;
      /* Emit the note header fields one at a time, using the external
	 note layout for the field sizes.  */
      p = frag_more (sizeof (e_note.namesz));
      md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
      p = frag_more (sizeof (e_note.descsz));
      md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
      p = frag_more (sizeof (e_note.type));
      md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
      p = frag_more (len + 1);
      strcpy (p, cpu_arch_name);

      /* Pad the note out to a 4-byte boundary as ELF requires.  */
      frag_align (2, 0, 0);

      subseg_set (seg, subseg);
    }
}
9138#endif
9139\f
9140symbolS *
9141md_undefined_symbol (char *name)
9142{
9143 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
9144 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
9145 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
9146 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
9147 {
9148 if (!GOT_symbol)
9149 {
9150 if (symbol_find (name))
9151 as_bad (_("GOT already in symbol table"));
9152 GOT_symbol = symbol_new (name, undefined_section,
9153 (valueT) 0, &zero_address_frag);
9154 };
9155 return GOT_symbol;
9156 }
9157 return 0;
9158}
9159
9160/* Round up a section size to the appropriate boundary. */
9161
9162valueT
9163md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
9164{
9165#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9166 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
9167 {
9168 /* For a.out, force the section size to be aligned. If we don't do
9169 this, BFD will align it for us, but it will not write out the
9170 final bytes of the section. This may be a bug in BFD, but it is
9171 easier to fix it here since that is how the other a.out targets
9172 work. */
9173 int align;
9174
9175 align = bfd_get_section_alignment (stdoutput, segment);
9176 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
9177 }
9178#endif
9179
9180 return size;
9181}
9182
9183/* On the i386, PC-relative offsets are relative to the start of the
9184 next instruction. That is, the address of the offset, plus its
9185 size, since the offset is always the last part of the insn. */
9186
9187long
9188md_pcrel_from (fixS *fixP)
9189{
9190 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
9191}
9192
9193#ifndef I386COFF
9194
9195static void
9196s_bss (int ignore ATTRIBUTE_UNUSED)
9197{
9198 int temp;
9199
9200#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9201 if (IS_ELF)
9202 obj_elf_section_change_hook ();
9203#endif
9204 temp = get_absolute_expression ();
9205 subseg_set (bss_section, (subsegT) temp);
9206 demand_empty_rest_of_line ();
9207}
9208
9209#endif
9210
9211void
9212i386_validate_fix (fixS *fixp)
9213{
9214 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
9215 {
9216 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
9217 {
9218 if (!object_64bit)
9219 abort ();
9220 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
9221 }
9222 else
9223 {
9224 if (!object_64bit)
9225 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
9226 else
9227 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
9228 }
9229 fixp->fx_subsy = 0;
9230 }
9231}
9232
/* Translate the internal fixup FIXP into a BFD relocation (arelent)
   for SECTION.  Returns a malloc'd arelent, or NULL when the fixup
   was fully resolved here (size relocations against local symbols).  */
arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  switch (fixp->fx_r_type)
    {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case BFD_RELOC_SIZE32:
    case BFD_RELOC_SIZE64:
      if (S_IS_DEFINED (fixp->fx_addsy)
	  && !S_IS_EXTERNAL (fixp->fx_addsy))
	{
	  /* Resolve size relocation against local symbol to size of
	     the symbol plus addend.  */
	  valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
	  if (fixp->fx_r_type == BFD_RELOC_SIZE32
	      && !fits_in_unsigned_long (value))
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("symbol size computation overflow"));
	  fixp->fx_addsy = NULL;
	  fixp->fx_subsy = NULL;
	  md_apply_fix (fixp, (valueT *) &value, NULL);
	  return NULL;
	}
#endif
      /* Fall through: size relocations against global symbols keep
	 their relocation type, like the cases below.  */

    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      /* These relocation types pass through unchanged.  */
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	{
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
	  break;
	}
      /* Fall through: a pc-relative 32S is resized like any other
	 pc-relative relocation.  */
    default:
      /* For everything else, derive a generic relocation type from
	 the fixup's size and pc-relativity.  */
      if (fixp->fx_pcrel)
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32_PCREL;
	      break;
	    case 1: code = BFD_RELOC_8_PCREL; break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4: code = BFD_RELOC_32_PCREL; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64_PCREL; break;
#endif
	    }
	}
      else
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32;
	      break;
	    case 1: code = BFD_RELOC_8; break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64; break;
#endif
	    }
	}
      break;
    }

  /* 32-bit references to _GLOBAL_OFFSET_TABLE_ itself become GOTPC
     relocations.  */
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
	code = BFD_RELOC_386_GOTPC;
      else
	code = BFD_RELOC_X86_64_GOTPC32;
    }
  /* Likewise for 64-bit references.  */
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  rel = (arelent *) xmalloc (sizeof (arelent));
  rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
      rel->addend = 0;
    }
  /* Use the rela in 64bit mode.  */
  else
    {
      /* x32 objects cannot represent 64-bit-only relocation types.  */
      if (disallow_64bit_reloc)
	switch (code)
	  {
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
	    break;
	  default:
	    break;
	  }

      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
      else
	switch (code)
	  {
	  /* For these pc-relative GOT/PLT/TLS relocations the addend
	     is biased by the fixup size rather than by the pc.  */
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    break;
	  default:
	    rel->addend = (section->vma
			   - fixp->fx_size
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));
	    break;
	  }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}
9446
9447#include "tc-i386-intel.c"
9448
9449void
9450tc_x86_parse_to_dw2regnum (expressionS *exp)
9451{
9452 int saved_naked_reg;
9453 char saved_register_dot;
9454
9455 saved_naked_reg = allow_naked_reg;
9456 allow_naked_reg = 1;
9457 saved_register_dot = register_chars['.'];
9458 register_chars['.'] = '.';
9459 allow_pseudo_reg = 1;
9460 expression_and_evaluate (exp);
9461 allow_pseudo_reg = 0;
9462 register_chars['.'] = saved_register_dot;
9463 allow_naked_reg = saved_naked_reg;
9464
9465 if (exp->X_op == O_register && exp->X_add_number >= 0)
9466 {
9467 if ((addressT) exp->X_add_number < i386_regtab_size)
9468 {
9469 exp->X_op = O_constant;
9470 exp->X_add_number = i386_regtab[exp->X_add_number]
9471 .dw2_regnum[flag_code >> 1];
9472 }
9473 else
9474 exp->X_op = O_illegal;
9475 }
9476}
9477
/* Emit the initial CFI instructions for a frame: CFA is the stack
   pointer, and the return address is stored at the CFA.  The stack
   pointer's DWARF register number is computed once per code size and
   cached.  */
void
tc_x86_frame_initial_instructions (void)
{
  /* Cached DWARF regnum of esp/rsp, indexed by code size.  A value of
     zero means "not yet computed" (the real numbers are non-zero).  */
  static unsigned int sp_regno[2];

  if (!sp_regno[flag_code >> 1])
    {
      /* Reuse the register parser by temporarily pointing the input
	 at a literal stack-pointer name.  */
      char *saved_input = input_line_pointer;
      char sp[][4] = {"esp", "rsp"};
      expressionS exp;

      input_line_pointer = sp[flag_code >> 1];
      tc_x86_parse_to_dw2regnum (&exp);
      gas_assert (exp.X_op == O_constant);
      sp_regno[flag_code >> 1] = exp.X_add_number;
      input_line_pointer = saved_input;
    }

  cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
  cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
}
9499
9500int
9501x86_dwarf2_addr_size (void)
9502{
9503#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9504 if (x86_elf_abi == X86_64_X32_ABI)
9505 return 4;
9506#endif
9507 return bfd_arch_bits_per_address (stdoutput) / 8;
9508}
9509
9510int
9511i386_elf_section_type (const char *str, size_t len)
9512{
9513 if (flag_code == CODE_64BIT
9514 && len == sizeof ("unwind") - 1
9515 && strncmp (str, "unwind", 6) == 0)
9516 return SHT_X86_64_UNWIND;
9517
9518 return -1;
9519}
9520
9521#ifdef TE_SOLARIS
9522void
9523i386_solaris_fix_up_eh_frame (segT sec)
9524{
9525 if (flag_code == CODE_64BIT)
9526 elf_section_type (sec) = SHT_X86_64_UNWIND;
9527}
9528#endif
9529
9530#ifdef TE_PE
9531void
9532tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9533{
9534 expressionS exp;
9535
9536 exp.X_op = O_secrel;
9537 exp.X_add_symbol = symbol;
9538 exp.X_add_number = 0;
9539 emit_expr (&exp, size);
9540}
9541#endif
9542
9543#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9544/* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9545
9546bfd_vma
9547x86_64_section_letter (int letter, char **ptr_msg)
9548{
9549 if (flag_code == CODE_64BIT)
9550 {
9551 if (letter == 'l')
9552 return SHF_X86_64_LARGE;
9553
9554 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9555 }
9556 else
9557 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9558 return -1;
9559}
9560
9561bfd_vma
9562x86_64_section_word (char *str, size_t len)
9563{
9564 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9565 return SHF_X86_64_LARGE;
9566
9567 return -1;
9568}
9569
/* Handle the .largecomm pseudo-op.  In 32-bit mode this degrades to an
   ordinary .comm with a warning.  In 64-bit mode the common symbol is
   placed in the large common section (.lbss for local symbols), by
   temporarily swapping the common/bss section globals around the
   shared s_comm_internal parser.  */
static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      /* Created once, on first use.  */
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      if (lbss_section == NULL)
	{
	  flagword applicable;
	  segT seg = now_seg;
	  subsegT subseg = now_subseg;

	  /* The .lbss section is for local .largecomm symbols.  */
	  lbss_section = subseg_new (".lbss", 0);
	  applicable = bfd_applicable_section_flags (stdoutput);
	  bfd_set_section_flags (stdoutput, lbss_section,
				 applicable & SEC_ALLOC);
	  seg_info (lbss_section)->bss = 1;

	  /* subseg_new changed the current section; restore it.  */
	  subseg_set (seg, subseg);
	}

      /* Redirect the common parser at the large-common sections ...  */
      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      /* ... and put things back the way they were.  */
      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
9609#endif /* OBJ_ELF || OBJ_MAYBE_ELF */
This page took 0.054207 seconds and 4 git commands to generate.