Fix the array access for BFD_RELOC_386_IRELATIVE.
[deliverable/binutils-gdb.git] / gas / config / tc-i386.c
1/* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23/* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
29
30#include "as.h"
31#include "safe-ctype.h"
32#include "subsegs.h"
33#include "dwarf2dbg.h"
34#include "dw2gencfi.h"
35#include "elf/x86-64.h"
36#include "opcodes/i386-init.h"
37
38#ifndef REGISTER_WARNINGS
39#define REGISTER_WARNINGS 1
40#endif
41
42#ifndef INFER_ADDR_PREFIX
43#define INFER_ADDR_PREFIX 1
44#endif
45
46#ifndef DEFAULT_ARCH
47#define DEFAULT_ARCH "i386"
48#endif
49
50#ifndef INLINE
51#if __GNUC__ >= 2
52#define INLINE __inline__
53#else
54#define INLINE
55#endif
56#endif
57
58/* Prefixes will be emitted in the order defined below.
 59 WAIT_PREFIX must be the first prefix since FWAIT really is an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 REP_PREFIX, LOCK_PREFIX. */
63#define WAIT_PREFIX 0
64#define SEG_PREFIX 1
65#define ADDR_PREFIX 2
66#define DATA_PREFIX 3
67#define REP_PREFIX 4
68#define LOCK_PREFIX 5
69#define REX_PREFIX 6 /* must come last. */
70#define MAX_PREFIXES 7 /* max prefixes per opcode */
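/* Illustrative note (not from the original comments): i.prefix[] in struct
   _i386_insn is indexed by the constants above and its bytes are emitted in
   increasing index order.  So, for example, an insn carrying an %fs override
   byte in SEG_PREFIX and a lock byte in LOCK_PREFIX (0x64 and 0xf0 in the
   usual x86 encodings, defined outside this file) is emitted as the segment
   override, then the lock byte, with any REX byte last, immediately before
   the opcode.  */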
71
72/* we define the syntax here (modulo base,index,scale syntax) */
73#define REGISTER_PREFIX '%'
74#define IMMEDIATE_PREFIX '$'
75#define ABSOLUTE_PREFIX '*'
76
77/* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79#define WORD_MNEM_SUFFIX 'w'
80#define BYTE_MNEM_SUFFIX 'b'
81#define SHORT_MNEM_SUFFIX 's'
82#define LONG_MNEM_SUFFIX 'l'
83#define QWORD_MNEM_SUFFIX 'q'
84#define XMMWORD_MNEM_SUFFIX 'x'
85#define YMMWORD_MNEM_SUFFIX 'y'
 86/* Intel Syntax. Use a non-ascii letter since it never appears
87 in instructions. */
88#define LONG_DOUBLE_MNEM_SUFFIX '\1'
89
90#define END_OF_INSN '\0'
91
92/*
93 'templates' is for grouping together 'template' structures for opcodes
94 of the same name. This is only used for storing the insns in the grand
95 ole hash table of insns.
96 The templates themselves start at START and range up to (but not including)
97 END.
98 */
99typedef struct
100{
101 const insn_template *start;
102 const insn_template *end;
103}
104templates;
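/* Illustrative sketch (not in the original): a mnemonic's templates are
   walked as the half-open range [start, end), e.g.

     const insn_template *t;
     for (t = current_templates->start; t < current_templates->end; t++)
       ...try to match *t against the parsed operands...

   which is how match_template () locates a usable encoding.  */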
105
106/* 386 operand encoding bytes: see 386 book for details of this. */
107typedef struct
108{
109 unsigned int regmem; /* codes register or memory operand */
110 unsigned int reg; /* codes register operand (or extended opcode) */
111 unsigned int mode; /* how to interpret regmem & reg */
112}
113modrm_byte;
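/* For reference (standard x86 encoding, not spelled out above): the three
   fields pack into a single ModRM byte as
     (mode << 6) | (reg << 3) | regmem
   i.e. mode occupies bits 7-6, reg bits 5-3 and regmem bits 2-0.  */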
114
115/* x86-64 extension prefix. */
116typedef int rex_byte;
117
118/* 386 opcode byte to code indirect addressing. */
119typedef struct
120{
121 unsigned base;
122 unsigned index;
123 unsigned scale;
124}
125sib_byte;
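/* For reference (standard x86 encoding): the SIB byte is packed as
     (scale << 6) | (index << 3) | base
   where SCALE holds log2 of the scale factor (1/2/4/8 -> 0/1/2/3),
   matching log2_scale_factor in struct _i386_insn below.  */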
126
127/* x86 arch names, types and features */
128typedef struct
129{
130 const char *name; /* arch name */
131 unsigned int len; /* arch string length */
132 enum processor_type type; /* arch type */
133 i386_cpu_flags flags; /* cpu feature flags */
134 unsigned int skip; /* show_arch should skip this. */
135 unsigned int negated; /* turn off indicated flags. */
136}
137arch_entry;
138
139static void update_code_flag (int, int);
140static void set_code_flag (int);
141static void set_16bit_gcc_code_flag (int);
142static void set_intel_syntax (int);
143static void set_intel_mnemonic (int);
144static void set_allow_index_reg (int);
145static void set_sse_check (int);
146static void set_cpu_arch (int);
147#ifdef TE_PE
148static void pe_directive_secrel (int);
149#endif
150static void signed_cons (int);
151static char *output_invalid (int c);
152static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
153 const char *);
154static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
155 const char *);
156static int i386_att_operand (char *);
157static int i386_intel_operand (char *, int);
158static int i386_intel_simplify (expressionS *);
159static int i386_intel_parse_name (const char *, expressionS *);
160static const reg_entry *parse_register (char *, char **);
161static char *parse_insn (char *, char *);
162static char *parse_operands (char *, const char *);
163static void swap_operands (void);
164static void swap_2_operands (int, int);
165static void optimize_imm (void);
166static void optimize_disp (void);
167static const insn_template *match_template (void);
168static int check_string (void);
169static int process_suffix (void);
170static int check_byte_reg (void);
171static int check_long_reg (void);
172static int check_qword_reg (void);
173static int check_word_reg (void);
174static int finalize_imm (void);
175static int process_operands (void);
176static const seg_entry *build_modrm_byte (void);
177static void output_insn (void);
178static void output_imm (fragS *, offsetT);
179static void output_disp (fragS *, offsetT);
180#ifndef I386COFF
181static void s_bss (int);
182#endif
183#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
184static void handle_large_common (int small ATTRIBUTE_UNUSED);
185static void handle_quad (int);
186#endif
187
188static const char *default_arch = DEFAULT_ARCH;
189
190/* VEX prefix. */
191typedef struct
192{
193 /* VEX prefix is either 2 byte or 3 byte. */
194 unsigned char bytes[3];
195 unsigned int length;
196 /* Destination or source register specifier. */
197 const reg_entry *register_specifier;
198} vex_prefix;
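/* For reference (standard AVX encoding, assumed rather than restated here):
   the 2-byte VEX form starts with 0xc5 and the 3-byte form with 0xc4;
   LENGTH records which form was chosen and BYTES[] holds the raw prefix
   bytes in emission order.  */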
199
200/* 'md_assemble ()' gathers together information and puts it into a
201 i386_insn. */
202
203union i386_op
204 {
205 expressionS *disps;
206 expressionS *imms;
207 const reg_entry *regs;
208 };
209
210enum i386_error
211 {
212 operand_size_mismatch,
213 operand_type_mismatch,
214 register_type_mismatch,
215 number_of_operands_mismatch,
216 invalid_instruction_suffix,
217 bad_imm4,
218 old_gcc_only,
219 unsupported_with_intel_mnemonic,
220 unsupported_syntax,
221 unsupported
222 };
223
224struct _i386_insn
225 {
 226 /* TM holds the template for the insn we're currently assembling. */
227 insn_template tm;
228
229 /* SUFFIX holds the instruction size suffix for byte, word, dword
230 or qword, if given. */
231 char suffix;
232
233 /* OPERANDS gives the number of given operands. */
234 unsigned int operands;
235
236 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
237 of given register, displacement, memory operands and immediate
238 operands. */
239 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
240
241 /* TYPES [i] is the type (see above #defines) which tells us how to
242 use OP[i] for the corresponding operand. */
243 i386_operand_type types[MAX_OPERANDS];
244
245 /* Displacement expression, immediate expression, or register for each
246 operand. */
247 union i386_op op[MAX_OPERANDS];
248
249 /* Flags for operands. */
250 unsigned int flags[MAX_OPERANDS];
251#define Operand_PCrel 1
252
253 /* Relocation type for operand */
254 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
255
256 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
257 the base index byte below. */
258 const reg_entry *base_reg;
259 const reg_entry *index_reg;
260 unsigned int log2_scale_factor;
261
262 /* SEG gives the seg_entries of this insn. They are zero unless
263 explicit segment overrides are given. */
264 const seg_entry *seg[2];
265
266 /* PREFIX holds all the given prefix opcodes (usually null).
267 PREFIXES is the number of prefix opcodes. */
268 unsigned int prefixes;
269 unsigned char prefix[MAX_PREFIXES];
270
271 /* RM and SIB are the modrm byte and the sib byte where the
272 addressing modes of this insn are encoded. */
273 modrm_byte rm;
274 rex_byte rex;
275 sib_byte sib;
276 vex_prefix vex;
277
278 /* Swap operand in encoding. */
279 unsigned int swap_operand;
280
281 /* Force 32bit displacement in encoding. */
282 unsigned int disp32_encoding;
283
284 /* Error message. */
285 enum i386_error error;
286 };
287
288typedef struct _i386_insn i386_insn;
289
290/* List of chars besides those in app.c:symbol_chars that can start an
291 operand. Used to prevent the scrubber eating vital white-space. */
292const char extra_symbol_chars[] = "*%-(["
293#ifdef LEX_AT
294 "@"
295#endif
296#ifdef LEX_QM
297 "?"
298#endif
299 ;
300
301#if (defined (TE_I386AIX) \
302 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
303 && !defined (TE_GNU) \
304 && !defined (TE_LINUX) \
305 && !defined (TE_NETWARE) \
306 && !defined (TE_FreeBSD) \
307 && !defined (TE_DragonFly) \
308 && !defined (TE_NetBSD)))
309/* This array holds the chars that always start a comment. If the
310 pre-processor is disabled, these aren't very useful. The option
311 --divide will remove '/' from this list. */
312const char *i386_comment_chars = "#/";
313#define SVR4_COMMENT_CHARS 1
314#define PREFIX_SEPARATOR '\\'
315
316#else
317const char *i386_comment_chars = "#";
318#define PREFIX_SEPARATOR '/'
319#endif
320
321/* This array holds the chars that only start a comment at the beginning of
322 a line. If the line seems to have the form '# 123 filename'
323 .line and .file directives will appear in the pre-processed output.
324 Note that input_file.c hand checks for '#' at the beginning of the
325 first line of the input file. This is because the compiler outputs
326 #NO_APP at the beginning of its output.
327 Also note that comments started like this one will always work if
328 '/' isn't otherwise defined. */
329const char line_comment_chars[] = "#/";
330
331const char line_separator_chars[] = ";";
332
333/* Chars that can be used to separate mant from exp in floating point
334 nums. */
335const char EXP_CHARS[] = "eE";
336
337/* Chars that mean this number is a floating point constant
338 As in 0f12.456
339 or 0d1.2345e12. */
340const char FLT_CHARS[] = "fFdDxX";
341
342/* Tables for lexical analysis. */
343static char mnemonic_chars[256];
344static char register_chars[256];
345static char operand_chars[256];
346static char identifier_chars[256];
347static char digit_chars[256];
348
349/* Lexical macros. */
350#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
351#define is_operand_char(x) (operand_chars[(unsigned char) x])
352#define is_register_char(x) (register_chars[(unsigned char) x])
353#define is_space_char(x) ((x) == ' ')
354#define is_identifier_char(x) (identifier_chars[(unsigned char) x])
355#define is_digit_char(x) (digit_chars[(unsigned char) x])
356
357/* All non-digit non-letter characters that may occur in an operand. */
358static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
359
360/* md_assemble() always leaves the strings it's passed unaltered. To
361 effect this we maintain a stack of saved characters that we've smashed
362 with '\0's (indicating end of strings for various sub-fields of the
363 assembler instruction). */
364static char save_stack[32];
365static char *save_stack_p;
366#define END_STRING_AND_SAVE(s) \
367 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
368#define RESTORE_END_STRING(s) \
369 do { *(s) = *--save_stack_p; } while (0)
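/* Typical (illustrative) use of the pair above: temporarily NUL-terminate
   a sub-field at L, parse it, then put the smashed character back:

     END_STRING_AND_SAVE (l);
     ...parse the string now ending at L...
     RESTORE_END_STRING (l);

   The 32-byte save_stack bounds how deeply such regions may nest.  */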
370
371/* The instruction we're assembling. */
372static i386_insn i;
373
374/* Possible templates for current insn. */
375static const templates *current_templates;
376
377/* Per instruction expressionS buffers: max displacements & immediates. */
378static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
379static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
380
381/* Current operand we are working on. */
382static int this_operand = -1;
383
 384/* We support three different modes. FLAG_CODE variable is used to distinguish
385 these. */
386
387enum flag_code {
388 CODE_32BIT,
389 CODE_16BIT,
390 CODE_64BIT };
391
392static enum flag_code flag_code;
393static unsigned int object_64bit;
394static unsigned int disallow_64bit_reloc;
395static int use_rela_relocations = 0;
396
397#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
398 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
399 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
400
401/* The ELF ABI to use. */
402enum x86_elf_abi
403{
404 I386_ABI,
405 X86_64_ABI,
406 X86_64_X32_ABI
407};
408
409static enum x86_elf_abi x86_elf_abi = I386_ABI;
410#endif
411
412/* The names used to print error messages. */
413static const char *flag_code_names[] =
414 {
415 "32",
416 "16",
417 "64"
418 };
419
420/* 1 for intel syntax,
421 0 if att syntax. */
422static int intel_syntax = 0;
423
424/* 1 for intel mnemonic,
425 0 if att mnemonic. */
426static int intel_mnemonic = !SYSV386_COMPAT;
427
428/* 1 if support old (<= 2.8.1) versions of gcc. */
429static int old_gcc = OLDGCC_COMPAT;
430
431/* 1 if pseudo registers are permitted. */
432static int allow_pseudo_reg = 0;
433
434/* 1 if register prefix % not required. */
435static int allow_naked_reg = 0;
436
 437/* 1 if pseudo index register, eiz/riz, is allowed. */
438static int allow_index_reg = 0;
439
440static enum
441 {
442 sse_check_none = 0,
443 sse_check_warning,
444 sse_check_error
445 }
446sse_check;
447
448/* Register prefix used for error message. */
449static const char *register_prefix = "%";
450
451/* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
452 leave, push, and pop instructions so that gcc has the same stack
453 frame as in 32 bit mode. */
454static char stackop_size = '\0';
455
456/* Non-zero to optimize code alignment. */
457int optimize_align_code = 1;
458
459/* Non-zero to quieten some warnings. */
460static int quiet_warnings = 0;
461
462/* CPU name. */
463static const char *cpu_arch_name = NULL;
464static char *cpu_sub_arch_name = NULL;
465
466/* CPU feature flags. */
467static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
468
469/* If we have selected a cpu we are generating instructions for. */
470static int cpu_arch_tune_set = 0;
471
472/* Cpu we are generating instructions for. */
473enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
474
475/* CPU feature flags of cpu we are generating instructions for. */
476static i386_cpu_flags cpu_arch_tune_flags;
477
478/* CPU instruction set architecture used. */
479enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
480
481/* CPU feature flags of instruction set architecture used. */
482i386_cpu_flags cpu_arch_isa_flags;
483
 484/* If set, conditional jumps are not automatically promoted to handle
 485 offsets larger than a byte. */
486static unsigned int no_cond_jump_promotion = 0;
487
488/* Encode SSE instructions with VEX prefix. */
489static unsigned int sse2avx;
490
491/* Encode scalar AVX instructions with specific vector length. */
492static enum
493 {
494 vex128 = 0,
495 vex256
496 } avxscalar;
497
498/* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
499static symbolS *GOT_symbol;
500
501/* The dwarf2 return column, adjusted for 32 or 64 bit. */
502unsigned int x86_dwarf2_return_column;
503
504/* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
505int x86_cie_data_alignment;
506
507/* Interface to relax_segment.
508 There are 3 major relax states for 386 jump insns because the
509 different types of jumps add different sizes to frags when we're
510 figuring out what sort of jump to choose to reach a given label. */
511
512/* Types. */
513#define UNCOND_JUMP 0
514#define COND_JUMP 1
515#define COND_JUMP86 2
516
517/* Sizes. */
518#define CODE16 1
519#define SMALL 0
520#define SMALL16 (SMALL | CODE16)
521#define BIG 2
522#define BIG16 (BIG | CODE16)
523
524#ifndef INLINE
525#ifdef __GNUC__
526#define INLINE __inline__
527#else
528#define INLINE
529#endif
530#endif
531
532#define ENCODE_RELAX_STATE(type, size) \
533 ((relax_substateT) (((type) << 2) | (size)))
534#define TYPE_FROM_RELAX_STATE(s) \
535 ((s) >> 2)
536#define DISP_SIZE_FROM_RELAX_STATE(s) \
537 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
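/* Worked example (derived from the definitions above): for a 16-bit
   conditional jump that needs a long displacement,
     ENCODE_RELAX_STATE (COND_JUMP, BIG16) == (1 << 2) | 3 == 7,
   TYPE_FROM_RELAX_STATE (7) == COND_JUMP, and
   DISP_SIZE_FROM_RELAX_STATE (7) == 2 displacement bytes.  */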
538
539/* This table is used by relax_frag to promote short jumps to long
540 ones where necessary. SMALL (short) jumps may be promoted to BIG
541 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
542 don't allow a short jump in a 32 bit code segment to be promoted to
543 a 16 bit offset jump because it's slower (requires data size
544 prefix), and doesn't work, unless the destination is in the bottom
545 64k of the code segment (The top 16 bits of eip are zeroed). */
546
547const relax_typeS md_relax_table[] =
548{
549 /* The fields are:
550 1) most positive reach of this state,
551 2) most negative reach of this state,
552 3) how many bytes this mode will have in the variable part of the frag
553 4) which index into the table to try if we can't fit into this one. */
554
555 /* UNCOND_JUMP states. */
556 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
557 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
558 /* dword jmp adds 4 bytes to frag:
559 0 extra opcode bytes, 4 displacement bytes. */
560 {0, 0, 4, 0},
 561 /* word jmp adds 2 bytes to frag:
562 0 extra opcode bytes, 2 displacement bytes. */
563 {0, 0, 2, 0},
564
565 /* COND_JUMP states. */
566 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
567 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
 568 /* dword conditionals add 5 bytes to frag:
569 1 extra opcode byte, 4 displacement bytes. */
570 {0, 0, 5, 0},
571 /* word conditionals add 3 bytes to frag:
572 1 extra opcode byte, 2 displacement bytes. */
573 {0, 0, 3, 0},
574
575 /* COND_JUMP86 states. */
576 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
577 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
 578 /* dword conditionals add 5 bytes to frag:
579 1 extra opcode byte, 4 displacement bytes. */
580 {0, 0, 5, 0},
581 /* word conditionals add 4 bytes to frag:
582 1 displacement byte and a 3 byte long branch insn. */
583 {0, 0, 4, 0}
584};
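/* Illustrative reading of the table (not part of the original comments):
   the relax substate itself is the index, so
   ENCODE_RELAX_STATE (UNCOND_JUMP, BIG) == 2 selects the {0, 0, 4, 0}
   entry (a dword jmp contributing 4 displacement bytes), while the SMALL
   entry at index 0 names that BIG state as the one to grow into once the
   short-displacement reach is exceeded.  */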
585
586static const arch_entry cpu_arch[] =
587{
588 /* Do not replace the first two entries - i386_target_format()
589 relies on them being there in this order. */
590 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
591 CPU_GENERIC32_FLAGS, 0, 0 },
592 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
593 CPU_GENERIC64_FLAGS, 0, 0 },
594 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
595 CPU_NONE_FLAGS, 0, 0 },
596 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
597 CPU_I186_FLAGS, 0, 0 },
598 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
599 CPU_I286_FLAGS, 0, 0 },
600 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
601 CPU_I386_FLAGS, 0, 0 },
602 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
603 CPU_I486_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
605 CPU_I586_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
607 CPU_I686_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
609 CPU_I586_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
611 CPU_PENTIUMPRO_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
613 CPU_P2_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
615 CPU_P3_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
617 CPU_P4_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
619 CPU_CORE_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
621 CPU_NOCONA_FLAGS, 0, 0 },
622 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
623 CPU_CORE_FLAGS, 1, 0 },
624 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
625 CPU_CORE_FLAGS, 0, 0 },
626 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
627 CPU_CORE2_FLAGS, 1, 0 },
628 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
629 CPU_CORE2_FLAGS, 0, 0 },
630 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
631 CPU_COREI7_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
633 CPU_L1OM_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
635 CPU_K6_FLAGS, 0, 0 },
636 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
637 CPU_K6_2_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
639 CPU_ATHLON_FLAGS, 0, 0 },
640 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
641 CPU_K8_FLAGS, 1, 0 },
642 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
643 CPU_K8_FLAGS, 0, 0 },
644 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
645 CPU_K8_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
647 CPU_AMDFAM10_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BDVER1,
649 CPU_BDVER1_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
651 CPU_8087_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
653 CPU_287_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
655 CPU_387_FLAGS, 0, 0 },
656 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
657 CPU_ANY87_FLAGS, 0, 1 },
658 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
659 CPU_MMX_FLAGS, 0, 0 },
660 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
661 CPU_3DNOWA_FLAGS, 0, 1 },
662 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
663 CPU_SSE_FLAGS, 0, 0 },
664 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
665 CPU_SSE2_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
667 CPU_SSE3_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
669 CPU_SSSE3_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
671 CPU_SSE4_1_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
673 CPU_SSE4_2_FLAGS, 0, 0 },
674 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
675 CPU_SSE4_2_FLAGS, 0, 0 },
676 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
677 CPU_ANY_SSE_FLAGS, 0, 1 },
678 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
679 CPU_AVX_FLAGS, 0, 0 },
680 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
681 CPU_ANY_AVX_FLAGS, 0, 1 },
682 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
683 CPU_VMX_FLAGS, 0, 0 },
684 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
685 CPU_SMX_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
687 CPU_XSAVE_FLAGS, 0, 0 },
688 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
689 CPU_XSAVEOPT_FLAGS, 0, 0 },
690 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
691 CPU_AES_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
693 CPU_PCLMUL_FLAGS, 0, 0 },
694 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
695 CPU_PCLMUL_FLAGS, 1, 0 },
696 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
697 CPU_FSGSBASE_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
699 CPU_RDRND_FLAGS, 0, 0 },
700 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
701 CPU_F16C_FLAGS, 0, 0 },
702 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
703 CPU_FMA_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
705 CPU_FMA4_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
707 CPU_XOP_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
709 CPU_LWP_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
711 CPU_MOVBE_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
713 CPU_EPT_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
715 CPU_CLFLUSH_FLAGS, 0, 0 },
716 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
717 CPU_NOP_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
719 CPU_SYSCALL_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
721 CPU_RDTSCP_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
723 CPU_3DNOW_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
725 CPU_3DNOWA_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
727 CPU_PADLOCK_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
729 CPU_SVME_FLAGS, 1, 0 },
730 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
731 CPU_SVME_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
733 CPU_SSE4A_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
735 CPU_ABM_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
737 CPU_BMI_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
739 CPU_TBM_FLAGS, 0, 0 },
740};
741
742#ifdef I386COFF
743/* Like s_lcomm_internal in gas/read.c but the alignment string
744 is allowed to be optional. */
745
746static symbolS *
747pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
748{
749 addressT align = 0;
750
751 SKIP_WHITESPACE ();
752
753 if (needs_align
754 && *input_line_pointer == ',')
755 {
756 align = parse_align (needs_align - 1);
757
758 if (align == (addressT) -1)
759 return NULL;
760 }
761 else
762 {
763 if (size >= 8)
764 align = 3;
765 else if (size >= 4)
766 align = 2;
767 else if (size >= 2)
768 align = 1;
769 else
770 align = 0;
771 }
772
773 bss_alloc (symbolP, size, align);
774 return symbolP;
775}
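/* Note (illustrative): ALIGN is a log2 value, so a request of SIZE >= 8
   bytes defaults to align == 3, i.e. 2**3 = 8-byte alignment, which is
   the form bss_alloc () expects.  */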
776
777static void
778pe_lcomm (int needs_align)
779{
780 s_comm_internal (needs_align * 2, pe_lcomm_internal);
781}
782#endif
783
784const pseudo_typeS md_pseudo_table[] =
785{
786#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
787 {"align", s_align_bytes, 0},
788#else
789 {"align", s_align_ptwo, 0},
790#endif
791 {"arch", set_cpu_arch, 0},
792#ifndef I386COFF
793 {"bss", s_bss, 0},
794#else
795 {"lcomm", pe_lcomm, 1},
796#endif
797 {"ffloat", float_cons, 'f'},
798 {"dfloat", float_cons, 'd'},
799 {"tfloat", float_cons, 'x'},
800 {"value", cons, 2},
801 {"slong", signed_cons, 4},
802 {"noopt", s_ignore, 0},
803 {"optim", s_ignore, 0},
804 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
805 {"code16", set_code_flag, CODE_16BIT},
806 {"code32", set_code_flag, CODE_32BIT},
807 {"code64", set_code_flag, CODE_64BIT},
808 {"intel_syntax", set_intel_syntax, 1},
809 {"att_syntax", set_intel_syntax, 0},
810 {"intel_mnemonic", set_intel_mnemonic, 1},
811 {"att_mnemonic", set_intel_mnemonic, 0},
812 {"allow_index_reg", set_allow_index_reg, 1},
813 {"disallow_index_reg", set_allow_index_reg, 0},
814 {"sse_check", set_sse_check, 0},
815#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
816 {"largecomm", handle_large_common, 0},
817 {"quad", handle_quad, 8},
818#else
819 {"file", (void (*) (int)) dwarf2_directive_file, 0},
820 {"loc", dwarf2_directive_loc, 0},
821 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
822#endif
823#ifdef TE_PE
824 {"secrel32", pe_directive_secrel, 0},
825#endif
826 {0, 0, 0}
827};
828
829/* For interface with expression (). */
830extern char *input_line_pointer;
831
832/* Hash table for instruction mnemonic lookup. */
833static struct hash_control *op_hash;
834
835/* Hash table for register lookup. */
836static struct hash_control *reg_hash;
837\f
838void
839i386_align_code (fragS *fragP, int count)
840{
841 /* Various efficient no-op patterns for aligning code labels.
842 Note: Don't try to assemble the instructions in the comments.
843 0L and 0w are not legal. */
844 static const char f32_1[] =
845 {0x90}; /* nop */
846 static const char f32_2[] =
847 {0x66,0x90}; /* xchg %ax,%ax */
848 static const char f32_3[] =
849 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
850 static const char f32_4[] =
851 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
852 static const char f32_5[] =
853 {0x90, /* nop */
854 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
855 static const char f32_6[] =
856 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
857 static const char f32_7[] =
858 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
859 static const char f32_8[] =
860 {0x90, /* nop */
861 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
862 static const char f32_9[] =
863 {0x89,0xf6, /* movl %esi,%esi */
864 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
865 static const char f32_10[] =
866 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
867 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
868 static const char f32_11[] =
869 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
870 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
871 static const char f32_12[] =
872 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
873 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
874 static const char f32_13[] =
875 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
876 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
877 static const char f32_14[] =
878 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
879 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
880 static const char f16_3[] =
881 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
882 static const char f16_4[] =
883 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
884 static const char f16_5[] =
885 {0x90, /* nop */
886 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
887 static const char f16_6[] =
888 {0x89,0xf6, /* mov %si,%si */
889 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
890 static const char f16_7[] =
891 {0x8d,0x74,0x00, /* lea 0(%si),%si */
892 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
893 static const char f16_8[] =
894 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
895 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
896 static const char jump_31[] =
897 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
898 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
899 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
900 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
901 static const char *const f32_patt[] = {
902 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
903 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
904 };
905 static const char *const f16_patt[] = {
906 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
907 };
908 /* nopl (%[re]ax) */
909 static const char alt_3[] =
910 {0x0f,0x1f,0x00};
911 /* nopl 0(%[re]ax) */
912 static const char alt_4[] =
913 {0x0f,0x1f,0x40,0x00};
914 /* nopl 0(%[re]ax,%[re]ax,1) */
915 static const char alt_5[] =
916 {0x0f,0x1f,0x44,0x00,0x00};
917 /* nopw 0(%[re]ax,%[re]ax,1) */
918 static const char alt_6[] =
919 {0x66,0x0f,0x1f,0x44,0x00,0x00};
920 /* nopl 0L(%[re]ax) */
921 static const char alt_7[] =
922 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
923 /* nopl 0L(%[re]ax,%[re]ax,1) */
924 static const char alt_8[] =
925 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
926 /* nopw 0L(%[re]ax,%[re]ax,1) */
927 static const char alt_9[] =
928 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
929 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
930 static const char alt_10[] =
931 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
932 /* data16
933 nopw %cs:0L(%[re]ax,%[re]ax,1) */
934 static const char alt_long_11[] =
935 {0x66,
936 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
937 /* data16
938 data16
939 nopw %cs:0L(%[re]ax,%[re]ax,1) */
940 static const char alt_long_12[] =
941 {0x66,
942 0x66,
943 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
944 /* data16
945 data16
946 data16
947 nopw %cs:0L(%[re]ax,%[re]ax,1) */
948 static const char alt_long_13[] =
949 {0x66,
950 0x66,
951 0x66,
952 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
953 /* data16
954 data16
955 data16
956 data16
957 nopw %cs:0L(%[re]ax,%[re]ax,1) */
958 static const char alt_long_14[] =
959 {0x66,
960 0x66,
961 0x66,
962 0x66,
963 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
964 /* data16
965 data16
966 data16
967 data16
968 data16
969 nopw %cs:0L(%[re]ax,%[re]ax,1) */
970 static const char alt_long_15[] =
971 {0x66,
972 0x66,
973 0x66,
974 0x66,
975 0x66,
976 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
977 /* nopl 0(%[re]ax,%[re]ax,1)
978 nopw 0(%[re]ax,%[re]ax,1) */
979 static const char alt_short_11[] =
980 {0x0f,0x1f,0x44,0x00,0x00,
981 0x66,0x0f,0x1f,0x44,0x00,0x00};
982 /* nopw 0(%[re]ax,%[re]ax,1)
983 nopw 0(%[re]ax,%[re]ax,1) */
984 static const char alt_short_12[] =
985 {0x66,0x0f,0x1f,0x44,0x00,0x00,
986 0x66,0x0f,0x1f,0x44,0x00,0x00};
987 /* nopw 0(%[re]ax,%[re]ax,1)
988 nopl 0L(%[re]ax) */
989 static const char alt_short_13[] =
990 {0x66,0x0f,0x1f,0x44,0x00,0x00,
991 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
992 /* nopl 0L(%[re]ax)
993 nopl 0L(%[re]ax) */
994 static const char alt_short_14[] =
995 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
996 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
997 /* nopl 0L(%[re]ax)
998 nopl 0L(%[re]ax,%[re]ax,1) */
999 static const char alt_short_15[] =
1000 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1001 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1002 static const char *const alt_short_patt[] = {
1003 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1004 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1005 alt_short_14, alt_short_15
1006 };
1007 static const char *const alt_long_patt[] = {
1008 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1009 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1010 alt_long_14, alt_long_15
1011 };
1012
 1013 /* Only align if the requested count is positive and within the supported range. */
1014 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1015 return;
1016
1017 /* We need to decide which NOP sequence to use for 32bit and
1018 64bit. When -mtune= is used:
1019
1020 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1021 PROCESSOR_GENERIC32, f32_patt will be used.
1022 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1023 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1024 PROCESSOR_GENERIC64, alt_long_patt will be used.
1025 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1026 PROCESSOR_AMDFAM10, and PROCESSOR_BDVER1, alt_short_patt
1027 will be used.
1028
1029 When -mtune= isn't used, alt_long_patt will be used if
1030 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1031 be used.
1032
1033 When -march= or .arch is used, we can't use anything beyond
1034 cpu_arch_isa_flags. */
1035
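  /* Worked example (illustrative, using the tables above): in 32-bit mode
     with patt == f32_patt and count == 6, patt[count - 1] is f32_6, the
     single 6-byte "leal 0L(%esi),%esi" nop.  In 16-bit mode with count > 8,
     jump_31 is copied instead and the byte at fr_fix + 1 is patched to
     count - 2 so the "jmp" skips the remaining filler.  */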
1036 if (flag_code == CODE_16BIT)
1037 {
1038 if (count > 8)
1039 {
1040 memcpy (fragP->fr_literal + fragP->fr_fix,
1041 jump_31, count);
1042 /* Adjust jump offset. */
1043 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1044 }
1045 else
1046 memcpy (fragP->fr_literal + fragP->fr_fix,
1047 f16_patt[count - 1], count);
1048 }
1049 else
1050 {
1051 const char *const *patt = NULL;
1052
1053 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1054 {
1055 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1056 switch (cpu_arch_tune)
1057 {
1058 case PROCESSOR_UNKNOWN:
1059 /* We use cpu_arch_isa_flags to check if we SHOULD
1060 optimize with nops. */
1061 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1062 patt = alt_long_patt;
1063 else
1064 patt = f32_patt;
1065 break;
1066 case PROCESSOR_PENTIUM4:
1067 case PROCESSOR_NOCONA:
1068 case PROCESSOR_CORE:
1069 case PROCESSOR_CORE2:
1070 case PROCESSOR_COREI7:
1071 case PROCESSOR_L1OM:
1072 case PROCESSOR_GENERIC64:
1073 patt = alt_long_patt;
1074 break;
1075 case PROCESSOR_K6:
1076 case PROCESSOR_ATHLON:
1077 case PROCESSOR_K8:
1078 case PROCESSOR_AMDFAM10:
1079 case PROCESSOR_BDVER1:
1080 patt = alt_short_patt;
1081 break;
1082 case PROCESSOR_I386:
1083 case PROCESSOR_I486:
1084 case PROCESSOR_PENTIUM:
1085 case PROCESSOR_PENTIUMPRO:
1086 case PROCESSOR_GENERIC32:
1087 patt = f32_patt;
1088 break;
1089 }
1090 }
1091 else
1092 {
1093 switch (fragP->tc_frag_data.tune)
1094 {
1095 case PROCESSOR_UNKNOWN:
1096 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1097 PROCESSOR_UNKNOWN. */
1098 abort ();
1099 break;
1100
1101 case PROCESSOR_I386:
1102 case PROCESSOR_I486:
1103 case PROCESSOR_PENTIUM:
1104 case PROCESSOR_K6:
1105 case PROCESSOR_ATHLON:
1106 case PROCESSOR_K8:
1107 case PROCESSOR_AMDFAM10:
1108 case PROCESSOR_BDVER1:
1109 case PROCESSOR_GENERIC32:
1110 /* We use cpu_arch_isa_flags to check if we CAN optimize
1111 with nops. */
1112 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1113 patt = alt_short_patt;
1114 else
1115 patt = f32_patt;
1116 break;
1117 case PROCESSOR_PENTIUMPRO:
1118 case PROCESSOR_PENTIUM4:
1119 case PROCESSOR_NOCONA:
1120 case PROCESSOR_CORE:
1121 case PROCESSOR_CORE2:
1122 case PROCESSOR_COREI7:
1123 case PROCESSOR_L1OM:
1124 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1125 patt = alt_long_patt;
1126 else
1127 patt = f32_patt;
1128 break;
1129 case PROCESSOR_GENERIC64:
1130 patt = alt_long_patt;
1131 break;
1132 }
1133 }
1134
1135 if (patt == f32_patt)
1136 {
 1137 /* If the padding is less than the limit, we use one of the
 1138 normal patterns. Otherwise, we use a jump instruction and
 1139 adjust its offset. */
1140 int limit;
1141
1142 /* For 64bit, the limit is 3 bytes. */
1143 if (flag_code == CODE_64BIT
1144 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1145 limit = 3;
1146 else
1147 limit = 15;
1148 if (count < limit)
1149 memcpy (fragP->fr_literal + fragP->fr_fix,
1150 patt[count - 1], count);
1151 else
1152 {
1153 memcpy (fragP->fr_literal + fragP->fr_fix,
1154 jump_31, count);
1155 /* Adjust jump offset. */
1156 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1157 }
1158 }
1159 else
1160 {
 1161 /* Maximum length of an instruction is 15 bytes. If the
1162 padding is greater than 15 bytes and we don't use jump,
1163 we have to break it into smaller pieces. */
1164 int padding = count;
1165 while (padding > 15)
1166 {
1167 padding -= 15;
1168 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1169 patt [14], 15);
1170 }
1171
1172 if (padding)
1173 memcpy (fragP->fr_literal + fragP->fr_fix,
1174 patt [padding - 1], padding);
1175 }
1176 }
1177 fragP->fr_var = count;
1178}
1179
1180static INLINE int
1181operand_type_all_zero (const union i386_operand_type *x)
1182{
1183 switch (ARRAY_SIZE(x->array))
1184 {
1185 case 3:
1186 if (x->array[2])
1187 return 0;
1188 case 2:
1189 if (x->array[1])
1190 return 0;
1191 case 1:
1192 return !x->array[0];
1193 default:
1194 abort ();
1195 }
1196}
1197
1198static INLINE void
1199operand_type_set (union i386_operand_type *x, unsigned int v)
1200{
1201 switch (ARRAY_SIZE(x->array))
1202 {
1203 case 3:
1204 x->array[2] = v;
1205 case 2:
1206 x->array[1] = v;
1207 case 1:
1208 x->array[0] = v;
1209 break;
1210 default:
1211 abort ();
1212 }
1213}
1214
1215static INLINE int
1216operand_type_equal (const union i386_operand_type *x,
1217 const union i386_operand_type *y)
1218{
1219 switch (ARRAY_SIZE(x->array))
1220 {
1221 case 3:
1222 if (x->array[2] != y->array[2])
1223 return 0;
1224 case 2:
1225 if (x->array[1] != y->array[1])
1226 return 0;
1227 case 1:
1228 return x->array[0] == y->array[0];
1229 break;
1230 default:
1231 abort ();
1232 }
1233}
1234
1235static INLINE int
1236cpu_flags_all_zero (const union i386_cpu_flags *x)
1237{
1238 switch (ARRAY_SIZE(x->array))
1239 {
1240 case 3:
1241 if (x->array[2])
1242 return 0;
1243 case 2:
1244 if (x->array[1])
1245 return 0;
1246 case 1:
1247 return !x->array[0];
1248 default:
1249 abort ();
1250 }
1251}
1252
1253static INLINE void
1254cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1255{
1256 switch (ARRAY_SIZE(x->array))
1257 {
1258 case 3:
1259 x->array[2] = v;
1260 case 2:
1261 x->array[1] = v;
1262 case 1:
1263 x->array[0] = v;
1264 break;
1265 default:
1266 abort ();
1267 }
1268}
1269
1270static INLINE int
1271cpu_flags_equal (const union i386_cpu_flags *x,
1272 const union i386_cpu_flags *y)
1273{
1274 switch (ARRAY_SIZE(x->array))
1275 {
1276 case 3:
1277 if (x->array[2] != y->array[2])
1278 return 0;
1279 case 2:
1280 if (x->array[1] != y->array[1])
1281 return 0;
1282 case 1:
1283 return x->array[0] == y->array[0];
1284 break;
1285 default:
1286 abort ();
1287 }
1288}
1289
1290static INLINE int
1291cpu_flags_check_cpu64 (i386_cpu_flags f)
1292{
1293 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1294 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1295}
1296
1297static INLINE i386_cpu_flags
1298cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1299{
1300 switch (ARRAY_SIZE (x.array))
1301 {
1302 case 3:
1303 x.array [2] &= y.array [2];
1304 case 2:
1305 x.array [1] &= y.array [1];
1306 case 1:
1307 x.array [0] &= y.array [0];
1308 break;
1309 default:
1310 abort ();
1311 }
1312 return x;
1313}
1314
1315static INLINE i386_cpu_flags
1316cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1317{
1318 switch (ARRAY_SIZE (x.array))
1319 {
1320 case 3:
1321 x.array [2] |= y.array [2];
1322 case 2:
1323 x.array [1] |= y.array [1];
1324 case 1:
1325 x.array [0] |= y.array [0];
1326 break;
1327 default:
1328 abort ();
1329 }
1330 return x;
1331}
1332
1333static INLINE i386_cpu_flags
1334cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1335{
1336 switch (ARRAY_SIZE (x.array))
1337 {
1338 case 3:
1339 x.array [2] &= ~y.array [2];
1340 case 2:
1341 x.array [1] &= ~y.array [1];
1342 case 1:
1343 x.array [0] &= ~y.array [0];
1344 break;
1345 default:
1346 abort ();
1347 }
1348 return x;
1349}
1350
1351#define CPU_FLAGS_ARCH_MATCH 0x1
1352#define CPU_FLAGS_64BIT_MATCH 0x2
1353#define CPU_FLAGS_AES_MATCH 0x4
1354#define CPU_FLAGS_PCLMUL_MATCH 0x8
1355#define CPU_FLAGS_AVX_MATCH 0x10
1356
1357#define CPU_FLAGS_32BIT_MATCH \
1358 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1359 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1360#define CPU_FLAGS_PERFECT_MATCH \
1361 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
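/* Worked arithmetic (from the definitions above):
   CPU_FLAGS_32BIT_MATCH == 0x1 | 0x4 | 0x8 | 0x10 == 0x1d and
   CPU_FLAGS_PERFECT_MATCH == 0x1d | 0x2 == 0x1f, i.e. a perfect match
   sets every individual match bit.  */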
1362
1363/* Return CPU flags match bits. */
1364
1365static int
1366cpu_flags_match (const insn_template *t)
1367{
1368 i386_cpu_flags x = t->cpu_flags;
1369 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1370
1371 x.bitfield.cpu64 = 0;
1372 x.bitfield.cpuno64 = 0;
1373
1374 if (cpu_flags_all_zero (&x))
1375 {
1376 /* This instruction is available on all archs. */
1377 match |= CPU_FLAGS_32BIT_MATCH;
1378 }
1379 else
1380 {
1381 /* This instruction is available only on some archs. */
1382 i386_cpu_flags cpu = cpu_arch_flags;
1383
1384 cpu.bitfield.cpu64 = 0;
1385 cpu.bitfield.cpuno64 = 0;
1386 cpu = cpu_flags_and (x, cpu);
1387 if (!cpu_flags_all_zero (&cpu))
1388 {
1389 if (x.bitfield.cpuavx)
1390 {
1391 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1392 if (cpu.bitfield.cpuavx)
1393 {
1394 /* Check SSE2AVX. */
 1395 if (!t->opcode_modifier.sse2avx || sse2avx)
1396 {
1397 match |= (CPU_FLAGS_ARCH_MATCH
1398 | CPU_FLAGS_AVX_MATCH);
1399 /* Check AES. */
1400 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1401 match |= CPU_FLAGS_AES_MATCH;
1402 /* Check PCLMUL. */
1403 if (!x.bitfield.cpupclmul
1404 || cpu.bitfield.cpupclmul)
1405 match |= CPU_FLAGS_PCLMUL_MATCH;
1406 }
1407 }
1408 else
1409 match |= CPU_FLAGS_ARCH_MATCH;
1410 }
1411 else
1412 match |= CPU_FLAGS_32BIT_MATCH;
1413 }
1414 }
1415 return match;
1416}
1417
1418static INLINE i386_operand_type
1419operand_type_and (i386_operand_type x, i386_operand_type y)
1420{
1421 switch (ARRAY_SIZE (x.array))
1422 {
1423 case 3:
1424 x.array [2] &= y.array [2];
1425 case 2:
1426 x.array [1] &= y.array [1];
1427 case 1:
1428 x.array [0] &= y.array [0];
1429 break;
1430 default:
1431 abort ();
1432 }
1433 return x;
1434}
1435
1436static INLINE i386_operand_type
1437operand_type_or (i386_operand_type x, i386_operand_type y)
1438{
1439 switch (ARRAY_SIZE (x.array))
1440 {
1441 case 3:
1442 x.array [2] |= y.array [2];
1443 case 2:
1444 x.array [1] |= y.array [1];
1445 case 1:
1446 x.array [0] |= y.array [0];
1447 break;
1448 default:
1449 abort ();
1450 }
1451 return x;
1452}
1453
1454static INLINE i386_operand_type
1455operand_type_xor (i386_operand_type x, i386_operand_type y)
1456{
1457 switch (ARRAY_SIZE (x.array))
1458 {
1459 case 3:
1460 x.array [2] ^= y.array [2];
1461 case 2:
1462 x.array [1] ^= y.array [1];
1463 case 1:
1464 x.array [0] ^= y.array [0];
1465 break;
1466 default:
1467 abort ();
1468 }
1469 return x;
1470}
1471
1472static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1473static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1474static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1475static const i386_operand_type inoutportreg
1476 = OPERAND_TYPE_INOUTPORTREG;
1477static const i386_operand_type reg16_inoutportreg
1478 = OPERAND_TYPE_REG16_INOUTPORTREG;
1479static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1480static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1481static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1482static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1483static const i386_operand_type anydisp
1484 = OPERAND_TYPE_ANYDISP;
1485static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1486static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1487static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1488static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1489static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1490static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1491static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1492static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1493static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1494static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1495static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1496static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1497
1498enum operand_type
1499{
1500 reg,
1501 imm,
1502 disp,
1503 anymem
1504};
1505
1506static INLINE int
1507operand_type_check (i386_operand_type t, enum operand_type c)
1508{
1509 switch (c)
1510 {
1511 case reg:
1512 return (t.bitfield.reg8
1513 || t.bitfield.reg16
1514 || t.bitfield.reg32
1515 || t.bitfield.reg64);
1516
1517 case imm:
1518 return (t.bitfield.imm8
1519 || t.bitfield.imm8s
1520 || t.bitfield.imm16
1521 || t.bitfield.imm32
1522 || t.bitfield.imm32s
1523 || t.bitfield.imm64);
1524
1525 case disp:
1526 return (t.bitfield.disp8
1527 || t.bitfield.disp16
1528 || t.bitfield.disp32
1529 || t.bitfield.disp32s
1530 || t.bitfield.disp64);
1531
1532 case anymem:
1533 return (t.bitfield.disp8
1534 || t.bitfield.disp16
1535 || t.bitfield.disp32
1536 || t.bitfield.disp32s
1537 || t.bitfield.disp64
1538 || t.bitfield.baseindex);
1539
1540 default:
1541 abort ();
1542 }
1543
1544 return 0;
1545}
1546
1547/* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1548 operand J for instruction template T. */
1549
1550static INLINE int
1551match_reg_size (const insn_template *t, unsigned int j)
1552{
1553 return !((i.types[j].bitfield.byte
1554 && !t->operand_types[j].bitfield.byte)
1555 || (i.types[j].bitfield.word
1556 && !t->operand_types[j].bitfield.word)
1557 || (i.types[j].bitfield.dword
1558 && !t->operand_types[j].bitfield.dword)
1559 || (i.types[j].bitfield.qword
1560 && !t->operand_types[j].bitfield.qword));
1561}
1562
1563/* Return 1 if there is no conflict in any size on operand J for
1564 instruction template T. */
1565
1566static INLINE int
1567match_mem_size (const insn_template *t, unsigned int j)
1568{
1569 return (match_reg_size (t, j)
1570 && !((i.types[j].bitfield.unspecified
1571 && !t->operand_types[j].bitfield.unspecified)
1572 || (i.types[j].bitfield.fword
1573 && !t->operand_types[j].bitfield.fword)
1574 || (i.types[j].bitfield.tbyte
1575 && !t->operand_types[j].bitfield.tbyte)
1576 || (i.types[j].bitfield.xmmword
1577 && !t->operand_types[j].bitfield.xmmword)
1578 || (i.types[j].bitfield.ymmword
1579 && !t->operand_types[j].bitfield.ymmword)));
1580}
1581
1582/* Return 1 if there is no size conflict on any operands for
1583 instruction template T. */
1584
1585static INLINE int
1586operand_size_match (const insn_template *t)
1587{
1588 unsigned int j;
1589 int match = 1;
1590
1591 /* Don't check jump instructions. */
1592 if (t->opcode_modifier.jump
1593 || t->opcode_modifier.jumpbyte
1594 || t->opcode_modifier.jumpdword
1595 || t->opcode_modifier.jumpintersegment)
1596 return match;
1597
1598 /* Check memory and accumulator operand size. */
1599 for (j = 0; j < i.operands; j++)
1600 {
1601 if (t->operand_types[j].bitfield.anysize)
1602 continue;
1603
1604 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1605 {
1606 match = 0;
1607 break;
1608 }
1609
1610 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1611 {
1612 match = 0;
1613 break;
1614 }
1615 }
1616
1617 if (match)
1618 return match;
1619 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1620 {
1621mismatch:
1622 i.error = operand_size_mismatch;
1623 return 0;
1624 }
1625
1626 /* Check reverse. */
1627 gas_assert (i.operands == 2);
1628
1629 match = 1;
1630 for (j = 0; j < 2; j++)
1631 {
1632 if (t->operand_types[j].bitfield.acc
1633 && !match_reg_size (t, j ? 0 : 1))
1634 goto mismatch;
1635
1636 if (i.types[j].bitfield.mem
1637 && !match_mem_size (t, j ? 0 : 1))
1638 goto mismatch;
1639 }
1640
1641 return match;
1642}
1643
1644static INLINE int
1645operand_type_match (i386_operand_type overlap,
1646 i386_operand_type given)
1647{
1648 i386_operand_type temp = overlap;
1649
1650 temp.bitfield.jumpabsolute = 0;
1651 temp.bitfield.unspecified = 0;
1652 temp.bitfield.byte = 0;
1653 temp.bitfield.word = 0;
1654 temp.bitfield.dword = 0;
1655 temp.bitfield.fword = 0;
1656 temp.bitfield.qword = 0;
1657 temp.bitfield.tbyte = 0;
1658 temp.bitfield.xmmword = 0;
1659 temp.bitfield.ymmword = 0;
1660 if (operand_type_all_zero (&temp))
1661 goto mismatch;
1662
1663 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1664 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1665 return 1;
1666
1667mismatch:
1668 i.error = operand_type_mismatch;
1669 return 0;
1670}
1671
1672/* If given types g0 and g1 are registers they must be of the same type
1673 unless the expected operand type register overlap is null.
1674 Note that Acc in a template matches every size of reg. */
1675
1676static INLINE int
1677operand_type_register_match (i386_operand_type m0,
1678 i386_operand_type g0,
1679 i386_operand_type t0,
1680 i386_operand_type m1,
1681 i386_operand_type g1,
1682 i386_operand_type t1)
1683{
1684 if (!operand_type_check (g0, reg))
1685 return 1;
1686
1687 if (!operand_type_check (g1, reg))
1688 return 1;
1689
1690 if (g0.bitfield.reg8 == g1.bitfield.reg8
1691 && g0.bitfield.reg16 == g1.bitfield.reg16
1692 && g0.bitfield.reg32 == g1.bitfield.reg32
1693 && g0.bitfield.reg64 == g1.bitfield.reg64)
1694 return 1;
1695
1696 if (m0.bitfield.acc)
1697 {
1698 t0.bitfield.reg8 = 1;
1699 t0.bitfield.reg16 = 1;
1700 t0.bitfield.reg32 = 1;
1701 t0.bitfield.reg64 = 1;
1702 }
1703
1704 if (m1.bitfield.acc)
1705 {
1706 t1.bitfield.reg8 = 1;
1707 t1.bitfield.reg16 = 1;
1708 t1.bitfield.reg32 = 1;
1709 t1.bitfield.reg64 = 1;
1710 }
1711
1712 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1713 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1714 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1715 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1716 return 1;
1717
1718 i.error = register_type_mismatch;
1719
1720 return 0;
1721}
1722
1723static INLINE unsigned int
1724mode_from_disp_size (i386_operand_type t)
1725{
1726 if (t.bitfield.disp8)
1727 return 1;
1728 else if (t.bitfield.disp16
1729 || t.bitfield.disp32
1730 || t.bitfield.disp32s)
1731 return 2;
1732 else
1733 return 0;
1734}
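/* Note (standard ModRM usage, assumed rather than restated here): the
   return value is what ends up in the ModRM "mode" field -- 1 selects the
   disp8 form, 2 the disp16/disp32 form, and 0 is returned otherwise.  */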
1735
1736static INLINE int
1737fits_in_signed_byte (offsetT num)
1738{
1739 return (num >= -128) && (num <= 127);
1740}
1741
1742static INLINE int
1743fits_in_unsigned_byte (offsetT num)
1744{
1745 return (num & 0xff) == num;
1746}
1747
1748static INLINE int
1749fits_in_unsigned_word (offsetT num)
1750{
1751 return (num & 0xffff) == num;
1752}
1753
1754static INLINE int
1755fits_in_signed_word (offsetT num)
1756{
1757 return (-32768 <= num) && (num <= 32767);
1758}
1759
1760static INLINE int
1761fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1762{
1763#ifndef BFD64
1764 return 1;
1765#else
1766 return (!(((offsetT) -1 << 31) & num)
1767 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1768#endif
1769} /* fits_in_signed_long() */
1770
1771static INLINE int
1772fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1773{
1774#ifndef BFD64
1775 return 1;
1776#else
1777 return (num & (((offsetT) 2 << 31) - 1)) == num;
1778#endif
1779} /* fits_in_unsigned_long() */
1780
1781static INLINE int
1782fits_in_imm4 (offsetT num)
1783{
1784 return (num & 0xf) == num;
1785}
1786
1787static i386_operand_type
1788smallest_imm_type (offsetT num)
1789{
1790 i386_operand_type t;
1791
1792 operand_type_set (&t, 0);
1793 t.bitfield.imm64 = 1;
1794
1795 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1796 {
1797 /* This code is disabled on the 486 because all the Imm1 forms
1798 in the opcode table are slower on the i486. They're the
1799 versions with the implicitly specified single-position
1800 displacement, which has another syntax if you really want to
1801 use that form. */
1802 t.bitfield.imm1 = 1;
1803 t.bitfield.imm8 = 1;
1804 t.bitfield.imm8s = 1;
1805 t.bitfield.imm16 = 1;
1806 t.bitfield.imm32 = 1;
1807 t.bitfield.imm32s = 1;
1808 }
1809 else if (fits_in_signed_byte (num))
1810 {
1811 t.bitfield.imm8 = 1;
1812 t.bitfield.imm8s = 1;
1813 t.bitfield.imm16 = 1;
1814 t.bitfield.imm32 = 1;
1815 t.bitfield.imm32s = 1;
1816 }
1817 else if (fits_in_unsigned_byte (num))
1818 {
1819 t.bitfield.imm8 = 1;
1820 t.bitfield.imm16 = 1;
1821 t.bitfield.imm32 = 1;
1822 t.bitfield.imm32s = 1;
1823 }
1824 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1825 {
1826 t.bitfield.imm16 = 1;
1827 t.bitfield.imm32 = 1;
1828 t.bitfield.imm32s = 1;
1829 }
1830 else if (fits_in_signed_long (num))
1831 {
1832 t.bitfield.imm32 = 1;
1833 t.bitfield.imm32s = 1;
1834 }
1835 else if (fits_in_unsigned_long (num))
1836 t.bitfield.imm32 = 1;
1837
1838 return t;
1839}
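/* Worked example (illustrative): smallest_imm_type (200) finds that 200
   fits in an unsigned but not a signed byte, so the result has imm8,
   imm16, imm32, imm32s and imm64 set while imm8s stays clear.  */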
1840
1841static offsetT
1842offset_in_range (offsetT val, int size)
1843{
1844 addressT mask;
1845
1846 switch (size)
1847 {
1848 case 1: mask = ((addressT) 1 << 8) - 1; break;
1849 case 2: mask = ((addressT) 1 << 16) - 1; break;
1850 case 4: mask = ((addressT) 2 << 31) - 1; break;
1851#ifdef BFD64
1852 case 8: mask = ((addressT) 2 << 63) - 1; break;
1853#endif
1854 default: abort ();
1855 }
1856
1857#ifdef BFD64
1858 /* If BFD64, sign extend val for 32bit address mode. */
1859 if (flag_code != CODE_64BIT
1860 || i.prefix[ADDR_PREFIX])
1861 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1862 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1863#endif
1864
1865 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1866 {
1867 char buf1[40], buf2[40];
1868
1869 sprint_value (buf1, val);
1870 sprint_value (buf2, val & mask);
1871 as_warn (_("%s shortened to %s"), buf1, buf2);
1872 }
1873 return val & mask;
1874}
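/* Worked example (illustrative): offset_in_range (0x1234, 1) masks with
   0xff, sees that the discarded bits are neither all zero nor all one,
   warns that the value was shortened, and returns 0x34.  */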
1875
1876enum PREFIX_GROUP
1877{
1878 PREFIX_EXIST = 0,
1879 PREFIX_LOCK,
1880 PREFIX_REP,
1881 PREFIX_OTHER
1882};
1883
1884/* Returns
1885 a. PREFIX_EXIST if attempting to add a prefix where one from the
1886 same class already exists.
1887 b. PREFIX_LOCK if lock prefix is added.
1888 c. PREFIX_REP if rep/repne prefix is added.
1889 d. PREFIX_OTHER if other prefix is added.
1890 */
1891
1892static enum PREFIX_GROUP
1893add_prefix (unsigned int prefix)
1894{
1895 enum PREFIX_GROUP ret = PREFIX_OTHER;
1896 unsigned int q;
1897
1898 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1899 && flag_code == CODE_64BIT)
1900 {
1901 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1902 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1903 && (prefix & (REX_R | REX_X | REX_B))))
1904 ret = PREFIX_EXIST;
1905 q = REX_PREFIX;
1906 }
1907 else
1908 {
1909 switch (prefix)
1910 {
1911 default:
1912 abort ();
1913
1914 case CS_PREFIX_OPCODE:
1915 case DS_PREFIX_OPCODE:
1916 case ES_PREFIX_OPCODE:
1917 case FS_PREFIX_OPCODE:
1918 case GS_PREFIX_OPCODE:
1919 case SS_PREFIX_OPCODE:
1920 q = SEG_PREFIX;
1921 break;
1922
1923 case REPNE_PREFIX_OPCODE:
1924 case REPE_PREFIX_OPCODE:
1925 q = REP_PREFIX;
1926 ret = PREFIX_REP;
1927 break;
1928
1929 case LOCK_PREFIX_OPCODE:
1930 q = LOCK_PREFIX;
1931 ret = PREFIX_LOCK;
1932 break;
1933
1934 case FWAIT_OPCODE:
1935 q = WAIT_PREFIX;
1936 break;
1937
1938 case ADDR_PREFIX_OPCODE:
1939 q = ADDR_PREFIX;
1940 break;
1941
1942 case DATA_PREFIX_OPCODE:
1943 q = DATA_PREFIX;
1944 break;
1945 }
1946 if (i.prefix[q] != 0)
1947 ret = PREFIX_EXIST;
1948 }
1949
1950 if (ret)
1951 {
1952 if (!i.prefix[q])
1953 ++i.prefixes;
1954 i.prefix[q] |= prefix;
1955 }
1956 else
1957 as_bad (_("same type of prefix used twice"));
1958
1959 return ret;
1960}
1961
1962static void
1963update_code_flag (int value, int check)
1964{
1965 PRINTF_LIKE ((*as_error));
1966
1967 flag_code = (enum flag_code) value;
1968 if (flag_code == CODE_64BIT)
1969 {
1970 cpu_arch_flags.bitfield.cpu64 = 1;
1971 cpu_arch_flags.bitfield.cpuno64 = 0;
1972 }
1973 else
1974 {
1975 cpu_arch_flags.bitfield.cpu64 = 0;
1976 cpu_arch_flags.bitfield.cpuno64 = 1;
1977 }
1978  if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
1979 {
1980 if (check)
1981 as_error = as_fatal;
1982 else
1983 as_error = as_bad;
1984 (*as_error) (_("64bit mode not supported on `%s'."),
1985 cpu_arch_name ? cpu_arch_name : default_arch);
1986 }
1987 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
1988 {
1989 if (check)
1990 as_error = as_fatal;
1991 else
1992 as_error = as_bad;
1993 (*as_error) (_("32bit mode not supported on `%s'."),
1994 cpu_arch_name ? cpu_arch_name : default_arch);
1995 }
1996 stackop_size = '\0';
1997}
1998
1999static void
2000set_code_flag (int value)
2001{
2002 update_code_flag (value, 0);
2003}
2004
2005static void
2006set_16bit_gcc_code_flag (int new_code_flag)
2007{
2008 flag_code = (enum flag_code) new_code_flag;
2009 if (flag_code != CODE_16BIT)
2010 abort ();
2011 cpu_arch_flags.bitfield.cpu64 = 0;
2012 cpu_arch_flags.bitfield.cpuno64 = 1;
2013 stackop_size = LONG_MNEM_SUFFIX;
2014}
2015
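/* Handle the .intel_syntax and .att_syntax directives; SYNTAX_FLAG is 1
   for Intel syntax and 0 for AT&T syntax.  An optional "prefix" or
   "noprefix" argument controls whether register names must carry the '%'
   prefix, e.g. ".intel_syntax noprefix".  */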
2016static void
2017set_intel_syntax (int syntax_flag)
2018{
2019 /* Find out if register prefixing is specified. */
2020 int ask_naked_reg = 0;
2021
2022 SKIP_WHITESPACE ();
2023 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2024 {
2025 char *string = input_line_pointer;
2026 int e = get_symbol_end ();
2027
2028 if (strcmp (string, "prefix") == 0)
2029 ask_naked_reg = 1;
2030 else if (strcmp (string, "noprefix") == 0)
2031 ask_naked_reg = -1;
2032 else
2033 as_bad (_("bad argument to syntax directive."));
2034 *input_line_pointer = e;
2035 }
2036 demand_empty_rest_of_line ();
2037
2038 intel_syntax = syntax_flag;
2039
2040 if (ask_naked_reg == 0)
2041 allow_naked_reg = (intel_syntax
2042 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2043 else
2044 allow_naked_reg = (ask_naked_reg < 0);
2045
2046 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2047
2048 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2049 identifier_chars['$'] = intel_syntax ? '$' : 0;
2050 register_prefix = allow_naked_reg ? "" : "%";
2051}
2052
2053static void
2054set_intel_mnemonic (int mnemonic_flag)
2055{
2056 intel_mnemonic = mnemonic_flag;
2057}
2058
2059static void
2060set_allow_index_reg (int flag)
2061{
2062 allow_index_reg = flag;
2063}
2064
2065static void
2066set_sse_check (int dummy ATTRIBUTE_UNUSED)
2067{
2068 SKIP_WHITESPACE ();
2069
2070 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2071 {
2072 char *string = input_line_pointer;
2073 int e = get_symbol_end ();
2074
2075 if (strcmp (string, "none") == 0)
2076 sse_check = sse_check_none;
2077 else if (strcmp (string, "warning") == 0)
2078 sse_check = sse_check_warning;
2079 else if (strcmp (string, "error") == 0)
2080 sse_check = sse_check_error;
2081 else
2082 as_bad (_("bad argument to sse_check directive."));
2083 *input_line_pointer = e;
2084 }
2085 else
2086 as_bad (_("missing argument for sse_check directive"));
2087
2088 demand_empty_rest_of_line ();
2089}
2090
2091static void
2092check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2093 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2094{
2095#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2096 static const char *arch;
2097
2098  /* Intel L1OM is only supported on ELF.  */
2099 if (!IS_ELF)
2100 return;
2101
2102 if (!arch)
2103 {
2104 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2105 use default_arch. */
2106 arch = cpu_arch_name;
2107 if (!arch)
2108 arch = default_arch;
2109 }
2110
2111  /* If we are targeting Intel L1OM, the requested feature must itself
	be supported on L1OM.  */
2112 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2113 || new_flag.bitfield.cpul1om)
2114 return;
2115
2116 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2117#endif
2118}
2119
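/* Handle the .arch directive.  A plain architecture name such as "i486"
   replaces the current base flags, while a name with a leading dot such
   as ".mmx" only enables (or, for negated table entries, disables) the
   named feature bits.  An optional ",nojumps" or ",jumps" modifier
   controls promotion of conditional jumps, e.g. ".arch i286,nojumps".  */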
2120static void
2121set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2122{
2123 SKIP_WHITESPACE ();
2124
2125 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2126 {
2127 char *string = input_line_pointer;
2128 int e = get_symbol_end ();
2129 unsigned int j;
2130 i386_cpu_flags flags;
2131
2132 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2133 {
2134 if (strcmp (string, cpu_arch[j].name) == 0)
2135 {
2136 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2137
2138 if (*string != '.')
2139 {
2140 cpu_arch_name = cpu_arch[j].name;
2141 cpu_sub_arch_name = NULL;
2142 cpu_arch_flags = cpu_arch[j].flags;
2143 if (flag_code == CODE_64BIT)
2144 {
2145 cpu_arch_flags.bitfield.cpu64 = 1;
2146 cpu_arch_flags.bitfield.cpuno64 = 0;
2147 }
2148 else
2149 {
2150 cpu_arch_flags.bitfield.cpu64 = 0;
2151 cpu_arch_flags.bitfield.cpuno64 = 1;
2152 }
2153 cpu_arch_isa = cpu_arch[j].type;
2154 cpu_arch_isa_flags = cpu_arch[j].flags;
2155 if (!cpu_arch_tune_set)
2156 {
2157 cpu_arch_tune = cpu_arch_isa;
2158 cpu_arch_tune_flags = cpu_arch_isa_flags;
2159 }
2160 break;
2161 }
2162
2163 if (!cpu_arch[j].negated)
2164 flags = cpu_flags_or (cpu_arch_flags,
2165 cpu_arch[j].flags);
2166 else
2167 flags = cpu_flags_and_not (cpu_arch_flags,
2168 cpu_arch[j].flags);
2169 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2170 {
2171 if (cpu_sub_arch_name)
2172 {
2173 char *name = cpu_sub_arch_name;
2174 cpu_sub_arch_name = concat (name,
2175 cpu_arch[j].name,
2176 (const char *) NULL);
2177 free (name);
2178 }
2179 else
2180 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2181 cpu_arch_flags = flags;
2182 cpu_arch_isa_flags = flags;
2183 }
2184 *input_line_pointer = e;
2185 demand_empty_rest_of_line ();
2186 return;
2187 }
2188 }
2189 if (j >= ARRAY_SIZE (cpu_arch))
2190 as_bad (_("no such architecture: `%s'"), string);
2191
2192 *input_line_pointer = e;
2193 }
2194 else
2195 as_bad (_("missing cpu architecture"));
2196
2197 no_cond_jump_promotion = 0;
2198 if (*input_line_pointer == ','
2199 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2200 {
2201 char *string = ++input_line_pointer;
2202 int e = get_symbol_end ();
2203
2204 if (strcmp (string, "nojumps") == 0)
2205 no_cond_jump_promotion = 1;
2206 else if (strcmp (string, "jumps") == 0)
2207 ;
2208 else
2209 as_bad (_("no such architecture modifier: `%s'"), string);
2210
2211 *input_line_pointer = e;
2212 }
2213
2214 demand_empty_rest_of_line ();
2215}
2216
2217enum bfd_architecture
2218i386_arch (void)
2219{
2220 if (cpu_arch_isa == PROCESSOR_L1OM)
2221 {
2222 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2223 || flag_code != CODE_64BIT)
2224 as_fatal (_("Intel L1OM is 64bit ELF only"));
2225 return bfd_arch_l1om;
2226 }
2227 else
2228 return bfd_arch_i386;
2229}
2230
2231unsigned long
2232i386_mach ()
2233{
2234 if (!strncmp (default_arch, "x86_64", 6))
2235 {
2236 if (cpu_arch_isa == PROCESSOR_L1OM)
2237 {
2238 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2239 || default_arch[6] != '\0')
2240 as_fatal (_("Intel L1OM is 64bit ELF only"));
2241 return bfd_mach_l1om;
2242 }
2243 else if (default_arch[6] == '\0')
2244 return bfd_mach_x86_64;
2245 else
2246 return bfd_mach_x64_32;
2247 }
2248 else if (!strcmp (default_arch, "i386"))
2249 return bfd_mach_i386_i386;
2250 else
2251 as_fatal (_("Unknown architecture"));
2252}
2253\f
2254void
2255md_begin ()
2256{
2257 const char *hash_err;
2258
2259 /* Initialize op_hash hash table. */
2260 op_hash = hash_new ();
2261
2262 {
2263 const insn_template *optab;
2264 templates *core_optab;
2265
2266 /* Setup for loop. */
2267 optab = i386_optab;
2268 core_optab = (templates *) xmalloc (sizeof (templates));
2269 core_optab->start = optab;
2270
2271 while (1)
2272 {
2273 ++optab;
2274 if (optab->name == NULL
2275 || strcmp (optab->name, (optab - 1)->name) != 0)
2276 {
2277 /* different name --> ship out current template list;
2278 add to hash table; & begin anew. */
2279 core_optab->end = optab;
2280 hash_err = hash_insert (op_hash,
2281 (optab - 1)->name,
2282 (void *) core_optab);
2283 if (hash_err)
2284 {
2285 as_fatal (_("Internal Error: Can't hash %s: %s"),
2286 (optab - 1)->name,
2287 hash_err);
2288 }
2289 if (optab->name == NULL)
2290 break;
2291 core_optab = (templates *) xmalloc (sizeof (templates));
2292 core_optab->start = optab;
2293 }
2294 }
2295 }
2296
2297 /* Initialize reg_hash hash table. */
2298 reg_hash = hash_new ();
2299 {
2300 const reg_entry *regtab;
2301 unsigned int regtab_size = i386_regtab_size;
2302
2303 for (regtab = i386_regtab; regtab_size--; regtab++)
2304 {
2305 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2306 if (hash_err)
2307 as_fatal (_("Internal Error: Can't hash %s: %s"),
2308 regtab->reg_name,
2309 hash_err);
2310 }
2311 }
2312
2313 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2314 {
2315 int c;
2316 char *p;
2317
2318 for (c = 0; c < 256; c++)
2319 {
2320 if (ISDIGIT (c))
2321 {
2322 digit_chars[c] = c;
2323 mnemonic_chars[c] = c;
2324 register_chars[c] = c;
2325 operand_chars[c] = c;
2326 }
2327 else if (ISLOWER (c))
2328 {
2329 mnemonic_chars[c] = c;
2330 register_chars[c] = c;
2331 operand_chars[c] = c;
2332 }
2333 else if (ISUPPER (c))
2334 {
2335 mnemonic_chars[c] = TOLOWER (c);
2336 register_chars[c] = mnemonic_chars[c];
2337 operand_chars[c] = c;
2338 }
2339
2340 if (ISALPHA (c) || ISDIGIT (c))
2341 identifier_chars[c] = c;
2342 else if (c >= 128)
2343 {
2344 identifier_chars[c] = c;
2345 operand_chars[c] = c;
2346 }
2347 }
2348
2349#ifdef LEX_AT
2350 identifier_chars['@'] = '@';
2351#endif
2352#ifdef LEX_QM
2353 identifier_chars['?'] = '?';
2354 operand_chars['?'] = '?';
2355#endif
2356 digit_chars['-'] = '-';
2357 mnemonic_chars['_'] = '_';
2358 mnemonic_chars['-'] = '-';
2359 mnemonic_chars['.'] = '.';
2360 identifier_chars['_'] = '_';
2361 identifier_chars['.'] = '.';
2362
2363 for (p = operand_special_chars; *p != '\0'; p++)
2364 operand_chars[(unsigned char) *p] = *p;
2365 }
2366
2367#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2368 if (IS_ELF)
2369 {
2370 record_alignment (text_section, 2);
2371 record_alignment (data_section, 2);
2372 record_alignment (bss_section, 2);
2373 }
2374#endif
2375
2376 if (flag_code == CODE_64BIT)
2377 {
2378#if defined (OBJ_COFF) && defined (TE_PE)
2379 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2380 ? 32 : 16);
2381#else
2382 x86_dwarf2_return_column = 16;
2383#endif
2384 x86_cie_data_alignment = -8;
2385 }
2386 else
2387 {
2388 x86_dwarf2_return_column = 8;
2389 x86_cie_data_alignment = -4;
2390 }
2391}
2392
2393void
2394i386_print_statistics (FILE *file)
2395{
2396 hash_print_statistics (file, "i386 opcode", op_hash);
2397 hash_print_statistics (file, "i386 register", reg_hash);
2398}
2399\f
2400#ifdef DEBUG386
2401
2402/* Debugging routines for md_assemble. */
2403static void pte (insn_template *);
2404static void pt (i386_operand_type);
2405static void pe (expressionS *);
2406static void ps (symbolS *);
2407
2408static void
2409pi (char *line, i386_insn *x)
2410{
2411 unsigned int j;
2412
2413 fprintf (stdout, "%s: template ", line);
2414 pte (&x->tm);
2415 fprintf (stdout, " address: base %s index %s scale %x\n",
2416 x->base_reg ? x->base_reg->reg_name : "none",
2417 x->index_reg ? x->index_reg->reg_name : "none",
2418 x->log2_scale_factor);
2419 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2420 x->rm.mode, x->rm.reg, x->rm.regmem);
2421 fprintf (stdout, " sib: base %x index %x scale %x\n",
2422 x->sib.base, x->sib.index, x->sib.scale);
2423 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2424 (x->rex & REX_W) != 0,
2425 (x->rex & REX_R) != 0,
2426 (x->rex & REX_X) != 0,
2427 (x->rex & REX_B) != 0);
2428 for (j = 0; j < x->operands; j++)
2429 {
2430 fprintf (stdout, " #%d: ", j + 1);
2431 pt (x->types[j]);
2432 fprintf (stdout, "\n");
2433 if (x->types[j].bitfield.reg8
2434 || x->types[j].bitfield.reg16
2435 || x->types[j].bitfield.reg32
2436 || x->types[j].bitfield.reg64
2437 || x->types[j].bitfield.regmmx
2438 || x->types[j].bitfield.regxmm
2439 || x->types[j].bitfield.regymm
2440 || x->types[j].bitfield.sreg2
2441 || x->types[j].bitfield.sreg3
2442 || x->types[j].bitfield.control
2443 || x->types[j].bitfield.debug
2444 || x->types[j].bitfield.test)
2445 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2446 if (operand_type_check (x->types[j], imm))
2447 pe (x->op[j].imms);
2448 if (operand_type_check (x->types[j], disp))
2449 pe (x->op[j].disps);
2450 }
2451}
2452
2453static void
2454pte (insn_template *t)
2455{
2456 unsigned int j;
2457 fprintf (stdout, " %d operands ", t->operands);
2458 fprintf (stdout, "opcode %x ", t->base_opcode);
2459 if (t->extension_opcode != None)
2460 fprintf (stdout, "ext %x ", t->extension_opcode);
2461 if (t->opcode_modifier.d)
2462 fprintf (stdout, "D");
2463 if (t->opcode_modifier.w)
2464 fprintf (stdout, "W");
2465 fprintf (stdout, "\n");
2466 for (j = 0; j < t->operands; j++)
2467 {
2468 fprintf (stdout, " #%d type ", j + 1);
2469 pt (t->operand_types[j]);
2470 fprintf (stdout, "\n");
2471 }
2472}
2473
2474static void
2475pe (expressionS *e)
2476{
2477 fprintf (stdout, " operation %d\n", e->X_op);
2478 fprintf (stdout, " add_number %ld (%lx)\n",
2479 (long) e->X_add_number, (long) e->X_add_number);
2480 if (e->X_add_symbol)
2481 {
2482 fprintf (stdout, " add_symbol ");
2483 ps (e->X_add_symbol);
2484 fprintf (stdout, "\n");
2485 }
2486 if (e->X_op_symbol)
2487 {
2488 fprintf (stdout, " op_symbol ");
2489 ps (e->X_op_symbol);
2490 fprintf (stdout, "\n");
2491 }
2492}
2493
2494static void
2495ps (symbolS *s)
2496{
2497 fprintf (stdout, "%s type %s%s",
2498 S_GET_NAME (s),
2499 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2500 segment_name (S_GET_SEGMENT (s)));
2501}
2502
2503static struct type_name
2504 {
2505 i386_operand_type mask;
2506 const char *name;
2507 }
2508const type_names[] =
2509{
2510 { OPERAND_TYPE_REG8, "r8" },
2511 { OPERAND_TYPE_REG16, "r16" },
2512 { OPERAND_TYPE_REG32, "r32" },
2513 { OPERAND_TYPE_REG64, "r64" },
2514 { OPERAND_TYPE_IMM8, "i8" },
2515 { OPERAND_TYPE_IMM8, "i8s" },
2516 { OPERAND_TYPE_IMM16, "i16" },
2517 { OPERAND_TYPE_IMM32, "i32" },
2518 { OPERAND_TYPE_IMM32S, "i32s" },
2519 { OPERAND_TYPE_IMM64, "i64" },
2520 { OPERAND_TYPE_IMM1, "i1" },
2521 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2522 { OPERAND_TYPE_DISP8, "d8" },
2523 { OPERAND_TYPE_DISP16, "d16" },
2524 { OPERAND_TYPE_DISP32, "d32" },
2525 { OPERAND_TYPE_DISP32S, "d32s" },
2526 { OPERAND_TYPE_DISP64, "d64" },
2527 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2528 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2529 { OPERAND_TYPE_CONTROL, "control reg" },
2530 { OPERAND_TYPE_TEST, "test reg" },
2531 { OPERAND_TYPE_DEBUG, "debug reg" },
2532 { OPERAND_TYPE_FLOATREG, "FReg" },
2533 { OPERAND_TYPE_FLOATACC, "FAcc" },
2534 { OPERAND_TYPE_SREG2, "SReg2" },
2535 { OPERAND_TYPE_SREG3, "SReg3" },
2536 { OPERAND_TYPE_ACC, "Acc" },
2537 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2538 { OPERAND_TYPE_REGMMX, "rMMX" },
2539 { OPERAND_TYPE_REGXMM, "rXMM" },
2540 { OPERAND_TYPE_REGYMM, "rYMM" },
2541 { OPERAND_TYPE_ESSEG, "es" },
2542};
2543
2544static void
2545pt (i386_operand_type t)
2546{
2547 unsigned int j;
2548 i386_operand_type a;
2549
2550 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2551 {
2552 a = operand_type_and (t, type_names[j].mask);
2553 if (!operand_type_all_zero (&a))
2554 fprintf (stdout, "%s, ", type_names[j].name);
2555 }
2556 fflush (stdout);
2557}
2558
2559#endif /* DEBUG386 */
2560\f
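/* Pick the BFD relocation for a fixup of SIZE bytes.  PCREL and SIGN
   describe the field being relocated; OTHER, when not NO_RELOC, is a
   relocation that was requested explicitly and is only sanity-checked
   here (and widened when SIZE is 8).  For example, reloc (4, 1, 1,
   NO_RELOC) yields BFD_RELOC_32_PCREL and reloc (2, 0, 0, NO_RELOC)
   yields BFD_RELOC_16.  */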
2561static bfd_reloc_code_real_type
2562reloc (unsigned int size,
2563 int pcrel,
2564 int sign,
2565 bfd_reloc_code_real_type other)
2566{
2567 if (other != NO_RELOC)
2568 {
2569 reloc_howto_type *rel;
2570
2571 if (size == 8)
2572 switch (other)
2573 {
2574 case BFD_RELOC_X86_64_GOT32:
2575 return BFD_RELOC_X86_64_GOT64;
2576 break;
2577 case BFD_RELOC_X86_64_PLTOFF64:
2578 return BFD_RELOC_X86_64_PLTOFF64;
2579 break;
2580 case BFD_RELOC_X86_64_GOTPC32:
2581 other = BFD_RELOC_X86_64_GOTPC64;
2582 break;
2583 case BFD_RELOC_X86_64_GOTPCREL:
2584 other = BFD_RELOC_X86_64_GOTPCREL64;
2585 break;
2586 case BFD_RELOC_X86_64_TPOFF32:
2587 other = BFD_RELOC_X86_64_TPOFF64;
2588 break;
2589 case BFD_RELOC_X86_64_DTPOFF32:
2590 other = BFD_RELOC_X86_64_DTPOFF64;
2591 break;
2592 default:
2593 break;
2594 }
2595
2596 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2597 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2598 sign = -1;
2599
2600 rel = bfd_reloc_type_lookup (stdoutput, other);
2601 if (!rel)
2602 as_bad (_("unknown relocation (%u)"), other);
2603 else if (size != bfd_get_reloc_size (rel))
2604 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2605 bfd_get_reloc_size (rel),
2606 size);
2607 else if (pcrel && !rel->pc_relative)
2608 as_bad (_("non-pc-relative relocation for pc-relative field"));
2609 else if ((rel->complain_on_overflow == complain_overflow_signed
2610 && !sign)
2611 || (rel->complain_on_overflow == complain_overflow_unsigned
2612 && sign > 0))
2613 as_bad (_("relocated field and relocation type differ in signedness"));
2614 else
2615 return other;
2616 return NO_RELOC;
2617 }
2618
2619 if (pcrel)
2620 {
2621 if (!sign)
2622 as_bad (_("there are no unsigned pc-relative relocations"));
2623 switch (size)
2624 {
2625 case 1: return BFD_RELOC_8_PCREL;
2626 case 2: return BFD_RELOC_16_PCREL;
2627 case 4: return BFD_RELOC_32_PCREL;
2628 case 8: return BFD_RELOC_64_PCREL;
2629 }
2630 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2631 }
2632 else
2633 {
2634 if (sign > 0)
2635 switch (size)
2636 {
2637 case 4: return BFD_RELOC_X86_64_32S;
2638 }
2639 else
2640 switch (size)
2641 {
2642 case 1: return BFD_RELOC_8;
2643 case 2: return BFD_RELOC_16;
2644 case 4: return BFD_RELOC_32;
2645 case 8: return BFD_RELOC_64;
2646 }
2647 as_bad (_("cannot do %s %u byte relocation"),
2648 sign > 0 ? "signed" : "unsigned", size);
2649 }
2650
2651 return NO_RELOC;
2652}
2653
2654/* Here we decide which fixups can be adjusted to make them relative to
2655 the beginning of the section instead of the symbol. Basically we need
2656 to make sure that the dynamic relocations are done correctly, so in
2657 some cases we force the original symbol to be used. */
2658
2659int
2660tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2661{
2662#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2663 if (!IS_ELF)
2664 return 1;
2665
2666 /* Don't adjust pc-relative references to merge sections in 64-bit
2667 mode. */
2668 if (use_rela_relocations
2669 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2670 && fixP->fx_pcrel)
2671 return 0;
2672
2673  /* The x86_64 GOTPCREL relocations are represented as 32bit PCrel
2674     relocations and changed later by validate_fix.  */
2675 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2676 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2677 return 0;
2678
2679 /* adjust_reloc_syms doesn't know about the GOT. */
2680 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2681 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2682 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2683 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2684 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2685 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2686 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2687 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2688 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2689 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2690 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2691 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2692 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2693 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2694 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2695 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2696 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2697 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2698 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2699 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2700 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2701 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2702 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2703 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2704 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2705 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2706 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2707 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2708 return 0;
2709#endif
2710 return 1;
2711}
2712
2713static int
2714intel_float_operand (const char *mnemonic)
2715{
2716 /* Note that the value returned is meaningful only for opcodes with (memory)
2717 operands, hence the code here is free to improperly handle opcodes that
2718 have no operands (for better performance and smaller code). */
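  /* For example: "fild" yields 2 (integer operand), "fldcw" and "fnsave"
     yield 3 (control operation), "fadd" yields 1 and "mov" yields 0.  */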
2719
2720 if (mnemonic[0] != 'f')
2721 return 0; /* non-math */
2722
2723 switch (mnemonic[1])
2724 {
2725 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2726       the fs segment override prefix are not currently handled because
2727       no call path can make opcodes without operands get here.  */
2728 case 'i':
2729 return 2 /* integer op */;
2730 case 'l':
2731 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2732 return 3; /* fldcw/fldenv */
2733 break;
2734 case 'n':
2735 if (mnemonic[2] != 'o' /* fnop */)
2736 return 3; /* non-waiting control op */
2737 break;
2738 case 'r':
2739 if (mnemonic[2] == 's')
2740 return 3; /* frstor/frstpm */
2741 break;
2742 case 's':
2743 if (mnemonic[2] == 'a')
2744 return 3; /* fsave */
2745 if (mnemonic[2] == 't')
2746 {
2747 switch (mnemonic[3])
2748 {
2749 case 'c': /* fstcw */
2750 case 'd': /* fstdw */
2751 case 'e': /* fstenv */
2752 case 's': /* fsts[gw] */
2753 return 3;
2754 }
2755 }
2756 break;
2757 case 'x':
2758 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2759 return 0; /* fxsave/fxrstor are not really math ops */
2760 break;
2761 }
2762
2763 return 1;
2764}
2765
2766/* Build the VEX prefix. */
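/* Layout of the prefix bytes as assembled below (R/X/B and the vvvv
   register specifier are stored in one's-complement form):
     2-byte VEX:  0xC5  | R vvvv L pp |
     3-byte VEX:  0xC4 (or 0x8F for XOP)  | R X B m-mmmm |  | W vvvv L pp |
   pp selects the implied 66/F3/F2 prefix and L the 128/256 bit vector
   length.  */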
2767
2768static void
2769build_vex_prefix (const insn_template *t)
2770{
2771 unsigned int register_specifier;
2772 unsigned int implied_prefix;
2773 unsigned int vector_length;
2774
2775 /* Check register specifier. */
2776 if (i.vex.register_specifier)
2777 {
2778 register_specifier = i.vex.register_specifier->reg_num;
2779 if ((i.vex.register_specifier->reg_flags & RegRex))
2780 register_specifier += 8;
2781 register_specifier = ~register_specifier & 0xf;
2782 }
2783 else
2784 register_specifier = 0xf;
2785
2786  /* Use the 2-byte VEX prefix by swapping destination and source
2787     operands.  */
2788 if (!i.swap_operand
2789 && i.operands == i.reg_operands
2790 && i.tm.opcode_modifier.vexopcode == VEX0F
2791 && i.tm.opcode_modifier.s
2792 && i.rex == REX_B)
2793 {
2794 unsigned int xchg = i.operands - 1;
2795 union i386_op temp_op;
2796 i386_operand_type temp_type;
2797
2798 temp_type = i.types[xchg];
2799 i.types[xchg] = i.types[0];
2800 i.types[0] = temp_type;
2801 temp_op = i.op[xchg];
2802 i.op[xchg] = i.op[0];
2803 i.op[0] = temp_op;
2804
2805 gas_assert (i.rm.mode == 3);
2806
2807 i.rex = REX_R;
2808 xchg = i.rm.regmem;
2809 i.rm.regmem = i.rm.reg;
2810 i.rm.reg = xchg;
2811
2812 /* Use the next insn. */
2813 i.tm = t[1];
2814 }
2815
2816 if (i.tm.opcode_modifier.vex == VEXScalar)
2817 vector_length = avxscalar;
2818 else
2819 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2820
2821 switch ((i.tm.base_opcode >> 8) & 0xff)
2822 {
2823 case 0:
2824 implied_prefix = 0;
2825 break;
2826 case DATA_PREFIX_OPCODE:
2827 implied_prefix = 1;
2828 break;
2829 case REPE_PREFIX_OPCODE:
2830 implied_prefix = 2;
2831 break;
2832 case REPNE_PREFIX_OPCODE:
2833 implied_prefix = 3;
2834 break;
2835 default:
2836 abort ();
2837 }
2838
2839 /* Use 2-byte VEX prefix if possible. */
2840 if (i.tm.opcode_modifier.vexopcode == VEX0F
2841 && i.tm.opcode_modifier.vexw != VEXW1
2842 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2843 {
2844 /* 2-byte VEX prefix. */
2845 unsigned int r;
2846
2847 i.vex.length = 2;
2848 i.vex.bytes[0] = 0xc5;
2849
2850 /* Check the REX.R bit. */
2851 r = (i.rex & REX_R) ? 0 : 1;
2852 i.vex.bytes[1] = (r << 7
2853 | register_specifier << 3
2854 | vector_length << 2
2855 | implied_prefix);
2856 }
2857 else
2858 {
2859 /* 3-byte VEX prefix. */
2860 unsigned int m, w;
2861
2862 i.vex.length = 3;
2863
2864 switch (i.tm.opcode_modifier.vexopcode)
2865 {
2866 case VEX0F:
2867 m = 0x1;
2868 i.vex.bytes[0] = 0xc4;
2869 break;
2870 case VEX0F38:
2871 m = 0x2;
2872 i.vex.bytes[0] = 0xc4;
2873 break;
2874 case VEX0F3A:
2875 m = 0x3;
2876 i.vex.bytes[0] = 0xc4;
2877 break;
2878 case XOP08:
2879 m = 0x8;
2880 i.vex.bytes[0] = 0x8f;
2881 break;
2882 case XOP09:
2883 m = 0x9;
2884 i.vex.bytes[0] = 0x8f;
2885 break;
2886 case XOP0A:
2887 m = 0xa;
2888 i.vex.bytes[0] = 0x8f;
2889 break;
2890 default:
2891 abort ();
2892 }
2893
2894      /* The high 3 bits of the second VEX byte are the one's complement
2895 of RXB bits from REX. */
2896 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2897
2898 /* Check the REX.W bit. */
2899 w = (i.rex & REX_W) ? 1 : 0;
2900 if (i.tm.opcode_modifier.vexw)
2901 {
2902 if (w)
2903 abort ();
2904
2905 if (i.tm.opcode_modifier.vexw == VEXW1)
2906 w = 1;
2907 }
2908
2909 i.vex.bytes[2] = (w << 7
2910 | register_specifier << 3
2911 | vector_length << 2
2912 | implied_prefix);
2913 }
2914}
2915
2916static void
2917process_immext (void)
2918{
2919 expressionS *exp;
2920
2921 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2922 {
2923      /* SSE3 instructions have fixed operands with an opcode
2924 suffix which is coded in the same place as an 8-bit immediate
2925 field would be. Here we check those operands and remove them
2926 afterwards. */
2927 unsigned int x;
2928
2929 for (x = 0; x < i.operands; x++)
2930 if (i.op[x].regs->reg_num != x)
2931 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2932 register_prefix, i.op[x].regs->reg_name, x + 1,
2933 i.tm.name);
2934
2935 i.operands = 0;
2936 }
2937
2938 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2939 which is coded in the same place as an 8-bit immediate field
2940 would be. Here we fake an 8-bit immediate operand from the
2941 opcode suffix stored in tm.extension_opcode.
2942
2943     AVX instructions also use this encoding for some
2944     3-argument instructions.  */
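  /* For example, the AMD 3DNow! "pfrcp" insn keeps its opcode suffix
     byte in tm.extension_opcode; below it is faked up as an Imm8
     operand so that it is emitted like an ordinary 8-bit immediate.  */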
2945
2946 gas_assert (i.imm_operands == 0
2947 && (i.operands <= 2
2948 || (i.tm.opcode_modifier.vex
2949 && i.operands <= 4)));
2950
2951 exp = &im_expressions[i.imm_operands++];
2952 i.op[i.operands].imms = exp;
2953 i.types[i.operands] = imm8;
2954 i.operands++;
2955 exp->X_op = O_constant;
2956 exp->X_add_number = i.tm.extension_opcode;
2957 i.tm.extension_opcode = None;
2958}
2959
2960/* This is the guts of the machine-dependent assembler. LINE points to a
2961 machine dependent instruction. This function is supposed to emit
2962 the frags/bytes it assembles to. */
2963
2964void
2965md_assemble (char *line)
2966{
2967 unsigned int j;
2968 char mnemonic[MAX_MNEM_SIZE];
2969 const insn_template *t;
2970
2971 /* Initialize globals. */
2972 memset (&i, '\0', sizeof (i));
2973 for (j = 0; j < MAX_OPERANDS; j++)
2974 i.reloc[j] = NO_RELOC;
2975 memset (disp_expressions, '\0', sizeof (disp_expressions));
2976 memset (im_expressions, '\0', sizeof (im_expressions));
2977 save_stack_p = save_stack;
2978
2979 /* First parse an instruction mnemonic & call i386_operand for the operands.
2980 We assume that the scrubber has arranged it so that line[0] is the valid
2981 start of a (possibly prefixed) mnemonic. */
2982
2983 line = parse_insn (line, mnemonic);
2984 if (line == NULL)
2985 return;
2986
2987 line = parse_operands (line, mnemonic);
2988 this_operand = -1;
2989 if (line == NULL)
2990 return;
2991
2992 /* Now we've parsed the mnemonic into a set of templates, and have the
2993 operands at hand. */
2994
2995 /* All intel opcodes have reversed operands except for "bound" and
2996 "enter". We also don't reverse intersegment "jmp" and "call"
2997 instructions with 2 immediate operands so that the immediate segment
2998 precedes the offset, as it does when in AT&T mode. */
2999 if (intel_syntax
3000 && i.operands > 1
3001 && (strcmp (mnemonic, "bound") != 0)
3002 && (strcmp (mnemonic, "invlpga") != 0)
3003 && !(operand_type_check (i.types[0], imm)
3004 && operand_type_check (i.types[1], imm)))
3005 swap_operands ();
3006
3007 /* The order of the immediates should be reversed
3008     for the 2-immediate extrq and insertq instructions.  */
3009 if (i.imm_operands == 2
3010 && (strcmp (mnemonic, "extrq") == 0
3011 || strcmp (mnemonic, "insertq") == 0))
3012 swap_2_operands (0, 1);
3013
3014 if (i.imm_operands)
3015 optimize_imm ();
3016
3017 /* Don't optimize displacement for movabs since it only takes 64bit
3018 displacement. */
3019 if (i.disp_operands
3020 && !i.disp32_encoding
3021 && (flag_code != CODE_64BIT
3022 || strcmp (mnemonic, "movabs") != 0))
3023 optimize_disp ();
3024
3025 /* Next, we find a template that matches the given insn,
3026 making sure the overlap of the given operands types is consistent
3027 with the template operand types. */
3028
3029 if (!(t = match_template ()))
3030 return;
3031
3032 if (sse_check != sse_check_none
3033 && !i.tm.opcode_modifier.noavx
3034 && (i.tm.cpu_flags.bitfield.cpusse
3035 || i.tm.cpu_flags.bitfield.cpusse2
3036 || i.tm.cpu_flags.bitfield.cpusse3
3037 || i.tm.cpu_flags.bitfield.cpussse3
3038 || i.tm.cpu_flags.bitfield.cpusse4_1
3039 || i.tm.cpu_flags.bitfield.cpusse4_2))
3040 {
3041 (sse_check == sse_check_warning
3042 ? as_warn
3043 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3044 }
3045
3046 /* Zap movzx and movsx suffix. The suffix has been set from
3047 "word ptr" or "byte ptr" on the source operand in Intel syntax
3048 or extracted from mnemonic in AT&T syntax. But we'll use
3049 the destination register to choose the suffix for encoding. */
3050 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3051 {
3052 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3053 there is no suffix, the default will be byte extension. */
3054 if (i.reg_operands != 2
3055 && !i.suffix
3056 && intel_syntax)
3057 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3058
3059 i.suffix = 0;
3060 }
3061
3062 if (i.tm.opcode_modifier.fwait)
3063 if (!add_prefix (FWAIT_OPCODE))
3064 return;
3065
3066 /* Check for lock without a lockable instruction. Destination operand
3067 must be memory unless it is xchg (0x86). */
3068 if (i.prefix[LOCK_PREFIX]
3069 && (!i.tm.opcode_modifier.islockable
3070 || i.mem_operands == 0
3071 || (i.tm.base_opcode != 0x86
3072 && !operand_type_check (i.types[i.operands - 1], anymem))))
3073 {
3074 as_bad (_("expecting lockable instruction after `lock'"));
3075 return;
3076 }
3077
3078 /* Check string instruction segment overrides. */
3079 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3080 {
3081 if (!check_string ())
3082 return;
3083 i.disp_operands = 0;
3084 }
3085
3086 if (!process_suffix ())
3087 return;
3088
3089 /* Update operand types. */
3090 for (j = 0; j < i.operands; j++)
3091 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3092
3093 /* Make still unresolved immediate matches conform to size of immediate
3094 given in i.suffix. */
3095 if (!finalize_imm ())
3096 return;
3097
3098 if (i.types[0].bitfield.imm1)
3099 i.imm_operands = 0; /* kludge for shift insns. */
3100
3101 /* We only need to check those implicit registers for instructions
3102     with 3 operands or fewer.  */
3103 if (i.operands <= 3)
3104 for (j = 0; j < i.operands; j++)
3105 if (i.types[j].bitfield.inoutportreg
3106 || i.types[j].bitfield.shiftcount
3107 || i.types[j].bitfield.acc
3108 || i.types[j].bitfield.floatacc)
3109 i.reg_operands--;
3110
3111 /* ImmExt should be processed after SSE2AVX. */
3112 if (!i.tm.opcode_modifier.sse2avx
3113 && i.tm.opcode_modifier.immext)
3114 process_immext ();
3115
3116 /* For insns with operands there are more diddles to do to the opcode. */
3117 if (i.operands)
3118 {
3119 if (!process_operands ())
3120 return;
3121 }
3122 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3123 {
3124 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3125 as_warn (_("translating to `%sp'"), i.tm.name);
3126 }
3127
3128 if (i.tm.opcode_modifier.vex)
3129 build_vex_prefix (t);
3130
3131 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3132 instructions may define INT_OPCODE as well, so avoid this corner
3133 case for those instructions that use MODRM. */
3134 if (i.tm.base_opcode == INT_OPCODE
3135 && !i.tm.opcode_modifier.modrm
3136 && i.op[0].imms->X_add_number == 3)
3137 {
3138 i.tm.base_opcode = INT3_OPCODE;
3139 i.imm_operands = 0;
3140 }
3141
3142 if ((i.tm.opcode_modifier.jump
3143 || i.tm.opcode_modifier.jumpbyte
3144 || i.tm.opcode_modifier.jumpdword)
3145 && i.op[0].disps->X_op == O_constant)
3146 {
3147 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3148 the absolute address given by the constant. Since ix86 jumps and
3149 calls are pc relative, we need to generate a reloc. */
3150 i.op[0].disps->X_add_symbol = &abs_symbol;
3151 i.op[0].disps->X_op = O_symbol;
3152 }
3153
3154 if (i.tm.opcode_modifier.rex64)
3155 i.rex |= REX_W;
3156
3157 /* For 8 bit registers we need an empty rex prefix. Also if the
3158 instruction already has a prefix, we need to convert old
3159 registers to new ones. */
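  /* For example, "movb %sil, %al" needs an (otherwise empty) REX prefix
     to reach %sil, whereas "movb %ah, %sil" cannot be encoded at all
     since %ah is not addressable once a REX prefix is present.  */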
3160
3161 if ((i.types[0].bitfield.reg8
3162 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3163 || (i.types[1].bitfield.reg8
3164 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3165 || ((i.types[0].bitfield.reg8
3166 || i.types[1].bitfield.reg8)
3167 && i.rex != 0))
3168 {
3169 int x;
3170
3171 i.rex |= REX_OPCODE;
3172 for (x = 0; x < 2; x++)
3173 {
3174 /* Look for 8 bit operand that uses old registers. */
3175 if (i.types[x].bitfield.reg8
3176 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3177 {
3178	      /* In case it is a "hi" register, give up.  */
3179 if (i.op[x].regs->reg_num > 3)
3180 as_bad (_("can't encode register '%s%s' in an "
3181 "instruction requiring REX prefix."),
3182 register_prefix, i.op[x].regs->reg_name);
3183
3184 /* Otherwise it is equivalent to the extended register.
3185 Since the encoding doesn't change this is merely
3186 cosmetic cleanup for debug output. */
3187
3188 i.op[x].regs = i.op[x].regs + 8;
3189 }
3190 }
3191 }
3192
3193 if (i.rex != 0)
3194 add_prefix (REX_OPCODE | i.rex);
3195
3196 /* We are ready to output the insn. */
3197 output_insn ();
3198}
3199
3200static char *
3201parse_insn (char *line, char *mnemonic)
3202{
3203 char *l = line;
3204 char *token_start = l;
3205 char *mnem_p;
3206 int supported;
3207 const insn_template *t;
3208 char *dot_p = NULL;
3209
3210 /* Non-zero if we found a prefix only acceptable with string insns. */
3211 const char *expecting_string_instruction = NULL;
3212
3213 while (1)
3214 {
3215 mnem_p = mnemonic;
3216 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3217 {
3218 if (*mnem_p == '.')
3219 dot_p = mnem_p;
3220 mnem_p++;
3221 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3222 {
3223 as_bad (_("no such instruction: `%s'"), token_start);
3224 return NULL;
3225 }
3226 l++;
3227 }
3228 if (!is_space_char (*l)
3229 && *l != END_OF_INSN
3230 && (intel_syntax
3231 || (*l != PREFIX_SEPARATOR
3232 && *l != ',')))
3233 {
3234 as_bad (_("invalid character %s in mnemonic"),
3235 output_invalid (*l));
3236 return NULL;
3237 }
3238 if (token_start == l)
3239 {
3240 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3241 as_bad (_("expecting prefix; got nothing"));
3242 else
3243 as_bad (_("expecting mnemonic; got nothing"));
3244 return NULL;
3245 }
3246
3247 /* Look up instruction (or prefix) via hash table. */
3248 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3249
3250 if (*l != END_OF_INSN
3251 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3252 && current_templates
3253 && current_templates->start->opcode_modifier.isprefix)
3254 {
3255 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3256 {
3257 as_bad ((flag_code != CODE_64BIT
3258 ? _("`%s' is only supported in 64-bit mode")
3259 : _("`%s' is not supported in 64-bit mode")),
3260 current_templates->start->name);
3261 return NULL;
3262 }
3263 /* If we are in 16-bit mode, do not allow addr16 or data16.
3264 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3265 if ((current_templates->start->opcode_modifier.size16
3266 || current_templates->start->opcode_modifier.size32)
3267 && flag_code != CODE_64BIT
3268 && (current_templates->start->opcode_modifier.size32
3269 ^ (flag_code == CODE_16BIT)))
3270 {
3271 as_bad (_("redundant %s prefix"),
3272 current_templates->start->name);
3273 return NULL;
3274 }
3275 /* Add prefix, checking for repeated prefixes. */
3276 switch (add_prefix (current_templates->start->base_opcode))
3277 {
3278 case PREFIX_EXIST:
3279 return NULL;
3280 case PREFIX_REP:
3281 expecting_string_instruction = current_templates->start->name;
3282 break;
3283 default:
3284 break;
3285 }
3286 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3287 token_start = ++l;
3288 }
3289 else
3290 break;
3291 }
3292
3293 if (!current_templates)
3294 {
3295      /* Check if we should swap operands or force a 32bit displacement
3296	 in the encoding.  */
3297 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3298 i.swap_operand = 1;
3299 else if (mnem_p - 4 == dot_p
3300 && dot_p[1] == 'd'
3301 && dot_p[2] == '3'
3302 && dot_p[3] == '2')
3303 i.disp32_encoding = 1;
3304 else
3305 goto check_suffix;
3306 mnem_p = dot_p;
3307 *dot_p = '\0';
3308 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3309 }
3310
3311 if (!current_templates)
3312 {
3313check_suffix:
3314 /* See if we can get a match by trimming off a suffix. */
3315 switch (mnem_p[-1])
3316 {
3317 case WORD_MNEM_SUFFIX:
3318 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3319 i.suffix = SHORT_MNEM_SUFFIX;
3320 else
3321 case BYTE_MNEM_SUFFIX:
3322 case QWORD_MNEM_SUFFIX:
3323 i.suffix = mnem_p[-1];
3324 mnem_p[-1] = '\0';
3325 current_templates = (const templates *) hash_find (op_hash,
3326 mnemonic);
3327 break;
3328 case SHORT_MNEM_SUFFIX:
3329 case LONG_MNEM_SUFFIX:
3330 if (!intel_syntax)
3331 {
3332 i.suffix = mnem_p[-1];
3333 mnem_p[-1] = '\0';
3334 current_templates = (const templates *) hash_find (op_hash,
3335 mnemonic);
3336 }
3337 break;
3338
3339 /* Intel Syntax. */
3340 case 'd':
3341 if (intel_syntax)
3342 {
3343 if (intel_float_operand (mnemonic) == 1)
3344 i.suffix = SHORT_MNEM_SUFFIX;
3345 else
3346 i.suffix = LONG_MNEM_SUFFIX;
3347 mnem_p[-1] = '\0';
3348 current_templates = (const templates *) hash_find (op_hash,
3349 mnemonic);
3350 }
3351 break;
3352 }
3353 if (!current_templates)
3354 {
3355 as_bad (_("no such instruction: `%s'"), token_start);
3356 return NULL;
3357 }
3358 }
3359
3360 if (current_templates->start->opcode_modifier.jump
3361 || current_templates->start->opcode_modifier.jumpbyte)
3362 {
3363 /* Check for a branch hint. We allow ",pt" and ",pn" for
3364 predict taken and predict not taken respectively.
3365 I'm not sure that branch hints actually do anything on loop
3366 and jcxz insns (JumpByte) for current Pentium4 chips. They
3367 may work in the future and it doesn't hurt to accept them
3368 now. */
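      /* For example, "jne,pt target" adds the 0x3e (ds) prefix and
	 "jne,pn target" adds the 0x2e (cs) prefix.  */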
3369 if (l[0] == ',' && l[1] == 'p')
3370 {
3371 if (l[2] == 't')
3372 {
3373 if (!add_prefix (DS_PREFIX_OPCODE))
3374 return NULL;
3375 l += 3;
3376 }
3377 else if (l[2] == 'n')
3378 {
3379 if (!add_prefix (CS_PREFIX_OPCODE))
3380 return NULL;
3381 l += 3;
3382 }
3383 }
3384 }
3385 /* Any other comma loses. */
3386 if (*l == ',')
3387 {
3388 as_bad (_("invalid character %s in mnemonic"),
3389 output_invalid (*l));
3390 return NULL;
3391 }
3392
3393 /* Check if instruction is supported on specified architecture. */
3394 supported = 0;
3395 for (t = current_templates->start; t < current_templates->end; ++t)
3396 {
3397 supported |= cpu_flags_match (t);
3398 if (supported == CPU_FLAGS_PERFECT_MATCH)
3399 goto skip;
3400 }
3401
3402 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3403 {
3404 as_bad (flag_code == CODE_64BIT
3405 ? _("`%s' is not supported in 64-bit mode")
3406 : _("`%s' is only supported in 64-bit mode"),
3407 current_templates->start->name);
3408 return NULL;
3409 }
3410 if (supported != CPU_FLAGS_PERFECT_MATCH)
3411 {
3412 as_bad (_("`%s' is not supported on `%s%s'"),
3413 current_templates->start->name,
3414 cpu_arch_name ? cpu_arch_name : default_arch,
3415 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3416 return NULL;
3417 }
3418
3419skip:
3420 if (!cpu_arch_flags.bitfield.cpui386
3421 && (flag_code != CODE_16BIT))
3422 {
3423 as_warn (_("use .code16 to ensure correct addressing mode"));
3424 }
3425
3426 /* Check for rep/repne without a string instruction. */
3427 if (expecting_string_instruction)
3428 {
3429 static templates override;
3430
3431 for (t = current_templates->start; t < current_templates->end; ++t)
3432 if (t->opcode_modifier.isstring)
3433 break;
3434 if (t >= current_templates->end)
3435 {
3436 as_bad (_("expecting string instruction after `%s'"),
3437 expecting_string_instruction);
3438 return NULL;
3439 }
3440 for (override.start = t; t < current_templates->end; ++t)
3441 if (!t->opcode_modifier.isstring)
3442 break;
3443 override.end = t;
3444 current_templates = &override;
3445 }
3446
3447 return l;
3448}
3449
3450static char *
3451parse_operands (char *l, const char *mnemonic)
3452{
3453 char *token_start;
3454
3455 /* 1 if operand is pending after ','. */
3456 unsigned int expecting_operand = 0;
3457
3458 /* Non-zero if operand parens not balanced. */
3459 unsigned int paren_not_balanced;
3460
3461 while (*l != END_OF_INSN)
3462 {
3463 /* Skip optional white space before operand. */
3464 if (is_space_char (*l))
3465 ++l;
3466 if (!is_operand_char (*l) && *l != END_OF_INSN)
3467 {
3468 as_bad (_("invalid character %s before operand %d"),
3469 output_invalid (*l),
3470 i.operands + 1);
3471 return NULL;
3472 }
3473 token_start = l; /* after white space */
3474 paren_not_balanced = 0;
3475 while (paren_not_balanced || *l != ',')
3476 {
3477 if (*l == END_OF_INSN)
3478 {
3479 if (paren_not_balanced)
3480 {
3481 if (!intel_syntax)
3482 as_bad (_("unbalanced parenthesis in operand %d."),
3483 i.operands + 1);
3484 else
3485 as_bad (_("unbalanced brackets in operand %d."),
3486 i.operands + 1);
3487 return NULL;
3488 }
3489 else
3490 break; /* we are done */
3491 }
3492 else if (!is_operand_char (*l) && !is_space_char (*l))
3493 {
3494 as_bad (_("invalid character %s in operand %d"),
3495 output_invalid (*l),
3496 i.operands + 1);
3497 return NULL;
3498 }
3499 if (!intel_syntax)
3500 {
3501 if (*l == '(')
3502 ++paren_not_balanced;
3503 if (*l == ')')
3504 --paren_not_balanced;
3505 }
3506 else
3507 {
3508 if (*l == '[')
3509 ++paren_not_balanced;
3510 if (*l == ']')
3511 --paren_not_balanced;
3512 }
3513 l++;
3514 }
3515 if (l != token_start)
3516 { /* Yes, we've read in another operand. */
3517 unsigned int operand_ok;
3518 this_operand = i.operands++;
3519 i.types[this_operand].bitfield.unspecified = 1;
3520 if (i.operands > MAX_OPERANDS)
3521 {
3522 as_bad (_("spurious operands; (%d operands/instruction max)"),
3523 MAX_OPERANDS);
3524 return NULL;
3525 }
3526 /* Now parse operand adding info to 'i' as we go along. */
3527 END_STRING_AND_SAVE (l);
3528
3529 if (intel_syntax)
3530 operand_ok =
3531 i386_intel_operand (token_start,
3532 intel_float_operand (mnemonic));
3533 else
3534 operand_ok = i386_att_operand (token_start);
3535
3536 RESTORE_END_STRING (l);
3537 if (!operand_ok)
3538 return NULL;
3539 }
3540 else
3541 {
3542 if (expecting_operand)
3543 {
3544 expecting_operand_after_comma:
3545 as_bad (_("expecting operand after ','; got nothing"));
3546 return NULL;
3547 }
3548 if (*l == ',')
3549 {
3550 as_bad (_("expecting operand before ','; got nothing"));
3551 return NULL;
3552 }
3553 }
3554
3555 /* Now *l must be either ',' or END_OF_INSN. */
3556 if (*l == ',')
3557 {
3558 if (*++l == END_OF_INSN)
3559 {
3560 /* Just skip it, if it's \n complain. */
3561 goto expecting_operand_after_comma;
3562 }
3563 expecting_operand = 1;
3564 }
3565 }
3566 return l;
3567}
3568
3569static void
3570swap_2_operands (int xchg1, int xchg2)
3571{
3572 union i386_op temp_op;
3573 i386_operand_type temp_type;
3574 enum bfd_reloc_code_real temp_reloc;
3575
3576 temp_type = i.types[xchg2];
3577 i.types[xchg2] = i.types[xchg1];
3578 i.types[xchg1] = temp_type;
3579 temp_op = i.op[xchg2];
3580 i.op[xchg2] = i.op[xchg1];
3581 i.op[xchg1] = temp_op;
3582 temp_reloc = i.reloc[xchg2];
3583 i.reloc[xchg2] = i.reloc[xchg1];
3584 i.reloc[xchg1] = temp_reloc;
3585}
3586
3587static void
3588swap_operands (void)
3589{
3590 switch (i.operands)
3591 {
3592 case 5:
3593 case 4:
3594 swap_2_operands (1, i.operands - 2);
3595 case 3:
3596 case 2:
3597 swap_2_operands (0, i.operands - 1);
3598 break;
3599 default:
3600 abort ();
3601 }
3602
3603 if (i.mem_operands == 2)
3604 {
3605 const seg_entry *temp_seg;
3606 temp_seg = i.seg[0];
3607 i.seg[0] = i.seg[1];
3608 i.seg[1] = temp_seg;
3609 }
3610}
3611
3612/* Try to ensure constant immediates are represented in the smallest
3613 opcode possible. */
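/* For example, with a 'w' suffix a constant such as 0xffe0 is first
   normalized to -32 below and thereby also qualifies as Imm8S, so that a
   sign-extended 8-bit immediate form (e.g. opcode 0x83 for add) can be
   chosen when the templates provide one.  */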
3614static void
3615optimize_imm (void)
3616{
3617 char guess_suffix = 0;
3618 int op;
3619
3620 if (i.suffix)
3621 guess_suffix = i.suffix;
3622 else if (i.reg_operands)
3623 {
3624 /* Figure out a suffix from the last register operand specified.
3625	 We can't do this properly yet, i.e. excluding InOutPortReg,
3626 but the following works for instructions with immediates.
3627 In any case, we can't set i.suffix yet. */
3628 for (op = i.operands; --op >= 0;)
3629 if (i.types[op].bitfield.reg8)
3630 {
3631 guess_suffix = BYTE_MNEM_SUFFIX;
3632 break;
3633 }
3634 else if (i.types[op].bitfield.reg16)
3635 {
3636 guess_suffix = WORD_MNEM_SUFFIX;
3637 break;
3638 }
3639 else if (i.types[op].bitfield.reg32)
3640 {
3641 guess_suffix = LONG_MNEM_SUFFIX;
3642 break;
3643 }
3644 else if (i.types[op].bitfield.reg64)
3645 {
3646 guess_suffix = QWORD_MNEM_SUFFIX;
3647 break;
3648 }
3649 }
3650 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3651 guess_suffix = WORD_MNEM_SUFFIX;
3652
3653 for (op = i.operands; --op >= 0;)
3654 if (operand_type_check (i.types[op], imm))
3655 {
3656 switch (i.op[op].imms->X_op)
3657 {
3658 case O_constant:
3659 /* If a suffix is given, this operand may be shortened. */
3660 switch (guess_suffix)
3661 {
3662 case LONG_MNEM_SUFFIX:
3663 i.types[op].bitfield.imm32 = 1;
3664 i.types[op].bitfield.imm64 = 1;
3665 break;
3666 case WORD_MNEM_SUFFIX:
3667 i.types[op].bitfield.imm16 = 1;
3668 i.types[op].bitfield.imm32 = 1;
3669 i.types[op].bitfield.imm32s = 1;
3670 i.types[op].bitfield.imm64 = 1;
3671 break;
3672 case BYTE_MNEM_SUFFIX:
3673 i.types[op].bitfield.imm8 = 1;
3674 i.types[op].bitfield.imm8s = 1;
3675 i.types[op].bitfield.imm16 = 1;
3676 i.types[op].bitfield.imm32 = 1;
3677 i.types[op].bitfield.imm32s = 1;
3678 i.types[op].bitfield.imm64 = 1;
3679 break;
3680 }
3681
3682 /* If this operand is at most 16 bits, convert it
3683 to a signed 16 bit number before trying to see
3684 whether it will fit in an even smaller size.
3685 This allows a 16-bit operand such as $0xffe0 to
3686 be recognised as within Imm8S range. */
3687 if ((i.types[op].bitfield.imm16)
3688 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3689 {
3690 i.op[op].imms->X_add_number =
3691 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3692 }
3693 if ((i.types[op].bitfield.imm32)
3694 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3695 == 0))
3696 {
3697 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3698 ^ ((offsetT) 1 << 31))
3699 - ((offsetT) 1 << 31));
3700 }
3701 i.types[op]
3702 = operand_type_or (i.types[op],
3703 smallest_imm_type (i.op[op].imms->X_add_number));
3704
3705	      /* We must avoid matching Imm32 templates when only a
3706		 64bit immediate is available.  */
3707 if (guess_suffix == QWORD_MNEM_SUFFIX)
3708 i.types[op].bitfield.imm32 = 0;
3709 break;
3710
3711 case O_absent:
3712 case O_register:
3713 abort ();
3714
3715 /* Symbols and expressions. */
3716 default:
3717 /* Convert symbolic operand to proper sizes for matching, but don't
3718 prevent matching a set of insns that only supports sizes other
3719 than those matching the insn suffix. */
3720 {
3721 i386_operand_type mask, allowed;
3722 const insn_template *t;
3723
3724 operand_type_set (&mask, 0);
3725 operand_type_set (&allowed, 0);
3726
3727 for (t = current_templates->start;
3728 t < current_templates->end;
3729 ++t)
3730 allowed = operand_type_or (allowed,
3731 t->operand_types[op]);
3732 switch (guess_suffix)
3733 {
3734 case QWORD_MNEM_SUFFIX:
3735 mask.bitfield.imm64 = 1;
3736 mask.bitfield.imm32s = 1;
3737 break;
3738 case LONG_MNEM_SUFFIX:
3739 mask.bitfield.imm32 = 1;
3740 break;
3741 case WORD_MNEM_SUFFIX:
3742 mask.bitfield.imm16 = 1;
3743 break;
3744 case BYTE_MNEM_SUFFIX:
3745 mask.bitfield.imm8 = 1;
3746 break;
3747 default:
3748 break;
3749 }
3750 allowed = operand_type_and (mask, allowed);
3751 if (!operand_type_all_zero (&allowed))
3752 i.types[op] = operand_type_and (i.types[op], mask);
3753 }
3754 break;
3755 }
3756 }
3757}
3758
3759/* Try to use the smallest displacement type too. */
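/* For example, in "movl 8(%ebp), %eax" the displacement fits in a signed
   byte, so Disp8 is set below and only a single displacement byte needs
   to be emitted later on.  */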
3760static void
3761optimize_disp (void)
3762{
3763 int op;
3764
3765 for (op = i.operands; --op >= 0;)
3766 if (operand_type_check (i.types[op], disp))
3767 {
3768 if (i.op[op].disps->X_op == O_constant)
3769 {
3770 offsetT op_disp = i.op[op].disps->X_add_number;
3771
3772 if (i.types[op].bitfield.disp16
3773 && (op_disp & ~(offsetT) 0xffff) == 0)
3774 {
3775 /* If this operand is at most 16 bits, convert
3776 to a signed 16 bit number and don't use 64bit
3777 displacement. */
3778 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3779 i.types[op].bitfield.disp64 = 0;
3780 }
3781 if (i.types[op].bitfield.disp32
3782 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3783 {
3784 /* If this operand is at most 32 bits, convert
3785 to a signed 32 bit number and don't use 64bit
3786 displacement. */
3787 op_disp &= (((offsetT) 2 << 31) - 1);
3788 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3789 i.types[op].bitfield.disp64 = 0;
3790 }
3791 if (!op_disp && i.types[op].bitfield.baseindex)
3792 {
3793 i.types[op].bitfield.disp8 = 0;
3794 i.types[op].bitfield.disp16 = 0;
3795 i.types[op].bitfield.disp32 = 0;
3796 i.types[op].bitfield.disp32s = 0;
3797 i.types[op].bitfield.disp64 = 0;
3798 i.op[op].disps = 0;
3799 i.disp_operands--;
3800 }
3801 else if (flag_code == CODE_64BIT)
3802 {
3803 if (fits_in_signed_long (op_disp))
3804 {
3805 i.types[op].bitfield.disp64 = 0;
3806 i.types[op].bitfield.disp32s = 1;
3807 }
3808 if (i.prefix[ADDR_PREFIX]
3809 && fits_in_unsigned_long (op_disp))
3810 i.types[op].bitfield.disp32 = 1;
3811 }
3812 if ((i.types[op].bitfield.disp32
3813 || i.types[op].bitfield.disp32s
3814 || i.types[op].bitfield.disp16)
3815 && fits_in_signed_byte (op_disp))
3816 i.types[op].bitfield.disp8 = 1;
3817 }
3818 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3819 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3820 {
3821 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3822 i.op[op].disps, 0, i.reloc[op]);
3823 i.types[op].bitfield.disp8 = 0;
3824 i.types[op].bitfield.disp16 = 0;
3825 i.types[op].bitfield.disp32 = 0;
3826 i.types[op].bitfield.disp32s = 0;
3827 i.types[op].bitfield.disp64 = 0;
3828 }
3829 else
3830	/* We only support 64bit displacements on constants.  */
3831 i.types[op].bitfield.disp64 = 0;
3832 }
3833}
3834
3835/* Check if operands are valid for the instruction. Update VEX
3836 operand types. */
3837
3838static int
3839VEX_check_operands (const insn_template *t)
3840{
3841 if (!t->opcode_modifier.vex)
3842 return 0;
3843
3844 /* Only check VEX_Imm4, which must be the first operand. */
3845 if (t->operand_types[0].bitfield.vec_imm4)
3846 {
3847 if (i.op[0].imms->X_op != O_constant
3848 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3849 {
3850 i.error = bad_imm4;
3851 return 1;
3852 }
3853
3854 /* Turn off Imm8 so that update_imm won't complain. */
3855 i.types[0] = vec_imm4;
3856 }
3857
3858 return 0;
3859}
3860
3861static const insn_template *
3862match_template (void)
3863{
3864 /* Points to template once we've found it. */
3865 const insn_template *t;
3866 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3867 i386_operand_type overlap4;
3868 unsigned int found_reverse_match;
3869 i386_opcode_modifier suffix_check;
3870 i386_operand_type operand_types [MAX_OPERANDS];
3871 int addr_prefix_disp;
3872 unsigned int j;
3873 unsigned int found_cpu_match;
3874 unsigned int check_register;
3875
3876#if MAX_OPERANDS != 5
3877# error "MAX_OPERANDS must be 5."
3878#endif
3879
3880 found_reverse_match = 0;
3881 addr_prefix_disp = -1;
3882
3883 memset (&suffix_check, 0, sizeof (suffix_check));
3884 if (i.suffix == BYTE_MNEM_SUFFIX)
3885 suffix_check.no_bsuf = 1;
3886 else if (i.suffix == WORD_MNEM_SUFFIX)
3887 suffix_check.no_wsuf = 1;
3888 else if (i.suffix == SHORT_MNEM_SUFFIX)
3889 suffix_check.no_ssuf = 1;
3890 else if (i.suffix == LONG_MNEM_SUFFIX)
3891 suffix_check.no_lsuf = 1;
3892 else if (i.suffix == QWORD_MNEM_SUFFIX)
3893 suffix_check.no_qsuf = 1;
3894 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3895 suffix_check.no_ldsuf = 1;
3896
3897 /* Must have right number of operands. */
3898 i.error = number_of_operands_mismatch;
3899
3900 for (t = current_templates->start; t < current_templates->end; t++)
3901 {
3902 addr_prefix_disp = -1;
3903
3904 if (i.operands != t->operands)
3905 continue;
3906
3907 /* Check processor support. */
3908 i.error = unsupported;
3909 found_cpu_match = (cpu_flags_match (t)
3910 == CPU_FLAGS_PERFECT_MATCH);
3911 if (!found_cpu_match)
3912 continue;
3913
3914 /* Check old gcc support. */
3915 i.error = old_gcc_only;
3916 if (!old_gcc && t->opcode_modifier.oldgcc)
3917 continue;
3918
3919 /* Check AT&T mnemonic. */
3920 i.error = unsupported_with_intel_mnemonic;
3921 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3922 continue;
3923
3924 /* Check AT&T/Intel syntax. */
3925 i.error = unsupported_syntax;
3926 if ((intel_syntax && t->opcode_modifier.attsyntax)
3927 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3928 continue;
3929
3930 /* Check the suffix, except for some instructions in intel mode. */
3931 i.error = invalid_instruction_suffix;
3932 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
3933 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
3934 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
3935 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
3936 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
3937 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
3938 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
3939 continue;
3940
3941 if (!operand_size_match (t))
3942 continue;
3943
3944 for (j = 0; j < MAX_OPERANDS; j++)
3945 operand_types[j] = t->operand_types[j];
3946
3947 /* In general, don't allow 64-bit operands in 32-bit mode. */
3948 if (i.suffix == QWORD_MNEM_SUFFIX
3949 && flag_code != CODE_64BIT
3950 && (intel_syntax
3951 ? (!t->opcode_modifier.ignoresize
3952 && !intel_float_operand (t->name))
3953 : intel_float_operand (t->name) != 2)
3954 && ((!operand_types[0].bitfield.regmmx
3955 && !operand_types[0].bitfield.regxmm
3956 && !operand_types[0].bitfield.regymm)
3957 || (!operand_types[t->operands > 1].bitfield.regmmx
3958 && !!operand_types[t->operands > 1].bitfield.regxmm
3959 && !!operand_types[t->operands > 1].bitfield.regymm))
3960 && (t->base_opcode != 0x0fc7
3961 || t->extension_opcode != 1 /* cmpxchg8b */))
3962 continue;
3963
3964 /* In general, don't allow 32-bit operands on pre-386. */
3965 else if (i.suffix == LONG_MNEM_SUFFIX
3966 && !cpu_arch_flags.bitfield.cpui386
3967 && (intel_syntax
3968 ? (!t->opcode_modifier.ignoresize
3969 && !intel_float_operand (t->name))
3970 : intel_float_operand (t->name) != 2)
3971 && ((!operand_types[0].bitfield.regmmx
3972 && !operand_types[0].bitfield.regxmm)
3973 || (!operand_types[t->operands > 1].bitfield.regmmx
3974 && !!operand_types[t->operands > 1].bitfield.regxmm)))
3975 continue;
3976
3977 /* Do not verify operands when there are none. */
3978 else
3979 {
3980 if (!t->operands)
3981 /* We've found a match; break out of loop. */
3982 break;
3983 }
3984
3985 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
3986 into Disp32/Disp16/Disp32 operand. */
3987 if (i.prefix[ADDR_PREFIX] != 0)
3988 {
3989 /* There should be only one Disp operand. */
3990 switch (flag_code)
3991 {
3992 case CODE_16BIT:
3993 for (j = 0; j < MAX_OPERANDS; j++)
3994 {
3995 if (operand_types[j].bitfield.disp16)
3996 {
3997 addr_prefix_disp = j;
3998 operand_types[j].bitfield.disp32 = 1;
3999 operand_types[j].bitfield.disp16 = 0;
4000 break;
4001 }
4002 }
4003 break;
4004 case CODE_32BIT:
4005 for (j = 0; j < MAX_OPERANDS; j++)
4006 {
4007 if (operand_types[j].bitfield.disp32)
4008 {
4009 addr_prefix_disp = j;
4010 operand_types[j].bitfield.disp32 = 0;
4011 operand_types[j].bitfield.disp16 = 1;
4012 break;
4013 }
4014 }
4015 break;
4016 case CODE_64BIT:
4017 for (j = 0; j < MAX_OPERANDS; j++)
4018 {
4019 if (operand_types[j].bitfield.disp64)
4020 {
4021 addr_prefix_disp = j;
4022 operand_types[j].bitfield.disp64 = 0;
4023 operand_types[j].bitfield.disp32 = 1;
4024 break;
4025 }
4026 }
4027 break;
4028 }
4029 }
4030
4031 /* We check register size if needed. */
4032 check_register = t->opcode_modifier.checkregsize;
4033 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4034 switch (t->operands)
4035 {
4036 case 1:
4037 if (!operand_type_match (overlap0, i.types[0]))
4038 continue;
4039 break;
4040 case 2:
4041 /* xchg %eax, %eax is a special case. It is an alias for nop
4042 only in 32bit mode and we can use opcode 0x90. In 64bit
4043 mode, we can't use 0x90 for xchg %eax, %eax since it should
4044 zero-extend %eax to %rax. */
4045 if (flag_code == CODE_64BIT
4046 && t->base_opcode == 0x90
4047 && operand_type_equal (&i.types [0], &acc32)
4048 && operand_type_equal (&i.types [1], &acc32))
4049 continue;
4050 if (i.swap_operand)
4051 {
4052 /* If we swap operand in encoding, we either match
4053 the next one or reverse direction of operands. */
4054 if (t->opcode_modifier.s)
4055 continue;
4056 else if (t->opcode_modifier.d)
4057 goto check_reverse;
4058 }
4059
4060 case 3:
4061 /* If we swap operand in encoding, we match the next one. */
4062 if (i.swap_operand && t->opcode_modifier.s)
4063 continue;
4064 case 4:
4065 case 5:
4066 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4067 if (!operand_type_match (overlap0, i.types[0])
4068 || !operand_type_match (overlap1, i.types[1])
4069 || (check_register
4070 && !operand_type_register_match (overlap0, i.types[0],
4071 operand_types[0],
4072 overlap1, i.types[1],
4073 operand_types[1])))
4074 {
4075 /* Check if other direction is valid ... */
4076 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4077 continue;
4078
4079check_reverse:
4080 /* Try reversing direction of operands. */
4081 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4082 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4083 if (!operand_type_match (overlap0, i.types[0])
4084 || !operand_type_match (overlap1, i.types[1])
4085 || (check_register
4086 && !operand_type_register_match (overlap0,
4087 i.types[0],
4088 operand_types[1],
4089 overlap1,
4090 i.types[1],
4091 operand_types[0])))
4092 {
4093 /* Does not match either direction. */
4094 continue;
4095 }
4096 /* found_reverse_match holds which of D or FloatDR
4097 we've found. */
4098 if (t->opcode_modifier.d)
4099 found_reverse_match = Opcode_D;
4100 else if (t->opcode_modifier.floatd)
4101 found_reverse_match = Opcode_FloatD;
4102 else
4103 found_reverse_match = 0;
4104 if (t->opcode_modifier.floatr)
4105 found_reverse_match |= Opcode_FloatR;
4106 }
4107 else
4108 {
4109 /* Found a forward 2 operand match here. */
4110 switch (t->operands)
4111 {
4112 case 5:
4113 overlap4 = operand_type_and (i.types[4],
4114 operand_types[4]);
4115 case 4:
4116 overlap3 = operand_type_and (i.types[3],
4117 operand_types[3]);
4118 case 3:
4119 overlap2 = operand_type_and (i.types[2],
4120 operand_types[2]);
4121 break;
4122 }
4123
4124 switch (t->operands)
4125 {
4126 case 5:
4127 if (!operand_type_match (overlap4, i.types[4])
4128 || !operand_type_register_match (overlap3,
4129 i.types[3],
4130 operand_types[3],
4131 overlap4,
4132 i.types[4],
4133 operand_types[4]))
4134 continue;
4135 case 4:
4136 if (!operand_type_match (overlap3, i.types[3])
4137 || (check_register
4138 && !operand_type_register_match (overlap2,
4139 i.types[2],
4140 operand_types[2],
4141 overlap3,
4142 i.types[3],
4143 operand_types[3])))
4144 continue;
4145 case 3:
4146 /* Here we make use of the fact that there are no
4147 reverse match 3 operand instructions, and all 3
4148 operand instructions only need to be checked for
4149 register consistency between operands 2 and 3. */
4150 if (!operand_type_match (overlap2, i.types[2])
4151 || (check_register
4152 && !operand_type_register_match (overlap1,
4153 i.types[1],
4154 operand_types[1],
4155 overlap2,
4156 i.types[2],
4157 operand_types[2])))
4158 continue;
4159 break;
4160 }
4161 }
4162 /* Found either forward/reverse 2, 3 or 4 operand match here:
4163 slip through to break. */
4164 }
4165 if (!found_cpu_match)
4166 {
4167 found_reverse_match = 0;
4168 continue;
4169 }
4170
4171 /* Check if VEX operands are valid. */
4172 if (VEX_check_operands (t))
4173 continue;
4174
4175 /* We've found a match; break out of loop. */
4176 break;
4177 }
4178
4179 if (t == current_templates->end)
4180 {
4181 /* We found no match. */
4182 const char *err_msg;
4183 switch (i.error)
4184 {
4185 default:
4186 abort ();
4187 case operand_size_mismatch:
4188 err_msg = _("operand size mismatch");
4189 break;
4190 case operand_type_mismatch:
4191 err_msg = _("operand type mismatch");
4192 break;
4193 case register_type_mismatch:
4194 err_msg = _("register type mismatch");
4195 break;
4196 case number_of_operands_mismatch:
4197 err_msg = _("number of operands mismatch");
4198 break;
4199 case invalid_instruction_suffix:
4200 err_msg = _("invalid instruction suffix");
4201 break;
4202 case bad_imm4:
4203 err_msg = _("Imm4 isn't the first operand");
4204 break;
4205 case old_gcc_only:
4206 err_msg = _("only supported with old gcc");
4207 break;
4208 case unsupported_with_intel_mnemonic:
4209 err_msg = _("unsupported with Intel mnemonic");
4210 break;
4211 case unsupported_syntax:
4212 err_msg = _("unsupported syntax");
4213 break;
4214 case unsupported:
4215 err_msg = _("unsupported");
4216 break;
4217 }
4218 as_bad (_("%s for `%s'"), err_msg,
4219 current_templates->start->name);
4220 return NULL;
4221 }
4222
4223 if (!quiet_warnings)
4224 {
4225 if (!intel_syntax
4226 && (i.types[0].bitfield.jumpabsolute
4227 != operand_types[0].bitfield.jumpabsolute))
4228 {
4229 as_warn (_("indirect %s without `*'"), t->name);
4230 }
4231
4232 if (t->opcode_modifier.isprefix
4233 && t->opcode_modifier.ignoresize)
4234 {
4235 /* Warn them that a data or address size prefix doesn't
4236 affect assembly of the next line of code. */
4237 as_warn (_("stand-alone `%s' prefix"), t->name);
4238 }
4239 }
4240
4241 /* Copy the template we found. */
4242 i.tm = *t;
4243
4244 if (addr_prefix_disp != -1)
4245 i.tm.operand_types[addr_prefix_disp]
4246 = operand_types[addr_prefix_disp];
4247
4248 if (found_reverse_match)
4249 {
4250 /* If we found a reverse match we must alter the opcode
4251 direction bit. found_reverse_match holds bits to change
4252 (different for int & float insns). */
4253
4254 i.tm.base_opcode ^= found_reverse_match;
4255
4256 i.tm.operand_types[0] = operand_types[1];
4257 i.tm.operand_types[1] = operand_types[0];
4258 }
4259
4260 return t;
4261}
4262
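/* Check the operands of a string instruction: the operand that is
   implicitly addressed through %di must use the %es segment, so any
   other segment override on it is rejected. */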
4263static int
4264check_string (void)
4265{
4266 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4267 if (i.tm.operand_types[mem_op].bitfield.esseg)
4268 {
4269 if (i.seg[0] != NULL && i.seg[0] != &es)
4270 {
4271 as_bad (_("`%s' operand %d must use `%ses' segment"),
4272 i.tm.name,
4273 mem_op + 1,
4274 register_prefix);
4275 return 0;
4276 }
4277 /* There's only ever one segment override allowed per instruction.
4278 This instruction possibly has a legal segment override on the
4279 second operand, so copy the segment to where non-string
4280 instructions store it, allowing common code. */
4281 i.seg[0] = i.seg[1];
4282 }
4283 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4284 {
4285 if (i.seg[1] != NULL && i.seg[1] != &es)
4286 {
4287 as_bad (_("`%s' operand %d must use `%ses' segment"),
4288 i.tm.name,
4289 mem_op + 2,
4290 register_prefix);
4291 return 0;
4292 }
4293 }
4294 return 1;
4295}
4296
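/* Determine the operand size suffix (or validate an explicit one) and
   adjust the opcode, REX.W bit and operand/address size prefixes to
   match. Returns 0 after issuing a diagnostic on failure. */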
4297static int
4298process_suffix (void)
4299{
4300 /* If matched instruction specifies an explicit instruction mnemonic
4301 suffix, use it. */
4302 if (i.tm.opcode_modifier.size16)
4303 i.suffix = WORD_MNEM_SUFFIX;
4304 else if (i.tm.opcode_modifier.size32)
4305 i.suffix = LONG_MNEM_SUFFIX;
4306 else if (i.tm.opcode_modifier.size64)
4307 i.suffix = QWORD_MNEM_SUFFIX;
4308 else if (i.reg_operands)
4309 {
4310 /* If there's no instruction mnemonic suffix we try to invent one
4311 based on register operands. */
4312 if (!i.suffix)
4313 {
4314 /* We take i.suffix from the last register operand specified.
4315 Destination register type is more significant than source
4316 register type. crc32 in SSE4.2 prefers source register
4317 type. */
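 /* E.g. "crc32 %cx, %ebx" takes its `w' suffix from the 16-bit
    source register rather than from the 32-bit destination. */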
4318 if (i.tm.base_opcode == 0xf20f38f1)
4319 {
4320 if (i.types[0].bitfield.reg16)
4321 i.suffix = WORD_MNEM_SUFFIX;
4322 else if (i.types[0].bitfield.reg32)
4323 i.suffix = LONG_MNEM_SUFFIX;
4324 else if (i.types[0].bitfield.reg64)
4325 i.suffix = QWORD_MNEM_SUFFIX;
4326 }
4327 else if (i.tm.base_opcode == 0xf20f38f0)
4328 {
4329 if (i.types[0].bitfield.reg8)
4330 i.suffix = BYTE_MNEM_SUFFIX;
4331 }
4332
4333 if (!i.suffix)
4334 {
4335 int op;
4336
4337 if (i.tm.base_opcode == 0xf20f38f1
4338 || i.tm.base_opcode == 0xf20f38f0)
4339 {
4340 /* We have to know the operand size for crc32. */
4341 as_bad (_("ambiguous memory operand size for `%s'"),
4342 i.tm.name);
4343 return 0;
4344 }
4345
4346 for (op = i.operands; --op >= 0;)
4347 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4348 {
4349 if (i.types[op].bitfield.reg8)
4350 {
4351 i.suffix = BYTE_MNEM_SUFFIX;
4352 break;
4353 }
4354 else if (i.types[op].bitfield.reg16)
4355 {
4356 i.suffix = WORD_MNEM_SUFFIX;
4357 break;
4358 }
4359 else if (i.types[op].bitfield.reg32)
4360 {
4361 i.suffix = LONG_MNEM_SUFFIX;
4362 break;
4363 }
4364 else if (i.types[op].bitfield.reg64)
4365 {
4366 i.suffix = QWORD_MNEM_SUFFIX;
4367 break;
4368 }
4369 }
4370 }
4371 }
4372 else if (i.suffix == BYTE_MNEM_SUFFIX)
4373 {
4374 if (intel_syntax
4375 && i.tm.opcode_modifier.ignoresize
4376 && i.tm.opcode_modifier.no_bsuf)
4377 i.suffix = 0;
4378 else if (!check_byte_reg ())
4379 return 0;
4380 }
4381 else if (i.suffix == LONG_MNEM_SUFFIX)
4382 {
4383 if (intel_syntax
4384 && i.tm.opcode_modifier.ignoresize
4385 && i.tm.opcode_modifier.no_lsuf)
4386 i.suffix = 0;
4387 else if (!check_long_reg ())
4388 return 0;
4389 }
4390 else if (i.suffix == QWORD_MNEM_SUFFIX)
4391 {
4392 if (intel_syntax
4393 && i.tm.opcode_modifier.ignoresize
4394 && i.tm.opcode_modifier.no_qsuf)
4395 i.suffix = 0;
4396 else if (!check_qword_reg ())
4397 return 0;
4398 }
4399 else if (i.suffix == WORD_MNEM_SUFFIX)
4400 {
4401 if (intel_syntax
4402 && i.tm.opcode_modifier.ignoresize
4403 && i.tm.opcode_modifier.no_wsuf)
4404 i.suffix = 0;
4405 else if (!check_word_reg ())
4406 return 0;
4407 }
4408 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4409 || i.suffix == YMMWORD_MNEM_SUFFIX)
4410 {
4411 /* Skip if the instruction has x/y suffix. match_template
4412 should check if it is a valid suffix. */
4413 }
4414 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4415 /* Do nothing if the instruction is going to ignore the prefix. */
4416 ;
4417 else
4418 abort ();
4419 }
4420 else if (i.tm.opcode_modifier.defaultsize
4421 && !i.suffix
4422 /* exclude fldenv/frstor/fsave/fstenv */
4423 && i.tm.opcode_modifier.no_ssuf)
4424 {
4425 i.suffix = stackop_size;
4426 }
4427 else if (intel_syntax
4428 && !i.suffix
4429 && (i.tm.operand_types[0].bitfield.jumpabsolute
4430 || i.tm.opcode_modifier.jumpbyte
4431 || i.tm.opcode_modifier.jumpintersegment
4432 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4433 && i.tm.extension_opcode <= 3)))
4434 {
4435 switch (flag_code)
4436 {
4437 case CODE_64BIT:
4438 if (!i.tm.opcode_modifier.no_qsuf)
4439 {
4440 i.suffix = QWORD_MNEM_SUFFIX;
4441 break;
4442 }
4443 case CODE_32BIT:
4444 if (!i.tm.opcode_modifier.no_lsuf)
4445 i.suffix = LONG_MNEM_SUFFIX;
4446 break;
4447 case CODE_16BIT:
4448 if (!i.tm.opcode_modifier.no_wsuf)
4449 i.suffix = WORD_MNEM_SUFFIX;
4450 break;
4451 }
4452 }
4453
4454 if (!i.suffix)
4455 {
4456 if (!intel_syntax)
4457 {
4458 if (i.tm.opcode_modifier.w)
4459 {
4460 as_bad (_("no instruction mnemonic suffix given and "
4461 "no register operands; can't size instruction"));
4462 return 0;
4463 }
4464 }
4465 else
4466 {
4467 unsigned int suffixes;
4468
4469 suffixes = !i.tm.opcode_modifier.no_bsuf;
4470 if (!i.tm.opcode_modifier.no_wsuf)
4471 suffixes |= 1 << 1;
4472 if (!i.tm.opcode_modifier.no_lsuf)
4473 suffixes |= 1 << 2;
4474 if (!i.tm.opcode_modifier.no_ldsuf)
4475 suffixes |= 1 << 3;
4476 if (!i.tm.opcode_modifier.no_ssuf)
4477 suffixes |= 1 << 4;
4478 if (!i.tm.opcode_modifier.no_qsuf)
4479 suffixes |= 1 << 5;
4480
4481 /* There is more than one possible suffix match. */
4482 if (i.tm.opcode_modifier.w
4483 || ((suffixes & (suffixes - 1))
4484 && !i.tm.opcode_modifier.defaultsize
4485 && !i.tm.opcode_modifier.ignoresize))
4486 {
4487 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4488 return 0;
4489 }
4490 }
4491 }
4492
4493 /* Change the opcode based on the operand size given by i.suffix;
4494 We don't need to change things for byte insns. */
4495
4496 if (i.suffix
4497 && i.suffix != BYTE_MNEM_SUFFIX
4498 && i.suffix != XMMWORD_MNEM_SUFFIX
4499 && i.suffix != YMMWORD_MNEM_SUFFIX)
4500 {
4501 /* It's not a byte, select word/dword operation. */
4502 if (i.tm.opcode_modifier.w)
4503 {
4504 if (i.tm.opcode_modifier.shortform)
4505 i.tm.base_opcode |= 8;
4506 else
4507 i.tm.base_opcode |= 1;
4508 }
4509
4510 /* Now select between word & dword operations via the operand
4511 size prefix, except for instructions that will ignore this
4512 prefix anyway. */
4513 if (i.tm.opcode_modifier.addrprefixop0)
4514 {
4515 /* The address size override prefix changes the size of the
4516 first operand. */
4517 if ((flag_code == CODE_32BIT
4518 && i.op->regs[0].reg_type.bitfield.reg16)
4519 || (flag_code != CODE_32BIT
4520 && i.op->regs[0].reg_type.bitfield.reg32))
4521 if (!add_prefix (ADDR_PREFIX_OPCODE))
4522 return 0;
4523 }
4524 else if (i.suffix != QWORD_MNEM_SUFFIX
4525 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4526 && !i.tm.opcode_modifier.ignoresize
4527 && !i.tm.opcode_modifier.floatmf
4528 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4529 || (flag_code == CODE_64BIT
4530 && i.tm.opcode_modifier.jumpbyte)))
4531 {
4532 unsigned int prefix = DATA_PREFIX_OPCODE;
4533
4534 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4535 prefix = ADDR_PREFIX_OPCODE;
4536
4537 if (!add_prefix (prefix))
4538 return 0;
4539 }
4540
4541 /* Set mode64 for an operand. */
4542 if (i.suffix == QWORD_MNEM_SUFFIX
4543 && flag_code == CODE_64BIT
4544 && !i.tm.opcode_modifier.norex64)
4545 {
4546 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4547 need rex64. cmpxchg8b is also a special case. */
4548 if (! (i.operands == 2
4549 && i.tm.base_opcode == 0x90
4550 && i.tm.extension_opcode == None
4551 && operand_type_equal (&i.types [0], &acc64)
4552 && operand_type_equal (&i.types [1], &acc64))
4553 && ! (i.operands == 1
4554 && i.tm.base_opcode == 0xfc7
4555 && i.tm.extension_opcode == 1
4556 && !operand_type_check (i.types [0], reg)
4557 && operand_type_check (i.types [0], anymem)))
4558 i.rex |= REX_W;
4559 }
4560
4561 /* Size floating point instruction. */
4562 if (i.suffix == LONG_MNEM_SUFFIX)
4563 if (i.tm.opcode_modifier.floatmf)
4564 i.tm.base_opcode ^= 4;
4565 }
4566
4567 return 1;
4568}
4569
4570static int
4571check_byte_reg (void)
4572{
4573 int op;
4574
4575 for (op = i.operands; --op >= 0;)
4576 {
4577 /* If this is an eight bit register, it's OK. If it's the 16 or
4578 32 bit version of an eight bit register, we will just use the
4579 low portion, and that's OK too. */
4580 if (i.types[op].bitfield.reg8)
4581 continue;
4582
4583 /* crc32 doesn't generate this warning. */
4584 if (i.tm.base_opcode == 0xf20f38f0)
4585 continue;
4586
4587 if ((i.types[op].bitfield.reg16
4588 || i.types[op].bitfield.reg32
4589 || i.types[op].bitfield.reg64)
4590 && i.op[op].regs->reg_num < 4)
4591 {
4592 /* Prohibit these changes in the 64bit mode, since the
4593 lowering is more complicated. */
4594 if (flag_code == CODE_64BIT
4595 && !i.tm.operand_types[op].bitfield.inoutportreg)
4596 {
4597 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4598 register_prefix, i.op[op].regs->reg_name,
4599 i.suffix);
4600 return 0;
4601 }
4602#if REGISTER_WARNINGS
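 /* For the warning text, map the register to its 8-bit counterpart
    (e.g. %ax or %eax -> %al) by offsetting into the register table. */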
4603 if (!quiet_warnings
4604 && !i.tm.operand_types[op].bitfield.inoutportreg)
4605 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4606 register_prefix,
4607 (i.op[op].regs + (i.types[op].bitfield.reg16
4608 ? REGNAM_AL - REGNAM_AX
4609 : REGNAM_AL - REGNAM_EAX))->reg_name,
4610 register_prefix,
4611 i.op[op].regs->reg_name,
4612 i.suffix);
4613#endif
4614 continue;
4615 }
4616 /* Any other register is bad. */
4617 if (i.types[op].bitfield.reg16
4618 || i.types[op].bitfield.reg32
4619 || i.types[op].bitfield.reg64
4620 || i.types[op].bitfield.regmmx
4621 || i.types[op].bitfield.regxmm
4622 || i.types[op].bitfield.regymm
4623 || i.types[op].bitfield.sreg2
4624 || i.types[op].bitfield.sreg3
4625 || i.types[op].bitfield.control
4626 || i.types[op].bitfield.debug
4627 || i.types[op].bitfield.test
4628 || i.types[op].bitfield.floatreg
4629 || i.types[op].bitfield.floatacc)
4630 {
4631 as_bad (_("`%s%s' not allowed with `%s%c'"),
4632 register_prefix,
4633 i.op[op].regs->reg_name,
4634 i.tm.name,
4635 i.suffix);
4636 return 0;
4637 }
4638 }
4639 return 1;
4640}
4641
4642static int
4643check_long_reg (void)
4644{
4645 int op;
4646
4647 for (op = i.operands; --op >= 0;)
4648 /* Reject eight bit registers, except where the template requires
4649 them. (eg. movzb) */
4650 if (i.types[op].bitfield.reg8
4651 && (i.tm.operand_types[op].bitfield.reg16
4652 || i.tm.operand_types[op].bitfield.reg32
4653 || i.tm.operand_types[op].bitfield.acc))
4654 {
4655 as_bad (_("`%s%s' not allowed with `%s%c'"),
4656 register_prefix,
4657 i.op[op].regs->reg_name,
4658 i.tm.name,
4659 i.suffix);
4660 return 0;
4661 }
4662 /* Warn if the e prefix on a general reg is missing. */
4663 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4664 && i.types[op].bitfield.reg16
4665 && (i.tm.operand_types[op].bitfield.reg32
4666 || i.tm.operand_types[op].bitfield.acc))
4667 {
4668 /* Prohibit these changes in the 64bit mode, since the
4669 lowering is more complicated. */
4670 if (flag_code == CODE_64BIT)
4671 {
4672 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4673 register_prefix, i.op[op].regs->reg_name,
4674 i.suffix);
4675 return 0;
4676 }
4677#if REGISTER_WARNINGS
4678 else
4679 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4680 register_prefix,
4681 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4682 register_prefix,
4683 i.op[op].regs->reg_name,
4684 i.suffix);
4685#endif
4686 }
4687 /* Warn if the r prefix on a general reg is missing. */
4688 else if (i.types[op].bitfield.reg64
4689 && (i.tm.operand_types[op].bitfield.reg32
4690 || i.tm.operand_types[op].bitfield.acc))
4691 {
4692 if (intel_syntax
4693 && i.tm.opcode_modifier.toqword
4694 && !i.types[0].bitfield.regxmm)
4695 {
4696 /* Convert to QWORD. We want REX byte. */
4697 i.suffix = QWORD_MNEM_SUFFIX;
4698 }
4699 else
4700 {
4701 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4702 register_prefix, i.op[op].regs->reg_name,
4703 i.suffix);
4704 return 0;
4705 }
4706 }
4707 return 1;
4708}
4709
4710static int
4711check_qword_reg (void)
4712{
4713 int op;
4714
4715 for (op = i.operands; --op >= 0; )
4716 /* Reject eight bit registers, except where the template requires
4717 them. (eg. movzb) */
4718 if (i.types[op].bitfield.reg8
4719 && (i.tm.operand_types[op].bitfield.reg16
4720 || i.tm.operand_types[op].bitfield.reg32
4721 || i.tm.operand_types[op].bitfield.acc))
4722 {
4723 as_bad (_("`%s%s' not allowed with `%s%c'"),
4724 register_prefix,
4725 i.op[op].regs->reg_name,
4726 i.tm.name,
4727 i.suffix);
4728 return 0;
4729 }
4730 /* Warn if the e prefix on a general reg is missing. */
4731 else if ((i.types[op].bitfield.reg16
4732 || i.types[op].bitfield.reg32)
4733 && (i.tm.operand_types[op].bitfield.reg32
4734 || i.tm.operand_types[op].bitfield.acc))
4735 {
4736 /* Prohibit these changes in the 64bit mode, since the
4737 lowering is more complicated. */
4738 if (intel_syntax
4739 && i.tm.opcode_modifier.todword
4740 && !i.types[0].bitfield.regxmm)
4741 {
4742 /* Convert to DWORD. We don't want REX byte. */
4743 i.suffix = LONG_MNEM_SUFFIX;
4744 }
4745 else
4746 {
4747 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4748 register_prefix, i.op[op].regs->reg_name,
4749 i.suffix);
4750 return 0;
4751 }
4752 }
4753 return 1;
4754}
4755
4756static int
4757check_word_reg (void)
4758{
4759 int op;
4760 for (op = i.operands; --op >= 0;)
4761 /* Reject eight bit registers, except where the template requires
4762 them. (eg. movzb) */
4763 if (i.types[op].bitfield.reg8
4764 && (i.tm.operand_types[op].bitfield.reg16
4765 || i.tm.operand_types[op].bitfield.reg32
4766 || i.tm.operand_types[op].bitfield.acc))
4767 {
4768 as_bad (_("`%s%s' not allowed with `%s%c'"),
4769 register_prefix,
4770 i.op[op].regs->reg_name,
4771 i.tm.name,
4772 i.suffix);
4773 return 0;
4774 }
4775 /* Warn if the e prefix on a general reg is present. */
4776 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4777 && i.types[op].bitfield.reg32
4778 && (i.tm.operand_types[op].bitfield.reg16
4779 || i.tm.operand_types[op].bitfield.acc))
4780 {
4781 /* Prohibit these changes in the 64bit mode, since the
4782 lowering is more complicated. */
4783 if (flag_code == CODE_64BIT)
4784 {
4785 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4786 register_prefix, i.op[op].regs->reg_name,
4787 i.suffix);
4788 return 0;
4789 }
4790 else
4791#if REGISTER_WARNINGS
4792 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4793 register_prefix,
4794 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4795 register_prefix,
4796 i.op[op].regs->reg_name,
4797 i.suffix);
4798#endif
4799 }
4800 return 1;
4801}
4802
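/* Narrow the type of immediate operand J to a single size, using the
   instruction suffix if one was given and the current operand size
   otherwise. Returns 0 if the immediate size stays ambiguous. */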
4803static int
4804update_imm (unsigned int j)
4805{
4806 i386_operand_type overlap = i.types[j];
4807 if ((overlap.bitfield.imm8
4808 || overlap.bitfield.imm8s
4809 || overlap.bitfield.imm16
4810 || overlap.bitfield.imm32
4811 || overlap.bitfield.imm32s
4812 || overlap.bitfield.imm64)
4813 && !operand_type_equal (&overlap, &imm8)
4814 && !operand_type_equal (&overlap, &imm8s)
4815 && !operand_type_equal (&overlap, &imm16)
4816 && !operand_type_equal (&overlap, &imm32)
4817 && !operand_type_equal (&overlap, &imm32s)
4818 && !operand_type_equal (&overlap, &imm64))
4819 {
4820 if (i.suffix)
4821 {
4822 i386_operand_type temp;
4823
4824 operand_type_set (&temp, 0);
4825 if (i.suffix == BYTE_MNEM_SUFFIX)
4826 {
4827 temp.bitfield.imm8 = overlap.bitfield.imm8;
4828 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4829 }
4830 else if (i.suffix == WORD_MNEM_SUFFIX)
4831 temp.bitfield.imm16 = overlap.bitfield.imm16;
4832 else if (i.suffix == QWORD_MNEM_SUFFIX)
4833 {
4834 temp.bitfield.imm64 = overlap.bitfield.imm64;
4835 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4836 }
4837 else
4838 temp.bitfield.imm32 = overlap.bitfield.imm32;
4839 overlap = temp;
4840 }
4841 else if (operand_type_equal (&overlap, &imm16_32_32s)
4842 || operand_type_equal (&overlap, &imm16_32)
4843 || operand_type_equal (&overlap, &imm16_32s))
4844 {
4845 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4846 overlap = imm16;
4847 else
4848 overlap = imm32s;
4849 }
4850 if (!operand_type_equal (&overlap, &imm8)
4851 && !operand_type_equal (&overlap, &imm8s)
4852 && !operand_type_equal (&overlap, &imm16)
4853 && !operand_type_equal (&overlap, &imm32)
4854 && !operand_type_equal (&overlap, &imm32s)
4855 && !operand_type_equal (&overlap, &imm64))
4856 {
4857 as_bad (_("no instruction mnemonic suffix given; "
4858 "can't determine immediate size"));
4859 return 0;
4860 }
4861 }
4862 i.types[j] = overlap;
4863
4864 return 1;
4865}
4866
4867static int
4868finalize_imm (void)
4869{
4870 unsigned int j, n;
4871
4872 /* Update the first 2 immediate operands. */
4873 n = i.operands > 2 ? 2 : i.operands;
4874 if (n)
4875 {
4876 for (j = 0; j < n; j++)
4877 if (update_imm (j) == 0)
4878 return 0;
4879
4880 /* The 3rd operand can't be an immediate operand. */
4881 gas_assert (operand_type_check (i.types[2], imm) == 0);
4882 }
4883
4884 return 1;
4885}
4886
4887static int
4888bad_implicit_operand (int xmm)
4889{
4890 const char *ireg = xmm ? "xmm0" : "ymm0";
4891
4892 if (intel_syntax)
4893 as_bad (_("the last operand of `%s' must be `%s%s'"),
4894 i.tm.name, register_prefix, ireg);
4895 else
4896 as_bad (_("the first operand of `%s' must be `%s%s'"),
4897 i.tm.name, register_prefix, ireg);
4898 return 0;
4899}
4900
4901static int
4902process_operands (void)
4903{
4904 /* Default segment register this instruction will use for memory
4905 accesses. 0 means unknown. This is only for optimizing out
4906 unnecessary segment overrides. */
4907 const seg_entry *default_seg = 0;
4908
4909 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
4910 {
4911 unsigned int dupl = i.operands;
4912 unsigned int dest = dupl - 1;
4913 unsigned int j;
4914
4915 /* The destination must be an xmm register. */
4916 gas_assert (i.reg_operands
4917 && MAX_OPERANDS > dupl
4918 && operand_type_equal (&i.types[dest], &regxmm));
4919
4920 if (i.tm.opcode_modifier.firstxmm0)
4921 {
4922 /* The first operand is implicit and must be xmm0. */
4923 gas_assert (operand_type_equal (&i.types[0], &regxmm));
4924 if (i.op[0].regs->reg_num != 0)
4925 return bad_implicit_operand (1);
4926
4927 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
4928 {
4929 /* Keep xmm0 for instructions with VEX prefix and 3
4930 sources. */
4931 goto duplicate;
4932 }
4933 else
4934 {
4935 /* We remove the first xmm0 and keep the number of
4936 operands unchanged, which in fact duplicates the
4937 destination. */
4938 for (j = 1; j < i.operands; j++)
4939 {
4940 i.op[j - 1] = i.op[j];
4941 i.types[j - 1] = i.types[j];
4942 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
4943 }
4944 }
4945 }
4946 else if (i.tm.opcode_modifier.implicit1stxmm0)
4947 {
4948 gas_assert ((MAX_OPERANDS - 1) > dupl
4949 && (i.tm.opcode_modifier.vexsources
4950 == VEX3SOURCES));
4951
4952 /* Add the implicit xmm0 for instructions with VEX prefix
4953 and 3 sources. */
4954 for (j = i.operands; j > 0; j--)
4955 {
4956 i.op[j] = i.op[j - 1];
4957 i.types[j] = i.types[j - 1];
4958 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
4959 }
4960 i.op[0].regs
4961 = (const reg_entry *) hash_find (reg_hash, "xmm0");
4962 i.types[0] = regxmm;
4963 i.tm.operand_types[0] = regxmm;
4964
4965 i.operands += 2;
4966 i.reg_operands += 2;
4967 i.tm.operands += 2;
4968
4969 dupl++;
4970 dest++;
4971 i.op[dupl] = i.op[dest];
4972 i.types[dupl] = i.types[dest];
4973 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4974 }
4975 else
4976 {
4977duplicate:
4978 i.operands++;
4979 i.reg_operands++;
4980 i.tm.operands++;
4981
4982 i.op[dupl] = i.op[dest];
4983 i.types[dupl] = i.types[dest];
4984 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4985 }
4986
4987 if (i.tm.opcode_modifier.immext)
4988 process_immext ();
4989 }
4990 else if (i.tm.opcode_modifier.firstxmm0)
4991 {
4992 unsigned int j;
4993
4994 /* The first operand is implicit and must be xmm0/ymm0. */
4995 gas_assert (i.reg_operands
4996 && (operand_type_equal (&i.types[0], &regxmm)
4997 || operand_type_equal (&i.types[0], &regymm)));
4998 if (i.op[0].regs->reg_num != 0)
4999 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5000
5001 for (j = 1; j < i.operands; j++)
5002 {
5003 i.op[j - 1] = i.op[j];
5004 i.types[j - 1] = i.types[j];
5005
5006 /* We need to adjust fields in i.tm since they are used by
5007 build_modrm_byte. */
5008 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5009 }
5010
5011 i.operands--;
5012 i.reg_operands--;
5013 i.tm.operands--;
5014 }
5015 else if (i.tm.opcode_modifier.regkludge)
5016 {
5017 /* The imul $imm, %reg instruction is converted into
5018 imul $imm, %reg, %reg, and the clr %reg instruction
5019 is converted into xor %reg, %reg. */
5020
5021 unsigned int first_reg_op;
5022
5023 if (operand_type_check (i.types[0], reg))
5024 first_reg_op = 0;
5025 else
5026 first_reg_op = 1;
5027 /* Pretend we saw the extra register operand. */
5028 gas_assert (i.reg_operands == 1
5029 && i.op[first_reg_op + 1].regs == 0);
5030 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5031 i.types[first_reg_op + 1] = i.types[first_reg_op];
5032 i.operands++;
5033 i.reg_operands++;
5034 }
5035
5036 if (i.tm.opcode_modifier.shortform)
5037 {
5038 if (i.types[0].bitfield.sreg2
5039 || i.types[0].bitfield.sreg3)
5040 {
5041 if (i.tm.base_opcode == POP_SEG_SHORT
5042 && i.op[0].regs->reg_num == 1)
5043 {
5044 as_bad (_("you can't `pop %scs'"), register_prefix);
5045 return 0;
5046 }
5047 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5048 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5049 i.rex |= REX_B;
5050 }
5051 else
5052 {
5053 /* The register or float register operand is in operand
5054 0 or 1. */
5055 unsigned int op;
5056
5057 if (i.types[0].bitfield.floatreg
5058 || operand_type_check (i.types[0], reg))
5059 op = 0;
5060 else
5061 op = 1;
5062 /* Register goes in low 3 bits of opcode. */
5063 i.tm.base_opcode |= i.op[op].regs->reg_num;
5064 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5065 i.rex |= REX_B;
5066 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5067 {
5068 /* Warn about some common errors, but press on regardless.
5069 The first case can be generated by gcc (<= 2.8.1). */
5070 if (i.operands == 2)
5071 {
5072 /* Reversed arguments on faddp, fsubp, etc. */
5073 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5074 register_prefix, i.op[!intel_syntax].regs->reg_name,
5075 register_prefix, i.op[intel_syntax].regs->reg_name);
5076 }
5077 else
5078 {
5079 /* Extraneous `l' suffix on fp insn. */
5080 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5081 register_prefix, i.op[0].regs->reg_name);
5082 }
5083 }
5084 }
5085 }
5086 else if (i.tm.opcode_modifier.modrm)
5087 {
5088 /* The opcode is completed (modulo i.tm.extension_opcode which
5089 must be put into the modrm byte). Now, we make the modrm and
5090 index base bytes based on all the info we've collected. */
5091
5092 default_seg = build_modrm_byte ();
5093 }
5094 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5095 {
5096 default_seg = &ds;
5097 }
5098 else if (i.tm.opcode_modifier.isstring)
5099 {
5100 /* For the string instructions that allow a segment override
5101 on one of their operands, the default segment is ds. */
5102 default_seg = &ds;
5103 }
5104
5105 if (i.tm.base_opcode == 0x8d /* lea */
5106 && i.seg[0]
5107 && !quiet_warnings)
5108 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5109
5110 /* If a segment was explicitly specified, and the specified segment
5111 is not the default, use an opcode prefix to select it. If we
5112 never figured out what the default segment is, then default_seg
5113 will be zero at this point, and the specified segment prefix will
5114 always be used. */
5115 if ((i.seg[0]) && (i.seg[0] != default_seg))
5116 {
5117 if (!add_prefix (i.seg[0]->seg_prefix))
5118 return 0;
5119 }
5120 return 1;
5121}
5122
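/* Build the ModRM byte (plus SIB byte, displacement type and VEX.vvvv
   register where applicable) from the operands gathered in `i'.
   Returns the segment register implied by the addressing mode so the
   caller can omit a redundant explicit segment prefix. */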
5123static const seg_entry *
5124build_modrm_byte (void)
5125{
5126 const seg_entry *default_seg = 0;
5127 unsigned int source, dest;
5128 int vex_3_sources;
5129
5130 /* The first operand of instructions with VEX prefix and 3 sources
5131 must be VEX_Imm4. */
5132 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5133 if (vex_3_sources)
5134 {
5135 unsigned int nds, reg_slot;
5136 expressionS *exp;
5137
5138 if (i.tm.opcode_modifier.veximmext
5139 && i.tm.opcode_modifier.immext)
5140 {
5141 dest = i.operands - 2;
5142 gas_assert (dest == 3);
5143 }
5144 else
5145 dest = i.operands - 1;
5146 nds = dest - 1;
5147
5148 /* There are 2 kinds of instructions:
5149 1. 5 operands: 4 register operands or 3 register operands
5150 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5151 VexW0 or VexW1. The destination must be either XMM or YMM
5152 register.
5153 2. 4 operands: 4 register operands or 3 register operands
5154 plus 1 memory operand, VexXDS, and VexImmExt */
5155 gas_assert ((i.reg_operands == 4
5156 || (i.reg_operands == 3 && i.mem_operands == 1))
5157 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5158 && (i.tm.opcode_modifier.veximmext
5159 || (i.imm_operands == 1
5160 && i.types[0].bitfield.vec_imm4
5161 && (i.tm.opcode_modifier.vexw == VEXW0
5162 || i.tm.opcode_modifier.vexw == VEXW1)
5163 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5164 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5165
5166 if (i.imm_operands == 0)
5167 {
5168 /* When there is no immediate operand, generate an 8bit
5169 immediate operand to encode the first operand. */
5170 exp = &im_expressions[i.imm_operands++];
5171 i.op[i.operands].imms = exp;
5172 i.types[i.operands] = imm8;
5173 i.operands++;
5174 /* If VexW1 is set, the first operand is the source and
5175 the second operand is encoded in the immediate operand. */
5176 if (i.tm.opcode_modifier.vexw == VEXW1)
5177 {
5178 source = 0;
5179 reg_slot = 1;
5180 }
5181 else
5182 {
5183 source = 1;
5184 reg_slot = 0;
5185 }
5186
5187 /* FMA swaps REG and NDS. */
5188 if (i.tm.cpu_flags.bitfield.cpufma)
5189 {
5190 unsigned int tmp;
5191 tmp = reg_slot;
5192 reg_slot = nds;
5193 nds = tmp;
5194 }
5195
5196 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5197 &regxmm)
5198 || operand_type_equal (&i.tm.operand_types[reg_slot],
5199 &regymm));
5200 exp->X_op = O_constant;
5201 exp->X_add_number
5202 = ((i.op[reg_slot].regs->reg_num
5203 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5204 << 4);
5205 }
5206 else
5207 {
5208 unsigned int imm_slot;
5209
5210 if (i.tm.opcode_modifier.vexw == VEXW0)
5211 {
5212 /* If VexW0 is set, the third operand is the source and
5213 the second operand is encoded in the immediate
5214 operand. */
5215 source = 2;
5216 reg_slot = 1;
5217 }
5218 else
5219 {
5220 /* VexW1 is set, the second operand is the source and
5221 the third operand is encoded in the immediate
5222 operand. */
5223 source = 1;
5224 reg_slot = 2;
5225 }
5226
5227 if (i.tm.opcode_modifier.immext)
5228 {
5229 /* When ImmExt is set, the immediate byte is the last
5230 operand. */
5231 imm_slot = i.operands - 1;
5232 source--;
5233 reg_slot--;
5234 }
5235 else
5236 {
5237 imm_slot = 0;
5238
5239 /* Turn on Imm8 so that output_imm will generate it. */
5240 i.types[imm_slot].bitfield.imm8 = 1;
5241 }
5242
5243 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5244 &regxmm)
5245 || operand_type_equal (&i.tm.operand_types[reg_slot],
5246 &regymm));
5247 i.op[imm_slot].imms->X_add_number
5248 |= ((i.op[reg_slot].regs->reg_num
5249 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5250 << 4);
5251 }
5252
5253 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5254 || operand_type_equal (&i.tm.operand_types[nds],
5255 &regymm));
5256 i.vex.register_specifier = i.op[nds].regs;
5257 }
5258 else
5259 source = dest = 0;
5260
5261 /* i.reg_operands MUST be the number of real register operands;
5262 implicit registers do not count. If there are 3 register
5263 operands, it must be an instruction with VexNDS. For an
5264 instruction with VexNDD, the destination register is encoded
5265 in the VEX prefix. If there are 4 register operands, it must be
5266 an instruction with VEX prefix and 3 sources. */
5267 if (i.mem_operands == 0
5268 && ((i.reg_operands == 2
5269 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5270 || (i.reg_operands == 3
5271 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5272 || (i.reg_operands == 4 && vex_3_sources)))
5273 {
5274 switch (i.operands)
5275 {
5276 case 2:
5277 source = 0;
5278 break;
5279 case 3:
5280 /* When there are 3 operands, one of them may be immediate,
5281 which may be the first or the last operand. Otherwise,
5282 the first operand must be shift count register (cl) or it
5283 is an instruction with VexNDS. */
5284 gas_assert (i.imm_operands == 1
5285 || (i.imm_operands == 0
5286 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5287 || i.types[0].bitfield.shiftcount)));
5288 if (operand_type_check (i.types[0], imm)
5289 || i.types[0].bitfield.shiftcount)
5290 source = 1;
5291 else
5292 source = 0;
5293 break;
5294 case 4:
5295 /* When there are 4 operands, the first two must be 8bit
5296 immediate operands. The source operand will be the 3rd
5297 one.
5298
5299 For instructions with VexNDS, if the first operand
5300 is an imm8, the source operand is the 2nd one. If the last
5301 operand is imm8, the source operand is the first one. */
5302 gas_assert ((i.imm_operands == 2
5303 && i.types[0].bitfield.imm8
5304 && i.types[1].bitfield.imm8)
5305 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5306 && i.imm_operands == 1
5307 && (i.types[0].bitfield.imm8
5308 || i.types[i.operands - 1].bitfield.imm8)));
5309 if (i.imm_operands == 2)
5310 source = 2;
5311 else
5312 {
5313 if (i.types[0].bitfield.imm8)
5314 source = 1;
5315 else
5316 source = 0;
5317 }
5318 break;
5319 case 5:
5320 break;
5321 default:
5322 abort ();
5323 }
5324
5325 if (!vex_3_sources)
5326 {
5327 dest = source + 1;
5328
5329 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5330 {
5331 /* For instructions with VexNDS, the register-only
5332 source operand must be 32/64bit integer, XMM or
5333 YMM register. It is encoded in VEX prefix. We
5334 need to clear RegMem bit before calling
5335 operand_type_equal. */
5336
5337 i386_operand_type op;
5338 unsigned int vvvv;
5339
5340 /* Check register-only source operand when two source
5341 operands are swapped. */
5342 if (!i.tm.operand_types[source].bitfield.baseindex
5343 && i.tm.operand_types[dest].bitfield.baseindex)
5344 {
5345 vvvv = source;
5346 source = dest;
5347 }
5348 else
5349 vvvv = dest;
5350
5351 op = i.tm.operand_types[vvvv];
5352 op.bitfield.regmem = 0;
5353 if ((dest + 1) >= i.operands
5354 || (op.bitfield.reg32 != 1
5355 && !op.bitfield.reg64 != 1
5356 && !operand_type_equal (&op, &regxmm)
5357 && !operand_type_equal (&op, &regymm)))
5358 abort ();
5359 i.vex.register_specifier = i.op[vvvv].regs;
5360 dest++;
5361 }
5362 }
5363
5364 i.rm.mode = 3;
5365 /* One of the register operands will be encoded in the i.tm.reg
5366 field, the other in the combined i.tm.mode and i.tm.regmem
5367 fields. If no form of this instruction supports a memory
5368 destination operand, then we assume the source operand may
5369 sometimes be a memory operand and so we need to store the
5370 destination in the i.rm.reg field. */
5371 if (!i.tm.operand_types[dest].bitfield.regmem
5372 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5373 {
5374 i.rm.reg = i.op[dest].regs->reg_num;
5375 i.rm.regmem = i.op[source].regs->reg_num;
5376 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5377 i.rex |= REX_R;
5378 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5379 i.rex |= REX_B;
5380 }
5381 else
5382 {
5383 i.rm.reg = i.op[source].regs->reg_num;
5384 i.rm.regmem = i.op[dest].regs->reg_num;
5385 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5386 i.rex |= REX_B;
5387 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5388 i.rex |= REX_R;
5389 }
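 /* Outside 64-bit mode, REX.R/REX.B can only have been requested for
    a high control register such as %cr8; following the AMD convention
    this is encoded as a LOCK-prefixed access to the corresponding low
    control register instead. */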
5390 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5391 {
5392 if (!i.types[0].bitfield.control
5393 && !i.types[1].bitfield.control)
5394 abort ();
5395 i.rex &= ~(REX_R | REX_B);
5396 add_prefix (LOCK_PREFIX_OPCODE);
5397 }
5398 }
5399 else
5400 { /* If it's not 2 reg operands... */
5401 unsigned int mem;
5402
5403 if (i.mem_operands)
5404 {
5405 unsigned int fake_zero_displacement = 0;
5406 unsigned int op;
5407
5408 for (op = 0; op < i.operands; op++)
5409 if (operand_type_check (i.types[op], anymem))
5410 break;
5411 gas_assert (op < i.operands);
5412
5413 default_seg = &ds;
5414
5415 if (i.base_reg == 0)
5416 {
5417 i.rm.mode = 0;
5418 if (!i.disp_operands)
5419 fake_zero_displacement = 1;
5420 if (i.index_reg == 0)
5421 {
5422 /* Operand is just <disp> */
5423 if (flag_code == CODE_64BIT)
5424 {
5425 /* 64bit mode replaces 32bit absolute
5426 addressing with RIP relative addressing;
5427 absolute addressing is encoded by one of the
5428 redundant SIB forms. */
5429 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5430 i.sib.base = NO_BASE_REGISTER;
5431 i.sib.index = NO_INDEX_REGISTER;
5432 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5433 ? disp32s : disp32);
5434 }
5435 else if ((flag_code == CODE_16BIT)
5436 ^ (i.prefix[ADDR_PREFIX] != 0))
5437 {
5438 i.rm.regmem = NO_BASE_REGISTER_16;
5439 i.types[op] = disp16;
5440 }
5441 else
5442 {
5443 i.rm.regmem = NO_BASE_REGISTER;
5444 i.types[op] = disp32;
5445 }
5446 }
5447 else /* !i.base_reg && i.index_reg */
5448 {
5449 if (i.index_reg->reg_num == RegEiz
5450 || i.index_reg->reg_num == RegRiz)
5451 i.sib.index = NO_INDEX_REGISTER;
5452 else
5453 i.sib.index = i.index_reg->reg_num;
5454 i.sib.base = NO_BASE_REGISTER;
5455 i.sib.scale = i.log2_scale_factor;
5456 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5457 i.types[op].bitfield.disp8 = 0;
5458 i.types[op].bitfield.disp16 = 0;
5459 i.types[op].bitfield.disp64 = 0;
5460 if (flag_code != CODE_64BIT)
5461 {
5462 /* Must be 32 bit */
5463 i.types[op].bitfield.disp32 = 1;
5464 i.types[op].bitfield.disp32s = 0;
5465 }
5466 else
5467 {
5468 i.types[op].bitfield.disp32 = 0;
5469 i.types[op].bitfield.disp32s = 1;
5470 }
5471 if ((i.index_reg->reg_flags & RegRex) != 0)
5472 i.rex |= REX_X;
5473 }
5474 }
5475 /* RIP addressing for 64bit mode. */
5476 else if (i.base_reg->reg_num == RegRip ||
5477 i.base_reg->reg_num == RegEip)
5478 {
5479 i.rm.regmem = NO_BASE_REGISTER;
5480 i.types[op].bitfield.disp8 = 0;
5481 i.types[op].bitfield.disp16 = 0;
5482 i.types[op].bitfield.disp32 = 0;
5483 i.types[op].bitfield.disp32s = 1;
5484 i.types[op].bitfield.disp64 = 0;
5485 i.flags[op] |= Operand_PCrel;
5486 if (! i.disp_operands)
5487 fake_zero_displacement = 1;
5488 }
5489 else if (i.base_reg->reg_type.bitfield.reg16)
5490 {
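 /* 16-bit addressing: r/m values 0..7 select (%bx,%si), (%bx,%di),
    (%bp,%si), (%bp,%di), (%si), (%di), disp16/(%bp) and (%bx);
    reconstruct the right value from the base and index registers. */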
5491 switch (i.base_reg->reg_num)
5492 {
5493 case 3: /* (%bx) */
5494 if (i.index_reg == 0)
5495 i.rm.regmem = 7;
5496 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5497 i.rm.regmem = i.index_reg->reg_num - 6;
5498 break;
5499 case 5: /* (%bp) */
5500 default_seg = &ss;
5501 if (i.index_reg == 0)
5502 {
5503 i.rm.regmem = 6;
5504 if (operand_type_check (i.types[op], disp) == 0)
5505 {
5506 /* fake (%bp) into 0(%bp) */
5507 i.types[op].bitfield.disp8 = 1;
5508 fake_zero_displacement = 1;
5509 }
5510 }
5511 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5512 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5513 break;
5514 default: /* (%si) -> 4 or (%di) -> 5 */
5515 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5516 }
5517 i.rm.mode = mode_from_disp_size (i.types[op]);
5518 }
5519 else /* i.base_reg and 32/64 bit mode */
5520 {
5521 if (flag_code == CODE_64BIT
5522 && operand_type_check (i.types[op], disp))
5523 {
5524 i386_operand_type temp;
5525 operand_type_set (&temp, 0);
5526 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5527 i.types[op] = temp;
5528 if (i.prefix[ADDR_PREFIX] == 0)
5529 i.types[op].bitfield.disp32s = 1;
5530 else
5531 i.types[op].bitfield.disp32 = 1;
5532 }
5533
5534 i.rm.regmem = i.base_reg->reg_num;
5535 if ((i.base_reg->reg_flags & RegRex) != 0)
5536 i.rex |= REX_B;
5537 i.sib.base = i.base_reg->reg_num;
5538 /* x86-64 ignores REX prefix bit here to avoid decoder
5539 complications. */
5540 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5541 {
5542 default_seg = &ss;
5543 if (i.disp_operands == 0)
5544 {
5545 fake_zero_displacement = 1;
5546 i.types[op].bitfield.disp8 = 1;
5547 }
5548 }
5549 else if (i.base_reg->reg_num == ESP_REG_NUM)
5550 {
5551 default_seg = &ss;
5552 }
5553 i.sib.scale = i.log2_scale_factor;
5554 if (i.index_reg == 0)
5555 {
5556 /* <disp>(%esp) becomes two byte modrm with no index
5557 register. We've already stored the code for esp
5558 in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5559 Any base register besides %esp will not use the
5560 extra modrm byte. */
5561 i.sib.index = NO_INDEX_REGISTER;
5562 }
5563 else
5564 {
5565 if (i.index_reg->reg_num == RegEiz
5566 || i.index_reg->reg_num == RegRiz)
5567 i.sib.index = NO_INDEX_REGISTER;
5568 else
5569 i.sib.index = i.index_reg->reg_num;
5570 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5571 if ((i.index_reg->reg_flags & RegRex) != 0)
5572 i.rex |= REX_X;
5573 }
5574
5575 if (i.disp_operands
5576 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5577 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5578 i.rm.mode = 0;
5579 else
5580 i.rm.mode = mode_from_disp_size (i.types[op]);
5581 }
5582
5583 if (fake_zero_displacement)
5584 {
5585 /* Fakes a zero displacement assuming that i.types[op]
5586 holds the correct displacement size. */
5587 expressionS *exp;
5588
5589 gas_assert (i.op[op].disps == 0);
5590 exp = &disp_expressions[i.disp_operands++];
5591 i.op[op].disps = exp;
5592 exp->X_op = O_constant;
5593 exp->X_add_number = 0;
5594 exp->X_add_symbol = (symbolS *) 0;
5595 exp->X_op_symbol = (symbolS *) 0;
5596 }
5597
5598 mem = op;
5599 }
5600 else
5601 mem = ~0;
5602
5603 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5604 {
5605 if (operand_type_check (i.types[0], imm))
5606 i.vex.register_specifier = NULL;
5607 else
5608 {
5609 /* VEX.vvvv encodes one of the sources when the first
5610 operand is not an immediate. */
5611 if (i.tm.opcode_modifier.vexw == VEXW0)
5612 i.vex.register_specifier = i.op[0].regs;
5613 else
5614 i.vex.register_specifier = i.op[1].regs;
5615 }
5616
5617 /* Destination is a XMM register encoded in the ModRM.reg
5618 and VEX.R bit. */
5619 i.rm.reg = i.op[2].regs->reg_num;
5620 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5621 i.rex |= REX_R;
5622
5623 /* ModRM.rm and VEX.B encodes the other source. */
5624 if (!i.mem_operands)
5625 {
5626 i.rm.mode = 3;
5627
5628 if (i.tm.opcode_modifier.vexw == VEXW0)
5629 i.rm.regmem = i.op[1].regs->reg_num;
5630 else
5631 i.rm.regmem = i.op[0].regs->reg_num;
5632
5633 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5634 i.rex |= REX_B;
5635 }
5636 }
5637 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5638 {
5639 i.vex.register_specifier = i.op[2].regs;
5640 if (!i.mem_operands)
5641 {
5642 i.rm.mode = 3;
5643 i.rm.regmem = i.op[1].regs->reg_num;
5644 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5645 i.rex |= REX_B;
5646 }
5647 }
5648 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5649 (if any) based on i.tm.extension_opcode. Again, we must be
5650 careful to make sure that segment/control/debug/test/MMX
5651 registers are coded into the i.rm.reg field. */
5652 else if (i.reg_operands)
5653 {
5654 unsigned int op;
5655 unsigned int vex_reg = ~0;
5656
5657 for (op = 0; op < i.operands; op++)
5658 if (i.types[op].bitfield.reg8
5659 || i.types[op].bitfield.reg16
5660 || i.types[op].bitfield.reg32
5661 || i.types[op].bitfield.reg64
5662 || i.types[op].bitfield.regmmx
5663 || i.types[op].bitfield.regxmm
5664 || i.types[op].bitfield.regymm
5665 || i.types[op].bitfield.sreg2
5666 || i.types[op].bitfield.sreg3
5667 || i.types[op].bitfield.control
5668 || i.types[op].bitfield.debug
5669 || i.types[op].bitfield.test)
5670 break;
5671
5672 if (vex_3_sources)
5673 op = dest;
5674 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5675 {
5676 /* For instructions with VexNDS, the register-only
5677 source operand is encoded in VEX prefix. */
5678 gas_assert (mem != (unsigned int) ~0);
5679
5680 if (op > mem)
5681 {
5682 vex_reg = op++;
5683 gas_assert (op < i.operands);
5684 }
5685 else
5686 {
5687 /* Check register-only source operand when two source
5688 operands are swapped. */
5689 if (!i.tm.operand_types[op].bitfield.baseindex
5690 && i.tm.operand_types[op + 1].bitfield.baseindex)
5691 {
5692 vex_reg = op;
5693 op += 2;
5694 gas_assert (mem == (vex_reg + 1)
5695 && op < i.operands);
5696 }
5697 else
5698 {
5699 vex_reg = op + 1;
5700 gas_assert (vex_reg < i.operands);
5701 }
5702 }
5703 }
5704 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5705 {
5706 /* For instructions with VexNDD, the register destination
5707 is encoded in VEX prefix. */
5708 if (i.mem_operands == 0)
5709 {
5710 /* There is no memory operand. */
5711 gas_assert ((op + 2) == i.operands);
5712 vex_reg = op + 1;
5713 }
5714 else
5715 {
5716 /* There are only 2 operands. */
5717 gas_assert (op < 2 && i.operands == 2);
5718 vex_reg = 1;
5719 }
5720 }
5721 else
5722 gas_assert (op < i.operands);
5723
5724 if (vex_reg != (unsigned int) ~0)
5725 {
5726 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5727
5728 if (type->bitfield.reg32 != 1
5729 && type->bitfield.reg64 != 1
5730 && !operand_type_equal (type, &regxmm)
5731 && !operand_type_equal (type, &regymm))
5732 abort ();
5733
5734 i.vex.register_specifier = i.op[vex_reg].regs;
5735 }
5736
5737 /* Don't set OP operand twice. */
5738 if (vex_reg != op)
5739 {
5740 /* If there is an extension opcode to put here, the
5741 register number must be put into the regmem field. */
5742 if (i.tm.extension_opcode != None)
5743 {
5744 i.rm.regmem = i.op[op].regs->reg_num;
5745 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5746 i.rex |= REX_B;
5747 }
5748 else
5749 {
5750 i.rm.reg = i.op[op].regs->reg_num;
5751 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5752 i.rex |= REX_R;
5753 }
5754 }
5755
5756 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5757 must set it to 3 to indicate this is a register operand
5758 in the regmem field. */
5759 if (!i.mem_operands)
5760 i.rm.mode = 3;
5761 }
5762
5763 /* Fill in i.rm.reg field with extension opcode (if any). */
5764 if (i.tm.extension_opcode != None)
5765 i.rm.reg = i.tm.extension_opcode;
5766 }
5767 return default_seg;
5768}
5769
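/* Output a relaxable jump. The prefixes and the first opcode byte go
   into the fixed part of the frag; a variable part is reserved so that
   md_convert_frag can widen the displacement later if the target turns
   out to be beyond byte range. */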
5770static void
5771output_branch (void)
5772{
5773 char *p;
5774 int size;
5775 int code16;
5776 int prefix;
5777 relax_substateT subtype;
5778 symbolS *sym;
5779 offsetT off;
5780
5781 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
5782 size = i.disp32_encoding ? BIG : SMALL;
5783
5784 prefix = 0;
5785 if (i.prefix[DATA_PREFIX] != 0)
5786 {
5787 prefix = 1;
5788 i.prefixes -= 1;
5789 code16 ^= CODE16;
5790 }
5791 /* Pentium4 branch hints. */
5792 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5793 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5794 {
5795 prefix++;
5796 i.prefixes--;
5797 }
5798 if (i.prefix[REX_PREFIX] != 0)
5799 {
5800 prefix++;
5801 i.prefixes--;
5802 }
5803
5804 if (i.prefixes != 0 && !intel_syntax)
5805 as_warn (_("skipping prefixes on this instruction"));
5806
5807 /* It's always a symbol; end frag & set up for relax.
5808 Make sure there is enough room in this frag for the largest
5809 instruction we may generate in md_convert_frag. This is 2
5810 bytes for the opcode and room for the prefix and largest
5811 displacement. */
5812 frag_grow (prefix + 2 + 4);
5813 /* Prefix and 1 opcode byte go in fr_fix. */
5814 p = frag_more (prefix + 1);
5815 if (i.prefix[DATA_PREFIX] != 0)
5816 *p++ = DATA_PREFIX_OPCODE;
5817 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5818 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5819 *p++ = i.prefix[SEG_PREFIX];
5820 if (i.prefix[REX_PREFIX] != 0)
5821 *p++ = i.prefix[REX_PREFIX];
5822 *p = i.tm.base_opcode;
5823
5824 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5825 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
5826 else if (cpu_arch_flags.bitfield.cpui386)
5827 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
5828 else
5829 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
5830 subtype |= code16;
5831
5832 sym = i.op[0].disps->X_add_symbol;
5833 off = i.op[0].disps->X_add_number;
5834
5835 if (i.op[0].disps->X_op != O_constant
5836 && i.op[0].disps->X_op != O_symbol)
5837 {
5838 /* Handle complex expressions. */
5839 sym = make_expr_symbol (i.op[0].disps);
5840 off = 0;
5841 }
5842
5843 /* 1 possible extra opcode + 4 byte displacement go in var part.
5844 Pass reloc in fr_var. */
5845 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5846}
5847
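/* Output a jump with a fixed-size displacement: 1 byte for loop/jecxz
   style instructions, otherwise 2 or 4 bytes depending on the operand
   size, together with a PC-relative fixup for the target. */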
5848static void
5849output_jump (void)
5850{
5851 char *p;
5852 int size;
5853 fixS *fixP;
5854
5855 if (i.tm.opcode_modifier.jumpbyte)
5856 {
5857 /* This is a loop or jecxz type instruction. */
5858 size = 1;
5859 if (i.prefix[ADDR_PREFIX] != 0)
5860 {
5861 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5862 i.prefixes -= 1;
5863 }
5864 /* Pentium4 branch hints. */
5865 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5866 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5867 {
5868 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5869 i.prefixes--;
5870 }
5871 }
5872 else
5873 {
5874 int code16;
5875
5876 code16 = 0;
5877 if (flag_code == CODE_16BIT)
5878 code16 = CODE16;
5879
5880 if (i.prefix[DATA_PREFIX] != 0)
5881 {
5882 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
5883 i.prefixes -= 1;
5884 code16 ^= CODE16;
5885 }
5886
5887 size = 4;
5888 if (code16)
5889 size = 2;
5890 }
5891
5892 if (i.prefix[REX_PREFIX] != 0)
5893 {
5894 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
5895 i.prefixes -= 1;
5896 }
5897
5898 if (i.prefixes != 0 && !intel_syntax)
5899 as_warn (_("skipping prefixes on this instruction"));
5900
5901 p = frag_more (1 + size);
5902 *p++ = i.tm.base_opcode;
5903
5904 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5905 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
5906
5907 /* All jumps handled here are signed, but don't use a signed limit
5908 check for 32 and 16 bit jumps as we want to allow wrap around at
5909 4G and 64k respectively. */
5910 if (size == 1)
5911 fixP->fx_signed = 1;
5912}
5913
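/* Output a direct intersegment (far) jump or call: the opcode, then
   the offset (2 or 4 bytes), then the 16-bit segment selector. */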
5914static void
5915output_interseg_jump (void)
5916{
5917 char *p;
5918 int size;
5919 int prefix;
5920 int code16;
5921
5922 code16 = 0;
5923 if (flag_code == CODE_16BIT)
5924 code16 = CODE16;
5925
5926 prefix = 0;
5927 if (i.prefix[DATA_PREFIX] != 0)
5928 {
5929 prefix = 1;
5930 i.prefixes -= 1;
5931 code16 ^= CODE16;
5932 }
5933 if (i.prefix[REX_PREFIX] != 0)
5934 {
5935 prefix++;
5936 i.prefixes -= 1;
5937 }
5938
5939 size = 4;
5940 if (code16)
5941 size = 2;
5942
5943 if (i.prefixes != 0 && !intel_syntax)
5944 as_warn (_("skipping prefixes on this instruction"));
5945
5946 /* 1 opcode; 2 segment; offset */
5947 p = frag_more (prefix + 1 + 2 + size);
5948
5949 if (i.prefix[DATA_PREFIX] != 0)
5950 *p++ = DATA_PREFIX_OPCODE;
5951
5952 if (i.prefix[REX_PREFIX] != 0)
5953 *p++ = i.prefix[REX_PREFIX];
5954
5955 *p++ = i.tm.base_opcode;
5956 if (i.op[1].imms->X_op == O_constant)
5957 {
5958 offsetT n = i.op[1].imms->X_add_number;
5959
5960 if (size == 2
5961 && !fits_in_unsigned_word (n)
5962 && !fits_in_signed_word (n))
5963 {
5964 as_bad (_("16-bit jump out of range"));
5965 return;
5966 }
5967 md_number_to_chars (p, n, size);
5968 }
5969 else
5970 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5971 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
5972 if (i.op[0].imms->X_op != O_constant)
5973 as_bad (_("can't handle non absolute segment in `%s'"),
5974 i.tm.name);
5975 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
5976}
5977
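/* Output the assembled instruction: jumps are dispatched to the
   routines above; for everything else emit prefixes (or the VEX
   prefix), opcode bytes, ModRM and SIB bytes, then any displacement
   and immediate fields. */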
5978static void
5979output_insn (void)
5980{
5981 fragS *insn_start_frag;
5982 offsetT insn_start_off;
5983
5984 /* Tie dwarf2 debug info to the address at the start of the insn.
5985 We can't do this after the insn has been output as the current
5986 frag may have been closed off, e.g. by frag_var. */
5987 dwarf2_emit_insn (0);
5988
5989 insn_start_frag = frag_now;
5990 insn_start_off = frag_now_fix ();
5991
5992 /* Output jumps. */
5993 if (i.tm.opcode_modifier.jump)
5994 output_branch ();
5995 else if (i.tm.opcode_modifier.jumpbyte
5996 || i.tm.opcode_modifier.jumpdword)
5997 output_jump ();
5998 else if (i.tm.opcode_modifier.jumpintersegment)
5999 output_interseg_jump ();
6000 else
6001 {
6002 /* Output normal instructions here. */
6003 char *p;
6004 unsigned char *q;
6005 unsigned int j;
6006 unsigned int prefix;
6007
6008 /* Since the VEX prefix contains the implicit prefix, we don't
6009 need the explicit prefix. */
6010 if (!i.tm.opcode_modifier.vex)
6011 {
6012 switch (i.tm.opcode_length)
6013 {
6014 case 3:
6015 if (i.tm.base_opcode & 0xff000000)
6016 {
6017 prefix = (i.tm.base_opcode >> 24) & 0xff;
6018 goto check_prefix;
6019 }
6020 break;
6021 case 2:
6022 if ((i.tm.base_opcode & 0xff0000) != 0)
6023 {
6024 prefix = (i.tm.base_opcode >> 16) & 0xff;
6025 if (i.tm.cpu_flags.bitfield.cpupadlock)
6026 {
6027check_prefix:
6028 if (prefix != REPE_PREFIX_OPCODE
6029 || (i.prefix[REP_PREFIX]
6030 != REPE_PREFIX_OPCODE))
6031 add_prefix (prefix);
6032 }
6033 else
6034 add_prefix (prefix);
6035 }
6036 break;
6037 case 1:
6038 break;
6039 default:
6040 abort ();
6041 }
6042
6043 /* The prefix bytes. */
6044 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6045 if (*q)
6046 FRAG_APPEND_1_CHAR (*q);
6047 }
6048
6049 if (i.tm.opcode_modifier.vex)
6050 {
6051 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6052 if (*q)
6053 switch (j)
6054 {
6055 case REX_PREFIX:
6056 /* REX byte is encoded in VEX prefix. */
6057 break;
6058 case SEG_PREFIX:
6059 case ADDR_PREFIX:
6060 FRAG_APPEND_1_CHAR (*q);
6061 break;
6062 default:
6063 /* There should be no other prefixes for instructions
6064 with VEX prefix. */
6065 abort ();
6066 }
6067
6068 /* Now the VEX prefix. */
6069 p = frag_more (i.vex.length);
6070 for (j = 0; j < i.vex.length; j++)
6071 p[j] = i.vex.bytes[j];
6072 }
6073
6074 /* Now the opcode; be careful about word order here! */
6075 if (i.tm.opcode_length == 1)
6076 {
6077 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6078 }
6079 else
6080 {
6081 switch (i.tm.opcode_length)
6082 {
6083 case 3:
6084 p = frag_more (3);
6085 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6086 break;
6087 case 2:
6088 p = frag_more (2);
6089 break;
6090 default:
6091 abort ();
6092 break;
6093 }
6094
6095 /* Put out high byte first: can't use md_number_to_chars! */
6096 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6097 *p = i.tm.base_opcode & 0xff;
6098 }
6099
6100 /* Now the modrm byte and sib byte (if present). */
6101 if (i.tm.opcode_modifier.modrm)
6102 {
6103 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6104 | i.rm.reg << 3
6105 | i.rm.mode << 6));
6106 /* If i.rm.regmem == ESP (4)
6107 && i.rm.mode != (Register mode)
6108 && not 16 bit
6109 ==> need a SIB byte. */
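 /* For example, `movl (%esp), %eax' encodes as 8b 04 24: ModRM 0x04
    selects the SIB escape, and SIB 0x24 encodes base == %esp with
    no index.  */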
6110 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6111 && i.rm.mode != 3
6112 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6113 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6114 | i.sib.index << 3
6115 | i.sib.scale << 6));
6116 }
6117
6118 if (i.disp_operands)
6119 output_disp (insn_start_frag, insn_start_off);
6120
6121 if (i.imm_operands)
6122 output_imm (insn_start_frag, insn_start_off);
6123 }
6124
6125#ifdef DEBUG386
6126 if (flag_debug)
6127 {
6128 pi ("" /*line*/, &i);
6129 }
6130#endif /* DEBUG386 */
6131}
6132
6133/* Return the size of the displacement operand N. */
6134
6135static int
6136disp_size (unsigned int n)
6137{
6138 int size = 4;
6139 if (i.types[n].bitfield.disp64)
6140 size = 8;
6141 else if (i.types[n].bitfield.disp8)
6142 size = 1;
6143 else if (i.types[n].bitfield.disp16)
6144 size = 2;
6145 return size;
6146}
6147
6148/* Return the size of the immediate operand N. */
6149
6150static int
6151imm_size (unsigned int n)
6152{
6153 int size = 4;
6154 if (i.types[n].bitfield.imm64)
6155 size = 8;
6156 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6157 size = 1;
6158 else if (i.types[n].bitfield.imm16)
6159 size = 2;
6160 return size;
6161}
6162
6163static void
6164output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6165{
6166 char *p;
6167 unsigned int n;
6168
6169 for (n = 0; n < i.operands; n++)
6170 {
6171 if (operand_type_check (i.types[n], disp))
6172 {
6173 if (i.op[n].disps->X_op == O_constant)
6174 {
6175 int size = disp_size (n);
6176 offsetT val;
6177
6178 val = offset_in_range (i.op[n].disps->X_add_number,
6179 size);
6180 p = frag_more (size);
6181 md_number_to_chars (p, val, size);
6182 }
6183 else
6184 {
6185 enum bfd_reloc_code_real reloc_type;
6186 int size = disp_size (n);
6187 int sign = i.types[n].bitfield.disp32s;
6188 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6189
6190 /* We can't have 8 bit displacement here. */
6191 gas_assert (!i.types[n].bitfield.disp8);
6192
6193 /* The PC relative address is computed relative
6194 to the instruction boundary, so in case immediate
6195 fields follow, we need to adjust the value. */
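 /* For instance, `testl $1, foo(%rip)' is encoded as
    f7 05 <disp32> <imm32>; the hardware resolves foo relative to the
    end of the whole instruction, so the size of the trailing
    immediate is subtracted from the addend here.  */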
6196 if (pcrel && i.imm_operands)
6197 {
6198 unsigned int n1;
6199 int sz = 0;
6200
6201 for (n1 = 0; n1 < i.operands; n1++)
6202 if (operand_type_check (i.types[n1], imm))
6203 {
6204 /* Only one immediate is allowed for PC
6205 relative address. */
6206 gas_assert (sz == 0);
6207 sz = imm_size (n1);
6208 i.op[n].disps->X_add_number -= sz;
6209 }
6210 /* We should find the immediate. */
6211 gas_assert (sz != 0);
6212 }
6213
6214 p = frag_more (size);
6215 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6216 if (GOT_symbol
6217 && GOT_symbol == i.op[n].disps->X_add_symbol
6218 && (((reloc_type == BFD_RELOC_32
6219 || reloc_type == BFD_RELOC_X86_64_32S
6220 || (reloc_type == BFD_RELOC_64
6221 && object_64bit))
6222 && (i.op[n].disps->X_op == O_symbol
6223 || (i.op[n].disps->X_op == O_add
6224 && ((symbol_get_value_expression
6225 (i.op[n].disps->X_op_symbol)->X_op)
6226 == O_subtract))))
6227 || reloc_type == BFD_RELOC_32_PCREL))
6228 {
6229 offsetT add;
6230
6231 if (insn_start_frag == frag_now)
6232 add = (p - frag_now->fr_literal) - insn_start_off;
6233 else
6234 {
6235 fragS *fr;
6236
6237 add = insn_start_frag->fr_fix - insn_start_off;
6238 for (fr = insn_start_frag->fr_next;
6239 fr && fr != frag_now; fr = fr->fr_next)
6240 add += fr->fr_fix;
6241 add += p - frag_now->fr_literal;
6242 }
6243
6244 if (!object_64bit)
6245 {
6246 reloc_type = BFD_RELOC_386_GOTPC;
6247 i.op[n].imms->X_add_number += add;
6248 }
6249 else if (reloc_type == BFD_RELOC_64)
6250 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6251 else
6252 /* Don't do the adjustment for x86-64, as there
6253 the pcrel addressing is relative to the _next_
6254 insn, and that is taken care of in other code. */
6255 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6256 }
6257 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6258 i.op[n].disps, pcrel, reloc_type);
6259 }
6260 }
6261 }
6262}
6263
6264static void
6265output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6266{
6267 char *p;
6268 unsigned int n;
6269
6270 for (n = 0; n < i.operands; n++)
6271 {
6272 if (operand_type_check (i.types[n], imm))
6273 {
6274 if (i.op[n].imms->X_op == O_constant)
6275 {
6276 int size = imm_size (n);
6277 offsetT val;
6278
6279 val = offset_in_range (i.op[n].imms->X_add_number,
6280 size);
6281 p = frag_more (size);
6282 md_number_to_chars (p, val, size);
6283 }
6284 else
6285 {
6286 /* Not absolute_section.
6287 Need a 32-bit fixup (don't support 8bit
6288 non-absolute imms). Try to support other
6289 sizes ... */
6290 enum bfd_reloc_code_real reloc_type;
6291 int size = imm_size (n);
6292 int sign;
6293
6294 if (i.types[n].bitfield.imm32s
6295 && (i.suffix == QWORD_MNEM_SUFFIX
6296 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6297 sign = 1;
6298 else
6299 sign = 0;
6300
6301 p = frag_more (size);
6302 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6303
6304 /* This is tough to explain. We end up with this one if we
6305 * have operands that look like
6306 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6307 * obtain the absolute address of the GOT, and it is strongly
6308 * preferable from a performance point of view to avoid using
6309 * a runtime relocation for this. The actual sequence of
6310 * instructions often looks something like:
6311 *
6312 * call .L66
6313 * .L66:
6314 * popl %ebx
6315 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6316 *
6317 * The call and pop essentially return the absolute address
6318 * of the label .L66 and store it in %ebx. The linker itself
6319 * will ultimately change the first operand of the addl so
6320 * that %ebx points to the GOT, but to keep things simple, the
6321 * .o file must have this operand set so that it generates not
6322 * the absolute address of .L66, but the absolute address of
6323 * itself. This allows the linker itself to simply treat a GOTPC
6324 * relocation as asking for a pcrel offset to the GOT to be
6325 * added in, and the addend of the relocation is stored in the
6326 * operand field for the instruction itself.
6327 *
6328 * Our job here is to fix the operand so that it would add
6329 * the correct offset so that %ebx would point to itself. The
6330 * thing that is tricky is that .-.L66 will point to the
6331 * beginning of the instruction, so we need to further modify
6332 * the operand so that it will point to itself. There are
6333 * other cases where you have something like:
6334 *
6335 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6336 *
6337 * and here no correction would be required. Internally in
6338 * the assembler we treat operands of this form as not being
6339 * pcrel since the '.' is explicitly mentioned, and I wonder
6340 * whether it would simplify matters to do it this way. Who
6341 * knows. In earlier versions of the PIC patches, the
6342 * pcrel_adjust field was used to store the correction, but
6343 * since the expression is not pcrel, I felt it would be
6344 * confusing to do it this way. */
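 /* As a concrete illustration: if the addl above is encoded as
    81 c3 <imm32>, the immediate field starts 2 bytes into the
    instruction, so the `add' computed below is 2 and the addend
    becomes _GLOBAL_OFFSET_TABLE_+[.-.L66]+2.  */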
6345
6346 if ((reloc_type == BFD_RELOC_32
6347 || reloc_type == BFD_RELOC_X86_64_32S
6348 || reloc_type == BFD_RELOC_64)
6349 && GOT_symbol
6350 && GOT_symbol == i.op[n].imms->X_add_symbol
6351 && (i.op[n].imms->X_op == O_symbol
6352 || (i.op[n].imms->X_op == O_add
6353 && ((symbol_get_value_expression
6354 (i.op[n].imms->X_op_symbol)->X_op)
6355 == O_subtract))))
6356 {
6357 offsetT add;
6358
6359 if (insn_start_frag == frag_now)
6360 add = (p - frag_now->fr_literal) - insn_start_off;
6361 else
6362 {
6363 fragS *fr;
6364
6365 add = insn_start_frag->fr_fix - insn_start_off;
6366 for (fr = insn_start_frag->fr_next;
6367 fr && fr != frag_now; fr = fr->fr_next)
6368 add += fr->fr_fix;
6369 add += p - frag_now->fr_literal;
6370 }
6371
6372 if (!object_64bit)
6373 reloc_type = BFD_RELOC_386_GOTPC;
6374 else if (size == 4)
6375 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6376 else if (size == 8)
6377 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6378 i.op[n].imms->X_add_number += add;
6379 }
6380 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6381 i.op[n].imms, 0, reloc_type);
6382 }
6383 }
6384 }
6385}
6386\f
6387/* x86_cons_fix_new is called via the expression parsing code when a
6388 reloc is needed. We use this hook to get the correct .got reloc. */
6389static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6390static int cons_sign = -1;
6391
6392void
6393x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6394 expressionS *exp)
6395{
6396 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6397
6398 got_reloc = NO_RELOC;
6399
6400#ifdef TE_PE
6401 if (exp->X_op == O_secrel)
6402 {
6403 exp->X_op = O_symbol;
6404 r = BFD_RELOC_32_SECREL;
6405 }
6406#endif
6407
6408 fix_new_exp (frag, off, len, exp, 0, r);
6409}
6410
6411#if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
6412# define lex_got(reloc, adjust, types) NULL
6413#else
6414/* Parse operands of the form
6415 <symbol>@GOTOFF+<nnn>
6416 and similar .plt or .got references.
6417
6418 If we find one, set up the correct relocation in RELOC and copy the
6419 input string, minus the `@GOTOFF' into a malloc'd buffer for
6420 parsing by the calling routine. Return this buffer, and if ADJUST
6421 is non-null set it to the length of the string we removed from the
6422 input line. Otherwise return NULL. */
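 /* For example, given `foo@GOTOFF+4' in 32-bit mode, *REL is set to
    BFD_RELOC_386_GOTOFF and the buffer handed back to the caller
    contains "foo +4" (the reloc token replaced by a single space).  */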
6423static char *
6424lex_got (enum bfd_reloc_code_real *rel,
6425 int *adjust,
6426 i386_operand_type *types)
6427{
6428 /* Some of the relocations depend on the size of the field to be
6429 relocated. But in our callers i386_immediate and i386_displacement
6430 we don't yet know the operand size (this will be set by insn
6431 matching). Hence we record the word32 relocation here,
6432 and adjust the reloc according to the real size in reloc(). */
6433 static const struct {
6434 const char *str;
6435 int len;
6436 const enum bfd_reloc_code_real rel[2];
6437 const i386_operand_type types64;
6438 } gotrel[] = {
6439 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6440 BFD_RELOC_X86_64_PLTOFF64 },
6441 OPERAND_TYPE_IMM64 },
6442 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6443 BFD_RELOC_X86_64_PLT32 },
6444 OPERAND_TYPE_IMM32_32S_DISP32 },
6445 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6446 BFD_RELOC_X86_64_GOTPLT64 },
6447 OPERAND_TYPE_IMM64_DISP64 },
6448 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6449 BFD_RELOC_X86_64_GOTOFF64 },
6450 OPERAND_TYPE_IMM64_DISP64 },
6451 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6452 BFD_RELOC_X86_64_GOTPCREL },
6453 OPERAND_TYPE_IMM32_32S_DISP32 },
6454 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6455 BFD_RELOC_X86_64_TLSGD },
6456 OPERAND_TYPE_IMM32_32S_DISP32 },
6457 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6458 _dummy_first_bfd_reloc_code_real },
6459 OPERAND_TYPE_NONE },
6460 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6461 BFD_RELOC_X86_64_TLSLD },
6462 OPERAND_TYPE_IMM32_32S_DISP32 },
6463 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6464 BFD_RELOC_X86_64_GOTTPOFF },
6465 OPERAND_TYPE_IMM32_32S_DISP32 },
6466 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6467 BFD_RELOC_X86_64_TPOFF32 },
6468 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6469 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6470 _dummy_first_bfd_reloc_code_real },
6471 OPERAND_TYPE_NONE },
6472 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6473 BFD_RELOC_X86_64_DTPOFF32 },
6474 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6475 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6476 _dummy_first_bfd_reloc_code_real },
6477 OPERAND_TYPE_NONE },
6478 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6479 _dummy_first_bfd_reloc_code_real },
6480 OPERAND_TYPE_NONE },
6481 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6482 BFD_RELOC_X86_64_GOT32 },
6483 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6484 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6485 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6486 OPERAND_TYPE_IMM32_32S_DISP32 },
6487 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6488 BFD_RELOC_X86_64_TLSDESC_CALL },
6489 OPERAND_TYPE_IMM32_32S_DISP32 },
6490 };
6491 char *cp;
6492 unsigned int j;
6493
6494 if (!IS_ELF)
6495 return NULL;
6496
6497 for (cp = input_line_pointer; *cp != '@'; cp++)
6498 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6499 return NULL;
6500
6501 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6502 {
6503 int len = gotrel[j].len;
6504 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6505 {
6506 if (gotrel[j].rel[object_64bit] != 0)
6507 {
6508 int first, second;
6509 char *tmpbuf, *past_reloc;
6510
6511 *rel = gotrel[j].rel[object_64bit];
6512 if (adjust)
6513 *adjust = len;
6514
6515 if (types)
6516 {
6517 if (flag_code != CODE_64BIT)
6518 {
6519 types->bitfield.imm32 = 1;
6520 types->bitfield.disp32 = 1;
6521 }
6522 else
6523 *types = gotrel[j].types64;
6524 }
6525
6526 if (GOT_symbol == NULL)
6527 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6528
6529 /* The length of the first part of our input line. */
6530 first = cp - input_line_pointer;
6531
6532 /* The second part goes from after the reloc token until
6533 (and including) an end_of_line char or comma. */
6534 past_reloc = cp + 1 + len;
6535 cp = past_reloc;
6536 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6537 ++cp;
6538 second = cp + 1 - past_reloc;
6539
6540 /* Allocate and copy string. The trailing NUL shouldn't
6541 be necessary, but be safe. */
6542 tmpbuf = (char *) xmalloc (first + second + 2);
6543 memcpy (tmpbuf, input_line_pointer, first);
6544 if (second != 0 && *past_reloc != ' ')
6545 /* Replace the relocation token with ' ', so that
6546 errors like foo@GOTOFF1 will be detected. */
6547 tmpbuf[first++] = ' ';
6548 memcpy (tmpbuf + first, past_reloc, second);
6549 tmpbuf[first + second] = '\0';
6550 return tmpbuf;
6551 }
6552
6553 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6554 gotrel[j].str, 1 << (5 + object_64bit));
6555 return NULL;
6556 }
6557 }
6558
6559 /* Might be a symbol version string. Don't as_bad here. */
6560 return NULL;
6561}
6562#endif
6563
6564void
6565x86_cons (expressionS *exp, int size)
6566{
6567 intel_syntax = -intel_syntax;
6568
6569 exp->X_md = 0;
6570 if (size == 4 || (object_64bit && size == 8))
6571 {
6572 /* Handle @GOTOFF and the like in an expression. */
6573 char *save;
6574 char *gotfree_input_line;
6575 int adjust;
6576
6577 save = input_line_pointer;
6578 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6579 if (gotfree_input_line)
6580 input_line_pointer = gotfree_input_line;
6581
6582 expression (exp);
6583
6584 if (gotfree_input_line)
6585 {
6586 /* expression () has merrily parsed up to the end of line,
6587 or a comma - in the wrong buffer. Transfer how far
6588 input_line_pointer has moved to the right buffer. */
6589 input_line_pointer = (save
6590 + (input_line_pointer - gotfree_input_line)
6591 + adjust);
6592 free (gotfree_input_line);
6593 if (exp->X_op == O_constant
6594 || exp->X_op == O_absent
6595 || exp->X_op == O_illegal
6596 || exp->X_op == O_register
6597 || exp->X_op == O_big)
6598 {
6599 char c = *input_line_pointer;
6600 *input_line_pointer = 0;
6601 as_bad (_("missing or invalid expression `%s'"), save);
6602 *input_line_pointer = c;
6603 }
6604 }
6605 }
6606 else
6607 expression (exp);
6608
6609 intel_syntax = -intel_syntax;
6610
6611 if (intel_syntax)
6612 i386_intel_simplify (exp);
6613}
6614
6615static void
6616signed_cons (int size)
6617{
6618 if (flag_code == CODE_64BIT)
6619 cons_sign = 1;
6620 cons (size);
6621 cons_sign = -1;
6622}
6623
6624#ifdef TE_PE
6625static void
6626pe_directive_secrel (dummy)
6627 int dummy ATTRIBUTE_UNUSED;
6628{
6629 expressionS exp;
6630
6631 do
6632 {
6633 expression (&exp);
6634 if (exp.X_op == O_symbol)
6635 exp.X_op = O_secrel;
6636
6637 emit_expr (&exp, 4);
6638 }
6639 while (*input_line_pointer++ == ',');
6640
6641 input_line_pointer--;
6642 demand_empty_rest_of_line ();
6643}
6644#endif
6645
6646static int
6647i386_immediate (char *imm_start)
6648{
6649 char *save_input_line_pointer;
6650 char *gotfree_input_line;
6651 segT exp_seg = 0;
6652 expressionS *exp;
6653 i386_operand_type types;
6654
6655 operand_type_set (&types, ~0);
6656
6657 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6658 {
6659 as_bad (_("at most %d immediate operands are allowed"),
6660 MAX_IMMEDIATE_OPERANDS);
6661 return 0;
6662 }
6663
6664 exp = &im_expressions[i.imm_operands++];
6665 i.op[this_operand].imms = exp;
6666
6667 if (is_space_char (*imm_start))
6668 ++imm_start;
6669
6670 save_input_line_pointer = input_line_pointer;
6671 input_line_pointer = imm_start;
6672
6673 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6674 if (gotfree_input_line)
6675 input_line_pointer = gotfree_input_line;
6676
6677 exp_seg = expression (exp);
6678
6679 SKIP_WHITESPACE ();
6680 if (*input_line_pointer)
6681 as_bad (_("junk `%s' after expression"), input_line_pointer);
6682
6683 input_line_pointer = save_input_line_pointer;
6684 if (gotfree_input_line)
6685 {
6686 free (gotfree_input_line);
6687
6688 if (exp->X_op == O_constant || exp->X_op == O_register)
6689 exp->X_op = O_illegal;
6690 }
6691
6692 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6693}
6694
6695static int
6696i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6697 i386_operand_type types, const char *imm_start)
6698{
6699 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6700 {
6701 if (imm_start)
6702 as_bad (_("missing or invalid immediate expression `%s'"),
6703 imm_start);
6704 return 0;
6705 }
6706 else if (exp->X_op == O_constant)
6707 {
6708 /* Size it properly later. */
6709 i.types[this_operand].bitfield.imm64 = 1;
6710 /* If not 64bit, sign extend val. */
6711 if (flag_code != CODE_64BIT
6712 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6713 exp->X_add_number
6714 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6715 }
6716#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6717 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6718 && exp_seg != absolute_section
6719 && exp_seg != text_section
6720 && exp_seg != data_section
6721 && exp_seg != bss_section
6722 && exp_seg != undefined_section
6723 && !bfd_is_com_section (exp_seg))
6724 {
6725 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6726 return 0;
6727 }
6728#endif
6729 else if (!intel_syntax && exp->X_op == O_register)
6730 {
6731 if (imm_start)
6732 as_bad (_("illegal immediate register operand %s"), imm_start);
6733 return 0;
6734 }
6735 else
6736 {
6737 /* This is an address. The size of the address will be
6738 determined later, depending on destination register,
6739 suffix, or the default for the section. */
6740 i.types[this_operand].bitfield.imm8 = 1;
6741 i.types[this_operand].bitfield.imm16 = 1;
6742 i.types[this_operand].bitfield.imm32 = 1;
6743 i.types[this_operand].bitfield.imm32s = 1;
6744 i.types[this_operand].bitfield.imm64 = 1;
6745 i.types[this_operand] = operand_type_and (i.types[this_operand],
6746 types);
6747 }
6748
6749 return 1;
6750}
6751
6752static char *
6753i386_scale (char *scale)
6754{
6755 offsetT val;
6756 char *save = input_line_pointer;
6757
6758 input_line_pointer = scale;
6759 val = get_absolute_expression ();
6760
6761 switch (val)
6762 {
6763 case 1:
6764 i.log2_scale_factor = 0;
6765 break;
6766 case 2:
6767 i.log2_scale_factor = 1;
6768 break;
6769 case 4:
6770 i.log2_scale_factor = 2;
6771 break;
6772 case 8:
6773 i.log2_scale_factor = 3;
6774 break;
6775 default:
6776 {
6777 char sep = *input_line_pointer;
6778
6779 *input_line_pointer = '\0';
6780 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6781 scale);
6782 *input_line_pointer = sep;
6783 input_line_pointer = save;
6784 return NULL;
6785 }
6786 }
6787 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6788 {
6789 as_warn (_("scale factor of %d without an index register"),
6790 1 << i.log2_scale_factor);
6791 i.log2_scale_factor = 0;
6792 }
6793 scale = input_line_pointer;
6794 input_line_pointer = save;
6795 return scale;
6796}
6797
6798static int
6799i386_displacement (char *disp_start, char *disp_end)
6800{
6801 expressionS *exp;
6802 segT exp_seg = 0;
6803 char *save_input_line_pointer;
6804 char *gotfree_input_line;
6805 int override;
6806 i386_operand_type bigdisp, types = anydisp;
6807 int ret;
6808
6809 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6810 {
6811 as_bad (_("at most %d displacement operands are allowed"),
6812 MAX_MEMORY_OPERANDS);
6813 return 0;
6814 }
6815
6816 operand_type_set (&bigdisp, 0);
6817 if ((i.types[this_operand].bitfield.jumpabsolute)
6818 || (!current_templates->start->opcode_modifier.jump
6819 && !current_templates->start->opcode_modifier.jumpdword))
6820 {
6821 bigdisp.bitfield.disp32 = 1;
6822 override = (i.prefix[ADDR_PREFIX] != 0);
6823 if (flag_code == CODE_64BIT)
6824 {
6825 if (!override)
6826 {
6827 bigdisp.bitfield.disp32s = 1;
6828 bigdisp.bitfield.disp64 = 1;
6829 }
6830 }
6831 else if ((flag_code == CODE_16BIT) ^ override)
6832 {
6833 bigdisp.bitfield.disp32 = 0;
6834 bigdisp.bitfield.disp16 = 1;
6835 }
6836 }
6837 else
6838 {
6839 /* For PC-relative branches, the width of the displacement
6840 is dependent upon data size, not address size. */
6841 override = (i.prefix[DATA_PREFIX] != 0);
6842 if (flag_code == CODE_64BIT)
6843 {
6844 if (override || i.suffix == WORD_MNEM_SUFFIX)
6845 bigdisp.bitfield.disp16 = 1;
6846 else
6847 {
6848 bigdisp.bitfield.disp32 = 1;
6849 bigdisp.bitfield.disp32s = 1;
6850 }
6851 }
6852 else
6853 {
6854 if (!override)
6855 override = (i.suffix == (flag_code != CODE_16BIT
6856 ? WORD_MNEM_SUFFIX
6857 : LONG_MNEM_SUFFIX));
6858 bigdisp.bitfield.disp32 = 1;
6859 if ((flag_code == CODE_16BIT) ^ override)
6860 {
6861 bigdisp.bitfield.disp32 = 0;
6862 bigdisp.bitfield.disp16 = 1;
6863 }
6864 }
6865 }
6866 i.types[this_operand] = operand_type_or (i.types[this_operand],
6867 bigdisp);
6868
6869 exp = &disp_expressions[i.disp_operands];
6870 i.op[this_operand].disps = exp;
6871 i.disp_operands++;
6872 save_input_line_pointer = input_line_pointer;
6873 input_line_pointer = disp_start;
6874 END_STRING_AND_SAVE (disp_end);
6875
6876#ifndef GCC_ASM_O_HACK
6877#define GCC_ASM_O_HACK 0
6878#endif
6879#if GCC_ASM_O_HACK
6880 END_STRING_AND_SAVE (disp_end + 1);
6881 if (i.types[this_operand].bitfield.baseIndex
6882 && displacement_string_end[-1] == '+')
6883 {
6884 /* This hack is to avoid a warning when using the "o"
6885 constraint within gcc asm statements.
6886 For instance:
6887
6888 #define _set_tssldt_desc(n,addr,limit,type) \
6889 __asm__ __volatile__ ( \
6890 "movw %w2,%0\n\t" \
6891 "movw %w1,2+%0\n\t" \
6892 "rorl $16,%1\n\t" \
6893 "movb %b1,4+%0\n\t" \
6894 "movb %4,5+%0\n\t" \
6895 "movb $0,6+%0\n\t" \
6896 "movb %h1,7+%0\n\t" \
6897 "rorl $16,%1" \
6898 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
6899
6900 This works great except that the output assembler ends
6901 up looking a bit weird if it turns out that there is
6902 no offset. You end up producing code that looks like:
6903
6904 #APP
6905 movw $235,(%eax)
6906 movw %dx,2+(%eax)
6907 rorl $16,%edx
6908 movb %dl,4+(%eax)
6909 movb $137,5+(%eax)
6910 movb $0,6+(%eax)
6911 movb %dh,7+(%eax)
6912 rorl $16,%edx
6913 #NO_APP
6914
6915 So here we provide the missing zero. */
6916
6917 *displacement_string_end = '0';
6918 }
6919#endif
6920 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6921 if (gotfree_input_line)
6922 input_line_pointer = gotfree_input_line;
6923
6924 exp_seg = expression (exp);
6925
6926 SKIP_WHITESPACE ();
6927 if (*input_line_pointer)
6928 as_bad (_("junk `%s' after expression"), input_line_pointer);
6929#if GCC_ASM_O_HACK
6930 RESTORE_END_STRING (disp_end + 1);
6931#endif
6932 input_line_pointer = save_input_line_pointer;
6933 if (gotfree_input_line)
6934 {
6935 free (gotfree_input_line);
6936
6937 if (exp->X_op == O_constant || exp->X_op == O_register)
6938 exp->X_op = O_illegal;
6939 }
6940
6941 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
6942
6943 RESTORE_END_STRING (disp_end);
6944
6945 return ret;
6946}
6947
6948static int
6949i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6950 i386_operand_type types, const char *disp_start)
6951{
6952 i386_operand_type bigdisp;
6953 int ret = 1;
6954
6955 /* We do this to make sure that the section symbol is in
6956 the symbol table. We will ultimately change the relocation
6957 to be relative to the beginning of the section. */
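 /* For example, a 32-bit `movl foo@GOTOFF(%ebx), %eax' reaches this
    point with BFD_RELOC_386_GOTOFF; the displacement is rewritten as
    `foo - _GLOBAL_OFFSET_TABLE_' and the reloc becomes a plain
    BFD_RELOC_32.  */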
6958 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
6959 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
6960 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6961 {
6962 if (exp->X_op != O_symbol)
6963 goto inv_disp;
6964
6965 if (S_IS_LOCAL (exp->X_add_symbol)
6966 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
6967 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
6968 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
6969 exp->X_op = O_subtract;
6970 exp->X_op_symbol = GOT_symbol;
6971 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
6972 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
6973 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6974 i.reloc[this_operand] = BFD_RELOC_64;
6975 else
6976 i.reloc[this_operand] = BFD_RELOC_32;
6977 }
6978
6979 else if (exp->X_op == O_absent
6980 || exp->X_op == O_illegal
6981 || exp->X_op == O_big)
6982 {
6983 inv_disp:
6984 as_bad (_("missing or invalid displacement expression `%s'"),
6985 disp_start);
6986 ret = 0;
6987 }
6988
6989 else if (flag_code == CODE_64BIT
6990 && !i.prefix[ADDR_PREFIX]
6991 && exp->X_op == O_constant)
6992 {
6993 /* Since the displacement is sign extended to 64bit, don't allow
6994 disp32 and turn off disp32s if they are out of range. */
6995 i.types[this_operand].bitfield.disp32 = 0;
6996 if (!fits_in_signed_long (exp->X_add_number))
6997 {
6998 i.types[this_operand].bitfield.disp32s = 0;
6999 if (i.types[this_operand].bitfield.baseindex)
7000 {
7001 as_bad (_("0x%lx out of range of signed 32bit displacement"),
7002 (long) exp->X_add_number);
7003 ret = 0;
7004 }
7005 }
7006 }
7007
7008#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7009 else if (exp->X_op != O_constant
7010 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7011 && exp_seg != absolute_section
7012 && exp_seg != text_section
7013 && exp_seg != data_section
7014 && exp_seg != bss_section
7015 && exp_seg != undefined_section
7016 && !bfd_is_com_section (exp_seg))
7017 {
7018 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7019 ret = 0;
7020 }
7021#endif
7022
7023 /* Check if this is a displacement only operand. */
7024 bigdisp = i.types[this_operand];
7025 bigdisp.bitfield.disp8 = 0;
7026 bigdisp.bitfield.disp16 = 0;
7027 bigdisp.bitfield.disp32 = 0;
7028 bigdisp.bitfield.disp32s = 0;
7029 bigdisp.bitfield.disp64 = 0;
7030 if (operand_type_all_zero (&bigdisp))
7031 i.types[this_operand] = operand_type_and (i.types[this_operand],
7032 types);
7033
7034 return ret;
7035}
7036
7037/* Make sure the memory operand we've been dealt is valid.
7038 Return 1 on success, 0 on a failure. */
7039
7040static int
7041i386_index_check (const char *operand_string)
7042{
7043 int ok;
7044 const char *kind = "base/index";
7045#if INFER_ADDR_PREFIX
7046 int fudged = 0;
7047
7048 tryprefix:
7049#endif
7050 ok = 1;
7051 if (current_templates->start->opcode_modifier.isstring
7052 && !current_templates->start->opcode_modifier.immext
7053 && (current_templates->end[-1].opcode_modifier.isstring
7054 || i.mem_operands))
7055 {
7056 /* Memory operands of string insns are special in that they only allow
7057 a single register (rDI, rSI, or rBX) as their memory address. */
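 /* For example, `stosb' only accepts %es:(%edi) (or its 16/64-bit
    forms) as a memory operand, while `xlatb' only accepts (%bx) and
    its wider forms.  */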
7058 unsigned int expected;
7059
7060 kind = "string address";
7061
7062 if (current_templates->start->opcode_modifier.w)
7063 {
7064 i386_operand_type type = current_templates->end[-1].operand_types[0];
7065
7066 if (!type.bitfield.baseindex
7067 || ((!i.mem_operands != !intel_syntax)
7068 && current_templates->end[-1].operand_types[1]
7069 .bitfield.baseindex))
7070 type = current_templates->end[-1].operand_types[1];
7071 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7072 }
7073 else
7074 expected = 3 /* rBX */;
7075
7076 if (!i.base_reg || i.index_reg
7077 || operand_type_check (i.types[this_operand], disp))
7078 ok = -1;
7079 else if (!(flag_code == CODE_64BIT
7080 ? i.prefix[ADDR_PREFIX]
7081 ? i.base_reg->reg_type.bitfield.reg32
7082 : i.base_reg->reg_type.bitfield.reg64
7083 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7084 ? i.base_reg->reg_type.bitfield.reg32
7085 : i.base_reg->reg_type.bitfield.reg16))
7086 ok = 0;
7087 else if (i.base_reg->reg_num != expected)
7088 ok = -1;
7089
7090 if (ok < 0)
7091 {
7092 unsigned int j;
7093
7094 for (j = 0; j < i386_regtab_size; ++j)
7095 if ((flag_code == CODE_64BIT
7096 ? i.prefix[ADDR_PREFIX]
7097 ? i386_regtab[j].reg_type.bitfield.reg32
7098 : i386_regtab[j].reg_type.bitfield.reg64
7099 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7100 ? i386_regtab[j].reg_type.bitfield.reg32
7101 : i386_regtab[j].reg_type.bitfield.reg16)
7102 && i386_regtab[j].reg_num == expected)
7103 break;
7104 gas_assert (j < i386_regtab_size);
7105 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7106 operand_string,
7107 intel_syntax ? '[' : '(',
7108 register_prefix,
7109 i386_regtab[j].reg_name,
7110 intel_syntax ? ']' : ')');
7111 ok = 1;
7112 }
7113 }
7114 else if (flag_code == CODE_64BIT)
7115 {
7116 if ((i.base_reg
7117 && ((i.prefix[ADDR_PREFIX] == 0
7118 && !i.base_reg->reg_type.bitfield.reg64)
7119 || (i.prefix[ADDR_PREFIX]
7120 && !i.base_reg->reg_type.bitfield.reg32))
7121 && (i.index_reg
7122 || i.base_reg->reg_num !=
7123 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7124 || (i.index_reg
7125 && (!i.index_reg->reg_type.bitfield.baseindex
7126 || (i.prefix[ADDR_PREFIX] == 0
7127 && i.index_reg->reg_num != RegRiz
7128 && !i.index_reg->reg_type.bitfield.reg64
7129 )
7130 || (i.prefix[ADDR_PREFIX]
7131 && i.index_reg->reg_num != RegEiz
7132 && !i.index_reg->reg_type.bitfield.reg32))))
7133 ok = 0;
7134 }
7135 else
7136 {
7137 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7138 {
7139 /* 16bit checks. */
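 /* Only %bx or %bp may serve as a 16-bit base and only %si or %di
    as an index, with no scale: e.g. `(%bx,%si)' is accepted here,
    while `(%si,%bx)' and `(%bx,%si,2)' are not.  */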
7140 if ((i.base_reg
7141 && (!i.base_reg->reg_type.bitfield.reg16
7142 || !i.base_reg->reg_type.bitfield.baseindex))
7143 || (i.index_reg
7144 && (!i.index_reg->reg_type.bitfield.reg16
7145 || !i.index_reg->reg_type.bitfield.baseindex
7146 || !(i.base_reg
7147 && i.base_reg->reg_num < 6
7148 && i.index_reg->reg_num >= 6
7149 && i.log2_scale_factor == 0))))
7150 ok = 0;
7151 }
7152 else
7153 {
7154 /* 32bit checks. */
7155 if ((i.base_reg
7156 && !i.base_reg->reg_type.bitfield.reg32)
7157 || (i.index_reg
7158 && ((!i.index_reg->reg_type.bitfield.reg32
7159 && i.index_reg->reg_num != RegEiz)
7160 || !i.index_reg->reg_type.bitfield.baseindex)))
7161 ok = 0;
7162 }
7163 }
7164 if (!ok)
7165 {
7166#if INFER_ADDR_PREFIX
7167 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7168 {
7169 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7170 i.prefixes += 1;
7171 /* Change the size of any displacement too. At most one of
7172 Disp16 or Disp32 is set.
7173 FIXME. There doesn't seem to be any real need for separate
7174 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7175 Removing them would probably clean up the code quite a lot. */
7176 if (flag_code != CODE_64BIT
7177 && (i.types[this_operand].bitfield.disp16
7178 || i.types[this_operand].bitfield.disp32))
7179 i.types[this_operand]
7180 = operand_type_xor (i.types[this_operand], disp16_32);
7181 fudged = 1;
7182 goto tryprefix;
7183 }
7184 if (fudged)
7185 as_bad (_("`%s' is not a valid %s expression"),
7186 operand_string,
7187 kind);
7188 else
7189#endif
7190 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7191 operand_string,
7192 flag_code_names[i.prefix[ADDR_PREFIX]
7193 ? flag_code == CODE_32BIT
7194 ? CODE_16BIT
7195 : CODE_32BIT
7196 : flag_code],
7197 kind);
7198 }
7199 return ok;
7200}
7201
7202/* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7203 on error. */
7204
7205static int
7206i386_att_operand (char *operand_string)
7207{
7208 const reg_entry *r;
7209 char *end_op;
7210 char *op_string = operand_string;
7211
7212 if (is_space_char (*op_string))
7213 ++op_string;
7214
7215 /* We check for an absolute prefix (differentiating,
7216 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
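 /* E.g. `jmp *%eax' and `call *(%ebx)' are indirect transfers through
    a register or memory operand, while `jmp foo' is a PC-relative jump
    to the label foo.  */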
7217 if (*op_string == ABSOLUTE_PREFIX)
7218 {
7219 ++op_string;
7220 if (is_space_char (*op_string))
7221 ++op_string;
7222 i.types[this_operand].bitfield.jumpabsolute = 1;
7223 }
7224
7225 /* Check if operand is a register. */
7226 if ((r = parse_register (op_string, &end_op)) != NULL)
7227 {
7228 i386_operand_type temp;
7229
7230 /* Check for a segment override by searching for ':' after a
7231 segment register. */
7232 op_string = end_op;
7233 if (is_space_char (*op_string))
7234 ++op_string;
7235 if (*op_string == ':'
7236 && (r->reg_type.bitfield.sreg2
7237 || r->reg_type.bitfield.sreg3))
7238 {
7239 switch (r->reg_num)
7240 {
7241 case 0:
7242 i.seg[i.mem_operands] = &es;
7243 break;
7244 case 1:
7245 i.seg[i.mem_operands] = &cs;
7246 break;
7247 case 2:
7248 i.seg[i.mem_operands] = &ss;
7249 break;
7250 case 3:
7251 i.seg[i.mem_operands] = &ds;
7252 break;
7253 case 4:
7254 i.seg[i.mem_operands] = &fs;
7255 break;
7256 case 5:
7257 i.seg[i.mem_operands] = &gs;
7258 break;
7259 }
7260
7261 /* Skip the ':' and whitespace. */
7262 ++op_string;
7263 if (is_space_char (*op_string))
7264 ++op_string;
7265
7266 if (!is_digit_char (*op_string)
7267 && !is_identifier_char (*op_string)
7268 && *op_string != '('
7269 && *op_string != ABSOLUTE_PREFIX)
7270 {
7271 as_bad (_("bad memory operand `%s'"), op_string);
7272 return 0;
7273 }
7274 /* Handle case of %es:*foo. */
7275 if (*op_string == ABSOLUTE_PREFIX)
7276 {
7277 ++op_string;
7278 if (is_space_char (*op_string))
7279 ++op_string;
7280 i.types[this_operand].bitfield.jumpabsolute = 1;
7281 }
7282 goto do_memory_reference;
7283 }
7284 if (*op_string)
7285 {
7286 as_bad (_("junk `%s' after register"), op_string);
7287 return 0;
7288 }
7289 temp = r->reg_type;
7290 temp.bitfield.baseindex = 0;
7291 i.types[this_operand] = operand_type_or (i.types[this_operand],
7292 temp);
7293 i.types[this_operand].bitfield.unspecified = 0;
7294 i.op[this_operand].regs = r;
7295 i.reg_operands++;
7296 }
7297 else if (*op_string == REGISTER_PREFIX)
7298 {
7299 as_bad (_("bad register name `%s'"), op_string);
7300 return 0;
7301 }
7302 else if (*op_string == IMMEDIATE_PREFIX)
7303 {
7304 ++op_string;
7305 if (i.types[this_operand].bitfield.jumpabsolute)
7306 {
7307 as_bad (_("immediate operand illegal with absolute jump"));
7308 return 0;
7309 }
7310 if (!i386_immediate (op_string))
7311 return 0;
7312 }
7313 else if (is_digit_char (*op_string)
7314 || is_identifier_char (*op_string)
7315 || *op_string == '(')
7316 {
7317 /* This is a memory reference of some sort. */
7318 char *base_string;
7319
7320 /* Start and end of displacement string expression (if found). */
7321 char *displacement_string_start;
7322 char *displacement_string_end;
7323
7324 do_memory_reference:
7325 if ((i.mem_operands == 1
7326 && !current_templates->start->opcode_modifier.isstring)
7327 || i.mem_operands == 2)
7328 {
7329 as_bad (_("too many memory references for `%s'"),
7330 current_templates->start->name);
7331 return 0;
7332 }
7333
7334 /* Check for base index form. We detect the base index form by
7335 looking for an ')' at the end of the operand, searching
7336 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7337 after the '('. */
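 /* For example, in `-4(%ebp,%ecx,2)' the displacement string is "-4",
    the base register is %ebp, the index %ecx, and the scale factor 2.  */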
7338 base_string = op_string + strlen (op_string);
7339
7340 --base_string;
7341 if (is_space_char (*base_string))
7342 --base_string;
7343
7344 /* If we only have a displacement, set up for it to be parsed later. */
7345 displacement_string_start = op_string;
7346 displacement_string_end = base_string + 1;
7347
7348 if (*base_string == ')')
7349 {
7350 char *temp_string;
7351 unsigned int parens_balanced = 1;
7352 /* We've already checked that the numbers of left & right ()'s are
7353 equal, so this loop will not be infinite. */
7354 do
7355 {
7356 base_string--;
7357 if (*base_string == ')')
7358 parens_balanced++;
7359 if (*base_string == '(')
7360 parens_balanced--;
7361 }
7362 while (parens_balanced);
7363
7364 temp_string = base_string;
7365
7366 /* Skip past '(' and whitespace. */
7367 ++base_string;
7368 if (is_space_char (*base_string))
7369 ++base_string;
7370
7371 if (*base_string == ','
7372 || ((i.base_reg = parse_register (base_string, &end_op))
7373 != NULL))
7374 {
7375 displacement_string_end = temp_string;
7376
7377 i.types[this_operand].bitfield.baseindex = 1;
7378
7379 if (i.base_reg)
7380 {
7381 base_string = end_op;
7382 if (is_space_char (*base_string))
7383 ++base_string;
7384 }
7385
7386 /* There may be an index reg or scale factor here. */
7387 if (*base_string == ',')
7388 {
7389 ++base_string;
7390 if (is_space_char (*base_string))
7391 ++base_string;
7392
7393 if ((i.index_reg = parse_register (base_string, &end_op))
7394 != NULL)
7395 {
7396 base_string = end_op;
7397 if (is_space_char (*base_string))
7398 ++base_string;
7399 if (*base_string == ',')
7400 {
7401 ++base_string;
7402 if (is_space_char (*base_string))
7403 ++base_string;
7404 }
7405 else if (*base_string != ')')
7406 {
7407 as_bad (_("expecting `,' or `)' "
7408 "after index register in `%s'"),
7409 operand_string);
7410 return 0;
7411 }
7412 }
7413 else if (*base_string == REGISTER_PREFIX)
7414 {
7415 as_bad (_("bad register name `%s'"), base_string);
7416 return 0;
7417 }
7418
7419 /* Check for scale factor. */
7420 if (*base_string != ')')
7421 {
7422 char *end_scale = i386_scale (base_string);
7423
7424 if (!end_scale)
7425 return 0;
7426
7427 base_string = end_scale;
7428 if (is_space_char (*base_string))
7429 ++base_string;
7430 if (*base_string != ')')
7431 {
7432 as_bad (_("expecting `)' "
7433 "after scale factor in `%s'"),
7434 operand_string);
7435 return 0;
7436 }
7437 }
7438 else if (!i.index_reg)
7439 {
7440 as_bad (_("expecting index register or scale factor "
7441 "after `,'; got '%c'"),
7442 *base_string);
7443 return 0;
7444 }
7445 }
7446 else if (*base_string != ')')
7447 {
7448 as_bad (_("expecting `,' or `)' "
7449 "after base register in `%s'"),
7450 operand_string);
7451 return 0;
7452 }
7453 }
7454 else if (*base_string == REGISTER_PREFIX)
7455 {
7456 as_bad (_("bad register name `%s'"), base_string);
7457 return 0;
7458 }
7459 }
7460
7461 /* If there's an expression beginning the operand, parse it,
7462 assuming displacement_string_start and
7463 displacement_string_end are meaningful. */
7464 if (displacement_string_start != displacement_string_end)
7465 {
7466 if (!i386_displacement (displacement_string_start,
7467 displacement_string_end))
7468 return 0;
7469 }
7470
7471 /* Special case for (%dx) while doing input/output op. */
7472 if (i.base_reg
7473 && operand_type_equal (&i.base_reg->reg_type,
7474 &reg16_inoutportreg)
7475 && i.index_reg == 0
7476 && i.log2_scale_factor == 0
7477 && i.seg[i.mem_operands] == 0
7478 && !operand_type_check (i.types[this_operand], disp))
7479 {
7480 i.types[this_operand] = inoutportreg;
7481 return 1;
7482 }
7483
7484 if (i386_index_check (operand_string) == 0)
7485 return 0;
7486 i.types[this_operand].bitfield.mem = 1;
7487 i.mem_operands++;
7488 }
7489 else
7490 {
7491 /* It's not a memory operand; argh! */
7492 as_bad (_("invalid char %s beginning operand %d `%s'"),
7493 output_invalid (*op_string),
7494 this_operand + 1,
7495 op_string);
7496 return 0;
7497 }
7498 return 1; /* Normal return. */
7499}
7500\f
7501/* md_estimate_size_before_relax()
7502
7503 Called just before relax() for rs_machine_dependent frags. The x86
7504 assembler uses these frags to handle variable size jump
7505 instructions.
7506
7507 Any symbol that is now undefined will not become defined.
7508 Return the correct fr_subtype in the frag.
7509 Return the initial "guess for variable size of frag" to caller.
7510 The guess is actually the growth beyond the fixed part. Whatever
7511 we do to grow the fixed or variable part contributes to our
7512 returned value. */
7513
7514int
7515md_estimate_size_before_relax (fragP, segment)
7516 fragS *fragP;
7517 segT segment;
7518{
7519 /* We've already got fragP->fr_subtype right; all we have to do is
7520 check for un-relaxable symbols. On an ELF system, we can't relax
7521 an externally visible symbol, because it may be overridden by a
7522 shared library. */
7523 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7524#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7525 || (IS_ELF
7526 && (S_IS_EXTERNAL (fragP->fr_symbol)
7527 || S_IS_WEAK (fragP->fr_symbol)
7528 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7529 & BSF_GNU_INDIRECT_FUNCTION))))
7530#endif
7531#if defined (OBJ_COFF) && defined (TE_PE)
7532 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7533 && S_IS_WEAK (fragP->fr_symbol))
7534#endif
7535 )
7536 {
7537 /* Symbol is undefined in this segment, or we need to keep a
7538 reloc so that weak symbols can be overridden. */
7539 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7540 enum bfd_reloc_code_real reloc_type;
7541 unsigned char *opcode;
7542 int old_fr_fix;
7543
7544 if (fragP->fr_var != NO_RELOC)
7545 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7546 else if (size == 2)
7547 reloc_type = BFD_RELOC_16_PCREL;
7548 else
7549 reloc_type = BFD_RELOC_32_PCREL;
7550
7551 old_fr_fix = fragP->fr_fix;
7552 opcode = (unsigned char *) fragP->fr_opcode;
7553
7554 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7555 {
7556 case UNCOND_JUMP:
7557 /* Make jmp (0xeb) a (d)word displacement jump. */
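 /* That is, `eb xx' becomes `e9 <rel32>' (or `e9 <rel16>' when
    the CODE16 flag is set).  */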
7558 opcode[0] = 0xe9;
7559 fragP->fr_fix += size;
7560 fix_new (fragP, old_fr_fix, size,
7561 fragP->fr_symbol,
7562 fragP->fr_offset, 1,
7563 reloc_type);
7564 break;
7565
7566 case COND_JUMP86:
7567 if (size == 2
7568 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7569 {
7570 /* Negate the condition, and branch past an
7571 unconditional jump. */
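 /* E.g. a 16-bit `je target' (74 xx) becomes 75 03 e9 <rel16>:
    a jne past the following 3-byte `jmp target'.  */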
7572 opcode[0] ^= 1;
7573 opcode[1] = 3;
7574 /* Insert an unconditional jump. */
7575 opcode[2] = 0xe9;
7576 /* We added two extra opcode bytes, and have a two byte
7577 offset. */
7578 fragP->fr_fix += 2 + 2;
7579 fix_new (fragP, old_fr_fix + 2, 2,
7580 fragP->fr_symbol,
7581 fragP->fr_offset, 1,
7582 reloc_type);
7583 break;
7584 }
7585 /* Fall through. */
7586
7587 case COND_JUMP:
7588 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7589 {
7590 fixS *fixP;
7591
7592 fragP->fr_fix += 1;
7593 fixP = fix_new (fragP, old_fr_fix, 1,
7594 fragP->fr_symbol,
7595 fragP->fr_offset, 1,
7596 BFD_RELOC_8_PCREL);
7597 fixP->fx_signed = 1;
7598 break;
7599 }
7600
7601 /* This changes the byte-displacement jump 0x7N
7602 to the (d)word-displacement jump 0x0f,0x8N. */
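 /* E.g. `je' relaxes from 74 xx to 0f 84 <rel32>
    (0x74 + 0x10 == 0x84).  */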
7603 opcode[1] = opcode[0] + 0x10;
7604 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7605 /* We've added an opcode byte. */
7606 fragP->fr_fix += 1 + size;
7607 fix_new (fragP, old_fr_fix + 1, size,
7608 fragP->fr_symbol,
7609 fragP->fr_offset, 1,
7610 reloc_type);
7611 break;
7612
7613 default:
7614 BAD_CASE (fragP->fr_subtype);
7615 break;
7616 }
7617 frag_wane (fragP);
7618 return fragP->fr_fix - old_fr_fix;
7619 }
7620
7621 /* Guess size depending on current relax state. Initially the relax
7622 state will correspond to a short jump and we return 1, because
7623 the variable part of the frag (the branch offset) is one byte
7624 long. However, we can relax a section more than once and in that
7625 case we must either set fr_subtype back to the unrelaxed state,
7626 or return the value for the appropriate branch. */
7627 return md_relax_table[fragP->fr_subtype].rlx_length;
7628}
7629
7630/* Called after relax() is finished.
7631
7632 In: Address of frag.
7633 fr_type == rs_machine_dependent.
7634 fr_subtype is what the address relaxed to.
7635
7636 Out: Any fixSs and constants are set up.
7637 Caller will turn frag into a ".space 0". */
7638
7639void
7640md_convert_frag (abfd, sec, fragP)
7641 bfd *abfd ATTRIBUTE_UNUSED;
7642 segT sec ATTRIBUTE_UNUSED;
7643 fragS *fragP;
7644{
7645 unsigned char *opcode;
7646 unsigned char *where_to_put_displacement = NULL;
7647 offsetT target_address;
7648 offsetT opcode_address;
7649 unsigned int extension = 0;
7650 offsetT displacement_from_opcode_start;
7651
7652 opcode = (unsigned char *) fragP->fr_opcode;
7653
7654 /* Address we want to reach in file space. */
7655 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7656
7657 /* Address opcode resides at in file space. */
7658 opcode_address = fragP->fr_address + fragP->fr_fix;
7659
7660 /* Displacement from opcode start to fill into instruction. */
7661 displacement_from_opcode_start = target_address - opcode_address;
7662
7663 if ((fragP->fr_subtype & BIG) == 0)
7664 {
7665 /* Don't have to change opcode. */
7666 extension = 1; /* 1 opcode + 1 displacement */
7667 where_to_put_displacement = &opcode[1];
7668 }
7669 else
7670 {
7671 if (no_cond_jump_promotion
7672 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7673 as_warn_where (fragP->fr_file, fragP->fr_line,
7674 _("long jump required"));
7675
7676 switch (fragP->fr_subtype)
7677 {
7678 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7679 extension = 4; /* 1 opcode + 4 displacement */
7680 opcode[0] = 0xe9;
7681 where_to_put_displacement = &opcode[1];
7682 break;
7683
7684 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7685 extension = 2; /* 1 opcode + 2 displacement */
7686 opcode[0] = 0xe9;
7687 where_to_put_displacement = &opcode[1];
7688 break;
7689
7690 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7691 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7692 extension = 5; /* 2 opcode + 4 displacement */
7693 opcode[1] = opcode[0] + 0x10;
7694 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7695 where_to_put_displacement = &opcode[2];
7696 break;
7697
7698 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7699 extension = 3; /* 2 opcode + 2 displacement */
7700 opcode[1] = opcode[0] + 0x10;
7701 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7702 where_to_put_displacement = &opcode[2];
7703 break;
7704
7705 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7706 extension = 4;
7707 opcode[0] ^= 1;
7708 opcode[1] = 3;
7709 opcode[2] = 0xe9;
7710 where_to_put_displacement = &opcode[3];
7711 break;
7712
7713 default:
7714 BAD_CASE (fragP->fr_subtype);
7715 break;
7716 }
7717 }
7718
7719 /* If size is less than four we are sure that the operand fits,
7720 but if it's 4, then it could be that the displacement is larger
7721 than -/+ 2GB. */
7722 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7723 && object_64bit
7724 && ((addressT) (displacement_from_opcode_start - extension
7725 + ((addressT) 1 << 31))
7726 > (((addressT) 2 << 31) - 1)))
7727 {
7728 as_bad_where (fragP->fr_file, fragP->fr_line,
7729 _("jump target out of range"));
7730 /* Make us emit 0. */
7731 displacement_from_opcode_start = extension;
7732 }
7733 /* Now put displacement after opcode. */
7734 md_number_to_chars ((char *) where_to_put_displacement,
7735 (valueT) (displacement_from_opcode_start - extension),
7736 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7737 fragP->fr_fix += extension;
7738}
7739\f
7740/* Apply a fixup (fixS) to segment data, once it has been determined
7741 by our caller that we have all the info we need to fix it up.
7742
7743 On the 386, immediates, displacements, and data pointers are all in
7744 the same (little-endian) format, so we don't need to care about which
7745 we are handling. */
7746
7747void
7748md_apply_fix (fixP, valP, seg)
7749 /* The fix we're to put in. */
7750 fixS *fixP;
7751 /* Pointer to the value of the bits. */
7752 valueT *valP;
7753 /* Segment fix is from. */
7754 segT seg ATTRIBUTE_UNUSED;
7755{
7756 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7757 valueT value = *valP;
7758
7759#if !defined (TE_Mach)
7760 if (fixP->fx_pcrel)
7761 {
7762 switch (fixP->fx_r_type)
7763 {
7764 default:
7765 break;
7766
7767 case BFD_RELOC_64:
7768 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7769 break;
7770 case BFD_RELOC_32:
7771 case BFD_RELOC_X86_64_32S:
7772 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7773 break;
7774 case BFD_RELOC_16:
7775 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7776 break;
7777 case BFD_RELOC_8:
7778 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7779 break;
7780 }
7781 }
7782
7783 if (fixP->fx_addsy != NULL
7784 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7785 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7786 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7787 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7788 && !use_rela_relocations)
7789 {
7790 /* This is a hack. There should be a better way to handle this.
7791 This covers for the fact that bfd_install_relocation will
7792 subtract the current location (for partial_inplace, PC relative
7793 relocations); see more below. */
7794#ifndef OBJ_AOUT
7795 if (IS_ELF
7796#ifdef TE_PE
7797 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7798#endif
7799 )
7800 value += fixP->fx_where + fixP->fx_frag->fr_address;
7801#endif
7802#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7803 if (IS_ELF)
7804 {
7805 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7806
7807 if ((sym_seg == seg
7808 || (symbol_section_p (fixP->fx_addsy)
7809 && sym_seg != absolute_section))
7810 && !generic_force_reloc (fixP))
7811 {
7812 /* Yes, we add the values in twice. This is because
7813 bfd_install_relocation subtracts them out again. I think
7814 bfd_install_relocation is broken, but I don't dare change
7815 it. FIXME. */
7816 value += fixP->fx_where + fixP->fx_frag->fr_address;
7817 }
7818 }
7819#endif
7820#if defined (OBJ_COFF) && defined (TE_PE)
7821 /* For some reason, the PE format does not store a
7822 section address offset for a PC relative symbol. */
7823 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7824 || S_IS_WEAK (fixP->fx_addsy))
7825 value += md_pcrel_from (fixP);
7826#endif
7827 }
7828#if defined (OBJ_COFF) && defined (TE_PE)
7829 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7830 {
7831 value -= S_GET_VALUE (fixP->fx_addsy);
7832 }
7833#endif
7834
7835 /* Fix a few things - the dynamic linker expects certain values here,
7836 and we must not disappoint it. */
7837#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7838 if (IS_ELF && fixP->fx_addsy)
7839 switch (fixP->fx_r_type)
7840 {
7841 case BFD_RELOC_386_PLT32:
7842 case BFD_RELOC_X86_64_PLT32:
7843 /* Make the jump instruction point to the address of the operand. At
7844 runtime we merely add the offset to the actual PLT entry. */
7845 value = -4;
7846 break;
7847
7848 case BFD_RELOC_386_TLS_GD:
7849 case BFD_RELOC_386_TLS_LDM:
7850 case BFD_RELOC_386_TLS_IE_32:
7851 case BFD_RELOC_386_TLS_IE:
7852 case BFD_RELOC_386_TLS_GOTIE:
7853 case BFD_RELOC_386_TLS_GOTDESC:
7854 case BFD_RELOC_X86_64_TLSGD:
7855 case BFD_RELOC_X86_64_TLSLD:
7856 case BFD_RELOC_X86_64_GOTTPOFF:
7857 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7858 value = 0; /* Fully resolved at runtime. No addend. */
7859 /* Fallthrough */
7860 case BFD_RELOC_386_TLS_LE:
7861 case BFD_RELOC_386_TLS_LDO_32:
7862 case BFD_RELOC_386_TLS_LE_32:
7863 case BFD_RELOC_X86_64_DTPOFF32:
7864 case BFD_RELOC_X86_64_DTPOFF64:
7865 case BFD_RELOC_X86_64_TPOFF32:
7866 case BFD_RELOC_X86_64_TPOFF64:
7867 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7868 break;
7869
7870 case BFD_RELOC_386_TLS_DESC_CALL:
7871 case BFD_RELOC_X86_64_TLSDESC_CALL:
7872 value = 0; /* Fully resolved at runtime. No addend. */
7873 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7874 fixP->fx_done = 0;
7875 return;
7876
7877 case BFD_RELOC_386_GOT32:
7878 case BFD_RELOC_X86_64_GOT32:
7879 value = 0; /* Fully resolved at runtime. No addend. */
7880 break;
7881
7882 case BFD_RELOC_VTABLE_INHERIT:
7883 case BFD_RELOC_VTABLE_ENTRY:
7884 fixP->fx_done = 0;
7885 return;
7886
7887 default:
7888 break;
7889 }
7890#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
7891 *valP = value;
7892#endif /* !defined (TE_Mach) */
7893
7894 /* Are we finished with this relocation now? */
7895 if (fixP->fx_addsy == NULL)
7896 fixP->fx_done = 1;
7897#if defined (OBJ_COFF) && defined (TE_PE)
7898 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7899 {
7900 fixP->fx_done = 0;
7901 /* Remember value for tc_gen_reloc. */
7902 fixP->fx_addnumber = value;
7903 /* Clear out the frag for now. */
7904 value = 0;
7905 }
7906#endif
7907 else if (use_rela_relocations)
7908 {
7909 fixP->fx_no_overflow = 1;
7910 /* Remember value for tc_gen_reloc. */
7911 fixP->fx_addnumber = value;
7912 value = 0;
7913 }
7914
7915 md_number_to_chars (p, value, fixP->fx_size);
7916}
7917\f
7918char *
7919md_atof (int type, char *litP, int *sizeP)
7920{
7921 /* This outputs the LITTLENUMs in REVERSE order;
7922 in accord with the littleendian 386. */
7923 return ieee_md_atof (type, litP, sizeP, FALSE);
7924}
7925\f
7926static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
7927
7928static char *
7929output_invalid (int c)
7930{
7931 if (ISPRINT (c))
7932 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7933 "'%c'", c);
7934 else
7935 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7936 "(0x%x)", (unsigned char) c);
7937 return output_invalid_buf;
7938}
7939
7940/* REG_STRING starts *before* REGISTER_PREFIX. */
7941
7942static const reg_entry *
7943parse_real_register (char *reg_string, char **end_op)
7944{
7945 char *s = reg_string;
7946 char *p;
7947 char reg_name_given[MAX_REG_NAME_SIZE + 1];
7948 const reg_entry *r;
7949
7950 /* Skip possible REGISTER_PREFIX and possible whitespace. */
7951 if (*s == REGISTER_PREFIX)
7952 ++s;
7953
7954 if (is_space_char (*s))
7955 ++s;
7956
7957 p = reg_name_given;
7958 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
7959 {
7960 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
7961 return (const reg_entry *) NULL;
7962 s++;
7963 }
7964
7965 /* For naked regs, make sure that we are not dealing with an identifier.
7966 This prevents confusing an identifier like `eax_var' with register
7967 `eax'. */
7968 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
7969 return (const reg_entry *) NULL;
7970
7971 *end_op = s;
7972
7973 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
7974
7975 /* Handle floating point regs, allowing spaces in the (i) part. */
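 /* E.g. `%st ( 3 )' is accepted here and resolves to st(3).  */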
7976 if (r == i386_regtab /* %st is first entry of table */)
7977 {
7978 if (is_space_char (*s))
7979 ++s;
7980 if (*s == '(')
7981 {
7982 ++s;
7983 if (is_space_char (*s))
7984 ++s;
7985 if (*s >= '0' && *s <= '7')
7986 {
7987 int fpr = *s - '0';
7988 ++s;
7989 if (is_space_char (*s))
7990 ++s;
7991 if (*s == ')')
7992 {
7993 *end_op = s + 1;
7994 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
7995 know (r);
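	      /* st(0) through st(7) occupy consecutive i386_regtab
		 slots, so st(FPR) is simply FPR entries past st(0).  */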
7996 return r + fpr;
7997 }
7998 }
7999 /* We have "%st(" then garbage. */
8000 return (const reg_entry *) NULL;
8001 }
8002 }
8003
8004 if (r == NULL || allow_pseudo_reg)
8005 return r;
8006
8007 if (operand_type_all_zero (&r->reg_type))
8008 return (const reg_entry *) NULL;
8009
8010 if ((r->reg_type.bitfield.reg32
8011 || r->reg_type.bitfield.sreg3
8012 || r->reg_type.bitfield.control
8013 || r->reg_type.bitfield.debug
8014 || r->reg_type.bitfield.test)
8015 && !cpu_arch_flags.bitfield.cpui386)
8016 return (const reg_entry *) NULL;
8017
8018 if (r->reg_type.bitfield.floatreg
8019 && !cpu_arch_flags.bitfield.cpu8087
8020 && !cpu_arch_flags.bitfield.cpu287
8021 && !cpu_arch_flags.bitfield.cpu387)
8022 return (const reg_entry *) NULL;
8023
8024 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8025 return (const reg_entry *) NULL;
8026
8027 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8028 return (const reg_entry *) NULL;
8029
8030 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8031 return (const reg_entry *) NULL;
8032
8033  /* Don't allow fake index registers unless allow_index_reg is non-zero.  */
8034 if (!allow_index_reg
8035 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8036 return (const reg_entry *) NULL;
8037
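  /* Outside 64-bit mode, reject registers that need a REX prefix or
     are 64 bits wide; control registers are exempt when the CPU
     supports long mode.  */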
8038 if (((r->reg_flags & (RegRex64 | RegRex))
8039 || r->reg_type.bitfield.reg64)
8040 && (!cpu_arch_flags.bitfield.cpulm
8041 || !operand_type_equal (&r->reg_type, &control))
8042 && flag_code != CODE_64BIT)
8043 return (const reg_entry *) NULL;
8044
8045 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8046 return (const reg_entry *) NULL;
8047
8048 return r;
8049}
8050
8051/* REG_STRING starts *before* REGISTER_PREFIX. */
8052
8053static const reg_entry *
8054parse_register (char *reg_string, char **end_op)
8055{
8056 const reg_entry *r;
8057
8058 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8059 r = parse_real_register (reg_string, end_op);
8060 else
8061 r = NULL;
8062 if (!r)
8063 {
8064 char *save = input_line_pointer;
8065 char c;
8066 symbolS *symbolP;
8067
8068 input_line_pointer = reg_string;
8069 c = get_symbol_end ();
8070 symbolP = symbol_find (reg_string);
8071 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8072 {
8073 const expressionS *e = symbol_get_value_expression (symbolP);
8074
8075 know (e->X_op == O_register);
8076 know (e->X_add_number >= 0
8077 && (valueT) e->X_add_number < i386_regtab_size);
8078 r = i386_regtab + e->X_add_number;
8079 *end_op = input_line_pointer;
8080 }
8081 *input_line_pointer = c;
8082 input_line_pointer = save;
8083 }
8084 return r;
8085}
8086
8087int
8088i386_parse_name (char *name, expressionS *e, char *nextcharP)
8089{
8090 const reg_entry *r;
8091 char *end = input_line_pointer;
8092
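  /* NAME was NUL-terminated at END by the caller, with the original
     character saved in *NEXTCHARP.  Restore it temporarily so a
     register whose spelling continues past END (e.g. a naked st(1)
     in Intel syntax) can be parsed in one piece.  */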
8093 *end = *nextcharP;
8094 r = parse_register (name, &input_line_pointer);
8095 if (r && end <= input_line_pointer)
8096 {
8097 *nextcharP = *input_line_pointer;
8098 *input_line_pointer = 0;
8099 e->X_op = O_register;
8100 e->X_add_number = r - i386_regtab;
8101 return 1;
8102 }
8103 input_line_pointer = end;
8104 *end = 0;
8105 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8106}
8107
8108void
8109md_operand (expressionS *e)
8110{
8111 char *end;
8112 const reg_entry *r;
8113
8114 switch (*input_line_pointer)
8115 {
8116 case REGISTER_PREFIX:
8117 r = parse_real_register (input_line_pointer, &end);
8118 if (r)
8119 {
8120 e->X_op = O_register;
8121 e->X_add_number = r - i386_regtab;
8122 input_line_pointer = end;
8123 }
8124 break;
8125
8126 case '[':
8127 gas_assert (intel_syntax);
8128 end = input_line_pointer++;
8129 expression (e);
8130 if (*input_line_pointer == ']')
8131 {
8132 ++input_line_pointer;
8133 e->X_op_symbol = make_expr_symbol (e);
8134 e->X_add_symbol = NULL;
8135 e->X_add_number = 0;
8136 e->X_op = O_index;
8137 }
8138 else
8139 {
8140 e->X_op = O_absent;
8141 input_line_pointer = end;
8142 }
8143 break;
8144 }
8145}
8146
8147\f
8148#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8149const char *md_shortopts = "kVQ:sqn";
8150#else
8151const char *md_shortopts = "qn";
8152#endif
8153
8154#define OPTION_32 (OPTION_MD_BASE + 0)
8155#define OPTION_64 (OPTION_MD_BASE + 1)
8156#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8157#define OPTION_MARCH (OPTION_MD_BASE + 3)
8158#define OPTION_MTUNE (OPTION_MD_BASE + 4)
8159#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8160#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8161#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8162#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8163#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8164#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8165#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8166#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8167#define OPTION_X32 (OPTION_MD_BASE + 13)
8168
8169struct option md_longopts[] =
8170{
8171 {"32", no_argument, NULL, OPTION_32},
8172#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8173 || defined (TE_PE) || defined (TE_PEP))
8174 {"64", no_argument, NULL, OPTION_64},
8175#endif
8176#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8177 {"x32", no_argument, NULL, OPTION_X32},
8178#endif
8179 {"divide", no_argument, NULL, OPTION_DIVIDE},
8180 {"march", required_argument, NULL, OPTION_MARCH},
8181 {"mtune", required_argument, NULL, OPTION_MTUNE},
8182 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8183 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8184 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8185 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8186 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8187 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8188 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8189 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8190 {NULL, no_argument, NULL, 0}
8191};
8192size_t md_longopts_size = sizeof (md_longopts);
8193
8194int
8195md_parse_option (int c, char *arg)
8196{
8197 unsigned int j;
8198 char *arch, *next;
8199
8200 switch (c)
8201 {
8202 case 'n':
8203 optimize_align_code = 0;
8204 break;
8205
8206 case 'q':
8207 quiet_warnings = 1;
8208 break;
8209
8210#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8211 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8212 should be emitted or not. FIXME: Not implemented. */
8213 case 'Q':
8214 break;
8215
8216 /* -V: SVR4 argument to print version ID. */
8217 case 'V':
8218 print_version_id ();
8219 break;
8220
8221 /* -k: Ignore for FreeBSD compatibility. */
8222 case 'k':
8223 break;
8224
8225 case 's':
8226 /* -s: On i386 Solaris, this tells the native assembler to use
8227 .stab instead of .stab.excl. We always use .stab anyhow. */
8228 break;
8229#endif
8230#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8231 || defined (TE_PE) || defined (TE_PEP))
8232 case OPTION_64:
8233 {
8234 const char **list, **l;
8235
8236 list = bfd_target_list ();
8237 for (l = list; *l != NULL; l++)
8238 if (CONST_STRNEQ (*l, "elf64-x86-64")
8239 || strcmp (*l, "coff-x86-64") == 0
8240 || strcmp (*l, "pe-x86-64") == 0
8241 || strcmp (*l, "pei-x86-64") == 0)
8242 {
8243 default_arch = "x86_64";
8244 break;
8245 }
8246 if (*l == NULL)
8247 as_fatal (_("No compiled in support for x86_64"));
8248 free (list);
8249 }
8250 break;
8251#endif
8252
8253#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8254 case OPTION_X32:
8255 if (IS_ELF)
8256 {
8257 const char **list, **l;
8258
8259 list = bfd_target_list ();
8260 for (l = list; *l != NULL; l++)
8261 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8262 {
8263 default_arch = "x86_64:32";
8264 break;
8265 }
8266 if (*l == NULL)
8267 as_fatal (_("No compiled in support for 32bit x86_64"));
8268 free (list);
8269 }
8270 else
8271 as_fatal (_("32bit x86_64 is only supported for ELF"));
8272 break;
8273#endif
8274
8275 case OPTION_32:
8276 default_arch = "i386";
8277 break;
8278
8279 case OPTION_DIVIDE:
8280#ifdef SVR4_COMMENT_CHARS
8281 {
8282 char *n, *t;
8283 const char *s;
8284
8285 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8286 t = n;
8287 for (s = i386_comment_chars; *s != '\0'; s++)
8288 if (*s != '/')
8289 *t++ = *s;
8290 *t = '\0';
8291 i386_comment_chars = n;
8292 }
8293#endif
8294 break;
8295
8296 case OPTION_MARCH:
8297 arch = xstrdup (arg);
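      /* The argument is a '+'-separated list; each token is looked up
	 first as a processor name and then as an ISA extension (a
	 dot-prefixed cpu_arch entry, possibly a negated one).  */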
8298 do
8299 {
8300 if (*arch == '.')
8301 as_fatal (_("Invalid -march= option: `%s'"), arg);
8302 next = strchr (arch, '+');
8303 if (next)
8304 *next++ = '\0';
8305 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8306 {
8307 if (strcmp (arch, cpu_arch [j].name) == 0)
8308 {
8309 /* Processor. */
8310 if (! cpu_arch[j].flags.bitfield.cpui386)
8311 continue;
8312
8313 cpu_arch_name = cpu_arch[j].name;
8314 cpu_sub_arch_name = NULL;
8315 cpu_arch_flags = cpu_arch[j].flags;
8316 cpu_arch_isa = cpu_arch[j].type;
8317 cpu_arch_isa_flags = cpu_arch[j].flags;
8318 if (!cpu_arch_tune_set)
8319 {
8320 cpu_arch_tune = cpu_arch_isa;
8321 cpu_arch_tune_flags = cpu_arch_isa_flags;
8322 }
8323 break;
8324 }
8325 else if (*cpu_arch [j].name == '.'
8326 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8327 {
8328		  /* ISA extension.  */
8329 i386_cpu_flags flags;
8330
8331 if (!cpu_arch[j].negated)
8332 flags = cpu_flags_or (cpu_arch_flags,
8333 cpu_arch[j].flags);
8334 else
8335 flags = cpu_flags_and_not (cpu_arch_flags,
8336 cpu_arch[j].flags);
8337 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8338 {
8339 if (cpu_sub_arch_name)
8340 {
8341 char *name = cpu_sub_arch_name;
8342 cpu_sub_arch_name = concat (name,
8343 cpu_arch[j].name,
8344 (const char *) NULL);
8345 free (name);
8346 }
8347 else
8348 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8349 cpu_arch_flags = flags;
8350 cpu_arch_isa_flags = flags;
8351 }
8352 break;
8353 }
8354 }
8355
8356 if (j >= ARRAY_SIZE (cpu_arch))
8357 as_fatal (_("Invalid -march= option: `%s'"), arg);
8358
8359 arch = next;
8360 }
8361      while (next != NULL);
8362 break;
8363
8364 case OPTION_MTUNE:
8365 if (*arg == '.')
8366 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8367 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8368 {
8369 if (strcmp (arg, cpu_arch [j].name) == 0)
8370 {
8371 cpu_arch_tune_set = 1;
8372 cpu_arch_tune = cpu_arch [j].type;
8373 cpu_arch_tune_flags = cpu_arch[j].flags;
8374 break;
8375 }
8376 }
8377 if (j >= ARRAY_SIZE (cpu_arch))
8378 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8379 break;
8380
8381 case OPTION_MMNEMONIC:
8382 if (strcasecmp (arg, "att") == 0)
8383 intel_mnemonic = 0;
8384 else if (strcasecmp (arg, "intel") == 0)
8385 intel_mnemonic = 1;
8386 else
8387 as_fatal (_("Invalid -mmnemonic= option: `%s'"), arg);
8388 break;
8389
8390 case OPTION_MSYNTAX:
8391 if (strcasecmp (arg, "att") == 0)
8392 intel_syntax = 0;
8393 else if (strcasecmp (arg, "intel") == 0)
8394 intel_syntax = 1;
8395 else
8396 as_fatal (_("Invalid -msyntax= option: `%s'"), arg);
8397 break;
8398
8399 case OPTION_MINDEX_REG:
8400 allow_index_reg = 1;
8401 break;
8402
8403 case OPTION_MNAKED_REG:
8404 allow_naked_reg = 1;
8405 break;
8406
8407 case OPTION_MOLD_GCC:
8408 old_gcc = 1;
8409 break;
8410
8411 case OPTION_MSSE2AVX:
8412 sse2avx = 1;
8413 break;
8414
8415 case OPTION_MSSE_CHECK:
8416 if (strcasecmp (arg, "error") == 0)
8417 sse_check = sse_check_error;
8418 else if (strcasecmp (arg, "warning") == 0)
8419 sse_check = sse_check_warning;
8420 else if (strcasecmp (arg, "none") == 0)
8421 sse_check = sse_check_none;
8422 else
8423 as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
8424 break;
8425
8426 case OPTION_MAVXSCALAR:
8427 if (strcasecmp (arg, "128") == 0)
8428 avxscalar = vex128;
8429 else if (strcasecmp (arg, "256") == 0)
8430 avxscalar = vex256;
8431 else
8432 as_fatal (_("Invalid -mavxscalar= option: `%s'"), arg);
8433 break;
8434
8435 default:
8436 return 0;
8437 }
8438 return 1;
8439}
8440
8441#define MESSAGE_TEMPLATE \
8442" "
8443
8444static void
8445show_arch (FILE *stream, int ext, int check)
8446{
8447 static char message[] = MESSAGE_TEMPLATE;
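  /* Presumably 27 leading blanks so that wrapped CPU/extension lists
     line up under the option descriptions printed by md_show_usage.  */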
8448 char *start = message + 27;
8449 char *p;
8450 int size = sizeof (MESSAGE_TEMPLATE);
8451 int left;
8452 const char *name;
8453 int len;
8454 unsigned int j;
8455
8456 p = start;
8457 left = size - (start - message);
8458 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8459 {
8460 /* Should it be skipped? */
8461 if (cpu_arch [j].skip)
8462 continue;
8463
8464 name = cpu_arch [j].name;
8465 len = cpu_arch [j].len;
8466 if (*name == '.')
8467 {
8468 /* It is an extension. Skip if we aren't asked to show it. */
8469 if (ext)
8470 {
8471 name++;
8472 len--;
8473 }
8474 else
8475 continue;
8476 }
8477 else if (ext)
8478 {
8479	  /* It is a processor.  Skip if we only show extensions.  */
8480 continue;
8481 }
8482 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8483 {
8484 /* It is an impossible processor - skip. */
8485 continue;
8486 }
8487
8488 /* Reserve 2 spaces for ", " or ",\0" */
8489 left -= len + 2;
8490
8491 /* Check if there is any room. */
8492 if (left >= 0)
8493 {
8494 if (p != start)
8495 {
8496 *p++ = ',';
8497 *p++ = ' ';
8498 }
8499 p = mempcpy (p, name, len);
8500 }
8501 else
8502 {
8503 /* Output the current message now and start a new one. */
8504 *p++ = ',';
8505 *p = '\0';
8506 fprintf (stream, "%s\n", message);
8507 p = start;
8508 left = size - (start - message) - len - 2;
8509
8510 gas_assert (left >= 0);
8511
8512 p = mempcpy (p, name, len);
8513 }
8514 }
8515
8516 *p = '\0';
8517 fprintf (stream, "%s\n", message);
8518}
8519
8520void
8521md_show_usage (FILE *stream)
8522{
8523#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8524 fprintf (stream, _("\
8525 -Q ignored\n\
8526 -V print assembler version number\n\
8527 -k ignored\n"));
8528#endif
8529 fprintf (stream, _("\
8530 -n Do not optimize code alignment\n\
8531 -q quieten some warnings\n"));
8532#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8533 fprintf (stream, _("\
8534 -s ignored\n"));
8535#endif
8536#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8537 || defined (TE_PE) || defined (TE_PEP))
8538 fprintf (stream, _("\
8539 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8540#endif
8541#ifdef SVR4_COMMENT_CHARS
8542 fprintf (stream, _("\
8543 --divide do not treat `/' as a comment character\n"));
8544#else
8545 fprintf (stream, _("\
8546 --divide ignored\n"));
8547#endif
8548 fprintf (stream, _("\
8549 -march=CPU[,+EXTENSION...]\n\
8550 generate code for CPU and EXTENSION, CPU is one of:\n"));
8551 show_arch (stream, 0, 1);
8552 fprintf (stream, _("\
8553 EXTENSION is combination of:\n"));
8554 show_arch (stream, 1, 0);
8555 fprintf (stream, _("\
8556 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8557 show_arch (stream, 0, 0);
8558 fprintf (stream, _("\
8559 -msse2avx encode SSE instructions with VEX prefix\n"));
8560 fprintf (stream, _("\
8561 -msse-check=[none|error|warning]\n\
8562 check SSE instructions\n"));
8563 fprintf (stream, _("\
8564 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8565 length\n"));
8566 fprintf (stream, _("\
8567 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8568 fprintf (stream, _("\
8569 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8570 fprintf (stream, _("\
8571 -mindex-reg support pseudo index registers\n"));
8572 fprintf (stream, _("\
8573 -mnaked-reg don't require `%%' prefix for registers\n"));
8574 fprintf (stream, _("\
8575 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8576}
8577
8578#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8579 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8580 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8581
8582/* Pick the target format to use. */
8583
8584const char *
8585i386_target_format (void)
8586{
8587 if (!strncmp (default_arch, "x86_64", 6))
8588 {
8589 update_code_flag (CODE_64BIT, 1);
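      /* --64 sets default_arch to "x86_64" and --x32 to "x86_64:32"
	 (see md_parse_option), so the character after "x86_64" tells
	 the two ABIs apart.  */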
8590 if (default_arch[6] == '\0')
8591 x86_elf_abi = X86_64_ABI;
8592 else
8593 x86_elf_abi = X86_64_X32_ABI;
8594 }
8595 else if (!strcmp (default_arch, "i386"))
8596 update_code_flag (CODE_32BIT, 1);
8597 else
8598 as_fatal (_("Unknown architecture"));
8599
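  /* No ISA selected yet: fall back to cpu_arch[0] for 32-bit code and
     cpu_arch[1] for 64-bit code (presumably the generic 32-bit and
     64-bit entries).  */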
8600 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8601 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8602 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8603 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8604
8605 switch (OUTPUT_FLAVOR)
8606 {
8607#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8608 case bfd_target_aout_flavour:
8609 return AOUT_TARGET_FORMAT;
8610#endif
8611#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8612# if defined (TE_PE) || defined (TE_PEP)
8613 case bfd_target_coff_flavour:
8614 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8615# elif defined (TE_GO32)
8616 case bfd_target_coff_flavour:
8617 return "coff-go32";
8618# else
8619 case bfd_target_coff_flavour:
8620 return "coff-i386";
8621# endif
8622#endif
8623#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8624 case bfd_target_elf_flavour:
8625 {
8626 const char *format;
8627
8628 switch (x86_elf_abi)
8629 {
8630 default:
8631 format = ELF_TARGET_FORMAT;
8632 break;
8633 case X86_64_ABI:
8634 use_rela_relocations = 1;
8635 object_64bit = 1;
8636 format = ELF_TARGET_FORMAT64;
8637 break;
8638 case X86_64_X32_ABI:
8639 use_rela_relocations = 1;
8640 object_64bit = 1;
8641 disallow_64bit_reloc = 1;
8642 format = ELF_TARGET_FORMAT32;
8643 break;
8644 }
8645 if (cpu_arch_isa == PROCESSOR_L1OM)
8646 {
8647 if (x86_elf_abi != X86_64_ABI)
8648 as_fatal (_("Intel L1OM is 64bit only"));
8649 return ELF_TARGET_L1OM_FORMAT;
8650 }
8651 else
8652 return format;
8653 }
8654#endif
8655#if defined (OBJ_MACH_O)
8656 case bfd_target_mach_o_flavour:
8657 return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
8658#endif
8659 default:
8660 abort ();
8661 return NULL;
8662 }
8663}
8664
8665#endif /* OBJ_MAYBE_ more than one */
8666
8667#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8668void
8669i386_elf_emit_arch_note (void)
8670{
8671 if (IS_ELF && cpu_arch_name != NULL)
8672 {
8673 char *p;
8674 asection *seg = now_seg;
8675 subsegT subseg = now_subseg;
8676 Elf_Internal_Note i_note;
8677 Elf_External_Note e_note;
8678 asection *note_secp;
8679 int len;
8680
8681 /* Create the .note section. */
8682 note_secp = subseg_new (".note", 0);
8683 bfd_set_section_flags (stdoutput,
8684 note_secp,
8685 SEC_HAS_CONTENTS | SEC_READONLY);
8686
8687 /* Process the arch string. */
8688 len = strlen (cpu_arch_name);
8689
8690 i_note.namesz = len + 1;
8691 i_note.descsz = 0;
8692 i_note.type = NT_ARCH;
8693 p = frag_more (sizeof (e_note.namesz));
8694 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8695 p = frag_more (sizeof (e_note.descsz));
8696 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8697 p = frag_more (sizeof (e_note.type));
8698 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8699 p = frag_more (len + 1);
8700 strcpy (p, cpu_arch_name);
8701
8702 frag_align (2, 0, 0);
8703
8704 subseg_set (seg, subseg);
8705 }
8706}
8707#endif
8708\f
8709symbolS *
8710md_undefined_symbol (char *name)
8712{
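  /* Cheap three-character prefilter before the full strcmp; this hook
     is consulted for every otherwise-unresolved symbol.  */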
8713 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8714 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8715 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8716 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8717 {
8718 if (!GOT_symbol)
8719 {
8720 if (symbol_find (name))
8721 as_bad (_("GOT already in symbol table"));
8722 GOT_symbol = symbol_new (name, undefined_section,
8723 (valueT) 0, &zero_address_frag);
8724	}
8725 return GOT_symbol;
8726 }
8727 return 0;
8728}
8729
8730/* Round up a section size to the appropriate boundary. */
8731
8732valueT
8733md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8736{
8737#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8738 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8739 {
8740 /* For a.out, force the section size to be aligned. If we don't do
8741 this, BFD will align it for us, but it will not write out the
8742 final bytes of the section. This may be a bug in BFD, but it is
8743 easier to fix it here since that is how the other a.out targets
8744 work. */
8745 int align;
8746
8747 align = bfd_get_section_alignment (stdoutput, segment);
8748 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8749 }
8750#endif
8751
8752 return size;
8753}
8754
8755/* On the i386, PC-relative offsets are relative to the start of the
8756 next instruction. That is, the address of the offset, plus its
8757 size, since the offset is always the last part of the insn. */
8758
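/* For instance (illustrative values): a 4-byte pc-relative field
   written at fx_where 1 of a frag whose fr_address is 0x100 yields
   0x100 + 1 + 4 = 0x105, the address of the next instruction, which
   is what the processor adds the displacement to.  */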
8759long
8760md_pcrel_from (fixS *fixP)
8761{
8762 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8763}
8764
8765#ifndef I386COFF
8766
8767static void
8768s_bss (int ignore ATTRIBUTE_UNUSED)
8769{
8770 int temp;
8771
8772#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8773 if (IS_ELF)
8774 obj_elf_section_change_hook ();
8775#endif
8776 temp = get_absolute_expression ();
8777 subseg_set (bss_section, (subsegT) temp);
8778 demand_empty_rest_of_line ();
8779}
8780
8781#endif
8782
8783void
8784i386_validate_fix (fixS *fixp)
8785{
8786 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8787 {
8788 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8789 {
8790 if (!object_64bit)
8791 abort ();
8792 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8793 }
8794 else
8795 {
8796 if (!object_64bit)
8797 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8798 else
8799 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8800 }
8801 fixp->fx_subsy = 0;
8802 }
8803}
8804
8805arelent *
8806tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
8809{
8810 arelent *rel;
8811 bfd_reloc_code_real_type code;
8812
8813 switch (fixp->fx_r_type)
8814 {
8815 case BFD_RELOC_X86_64_PLT32:
8816 case BFD_RELOC_X86_64_GOT32:
8817 case BFD_RELOC_X86_64_GOTPCREL:
8818 case BFD_RELOC_386_PLT32:
8819 case BFD_RELOC_386_GOT32:
8820 case BFD_RELOC_386_GOTOFF:
8821 case BFD_RELOC_386_GOTPC:
8822 case BFD_RELOC_386_TLS_GD:
8823 case BFD_RELOC_386_TLS_LDM:
8824 case BFD_RELOC_386_TLS_LDO_32:
8825 case BFD_RELOC_386_TLS_IE_32:
8826 case BFD_RELOC_386_TLS_IE:
8827 case BFD_RELOC_386_TLS_GOTIE:
8828 case BFD_RELOC_386_TLS_LE_32:
8829 case BFD_RELOC_386_TLS_LE:
8830 case BFD_RELOC_386_TLS_GOTDESC:
8831 case BFD_RELOC_386_TLS_DESC_CALL:
8832 case BFD_RELOC_X86_64_TLSGD:
8833 case BFD_RELOC_X86_64_TLSLD:
8834 case BFD_RELOC_X86_64_DTPOFF32:
8835 case BFD_RELOC_X86_64_DTPOFF64:
8836 case BFD_RELOC_X86_64_GOTTPOFF:
8837 case BFD_RELOC_X86_64_TPOFF32:
8838 case BFD_RELOC_X86_64_TPOFF64:
8839 case BFD_RELOC_X86_64_GOTOFF64:
8840 case BFD_RELOC_X86_64_GOTPC32:
8841 case BFD_RELOC_X86_64_GOT64:
8842 case BFD_RELOC_X86_64_GOTPCREL64:
8843 case BFD_RELOC_X86_64_GOTPC64:
8844 case BFD_RELOC_X86_64_GOTPLT64:
8845 case BFD_RELOC_X86_64_PLTOFF64:
8846 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8847 case BFD_RELOC_X86_64_TLSDESC_CALL:
8848 case BFD_RELOC_RVA:
8849 case BFD_RELOC_VTABLE_ENTRY:
8850 case BFD_RELOC_VTABLE_INHERIT:
8851#ifdef TE_PE
8852 case BFD_RELOC_32_SECREL:
8853#endif
8854 code = fixp->fx_r_type;
8855 break;
8856 case BFD_RELOC_X86_64_32S:
8857 if (!fixp->fx_pcrel)
8858 {
8859 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8860 code = fixp->fx_r_type;
8861 break;
8862 }
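      /* Fallthrough */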
8863 default:
8864 if (fixp->fx_pcrel)
8865 {
8866 switch (fixp->fx_size)
8867 {
8868 default:
8869 as_bad_where (fixp->fx_file, fixp->fx_line,
8870 _("can not do %d byte pc-relative relocation"),
8871 fixp->fx_size);
8872 code = BFD_RELOC_32_PCREL;
8873 break;
8874 case 1: code = BFD_RELOC_8_PCREL; break;
8875 case 2: code = BFD_RELOC_16_PCREL; break;
8876 case 4: code = BFD_RELOC_32_PCREL; break;
8877#ifdef BFD64
8878 case 8: code = BFD_RELOC_64_PCREL; break;
8879#endif
8880 }
8881 }
8882 else
8883 {
8884 switch (fixp->fx_size)
8885 {
8886 default:
8887 as_bad_where (fixp->fx_file, fixp->fx_line,
8888 _("can not do %d byte relocation"),
8889 fixp->fx_size);
8890 code = BFD_RELOC_32;
8891 break;
8892 case 1: code = BFD_RELOC_8; break;
8893 case 2: code = BFD_RELOC_16; break;
8894 case 4: code = BFD_RELOC_32; break;
8895#ifdef BFD64
8896 case 8: code = BFD_RELOC_64; break;
8897#endif
8898 }
8899 }
8900 break;
8901 }
8902
8903 if ((code == BFD_RELOC_32
8904 || code == BFD_RELOC_32_PCREL
8905 || code == BFD_RELOC_X86_64_32S)
8906 && GOT_symbol
8907 && fixp->fx_addsy == GOT_symbol)
8908 {
8909 if (!object_64bit)
8910 code = BFD_RELOC_386_GOTPC;
8911 else
8912 code = BFD_RELOC_X86_64_GOTPC32;
8913 }
8914 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
8915 && GOT_symbol
8916 && fixp->fx_addsy == GOT_symbol)
8917 {
8918 code = BFD_RELOC_X86_64_GOTPC64;
8919 }
8920
8921 rel = (arelent *) xmalloc (sizeof (arelent));
8922 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
8923 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8924
8925 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
8926
8927 if (!use_rela_relocations)
8928 {
8929 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
8930 vtable entry to be used in the relocation's section offset. */
8931 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
8932 rel->address = fixp->fx_offset;
8933#if defined (OBJ_COFF) && defined (TE_PE)
8934 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
8935 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
8936 else
8937#endif
8938 rel->addend = 0;
8939 }
8940  /* Use RELA relocations in 64-bit mode.  */
8941 else
8942 {
8943 if (disallow_64bit_reloc)
8944 switch (code)
8945 {
8946 case BFD_RELOC_64:
8947 case BFD_RELOC_X86_64_DTPOFF64:
8948 case BFD_RELOC_X86_64_TPOFF64:
8949 case BFD_RELOC_64_PCREL:
8950 case BFD_RELOC_X86_64_GOTOFF64:
8951 case BFD_RELOC_X86_64_GOT64:
8952 case BFD_RELOC_X86_64_GOTPCREL64:
8953 case BFD_RELOC_X86_64_GOTPC64:
8954 case BFD_RELOC_X86_64_GOTPLT64:
8955 case BFD_RELOC_X86_64_PLTOFF64:
8956 as_bad_where (fixp->fx_file, fixp->fx_line,
8957 _("cannot represent relocation type %s in x32 mode"),
8958 bfd_get_reloc_code_name (code));
8959 break;
8960 default:
8961 break;
8962 }
8963
8964 if (!fixp->fx_pcrel)
8965 rel->addend = fixp->fx_offset;
8966 else
8967 switch (code)
8968 {
8969 case BFD_RELOC_X86_64_PLT32:
8970 case BFD_RELOC_X86_64_GOT32:
8971 case BFD_RELOC_X86_64_GOTPCREL:
8972 case BFD_RELOC_X86_64_TLSGD:
8973 case BFD_RELOC_X86_64_TLSLD:
8974 case BFD_RELOC_X86_64_GOTTPOFF:
8975 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8976 case BFD_RELOC_X86_64_TLSDESC_CALL:
8977 rel->addend = fixp->fx_offset - fixp->fx_size;
8978 break;
8979 default:
8980 rel->addend = (section->vma
8981 - fixp->fx_size
8982 + fixp->fx_addnumber
8983 + md_pcrel_from (fixp));
8984 break;
8985 }
8986 }
8987
8988 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
8989 if (rel->howto == NULL)
8990 {
8991 as_bad_where (fixp->fx_file, fixp->fx_line,
8992 _("cannot represent relocation type %s"),
8993 bfd_get_reloc_code_name (code));
8994 /* Set howto to a garbage value so that we can keep going. */
8995 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
8996 gas_assert (rel->howto != NULL);
8997 }
8998
8999 return rel;
9000}
9001
9002#include "tc-i386-intel.c"
9003
9004void
9005tc_x86_parse_to_dw2regnum (expressionS *exp)
9006{
9007 int saved_naked_reg;
9008 char saved_register_dot;
9009
9010 saved_naked_reg = allow_naked_reg;
9011 allow_naked_reg = 1;
9012 saved_register_dot = register_chars['.'];
9013 register_chars['.'] = '.';
9014 allow_pseudo_reg = 1;
9015 expression_and_evaluate (exp);
9016 allow_pseudo_reg = 0;
9017 register_chars['.'] = saved_register_dot;
9018 allow_naked_reg = saved_naked_reg;
9019
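  /* flag_code >> 1 is 0 for 16- and 32-bit code and 1 for 64-bit code
     (cf. the esp/rsp table in tc_x86_frame_initial_instructions below);
     it selects the matching DWARF register-number column.  */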
9020 if (exp->X_op == O_register && exp->X_add_number >= 0)
9021 {
9022 if ((addressT) exp->X_add_number < i386_regtab_size)
9023 {
9024 exp->X_op = O_constant;
9025 exp->X_add_number = i386_regtab[exp->X_add_number]
9026 .dw2_regnum[flag_code >> 1];
9027 }
9028 else
9029 exp->X_op = O_illegal;
9030 }
9031}
9032
9033void
9034tc_x86_frame_initial_instructions (void)
9035{
9036 static unsigned int sp_regno[2];
9037
9038 if (!sp_regno[flag_code >> 1])
9039 {
9040 char *saved_input = input_line_pointer;
9041 char sp[][4] = {"esp", "rsp"};
9042 expressionS exp;
9043
9044 input_line_pointer = sp[flag_code >> 1];
9045 tc_x86_parse_to_dw2regnum (&exp);
9046 gas_assert (exp.X_op == O_constant);
9047 sp_regno[flag_code >> 1] = exp.X_add_number;
9048 input_line_pointer = saved_input;
9049 }
9050
9051 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9052 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9053}
9054
9055int
9056i386_elf_section_type (const char *str, size_t len)
9057{
9058 if (flag_code == CODE_64BIT
9059 && len == sizeof ("unwind") - 1
9060 && strncmp (str, "unwind", 6) == 0)
9061 return SHT_X86_64_UNWIND;
9062
9063 return -1;
9064}
9065
9066#ifdef TE_SOLARIS
9067void
9068i386_solaris_fix_up_eh_frame (segT sec)
9069{
9070 if (flag_code == CODE_64BIT)
9071 elf_section_type (sec) = SHT_X86_64_UNWIND;
9072}
9073#endif
9074
9075#ifdef TE_PE
9076void
9077tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9078{
9079 expressionS exp;
9080
9081 exp.X_op = O_secrel;
9082 exp.X_add_symbol = symbol;
9083 exp.X_add_number = 0;
9084 emit_expr (&exp, size);
9085}
9086#endif
9087
9088#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9089/* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9090
9091bfd_vma
9092x86_64_section_letter (int letter, char **ptr_msg)
9093{
9094 if (flag_code == CODE_64BIT)
9095 {
9096 if (letter == 'l')
9097 return SHF_X86_64_LARGE;
9098
9099 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9100 }
9101 else
9102 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9103 return -1;
9104}
9105
9106bfd_vma
9107x86_64_section_word (char *str, size_t len)
9108{
9109 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9110 return SHF_X86_64_LARGE;
9111
9112 return -1;
9113}
9114
9115static void
9116handle_large_common (int small ATTRIBUTE_UNUSED)
9117{
9118 if (flag_code != CODE_64BIT)
9119 {
9120 s_comm_internal (0, elf_common_parse);
9121 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9122 }
9123 else
9124 {
9125 static segT lbss_section;
9126 asection *saved_com_section_ptr = elf_com_section_ptr;
9127 asection *saved_bss_section = bss_section;
9128
9129 if (lbss_section == NULL)
9130 {
9131 flagword applicable;
9132 segT seg = now_seg;
9133 subsegT subseg = now_subseg;
9134
9135 /* The .lbss section is for local .largecomm symbols. */
9136 lbss_section = subseg_new (".lbss", 0);
9137 applicable = bfd_applicable_section_flags (stdoutput);
9138 bfd_set_section_flags (stdoutput, lbss_section,
9139 applicable & SEC_ALLOC);
9140 seg_info (lbss_section)->bss = 1;
9141
9142 subseg_set (seg, subseg);
9143 }
9144
9145 elf_com_section_ptr = &_bfd_elf_large_com_section;
9146 bss_section = lbss_section;
9147
9148 s_comm_internal (0, elf_common_parse);
9149
9150 elf_com_section_ptr = saved_com_section_ptr;
9151 bss_section = saved_bss_section;
9152 }
9153}
9154
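/* For the x32 ABI, where 64-bit relocations are unavailable (see
   disallow_64bit_reloc), emit a non-constant 8-byte value as 4 bytes
   followed by 4 zero bytes, i.e. zero-extended to 64 bits; constants
   are still emitted as full 8-byte values.  */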
9155static void
9156handle_quad (int nbytes)
9157{
9158 expressionS exp;
9159
9160 if (x86_elf_abi != X86_64_X32_ABI)
9161 {
9162 cons (nbytes);
9163 return;
9164 }
9165
9166 if (is_it_end_of_statement ())
9167 {
9168 demand_empty_rest_of_line ();
9169 return;
9170 }
9171
9172 do
9173 {
9174 if (*input_line_pointer == '"')
9175 {
9176 as_bad (_("unexpected `\"' in expression"));
9177 ignore_rest_of_line ();
9178 return;
9179 }
9180 x86_cons (&exp, nbytes);
9181 /* Output 4 bytes if not constant. */
9182 if (exp.X_op != O_constant)
9183 nbytes = 4;
9184 emit_expr (&exp, (unsigned int) nbytes);
9185      /* Zero-extend to 8 bytes if not constant.  */
9186 if (nbytes == 4)
9187 {
9188 memset (&exp, '\0', sizeof (exp));
9189 exp.X_op = O_constant;
9190 emit_expr (&exp, nbytes);
9191 }
9192 nbytes = 8;
9193 }
9194 while (*input_line_pointer++ == ',');
9195
9196 input_line_pointer--; /* Put terminator back into stream. */
9197
9198 demand_empty_rest_of_line ();
9199}
9200#endif /* OBJ_ELF || OBJ_MAYBE_ELF */